* [PATCH v2 0/3] misc flush and dirty-mask related adjustments
From: Jan Beulich @ 2018-01-23 10:07 UTC
  To: xen-devel

1: replace vCPU's dirty CPU mask by numeric ID
2: x86: avoid explicit TLB flush when saving exec state
3: drop "domain_" prefix from struct domain's dirty CPU mask

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v2: Addressing review comments - see individual patches.



* [PATCH v2 1/3] replace vCPU's dirty CPU mask by numeric ID
From: Jan Beulich @ 2018-01-23 10:12 UTC
  To: xen-devel
  Cc: Stefano Stabellini, Wei Liu, George Dunlap, Andrew Cooper,
	Ian Jackson, Tim Deegan, Julien Grall

At most one bit can be set in the masks, so especially on larger systems
it's quite a bit of unnecessary memory and processing overhead to track
the information as a mask. Store the numeric ID of the respective CPU
instead, or VCPU_CPU_CLEAN if no dirty state exists.
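
In short, the per-vCPU cpumask gives way to a plain CPU number plus a
sentinel. A minimal sketch of the new representation, distilled from
the sched.h hunks below:

    /* Old: a full cpumask per vCPU, of which at most one bit was set. */
    cpumask_var_t    vcpu_dirty_cpumask;

    /* New: the ID of the CPU holding this vCPU's state, if any. */
    #define VCPU_CPU_CLEAN (~0u)
    unsigned int     dirty_cpu;

    static inline bool vcpu_cpu_dirty(const struct vcpu *v)
    {
        return v->dirty_cpu != VCPU_CPU_CLEAN;
    }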

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
ARM adjustments compile tested only.
---
v2: Introduce VCPU_CPU_CLEAN and vcpu_cpu_dirty(). Re-word comments.
    Re-base.

--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -330,7 +330,7 @@ void context_switch(struct vcpu *prev, s
 {
     ASSERT(local_irq_is_enabled());
     ASSERT(prev != next);
-    ASSERT(cpumask_empty(next->vcpu_dirty_cpumask));
+    ASSERT(!vcpu_cpu_dirty(next));
 
     if ( prev != next )
         update_runstate_area(prev);
@@ -471,7 +471,7 @@ void startup_cpu_idle_loop(void)
     ASSERT(is_idle_vcpu(v));
     /* TODO
        cpumask_set_cpu(v->processor, v->domain->domain_dirty_cpumask);
-       cpumask_set_cpu(v->processor, v->vcpu_dirty_cpumask);
+       v->dirty_cpu = v->processor;
     */
 
     reset_stack_and_jump(idle_loop);
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -146,7 +146,7 @@ void startup_cpu_idle_loop(void)
 
     ASSERT(is_idle_vcpu(v));
     cpumask_set_cpu(v->processor, v->domain->domain_dirty_cpumask);
-    cpumask_set_cpu(v->processor, v->vcpu_dirty_cpumask);
+    v->dirty_cpu = v->processor;
 
     reset_stack_and_jump(idle_loop);
 }
@@ -1602,7 +1602,7 @@ static void __context_switch(void)
     struct desc_ptr       gdt_desc;
 
     ASSERT(p != n);
-    ASSERT(cpumask_empty(n->vcpu_dirty_cpumask));
+    ASSERT(!vcpu_cpu_dirty(n));
 
     if ( !is_idle_domain(pd) )
     {
@@ -1618,7 +1618,7 @@ static void __context_switch(void)
      */
     if ( pd != nd )
         cpumask_set_cpu(cpu, nd->domain_dirty_cpumask);
-    cpumask_set_cpu(cpu, n->vcpu_dirty_cpumask);
+    n->dirty_cpu = cpu;
 
     if ( !is_idle_domain(nd) )
     {
@@ -1674,7 +1674,7 @@ static void __context_switch(void)
 
     if ( pd != nd )
         cpumask_clear_cpu(cpu, pd->domain_dirty_cpumask);
-    cpumask_clear_cpu(cpu, p->vcpu_dirty_cpumask);
+    p->dirty_cpu = VCPU_CPU_CLEAN;
 
     per_cpu(curr_vcpu, cpu) = n;
 }
@@ -1684,20 +1684,16 @@ void context_switch(struct vcpu *prev, s
 {
     unsigned int cpu = smp_processor_id();
     const struct domain *prevd = prev->domain, *nextd = next->domain;
-    cpumask_t dirty_mask;
+    unsigned int dirty_cpu = next->dirty_cpu;
 
     ASSERT(local_irq_is_enabled());
 
     get_cpu_info()->xen_cr3 = 0;
 
-    cpumask_copy(&dirty_mask, next->vcpu_dirty_cpumask);
-    /* Allow at most one CPU at a time to be dirty. */
-    ASSERT(cpumask_weight(&dirty_mask) <= 1);
-    if ( unlikely(!cpumask_test_cpu(cpu, &dirty_mask) &&
-                  !cpumask_empty(&dirty_mask)) )
+    if ( unlikely(dirty_cpu != cpu) && dirty_cpu != VCPU_CPU_CLEAN )
     {
-        /* Other cpus call __sync_local_execstate from flush ipi handler. */
-        flush_mask(&dirty_mask, FLUSH_TLB | FLUSH_VCPU_STATE);
+        /* Remote CPU calls __sync_local_execstate() from flush IPI handler. */
+        flush_mask(cpumask_of(dirty_cpu), FLUSH_TLB | FLUSH_VCPU_STATE);
     }
 
     if ( prev != next )
@@ -1802,11 +1798,14 @@ void sync_local_execstate(void)
 
 void sync_vcpu_execstate(struct vcpu *v)
 {
-    if ( cpumask_test_cpu(smp_processor_id(), v->vcpu_dirty_cpumask) )
+    if ( v->dirty_cpu == smp_processor_id() )
         sync_local_execstate();
 
-    /* Other cpus call __sync_local_execstate from flush ipi handler. */
-    flush_mask(v->vcpu_dirty_cpumask, FLUSH_TLB | FLUSH_VCPU_STATE);
+    if ( vcpu_cpu_dirty(v) )
+    {
+        /* Remote CPU calls __sync_local_execstate() from flush IPI handler. */
+        flush_mask(cpumask_of(v->dirty_cpu), FLUSH_TLB | FLUSH_VCPU_STATE);
+    }
 }
 
 static int relinquish_memory(
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -1212,7 +1212,7 @@ void put_page_from_l1e(l1_pgentry_t l1e,
             for_each_vcpu ( pg_owner, v )
             {
                 if ( pv_destroy_ldt(v) )
-                    flush_tlb_mask(v->vcpu_dirty_cpumask);
+                    flush_tlb_mask(cpumask_of(v->dirty_cpu));
             }
         }
         put_page(page);
@@ -2937,8 +2937,8 @@ static inline int vcpumask_to_pcpumask(
             vcpu_id += vcpu_bias;
             if ( (vcpu_id >= d->max_vcpus) )
                 return 0;
-            if ( ((v = d->vcpu[vcpu_id]) != NULL) )
-                cpumask_or(pmask, pmask, v->vcpu_dirty_cpumask);
+            if ( ((v = d->vcpu[vcpu_id]) != NULL) && vcpu_cpu_dirty(v) )
+                __cpumask_set_cpu(v->dirty_cpu, pmask);
         }
     }
 }
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -135,6 +135,7 @@ struct vcpu *alloc_vcpu(
 
     v->domain = d;
     v->vcpu_id = vcpu_id;
+    v->dirty_cpu = VCPU_CPU_CLEAN;
 
     spin_lock_init(&v->virq_lock);
 
@@ -145,8 +146,7 @@ struct vcpu *alloc_vcpu(
     if ( !zalloc_cpumask_var(&v->cpu_hard_affinity) ||
          !zalloc_cpumask_var(&v->cpu_hard_affinity_tmp) ||
          !zalloc_cpumask_var(&v->cpu_hard_affinity_saved) ||
-         !zalloc_cpumask_var(&v->cpu_soft_affinity) ||
-         !zalloc_cpumask_var(&v->vcpu_dirty_cpumask) )
+         !zalloc_cpumask_var(&v->cpu_soft_affinity) )
         goto fail_free;
 
     if ( is_idle_domain(d) )
@@ -175,7 +175,6 @@ struct vcpu *alloc_vcpu(
         free_cpumask_var(v->cpu_hard_affinity_tmp);
         free_cpumask_var(v->cpu_hard_affinity_saved);
         free_cpumask_var(v->cpu_soft_affinity);
-        free_cpumask_var(v->vcpu_dirty_cpumask);
         free_vcpu_struct(v);
         return NULL;
     }
@@ -863,7 +862,6 @@ static void complete_domain_destroy(stru
             free_cpumask_var(v->cpu_hard_affinity_tmp);
             free_cpumask_var(v->cpu_hard_affinity_saved);
             free_cpumask_var(v->cpu_soft_affinity);
-            free_cpumask_var(v->vcpu_dirty_cpumask);
             free_vcpu_struct(v);
         }
 
--- a/xen/common/keyhandler.c
+++ b/xen/common/keyhandler.c
@@ -340,8 +340,9 @@ static void dump_domains(unsigned char k
                    v->is_running ? 'T':'F', v->poll_evtchn,
                    vcpu_info(v, evtchn_upcall_pending),
                    !vcpu_event_delivery_is_enabled(v));
-            cpuset_print(tmpstr, sizeof(tmpstr), v->vcpu_dirty_cpumask);
-            printk("dirty_cpus=%s\n", tmpstr);
+            if ( vcpu_cpu_dirty(v) )
+                printk("dirty_cpu=%u", v->dirty_cpu);
+            printk("\n");
             cpuset_print(tmpstr, sizeof(tmpstr), v->cpu_hard_affinity);
             printk("    cpu_hard_affinity=%s ", tmpstr);
             cpuset_print(tmpstr, sizeof(tmpstr), v->cpu_soft_affinity);
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -210,6 +210,9 @@ struct vcpu
     bool             hcall_compat;
 #endif
 
+    /* The CPU, if any, which is holding onto this VCPU's state. */
+#define VCPU_CPU_CLEAN (~0u)
+    unsigned int     dirty_cpu;
 
     /*
      * > 0: a single port is being polled;
@@ -248,9 +251,6 @@ struct vcpu
     /* Bitmask of CPUs on which this VCPU prefers to run. */
     cpumask_var_t    cpu_soft_affinity;
 
-    /* Bitmask of CPUs which are holding onto this VCPU's state. */
-    cpumask_var_t    vcpu_dirty_cpumask;
-
     /* Tasklet for continue_hypercall_on_cpu(). */
     struct tasklet   continue_hypercall_tasklet;
 
@@ -803,6 +803,11 @@ static inline int vcpu_runnable(struct v
              atomic_read(&v->domain->pause_count));
 }
 
+static inline bool vcpu_cpu_dirty(const struct vcpu *v)
+{
+    return v->dirty_cpu != VCPU_CPU_CLEAN;
+}
+
 void vcpu_block(void);
 void vcpu_unblock(struct vcpu *v);
 void vcpu_pause(struct vcpu *v);




* [PATCH v2 2/3] x86: avoid explicit TLB flush when saving exec state
From: Jan Beulich @ 2018-01-23 10:14 UTC
  To: xen-devel; +Cc: George Dunlap, Andrew Cooper

Now that it's obvious that only a single dirty CPU can exist for a vCPU,
it becomes clear that flush_mask() doesn't need to be invoked when
sync_local_execstate() was already run. And with the IPI handler
clearing FLUSH_TLB from the passed flags anyway if
__sync_local_execstate() returns true, it also becomes clear that
FLUSH_TLB doesn't need to be passed here in the first place; neither of
the two places actually has a need to flush the TLB in any event (quite
possibly FLUSH_TLB was being passed there solely for flush_area_mask()
to make it past its no-op check).
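
In outline, the resulting sync_vcpu_execstate() (a sketch mirroring the
diff below) becomes:

    void sync_vcpu_execstate(struct vcpu *v)
    {
        if ( v->dirty_cpu == smp_processor_id() )
            sync_local_execstate();
        else if ( vcpu_cpu_dirty(v) )
        {
            /* Remote CPU calls __sync_local_execstate() from flush IPI handler. */
            flush_mask(cpumask_of(v->dirty_cpu), FLUSH_VCPU_STATE);
        }
    }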

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v2: Slightly extend description. Re-base.

--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1693,7 +1693,7 @@ void context_switch(struct vcpu *prev, s
     if ( unlikely(dirty_cpu != cpu) && dirty_cpu != VCPU_CPU_CLEAN )
     {
         /* Remote CPU calls __sync_local_execstate() from flush IPI handler. */
-        flush_mask(cpumask_of(dirty_cpu), FLUSH_TLB | FLUSH_VCPU_STATE);
+        flush_mask(cpumask_of(dirty_cpu), FLUSH_VCPU_STATE);
     }
 
     if ( prev != next )
@@ -1800,11 +1800,10 @@ void sync_vcpu_execstate(struct vcpu *v)
 {
     if ( v->dirty_cpu == smp_processor_id() )
         sync_local_execstate();
-
-    if ( vcpu_cpu_dirty(v) )
+    else if ( vcpu_cpu_dirty(v) )
     {
         /* Remote CPU calls __sync_local_execstate() from flush IPI handler. */
-        flush_mask(cpumask_of(v->dirty_cpu), FLUSH_TLB | FLUSH_VCPU_STATE);
+        flush_mask(cpumask_of(v->dirty_cpu), FLUSH_VCPU_STATE);
     }
 }
 





* [PATCH v2 3/3] drop "domain_" prefix from struct domain's dirty CPU mask
From: Jan Beulich @ 2018-01-23 10:16 UTC
  To: xen-devel
  Cc: Kevin Tian, Stefano Stabellini, Wei Liu, Jun Nakajima,
	George Dunlap, Andrew Cooper, Ian Jackson, Tim Deegan,
	Julien Grall, Suravee Suthikulpanit, Boris Ostrovsky

It being a field of struct domain is sufficient to recognize its
purpose.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Wei Liu <wei.liu2@citrix.com>
Reviewed-by: George Dunlap <george.dunlap@citrix.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
v2: White space changes (consolidate split line statements into single
    line ones). Re-base.

--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -470,7 +470,7 @@ void startup_cpu_idle_loop(void)
 
     ASSERT(is_idle_vcpu(v));
     /* TODO
-       cpumask_set_cpu(v->processor, v->domain->domain_dirty_cpumask);
+       cpumask_set_cpu(v->processor, v->domain->dirty_cpumask);
        v->dirty_cpu = v->processor;
     */
 
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -145,7 +145,7 @@ void startup_cpu_idle_loop(void)
     struct vcpu *v = current;
 
     ASSERT(is_idle_vcpu(v));
-    cpumask_set_cpu(v->processor, v->domain->domain_dirty_cpumask);
+    cpumask_set_cpu(v->processor, v->domain->dirty_cpumask);
     v->dirty_cpu = v->processor;
 
     reset_stack_and_jump(idle_loop);
@@ -1617,7 +1617,7 @@ static void __context_switch(void)
      * which is synchronised on that function.
      */
     if ( pd != nd )
-        cpumask_set_cpu(cpu, nd->domain_dirty_cpumask);
+        cpumask_set_cpu(cpu, nd->dirty_cpumask);
     n->dirty_cpu = cpu;
 
     if ( !is_idle_domain(nd) )
@@ -1673,7 +1673,7 @@ static void __context_switch(void)
     }
 
     if ( pd != nd )
-        cpumask_clear_cpu(cpu, pd->domain_dirty_cpumask);
+        cpumask_clear_cpu(cpu, pd->dirty_cpumask);
     p->dirty_cpu = VCPU_CPU_CLEAN;
 
     per_cpu(curr_vcpu, cpu) = n;
@@ -1922,7 +1922,7 @@ int domain_relinquish_resources(struct d
     int ret;
     struct vcpu *v;
 
-    BUG_ON(!cpumask_empty(d->domain_dirty_cpumask));
+    BUG_ON(!cpumask_empty(d->dirty_cpumask));
 
     switch ( d->arch.relmem )
     {
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4045,7 +4045,7 @@ static int hvmop_flush_tlb_all(void)
         paging_update_cr3(v);
 
     /* Flush all dirty TLBs. */
-    flush_tlb_mask(d->domain_dirty_cpumask);
+    flush_tlb_mask(d->dirty_cpumask);
 
     /* Done. */
     for_each_vcpu ( d, v )
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -2322,7 +2322,7 @@ static int svm_is_erratum_383(struct cpu
     wrmsrl(MSR_IA32_MCG_STATUS, msr_content & ~(1ULL << 2));
 
     /* flush TLB */
-    flush_tlb_mask(v->domain->domain_dirty_cpumask);
+    flush_tlb_mask(v->domain->dirty_cpumask);
 
     return 1;
 }
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2546,7 +2546,7 @@ static int _get_page_type(struct page_in
                 cpumask_t *mask = this_cpu(scratch_cpumask);
 
                 BUG_ON(in_irq());
-                cpumask_copy(mask, d->domain_dirty_cpumask);
+                cpumask_copy(mask, d->dirty_cpumask);
 
                 /* Don't flush if the timestamp is old enough */
                 tlbflush_filter(mask, page->tlbflush_timestamp);
@@ -3277,7 +3277,7 @@ long do_mmuext_op(
 
         case MMUEXT_TLB_FLUSH_ALL:
             if ( likely(currd == pg_owner) )
-                flush_tlb_mask(currd->domain_dirty_cpumask);
+                flush_tlb_mask(currd->dirty_cpumask);
             else
                 rc = -EPERM;
             break;
@@ -3286,8 +3286,7 @@ long do_mmuext_op(
             if ( unlikely(currd != pg_owner) )
                 rc = -EPERM;
             else if ( __addr_ok(op.arg1.linear_addr) )
-                flush_tlb_one_mask(currd->domain_dirty_cpumask,
-                                   op.arg1.linear_addr);
+                flush_tlb_one_mask(currd->dirty_cpumask, op.arg1.linear_addr);
             break;
 
         case MMUEXT_FLUSH_CACHE:
@@ -3772,7 +3771,7 @@ long do_mmu_update(
         unsigned int cpu = smp_processor_id();
         cpumask_t *mask = per_cpu(scratch_cpumask, cpu);
 
-        cpumask_andnot(mask, pt_owner->domain_dirty_cpumask, cpumask_of(cpu));
+        cpumask_andnot(mask, pt_owner->dirty_cpumask, cpumask_of(cpu));
         if ( !cpumask_empty(mask) )
             flush_area_mask(mask, ZERO_BLOCK_PTR, FLUSH_VA_VALID);
     }
@@ -3955,7 +3954,7 @@ static int __do_update_va_mapping(
             flush_tlb_local();
             break;
         case UVMF_ALL:
-            mask = d->domain_dirty_cpumask;
+            mask = d->dirty_cpumask;
             break;
         default:
             mask = this_cpu(scratch_cpumask);
@@ -3975,7 +3974,7 @@ static int __do_update_va_mapping(
             paging_invlpg(v, va);
             break;
         case UVMF_ALL:
-            mask = d->domain_dirty_cpumask;
+            mask = d->dirty_cpumask;
             break;
         default:
             mask = this_cpu(scratch_cpumask);
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -124,7 +124,7 @@ int hap_track_dirty_vram(struct domain *
             p2m_change_type_range(d, begin_pfn, begin_pfn + nr,
                                   p2m_ram_rw, p2m_ram_logdirty);
 
-            flush_tlb_mask(d->domain_dirty_cpumask);
+            flush_tlb_mask(d->dirty_cpumask);
 
             memset(dirty_bitmap, 0xff, size); /* consider all pages dirty */
         }
@@ -211,7 +211,7 @@ static int hap_enable_log_dirty(struct d
          * to be read-only, or via hardware-assisted log-dirty.
          */
         p2m_change_entry_type_global(d, p2m_ram_rw, p2m_ram_logdirty);
-        flush_tlb_mask(d->domain_dirty_cpumask);
+        flush_tlb_mask(d->dirty_cpumask);
     }
     return 0;
 }
@@ -240,7 +240,7 @@ static void hap_clean_dirty_bitmap(struc
      * be read-only, or via hardware-assisted log-dirty.
      */
     p2m_change_entry_type_global(d, p2m_ram_rw, p2m_ram_logdirty);
-    flush_tlb_mask(d->domain_dirty_cpumask);
+    flush_tlb_mask(d->dirty_cpumask);
 }
 
 /************************************************/
@@ -741,7 +741,7 @@ hap_write_p2m_entry(struct domain *d, un
 
     safe_write_pte(p, new);
     if ( old_flags & _PAGE_PRESENT )
-        flush_tlb_mask(d->domain_dirty_cpumask);
+        flush_tlb_mask(d->dirty_cpumask);
 
     paging_unlock(d);
 
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -1195,12 +1195,12 @@ void ept_sync_domain(struct p2m_domain *
         return;
     }
 
-    ept_sync_domain_mask(p2m, d->domain_dirty_cpumask);
+    ept_sync_domain_mask(p2m, d->dirty_cpumask);
 }
 
 static void ept_tlb_flush(struct p2m_domain *p2m)
 {
-    ept_sync_domain_mask(p2m, p2m->domain->domain_dirty_cpumask);
+    ept_sync_domain_mask(p2m, p2m->domain->dirty_cpumask);
 }
 
 static void ept_enable_pml(struct p2m_domain *p2m)
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -929,7 +929,7 @@ static void p2m_pt_change_entry_type_glo
     unmap_domain_page(tab);
 
     if ( changed )
-         flush_tlb_mask(p2m->domain->domain_dirty_cpumask);
+         flush_tlb_mask(p2m->domain->dirty_cpumask);
 }
 
 static int p2m_pt_change_entry_type_range(struct p2m_domain *p2m,
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -619,7 +619,7 @@ void paging_log_dirty_range(struct domai
 
     p2m_unlock(p2m);
 
-    flush_tlb_mask(d->domain_dirty_cpumask);
+    flush_tlb_mask(d->dirty_cpumask);
 }
 
 /*
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -637,7 +637,7 @@ static int oos_remove_write_access(struc
     }
 
     if ( ftlb )
-        flush_tlb_mask(d->domain_dirty_cpumask);
+        flush_tlb_mask(d->dirty_cpumask);
 
     return 0;
 }
@@ -1064,7 +1064,7 @@ sh_validate_guest_pt_write(struct vcpu *
     rc = sh_validate_guest_entry(v, gmfn, entry, size);
     if ( rc & SHADOW_SET_FLUSH )
         /* Need to flush TLBs to pick up shadow PT changes */
-        flush_tlb_mask(d->domain_dirty_cpumask);
+        flush_tlb_mask(d->dirty_cpumask);
     if ( rc & SHADOW_SET_ERROR )
     {
         /* This page is probably not a pagetable any more: tear it out of the
@@ -1227,7 +1227,7 @@ static void _shadow_prealloc(struct doma
                 /* See if that freed up enough space */
                 if ( d->arch.paging.shadow.free_pages >= pages )
                 {
-                    flush_tlb_mask(d->domain_dirty_cpumask);
+                    flush_tlb_mask(d->dirty_cpumask);
                     return;
                 }
             }
@@ -1281,7 +1281,7 @@ static void shadow_blow_tables(struct do
                                pagetable_get_mfn(v->arch.shadow_table[i]), 0);
 
     /* Make sure everyone sees the unshadowings */
-    flush_tlb_mask(d->domain_dirty_cpumask);
+    flush_tlb_mask(d->dirty_cpumask);
 }
 
 void shadow_blow_tables_per_domain(struct domain *d)
@@ -1385,7 +1385,7 @@ mfn_t shadow_alloc(struct domain *d,
         sp = page_list_remove_head(&d->arch.paging.shadow.freelist);
         /* Before we overwrite the old contents of this page,
          * we need to be sure that no TLB holds a pointer to it. */
-        cpumask_copy(&mask, d->domain_dirty_cpumask);
+        cpumask_copy(&mask, d->dirty_cpumask);
         tlbflush_filter(&mask, sp->tlbflush_timestamp);
         if ( unlikely(!cpumask_empty(&mask)) )
         {
@@ -2797,7 +2797,7 @@ void sh_remove_shadows(struct domain *d,
 
     /* Need to flush TLBs now, so that linear maps are safe next time we
      * take a fault. */
-    flush_tlb_mask(d->domain_dirty_cpumask);
+    flush_tlb_mask(d->dirty_cpumask);
 
     paging_unlock(d);
 }
@@ -3481,7 +3481,7 @@ static void sh_unshadow_for_p2m_change(s
         {
             sh_remove_all_shadows_and_parents(d, mfn);
             if ( sh_remove_all_mappings(d, mfn, _gfn(gfn)) )
-                flush_tlb_mask(d->domain_dirty_cpumask);
+                flush_tlb_mask(d->dirty_cpumask);
         }
     }
 
@@ -3517,8 +3517,7 @@ static void sh_unshadow_for_p2m_change(s
                     sh_remove_all_shadows_and_parents(d, omfn);
                     if ( sh_remove_all_mappings(d, omfn,
                                                 _gfn(gfn + (i << PAGE_SHIFT))) )
-                        cpumask_or(&flushmask, &flushmask,
-                                   d->domain_dirty_cpumask);
+                        cpumask_or(&flushmask, &flushmask, d->dirty_cpumask);
                 }
                 omfn = _mfn(mfn_x(omfn) + 1);
             }
@@ -3795,7 +3794,7 @@ int shadow_track_dirty_vram(struct domai
         }
     }
     if ( flush_tlb )
-        flush_tlb_mask(d->domain_dirty_cpumask);
+        flush_tlb_mask(d->dirty_cpumask);
     goto out;
 
 out_sl1ma:
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -3134,7 +3134,7 @@ static int sh_page_fault(struct vcpu *v,
         perfc_incr(shadow_rm_write_flush_tlb);
         smp_wmb();
         atomic_inc(&d->arch.paging.shadow.gtable_dirty_version);
-        flush_tlb_mask(d->domain_dirty_cpumask);
+        flush_tlb_mask(d->dirty_cpumask);
     }
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
@@ -4114,7 +4114,7 @@ sh_update_cr3(struct vcpu *v, int do_loc
      * (old) shadow linear maps in the writeable mapping heuristics. */
 #if GUEST_PAGING_LEVELS == 2
     if ( sh_remove_write_access(d, gmfn, 2, 0) != 0 )
-        flush_tlb_mask(d->domain_dirty_cpumask);
+        flush_tlb_mask(d->dirty_cpumask);
     sh_set_toplevel_shadow(v, 0, gmfn, SH_type_l2_shadow);
 #elif GUEST_PAGING_LEVELS == 3
     /* PAE guests have four shadow_table entries, based on the
@@ -4137,7 +4137,7 @@ sh_update_cr3(struct vcpu *v, int do_loc
             }
         }
         if ( flush )
-            flush_tlb_mask(d->domain_dirty_cpumask);
+            flush_tlb_mask(d->dirty_cpumask);
         /* Now install the new shadows. */
         for ( i = 0; i < 4; i++ )
         {
@@ -4158,7 +4158,7 @@ sh_update_cr3(struct vcpu *v, int do_loc
     }
 #elif GUEST_PAGING_LEVELS == 4
     if ( sh_remove_write_access(d, gmfn, 4, 0) != 0 )
-        flush_tlb_mask(d->domain_dirty_cpumask);
+        flush_tlb_mask(d->dirty_cpumask);
     sh_set_toplevel_shadow(v, 0, gmfn, SH_type_l4_shadow);
     if ( !shadow_mode_external(d) && !is_pv_32bit_domain(d) )
     {
@@ -4605,7 +4605,7 @@ static void sh_pagetable_dying(struct vc
         }
     }
     if ( flush )
-        flush_tlb_mask(d->domain_dirty_cpumask);
+        flush_tlb_mask(d->dirty_cpumask);
 
     /* Remember that we've seen the guest use this interface, so we
      * can rely on it using it in future, instead of guessing at
@@ -4641,7 +4641,7 @@ static void sh_pagetable_dying(struct vc
         mfn_to_page(gmfn)->shadow_flags |= SHF_pagetable_dying;
         shadow_unhook_mappings(d, smfn, 1/* user pages only */);
         /* Now flush the TLB: we removed toplevel mappings. */
-        flush_tlb_mask(d->domain_dirty_cpumask);
+        flush_tlb_mask(d->dirty_cpumask);
     }
 
     /* Remember that we've seen the guest use this interface, so we
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -297,7 +297,7 @@ struct domain *domain_create(domid_t dom
     rwlock_init(&d->vnuma_rwlock);
 
     err = -ENOMEM;
-    if ( !zalloc_cpumask_var(&d->domain_dirty_cpumask) )
+    if ( !zalloc_cpumask_var(&d->dirty_cpumask) )
         goto fail;
 
     if ( domcr_flags & DOMCRF_hvm )
@@ -415,7 +415,7 @@ struct domain *domain_create(domid_t dom
         watchdog_domain_destroy(d);
     if ( init_status & INIT_xsm )
         xsm_free_security_domain(d);
-    free_cpumask_var(d->domain_dirty_cpumask);
+    free_cpumask_var(d->dirty_cpumask);
     free_domain_struct(d);
     return ERR_PTR(err);
 }
@@ -851,7 +851,7 @@ static void complete_domain_destroy(stru
     radix_tree_destroy(&d->pirq_tree, free_pirq_struct);
 
     xsm_free_security_domain(d);
-    free_cpumask_var(d->domain_dirty_cpumask);
+    free_cpumask_var(d->dirty_cpumask);
     xfree(d->vcpu);
     free_domain_struct(d);
 
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -276,7 +276,7 @@ static inline void grant_write_unlock(st
 static inline void gnttab_flush_tlb(const struct domain *d)
 {
     if ( !paging_mode_external(d) )
-        flush_tlb_mask(d->domain_dirty_cpumask);
+        flush_tlb_mask(d->dirty_cpumask);
 }
 
 static inline unsigned int
--- a/xen/common/keyhandler.c
+++ b/xen/common/keyhandler.c
@@ -298,7 +298,7 @@ static void dump_domains(unsigned char k
         process_pending_softirqs();
 
         printk("General information for domain %u:\n", d->domain_id);
-        cpuset_print(tmpstr, sizeof(tmpstr), d->domain_dirty_cpumask);
+        cpuset_print(tmpstr, sizeof(tmpstr), d->dirty_cpumask);
         printk("    refcnt=%d dying=%d pause_count=%d\n",
                atomic_read(&d->refcnt), d->is_dying,
                atomic_read(&d->pause_count));
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -193,7 +193,8 @@ struct p2m_domain {
     /* Shadow translated domain: p2m mapping */
     pagetable_t        phys_table;
 
-    /* Same as domain_dirty_cpumask but limited to
+    /*
+     * Same as a domain's dirty_cpumask but limited to
      * this p2m and those physical cpus whose vcpu's are in
      * guestmode.
      */
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -417,7 +417,7 @@ struct domain
     unsigned long    vm_assist;
 
     /* Bitmask of CPUs which are holding onto this domain's state. */
-    cpumask_var_t    domain_dirty_cpumask;
+    cpumask_var_t    dirty_cpumask;
 
     struct arch_domain arch;
 




* Re: [PATCH v2 0/3] misc flush and dirty-mask related adjustments
From: Andrew Cooper @ 2018-01-23 10:17 UTC
  To: Jan Beulich, xen-devel

On 23/01/2018 10:07, Jan Beulich wrote:
> 1: replace vCPU's dirty CPU mask by numeric ID
> 2: x86: avoid explicit TLB flush when saving exec state
> 3: drop "domain_" prefix from struct domain's dirty CPU mask

Much clearer.

Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>


* Re: [PATCH v2 1/3] replace vCPU's dirty CPU mask by numeric ID
From: Andrew Cooper @ 2018-01-23 10:20 UTC
  To: Jan Beulich, xen-devel
  Cc: Stefano Stabellini, Wei Liu, George Dunlap, Tim Deegan,
	Ian Jackson, Julien Grall

On 23/01/2018 10:12, Jan Beulich wrote:
> @@ -803,6 +803,11 @@ static inline int vcpu_runnable(struct v
>               atomic_read(&v->domain->pause_count));
>  }
>  
> +static inline bool vcpu_cpu_dirty(const struct vcpu *v)
> +{

Oh - one extra thought.  BUILD_BUG_ON(NR_CPUS >= VCPU_CPU_CLEAN) ?
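
E.g. (sketch of one possible placement, not part of the posted patch):

    static inline bool vcpu_cpu_dirty(const struct vcpu *v)
    {
        BUILD_BUG_ON(NR_CPUS >= VCPU_CPU_CLEAN);
        return v->dirty_cpu != VCPU_CPU_CLEAN;
    }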

R-by stands either way.

~Andrew

> +    return v->dirty_cpu != VCPU_CPU_CLEAN;
> +}
> +
>  void vcpu_block(void);
>  void vcpu_unblock(struct vcpu *v);
>  void vcpu_pause(struct vcpu *v);
>
>



* Re: [PATCH v2 1/3] replace vCPU's dirty CPU mask by numeric ID
From: Julien Grall @ 2018-01-23 11:03 UTC
  To: Jan Beulich, xen-devel
  Cc: Stefano Stabellini, Wei Liu, George Dunlap, Andrew Cooper,
	Ian Jackson, Tim Deegan, Julien Grall

Hi Jan,

On 23/01/18 10:12, Jan Beulich wrote:
> At most one bit can be set in the masks, so especially on larger systems
> it's quite a bit of unnecessary memory and processing overhead to track
> the information as a mask. Store the numeric ID of the respective CPU
> instead, or VCPU_CPU_CLEAN if no dirty state exists.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Julien Grall <julien.grall@linaro.org>

Cheers,


-- 
Julien Grall


* Re: [PATCH v2 3/3] drop "domain_" prefix from struct domain's dirty CPU mask
From: Julien Grall @ 2018-01-23 11:04 UTC
  To: Jan Beulich, xen-devel
  Cc: Kevin Tian, Stefano Stabellini, Wei Liu, Suravee Suthikulpanit,
	George Dunlap, Andrew Cooper, Ian Jackson, Tim Deegan,
	Julien Grall, Jun Nakajima, Boris Ostrovsky

Hi Jan,

On 23/01/18 10:16, Jan Beulich wrote:
> It being a field of struct domain is sufficient to recognize its
> purpose.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> Reviewed-by: Wei Liu <wei.liu2@citrix.com>
> Reviewed-by: George Dunlap <george.dunlap@citrix.com>
> Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Julien Grall <julien.grall@linaro.org>

Cheers,


-- 
Julien Grall


* Re: [PATCH v2 3/3] drop "domain_" prefix from struct domain's dirty CPU mask
From: Boris Ostrovsky @ 2018-01-23 14:06 UTC
  To: Jan Beulich, xen-devel
  Cc: Kevin Tian, Stefano Stabellini, Wei Liu, Jun Nakajima,
	George Dunlap, Andrew Cooper, Ian Jackson, Tim Deegan,
	Julien Grall, Suravee Suthikulpanit

On 01/23/2018 05:16 AM, Jan Beulich wrote:
> It being a field of struct domain is sufficient to recognize its
> purpose.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> Reviewed-by: Wei Liu <wei.liu2@citrix.com>
> Reviewed-by: George Dunlap <george.dunlap@citrix.com>
> Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
> ---
> v2: White space changes (consolidate split line statements into single
>     line ones). Re-base.


Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>


* Re: [PATCH v2 3/3] drop "domain_" prefix from struct domain's dirty CPU mask
From: Tian, Kevin @ 2018-01-24  1:53 UTC
  To: Jan Beulich, xen-devel
  Cc: Stefano Stabellini, Wei Liu, Nakajima, Jun, George Dunlap,
	Andrew Cooper, Ian Jackson, Tim Deegan, Julien Grall,
	Suravee Suthikulpanit, Boris Ostrovsky

> From: Jan Beulich [mailto:JBeulich@suse.com]
> Sent: Tuesday, January 23, 2018 6:17 PM
> 
> It being a field of struct domain is sufficient to recognize its
> purpose.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> Reviewed-by: Wei Liu <wei.liu2@citrix.com>
> Reviewed-by: George Dunlap <george.dunlap@citrix.com>
> Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
> ---
> v2: White space changes (consolidate split line statements into single
>     line ones). Re-base.

Reviewed-by: Kevin Tian <kevin.tian@intel.com>

