All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH 1/4] kvm/ppc/book3s_hv: Change vcore element runnable_threads from linked-list to array
@ 2016-06-15  9:21 ` Suraj Jitindar Singh
  0 siblings, 0 replies; 20+ messages in thread
From: Suraj Jitindar Singh @ 2016-06-15  9:21 UTC (permalink / raw)
  To: linuxppc-dev
  Cc: kvm-ppc, mpe, paulus, benh, kvm, pbonzini, agraf, rkrcmar,
	Suraj Jitindar Singh

The struct kvmppc_vcore is a structure used to store various information
about a virtual core for a kvm guest. The runnable_threads element of the
struct provides a list of all of the currently runnable vcpus on the core
(those in the KVMPPC_VCPU_RUNNABLE state). The previous implementation of
this list was a linked list. The next patch requires that the list be able
to be iterated over without holding the vcore lock.

Reimplement the runnable_threads list in the kvmppc_vcore struct as an
array. Implement function to iterate over valid entries in the array and
update access sites accordingly.

Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
---
 arch/powerpc/include/asm/kvm_host.h |  3 +-
 arch/powerpc/kvm/book3s_hv.c        | 68 +++++++++++++++++++++++--------------
 2 files changed, 43 insertions(+), 28 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index ec35af3..4915443 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -293,7 +293,7 @@ struct kvmppc_vcore {
 	u8 vcore_state;
 	u8 in_guest;
 	struct kvmppc_vcore *master_vcore;
-	struct list_head runnable_threads;
+	struct kvm_vcpu *runnable_threads[MAX_SMT_THREADS];
 	struct list_head preempt_list;
 	spinlock_t lock;
 	struct swait_queue_head wq;
@@ -668,7 +668,6 @@ struct kvm_vcpu_arch {
 	long pgfault_index;
 	unsigned long pgfault_hpte[2];
 
-	struct list_head run_list;
 	struct task_struct *run_task;
 	struct kvm_run *kvm_run;
 
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index e20beae..3bcf9e6 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -57,6 +57,7 @@
 #include <linux/highmem.h>
 #include <linux/hugetlb.h>
 #include <linux/module.h>
+#include <linux/compiler.h>
 
 #include "book3s.h"
 
@@ -96,6 +97,26 @@ MODULE_PARM_DESC(h_ipi_redirect, "Redirect H_IPI wakeup to a free host core");
 static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
 
+static inline struct kvm_vcpu *next_runnable_thread(struct kvmppc_vcore *vc,
+		int *ip)
+{
+	int i = *ip;
+	struct kvm_vcpu *vcpu;
+
+	while (++i < MAX_SMT_THREADS) {
+		vcpu = READ_ONCE(vc->runnable_threads[i]);
+		if (vcpu) {
+			*ip = i;
+			return vcpu;
+		}
+	}
+	return NULL;
+}
+
+/* Used to traverse the list of runnable threads for a given vcore */
+#define for_each_runnable_thread(i, vcpu, vc) \
+	for (i = -1; (vcpu = next_runnable_thread(vc, &i)); )
+
 static bool kvmppc_ipi_thread(int cpu)
 {
 	/* On POWER8 for IPIs to threads in the same core, use msgsnd */
@@ -1492,7 +1513,6 @@ static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
 	if (vcore == NULL)
 		return NULL;
 
-	INIT_LIST_HEAD(&vcore->runnable_threads);
 	spin_lock_init(&vcore->lock);
 	spin_lock_init(&vcore->stoltb_lock);
 	init_swait_queue_head(&vcore->wq);
@@ -1801,7 +1821,7 @@ static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
 	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
 	spin_unlock_irq(&vcpu->arch.tbacct_lock);
 	--vc->n_runnable;
-	list_del(&vcpu->arch.run_list);
+	WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], NULL);
 }
 
 static int kvmppc_grab_hwthread(int cpu)
@@ -2208,10 +2228,10 @@ static bool can_piggyback(struct kvmppc_vcore *pvc, struct core_info *cip,
 
 static void prepare_threads(struct kvmppc_vcore *vc)
 {
-	struct kvm_vcpu *vcpu, *vnext;
+	int i;
+	struct kvm_vcpu *vcpu;
 
-	list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
-				 arch.run_list) {
+	for_each_runnable_thread(i, vcpu, vc) {
 		if (signal_pending(vcpu->arch.run_task))
 			vcpu->arch.ret = -EINTR;
 		else if (vcpu->arch.vpa.update_pending ||
@@ -2258,15 +2278,14 @@ static void collect_piggybacks(struct core_info *cip, int target_threads)
 
 static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
 {
-	int still_running = 0;
+	int still_running = 0, i;
 	u64 now;
 	long ret;
-	struct kvm_vcpu *vcpu, *vnext;
+	struct kvm_vcpu *vcpu;
 
 	spin_lock(&vc->lock);
 	now = get_tb();
-	list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
-				 arch.run_list) {
+	for_each_runnable_thread(i, vcpu, vc) {
 		/* cancel pending dec exception if dec is positive */
 		if (now < vcpu->arch.dec_expires &&
 		    kvmppc_core_pending_dec(vcpu))
@@ -2306,8 +2325,8 @@ static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
 		}
 		if (vc->n_runnable > 0 && vc->runner == NULL) {
 			/* make sure there's a candidate runner awake */
-			vcpu = list_first_entry(&vc->runnable_threads,
-						struct kvm_vcpu, arch.run_list);
+			i = -1;
+			vcpu = next_runnable_thread(vc, &i);
 			wake_up(&vcpu->arch.cpu_run);
 		}
 	}
@@ -2360,7 +2379,7 @@ static inline void kvmppc_set_host_core(int cpu)
  */
 static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 {
-	struct kvm_vcpu *vcpu, *vnext;
+	struct kvm_vcpu *vcpu;
 	int i;
 	int srcu_idx;
 	struct core_info core_info;
@@ -2396,8 +2415,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 	 */
 	if ((threads_per_core > 1) &&
 	    ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) {
-		list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
-					 arch.run_list) {
+		for_each_runnable_thread(i, vcpu, vc) {
 			vcpu->arch.ret = -EBUSY;
 			kvmppc_remove_runnable(vc, vcpu);
 			wake_up(&vcpu->arch.cpu_run);
@@ -2476,8 +2494,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 		active |= 1 << thr;
 		list_for_each_entry(pvc, &core_info.vcs[sub], preempt_list) {
 			pvc->pcpu = pcpu + thr;
-			list_for_each_entry(vcpu, &pvc->runnable_threads,
-					    arch.run_list) {
+			for_each_runnable_thread(i, vcpu, pvc) {
 				kvmppc_start_thread(vcpu, pvc);
 				kvmppc_create_dtl_entry(vcpu, pvc);
 				trace_kvm_guest_enter(vcpu);
@@ -2610,7 +2627,7 @@ static void kvmppc_wait_for_exec(struct kvmppc_vcore *vc,
 static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
 {
 	struct kvm_vcpu *vcpu;
-	int do_sleep = 1;
+	int do_sleep = 1, i;
 	DECLARE_SWAITQUEUE(wait);
 
 	prepare_to_swait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
@@ -2619,7 +2636,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
 	 * Check one last time for pending exceptions and ceded state after
 	 * we put ourselves on the wait queue
 	 */
-	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
+	for_each_runnable_thread(i, vcpu, vc) {
 		if (vcpu->arch.pending_exceptions || !vcpu->arch.ceded) {
 			do_sleep = 0;
 			break;
@@ -2643,9 +2660,9 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
 
 static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
-	int n_ceded;
+	int n_ceded, i;
 	struct kvmppc_vcore *vc;
-	struct kvm_vcpu *v, *vn;
+	struct kvm_vcpu *v;
 
 	trace_kvmppc_run_vcpu_enter(vcpu);
 
@@ -2665,7 +2682,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
 	vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
 	vcpu->arch.busy_preempt = TB_NIL;
-	list_add_tail(&vcpu->arch.run_list, &vc->runnable_threads);
+	WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], vcpu);
 	++vc->n_runnable;
 
 	/*
@@ -2705,8 +2722,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 			kvmppc_wait_for_exec(vc, vcpu, TASK_INTERRUPTIBLE);
 			continue;
 		}
-		list_for_each_entry_safe(v, vn, &vc->runnable_threads,
-					 arch.run_list) {
+		for_each_runnable_thread(i, v, vc) {
 			kvmppc_core_prepare_to_enter(v);
 			if (signal_pending(v->arch.run_task)) {
 				kvmppc_remove_runnable(vc, v);
@@ -2719,7 +2735,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 		if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
 			break;
 		n_ceded = 0;
-		list_for_each_entry(v, &vc->runnable_threads, arch.run_list) {
+		for_each_runnable_thread(i, v, vc) {
 			if (!v->arch.pending_exceptions)
 				n_ceded += v->arch.ceded;
 			else
@@ -2758,8 +2774,8 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 
 	if (vc->n_runnable && vc->vcore_state == VCORE_INACTIVE) {
 		/* Wake up some vcpu to run the core */
-		v = list_first_entry(&vc->runnable_threads,
-				     struct kvm_vcpu, arch.run_list);
+		i = -1;
+		v = next_runnable_thread(vc, &i);
 		wake_up(&v->arch.cpu_run);
 	}
 
-- 
2.5.5


^ permalink raw reply related	[flat|nested] 20+ messages in thread

* [PATCH 1/4] kvm/ppc/book3s_hv: Change vcore element runnable_threads from linked-list to array
@ 2016-06-15  9:21 ` Suraj Jitindar Singh
  0 siblings, 0 replies; 20+ messages in thread
From: Suraj Jitindar Singh @ 2016-06-15  9:21 UTC (permalink / raw)
  To: linuxppc-dev
  Cc: kvm-ppc, mpe, paulus, benh, kvm, pbonzini, agraf, rkrcmar,
	Suraj Jitindar Singh

The struct kvmppc_vcore is a structure used to store various information
about a virtual core for a kvm guest. The runnable_threads element of the
struct provides a list of all of the currently runnable vcpus on the core
(those in the KVMPPC_VCPU_RUNNABLE state). The previous implementation of
this list was a linked list. The next patch requires that the list be able
to be iterated over without holding the vcore lock.

Reimplement the runnable_threads list in the kvmppc_vcore struct as an
array. Implement function to iterate over valid entries in the array and
update access sites accordingly.

Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
---
 arch/powerpc/include/asm/kvm_host.h |  3 +-
 arch/powerpc/kvm/book3s_hv.c        | 68 +++++++++++++++++++++++--------------
 2 files changed, 43 insertions(+), 28 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index ec35af3..4915443 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -293,7 +293,7 @@ struct kvmppc_vcore {
 	u8 vcore_state;
 	u8 in_guest;
 	struct kvmppc_vcore *master_vcore;
-	struct list_head runnable_threads;
+	struct kvm_vcpu *runnable_threads[MAX_SMT_THREADS];
 	struct list_head preempt_list;
 	spinlock_t lock;
 	struct swait_queue_head wq;
@@ -668,7 +668,6 @@ struct kvm_vcpu_arch {
 	long pgfault_index;
 	unsigned long pgfault_hpte[2];
 
-	struct list_head run_list;
 	struct task_struct *run_task;
 	struct kvm_run *kvm_run;
 
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index e20beae..3bcf9e6 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -57,6 +57,7 @@
 #include <linux/highmem.h>
 #include <linux/hugetlb.h>
 #include <linux/module.h>
+#include <linux/compiler.h>
 
 #include "book3s.h"
 
@@ -96,6 +97,26 @@ MODULE_PARM_DESC(h_ipi_redirect, "Redirect H_IPI wakeup to a free host core");
 static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
 
+static inline struct kvm_vcpu *next_runnable_thread(struct kvmppc_vcore *vc,
+		int *ip)
+{
+	int i = *ip;
+	struct kvm_vcpu *vcpu;
+
+	while (++i < MAX_SMT_THREADS) {
+		vcpu = READ_ONCE(vc->runnable_threads[i]);
+		if (vcpu) {
+			*ip = i;
+			return vcpu;
+		}
+	}
+	return NULL;
+}
+
+/* Used to traverse the list of runnable threads for a given vcore */
+#define for_each_runnable_thread(i, vcpu, vc) \
+	for (i = -1; (vcpu = next_runnable_thread(vc, &i)); )
+
 static bool kvmppc_ipi_thread(int cpu)
 {
 	/* On POWER8 for IPIs to threads in the same core, use msgsnd */
@@ -1492,7 +1513,6 @@ static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
 	if (vcore == NULL)
 		return NULL;
 
-	INIT_LIST_HEAD(&vcore->runnable_threads);
 	spin_lock_init(&vcore->lock);
 	spin_lock_init(&vcore->stoltb_lock);
 	init_swait_queue_head(&vcore->wq);
@@ -1801,7 +1821,7 @@ static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
 	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
 	spin_unlock_irq(&vcpu->arch.tbacct_lock);
 	--vc->n_runnable;
-	list_del(&vcpu->arch.run_list);
+	WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], NULL);
 }
 
 static int kvmppc_grab_hwthread(int cpu)
@@ -2208,10 +2228,10 @@ static bool can_piggyback(struct kvmppc_vcore *pvc, struct core_info *cip,
 
 static void prepare_threads(struct kvmppc_vcore *vc)
 {
-	struct kvm_vcpu *vcpu, *vnext;
+	int i;
+	struct kvm_vcpu *vcpu;
 
-	list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
-				 arch.run_list) {
+	for_each_runnable_thread(i, vcpu, vc) {
 		if (signal_pending(vcpu->arch.run_task))
 			vcpu->arch.ret = -EINTR;
 		else if (vcpu->arch.vpa.update_pending ||
@@ -2258,15 +2278,14 @@ static void collect_piggybacks(struct core_info *cip, int target_threads)
 
 static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
 {
-	int still_running = 0;
+	int still_running = 0, i;
 	u64 now;
 	long ret;
-	struct kvm_vcpu *vcpu, *vnext;
+	struct kvm_vcpu *vcpu;
 
 	spin_lock(&vc->lock);
 	now = get_tb();
-	list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
-				 arch.run_list) {
+	for_each_runnable_thread(i, vcpu, vc) {
 		/* cancel pending dec exception if dec is positive */
 		if (now < vcpu->arch.dec_expires &&
 		    kvmppc_core_pending_dec(vcpu))
@@ -2306,8 +2325,8 @@ static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
 		}
 		if (vc->n_runnable > 0 && vc->runner == NULL) {
 			/* make sure there's a candidate runner awake */
-			vcpu = list_first_entry(&vc->runnable_threads,
-						struct kvm_vcpu, arch.run_list);
+			i = -1;
+			vcpu = next_runnable_thread(vc, &i);
 			wake_up(&vcpu->arch.cpu_run);
 		}
 	}
@@ -2360,7 +2379,7 @@ static inline void kvmppc_set_host_core(int cpu)
  */
 static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 {
-	struct kvm_vcpu *vcpu, *vnext;
+	struct kvm_vcpu *vcpu;
 	int i;
 	int srcu_idx;
 	struct core_info core_info;
@@ -2396,8 +2415,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 	 */
 	if ((threads_per_core > 1) &&
 	    ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) {
-		list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
-					 arch.run_list) {
+		for_each_runnable_thread(i, vcpu, vc) {
 			vcpu->arch.ret = -EBUSY;
 			kvmppc_remove_runnable(vc, vcpu);
 			wake_up(&vcpu->arch.cpu_run);
@@ -2476,8 +2494,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 		active |= 1 << thr;
 		list_for_each_entry(pvc, &core_info.vcs[sub], preempt_list) {
 			pvc->pcpu = pcpu + thr;
-			list_for_each_entry(vcpu, &pvc->runnable_threads,
-					    arch.run_list) {
+			for_each_runnable_thread(i, vcpu, pvc) {
 				kvmppc_start_thread(vcpu, pvc);
 				kvmppc_create_dtl_entry(vcpu, pvc);
 				trace_kvm_guest_enter(vcpu);
@@ -2610,7 +2627,7 @@ static void kvmppc_wait_for_exec(struct kvmppc_vcore *vc,
 static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
 {
 	struct kvm_vcpu *vcpu;
-	int do_sleep = 1;
+	int do_sleep = 1, i;
 	DECLARE_SWAITQUEUE(wait);
 
 	prepare_to_swait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
@@ -2619,7 +2636,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
 	 * Check one last time for pending exceptions and ceded state after
 	 * we put ourselves on the wait queue
 	 */
-	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
+	for_each_runnable_thread(i, vcpu, vc) {
 		if (vcpu->arch.pending_exceptions || !vcpu->arch.ceded) {
 			do_sleep = 0;
 			break;
@@ -2643,9 +2660,9 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
 
 static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
-	int n_ceded;
+	int n_ceded, i;
 	struct kvmppc_vcore *vc;
-	struct kvm_vcpu *v, *vn;
+	struct kvm_vcpu *v;
 
 	trace_kvmppc_run_vcpu_enter(vcpu);
 
@@ -2665,7 +2682,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
 	vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
 	vcpu->arch.busy_preempt = TB_NIL;
-	list_add_tail(&vcpu->arch.run_list, &vc->runnable_threads);
+	WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], vcpu);
 	++vc->n_runnable;
 
 	/*
@@ -2705,8 +2722,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 			kvmppc_wait_for_exec(vc, vcpu, TASK_INTERRUPTIBLE);
 			continue;
 		}
-		list_for_each_entry_safe(v, vn, &vc->runnable_threads,
-					 arch.run_list) {
+		for_each_runnable_thread(i, v, vc) {
 			kvmppc_core_prepare_to_enter(v);
 			if (signal_pending(v->arch.run_task)) {
 				kvmppc_remove_runnable(vc, v);
@@ -2719,7 +2735,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 		if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
 			break;
 		n_ceded = 0;
-		list_for_each_entry(v, &vc->runnable_threads, arch.run_list) {
+		for_each_runnable_thread(i, v, vc) {
 			if (!v->arch.pending_exceptions)
 				n_ceded += v->arch.ceded;
 			else
@@ -2758,8 +2774,8 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 
 	if (vc->n_runnable && vc->vcore_state == VCORE_INACTIVE) {
 		/* Wake up some vcpu to run the core */
-		v = list_first_entry(&vc->runnable_threads,
-				     struct kvm_vcpu, arch.run_list);
+		i = -1;
+		v = next_runnable_thread(vc, &i);
 		wake_up(&v->arch.cpu_run);
 	}
 
-- 
2.5.5


^ permalink raw reply related	[flat|nested] 20+ messages in thread

* [PATCH 2/4] kvm/ppc/book3s_hv: Implement halt polling in the kvm_hv kernel module
  2016-06-15  9:21 ` Suraj Jitindar Singh
@ 2016-06-15  9:21   ` Suraj Jitindar Singh
  -1 siblings, 0 replies; 20+ messages in thread
From: Suraj Jitindar Singh @ 2016-06-15  9:21 UTC (permalink / raw)
  To: linuxppc-dev
  Cc: kvm-ppc, mpe, paulus, benh, kvm, pbonzini, agraf, rkrcmar,
	Suraj Jitindar Singh

This patch introduces new halt polling functionality into the kvm_hv kernel
module. When a vcore is idle it will poll for some period of time before
scheduling itself out.

When all of the runnable vcpus on a vcore have ceded (and thus the vcore is
idle) we schedule ourselves out to allow something else to run. In the
event that we need to wake up very quickly (for example an interrupt
arrives), we are required to wait until we get scheduled again.

Implement halt polling so that when a vcore is idle, and before scheduling
ourselves, we poll for vcpus in the runnable_threads list which have
pending exceptions or which leave the ceded state. If we poll successfully
then we can get back into the guest very quickly without ever scheduling
ourselves, otherwise we schedule ourselves out as before.

Testing of this patch with a TCP round robin test between two guests with
virtio network interfaces has found a decrease in round trip time from
~140us to ~115us. A performance gain is only seen when going out of and
back into the guest often and quickly, otherwise there is no net benefit
from the polling. The polling interval is adjusted such that when we are
often scheduled out for long periods of time it is reduced, and when we
often poll successfully it is increased. The rate at which the polling
interval increases or decreases, and the maximum polling interval, can
be set through module parameters.

Based on the implementation in the generic kvm module by Wanpeng Li and
Paolo Bonzini, and on direction from Paul Mackerras.

Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
---
 arch/powerpc/include/asm/kvm_host.h |   2 +
 arch/powerpc/kvm/book3s_hv.c        | 115 +++++++++++++++++++++++++++++++-----
 arch/powerpc/kvm/trace_hv.h         |  22 +++++++
 3 files changed, 125 insertions(+), 14 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 4915443..b34efe8 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -308,6 +308,7 @@ struct kvmppc_vcore {
 	ulong pcr;
 	ulong dpdes;		/* doorbell state (POWER8) */
 	ulong conferring_threads;
+	unsigned int halt_poll_ns;
 };
 
 #define VCORE_ENTRY_MAP(vc)	((vc)->entry_exit_map & 0xff)
@@ -329,6 +330,7 @@ struct kvmppc_vcore {
 #define VCORE_SLEEPING	3
 #define VCORE_RUNNING	4
 #define VCORE_EXITING	5
+#define VCORE_POLLING	6
 
 /*
  * Struct used to manage memory for a virtual processor area
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 3bcf9e6..0d8ce14 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -94,6 +94,23 @@ module_param_cb(h_ipi_redirect, &module_param_ops, &h_ipi_redirect,
 MODULE_PARM_DESC(h_ipi_redirect, "Redirect H_IPI wakeup to a free host core");
 #endif
 
+/* Maximum halt poll interval defaults to KVM_HALT_POLL_NS_DEFAULT */
+static unsigned int halt_poll_max_ns = KVM_HALT_POLL_NS_DEFAULT;
+module_param(halt_poll_max_ns, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(halt_poll_max_ns, "Maximum halt poll time in ns");
+
+/* Factor by which the vcore halt poll interval is grown, default is to double
+ */
+static unsigned int halt_poll_ns_grow = 2;
+module_param(halt_poll_ns_grow, int, S_IRUGO);
+MODULE_PARM_DESC(halt_poll_ns_grow, "Factor halt poll time is grown by");
+
+/* Factor by which the vcore halt poll interval is shrunk, default is to reset
+ */
+static unsigned int halt_poll_ns_shrink;
+module_param(halt_poll_ns_shrink, int, S_IRUGO);
+MODULE_PARM_DESC(halt_poll_ns_shrink, "Factor halt poll time is shrunk by");
+
 static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
 
@@ -2620,32 +2637,82 @@ static void kvmppc_wait_for_exec(struct kvmppc_vcore *vc,
 	finish_wait(&vcpu->arch.cpu_run, &wait);
 }
 
+static void grow_halt_poll_ns(struct kvmppc_vcore *vc)
+{
+	/* 10us base */
+	if (vc->halt_poll_ns == 0 && halt_poll_ns_grow)
+		vc->halt_poll_ns = 10000;
+	else
+		vc->halt_poll_ns *= halt_poll_ns_grow;
+
+	if (vc->halt_poll_ns > halt_poll_max_ns)
+		vc->halt_poll_ns = halt_poll_max_ns;
+}
+
+static void shrink_halt_poll_ns(struct kvmppc_vcore *vc)
+{
+	if (halt_poll_ns_shrink == 0)
+		vc->halt_poll_ns = 0;
+	else
+		vc->halt_poll_ns /= halt_poll_ns_shrink;
+}
+
+/* Check to see if any of the runnable vcpus on the vcore have pending
+ * exceptions or are no longer ceded
+ */
+static int kvmppc_vcore_check_block(struct kvmppc_vcore *vc)
+{
+	struct kvm_vcpu *vcpu;
+	int i;
+
+	for_each_runnable_thread(i, vcpu, vc) {
+		if (vcpu->arch.pending_exceptions || !vcpu->arch.ceded)
+			return 1;
+	}
+
+	return 0;
+}
+
 /*
  * All the vcpus in this vcore are idle, so wait for a decrementer
  * or external interrupt to one of the vcpus.  vc->lock is held.
  */
 static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
 {
-	struct kvm_vcpu *vcpu;
-	int do_sleep = 1, i;
+	int do_sleep = 1;
+	ktime_t cur, start;
+	u64 block_ns;
 	DECLARE_SWAITQUEUE(wait);
 
-	prepare_to_swait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
+	/* Poll for pending exceptions and ceded state */
+	cur = start = ktime_get();
+	if (vc->halt_poll_ns) {
+		ktime_t stop = ktime_add_ns(start, vc->halt_poll_ns);
 
-	/*
-	 * Check one last time for pending exceptions and ceded state after
-	 * we put ourselves on the wait queue
-	 */
-	for_each_runnable_thread(i, vcpu, vc) {
-		if (vcpu->arch.pending_exceptions || !vcpu->arch.ceded) {
-			do_sleep = 0;
-			break;
-		}
+		vc->vcore_state = VCORE_POLLING;
+		spin_unlock(&vc->lock);
+
+		do {
+			if (kvmppc_vcore_check_block(vc)) {
+				do_sleep = 0;
+				break;
+			}
+			cur = ktime_get();
+		} while (ktime_before(cur, stop));
+
+		spin_lock(&vc->lock);
+		vc->vcore_state = VCORE_INACTIVE;
+
+		if (!do_sleep)
+			goto out;
 	}
 
-	if (!do_sleep) {
+	prepare_to_swait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
+
+	if (kvmppc_vcore_check_block(vc)) {
 		finish_swait(&vc->wq, &wait);
-		return;
+		do_sleep = 0;
+		goto out;
 	}
 
 	vc->vcore_state = VCORE_SLEEPING;
@@ -2656,6 +2723,26 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
 	spin_lock(&vc->lock);
 	vc->vcore_state = VCORE_INACTIVE;
 	trace_kvmppc_vcore_blocked(vc, 1);
+
+	cur = ktime_get();
+
+out:
+	block_ns = ktime_to_ns(cur) - ktime_to_ns(start);
+
+	if (halt_poll_max_ns) {
+		if (block_ns <= vc->halt_poll_ns)
+			;
+		/* We slept and blocked for longer than the max halt time */
+		else if (vc->halt_poll_ns && block_ns > halt_poll_max_ns)
+			shrink_halt_poll_ns(vc);
+		/* We slept and our poll time is too small */
+		else if (vc->halt_poll_ns < halt_poll_max_ns &&
+				block_ns < halt_poll_max_ns)
+			grow_halt_poll_ns(vc);
+	} else
+		vc->halt_poll_ns = 0;
+
+	trace_kvmppc_vcore_wakeup(do_sleep, block_ns);
 }
 
 static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/kvm/trace_hv.h b/arch/powerpc/kvm/trace_hv.h
index 33d9daf..fb21990 100644
--- a/arch/powerpc/kvm/trace_hv.h
+++ b/arch/powerpc/kvm/trace_hv.h
@@ -432,6 +432,28 @@ TRACE_EVENT(kvmppc_vcore_blocked,
 		   __entry->runner_vcpu, __entry->n_runnable, __entry->tgid)
 );
 
+TRACE_EVENT(kvmppc_vcore_wakeup,
+	TP_PROTO(int do_sleep, __u64 ns),
+
+	TP_ARGS(do_sleep, ns),
+
+	TP_STRUCT__entry(
+		__field(__u64,  ns)
+		__field(int,    waited)
+		__field(pid_t,  tgid)
+	),
+
+	TP_fast_assign(
+		__entry->ns     = ns;
+		__entry->waited = do_sleep;
+		__entry->tgid   = current->tgid;
+	),
+
+	TP_printk("%s time %lld ns, tgid=%d",
+		__entry->waited ? "wait" : "poll",
+		__entry->ns, __entry->tgid)
+);
+
 TRACE_EVENT(kvmppc_run_vcpu_enter,
 	TP_PROTO(struct kvm_vcpu *vcpu),
 
-- 
2.5.5


^ permalink raw reply related	[flat|nested] 20+ messages in thread

* [PATCH 2/4] kvm/ppc/book3s_hv: Implement halt polling in the kvm_hv kernel module
@ 2016-06-15  9:21   ` Suraj Jitindar Singh
  0 siblings, 0 replies; 20+ messages in thread
From: Suraj Jitindar Singh @ 2016-06-15  9:21 UTC (permalink / raw)
  To: linuxppc-dev
  Cc: kvm-ppc, mpe, paulus, benh, kvm, pbonzini, agraf, rkrcmar,
	Suraj Jitindar Singh

This patch introduces new halt polling functionality into the kvm_hv kernel
module. When a vcore is idle it will poll for some period of time before
scheduling itself out.

When all of the runnable vcpus on a vcore have ceded (and thus the vcore is
idle) we schedule ourselves out to allow something else to run. In the
event that we need to wake up very quickly (for example an interrupt
arrives), we are required to wait until we get scheduled again.

Implement halt polling so that when a vcore is idle, and before scheduling
ourselves, we poll for vcpus in the runnable_threads list which have
pending exceptions or which leave the ceded state. If we poll successfully
then we can get back into the guest very quickly without ever scheduling
ourselves, otherwise we schedule ourselves out as before.

Testing of this patch with a TCP round robin test between two guests with
virtio network interfaces has found a decrease in round trip time from
~140us to ~115us. A performance gain is only seen when going out of and
back into the guest often and quickly, otherwise there is no net benefit
from the polling. The polling interval is adjusted such that when we are
often scheduled out for long periods of time it is reduced, and when we
often poll successfully it is increased. The rate at which the polling
interval increases or decreases, and the maximum polling interval, can
be set through module parameters.

Based on the implementation in the generic kvm module by Wanpeng Li and
Paolo Bonzini, and on direction from Paul Mackerras.

Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
---
 arch/powerpc/include/asm/kvm_host.h |   2 +
 arch/powerpc/kvm/book3s_hv.c        | 115 +++++++++++++++++++++++++++++++-----
 arch/powerpc/kvm/trace_hv.h         |  22 +++++++
 3 files changed, 125 insertions(+), 14 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 4915443..b34efe8 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -308,6 +308,7 @@ struct kvmppc_vcore {
 	ulong pcr;
 	ulong dpdes;		/* doorbell state (POWER8) */
 	ulong conferring_threads;
+	unsigned int halt_poll_ns;
 };
 
 #define VCORE_ENTRY_MAP(vc)	((vc)->entry_exit_map & 0xff)
@@ -329,6 +330,7 @@ struct kvmppc_vcore {
 #define VCORE_SLEEPING	3
 #define VCORE_RUNNING	4
 #define VCORE_EXITING	5
+#define VCORE_POLLING	6
 
 /*
  * Struct used to manage memory for a virtual processor area
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 3bcf9e6..0d8ce14 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -94,6 +94,23 @@ module_param_cb(h_ipi_redirect, &module_param_ops, &h_ipi_redirect,
 MODULE_PARM_DESC(h_ipi_redirect, "Redirect H_IPI wakeup to a free host core");
 #endif
 
+/* Maximum halt poll interval defaults to KVM_HALT_POLL_NS_DEFAULT */
+static unsigned int halt_poll_max_ns = KVM_HALT_POLL_NS_DEFAULT;
+module_param(halt_poll_max_ns, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(halt_poll_max_ns, "Maximum halt poll time in ns");
+
+/* Factor by which the vcore halt poll interval is grown, default is to double
+ */
+static unsigned int halt_poll_ns_grow = 2;
+module_param(halt_poll_ns_grow, int, S_IRUGO);
+MODULE_PARM_DESC(halt_poll_ns_grow, "Factor halt poll time is grown by");
+
+/* Factor by which the vcore halt poll interval is shrunk, default is to reset
+ */
+static unsigned int halt_poll_ns_shrink;
+module_param(halt_poll_ns_shrink, int, S_IRUGO);
+MODULE_PARM_DESC(halt_poll_ns_shrink, "Factor halt poll time is shrunk by");
+
 static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
 
@@ -2620,32 +2637,82 @@ static void kvmppc_wait_for_exec(struct kvmppc_vcore *vc,
 	finish_wait(&vcpu->arch.cpu_run, &wait);
 }
 
+static void grow_halt_poll_ns(struct kvmppc_vcore *vc)
+{
+	/* 10us base */
+	if (vc->halt_poll_ns == 0 && halt_poll_ns_grow)
+		vc->halt_poll_ns = 10000;
+	else
+		vc->halt_poll_ns *= halt_poll_ns_grow;
+
+	if (vc->halt_poll_ns > halt_poll_max_ns)
+		vc->halt_poll_ns = halt_poll_max_ns;
+}
+
+static void shrink_halt_poll_ns(struct kvmppc_vcore *vc)
+{
+	if (halt_poll_ns_shrink == 0)
+		vc->halt_poll_ns = 0;
+	else
+		vc->halt_poll_ns /= halt_poll_ns_shrink;
+}
+
+/* Check to see if any of the runnable vcpus on the vcore have pending
+ * exceptions or are no longer ceded
+ */
+static int kvmppc_vcore_check_block(struct kvmppc_vcore *vc)
+{
+	struct kvm_vcpu *vcpu;
+	int i;
+
+	for_each_runnable_thread(i, vcpu, vc) {
+		if (vcpu->arch.pending_exceptions || !vcpu->arch.ceded)
+			return 1;
+	}
+
+	return 0;
+}
+
 /*
  * All the vcpus in this vcore are idle, so wait for a decrementer
  * or external interrupt to one of the vcpus.  vc->lock is held.
  */
 static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
 {
-	struct kvm_vcpu *vcpu;
-	int do_sleep = 1, i;
+	int do_sleep = 1;
+	ktime_t cur, start;
+	u64 block_ns;
 	DECLARE_SWAITQUEUE(wait);
 
-	prepare_to_swait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
+	/* Poll for pending exceptions and ceded state */
+	cur = start = ktime_get();
+	if (vc->halt_poll_ns) {
+		ktime_t stop = ktime_add_ns(start, vc->halt_poll_ns);
 
-	/*
-	 * Check one last time for pending exceptions and ceded state after
-	 * we put ourselves on the wait queue
-	 */
-	for_each_runnable_thread(i, vcpu, vc) {
-		if (vcpu->arch.pending_exceptions || !vcpu->arch.ceded) {
-			do_sleep = 0;
-			break;
-		}
+		vc->vcore_state = VCORE_POLLING;
+		spin_unlock(&vc->lock);
+
+		do {
+			if (kvmppc_vcore_check_block(vc)) {
+				do_sleep = 0;
+				break;
+			}
+			cur = ktime_get();
+		} while (ktime_before(cur, stop));
+
+		spin_lock(&vc->lock);
+		vc->vcore_state = VCORE_INACTIVE;
+
+		if (!do_sleep)
+			goto out;
 	}
 
-	if (!do_sleep) {
+	prepare_to_swait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
+
+	if (kvmppc_vcore_check_block(vc)) {
 		finish_swait(&vc->wq, &wait);
-		return;
+		do_sleep = 0;
+		goto out;
 	}
 
 	vc->vcore_state = VCORE_SLEEPING;
@@ -2656,6 +2723,26 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
 	spin_lock(&vc->lock);
 	vc->vcore_state = VCORE_INACTIVE;
 	trace_kvmppc_vcore_blocked(vc, 1);
+
+	cur = ktime_get();
+
+out:
+	block_ns = ktime_to_ns(cur) - ktime_to_ns(start);
+
+	if (halt_poll_max_ns) {
+		if (block_ns <= vc->halt_poll_ns)
+			;
+		/* We slept and blocked for longer than the max halt time */
+		else if (vc->halt_poll_ns && block_ns > halt_poll_max_ns)
+			shrink_halt_poll_ns(vc);
+		/* We slept and our poll time is too small */
+		else if (vc->halt_poll_ns < halt_poll_max_ns &&
+				block_ns < halt_poll_max_ns)
+			grow_halt_poll_ns(vc);
+	} else
+		vc->halt_poll_ns = 0;
+
+	trace_kvmppc_vcore_wakeup(do_sleep, block_ns);
 }
 
 static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/kvm/trace_hv.h b/arch/powerpc/kvm/trace_hv.h
index 33d9daf..fb21990 100644
--- a/arch/powerpc/kvm/trace_hv.h
+++ b/arch/powerpc/kvm/trace_hv.h
@@ -432,6 +432,28 @@ TRACE_EVENT(kvmppc_vcore_blocked,
 		   __entry->runner_vcpu, __entry->n_runnable, __entry->tgid)
 );
 
+TRACE_EVENT(kvmppc_vcore_wakeup,
+	TP_PROTO(int do_sleep, __u64 ns),
+
+	TP_ARGS(do_sleep, ns),
+
+	TP_STRUCT__entry(
+		__field(__u64,  ns)
+		__field(int,    waited)
+		__field(pid_t,  tgid)
+	),
+
+	TP_fast_assign(
+		__entry->ns     = ns;
+		__entry->waited = do_sleep;
+		__entry->tgid   = current->tgid;
+	),
+
+	TP_printk("%s time %lld ns, tgid=%d",
+		__entry->waited ? "wait" : "poll",
+		__entry->ns, __entry->tgid)
+);
+
 TRACE_EVENT(kvmppc_run_vcpu_enter,
 	TP_PROTO(struct kvm_vcpu *vcpu),
 
-- 
2.5.5


^ permalink raw reply related	[flat|nested] 20+ messages in thread

* [PATCH 3/4] kvm/stats: Add provisioning for 64-bit vcpu statistics
  2016-06-15  9:21 ` Suraj Jitindar Singh
@ 2016-06-15  9:21   ` Suraj Jitindar Singh
  -1 siblings, 0 replies; 20+ messages in thread
From: Suraj Jitindar Singh @ 2016-06-15  9:21 UTC (permalink / raw)
  To: linuxppc-dev
  Cc: kvm-ppc, mpe, paulus, benh, kvm, pbonzini, agraf, rkrcmar,
	Suraj Jitindar Singh

vcpus have statistics associated with them which can be viewed within the
debugfs. Currently it is assumed within the vcpu_stat_get() and
vcpu_stat_get_per_vm() functions that all of these statistics are
represented as 32-bit numbers. The next patch adds some 64-bit statistics,
so add provisioning for the display of 64-bit vcpu statistics.

Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
---
 arch/powerpc/kvm/book3s.c |  1 +
 include/linux/kvm_host.h  |  1 +
 virt/kvm/kvm_main.c       | 60 +++++++++++++++++++++++++++++++++++++++++++----
 3 files changed, 58 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 47018fc..ed9132b 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -40,6 +40,7 @@
 #include "trace.h"
 
 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
+#define VCPU_STAT_U64(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU_U64
 
 /* #define EXIT_DEBUG */
 
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 1c9c973..667b30e 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -991,6 +991,7 @@ static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
 enum kvm_stat_kind {
 	KVM_STAT_VM,
 	KVM_STAT_VCPU,
+	KVM_STAT_VCPU_U64,
 };
 
 struct kvm_stat_data {
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 02e98f3..ac47ffb 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3566,6 +3566,20 @@ static int vcpu_stat_get_per_vm(void *data, u64 *val)
 	return 0;
 }
 
+static int vcpu_stat_u64_get_per_vm(void *data, u64 *val)
+{
+	int i;
+	struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
+	struct kvm_vcpu *vcpu;
+
+	*val = 0;
+
+	kvm_for_each_vcpu(i, vcpu, stat_data->kvm)
+		*val += *(u64 *)((void *)vcpu + stat_data->offset);
+
+	return 0;
+}
+
 static int vcpu_stat_get_per_vm_open(struct inode *inode, struct file *file)
 {
 	__simple_attr_check_format("%llu\n", 0ull);
@@ -3573,6 +3587,13 @@ static int vcpu_stat_get_per_vm_open(struct inode *inode, struct file *file)
 				 NULL, "%llu\n");
 }
 
+static int vcpu_stat_u64_get_per_vm_open(struct inode *inode, struct file *file)
+{
+	__simple_attr_check_format("%llu\n", 0ull);
+	return kvm_debugfs_open(inode, file, vcpu_stat_u64_get_per_vm,
+				 NULL, "%llu\n");
+}
+
 static const struct file_operations vcpu_stat_get_per_vm_fops = {
 	.owner   = THIS_MODULE,
 	.open    = vcpu_stat_get_per_vm_open,
@@ -3582,9 +3603,19 @@ static const struct file_operations vcpu_stat_get_per_vm_fops = {
 	.llseek  = generic_file_llseek,
 };
 
+static const struct file_operations vcpu_stat_u64_get_per_vm_fops = {
+	.owner   = THIS_MODULE,
+	.open    = vcpu_stat_u64_get_per_vm_open,
+	.release = kvm_debugfs_release,
+	.read    = simple_attr_read,
+	.write   = simple_attr_write,
+	.llseek  = generic_file_llseek,
+};
+
 static const struct file_operations *stat_fops_per_vm[] = {
-	[KVM_STAT_VCPU] = &vcpu_stat_get_per_vm_fops,
-	[KVM_STAT_VM]   = &vm_stat_get_per_vm_fops,
+	[KVM_STAT_VCPU]		= &vcpu_stat_get_per_vm_fops,
+	[KVM_STAT_VCPU_U64]	= &vcpu_stat_u64_get_per_vm_fops,
+	[KVM_STAT_VM]		= &vm_stat_get_per_vm_fops,
 };
 
 static int vm_stat_get(void *_offset, u64 *val)
@@ -3627,9 +3658,30 @@ static int vcpu_stat_get(void *_offset, u64 *val)
 
 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");
 
+static int vcpu_stat_u64_get(void *_offset, u64 *val)
+{
+	unsigned offset = (long)_offset;
+	struct kvm *kvm;
+	struct kvm_stat_data stat_tmp = {.offset = offset};
+	u64 tmp_val;
+
+	*val = 0;
+	spin_lock(&kvm_lock);
+	list_for_each_entry(kvm, &vm_list, vm_list) {
+		stat_tmp.kvm = kvm;
+		vcpu_stat_u64_get_per_vm((void *)&stat_tmp, &tmp_val);
+		*val += tmp_val;
+	}
+	spin_unlock(&kvm_lock);
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_u64_fops, vcpu_stat_u64_get, NULL, "%llu\n");
+
 static const struct file_operations *stat_fops[] = {
-	[KVM_STAT_VCPU] = &vcpu_stat_fops,
-	[KVM_STAT_VM]   = &vm_stat_fops,
+	[KVM_STAT_VCPU]		= &vcpu_stat_fops,
+	[KVM_STAT_VCPU_U64]	= &vcpu_stat_u64_fops,
+	[KVM_STAT_VM]		= &vm_stat_fops,
 };
 
 static int kvm_init_debug(void)
-- 
2.5.5


^ permalink raw reply related	[flat|nested] 20+ messages in thread

* [PATCH 3/4] kvm/stats: Add provisioning for 64-bit vcpu statistics
@ 2016-06-15  9:21   ` Suraj Jitindar Singh
  0 siblings, 0 replies; 20+ messages in thread
From: Suraj Jitindar Singh @ 2016-06-15  9:21 UTC (permalink / raw)
  To: linuxppc-dev
  Cc: kvm-ppc, mpe, paulus, benh, kvm, pbonzini, agraf, rkrcmar,
	Suraj Jitindar Singh

vcpus have statistics associated with them which can be viewed within the
debugfs. Currently it is assumed within the vcpu_stat_get() and
vcpu_stat_get_per_vm() functions that all of these statistics are
represented as 32-bit numbers. The next patch adds some 64-bit statistics,
so add provisioning for the display of 64-bit vcpu statistics.

Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
---
 arch/powerpc/kvm/book3s.c |  1 +
 include/linux/kvm_host.h  |  1 +
 virt/kvm/kvm_main.c       | 60 +++++++++++++++++++++++++++++++++++++++++++----
 3 files changed, 58 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 47018fc..ed9132b 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -40,6 +40,7 @@
 #include "trace.h"
 
 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
+#define VCPU_STAT_U64(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU_U64
 
 /* #define EXIT_DEBUG */
 
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 1c9c973..667b30e 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -991,6 +991,7 @@ static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
 enum kvm_stat_kind {
 	KVM_STAT_VM,
 	KVM_STAT_VCPU,
+	KVM_STAT_VCPU_U64,
 };
 
 struct kvm_stat_data {
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 02e98f3..ac47ffb 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3566,6 +3566,20 @@ static int vcpu_stat_get_per_vm(void *data, u64 *val)
 	return 0;
 }
 
+static int vcpu_stat_u64_get_per_vm(void *data, u64 *val)
+{
+	int i;
+	struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
+	struct kvm_vcpu *vcpu;
+
+	*val = 0;
+
+	kvm_for_each_vcpu(i, vcpu, stat_data->kvm)
+		*val += *(u64 *)((void *)vcpu + stat_data->offset);
+
+	return 0;
+}
+
 static int vcpu_stat_get_per_vm_open(struct inode *inode, struct file *file)
 {
 	__simple_attr_check_format("%llu\n", 0ull);
@@ -3573,6 +3587,13 @@ static int vcpu_stat_get_per_vm_open(struct inode *inode, struct file *file)
 				 NULL, "%llu\n");
 }
 
+static int vcpu_stat_u64_get_per_vm_open(struct inode *inode, struct file *file)
+{
+	__simple_attr_check_format("%llu\n", 0ull);
+	return kvm_debugfs_open(inode, file, vcpu_stat_u64_get_per_vm,
+				 NULL, "%llu\n");
+}
+
 static const struct file_operations vcpu_stat_get_per_vm_fops = {
 	.owner   = THIS_MODULE,
 	.open    = vcpu_stat_get_per_vm_open,
@@ -3582,9 +3603,19 @@ static const struct file_operations vcpu_stat_get_per_vm_fops = {
 	.llseek  = generic_file_llseek,
 };
 
+static const struct file_operations vcpu_stat_u64_get_per_vm_fops = {
+	.owner   = THIS_MODULE,
+	.open    = vcpu_stat_u64_get_per_vm_open,
+	.release = kvm_debugfs_release,
+	.read    = simple_attr_read,
+	.write   = simple_attr_write,
+	.llseek  = generic_file_llseek,
+};
+
 static const struct file_operations *stat_fops_per_vm[] = {
-	[KVM_STAT_VCPU] = &vcpu_stat_get_per_vm_fops,
-	[KVM_STAT_VM]   = &vm_stat_get_per_vm_fops,
+	[KVM_STAT_VCPU]		= &vcpu_stat_get_per_vm_fops,
+	[KVM_STAT_VCPU_U64]	= &vcpu_stat_u64_get_per_vm_fops,
+	[KVM_STAT_VM]		= &vm_stat_get_per_vm_fops,
 };
 
 static int vm_stat_get(void *_offset, u64 *val)
@@ -3627,9 +3658,30 @@ static int vcpu_stat_get(void *_offset, u64 *val)
 
 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");
 
+static int vcpu_stat_u64_get(void *_offset, u64 *val)
+{
+	unsigned offset = (long)_offset;
+	struct kvm *kvm;
+	struct kvm_stat_data stat_tmp = {.offset = offset};
+	u64 tmp_val;
+
+	*val = 0;
+	spin_lock(&kvm_lock);
+	list_for_each_entry(kvm, &vm_list, vm_list) {
+		stat_tmp.kvm = kvm;
+		vcpu_stat_u64_get_per_vm((void *)&stat_tmp, &tmp_val);
+		*val += tmp_val;
+	}
+	spin_unlock(&kvm_lock);
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_u64_fops, vcpu_stat_u64_get, NULL, "%llu\n");
+
 static const struct file_operations *stat_fops[] = {
-	[KVM_STAT_VCPU] = &vcpu_stat_fops,
-	[KVM_STAT_VM]   = &vm_stat_fops,
+	[KVM_STAT_VCPU]		= &vcpu_stat_fops,
+	[KVM_STAT_VCPU_U64]	= &vcpu_stat_u64_fops,
+	[KVM_STAT_VM]		= &vm_stat_fops,
 };
 
 static int kvm_init_debug(void)
-- 
2.5.5


^ permalink raw reply related	[flat|nested] 20+ messages in thread

* [PATCH 4/4] powerpc/kvm/stats: Implement existing and add new halt polling vcpu stats
  2016-06-15  9:21 ` Suraj Jitindar Singh
@ 2016-06-15  9:21   ` Suraj Jitindar Singh
  -1 siblings, 0 replies; 20+ messages in thread
From: Suraj Jitindar Singh @ 2016-06-15  9:21 UTC (permalink / raw)
  To: linuxppc-dev
  Cc: kvm-ppc, mpe, paulus, benh, kvm, pbonzini, agraf, rkrcmar,
	Suraj Jitindar Singh

vcpu stats are used to collect information about a vcpu which can be viewed
in the debugfs. For example halt_attempted_poll and halt_successful_poll
are used to keep track of the number of times the vcpu attempts to and
successfully polls. These stats are currently not used on powerpc.

Implement incrementation of the halt_attempted_poll and
halt_successful_poll vcpu stats for powerpc. Since these stats are summed
over all the vcpus for all running guests it doesn't matter which vcpu
they are attributed to, thus we choose the current runner vcpu of the
vcore.

Also add new vcpu stats: halt_poll_time and halt_wait_time to be used to
accumulate the total time spent polling and waiting respectively, and
halt_successful_wait to accumulate the number of times the vcpu waits.
Given that halt_poll_time and halt_wait_time are expressed in nanoseconds
it is necessary to represent these as 64-bit quantities, otherwise they
would overflow after only about 4 seconds.

Given that the total time spent either polling or waiting will be known and
the number of times that each was done, it will be possible to determine
the average poll and wait times. This will give the ability to tune the kvm
module parameters based on the calculated average wait and poll times.

Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
---
 arch/powerpc/include/asm/kvm_host.h |  3 +++
 arch/powerpc/kvm/book3s.c           |  3 +++
 arch/powerpc/kvm/book3s_hv.c        | 14 +++++++++++++-
 3 files changed, 19 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index b34efe8..2ece5fc 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -114,8 +114,11 @@ struct kvm_vcpu_stat {
 	u32 emulated_inst_exits;
 	u32 dec_exits;
 	u32 ext_intr_exits;
+	u64 halt_poll_time;
+	u64 halt_wait_time;
 	u32 halt_successful_poll;
 	u32 halt_attempted_poll;
+	u32 halt_successful_wait;
 	u32 halt_poll_invalid;
 	u32 halt_wakeup;
 	u32 dbell_exits;
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index ed9132b..6217bea 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -53,8 +53,11 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "dec",         VCPU_STAT(dec_exits) },
 	{ "ext_intr",    VCPU_STAT(ext_intr_exits) },
 	{ "queue_intr",  VCPU_STAT(queue_intr) },
+	{ "halt_poll_time_ns",		VCPU_STAT_U64(halt_poll_time) },
+	{ "halt_wait_time_ns",		VCPU_STAT_U64(halt_wait_time) },
 	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), },
 	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), },
+	{ "halt_successful_wait",	VCPU_STAT(halt_successful_wait) },
 	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
 	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
 	{ "pf_storage",  VCPU_STAT(pf_storage) },
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 0d8ce14..a0dae63 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2688,6 +2688,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
 	cur = start = ktime_get();
 	if (vc->halt_poll_ns) {
 		ktime_t stop = ktime_add_ns(start, vc->halt_poll_ns);
+		++vc->runner->stat.halt_attempted_poll;
 
 		vc->vcore_state = VCORE_POLLING;
 		spin_unlock(&vc->lock);
@@ -2703,8 +2704,10 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
 		spin_lock(&vc->lock);
 		vc->vcore_state = VCORE_INACTIVE;
 
-		if (!do_sleep)
+		if (!do_sleep) {
+			++vc->runner->stat.halt_successful_poll;
 			goto out;
+		}
 	}
 
 	prepare_to_swait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
@@ -2712,6 +2715,9 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
 	if (kvmppc_vcore_check_block(vc)) {
 		finish_swait(&vc->wq, &wait);
 		do_sleep = 0;
+		/* If we polled, count this as a successful poll */
+		if (vc->halt_poll_ns)
+			++vc->runner->stat.halt_successful_poll;
 		goto out;
 	}
 
@@ -2723,12 +2729,18 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
 	spin_lock(&vc->lock);
 	vc->vcore_state = VCORE_INACTIVE;
 	trace_kvmppc_vcore_blocked(vc, 1);
+	++vc->runner->stat.halt_successful_wait;
 
 	cur = ktime_get();
 
 out:
 	block_ns = ktime_to_ns(cur) - ktime_to_ns(start);
 
+	if (do_sleep)
+		vc->runner->stat.halt_wait_time += block_ns;
+	else if (vc->halt_poll_ns)
+		vc->runner->stat.halt_poll_time += block_ns;
+
 	if (halt_poll_max_ns) {
 		if (block_ns <= vc->halt_poll_ns)
 			;
-- 
2.5.5


^ permalink raw reply related	[flat|nested] 20+ messages in thread

* [PATCH 4/4] powerpc/kvm/stats: Implement existing and add new halt polling vcpu stats
@ 2016-06-15  9:21   ` Suraj Jitindar Singh
  0 siblings, 0 replies; 20+ messages in thread
From: Suraj Jitindar Singh @ 2016-06-15  9:21 UTC (permalink / raw)
  To: linuxppc-dev
  Cc: kvm-ppc, mpe, paulus, benh, kvm, pbonzini, agraf, rkrcmar,
	Suraj Jitindar Singh

vcpu stats are used to collect information about a vcpu which can be viewed
in the debugfs. For example halt_attempted_poll and halt_successful_poll
are used to keep track of the number of times the vcpu attempts to and
successfully polls. These stats are currently not used on powerpc.

Implement incrementation of the halt_attempted_poll and
halt_successful_poll vcpu stats for powerpc. Since these stats are summed
over all the vcpus for all running guests it doesn't matter which vcpu
they are attributed to, thus we choose the current runner vcpu of the
vcore.

Also add new vcpu stats: halt_poll_time and halt_wait_time to be used to
accumulate the total time spent polling and waiting respectively, and
halt_successful_wait to accumulate the number of times the vcpu waits.
Given that halt_poll_time and halt_wait_time are expressed in nanoseconds
it is necessary to represent these as 64-bit quantities, otherwise they
would overflow after only about 4 seconds.

Given that the total time spent either polling or waiting will be known and
the number of times that each was done, it will be possible to determine
the average poll and wait times. This will give the ability to tune the kvm
module parameters based on the calculated average wait and poll times.

Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
---
 arch/powerpc/include/asm/kvm_host.h |  3 +++
 arch/powerpc/kvm/book3s.c           |  3 +++
 arch/powerpc/kvm/book3s_hv.c        | 14 +++++++++++++-
 3 files changed, 19 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index b34efe8..2ece5fc 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -114,8 +114,11 @@ struct kvm_vcpu_stat {
 	u32 emulated_inst_exits;
 	u32 dec_exits;
 	u32 ext_intr_exits;
+	u64 halt_poll_time;
+	u64 halt_wait_time;
 	u32 halt_successful_poll;
 	u32 halt_attempted_poll;
+	u32 halt_successful_wait;
 	u32 halt_poll_invalid;
 	u32 halt_wakeup;
 	u32 dbell_exits;
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index ed9132b..6217bea 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -53,8 +53,11 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "dec",         VCPU_STAT(dec_exits) },
 	{ "ext_intr",    VCPU_STAT(ext_intr_exits) },
 	{ "queue_intr",  VCPU_STAT(queue_intr) },
+	{ "halt_poll_time_ns",		VCPU_STAT_U64(halt_poll_time) },
+	{ "halt_wait_time_ns",		VCPU_STAT_U64(halt_wait_time) },
 	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), },
 	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), },
+	{ "halt_successful_wait",	VCPU_STAT(halt_successful_wait) },
 	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
 	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
 	{ "pf_storage",  VCPU_STAT(pf_storage) },
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 0d8ce14..a0dae63 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2688,6 +2688,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
 	cur = start = ktime_get();
 	if (vc->halt_poll_ns) {
 		ktime_t stop = ktime_add_ns(start, vc->halt_poll_ns);
+		++vc->runner->stat.halt_attempted_poll;
 
 		vc->vcore_state = VCORE_POLLING;
 		spin_unlock(&vc->lock);
@@ -2703,8 +2704,10 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
 		spin_lock(&vc->lock);
 		vc->vcore_state = VCORE_INACTIVE;
 
-		if (!do_sleep)
+		if (!do_sleep) {
+			++vc->runner->stat.halt_successful_poll;
 			goto out;
+		}
 	}
 
 	prepare_to_swait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
@@ -2712,6 +2715,9 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
 	if (kvmppc_vcore_check_block(vc)) {
 		finish_swait(&vc->wq, &wait);
 		do_sleep = 0;
+		/* If we polled, count this as a successful poll */
+		if (vc->halt_poll_ns)
+			++vc->runner->stat.halt_successful_poll;
 		goto out;
 	}
 
@@ -2723,12 +2729,18 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
 	spin_lock(&vc->lock);
 	vc->vcore_state = VCORE_INACTIVE;
 	trace_kvmppc_vcore_blocked(vc, 1);
+	++vc->runner->stat.halt_successful_wait;
 
 	cur = ktime_get();
 
 out:
 	block_ns = ktime_to_ns(cur) - ktime_to_ns(start);
 
+	if (do_sleep)
+		vc->runner->stat.halt_wait_time += block_ns;
+	else if (vc->halt_poll_ns)
+		vc->runner->stat.halt_poll_time += block_ns;
+
 	if (halt_poll_max_ns) {
 		if (block_ns <= vc->halt_poll_ns)
 			;
-- 
2.5.5


^ permalink raw reply related	[flat|nested] 20+ messages in thread

* Re: [PATCH 3/4] kvm/stats: Add provisioning for 64-bit vcpu statistics
  2016-06-15  9:21   ` Suraj Jitindar Singh
@ 2016-06-20  0:08     ` Paul Mackerras
  -1 siblings, 0 replies; 20+ messages in thread
From: Paul Mackerras @ 2016-06-20  0:08 UTC (permalink / raw)
  To: Paolo Bonzini
  Cc: linuxppc-dev, kvm-ppc, mpe, benh, kvm, agraf, rkrcmar,
	Suraj Jitindar Singh

Paolo,

Can I have an ack for Suraj's patch below?  If it's OK with you,
I'll take his series through my tree.

Thanks,
Paul.

On Wed, Jun 15, 2016 at 07:21:07PM +1000, Suraj Jitindar Singh wrote:
> vcpus have statistics associated with them which can be viewed within the
> debugfs. Currently it is assumed within the vcpu_stat_get() and
> vcpu_stat_get_per_vm() functions that all of these statistics are
> represented as 32-bit numbers. The next patch adds some 64-bit statistics,
> so add provisioning for the display of 64-bit vcpu statistics.
> 
> Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
> ---
>  arch/powerpc/kvm/book3s.c |  1 +
>  include/linux/kvm_host.h  |  1 +
>  virt/kvm/kvm_main.c       | 60 +++++++++++++++++++++++++++++++++++++++++++----
>  3 files changed, 58 insertions(+), 4 deletions(-)
> 
> diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
> index 47018fc..ed9132b 100644
> --- a/arch/powerpc/kvm/book3s.c
> +++ b/arch/powerpc/kvm/book3s.c
> @@ -40,6 +40,7 @@
>  #include "trace.h"
>  
>  #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
> +#define VCPU_STAT_U64(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU_U64
>  
>  /* #define EXIT_DEBUG */
>  
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index 1c9c973..667b30e 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -991,6 +991,7 @@ static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
>  enum kvm_stat_kind {
>  	KVM_STAT_VM,
>  	KVM_STAT_VCPU,
> +	KVM_STAT_VCPU_U64,
>  };
>  
>  struct kvm_stat_data {
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index 02e98f3..ac47ffb 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -3566,6 +3566,20 @@ static int vcpu_stat_get_per_vm(void *data, u64 *val)
>  	return 0;
>  }
>  
> +static int vcpu_stat_u64_get_per_vm(void *data, u64 *val)
> +{
> +	int i;
> +	struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
> +	struct kvm_vcpu *vcpu;
> +
> +	*val = 0;
> +
> +	kvm_for_each_vcpu(i, vcpu, stat_data->kvm)
> +		*val += *(u64 *)((void *)vcpu + stat_data->offset);
> +
> +	return 0;
> +}
> +
>  static int vcpu_stat_get_per_vm_open(struct inode *inode, struct file *file)
>  {
>  	__simple_attr_check_format("%llu\n", 0ull);
> @@ -3573,6 +3587,13 @@ static int vcpu_stat_get_per_vm_open(struct inode *inode, struct file *file)
>  				 NULL, "%llu\n");
>  }
>  
> +static int vcpu_stat_u64_get_per_vm_open(struct inode *inode, struct file *file)
> +{
> +	__simple_attr_check_format("%llu\n", 0ull);
> +	return kvm_debugfs_open(inode, file, vcpu_stat_u64_get_per_vm,
> +				 NULL, "%llu\n");
> +}
> +
>  static const struct file_operations vcpu_stat_get_per_vm_fops = {
>  	.owner   = THIS_MODULE,
>  	.open    = vcpu_stat_get_per_vm_open,
> @@ -3582,9 +3603,19 @@ static const struct file_operations vcpu_stat_get_per_vm_fops = {
>  	.llseek  = generic_file_llseek,
>  };
>  
> +static const struct file_operations vcpu_stat_u64_get_per_vm_fops = {
> +	.owner   = THIS_MODULE,
> +	.open    = vcpu_stat_u64_get_per_vm_open,
> +	.release = kvm_debugfs_release,
> +	.read    = simple_attr_read,
> +	.write   = simple_attr_write,
> +	.llseek  = generic_file_llseek,
> +};
> +
>  static const struct file_operations *stat_fops_per_vm[] = {
> -	[KVM_STAT_VCPU] = &vcpu_stat_get_per_vm_fops,
> -	[KVM_STAT_VM]   = &vm_stat_get_per_vm_fops,
> +	[KVM_STAT_VCPU]		= &vcpu_stat_get_per_vm_fops,
> +	[KVM_STAT_VCPU_U64]	= &vcpu_stat_u64_get_per_vm_fops,
> +	[KVM_STAT_VM]		= &vm_stat_get_per_vm_fops,
>  };
>  
>  static int vm_stat_get(void *_offset, u64 *val)
> @@ -3627,9 +3658,30 @@ static int vcpu_stat_get(void *_offset, u64 *val)
>  
>  DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");
>  
> +static int vcpu_stat_u64_get(void *_offset, u64 *val)
> +{
> +	unsigned offset = (long)_offset;
> +	struct kvm *kvm;
> +	struct kvm_stat_data stat_tmp = {.offset = offset};
> +	u64 tmp_val;
> +
> +	*val = 0;
> +	spin_lock(&kvm_lock);
> +	list_for_each_entry(kvm, &vm_list, vm_list) {
> +		stat_tmp.kvm = kvm;
> +		vcpu_stat_u64_get_per_vm((void *)&stat_tmp, &tmp_val);
> +		*val += tmp_val;
> +	}
> +	spin_unlock(&kvm_lock);
> +	return 0;
> +}
> +
> +DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_u64_fops, vcpu_stat_u64_get, NULL, "%llu\n");
> +
>  static const struct file_operations *stat_fops[] = {
> -	[KVM_STAT_VCPU] = &vcpu_stat_fops,
> -	[KVM_STAT_VM]   = &vm_stat_fops,
> +	[KVM_STAT_VCPU]		= &vcpu_stat_fops,
> +	[KVM_STAT_VCPU_U64]	= &vcpu_stat_u64_fops,
> +	[KVM_STAT_VM]		= &vm_stat_fops,
>  };
>  
>  static int kvm_init_debug(void)
> -- 
> 2.5.5
> 
> --
> To unsubscribe from this list: send the line "unsubscribe kvm-ppc" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [PATCH 3/4] kvm/stats: Add provisioning for 64-bit vcpu statistics
@ 2016-06-20  0:08     ` Paul Mackerras
  0 siblings, 0 replies; 20+ messages in thread
From: Paul Mackerras @ 2016-06-20  0:08 UTC (permalink / raw)
  To: Paolo Bonzini
  Cc: linuxppc-dev, kvm-ppc, mpe, benh, kvm, agraf, rkrcmar,
	Suraj Jitindar Singh

Paolo,

Can I have an ack for Suraj's patch below?  If it's OK with you,
I'll take his series through my tree.

Thanks,
Paul.

On Wed, Jun 15, 2016 at 07:21:07PM +1000, Suraj Jitindar Singh wrote:
> vcpus have statistics associated with them which can be viewed within the
> debugfs. Currently it is assumed within the vcpu_stat_get() and
> vcpu_stat_get_per_vm() functions that all of these statistics are
> represented as 32-bit numbers. The next patch adds some 64-bit statistics,
> so add provisioning for the display of 64-bit vcpu statistics.
> 
> Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
> ---
>  arch/powerpc/kvm/book3s.c |  1 +
>  include/linux/kvm_host.h  |  1 +
>  virt/kvm/kvm_main.c       | 60 +++++++++++++++++++++++++++++++++++++++++++----
>  3 files changed, 58 insertions(+), 4 deletions(-)
> 
> diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
> index 47018fc..ed9132b 100644
> --- a/arch/powerpc/kvm/book3s.c
> +++ b/arch/powerpc/kvm/book3s.c
> @@ -40,6 +40,7 @@
>  #include "trace.h"
>  
>  #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
> +#define VCPU_STAT_U64(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU_U64
>  
>  /* #define EXIT_DEBUG */
>  
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index 1c9c973..667b30e 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -991,6 +991,7 @@ static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
>  enum kvm_stat_kind {
>  	KVM_STAT_VM,
>  	KVM_STAT_VCPU,
> +	KVM_STAT_VCPU_U64,
>  };
>  
>  struct kvm_stat_data {
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index 02e98f3..ac47ffb 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -3566,6 +3566,20 @@ static int vcpu_stat_get_per_vm(void *data, u64 *val)
>  	return 0;
>  }
>  
> +static int vcpu_stat_u64_get_per_vm(void *data, u64 *val)
> +{
> +	int i;
> +	struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
> +	struct kvm_vcpu *vcpu;
> +
> +	*val = 0;
> +
> +	kvm_for_each_vcpu(i, vcpu, stat_data->kvm)
> +		*val += *(u64 *)((void *)vcpu + stat_data->offset);
> +
> +	return 0;
> +}
> +
>  static int vcpu_stat_get_per_vm_open(struct inode *inode, struct file *file)
>  {
>  	__simple_attr_check_format("%llu\n", 0ull);
> @@ -3573,6 +3587,13 @@ static int vcpu_stat_get_per_vm_open(struct inode *inode, struct file *file)
>  				 NULL, "%llu\n");
>  }
>  
> +static int vcpu_stat_u64_get_per_vm_open(struct inode *inode, struct file *file)
> +{
> +	__simple_attr_check_format("%llu\n", 0ull);
> +	return kvm_debugfs_open(inode, file, vcpu_stat_u64_get_per_vm,
> +				 NULL, "%llu\n");
> +}
> +
>  static const struct file_operations vcpu_stat_get_per_vm_fops = {
>  	.owner   = THIS_MODULE,
>  	.open    = vcpu_stat_get_per_vm_open,
> @@ -3582,9 +3603,19 @@ static const struct file_operations vcpu_stat_get_per_vm_fops = {
>  	.llseek  = generic_file_llseek,
>  };
>  
> +static const struct file_operations vcpu_stat_u64_get_per_vm_fops = {
> +	.owner   = THIS_MODULE,
> +	.open    = vcpu_stat_u64_get_per_vm_open,
> +	.release = kvm_debugfs_release,
> +	.read    = simple_attr_read,
> +	.write   = simple_attr_write,
> +	.llseek  = generic_file_llseek,
> +};
> +
>  static const struct file_operations *stat_fops_per_vm[] = {
> -	[KVM_STAT_VCPU] = &vcpu_stat_get_per_vm_fops,
> -	[KVM_STAT_VM]   = &vm_stat_get_per_vm_fops,
> +	[KVM_STAT_VCPU]		= &vcpu_stat_get_per_vm_fops,
> +	[KVM_STAT_VCPU_U64]	= &vcpu_stat_u64_get_per_vm_fops,
> +	[KVM_STAT_VM]		= &vm_stat_get_per_vm_fops,
>  };
>  
>  static int vm_stat_get(void *_offset, u64 *val)
> @@ -3627,9 +3658,30 @@ static int vcpu_stat_get(void *_offset, u64 *val)
>  
>  DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");
>  
> +static int vcpu_stat_u64_get(void *_offset, u64 *val)
> +{
> +	unsigned offset = (long)_offset;
> +	struct kvm *kvm;
> +	struct kvm_stat_data stat_tmp = {.offset = offset};
> +	u64 tmp_val;
> +
> +	*val = 0;
> +	spin_lock(&kvm_lock);
> +	list_for_each_entry(kvm, &vm_list, vm_list) {
> +		stat_tmp.kvm = kvm;
> +		vcpu_stat_u64_get_per_vm((void *)&stat_tmp, &tmp_val);
> +		*val += tmp_val;
> +	}
> +	spin_unlock(&kvm_lock);
> +	return 0;
> +}
> +
> +DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_u64_fops, vcpu_stat_u64_get, NULL, "%llu\n");
> +
>  static const struct file_operations *stat_fops[] = {
> -	[KVM_STAT_VCPU] = &vcpu_stat_fops,
> -	[KVM_STAT_VM]   = &vm_stat_fops,
> +	[KVM_STAT_VCPU]		= &vcpu_stat_fops,
> +	[KVM_STAT_VCPU_U64]	= &vcpu_stat_u64_fops,
> +	[KVM_STAT_VM]		= &vm_stat_fops,
>  };
>  
>  static int kvm_init_debug(void)
> -- 
> 2.5.5
> 
> --
> To unsubscribe from this list: send the line "unsubscribe kvm-ppc" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [PATCH 3/4] kvm/stats: Add provisioning for 64-bit vcpu statistics
  2016-06-20  0:08     ` Paul Mackerras
@ 2016-06-20 14:56       ` Paolo Bonzini
  -1 siblings, 0 replies; 20+ messages in thread
From: Paolo Bonzini @ 2016-06-20 14:56 UTC (permalink / raw)
  To: Paul Mackerras
  Cc: linuxppc-dev, kvm-ppc, mpe, benh, kvm, agraf, rkrcmar,
	Suraj Jitindar Singh



On 20/06/2016 02:08, Paul Mackerras wrote:
> Paolo,
> 
> Can I have an ack for Suraj's patch below?  If it's OK with you,
> I'll take his series through my tree.

Yes, please do.

Paolo

> Thanks,
> Paul.
> 
> On Wed, Jun 15, 2016 at 07:21:07PM +1000, Suraj Jitindar Singh wrote:
>> vcpus have statistics associated with them which can be viewed within the
>> debugfs. Currently it is assumed within the vcpu_stat_get() and
>> vcpu_stat_get_per_vm() functions that all of these statistics are
>> represented as 32-bit numbers. The next patch adds some 64-bit statistics,
>> so add provisioning for the display of 64-bit vcpu statistics.
>>
>> Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
>> ---
>>  arch/powerpc/kvm/book3s.c |  1 +
>>  include/linux/kvm_host.h  |  1 +
>>  virt/kvm/kvm_main.c       | 60 +++++++++++++++++++++++++++++++++++++++++++----
>>  3 files changed, 58 insertions(+), 4 deletions(-)
>>
>> diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
>> index 47018fc..ed9132b 100644
>> --- a/arch/powerpc/kvm/book3s.c
>> +++ b/arch/powerpc/kvm/book3s.c
>> @@ -40,6 +40,7 @@
>>  #include "trace.h"
>>  
>>  #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
>> +#define VCPU_STAT_U64(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU_U64
>>  
>>  /* #define EXIT_DEBUG */
>>  
>> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
>> index 1c9c973..667b30e 100644
>> --- a/include/linux/kvm_host.h
>> +++ b/include/linux/kvm_host.h
>> @@ -991,6 +991,7 @@ static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
>>  enum kvm_stat_kind {
>>  	KVM_STAT_VM,
>>  	KVM_STAT_VCPU,
>> +	KVM_STAT_VCPU_U64,
>>  };
>>  
>>  struct kvm_stat_data {
>> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
>> index 02e98f3..ac47ffb 100644
>> --- a/virt/kvm/kvm_main.c
>> +++ b/virt/kvm/kvm_main.c
>> @@ -3566,6 +3566,20 @@ static int vcpu_stat_get_per_vm(void *data, u64 *val)
>>  	return 0;
>>  }
>>  
>> +static int vcpu_stat_u64_get_per_vm(void *data, u64 *val)
>> +{
>> +	int i;
>> +	struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
>> +	struct kvm_vcpu *vcpu;
>> +
>> +	*val = 0;
>> +
>> +	kvm_for_each_vcpu(i, vcpu, stat_data->kvm)
>> +		*val += *(u64 *)((void *)vcpu + stat_data->offset);
>> +
>> +	return 0;
>> +}
>> +
>>  static int vcpu_stat_get_per_vm_open(struct inode *inode, struct file *file)
>>  {
>>  	__simple_attr_check_format("%llu\n", 0ull);
>> @@ -3573,6 +3587,13 @@ static int vcpu_stat_get_per_vm_open(struct inode *inode, struct file *file)
>>  				 NULL, "%llu\n");
>>  }
>>  
>> +static int vcpu_stat_u64_get_per_vm_open(struct inode *inode, struct file *file)
>> +{
>> +	__simple_attr_check_format("%llu\n", 0ull);
>> +	return kvm_debugfs_open(inode, file, vcpu_stat_u64_get_per_vm,
>> +				 NULL, "%llu\n");
>> +}
>> +
>>  static const struct file_operations vcpu_stat_get_per_vm_fops = {
>>  	.owner   = THIS_MODULE,
>>  	.open    = vcpu_stat_get_per_vm_open,
>> @@ -3582,9 +3603,19 @@ static const struct file_operations vcpu_stat_get_per_vm_fops = {
>>  	.llseek  = generic_file_llseek,
>>  };
>>  
>> +static const struct file_operations vcpu_stat_u64_get_per_vm_fops = {
>> +	.owner   = THIS_MODULE,
>> +	.open    = vcpu_stat_u64_get_per_vm_open,
>> +	.release = kvm_debugfs_release,
>> +	.read    = simple_attr_read,
>> +	.write   = simple_attr_write,
>> +	.llseek  = generic_file_llseek,
>> +};
>> +
>>  static const struct file_operations *stat_fops_per_vm[] = {
>> -	[KVM_STAT_VCPU] = &vcpu_stat_get_per_vm_fops,
>> -	[KVM_STAT_VM]   = &vm_stat_get_per_vm_fops,
>> +	[KVM_STAT_VCPU]		= &vcpu_stat_get_per_vm_fops,
>> +	[KVM_STAT_VCPU_U64]	= &vcpu_stat_u64_get_per_vm_fops,
>> +	[KVM_STAT_VM]		= &vm_stat_get_per_vm_fops,
>>  };
>>  
>>  static int vm_stat_get(void *_offset, u64 *val)
>> @@ -3627,9 +3658,30 @@ static int vcpu_stat_get(void *_offset, u64 *val)
>>  
>>  DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");
>>  
>> +static int vcpu_stat_u64_get(void *_offset, u64 *val)
>> +{
>> +	unsigned offset = (long)_offset;
>> +	struct kvm *kvm;
>> +	struct kvm_stat_data stat_tmp = {.offset = offset};
>> +	u64 tmp_val;
>> +
>> +	*val = 0;
>> +	spin_lock(&kvm_lock);
>> +	list_for_each_entry(kvm, &vm_list, vm_list) {
>> +		stat_tmp.kvm = kvm;
>> +		vcpu_stat_u64_get_per_vm((void *)&stat_tmp, &tmp_val);
>> +		*val += tmp_val;
>> +	}
>> +	spin_unlock(&kvm_lock);
>> +	return 0;
>> +}
>> +
>> +DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_u64_fops, vcpu_stat_u64_get, NULL, "%llu\n");
>> +
>>  static const struct file_operations *stat_fops[] = {
>> -	[KVM_STAT_VCPU] = &vcpu_stat_fops,
>> -	[KVM_STAT_VM]   = &vm_stat_fops,
>> +	[KVM_STAT_VCPU]		= &vcpu_stat_fops,
>> +	[KVM_STAT_VCPU_U64]	= &vcpu_stat_u64_fops,
>> +	[KVM_STAT_VM]		= &vm_stat_fops,
>>  };
>>  
>>  static int kvm_init_debug(void)
>> -- 
>> 2.5.5
>>
>> --
>> To unsubscribe from this list: send the line "unsubscribe kvm-ppc" in
>> the body of a message to majordomo@vger.kernel.org
>> More majordomo info at  http://vger.kernel.org/majordomo-info.html


^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [PATCH 3/4] kvm/stats: Add provisioning for 64-bit vcpu statistics
@ 2016-06-20 14:56       ` Paolo Bonzini
  0 siblings, 0 replies; 20+ messages in thread
From: Paolo Bonzini @ 2016-06-20 14:56 UTC (permalink / raw)
  To: Paul Mackerras
  Cc: linuxppc-dev, kvm-ppc, mpe, benh, kvm, agraf, rkrcmar,
	Suraj Jitindar Singh



On 20/06/2016 02:08, Paul Mackerras wrote:
> Paolo,
> 
> Can I have an ack for Suraj's patch below?  If it's OK with you,
> I'll take his series through my tree.

Yes, please do.

Paolo

> Thanks,
> Paul.
> 
> On Wed, Jun 15, 2016 at 07:21:07PM +1000, Suraj Jitindar Singh wrote:
>> vcpus have statistics associated with them which can be viewed within the
>> debugfs. Currently it is assumed within the vcpu_stat_get() and
>> vcpu_stat_get_per_vm() functions that all of these statistics are
>> represented as 32-bit numbers. The next patch adds some 64-bit statistics,
>> so add provisioning for the display of 64-bit vcpu statistics.
>>
>> Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
>> ---
>>  arch/powerpc/kvm/book3s.c |  1 +
>>  include/linux/kvm_host.h  |  1 +
>>  virt/kvm/kvm_main.c       | 60 +++++++++++++++++++++++++++++++++++++++++++----
>>  3 files changed, 58 insertions(+), 4 deletions(-)
>>
>> diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
>> index 47018fc..ed9132b 100644
>> --- a/arch/powerpc/kvm/book3s.c
>> +++ b/arch/powerpc/kvm/book3s.c
>> @@ -40,6 +40,7 @@
>>  #include "trace.h"
>>  
>>  #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
>> +#define VCPU_STAT_U64(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU_U64
>>  
>>  /* #define EXIT_DEBUG */
>>  
>> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
>> index 1c9c973..667b30e 100644
>> --- a/include/linux/kvm_host.h
>> +++ b/include/linux/kvm_host.h
>> @@ -991,6 +991,7 @@ static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
>>  enum kvm_stat_kind {
>>  	KVM_STAT_VM,
>>  	KVM_STAT_VCPU,
>> +	KVM_STAT_VCPU_U64,
>>  };
>>  
>>  struct kvm_stat_data {
>> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
>> index 02e98f3..ac47ffb 100644
>> --- a/virt/kvm/kvm_main.c
>> +++ b/virt/kvm/kvm_main.c
>> @@ -3566,6 +3566,20 @@ static int vcpu_stat_get_per_vm(void *data, u64 *val)
>>  	return 0;
>>  }
>>  
>> +static int vcpu_stat_u64_get_per_vm(void *data, u64 *val)
>> +{
>> +	int i;
>> +	struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
>> +	struct kvm_vcpu *vcpu;
>> +
>> +	*val = 0;
>> +
>> +	kvm_for_each_vcpu(i, vcpu, stat_data->kvm)
>> +		*val += *(u64 *)((void *)vcpu + stat_data->offset);
>> +
>> +	return 0;
>> +}
>> +
>>  static int vcpu_stat_get_per_vm_open(struct inode *inode, struct file *file)
>>  {
>>  	__simple_attr_check_format("%llu\n", 0ull);
>> @@ -3573,6 +3587,13 @@ static int vcpu_stat_get_per_vm_open(struct inode *inode, struct file *file)
>>  				 NULL, "%llu\n");
>>  }
>>  
>> +static int vcpu_stat_u64_get_per_vm_open(struct inode *inode, struct file *file)
>> +{
>> +	__simple_attr_check_format("%llu\n", 0ull);
>> +	return kvm_debugfs_open(inode, file, vcpu_stat_u64_get_per_vm,
>> +				 NULL, "%llu\n");
>> +}
>> +
>>  static const struct file_operations vcpu_stat_get_per_vm_fops = {
>>  	.owner   = THIS_MODULE,
>>  	.open    = vcpu_stat_get_per_vm_open,
>> @@ -3582,9 +3603,19 @@ static const struct file_operations vcpu_stat_get_per_vm_fops = {
>>  	.llseek  = generic_file_llseek,
>>  };
>>  
>> +static const struct file_operations vcpu_stat_u64_get_per_vm_fops = {
>> +	.owner   = THIS_MODULE,
>> +	.open    = vcpu_stat_u64_get_per_vm_open,
>> +	.release = kvm_debugfs_release,
>> +	.read    = simple_attr_read,
>> +	.write   = simple_attr_write,
>> +	.llseek  = generic_file_llseek,
>> +};
>> +
>>  static const struct file_operations *stat_fops_per_vm[] = {
>> -	[KVM_STAT_VCPU] = &vcpu_stat_get_per_vm_fops,
>> -	[KVM_STAT_VM]   = &vm_stat_get_per_vm_fops,
>> +	[KVM_STAT_VCPU]		= &vcpu_stat_get_per_vm_fops,
>> +	[KVM_STAT_VCPU_U64]	= &vcpu_stat_u64_get_per_vm_fops,
>> +	[KVM_STAT_VM]		= &vm_stat_get_per_vm_fops,
>>  };
>>  
>>  static int vm_stat_get(void *_offset, u64 *val)
>> @@ -3627,9 +3658,30 @@ static int vcpu_stat_get(void *_offset, u64 *val)
>>  
>>  DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");
>>  
>> +static int vcpu_stat_u64_get(void *_offset, u64 *val)
>> +{
>> +	unsigned offset = (long)_offset;
>> +	struct kvm *kvm;
>> +	struct kvm_stat_data stat_tmp = {.offset = offset};
>> +	u64 tmp_val;
>> +
>> +	*val = 0;
>> +	spin_lock(&kvm_lock);
>> +	list_for_each_entry(kvm, &vm_list, vm_list) {
>> +		stat_tmp.kvm = kvm;
>> +		vcpu_stat_u64_get_per_vm((void *)&stat_tmp, &tmp_val);
>> +		*val += tmp_val;
>> +	}
>> +	spin_unlock(&kvm_lock);
>> +	return 0;
>> +}
>> +
>> +DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_u64_fops, vcpu_stat_u64_get, NULL, "%llu\n");
>> +
>>  static const struct file_operations *stat_fops[] = {
>> -	[KVM_STAT_VCPU] = &vcpu_stat_fops,
>> -	[KVM_STAT_VM]   = &vm_stat_fops,
>> +	[KVM_STAT_VCPU]		= &vcpu_stat_fops,
>> +	[KVM_STAT_VCPU_U64]	= &vcpu_stat_u64_fops,
>> +	[KVM_STAT_VM]		= &vm_stat_fops,
>>  };
>>  
>>  static int kvm_init_debug(void)
>> -- 
>> 2.5.5
>>
>> --
>> To unsubscribe from this list: send the line "unsubscribe kvm-ppc" in
>> the body of a message to majordomo@vger.kernel.org
>> More majordomo info at  http://vger.kernel.org/majordomo-info.html


^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [PATCH 1/4] kvm/ppc/book3s_hv: Change vcore element runnable_threads from linked-list to array
  2016-06-15  9:21 ` Suraj Jitindar Singh
@ 2016-06-24  9:59   ` Paul Mackerras
  -1 siblings, 0 replies; 20+ messages in thread
From: Paul Mackerras @ 2016-06-24  9:59 UTC (permalink / raw)
  To: Suraj Jitindar Singh
  Cc: linuxppc-dev, kvm-ppc, mpe, benh, kvm, pbonzini, agraf, rkrcmar

On Wed, Jun 15, 2016 at 07:21:05PM +1000, Suraj Jitindar Singh wrote:
> The struct kvmppc_vcore is a structure used to store various information
> about a virtual core for a kvm guest. The runnable_threads element of the
> struct provides a list of all of the currently runnable vcpus on the core
> (those in the KVMPPC_VCPU_RUNNABLE state). The previous implementation of
> this list was a linked_list. The next patch requires that the list be able
> to be iterated over without holding the vcore lock.
> 
> Reimplement the runnable_threads list in the kvmppc_vcore struct as an
> array. Implement function to iterate over valid entries in the array and
> update access sites accordingly.
> 
> Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>

Unfortunately I get a compile error when compiling for either a 32-bit
powerpc config (e.g. pmac32_defconfig with KVM turned on) or a Book E
config.  The error is:

In file included from /home/paulus/kernel/kvm/include/linux/kvm_host.h:36:0,
                 from /home/paulus/kernel/kvm/arch/powerpc/kernel/asm-offsets.c:54:
/home/paulus/kernel/kvm/arch/powerpc/include/asm/kvm_host.h:299:36: error: ‘MAX_SMT_THREADS’ undeclared here (not in a function)
  struct kvm_vcpu *runnable_threads[MAX_SMT_THREADS];
                                    ^
/home/paulus/kernel/kvm/./Kbuild:81: recipe for target 'arch/powerpc/kernel/asm-offsets.s' failed

You are using MAX_SMT_THREADS in kvm_host.h, but it is defined in
kvm_book3s_asm.h, which gets included by asm-offsets.c after it
includes kvm_host.h.  I don't think we can just make kvm_host.h include
book3s.h.  The best solution might be to move the definition of struct
kvmppc_vcore to kvm_book3s.h.

Paul.

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [PATCH 1/4] kvm/ppc/book3s_hv: Change vcore element runnable_threads from linked-list to array
@ 2016-06-24  9:59   ` Paul Mackerras
  0 siblings, 0 replies; 20+ messages in thread
From: Paul Mackerras @ 2016-06-24  9:59 UTC (permalink / raw)
  To: Suraj Jitindar Singh
  Cc: linuxppc-dev, kvm-ppc, mpe, benh, kvm, pbonzini, agraf, rkrcmar

On Wed, Jun 15, 2016 at 07:21:05PM +1000, Suraj Jitindar Singh wrote:
> The struct kvmppc_vcore is a structure used to store various information
> about a virtual core for a kvm guest. The runnable_threads element of the
> struct provides a list of all of the currently runnable vcpus on the core
> (those in the KVMPPC_VCPU_RUNNABLE state). The previous implementation of
> this list was a linked_list. The next patch requires that the list be able
> to be iterated over without holding the vcore lock.
> 
> Reimplement the runnable_threads list in the kvmppc_vcore struct as an
> array. Implement function to iterate over valid entries in the array and
> update access sites accordingly.
> 
> Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>

Unfortunately I get a compile error when compiling for either a 32-bit
powerpc config (e.g. pmac32_defconfig with KVM turned on) or a Book E
config.  The error is:

In file included from /home/paulus/kernel/kvm/include/linux/kvm_host.h:36:0,
                 from /home/paulus/kernel/kvm/arch/powerpc/kernel/asm-offsets.c:54:
/home/paulus/kernel/kvm/arch/powerpc/include/asm/kvm_host.h:299:36: error: ‘MAX_SMT_THREADS’ undeclared here (not in a function)
  struct kvm_vcpu *runnable_threads[MAX_SMT_THREADS];
                                    ^
/home/paulus/kernel/kvm/./Kbuild:81: recipe for target 'arch/powerpc/kernel/asm-offsets.s' failed

You are using MAX_SMT_THREADS in kvm_host.h, but it is defined in
kvm_book3s_asm.h, which gets included by asm-offsets.c after it
includes kvm_host.h.  I don't think we can just make kvm_host.h include
book3s.h.  The best solution might be to move the definition of struct
kvmppc_vcore to kvm_book3s.h.

Paul.

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [PATCH 1/4] kvm/ppc/book3s_hv: Change vcore element runnable_threads from linked-list to array
  2016-06-24  9:59   ` Paul Mackerras
@ 2016-06-29  4:44     ` Suraj Jitindar Singh
  -1 siblings, 0 replies; 20+ messages in thread
From: Suraj Jitindar Singh @ 2016-06-29  4:44 UTC (permalink / raw)
  To: Paul Mackerras
  Cc: linuxppc-dev, kvm-ppc, mpe, benh, kvm, pbonzini, agraf, rkrcmar

On 24/06/16 19:59, Paul Mackerras wrote:
> On Wed, Jun 15, 2016 at 07:21:05PM +1000, Suraj Jitindar Singh wrote:
>> The struct kvmppc_vcore is a structure used to store various information
>> about a virtual core for a kvm guest. The runnable_threads element of the
>> struct provides a list of all of the currently runnable vcpus on the core
>> (those in the KVMPPC_VCPU_RUNNABLE state). The previous implementation of
>> this list was a linked_list. The next patch requires that the list be able
>> to be iterated over without holding the vcore lock.
>>
>> Reimplement the runnable_threads list in the kvmppc_vcore struct as an
>> array. Implement function to iterate over valid entries in the array and
>> update access sites accordingly.
>>
>> Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
> Unfortunately I get a compile error when compiling for either a 32-bit
> powerpc config (e.g. pmac32_defconfig with KVM turned on) or a Book E
> config.  The error is:
>
> In file included from /home/paulus/kernel/kvm/include/linux/kvm_host.h:36:0,
>                  from /home/paulus/kernel/kvm/arch/powerpc/kernel/asm-offsets.c:54:
> /home/paulus/kernel/kvm/arch/powerpc/include/asm/kvm_host.h:299:36: error: ‘MAX_SMT_THREADS’ undeclared here (not in a function)
>   struct kvm_vcpu *runnable_threads[MAX_SMT_THREADS];
>                                     ^
> /home/paulus/kernel/kvm/./Kbuild:81: recipe for target 'arch/powerpc/kernel/asm-offsets.s' failed
>
> You are using MAX_SMT_THREADS in kvm_host.h, but it is defined in
> kvm_book3s_asm.h, which gets included by asm-offsets.c after it
> include kvm_host.h.  I don't think we can just make kvm_host.h include
> book3s.h.  The best solution might be to move the definition of struct
> kvmppc_vcore to kvm_book3s.h.

Thanks for catching that, yeah I see.

I don't think we can trivially move the struct kvmppc_vcore definition into 
kvm_book3s.h as other code in kvm_host.h (i.e. struct kvm_vcpu_arch) requires
the definition. I was thinking that I could just put runnable_threads inside an #ifdef.

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	struct kvm_vcpu *runnable_threads[MAX_SMT_THREADS];
#endif

Suraj.

>
> Paul.


^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [PATCH 1/4] kvm/ppc/book3s_hv: Change vcore element runnable_threads from linked-list to array
@ 2016-06-29  4:44     ` Suraj Jitindar Singh
  0 siblings, 0 replies; 20+ messages in thread
From: Suraj Jitindar Singh @ 2016-06-29  4:44 UTC (permalink / raw)
  To: Paul Mackerras
  Cc: linuxppc-dev, kvm-ppc, mpe, benh, kvm, pbonzini, agraf, rkrcmar

On 24/06/16 19:59, Paul Mackerras wrote:
> On Wed, Jun 15, 2016 at 07:21:05PM +1000, Suraj Jitindar Singh wrote:
>> The struct kvmppc_vcore is a structure used to store various information
>> about a virtual core for a kvm guest. The runnable_threads element of the
>> struct provides a list of all of the currently runnable vcpus on the core
>> (those in the KVMPPC_VCPU_RUNNABLE state). The previous implementation of
>> this list was a linked_list. The next patch requires that the list be able
>> to be iterated over without holding the vcore lock.
>>
>> Reimplement the runnable_threads list in the kvmppc_vcore struct as an
>> array. Implement function to iterate over valid entries in the array and
>> update access sites accordingly.
>>
>> Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
> Unfortunately I get a compile error when compiling for either a 32-bit
> powerpc config (e.g. pmac32_defconfig with KVM turned on) or a Book E
> config.  The error is:
>
> In file included from /home/paulus/kernel/kvm/include/linux/kvm_host.h:36:0,
>                  from /home/paulus/kernel/kvm/arch/powerpc/kernel/asm-offsets.c:54:
> /home/paulus/kernel/kvm/arch/powerpc/include/asm/kvm_host.h:299:36: error: ‘MAX_SMT_THREADS’ undeclared here (not in a function)
>   struct kvm_vcpu *runnable_threads[MAX_SMT_THREADS];
>                                     ^
> /home/paulus/kernel/kvm/./Kbuild:81: recipe for target 'arch/powerpc/kernel/asm-offsets.s' failed
>
> You are using MAX_SMT_THREADS in kvm_host.h, but it is defined in
> kvm_book3s_asm.h, which gets included by asm-offsets.c after it
> include kvm_host.h.  I don't think we can just make kvm_host.h include
> book3s.h.  The best solution might be to move the definition of struct
> kvmppc_vcore to kvm_book3s.h.

Thanks for catching that, yeah I see.

I don't think we can trivially move the struct kvmppc_vcore definition into 
kvm_book3s.h as other code in kvm_host.h (i.e. struct kvm_vcpu_arch) requires
the definition. I was thinking that I could just put runnable_threads inside an #ifdef.

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	struct kvm_vcpu *runnable_threads[MAX_SMT_THREADS];
#endif

Suraj.

>
> Paul.


^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [PATCH 1/4] kvm/ppc/book3s_hv: Change vcore element runnable_threads from linked-list to array
  2016-06-29  4:44     ` Suraj Jitindar Singh
@ 2016-06-29 12:51       ` Paolo Bonzini
  -1 siblings, 0 replies; 20+ messages in thread
From: Paolo Bonzini @ 2016-06-29 12:51 UTC (permalink / raw)
  To: Suraj Jitindar Singh, Paul Mackerras
  Cc: linuxppc-dev, kvm-ppc, mpe, benh, kvm, agraf, rkrcmar



On 29/06/2016 06:44, Suraj Jitindar Singh wrote:
> Thanks for catching that, yeah I see.
> 
> I don't think we can trivially move the struct kvmppc_vcore definition into 
> kvm_book3s.h as other code in kvm_host.h (i.e. struct kvm_vcpu_arch) requires
> the definition. I was thinking that I could just put runnable_threads inside an #ifdef.
> 
> #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
> 	struct kvm_vcpu *runnable_threads[MAX_SMT_THREADS];
> #endif

You can rename MAX_SMT_THREADS to BOOK3S_MAX_SMT_THREADS and move it to
kvm_host.h.  It seems like assembly code does not use it, so it's
unnecessary to have it in book3s_asm.h.

Paolo

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [PATCH 1/4] kvm/ppc/book3s_hv: Change vcore element runnable_threads from linked-list to array
@ 2016-06-29 12:51       ` Paolo Bonzini
  0 siblings, 0 replies; 20+ messages in thread
From: Paolo Bonzini @ 2016-06-29 12:51 UTC (permalink / raw)
  To: Suraj Jitindar Singh, Paul Mackerras
  Cc: linuxppc-dev, kvm-ppc, mpe, benh, kvm, agraf, rkrcmar



On 29/06/2016 06:44, Suraj Jitindar Singh wrote:
> Thanks for catching that, yeah I see.
> 
> I don't think we can trivially move the struct kvmppc_vcore definition into 
> kvm_book3s.h as other code in kvm_host.h (i.e. struct kvm_vcpu_arch) requires
> the definition. I was thinking that I could just put runnable_threads inside an #ifdef.
> 
> #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
> 	struct kvm_vcpu *runnable_threads[MAX_SMT_THREADS];
> #endif

You can rename MAX_SMT_THREADS to BOOK3S_MAX_SMT_THREADS and move it to
kvm_host.h.  It seems like assembly code does not use it, so it's
unnecessary to have it in book3s_asm.h.

Paolo

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [PATCH 1/4] kvm/ppc/book3s_hv: Change vcore element runnable_threads from linked-list to array
  2016-06-29 12:51       ` Paolo Bonzini
@ 2016-07-11  6:05         ` Suraj Jitindar Singh
  -1 siblings, 0 replies; 20+ messages in thread
From: Suraj Jitindar Singh @ 2016-07-11  6:05 UTC (permalink / raw)
  To: Paolo Bonzini, Paul Mackerras
  Cc: linuxppc-dev, kvm-ppc, mpe, benh, kvm, agraf, rkrcmar

On 29/06/16 22:51, Paolo Bonzini wrote:
>
> On 29/06/2016 06:44, Suraj Jitindar Singh wrote:
>> Thanks for catching that, yeah I see.
>>
>> I don't think we can trivially move the struct kvmppc_vcore definition into 
>> kvm_book3s.h as other code in kvm_host.h (i.e. struct kvm_vcpu_arch) requires
>> the definition. I was thinking that I could just put runnable_threads inside an #ifdef.
>>
>> #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
>> 	struct kvm_vcpu *runnable_threads[MAX_SMT_THREADS];
>> #endif
> You can rename MAX_SMT_THREADS to BOOK3S_MAX_SMT_THREADS and move it to
> kvm_host.h.  It seems like assembly code does not use it, so it's
> unnecessary to have it in book3s_asm.h.

It looks like MAX_SMT_THREADS is used elsewhere in book3s_asm.h.
I think the easiest option is to put the v_core struct in book3s.h. 

>
> Paolo


^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [PATCH 1/4] kvm/ppc/book3s_hv: Change vcore element runnable_threads from linked-list to array
@ 2016-07-11  6:05         ` Suraj Jitindar Singh
  0 siblings, 0 replies; 20+ messages in thread
From: Suraj Jitindar Singh @ 2016-07-11  6:05 UTC (permalink / raw)
  To: Paolo Bonzini, Paul Mackerras
  Cc: linuxppc-dev, kvm-ppc, mpe, benh, kvm, agraf, rkrcmar

On 29/06/16 22:51, Paolo Bonzini wrote:
>
> On 29/06/2016 06:44, Suraj Jitindar Singh wrote:
>> Thanks for catching that, yeah I see.
>>
>> I don't think we can trivially move the struct kvmppc_vcore definition into 
>> kvm_book3s.h as other code in kvm_host.h (i.e. struct kvm_vcpu_arch) requires
>> the definition. I was thinking that I could just put runnable_threads inside an #ifdef.
>>
>> #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
>> 	struct kvm_vcpu *runnable_threads[MAX_SMT_THREADS];
>> #endif
> You can rename MAX_SMT_THREADS to BOOK3S_MAX_SMT_THREADS and move it to
> kvm_host.h.  It seems like assembly code does not use it, so it's
> unnecessary to have it in book3s_asm.h.

It looks like MAX_SMT_THREADS is used elsewhere in book3s_asm.h.
I think the easiest option is to put the v_core struct in book3s.h. 

>
> Paolo


^ permalink raw reply	[flat|nested] 20+ messages in thread

end of thread, other threads:[~2016-07-11  6:05 UTC | newest]

Thread overview: 20+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-06-15  9:21 [PATCH 1/4] kvm/ppc/book3s_hv: Change vcore element runnable_threads from linked-list to array Suraj Jitindar Singh
2016-06-15  9:21 ` Suraj Jitindar Singh
2016-06-15  9:21 ` [PATCH 2/4] kvm/ppc/book3s_hv: Implement halt polling in the kvm_hv kernel module Suraj Jitindar Singh
2016-06-15  9:21   ` Suraj Jitindar Singh
2016-06-15  9:21 ` [PATCH 3/4] kvm/stats: Add provisioning for 64-bit vcpu statistics Suraj Jitindar Singh
2016-06-15  9:21   ` Suraj Jitindar Singh
2016-06-20  0:08   ` Paul Mackerras
2016-06-20  0:08     ` Paul Mackerras
2016-06-20 14:56     ` Paolo Bonzini
2016-06-20 14:56       ` Paolo Bonzini
2016-06-15  9:21 ` [PATCH 4/4] powerpc/kvm/stats: Implement existing and add new halt polling vcpu stats Suraj Jitindar Singh
2016-06-15  9:21   ` Suraj Jitindar Singh
2016-06-24  9:59 ` [PATCH 1/4] kvm/ppc/book3s_hv: Change vcore element runnable_threads from linked-list to array Paul Mackerras
2016-06-24  9:59   ` Paul Mackerras
2016-06-29  4:44   ` Suraj Jitindar Singh
2016-06-29  4:44     ` Suraj Jitindar Singh
2016-06-29 12:51     ` Paolo Bonzini
2016-06-29 12:51       ` Paolo Bonzini
2016-07-11  6:05       ` Suraj Jitindar Singh
2016-07-11  6:05         ` Suraj Jitindar Singh

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.