From: Volodymyr Babchuk <Volodymyr_Babchuk@epam.com>
To: "xen-devel@lists.xenproject.org" <xen-devel@lists.xenproject.org>
Cc: "Stefano Stabellini" <sstabellini@kernel.org>,
	"Julien Grall" <julien@xen.org>, "Wei Liu" <wl@xen.org>,
	"Andrew Cooper" <andrew.cooper3@citrix.com>,
	"Ian Jackson" <ian.jackson@eu.citrix.com>,
	"George Dunlap" <george.dunlap@citrix.com>,
	"Dario Faggioli" <dfaggioli@suse.com>,
	"Jan Beulich" <jbeulich@suse.com>,
	"Volodymyr Babchuk" <Volodymyr_Babchuk@epam.com>,
	"Roger Pau Monné" <roger.pau@citrix.com>
Subject: [RFC PATCH v1 1/6] sched: track time spent in IRQ handler
Date: Fri, 12 Jun 2020 00:22:36 +0000	[thread overview]
Message-ID: <20200612002205.174295-2-volodymyr_babchuk@epam.com> (raw)
In-Reply-To: <20200612002205.174295-1-volodymyr_babchuk@epam.com>

Add code that tracks the time spent in IRQ handlers, so that later we
can adjust the run time of the scheduling unit accordingly.

This and the following changes aim to provide fair scheduling. The
problem is that any running vCPU can be interrupted to handle an IRQ
which is bound to some other vCPU. Thus, the current vCPU can be
charged for time it did not actually use.

TODO: move the vcpu_{begin|end}_irq_handler() calls into entry.S for
even fairer time tracking.
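
For illustration, a scheduler could consume the accumulated correction
roughly as follows (a minimal sketch only: charge_runtime() is a
hypothetical hook, not an existing Xen function, and the
read-then-subtract pattern is an assumption about how the value would
be drained):

    /* Hypothetical sketch: charge a unit only for time it really used. */
    static void account_runtime(struct sched_unit *unit, s_time_t runtime)
    {
        /* Fetch the IRQ time accumulated so far... */
        int irq_time = atomic_read(&unit->irq_time);

        /*
         * ...and subtract exactly that amount, so any IRQ time added
         * concurrently is kept for the next accounting period.
         */
        atomic_sub(irq_time, &unit->irq_time);

        /* Hypothetical hook: charge the adjusted run time. */
        charge_runtime(unit, runtime - irq_time);
    }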

Signed-off-by: Volodymyr Babchuk <volodymyr_babchuk@epam.com>
---
 xen/arch/arm/irq.c      |  2 ++
 xen/arch/x86/irq.c      |  2 ++
 xen/common/sched/core.c | 29 +++++++++++++++++++++++++++++
 xen/include/xen/sched.h | 13 +++++++++++++
 4 files changed, 46 insertions(+)

diff --git a/xen/arch/arm/irq.c b/xen/arch/arm/irq.c
index 3877657a52..51b517c0cd 100644
--- a/xen/arch/arm/irq.c
+++ b/xen/arch/arm/irq.c
@@ -201,6 +201,7 @@ void do_IRQ(struct cpu_user_regs *regs, unsigned int irq, int is_fiq)
     struct irq_desc *desc = irq_to_desc(irq);
     struct irqaction *action;
 
+    vcpu_begin_irq_handler();
     perfc_incr(irqs);
 
     ASSERT(irq >= 16); /* SGIs do not come down this path */
@@ -267,6 +268,7 @@ out:
 out_no_end:
     spin_unlock(&desc->lock);
     irq_exit();
+    vcpu_end_irq_handler();
 }
 
 void release_irq(unsigned int irq, const void *dev_id)
diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c
index a69937c840..3ef4221b64 100644
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -1895,6 +1895,7 @@ void do_IRQ(struct cpu_user_regs *regs)
     int               irq = this_cpu(vector_irq)[vector];
     struct cpu_user_regs *old_regs = set_irq_regs(regs);
 
+    vcpu_begin_irq_handler();
     perfc_incr(irqs);
     this_cpu(irq_count)++;
     irq_enter();
@@ -2024,6 +2025,7 @@ void do_IRQ(struct cpu_user_regs *regs)
  out_no_unlock:
     irq_exit();
     set_irq_regs(old_regs);
+    vcpu_end_irq_handler();
 }
 
 static inline bool is_free_pirq(const struct domain *d,
diff --git a/xen/common/sched/core.c b/xen/common/sched/core.c
index cb49a8bc02..8f642ada05 100644
--- a/xen/common/sched/core.c
+++ b/xen/common/sched/core.c
@@ -916,6 +916,35 @@ void vcpu_unblock(struct vcpu *v)
     vcpu_wake(v);
 }
 
+void vcpu_begin_irq_handler(void)
+{
+    if ( is_idle_vcpu(current) )
+        return;
+
+    /* XXX: ASSERT_INTERRUPTS_DISABLED() appears to be available only on x86 */
+    if ( current->irq_nesting++ )
+        return;
+
+    current->irq_entry_time = NOW();
+}
+
+void vcpu_end_irq_handler(void)
+{
+    int delta;
+
+    if ( is_idle_vcpu(current) )
+        return;
+
+    ASSERT(current->irq_nesting);
+
+    if ( --current->irq_nesting )
+        return;
+
+    /* We assume that the IRQ handling time will not overflow an int. */
+    delta = NOW() - current->irq_entry_time;
+    atomic_add(delta, &current->sched_unit->irq_time);
+}
+
 /*
  * Do the actual movement of an unit from old to new CPU. Locks for *both*
  * CPUs needs to have been taken already when calling this!
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index ac53519d7f..ceed53364b 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -237,6 +237,9 @@ struct vcpu
     evtchn_port_t    virq_to_evtchn[NR_VIRQS];
     spinlock_t       virq_lock;
 
+    /* Fair scheduling state */
+    uint64_t         irq_entry_time;
+    unsigned int     irq_nesting;
     /* Tasklet for continue_hypercall_on_cpu(). */
     struct tasklet   continue_hypercall_tasklet;
 
@@ -276,6 +279,9 @@ struct sched_unit {
     /* Vcpu state summary. */
     unsigned int           runstate_cnt[4];
 
+    /* Fair scheduling correction value */
+    atomic_t               irq_time;
+
     /* Bitmask of CPUs on which this VCPU may run. */
     cpumask_var_t          cpu_hard_affinity;
     /* Used to save affinity during temporary pinning. */
@@ -690,6 +696,13 @@ long vcpu_yield(void);
 void vcpu_sleep_nosync(struct vcpu *v);
 void vcpu_sleep_sync(struct vcpu *v);
 
+/*
+ * Report IRQ handling time to the scheduler. As IRQs can be nested,
+ * the following two functions are re-entrant.
+ */
+void vcpu_begin_irq_handler(void);
+void vcpu_end_irq_handler(void);
+
 /*
  * Force synchronisation of given VCPU's state. If it is currently descheduled,
  * this call will ensure that all its state is committed to memory and that
-- 
2.27.0
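
To make the nesting rule concrete, below is a stand-alone model of the
counter logic above (illustration only: fake_now stands in for NOW(),
plain variables stand in for the per-vCPU and per-unit fields, and
none of this is Xen code):

    #include <assert.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    static unsigned int irq_nesting;
    static uint64_t irq_entry_time, irq_time, fake_now;

    static void begin_irq(void)
    {
        /* Only the outermost IRQ records the entry time. */
        if ( !irq_nesting++ )
            irq_entry_time = fake_now;
    }

    static void end_irq(void)
    {
        assert(irq_nesting);
        /* Only the outermost IRQ accounts the elapsed time. */
        if ( !--irq_nesting )
            irq_time += fake_now - irq_entry_time;
    }

    int main(void)
    {
        begin_irq();   /* outer IRQ arrives at t=0 */
        fake_now = 100;
        begin_irq();   /* nested IRQ: entry time stays at 0 */
        fake_now = 150;
        end_irq();     /* nesting drops to 1: nothing accounted yet */
        fake_now = 300;
        end_irq();     /* nesting drops to 0: 300 - 0 accounted */
        printf("irq_time = %" PRIu64 "\n", irq_time); /* prints 300 */
        return 0;
    }

The key property, preserved from the patch, is that nested begin/end
pairs collapse into a single outer interval, so nested IRQs are never
double-counted.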


Thread overview: 43+ messages
2020-06-12  0:22 [RFC PATCH v1 0/6] Fair scheduling Volodymyr Babchuk
2020-06-12  0:22 ` [RFC PATCH v1 2/6] sched: track time spent in hypervisor tasks Volodymyr Babchuk
2020-06-12  4:43   ` Jürgen Groß
2020-06-12 11:30     ` Volodymyr Babchuk
2020-06-12 11:40       ` Jürgen Groß
2020-09-24 18:08         ` Volodymyr Babchuk
2020-09-25 17:22           ` Dario Faggioli
2020-09-25 20:21             ` Volodymyr Babchuk
2020-09-25 21:42               ` Dario Faggioli
2020-06-16 10:10   ` Jan Beulich
2020-06-18  2:50     ` Volodymyr Babchuk
2020-06-18  6:34       ` Jan Beulich
2020-06-12  0:22 ` Volodymyr Babchuk [this message]
2020-06-12  4:36   ` [RFC PATCH v1 1/6] sched: track time spent in IRQ handler Jürgen Groß
2020-06-12 11:26     ` Volodymyr Babchuk
2020-06-12 11:29       ` Julien Grall
2020-06-12 11:33         ` Volodymyr Babchuk
2020-06-12 12:21           ` Julien Grall
2020-06-12 20:08             ` Dario Faggioli
2020-06-12 22:25               ` Volodymyr Babchuk
2020-06-12 22:54               ` Julien Grall
2020-06-16 10:06   ` Jan Beulich
2020-06-12  0:22 ` [RFC PATCH v1 3/6] sched, credit2: improve scheduler fairness Volodymyr Babchuk
2020-06-12  4:51   ` Jürgen Groß
2020-06-12 11:38     ` Volodymyr Babchuk
2020-06-12  0:22 ` [RFC PATCH v1 5/6] tools: xentop: show time spent in IRQ and HYP states Volodymyr Babchuk
2020-06-12  0:22 ` [RFC PATCH v1 6/6] trace: add fair scheduling trace events Volodymyr Babchuk
2020-06-12  0:22 ` [RFC PATCH v1 4/6] xentop: collect IRQ and HYP time statistics Volodymyr Babchuk
2020-06-12  4:57   ` Jürgen Groß
2020-06-12 11:44     ` Volodymyr Babchuk
2020-06-12 12:45       ` Julien Grall
2020-06-12 22:16         ` Volodymyr Babchuk
2020-06-18 20:24         ` Volodymyr Babchuk
2020-06-18 20:34           ` Julien Grall
2020-06-18 23:35             ` Volodymyr Babchuk
2020-06-12 12:29     ` Julien Grall
2020-06-12 12:41       ` Jürgen Groß
2020-06-12 15:29         ` Dario Faggioli
2020-06-12 22:27           ` Volodymyr Babchuk
2020-06-13  6:22             ` Jürgen Groß
2020-06-18  2:58               ` Volodymyr Babchuk
2020-06-18 15:17                 ` Julien Grall
2020-06-18 15:23                   ` Jan Beulich
