From: Josh Whitehead Subject: [RFC PATCH v2 2/7] Fixed formatting and misleading comments/variables. Added comments and renamed variables to accurately reflect modern terminology Date: Wed, 9 Jul 2014 16:55:43 -0400 Message-ID: <1404939348-4926-3-git-send-email-josh.whitehead@dornerworks.com> References: <1404939348-4926-1-git-send-email-josh.whitehead@dornerworks.com> Mime-Version: 1.0 Content-Type: text/plain; charset="us-ascii" Content-Transfer-Encoding: 7bit In-Reply-To: <1404939348-4926-1-git-send-email-josh.whitehead@dornerworks.com> Sender: xen-devel-bounces@lists.xen.org Errors-To: xen-devel-bounces@lists.xen.org To: Xen-devel Cc: Ian Campbell , Stefano Stabellini , George Dunlap , Dario Faggioli , Ian Jackson , Robert VanVossen , Nathan Studer , Joshua Whitehead List-Id: xen-devel@lists.xenproject.org Due to the age of the scheduler there were many incorrect/misleading comments and variable names, the bulk of which centered around the fact that "VCPU" and "Domain" used to be synonymous. Therefore a large portion of these modifications involves simply changing a variable "d" to a "v" or a "dom" to a "vcpu" so that the comments and variable names accurately reflect what is actually being used. A few other name changes were also made, the most significant being the change from "slice" to "budget" to better reflect modern terminology used in real-time algorithms such as CBS and deferrable server. Some variable, function name, and macro name changes were also made to bring the code in line with current Xen coding standards. Signed-off-by: Nathan Studer Signed-off-by: Joshua Whitehead --- xen/common/sched_sedf.c | 412 ++++++++++++++++++++++------------------------- 1 file changed, 190 insertions(+), 222 deletions(-) diff --git a/xen/common/sched_sedf.c b/xen/common/sched_sedf.c index 0cc3e06..a8dd3e0 100644 --- a/xen/common/sched_sedf.c +++ b/xen/common/sched_sedf.c @@ -1,8 +1,29 @@ /****************************************************************************** - * Simple EDF scheduler for xen + * Simple EDF Scheduler for xen + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * by DornerWorks Ltd.
(C) 2014 Grand Rapids, MI + * + * Adapted from code by Stephan Diestelhorst (C) 2004 Cambridge University + * and Mark Williamson (C) 2004 Intel Research Cambridge * - * by Stephan Diestelhorst (C) 2004 Cambridge University - * based on code by Mark Williamson (C) 2004 Intel Research Cambridge */ #include @@ -28,14 +49,13 @@ #define SEDF_ASLEEP (16) #define DEFAULT_PERIOD (MILLISECS(20)) -#define DEFAULT_SLICE (MILLISECS(10)) +#define DEFAULT_BUDGET (MILLISECS(10)) #define PERIOD_MAX MILLISECS(10000) /* 10s */ #define PERIOD_MIN (MICROSECS(10)) /* 10us */ -#define SLICE_MIN (MICROSECS(5)) /* 5us */ +#define BUDGET_MIN (MICROSECS(5)) /* 5us */ -#define IMPLY(a, b) (!(a) || (b)) -#define EQ(a, b) ((!!(a)) == (!!(b))) +#define EQ(_A, _B) ((!!(_A)) == (!!(_B))) struct sedf_dom_info { @@ -52,16 +72,16 @@ struct sedf_vcpu_info { struct list_head list; /* Parameters for EDF */ - s_time_t period; /* = relative deadline */ - s_time_t slice; /* = worst case execution time */ + s_time_t period; /* = Server scheduling period */ + s_time_t budget; /* = Guaranteed minimum CPU time per period */ - /* Status of domain */ + /* Status of vcpu */ int status; /* Bookkeeping */ s_time_t deadl_abs; s_time_t sched_start_abs; s_time_t cputime; - /* Times the domain un-/blocked */ + /* Times the vcpu un-/blocked */ s_time_t block_abs; s_time_t unblock_abs; @@ -81,35 +101,35 @@ struct sedf_cpu_info { #define SEDF_PRIV(_ops) \ ((struct sedf_priv_info *)((_ops)->sched_data)) -#define EDOM_INFO(d) ((struct sedf_vcpu_info *)((d)->sched_priv)) -#define CPU_INFO(cpu) \ - ((struct sedf_cpu_info *)per_cpu(schedule_data, cpu).sched_priv) -#define LIST(d) (&EDOM_INFO(d)->list) -#define RUNQ(cpu) (&CPU_INFO(cpu)->runnableq) -#define WAITQ(cpu) (&CPU_INFO(cpu)->waitq) -#define IDLETASK(cpu) (idle_vcpu[cpu]) +#define SEDF_VCPU(_vcpu) ((struct sedf_vcpu_info *)((_vcpu)->sched_priv)) +#define SEDF_PCPU(_cpu) \ + ((struct sedf_cpu_info *)per_cpu(schedule_data, _cpu).sched_priv) +#define LIST(_vcpu) (&SEDF_VCPU(_vcpu)->list) +#define RUNQ(_cpu) (&SEDF_PCPU(_cpu)->runnableq) +#define WAITQ(_cpu) (&SEDF_PCPU(_cpu)->waitq) +#define IDLETASK(_cpu) (idle_vcpu[_cpu]) #define PERIOD_BEGIN(inf) ((inf)->deadl_abs - (inf)->period) -#define DIV_UP(x,y) (((x) + (y) - 1) / y) +#define DIV_UP(_X, _Y) (((_X) + (_Y) - 1) / _Y) -#define sedf_runnable(edom) (!(EDOM_INFO(edom)->status & SEDF_ASLEEP)) +#define sedf_runnable(edom) (!(SEDF_VCPU(edom)->status & SEDF_ASLEEP)) -static void sedf_dump_cpu_state(const struct scheduler *ops, int i); +static void sedf_dump_cpu_state(const struct scheduler *ops, int cpu); -static inline int __task_on_queue(struct vcpu *d) +static inline int __task_on_queue(struct vcpu *v) { - return (((LIST(d))->next != NULL) && (LIST(d)->next != LIST(d))); + return (((LIST(v))->next != NULL) && (LIST(v)->next != LIST(v))); } -static inline void __del_from_queue(struct vcpu *d) +static inline void __del_from_queue(struct vcpu *v) { - struct list_head *list = LIST(d); - ASSERT(__task_on_queue(d)); + struct list_head *list = LIST(v); + ASSERT(__task_on_queue(v)); list_del(list); list->next = NULL; - ASSERT(!__task_on_queue(d)); + ASSERT(!__task_on_queue(v)); } typedef int(*list_comparer)(struct list_head* el1, struct list_head* el2); @@ -128,12 +148,12 @@ static inline void list_insert_sort( list_add(element, cur->prev); } -#define DOMAIN_COMPARER(name, field, comp1, comp2) \ +#define VCPU_COMPARER(name, field, comp1, comp2) \ static int name##_comp(struct list_head* el1, struct list_head* el2) \ { \ - struct sedf_vcpu_info *d1,
*d2; \ - d1 = list_entry(el1,struct sedf_vcpu_info, field); \ - d2 = list_entry(el2,struct sedf_vcpu_info, field); \ + struct sedf_vcpu_info *v1, *v2; \ + v1 = list_entry(el1, struct sedf_vcpu_info, field); \ + v2 = list_entry(el2, struct sedf_vcpu_info, field); \ if ( (comp1) == (comp2) ) \ return 0; \ if ( (comp1) < (comp2) ) \ @@ -143,11 +163,11 @@ static int name##_comp(struct list_head* el1, struct list_head* el2) \ } /* - * Adds a domain to the queue of processes which wait for the beginning of the - * next period; this list is therefore sortet by this time, which is simply + * Adds a vcpu to the queue of processes which wait for the beginning of the + * next period; this list is therefore sorted by this time, which is simply * absol. deadline - period. */ -DOMAIN_COMPARER(waitq, list, PERIOD_BEGIN(d1), PERIOD_BEGIN(d2)); +VCPU_COMPARER(waitq, list, PERIOD_BEGIN(v1), PERIOD_BEGIN(v2)); static inline void __add_to_waitqueue_sort(struct vcpu *v) { ASSERT(!__task_on_queue(v)); @@ -156,12 +176,12 @@ static inline void __add_to_waitqueue_sort(struct vcpu *v) } /* - * Adds a domain to the queue of processes which have started their current + * Adds a vcpu to the queue of processes which have started their current * period and are runnable (i.e. not blocked, dieing,...). The first element * on this list is running on the processor, if the list is empty the idle * task will run. As we are implementing EDF, this list is sorted by deadlines. */ -DOMAIN_COMPARER(runq, list, d1->deadl_abs, d2->deadl_abs); +VCPU_COMPARER(runq, list, v1->deadl_abs, v2->deadl_abs); static inline void __add_to_runqueue_sort(struct vcpu *v) { list_insert_sort(RUNQ(v->processor), LIST(v), runq_comp); @@ -172,8 +192,8 @@ static void sedf_insert_vcpu(const struct scheduler *ops, struct vcpu *v) { if ( is_idle_vcpu(v) ) { - EDOM_INFO(v)->deadl_abs = 0; - EDOM_INFO(v)->status &= ~SEDF_ASLEEP; + SEDF_VCPU(v)->deadl_abs = 0; + SEDF_VCPU(v)->status &= ~SEDF_ASLEEP; } } @@ -187,19 +207,19 @@ static void *sedf_alloc_vdata(const struct scheduler *ops, struct vcpu *v, void inf->vcpu = v; - inf->deadl_abs = 0; - inf->status = SEDF_ASLEEP; + inf->deadl_abs = 0; + inf->status = SEDF_ASLEEP; if (v->domain->domain_id == 0) { - /* Domain 0, needs a slice to boot the machine */ - inf->period = DEFAULT_PERIOD; - inf->slice = DEFAULT_SLICE; + /* Domain 0, needs a budget to boot the machine */ + inf->period = DEFAULT_PERIOD; + inf->budget = DEFAULT_BUDGET; } else { - inf->period = DEFAULT_PERIOD; - inf->slice = 0; + inf->period = DEFAULT_PERIOD; + inf->budget = 0; } INIT_LIST_HEAD(&(inf->list)); @@ -273,45 +293,45 @@ static int sedf_pick_cpu(const struct scheduler *ops, struct vcpu *v) } /* - * Handles the rescheduling & bookkeeping of domains running in their + * Handles the rescheduling & bookkeeping of vcpus running in their * guaranteed timeslice. 
*/ -static void desched_edf_dom(s_time_t now, struct vcpu* d) +static void desched_edf_vcpu(s_time_t now, struct vcpu *v) { - struct sedf_vcpu_info* inf = EDOM_INFO(d); + struct sedf_vcpu_info* inf = SEDF_VCPU(v); - /* Current domain is running in real time mode */ - ASSERT(__task_on_queue(d)); + /* Current vcpu is running in real time mode */ + ASSERT(__task_on_queue(v)); - /* Update the domain's cputime */ + /* Update the vcpu's cputime */ inf->cputime += now - inf->sched_start_abs; - /* Scheduling decisions which don't remove the running domain from + /* Scheduling decisions which don't remove the running vcpu from * the runq */ - if ( (inf->cputime < inf->slice) && sedf_runnable(d) ) + if ( (inf->cputime < inf->budget) && sedf_runnable(v) ) return; - __del_from_queue(d); + __del_from_queue(v); /* * Manage bookkeeping (i.e. calculate next deadline, memorise - * overrun-time of slice) of finished domains. + * overrun-time of budget) of finished vcpus. */ - if ( inf->cputime >= inf->slice ) + if ( inf->cputime >= inf->budget ) { - inf->cputime -= inf->slice; + inf->cputime -= inf->budget; /* Set next deadline */ inf->deadl_abs += inf->period; } - /* Add a runnable domain to the waitqueue */ - if ( sedf_runnable(d) ) + /* Add a runnable vcpu to the appropriate queue */ + if ( sedf_runnable(v) ) { - __add_to_waitqueue_sort(d); + __add_to_waitqueue_sort(v); } - ASSERT(EQ(sedf_runnable(d), __task_on_queue(d))); + ASSERT(EQ(sedf_runnable(v), __task_on_queue(v))); } @@ -335,14 +355,14 @@ static void update_queues( __add_to_runqueue_sort(curinf->vcpu); } - /* Process the runq, find domains that are on the runq that shouldn't */ + /* Process the runq, find vcpus that are on the runq that shouldn't */ list_for_each_safe ( cur, tmp, runq ) { - curinf = list_entry(cur,struct sedf_vcpu_info,list); + curinf = list_entry(cur, struct sedf_vcpu_info, list); - if ( unlikely(curinf->slice == 0) ) + if ( unlikely(curinf->budget == 0) ) { - /* Ignore domains with empty slice */ + /* Ignore vcpus with empty budget */ __del_from_queue(curinf->vcpu); /* Move them to their next period */ @@ -391,9 +411,9 @@ static void sedf_deinit(const struct scheduler *ops) /* * Main scheduling function * Reasons for calling this function are: - * -timeslice for the current period used up - * -domain on waitqueue has started it's period - * -and various others ;) in general: determine which domain to run next + * -budget for the current period is used up + * -vcpu on waitqueue has started its period + * -and various others ;) in general: determine which vcpu to run next */ static struct task_slice sedf_do_schedule( const struct scheduler *ops, s_time_t now, bool_t tasklet_work_scheduled) @@ -401,18 +421,18 @@ static struct task_slice sedf_do_schedule( int cpu = smp_processor_id(); struct list_head *runq = RUNQ(cpu); struct list_head *waitq = WAITQ(cpu); - struct sedf_vcpu_info *inf = EDOM_INFO(current); + struct sedf_vcpu_info *inf = SEDF_VCPU(current); struct sedf_vcpu_info *runinf, *waitinf; struct task_slice ret; SCHED_STAT_CRANK(schedule); - /* Idle tasks don't need any of the following stuf */ + /* Idle tasks don't need any of the following stuff */ if ( is_idle_vcpu(current) ) goto check_waitq; /* - * Create local state of the status of the domain, in order to avoid + * Create local state of the status of the vcpu, in order to avoid * inconsistent state during scheduling decisions, because data for * vcpu_runnable is not protected by the scheduling lock!
*/ @@ -422,12 +442,12 @@ static struct task_slice sedf_do_schedule( if ( inf->status & SEDF_ASLEEP ) inf->block_abs = now; - desched_edf_dom(now, current); + desched_edf_vcpu(now, current); check_waitq: update_queues(now, runq, waitq); /* - * Now simply pick the first domain from the runqueue, which has the + * Now simply pick the first vcpu from the runqueue, which has the * earliest deadline, because the list is sorted * * Tasklet work (which runs in idle VCPU context) overrides all else. @@ -442,28 +462,28 @@ static struct task_slice sedf_do_schedule( } else if ( !list_empty(runq) ) { - runinf = list_entry(runq->next,struct sedf_vcpu_info,list); + runinf = list_entry(runq->next, struct sedf_vcpu_info, list); ret.task = runinf->vcpu; if ( !list_empty(waitq) ) { waitinf = list_entry(waitq->next, - struct sedf_vcpu_info,list); + struct sedf_vcpu_info, list); /* - * Rerun scheduler, when scheduled domain reaches it's - * end of slice or the first domain from the waitqueue + * Rerun scheduler, when scheduled vcpu consumes + * its budget or the first vcpu from the waitqueue * gets ready. */ - ret.time = MIN(now + runinf->slice - runinf->cputime, + ret.time = MIN(now + runinf->budget - runinf->cputime, PERIOD_BEGIN(waitinf)) - now; } else { - ret.time = runinf->slice - runinf->cputime; + ret.time = runinf->budget - runinf->cputime; } } else { - waitinf = list_entry(waitq->next,struct sedf_vcpu_info, list); + waitinf = list_entry(waitq->next, struct sedf_vcpu_info, list); ret.task = IDLETASK(cpu); ret.time = PERIOD_BEGIN(waitinf) - now; @@ -479,141 +499,89 @@ static struct task_slice sedf_do_schedule( ret.migrated = 0; - EDOM_INFO(ret.task)->sched_start_abs = now; + SEDF_VCPU(ret.task)->sched_start_abs = now; CHECK(ret.time > 0); ASSERT(sedf_runnable(ret.task)); - CPU_INFO(cpu)->current_slice_expires = now + ret.time; + SEDF_PCPU(cpu)->current_slice_expires = now + ret.time; return ret; } -static void sedf_sleep(const struct scheduler *ops, struct vcpu *d) +static void sedf_sleep(const struct scheduler *ops, struct vcpu *v) { - if ( is_idle_vcpu(d) ) + if ( is_idle_vcpu(v) ) return; - EDOM_INFO(d)->status |= SEDF_ASLEEP; + SEDF_VCPU(v)->status |= SEDF_ASLEEP; - if ( per_cpu(schedule_data, d->processor).curr == d ) + if ( per_cpu(schedule_data, v->processor).curr == v ) { - cpu_raise_softirq(d->processor, SCHEDULE_SOFTIRQ); + cpu_raise_softirq(v->processor, SCHEDULE_SOFTIRQ); } else { - if ( __task_on_queue(d) ) - __del_from_queue(d); + if ( __task_on_queue(v) ) + __del_from_queue(v); } } /* - * This function wakes up a domain, i.e. moves them into the waitqueue - * things to mention are: admission control is taking place nowhere at - * the moment, so we can't be sure, whether it is safe to wake the domain - * up at all. Anyway, even if it is safe (total cpu usage <=100%) there are - * some considerations on when to allow the domain to wake up and have it's - * first deadline... - * I detected 3 cases, which could describe the possible behaviour of the - * scheduler, - * and I'll try to make them more clear: - * - * 1. Very conservative - * -when a blocked domain unblocks, it is allowed to start execution at - * the beginning of the next complete period - * (D..deadline, R..running, B..blocking/sleeping, U..unblocking/waking up - * - * DRRB_____D__U_____DRRRRR___D________ ... - * - * -this causes the domain to miss a period (and a deadlline) - * -doesn't disturb the schedule at all - * -deadlines keep occuring isochronous - * - * 2. 
Conservative Part 1: Short Unblocking - * -when a domain unblocks in the same period as it was blocked it - * unblocks and may consume the rest of it's original time-slice minus - * the time it was blocked - * (assume period=9, slice=5) - * - * DRB_UR___DRRRRR___D... - * - * -this also doesn't disturb scheduling, but might lead to the fact, that - * the domain can't finish it's workload in the period - * -addition: experiments have shown that this may have a HUGE impact on - * performance of other domains, becaus it can lead to excessive context - * switches - * - * Part2: Long Unblocking - * Part 2a - * -it is obvious that such accounting of block time, applied when - * unblocking is happening in later periods, works fine aswell - * -the domain is treated as if it would have been running since the start - * of its new period - * - * DRB______D___UR___D... - * - * Part 2b - * -if one needs the full slice in the next period, it is necessary to - * treat the unblocking time as the start of the new period, i.e. move - * the deadline further back (later) - * -this doesn't disturb scheduling as well, because for EDF periods can - * be treated as minimal inter-release times and scheduling stays - * correct, when deadlines are kept relative to the time the process - * unblocks - * - * DRB______D___URRRR___D... - * (D) <- old deadline was here - * -problem: deadlines don't occur isochronous anymore - * - * 3. Unconservative (i.e. incorrect) - * -to boost the performance of I/O dependent domains it would be possible - * to put the domain into the runnable queue immediately, and let it run - * for the remainder of the slice of the current period - * (or even worse: allocate a new full slice for the domain) - * -either behaviour can lead to missed deadlines in other domains as - * opposed to approaches 1,2a,2b - */ - -/* - * Compares two domains in the relation of whether the one is allowed to + * Compares two vcpus in the relation of whether the one is allowed to * interrupt the others execution. - * It returns true (!=0) if a switch to the other domain is good. + * It returns true (!=0) if a switch to the other vcpu is good. + * Priority scheme is as follows: + * EDF: early deadline > late deadline */ static inline int should_switch(struct vcpu *cur, struct vcpu *other, s_time_t now) { struct sedf_vcpu_info *cur_inf, *other_inf; - cur_inf = EDOM_INFO(cur); - other_inf = EDOM_INFO(other); - + cur_inf = SEDF_VCPU(cur); + other_inf = SEDF_VCPU(other); + /* Always interrupt idle vcpu. */ if ( is_idle_vcpu(cur) ) return 1; - + /* Check whether we need to make an earlier scheduling decision */ if ( PERIOD_BEGIN(other_inf) < - CPU_INFO(other->processor)->current_slice_expires ) + SEDF_PCPU(other->processor)->current_slice_expires ) return 1; return 0; } -static void sedf_wake(const struct scheduler *ops, struct vcpu *d) +/* + * This function wakes up a vcpu, i.e. moves them into the appropriate queue + * + * When a blocked vcpu unblocks, it is allowed to start execution at + * the beginning of the next complete period + * (D..deadline, R..running, B..blocking/sleeping, U..unblocking/waking up + * + * DRRB_____D__U_____DRRRRR___D________ ... 
+ * + * - This causes the vcpu to miss a period (and a deadline) + * - Doesn't disturb the schedule at all + * - Deadlines keep occurring isochronously + */ +static void sedf_wake(const struct scheduler *ops, struct vcpu *v) { s_time_t now = NOW(); - struct sedf_vcpu_info* inf = EDOM_INFO(d); + struct sedf_vcpu_info* inf = SEDF_VCPU(v); - if ( unlikely(is_idle_vcpu(d)) ) + if ( unlikely(is_idle_vcpu(v)) ) return; - if ( unlikely(__task_on_queue(d)) ) + if ( unlikely(__task_on_queue(v)) ) return; - ASSERT(!sedf_runnable(d)); + ASSERT(!sedf_runnable(v)); inf->status &= ~SEDF_ASLEEP; if ( unlikely(inf->deadl_abs == 0) ) { /* Initial setup of the deadline */ - inf->deadl_abs = now + inf->slice; + inf->deadl_abs = now + inf->budget; } #ifdef SEDF_STATS @@ -627,14 +595,14 @@ static void sedf_wake(const struct scheduler *ops, struct vcpu *d) } else { - /* Long unblocking */ + /* Long unblocking, someone is going to miss their deadline. */ inf->long_block_tot++; } if ( PERIOD_BEGIN(inf) > now ) - __add_to_waitqueue_sort(d); + __add_to_waitqueue_sort(v); else - __add_to_runqueue_sort(d); + __add_to_runqueue_sort(v); #ifdef SEDF_STATS /* Do some statistics here... */ @@ -644,74 +612,74 @@ static void sedf_wake(const struct scheduler *ops, struct vcpu *d) } #endif - ASSERT(__task_on_queue(d)); + ASSERT(__task_on_queue(v)); /* * Check whether the awakened task needs to invoke the do_schedule * routine. Try to avoid unnecessary runs but: - * Save approximation: Always switch to scheduler! + * Safe approximation: Always switch to scheduler! */ - ASSERT(d->processor >= 0); - ASSERT(d->processor < nr_cpu_ids); - ASSERT(per_cpu(schedule_data, d->processor).curr); + ASSERT(v->processor >= 0); + ASSERT(v->processor < nr_cpu_ids); + ASSERT(per_cpu(schedule_data, v->processor).curr); - if ( should_switch(per_cpu(schedule_data, d->processor).curr, d, now) ) - cpu_raise_softirq(d->processor, SCHEDULE_SOFTIRQ); + if ( should_switch(per_cpu(schedule_data, v->processor).curr, v, now) ) + cpu_raise_softirq(v->processor, SCHEDULE_SOFTIRQ); } - -/* Print a lot of useful information about a domains in the system */ -static void sedf_dump_domain(struct vcpu *d) +/* Print a lot of useful information about the vcpus in the system */ +static void sedf_dump_vcpu(struct vcpu *v) { - printk("%i.%i has=%c ", d->domain->domain_id, d->vcpu_id, - d->is_running ? 'T':'F'); + printk("%i.%i has=%c ", v->domain->domain_id, v->vcpu_id, + v->is_running ?
'T':'F'); printk("p=%"PRIu64" sl=%"PRIu64" ddl=%"PRIu64, - EDOM_INFO(d)->period, EDOM_INFO(d)->slice, EDOM_INFO(d)->deadl_abs); + SEDF_VCPU(v)->period, SEDF_VCPU(v)->budget, SEDF_VCPU(v)->deadl_abs); #ifdef SEDF_STATS - if ( EDOM_INFO(d)->block_time_tot != 0 ) - printk(" pen=%"PRIu64"%%", EDOM_INFO(d)->block_time_tot); - if ( EDOM_INFO(d)->block_tot != 0 ) + if ( SEDF_VCPU(v)->block_time_tot != 0 ) + printk(" pen=%"PRIu64"%%", SEDF_VCPU(v)->block_time_tot); + if ( SEDF_VCPU(v)->block_tot != 0 ) printk("\n blks=%u sh=%u (%u%%) "\ "l=%u (%u%%) avg: b=%"PRIu64" p=%d", - EDOM_INFO(d)->block_tot, EDOM_INFO(d)->short_block_tot, - (EDOM_INFO(d)->short_block_tot * 100) / EDOM_INFO(d)->block_tot, - EDOM_INFO(d)->long_block_tot, - (EDOM_INFO(d)->long_block_tot * 100) / EDOM_INFO(d)->block_tot, - (EDOM_INFO(d)->block_time_tot) / EDOM_INFO(d)->block_tot, - EDOM_INFO(d)->block_tot); + SEDF_VCPU(v)->block_tot, SEDF_VCPU(v)->short_block_tot, + (SEDF_VCPU(v)->short_block_tot * 100) / SEDF_VCPU(v)->block_tot, + SEDF_VCPU(v)->long_block_tot, + (SEDF_VCPU(v)->long_block_tot * 100) / SEDF_VCPU(v)->block_tot, + (SEDF_VCPU(v)->block_time_tot) / SEDF_VCPU(v)->block_tot, + SEDF_VCPU(v)->block_tot); #endif printk("\n"); } -/* Dumps all domains on the specified cpu */ -static void sedf_dump_cpu_state(const struct scheduler *ops, int i) +/* Dumps all vcpus on the specified cpu */ +static void sedf_dump_cpu_state(const struct scheduler *ops, int cpu) { struct list_head *list, *queue, *tmp; - struct sedf_vcpu_info *d_inf; + struct sedf_vcpu_info *v_inf; struct domain *d; - struct vcpu *ed; + struct vcpu *v; int loop = 0; - printk("now=%"PRIu64"\n",NOW()); - queue = RUNQ(i); + printk("now=%"PRIu64"\n", NOW()); + queue = RUNQ(cpu); printk("RUNQ rq %lx n: %lx, p: %lx\n", (unsigned long)queue, (unsigned long) queue->next, (unsigned long) queue->prev); list_for_each_safe ( list, tmp, queue ) { - printk("%3d: ",loop++); - d_inf = list_entry(list, struct sedf_vcpu_info, list); - sedf_dump_domain(d_inf->vcpu); + printk("%3d: ", loop++); + v_inf = list_entry(list, struct sedf_vcpu_info, list); + sedf_dump_vcpu(v_inf->vcpu); } - queue = WAITQ(i); loop = 0; + queue = WAITQ(cpu); + loop = 0; printk("\nWAITQ rq %lx n: %lx, p: %lx\n", (unsigned long)queue, (unsigned long) queue->next, (unsigned long) queue->prev); list_for_each_safe ( list, tmp, queue ) { - printk("%3d: ",loop++); - d_inf = list_entry(list, struct sedf_vcpu_info, list); - sedf_dump_domain(d_inf->vcpu); + printk("%3d: ", loop++); + v_inf = list_entry(list, struct sedf_vcpu_info, list); + sedf_dump_vcpu(v_inf->vcpu); } loop = 0; @@ -722,12 +690,12 @@ static void sedf_dump_cpu_state(const struct scheduler *ops, int i) { if ( (d->cpupool ? 
d->cpupool->sched : &sched_sedf_def) != ops ) continue; - for_each_vcpu(d, ed) + for_each_vcpu(d, v) { - if ( !__task_on_queue(ed) && (ed->processor == i) ) + if ( !__task_on_queue(v) && (v->processor == cpu) ) { - printk("%3d: ",loop++); - sedf_dump_domain(ed); + printk("%3d: ", loop++); + sedf_dump_vcpu(v); } } } @@ -736,7 +704,7 @@ static void sedf_dump_cpu_state(const struct scheduler *ops, int i) /* Set or fetch domain scheduling parameters */ -static int sedf_adjust(const struct scheduler *ops, struct domain *p, struct xen_domctl_scheduler_op *op) +static int sedf_adjust(const struct scheduler *ops, struct domain *d, struct xen_domctl_scheduler_op *op) { struct sedf_priv_info *prv = SEDF_PRIV(ops); unsigned long flags; @@ -746,8 +714,8 @@ static int sedf_adjust(const struct scheduler *ops, struct domain *p, struct xen /* * Serialize against the pluggable scheduler lock to protect from * concurrent updates. We need to take the runq lock for the VCPUs - * as well, since we are touching slice and period. - * + * as well, since we are touching budget and period. + * * As in sched_credit2.c, runq locks nest inside the pluggable scheduler * lock. */ @@ -767,33 +735,33 @@ static int sedf_adjust(const struct scheduler *ops, struct domain *p, struct xen */ if ( (op->u.sedf.period > PERIOD_MAX) || (op->u.sedf.period < PERIOD_MIN) || - (op->u.sedf.slice > op->u.sedf.period) || - (op->u.sedf.slice < SLICE_MIN) ) + (op->u.sedf.slice > op->u.sedf.period) || + (op->u.sedf.slice < BUDGET_MIN) ) { rc = -EINVAL; goto out; } /* Time-driven domains */ - for_each_vcpu ( p, v ) + for_each_vcpu ( d, v ) { spinlock_t *lock = vcpu_schedule_lock(v); - EDOM_INFO(v)->period = op->u.sedf.period; - EDOM_INFO(v)->slice = op->u.sedf.slice; + SEDF_VCPU(v)->period = op->u.sedf.period; + SEDF_VCPU(v)->budget = op->u.sedf.slice; vcpu_schedule_unlock(lock, v); } } else if ( op->cmd == XEN_DOMCTL_SCHEDOP_getinfo ) { - if ( p->vcpu[0] == NULL ) + if ( d->vcpu[0] == NULL ) { rc = -EINVAL; goto out; } - op->u.sedf.period = EDOM_INFO(p->vcpu[0])->period; - op->u.sedf.slice = EDOM_INFO(p->vcpu[0])->slice; + op->u.sedf.period = SEDF_VCPU(d->vcpu[0])->period; + op->u.sedf.slice = SEDF_VCPU(d->vcpu[0])->budget; } out: -- 1.7.9.5