From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1756406AbcA2Wks (ORCPT ); Fri, 29 Jan 2016 17:40:48 -0500 Received: from shelob.surriel.com ([74.92.59.67]:47530 "EHLO shelob.surriel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1756183AbcA2WkI (ORCPT ); Fri, 29 Jan 2016 17:40:08 -0500 From: riel@redhat.com To: linux-kernel@vger.kernel.org Cc: tglx@linutronix.de, mingo@kernel.org, peterz@infradead.org, luto@amacapital.net, fweisbec@gmail.com Subject: [PATCH 2/2] sched,time: call __acct_update_integrals once a jiffy Date: Fri, 29 Jan 2016 17:23:00 -0500 Message-Id: <1454106180-20918-3-git-send-email-riel@redhat.com> X-Mailer: git-send-email 2.5.0 In-Reply-To: <1454106180-20918-1-git-send-email-riel@redhat.com> References: <1454106180-20918-1-git-send-email-riel@redhat.com> Sender: linux-kernel-owner@vger.kernel.org List-ID: X-Mailing-List: linux-kernel@vger.kernel.org From: Rik van Riel Because __acct_update_integrals does nothing unless the time interval in question exceeds a jiffy, there is no real reason to call it more than once a jiffy from the syscall, irq, and guest entry & exit paths. If tasks get rescheduled frequently, the scheduler will still update their time statistics normally. This patch only impacts longer running tasks. 
This speeds up the syscall, irq, and guest entry & exit paths for longer running tasks. Signed-off-by: Rik van Riel <riel@redhat.com> --- include/linux/sched.h | 1 + kernel/sched/cputime.c | 35 +++++++++++++++++++++++++++++------ 2 files changed, 30 insertions(+), 6 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index a10494a94cc3..019c3af98503 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1532,6 +1532,7 @@ struct task_struct { struct prev_cputime prev_cputime; #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN seqcount_t vtime_seqcount; + unsigned long vtime_jiffies; unsigned long long vtime_snap; enum { /* Task is sleeping or running in a CPU with VTIME inactive */ diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index b2ab2ffb1adc..923c110319b1 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -668,6 +668,15 @@ void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime #endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN +static bool vtime_jiffies_changed(struct task_struct *tsk, unsigned long now) +{ + if (tsk->vtime_jiffies == jiffies) + return false; + + tsk->vtime_jiffies = jiffies; + return true; +} + static unsigned long long vtime_delta(struct task_struct *tsk) { unsigned long long clock; @@ -699,6 +708,9 @@ static void __vtime_account_system(struct task_struct *tsk) void vtime_account_system(struct task_struct *tsk) { + if (!vtime_jiffies_changed(tsk, jiffies)) + return; + write_seqcount_begin(&tsk->vtime_seqcount); __vtime_account_system(tsk); write_seqcount_end(&tsk->vtime_seqcount); @@ -707,7 +719,8 @@ void vtime_account_system(struct task_struct *tsk) void vtime_gen_account_irq_exit(struct task_struct *tsk) { write_seqcount_begin(&tsk->vtime_seqcount); - __vtime_account_system(tsk); + if (vtime_jiffies_changed(tsk, jiffies)) + __vtime_account_system(tsk); if (context_tracking_in_user()) tsk->vtime_snap_whence = VTIME_USER; write_seqcount_end(&tsk->vtime_seqcount); @@ -718,16 +731,19 @@ void vtime_account_user(struct 
task_struct *tsk) cputime_t delta_cpu; write_seqcount_begin(&tsk->vtime_seqcount); - delta_cpu = get_vtime_delta(tsk); tsk->vtime_snap_whence = VTIME_SYS; - account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu)); + if (vtime_jiffies_changed(tsk, jiffies)) { + delta_cpu = get_vtime_delta(tsk); + account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu)); + } write_seqcount_end(&tsk->vtime_seqcount); } void vtime_user_enter(struct task_struct *tsk) { write_seqcount_begin(&tsk->vtime_seqcount); - __vtime_account_system(tsk); + if (vtime_jiffies_changed(tsk, jiffies)) + __vtime_account_system(tsk); tsk->vtime_snap_whence = VTIME_USER; write_seqcount_end(&tsk->vtime_seqcount); } @@ -742,7 +758,8 @@ void vtime_guest_enter(struct task_struct *tsk) * that can thus safely catch up with a tickless delta. */ write_seqcount_begin(&tsk->vtime_seqcount); - __vtime_account_system(tsk); + if (vtime_jiffies_changed(tsk, jiffies)) + __vtime_account_system(tsk); current->flags |= PF_VCPU; write_seqcount_end(&tsk->vtime_seqcount); } @@ -759,8 +776,12 @@ EXPORT_SYMBOL_GPL(vtime_guest_exit); void vtime_account_idle(struct task_struct *tsk) { - cputime_t delta_cpu = get_vtime_delta(tsk); + cputime_t delta_cpu; + + if (!vtime_jiffies_changed(tsk, jiffies)) + return; + delta_cpu = get_vtime_delta(tsk); account_idle_time(delta_cpu); } @@ -773,6 +794,7 @@ void arch_vtime_task_switch(struct task_struct *prev) write_seqcount_begin(¤t->vtime_seqcount); current->vtime_snap_whence = VTIME_SYS; current->vtime_snap = sched_clock_cpu(smp_processor_id()); + current->vtime_jiffies = jiffies; write_seqcount_end(¤t->vtime_seqcount); } @@ -784,6 +806,7 @@ void vtime_init_idle(struct task_struct *t, int cpu) write_seqcount_begin(&t->vtime_seqcount); t->vtime_snap_whence = VTIME_SYS; t->vtime_snap = sched_clock_cpu(cpu); + t->vtime_jiffies = jiffies; write_seqcount_end(&t->vtime_seqcount); local_irq_restore(flags); } -- 2.5.0