From mboxrd@z Thu Jan 1 00:00:00 1970
Return-Path: <linux-kernel-owner@vger.kernel.org>
Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand
	id S932811AbbKDO4C (ORCPT ); Wed, 4 Nov 2015 09:56:02 -0500
Received: from mx1.redhat.com ([209.132.183.28]:38715 "EHLO mx1.redhat.com"
	rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP
	id S932779AbbKDOz6 (ORCPT ); Wed, 4 Nov 2015 09:55:58 -0500
Date: Wed, 4 Nov 2015 09:55:57 -0500
From: Luiz Capitulino
To: Fenghua Yu <fenghua.yu@intel.com>
Cc: "H Peter Anvin", "Ingo Molnar", "Thomas Gleixner", "Peter Zijlstra",
	"linux-kernel", "x86", "Vikas Shivappa"
Subject: Re: [PATCH V15 05/11] x86/intel_rdt: Add Class of service management
Message-ID: <20151104095557.3c3c8473@redhat.com>
In-Reply-To: <1443766185-61618-6-git-send-email-fenghua.yu@intel.com>
References: <1443766185-61618-1-git-send-email-fenghua.yu@intel.com>
	<1443766185-61618-6-git-send-email-fenghua.yu@intel.com>
Organization: Red Hat
MIME-Version: 1.0
Content-Type: text/plain; charset=US-ASCII
Content-Transfer-Encoding: 7bit
Sender: linux-kernel-owner@vger.kernel.org
List-ID: <linux-kernel.vger.kernel.org>
X-Mailing-List: linux-kernel@vger.kernel.org

On Thu, 1 Oct 2015 23:09:39 -0700
Fenghua Yu <fenghua.yu@intel.com> wrote:

> Adds some data-structures and APIs to support Class of service
> management(closid). There is a new clos_cbm table which keeps a 1:1
> mapping between closid and capacity bit mask (cbm)
> and a count of usage of closid. Each task would be associated with a
> Closid at a time and this patch adds a new field closid to task_struct
> to keep track of the same.
>
> Signed-off-by: Vikas Shivappa
> Signed-off-by: Fenghua Yu
> ---
>  arch/x86/include/asm/intel_rdt.h | 12 ++++++
>  arch/x86/kernel/cpu/intel_rdt.c  | 82 +++++++++++++++++++++++++++++++++++++++-
>  include/linux/sched.h            |  3 ++
>  3 files changed, 95 insertions(+), 2 deletions(-)
>  create mode 100644 arch/x86/include/asm/intel_rdt.h
>
> diff --git a/arch/x86/include/asm/intel_rdt.h b/arch/x86/include/asm/intel_rdt.h
> new file mode 100644
> index 0000000..88b7643
> --- /dev/null
> +++ b/arch/x86/include/asm/intel_rdt.h
> @@ -0,0 +1,12 @@
> +#ifndef _RDT_H_
> +#define _RDT_H_
> +
> +#ifdef CONFIG_INTEL_RDT
> +
> +struct clos_cbm_table {
> +	unsigned long l3_cbm;
> +	unsigned int clos_refcnt;
> +};

Isn't this a single entry?

> +
> +#endif
> +#endif
> diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
> index f49e970..d79213a 100644
> --- a/arch/x86/kernel/cpu/intel_rdt.c
> +++ b/arch/x86/kernel/cpu/intel_rdt.c
> @@ -24,17 +24,95 @@
>
>  #include <linux/slab.h>
>  #include <linux/err.h>
> +#include <asm/intel_rdt.h>
> +
> +/*
> + * cctable maintains 1:1 mapping between CLOSid and cache bitmask.
> + */
> +static struct clos_cbm_table *cctable;
> +/*
> + * closid availability bit map.
> + */
> +unsigned long *closmap;
> +static DEFINE_MUTEX(rdt_group_mutex);
> +
> +static inline void closid_get(u32 closid)
> +{
> +	struct clos_cbm_table *cct = &cctable[closid];
> +
> +	lockdep_assert_held(&rdt_group_mutex);
> +
> +	cct->clos_refcnt++;
> +}
> +
> +static int closid_alloc(u32 *closid)
> +{
> +	u32 maxid;
> +	u32 id;
> +
> +	lockdep_assert_held(&rdt_group_mutex);
> +
> +	maxid = boot_cpu_data.x86_cache_max_closid;
> +	id = find_first_zero_bit(closmap, maxid);
> +	if (id == maxid)
> +		return -ENOSPC;

This causes echo to print "No space left on device" when you run out
of CBMs. I think -ENOMEM makes more sense.
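
For reference, here is a tiny userspace sketch (not kernel code and not
part of this patch; it only illustrates what each errno looks like to
the user whose echo fails):

/*
 * Prints the message a shell user would see for each candidate errno
 * when a write to the control file fails with that code.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	printf("ENOSPC -> %s\n", strerror(ENOSPC)); /* "No space left on device" */
	printf("ENOMEM -> %s\n", strerror(ENOMEM)); /* "Cannot allocate memory" */
	return 0;
}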
> +
> +	set_bit(id, closmap);
> +	closid_get(id);
> +	*closid = id;
> +
> +	return 0;
> +}
> +
> +static inline void closid_free(u32 closid)
> +{
> +	clear_bit(closid, closmap);
> +	cctable[closid].l3_cbm = 0;
> +}
> +
> +static void closid_put(u32 closid)
> +{
> +	struct clos_cbm_table *cct = &cctable[closid];
> +
> +	lockdep_assert_held(&rdt_group_mutex);
> +	if (WARN_ON(!cct->clos_refcnt))
> +		return;
> +
> +	if (!--cct->clos_refcnt)
> +		closid_free(closid);
> +}

This is very minor, but IMHO you got closid_put() and closid_free()
naming backwards. closid_free() is the opposite operation of
closid_alloc(). Likewise, closid_put() is the opposite of closid_get().

>
>  static int __init intel_rdt_late_init(void)
>  {
>  	struct cpuinfo_x86 *c = &boot_cpu_data;
> +	u32 maxid, max_cbm_len;
> +	int err = 0, size;
>
>  	if (!cpu_has(c, X86_FEATURE_CAT_L3))
>  		return -ENODEV;
>
> -	pr_info("Intel cache allocation detected\n");
> +	maxid = c->x86_cache_max_closid;
> +	max_cbm_len = c->x86_cache_max_cbm_len;
>
> -	return 0;
> +	size = maxid * sizeof(struct clos_cbm_table);
> +	cctable = kzalloc(size, GFP_KERNEL);
> +	if (!cctable) {
> +		err = -ENOMEM;
> +		goto out_err;
> +	}
> +
> +	size = BITS_TO_LONGS(maxid) * sizeof(long);
> +	closmap = kzalloc(size, GFP_KERNEL);
> +	if (!closmap) {
> +		kfree(cctable);
> +		err = -ENOMEM;
> +		goto out_err;
> +	}
> +
> +	pr_info("Intel cache allocation enabled\n");
> +out_err:
> +
> +	return err;
>  }
>
>  late_initcall(intel_rdt_late_init);
> diff --git a/include/linux/sched.h b/include/linux/sched.h
> index b7b9501..24bfbac 100644
> --- a/include/linux/sched.h
> +++ b/include/linux/sched.h
> @@ -1668,6 +1668,9 @@ struct task_struct {
>  	/* cg_list protected by css_set_lock and tsk->alloc_lock */
>  	struct list_head cg_list;
>  #endif
> +#ifdef CONFIG_INTEL_RDT
> +	u32 closid;
> +#endif
>  #ifdef CONFIG_FUTEX
>  	struct robust_list_head __user *robust_list;
>  #ifdef CONFIG_COMPAT
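
Coming back to the naming remark above, here is a rough sketch of one
possible way to line the helpers up (illustrative only, adapted from
the quoted patch rather than taken from it): closid_free() undoes
closid_alloc(), and closid_put() undoes closid_get().

/*
 * Illustrative sketch only; reuses cctable, closmap and
 * rdt_group_mutex from the quoted patch.
 */
static void closid_put(u32 closid)	/* undoes closid_get(): drop one reference */
{
	struct clos_cbm_table *cct = &cctable[closid];

	lockdep_assert_held(&rdt_group_mutex);
	if (WARN_ON(!cct->clos_refcnt))
		return;

	cct->clos_refcnt--;
}

static void closid_free(u32 closid)	/* undoes closid_alloc(): release the CLOSid */
{
	closid_put(closid);

	if (!cctable[closid].clos_refcnt) {
		clear_bit(closid, closmap);
		cctable[closid].l3_cbm = 0;
	}
}

Either way the behavior stays the same, so this is purely cosmetic.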