From: David Carrillo-Cisneros <davidcc@google.com>
To: Fenghua Yu <fenghua.yu@intel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>, Ingo Molnar <mingo@elte.hu>,
	"H. Peter Anvin" <h.peter.anvin@intel.com>,
	Tony Luck <tony.luck@intel.com>, Tejun Heo <tj@kernel.org>,
	Borislav Petkov <bp@suse.de>,
	Stephane Eranian <eranian@google.com>,
	Peter Zijlstra <peterz@infradead.org>,
	Marcelo Tosatti <mtosatti@redhat.com>,
	Ravi V Shankar <ravi.v.shankar@intel.com>,
	Vikas Shivappa <vikas.shivappa@linux.intel.com>,
	Sai Prakhya <sai.praneeth.prakhya@intel.com>,
	linux-kernel <linux-kernel@vger.kernel.org>, x86 <x86@kernel.org>
Subject: Re: [PATCH 30/32] x86/intel_rdt_rdtgroup.c: Process schemas input from rscctrl interface
Date: Wed, 13 Jul 2016 17:41:34 -0700
Message-ID: <CALcN6mh0Pr8JJR2fQzLZWE5xo4YJf3cF25Hz8Xx7q37RynXnrA@mail.gmail.com>
In-Reply-To: <1468371785-53231-31-git-send-email-fenghua.yu@intel.com>

> +static int get_res_type(char **res, enum resource_type *res_type)
> +{
> +       char *tok;
> +
> +       tok = strsep(res, ":");
> +       if (tok == NULL)
> +               return -EINVAL;
> +
> +       if (!strcmp(tok, "L3")) {

Maybe use strstrip() to allow more readable input, e.g. "L3 : <schema>"?
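Something like this (untested sketch, reusing the identifiers above):

        tok = strsep(res, ":");
        if (tok == NULL)
                return -EINVAL;

        /* Sketch: trim surrounding whitespace so "L3 : <schema>" parses
         * the same as "L3:<schema>". */
        tok = strstrip(tok);

        if (!strcmp(tok, "L3")) {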

> +               *res_type = RESOURCE_L3;
> +               return 0;
> +       }
> +
> +       return -EINVAL;
> +}
> +
> +static int divide_resources(char *buf, char *resources[RESOURCE_NUM])
> +{
> +       char *tok;
> +       unsigned int resource_num = 0;
> +       int ret = 0;
> +       char *res;
> +       char *res_block;
> +       size_t size;
> +       enum resource_type res_type;
> +
> +       size = strlen(buf) + 1;
> +       res = kzalloc(size, GFP_KERNEL);
> +       if (!res) {
> +               ret = -ENOSPC;

-ENOMEM? -ENOSPC means "No space left on device", which doesn't describe a failed allocation.

> +
> +       res_block = res;
> +       ret = get_res_type(&res_block, &res_type);
> +       if (ret) {
> +               pr_info("Unknown resource type!");
> +               goto out;
> +       }

Does this work if res_block doesn't contain a ":"? Don't you need to check res_block afterwards?
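When there is no ":", strsep() returns the whole string and sets res_block
to NULL (e.g. an input of just "L3" still gets past get_res_type()), so
whatever parses res_block next will dereference NULL. A sketch of the extra
check (untested):

        /* Sketch: strsep() in get_res_type() leaves res_block NULL when
         * the input had no ":" separator. */
        if (res_block == NULL) {
                ret = -EINVAL;
                goto out;
        }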

> +static int get_cache_schema(char *buf, struct cache_resource *l, int level,
> +                        struct rdtgroup *rdtgrp)
> +{
> +       char *tok, *tok_cache_id;
> +       int ret;
> +       int domain_num;
> +       int input_domain_num;
> +       int len;
> +       unsigned int input_cache_id;
> +       unsigned int cid;
> +       unsigned int leaf;
> +
> +       if (!cat_enabled(level) && strcmp(buf, ";")) {
> +               pr_info("Disabled resource should have empty schema\n");
> +               return -EINVAL;
> +       }
> +
> +       len = strlen(buf);
> +       /*
> +        * Translate cache id based cbm from one line string with format
> +        * "<cache prefix>:<cache id0>=xxxx;<cache id1>=xxxx;..." for
> +        * disabled cdp.
> +        * Or
> +        * "<cache prefix>:<cache id0>=xxxxx,xxxxx;<cache id1>=xxxxx,xxxxx;..."
> +        * for enabled cdp.
> +        */
> +       input_domain_num = 0;
> +       while ((tok = strsep(&buf, ";")) != NULL) {
> +               tok_cache_id = strsep(&tok, "=");
> +               if (tok_cache_id == NULL)
> +                       goto cache_id_err;

What if there is no "="? Also, it would be nice to allow spaces around "=".
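A sketch (untested): checking tok catches a missing "=" (strsep() leaves it
NULL in that case), and strstrip() would tolerate spaces around "=":

        tok_cache_id = strsep(&tok, "=");
        /* Sketch: tok is NULL here when the token contained no "=". */
        if (tok_cache_id == NULL || tok == NULL)
                goto cache_id_err;

        /* Sketch: allow "0 = f" style input. */
        tok_cache_id = strstrip(tok_cache_id);
        tok = strstrip(tok);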

> +
> +               ret = kstrtouint(tok_cache_id, 16, &input_cache_id);
> +               if (ret)
> +                       goto cache_id_err;
> +
> +               leaf = level_to_leaf(level);

Why is this inside the loop? level doesn't change between iterations.
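The lookup could be hoisted before the while, e.g. (untested sketch):

        leaf = level_to_leaf(level);

        input_domain_num = 0;
        while ((tok = strsep(&buf, ";")) != NULL) {
                ...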

> +               cid = cache_domains[leaf].shared_cache_id[input_domain_num];
> +               if (input_cache_id != cid)
> +                       goto cache_id_err;

So schemata must be present for all cache ids and listed in increasing
cache id order? What's the point of specifying the cache id in the input,
then? It could instead be used to look up the matching domain, as in the
sketch below.
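Rough sketch (untested; "nr_domains" is just a stand-in for however the
number of domains at this level is obtained):

        /* Sketch: map the user-supplied cache id to a domain index rather
         * than requiring the ids to appear in domain order. */
        int d;

        for (d = 0; d < nr_domains; d++)
                if (cache_domains[leaf].shared_cache_id[d] == input_cache_id)
                        break;
        if (d == nr_domains)
                goto cache_id_err;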

> +
> +/*
> + * Check if the reference counts are all ones in rdtgrp's domain.
> + */
> +static bool one_refcnt(struct rdtgroup *rdtgrp, int domain)
> +{
> +       int refcnt;
> +       int closid;
> +
> +       closid = rdtgrp->resource.closid[domain];
> +       if (cat_l3_enabled) {

If cat_l3_enabled == false, are the reference counts assumed to always be one?

> + * Go through all shared domains. Check if there is an existing closid
> + * in all rdtgroups that matches l3 cbms in the shared
> + * domain. If find one, reuse the closid. Otherwise, allocate a new one.
> + */
> +static int get_rdtgroup_resources(struct resources *resources_set,
> +                                 struct rdtgroup *rdtgrp)
> +{
> +       struct cache_resource *l3;
> +       bool l3_cbm_found;
> +       struct list_head *l;
> +       struct rdtgroup *r;
> +       u64 cbm;
> +       int rdt_closid[MAX_CACHE_DOMAINS];
> +       int rdt_closid_type[MAX_CACHE_DOMAINS];
> +       int domain;
> +       int closid;
> +       int ret;
> +
> +       l3 = resources_set->l3;

l3 is NULL when cat_l3_enabled == false, but it looks like it may still be
dereferenced later anyway.

> +       memcpy(rdt_closid, rdtgrp->resource.closid,
> +              shared_domain_num * sizeof(int));
> +       for (domain = 0; domain < shared_domain_num; domain++) {
> +               if (rdtgrp->resource.valid) {
> +                       /*
> +                        * If current rdtgrp is the only user of cbms in
> +                        * this domain, will replace the cbms with the input
> +                        * cbms and reuse its own closid.
> +                        */
> +                       if (one_refcnt(rdtgrp, domain)) {
> +                               closid = rdtgrp->resource.closid[domain];
> +                               rdt_closid[domain] = closid;
> +                               rdt_closid_type[domain] = REUSED_OWN_CLOSID;
> +                               continue;
> +                       }
> +
> +                       l3_cbm_found = true;
> +
> +                       if (cat_l3_enabled)
> +                               l3_cbm_found = cbm_found(l3, rdtgrp, domain,
> +                                                        CACHE_LEVEL3);
> +
> +                       /*
> +                        * If the cbms in this shared domain are already
> +                        * existing in current rdtgrp, record the closid
> +                        * and its type.
> +                        */
> +                       if (l3_cbm_found) {
> +                               closid = rdtgrp->resource.closid[domain];
> +                               rdt_closid[domain] = closid;
> +                               rdt_closid_type[domain] = CURRENT_CLOSID;

A new l3 resource will still be created here even when cat_l3_enabled is false.


> +static void init_cache_resource(struct cache_resource *l)
> +{
> +       l->cbm = NULL;
> +       l->cbm2 = NULL;

Is cbm2 the data bitmask used when CDP is enabled? If so, a more
descriptive name would help.

> +       l->closid = NULL;
> +       l->refcnt = NULL;
> +}
> +
> +static void free_cache_resource(struct cache_resource *l)
> +{
> +       kfree(l->cbm);
> +       kfree(l->cbm2);
> +       kfree(l->closid);
> +       kfree(l->refcnt);

This function is also used to clean up after alloc_cache_resource() in the
error path of get_resources(), where it's not necessarily true that all of
l's members were allocated.
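kfree(NULL) is a no-op, so this is only safe if the not-yet-allocated
members are guaranteed to be NULL when the error path runs. Rough sketch of
what I mean (illustrative only; the kcalloc line and "domain_num" are just
placeholders, not the patch's actual allocation code):

        /* Sketch: NULL the pointers first so free_cache_resource() is
         * safe even if a later allocation fails. */
        init_cache_resource(l);

        l->cbm = kcalloc(domain_num, sizeof(*l->cbm), GFP_KERNEL);
        if (!l->cbm)
                return -ENOMEM; /* free_cache_resource(l) still safe */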

