From mboxrd@z Thu Jan 1 00:00:00 1970
Return-Path:
Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand
	id S1754812AbbCBRvG (ORCPT );
	Mon, 2 Mar 2015 12:51:06 -0500
Received: from linuxhacker.ru ([217.76.32.60]:50816 "EHLO fiona.linuxhacker.ru"
	rhost-flags-OK-FAIL-OK-OK) by vger.kernel.org with ESMTP
	id S1752602AbbCBRvE convert rfc822-to-8bit (ORCPT );
	Mon, 2 Mar 2015 12:51:04 -0500
Subject: Re: [PATCH 05/16] staging/lustre: fix up obsolete cpu function usage.
Mime-Version: 1.0 (Apple Message framework v1283)
Content-Type: text/plain; charset=us-ascii
From: Oleg Drokin
In-Reply-To: <1425296150-4722-5-git-send-email-rusty@rustcorp.com.au>
Date: Mon, 2 Mar 2015 12:50:50 -0500
Cc: linux-kernel@vger.kernel.org
Content-Transfer-Encoding: 8BIT
Message-Id: <74B1B5A4-D3CE-4A7A-A09B-6B755623ADA4@linuxhacker.ru>
References: <1425296150-4722-1-git-send-email-rusty@rustcorp.com.au>
 <1425296150-4722-5-git-send-email-rusty@rustcorp.com.au>
To: Rusty Russell
X-Mailer: Apple Mail (2.1283)
Sender: linux-kernel-owner@vger.kernel.org
List-ID:
X-Mailing-List: linux-kernel@vger.kernel.org

Thanks!
Seems there was a mid-air collision with my own patch, which was not as
comprehensive with regard to the functions touched:
https://lkml.org/lkml/2015/3/2/10

On the other hand, I also tried to clean up some of the NR_CPUS usage while
I was at it, and that raises a question from me. In code like:

    for_each_cpu_mask(i, blah) {
        blah
        if (something)
            break;
    }
    if (i == NR_CPUS)
        blah;

when we replace for_each_cpu_mask with for_each_cpu, what do we check the
counter against now to see that the entire loop was executed and we did not
exit prematurely? nr_cpu_ids? (A sketch of the converted pattern is at the
end of this mail, after the quoted patch.)

Also, I assume we still want to get rid of direct cpumask assignments like:

> 		mask = *cpumask_of_node(cpu_to_node(index));

On Mar 2, 2015, at 6:35 AM, Rusty Russell wrote:

> They triggered this cleanup, so I've separated their patch in the assumption
> they've already combed their code.
>
> Signed-off-by: Rusty Russell
> Cc: Oleg Drokin
> ---
> .../staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c    |  4 +-
> .../staging/lustre/lustre/libcfs/linux/linux-cpu.c | 88 +++++++++++-----------
> drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c     |  6 +-
> drivers/staging/lustre/lustre/ptlrpc/service.c     |  4 +-
> 4 files changed, 52 insertions(+), 50 deletions(-)
>
> diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
> index 651016919669..a25816ab9e53 100644
> --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
> +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
> @@ -638,8 +638,8 @@ kiblnd_get_completion_vector(kib_conn_t *conn, int cpt)
>                 return 0;
>
>         /* hash NID to CPU id in this partition... */
> -       off = do_div(nid, cpus_weight(*mask));
> -       for_each_cpu_mask(i, *mask) {
> +       off = do_div(nid, cpumask_weight(mask));
> +       for_each_cpu(i, mask) {
>                 if (off-- == 0)
>                         return i % vectors;
>         }
> diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c
> index 05f7595f18aa..c8fca8aae848 100644
> --- a/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c
> +++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c
> @@ -204,7 +204,7 @@ cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len)
>                 }
>
>                 tmp += rc;
> -               for_each_cpu_mask(j, *cptab->ctb_parts[i].cpt_cpumask) {
> +               for_each_cpu(j, cptab->ctb_parts[i].cpt_cpumask) {
>                         rc = snprintf(tmp, len, "%d ", j);
>                         len -= rc;
>                         if (len <= 0) {
> @@ -240,8 +240,8 @@ cfs_cpt_weight(struct cfs_cpt_table *cptab, int cpt)
>         LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
>
>         return cpt == CFS_CPT_ANY ?
> -              cpus_weight(*cptab->ctb_cpumask) :
> -              cpus_weight(*cptab->ctb_parts[cpt].cpt_cpumask);
> +              cpumask_weight(cptab->ctb_cpumask) :
> +              cpumask_weight(cptab->ctb_parts[cpt].cpt_cpumask);
> }
> EXPORT_SYMBOL(cfs_cpt_weight);
>
> @@ -251,8 +251,9 @@ cfs_cpt_online(struct cfs_cpt_table *cptab, int cpt)
>         LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
>
>         return cpt == CFS_CPT_ANY ?
> -              any_online_cpu(*cptab->ctb_cpumask) != NR_CPUS :
> -              any_online_cpu(*cptab->ctb_parts[cpt].cpt_cpumask) != NR_CPUS;
> +              cpumask_any_and(cptab->ctb_cpumask, cpu_online_mask) != NR_CPUS :
> +              cpumask_any_and(cptab->ctb_parts[cpt].cpt_cpumask,
> +                              cpu_online_mask) != NR_CPUS;
> }
> EXPORT_SYMBOL(cfs_cpt_online);
>
> @@ -296,11 +297,11 @@ cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
>
>         cptab->ctb_cpu2cpt[cpu] = cpt;
>
> -       LASSERT(!cpu_isset(cpu, *cptab->ctb_cpumask));
> -       LASSERT(!cpu_isset(cpu, *cptab->ctb_parts[cpt].cpt_cpumask));
> +       LASSERT(!cpumask_test_cpu(cpu, cptab->ctb_cpumask));
> +       LASSERT(!cpumask_test_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask));
>
> -       cpu_set(cpu, *cptab->ctb_cpumask);
> -       cpu_set(cpu, *cptab->ctb_parts[cpt].cpt_cpumask);
> +       cpumask_set_cpu(cpu, cptab->ctb_cpumask);
> +       cpumask_set_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask);
>
>         node = cpu_to_node(cpu);
>
> @@ -344,11 +345,11 @@ cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
>                 return;
>         }
>
> -       LASSERT(cpu_isset(cpu, *cptab->ctb_parts[cpt].cpt_cpumask));
> -       LASSERT(cpu_isset(cpu, *cptab->ctb_cpumask));
> +       LASSERT(cpumask_test_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask));
> +       LASSERT(cpumask_test_cpu(cpu, cptab->ctb_cpumask));
>
> -       cpu_clear(cpu, *cptab->ctb_parts[cpt].cpt_cpumask);
> -       cpu_clear(cpu, *cptab->ctb_cpumask);
> +       cpumask_clear_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask);
> +       cpumask_clear_cpu(cpu, cptab->ctb_cpumask);
>         cptab->ctb_cpu2cpt[cpu] = -1;
>
>         node = cpu_to_node(cpu);
>
> @@ -356,7 +357,7 @@ cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
>         LASSERT(node_isset(node, *cptab->ctb_parts[cpt].cpt_nodemask));
>         LASSERT(node_isset(node, *cptab->ctb_nodemask));
>
> -       for_each_cpu_mask(i, *cptab->ctb_parts[cpt].cpt_cpumask) {
> +       for_each_cpu(i, cptab->ctb_parts[cpt].cpt_cpumask) {
>                 /* this CPT has other CPU belonging to this node? */
>                 if (cpu_to_node(i) == node)
>                         break;
> @@ -365,7 +366,7 @@ cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
>         if (i == NR_CPUS)
>                 node_clear(node, *cptab->ctb_parts[cpt].cpt_nodemask);
>
> -       for_each_cpu_mask(i, *cptab->ctb_cpumask) {
> +       for_each_cpu(i, cptab->ctb_cpumask) {
>                 /* this CPT-table has other CPU belonging to this node? */
>                 if (cpu_to_node(i) == node)
>                         break;
> @@ -383,13 +384,13 @@ cfs_cpt_set_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask)
> {
>         int i;
>
> -       if (cpus_weight(*mask) == 0 || any_online_cpu(*mask) == NR_CPUS) {
> +       if (cpumask_weight(mask) == 0 || cpumask_any_and(mask, cpu_online_mask) == NR_CPUS) {
>                 CDEBUG(D_INFO, "No online CPU is found in the CPU mask for CPU partition %d\n",
>                        cpt);
>                 return 0;
>         }
>
> -       for_each_cpu_mask(i, *mask) {
> +       for_each_cpu(i, mask) {
>                 if (!cfs_cpt_set_cpu(cptab, cpt, i))
>                         return 0;
>         }
> @@ -403,7 +404,7 @@ cfs_cpt_unset_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask)
> {
>         int i;
>
> -       for_each_cpu_mask(i, *mask)
> +       for_each_cpu(i, mask)
>                 cfs_cpt_unset_cpu(cptab, cpt, i);
> }
> EXPORT_SYMBOL(cfs_cpt_unset_cpumask);
> @@ -493,7 +494,7 @@ cfs_cpt_clear(struct cfs_cpt_table *cptab, int cpt)
>         }
>
>         for (; cpt <= last; cpt++) {
> -               for_each_cpu_mask(i, *cptab->ctb_parts[cpt].cpt_cpumask)
> +               for_each_cpu(i, cptab->ctb_parts[cpt].cpt_cpumask)
>                         cfs_cpt_unset_cpu(cptab, cpt, i);
>         }
> }
> @@ -578,14 +579,14 @@ cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt)
>                 nodemask = cptab->ctb_parts[cpt].cpt_nodemask;
>         }
>
> -       if (any_online_cpu(*cpumask) == NR_CPUS) {
> +       if (cpumask_any_and(cpumask, cpu_online_mask) == NR_CPUS) {
>                 CERROR("No online CPU found in CPU partition %d, did someone do CPU hotplug on system? You might need to reload Lustre modules to keep system working well.\n",
>                        cpt);
>                 return -EINVAL;
>         }
>
>         for_each_online_cpu(i) {
> -               if (cpu_isset(i, *cpumask))
> +               if (cpumask_test_cpu(i, cpumask))
>                         continue;
>
>                 rc = set_cpus_allowed_ptr(current, cpumask);
> @@ -616,14 +617,14 @@ cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt,
>
>         LASSERT(number > 0);
>
> -       if (number >= cpus_weight(*node)) {
> -               while (!cpus_empty(*node)) {
> -                       cpu = first_cpu(*node);
> +       if (number >= cpumask_weight(node)) {
> +               while (!cpumask_empty(node)) {
> +                       cpu = cpumask_first(node);
>
>                         rc = cfs_cpt_set_cpu(cptab, cpt, cpu);
>                         if (!rc)
>                                 return -EINVAL;
> -                       cpu_clear(cpu, *node);
> +                       cpumask_clear_cpu(cpu, node);
>                 }
>                 return 0;
>         }
> @@ -636,27 +637,27 @@ cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt,
>                 goto out;
>         }
>
> -       while (!cpus_empty(*node)) {
> -               cpu = first_cpu(*node);
> +       while (!cpumask_empty(node)) {
> +               cpu = cpumask_first(node);
>
>                 /* get cpumask for cores in the same socket */
>                 cfs_cpu_core_siblings(cpu, socket);
> -               cpus_and(*socket, *socket, *node);
> +               cpumask_and(socket, socket, node);
>
> -               LASSERT(!cpus_empty(*socket));
> +               LASSERT(!cpumask_empty(socket));
>
> -               while (!cpus_empty(*socket)) {
> +               while (!cpumask_empty(socket)) {
>                         int i;
>
>                         /* get cpumask for hts in the same core */
>                         cfs_cpu_ht_siblings(cpu, core);
> -                       cpus_and(*core, *core, *node);
> +                       cpumask_and(core, core, node);
>
> -                       LASSERT(!cpus_empty(*core));
> +                       LASSERT(!cpumask_empty(core));
>
> -                       for_each_cpu_mask(i, *core) {
> -                               cpu_clear(i, *socket);
> -                               cpu_clear(i, *node);
> +                       for_each_cpu(i, core) {
> +                               cpumask_clear_cpu(i, socket);
> +                               cpumask_clear_cpu(i, node);
>
>                                 rc = cfs_cpt_set_cpu(cptab, cpt, i);
>                                 if (!rc) {
> @@ -667,7 +668,7 @@ cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt,
>                                 if (--number == 0)
>                                         goto out;
>                         }
> -                       cpu = first_cpu(*socket);
> +                       cpu = cpumask_first(socket);
>                 }
>         }
>
> @@ -767,7 +768,7 @@ cfs_cpt_table_create(int ncpt)
>         for_each_online_node(i) {
>                 cfs_node_to_cpumask(i, mask);
>
> -               while (!cpus_empty(*mask)) {
> +               while (!cpumask_empty(mask)) {
>                         struct cfs_cpu_partition *part;
>                         int n;
>
> @@ -776,24 +777,24 @@ cfs_cpt_table_create(int ncpt)
>
>                         part = &cptab->ctb_parts[cpt];
>
> -                       n = num - cpus_weight(*part->cpt_cpumask);
> +                       n = num - cpumask_weight(part->cpt_cpumask);
>                         LASSERT(n > 0);
>
>                         rc = cfs_cpt_choose_ncpus(cptab, cpt, mask, n);
>                         if (rc < 0)
>                                 goto failed;
>
> -                       LASSERT(num >= cpus_weight(*part->cpt_cpumask));
> -                       if (num == cpus_weight(*part->cpt_cpumask))
> +                       LASSERT(num >= cpumask_weight(part->cpt_cpumask));
> +                       if (num == cpumask_weight(part->cpt_cpumask))
>                                 cpt++;
>                 }
>         }
>
>         if (cpt != ncpt ||
> -           num != cpus_weight(*cptab->ctb_parts[ncpt - 1].cpt_cpumask)) {
> +           num != cpumask_weight(cptab->ctb_parts[ncpt - 1].cpt_cpumask)) {
>                 CERROR("Expect %d(%d) CPU partitions but got %d(%d), CPU hotplug/unplug while setting?\n",
>                        cptab->ctb_nparts, num, cpt,
> -                      cpus_weight(*cptab->ctb_parts[ncpt - 1].cpt_cpumask));
> +                      cpumask_weight(cptab->ctb_parts[ncpt - 1].cpt_cpumask));
>                 goto failed;
>         }
>
> @@ -965,7 +966,8 @@ cfs_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
>                 mutex_lock(&cpt_data.cpt_mutex);
>                 /* if all HTs in a core are offline, it may break affinity */
>                 cfs_cpu_ht_siblings(cpu, cpt_data.cpt_cpumask);
> -               warn = any_online_cpu(*cpt_data.cpt_cpumask) >= nr_cpu_ids;
> +               warn = cpumask_any_and(cpt_data.cpt_cpumask,
> +                                      cpu_online_mask) >= nr_cpu_ids;
>                 mutex_unlock(&cpt_data.cpt_mutex);
>                 CDEBUG(warn ? D_WARNING : D_INFO,
>                        "Lustre: can't support CPU plug-out well now, performance and stability could be impacted [CPU %u action: %lx]\n",
> diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
> index 4621b71fe0b6..625858b6d793 100644
> --- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
> +++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
> @@ -513,8 +513,8 @@ static int ptlrpcd_bind(int index, int max)
>                 int i;
>                 mask = *cpumask_of_node(cpu_to_node(index));
>                 for (i = max; i < num_online_cpus(); i++)
> -                       cpu_clear(i, mask);
> -               pc->pc_npartners = cpus_weight(mask) - 1;
> +                       cpumask_clear_cpu(i, &mask);
> +               pc->pc_npartners = cpumask_weight(&mask) - 1;
>                 set_bit(LIOD_BIND, &pc->pc_flags);
>         }
> #else
> @@ -554,7 +554,7 @@ static int ptlrpcd_bind(int index, int max)
>                          * that are already initialized
>                          */
>                         for (pidx = 0, i = 0; i < index; i++) {
> -                               if (cpu_isset(i, mask)) {
> +                               if (cpumask_test_cpu(i, &mask)) {
>                                         ppc = &ptlrpcds->pd_threads[i];
>                                         pc->pc_partners[pidx++] = ppc;
>                                         ppc->pc_partners[ppc->
> diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c
> index 635b12b22cef..173803829865 100644
> --- a/drivers/staging/lustre/lustre/ptlrpc/service.c
> +++ b/drivers/staging/lustre/lustre/ptlrpc/service.c
> @@ -558,7 +558,7 @@ ptlrpc_server_nthreads_check(struct ptlrpc_service *svc,
>          * there are.
>          */
>         cpumask_copy(&mask, topology_thread_cpumask(0));
> -       if (cpus_weight(mask) > 1) { /* weight is # of HTs */
> +       if (cpumask_weight(&mask) > 1) { /* weight is # of HTs */
>                 /* depress thread factor for hyper-thread */
>                 factor = factor - (factor >> 1) + (factor >> 3);
>         }
> @@ -2771,7 +2771,7 @@ int ptlrpc_hr_init(void)
>         init_waitqueue_head(&ptlrpc_hr.hr_waitq);
>
>         cpumask_copy(&mask, topology_thread_cpumask(0));
> -       weight = cpus_weight(mask);
> +       weight = cpumask_weight(&mask);
>
>         cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
>                 hrp->hrp_cpt = i;
> --
> 2.1.0
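
P.S. To make the loop-termination question above concrete, here is a minimal
sketch (illustration only, not taken from Rusty's patch; the helper names
node_has_cpu_in_mask() and bind_mask_to_node() and their arguments are made
up for the example) of how such a loop could check for a premature exit after
the conversion, and of cpumask_copy() replacing the direct struct assignment:

    #include <linux/cpumask.h>
    #include <linux/topology.h>

    /* for_each_cpu() stops once the iterator reaches nr_cpu_ids, so a loop
     * that ran to completion leaves the counter at >= nr_cpu_ids rather than
     * == NR_CPUS. */
    static bool node_has_cpu_in_mask(const struct cpumask *mask, int node)
    {
            int i;

            for_each_cpu(i, mask) {
                    if (cpu_to_node(i) == node)
                            break;          /* early exit: found a CPU */
            }
            return i < nr_cpu_ids;          /* false => loop ran to the end */
    }

    static void bind_mask_to_node(cpumask_t *mask, int index)
    {
            /* Instead of the direct assignment
             *      mask = *cpumask_of_node(cpu_to_node(index));
             * copy explicitly through the cpumask API: */
            cpumask_copy(mask, cpumask_of_node(cpu_to_node(index)));
    }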