All of lore.kernel.org
 help / color / mirror / Atom feed
From: Yinghai Lu <yinghai@kernel.org>
To: Alexander Gordeev <agordeev@redhat.com>
Cc: linux-kernel@vger.kernel.org, x86@kernel.org,
	Suresh Siddha <suresh.b.siddha@intel.com>,
	Cyrill Gorcunov <gorcunov@openvz.org>
Subject: Re: [PATCH 2/3] x86: x2apic/cluster: Make use of lowest priority delivery mode
Date: Sat, 19 May 2012 13:53:36 -0700	[thread overview]
Message-ID: <CAE9FiQXUM-+KSZ1OcxipRW-3XQED3Csyxs=0ke0hJZR7E_hrhg@mail.gmail.com> (raw)
In-Reply-To: <20120518102640.GB31517@dhcp-26-207.brq.redhat.com>

On Fri, May 18, 2012 at 3:26 AM, Alexander Gordeev <agordeev@redhat.com> wrote:
> Currently x2APIC in logical destination mode delivers interrupts to a
> single CPU, no matter how many CPUs were specified in the destination
> cpumask.
>
> This fix enables delivery of interrupts to multiple CPUs by bit-ORing
> Logical IDs of destination CPUs that have matching Cluster ID.
>
> Because only one cluster could be specified in a message destination
> address, the destination cpumask is tried for a cluster that contains
> maximum number of CPUs matching this cpumask. The CPUs in this cluster
> are selected to receive the interrupts while all other CPUs (in the
> cpumask) are ignored.
>
> Signed-off-by: Alexander Gordeev <agordeev@redhat.com>
> ---
>  arch/x86/include/asm/x2apic.h         |    9 --
>  arch/x86/kernel/apic/x2apic_cluster.c |  140 +++++++++++++++++++++++++++++----
>  arch/x86/kernel/apic/x2apic_phys.c    |    9 ++-
>  3 files changed, 131 insertions(+), 27 deletions(-)
>
> diff --git a/arch/x86/include/asm/x2apic.h b/arch/x86/include/asm/x2apic.h
> index 92e54ab..7a5a832 100644
> --- a/arch/x86/include/asm/x2apic.h
> +++ b/arch/x86/include/asm/x2apic.h
> @@ -28,15 +28,6 @@ static int x2apic_apic_id_registered(void)
>        return 1;
>  }
>
> -/*
> - * For now each logical cpu is in its own vector allocation domain.
> - */
> -static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
> -{
> -       cpumask_clear(retmask);
> -       cpumask_set_cpu(cpu, retmask);
> -}
> -
>  static void
>  __x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest)
>  {
> diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
> index 8f012b2..f8fa4c4 100644
> --- a/arch/x86/kernel/apic/x2apic_cluster.c
> +++ b/arch/x86/kernel/apic/x2apic_cluster.c
> @@ -96,36 +96,142 @@ static void x2apic_send_IPI_all(int vector)
>        __x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLINC);
>  }
>
> +static inline unsigned int
> +__x2apic_cluster_to_apicid(int cpu_in_cluster, const struct cpumask *cpumask)
> +{
> +       unsigned int apicid = 0;
> +       int cpu;
> +
> +       for_each_cpu_and(cpu, per_cpu(cpus_in_cluster, cpu_in_cluster), cpumask)
> +               apicid |= per_cpu(x86_cpu_to_logical_apicid, cpu);
> +
> +       return apicid;
> +}
> +
> +static int
> +__x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask, unsigned int *apicid)
> +{
> +       int ret = 0;
> +       int cpu, heaviest;
> +       unsigned int weight, max_weight;
> +       cpumask_var_t target_cpus, cluster_cpus;
> +
> +       if (unlikely(!alloc_cpumask_var(&target_cpus, GFP_ATOMIC))) {
> +               ret = -ENOMEM;
> +               goto out;
> +       }
> +       if (unlikely(!alloc_cpumask_var(&cluster_cpus, GFP_ATOMIC))) {
> +               ret = -ENOMEM;
> +               goto out_free_target_cpus;
> +       }
> +
> +       cpumask_and(target_cpus, cpumask, cpu_online_mask);
> +       max_weight = 0;
> +
> +       for_each_cpu(cpu, target_cpus) {
> +               cpumask_and(cluster_cpus, per_cpu(cpus_in_cluster, cpu), cpumask);
> +
> +               weight = cpumask_weight(cluster_cpus);
> +               if (weight > max_weight) {
> +                       max_weight = weight;
> +                       heaviest = cpu;
> +               }
> +
> +               cpumask_andnot(target_cpus, target_cpus, cluster_cpus);
> +       }
> +
> +       if (!max_weight) {
> +               ret = -EINVAL;
> +               goto out_free_cluster_cpus;
> +       }
> +
> +       *apicid = __x2apic_cluster_to_apicid(heaviest, cpumask);
> +
> +out_free_cluster_cpus:
> +       free_cpumask_var(cluster_cpus);
> +out_free_target_cpus:
> +       free_cpumask_var(target_cpus);
> +out:
> +       return ret;
> +}
> +
>  static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
>  {
> -       /*
> -        * We're using fixed IRQ delivery, can only return one logical APIC ID.
> -        * May as well be the first.
> -        */
> -       int cpu = cpumask_first(cpumask);
> +       int err;
> +       int cpu;
> +       unsigned int apicid;
>
> -       if ((unsigned)cpu < nr_cpu_ids)
> -               return per_cpu(x86_cpu_to_logical_apicid, cpu);
> -       else
> -               return BAD_APICID;
> +       err = __x2apic_cpu_mask_to_apicid(cpumask, &apicid);
> +       WARN_ON(err);
> +
> +       if (!err)
> +               return apicid;
> +
> +       if (err == -ENOMEM) {
> +               for_each_cpu(cpu, cpumask) {
> +                       if (cpumask_test_cpu(cpu, cpu_online_mask))
> +                               break;
> +               }
> +               if (cpu < nr_cpu_ids)
> +                       return __x2apic_cluster_to_apicid(cpu, cpumask);
> +       }
> +
> +       return BAD_APICID;
>  }
>
>  static unsigned int
>  x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
>                              const struct cpumask *andmask)
>  {
> -       int cpu;
> +       int err;
> +       int cpu, first_cpu;
> +       unsigned int apicid;
> +       cpumask_var_t target_cpus;
> +
> +       if (likely(alloc_cpumask_var(&target_cpus, GFP_ATOMIC))) {
> +               cpumask_and(target_cpus, cpumask, andmask);
> +
> +               err = __x2apic_cpu_mask_to_apicid(target_cpus, &apicid);
> +
> +               free_cpumask_var(target_cpus);
> +
> +               if (!err)
> +                       return apicid;
> +       } else {
> +               err = -ENOMEM;
> +       }
> +
> +       WARN_ON(err);
> +
> +       if (err != -ENOMEM)
> +               return 0;
> +
> +       apicid = 0;
> +       first_cpu = nr_cpu_ids;
>
> -       /*
> -        * We're using fixed IRQ delivery, can only return one logical APIC ID.
> -        * May as well be the first.
> -        */
>        for_each_cpu_and(cpu, cpumask, andmask) {
> -               if (cpumask_test_cpu(cpu, cpu_online_mask))
> +               if (cpumask_test_cpu(cpu, cpu_online_mask)) {
> +                       first_cpu = cpu;
>                        break;
> +               }
> +       }
> +
> +       if (first_cpu < nr_cpu_ids) {
> +               for_each_cpu_and(cpu, per_cpu(cpus_in_cluster, first_cpu),
> +                                cpumask) {
> +                       if (!cpumask_test_cpu(cpu, andmask))
> +                               continue;
> +                       apicid |= per_cpu(x86_cpu_to_logical_apicid, cpu);
> +               }
>        }
>
> -       return per_cpu(x86_cpu_to_logical_apicid, cpu);
> +       return apicid;
> +}
> +
> +static void
> +x2apic_cluster_vector_allocation_domain(int cpu, struct cpumask *retmask)
> +{
> +       cpumask_copy(retmask, cpu_possible_mask);

why not use per_cpu(cpus_in_cluster, cpu) instead?

also you may add one per cpu var like x86_cpu_to_logical_cluster_apicid.


Yinghai

  parent reply	other threads:[~2012-05-19 20:53 UTC|newest]

Thread overview: 69+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2012-05-18 10:26 [PATCH 2/3] x86: x2apic/cluster: Make use of lowest priority delivery mode Alexander Gordeev
2012-05-18 14:41 ` Cyrill Gorcunov
2012-05-18 15:42   ` Alexander Gordeev
2012-05-18 15:51     ` Cyrill Gorcunov
2012-05-19 10:47       ` Cyrill Gorcunov
2012-05-21  7:11         ` Alexander Gordeev
2012-05-21  9:46           ` Cyrill Gorcunov
2012-05-19 20:53 ` Yinghai Lu [this message]
2012-05-21  8:13   ` Alexander Gordeev
2012-05-21 23:02     ` Yinghai Lu
2012-05-21 23:33       ` Yinghai Lu
2012-05-22  9:36         ` Alexander Gordeev
2012-05-21 23:44     ` Suresh Siddha
2012-05-21 23:58       ` [PATCH 1/2] x86, irq: update irq_cfg domain unless the new affinity is a subset of the current domain Suresh Siddha
2012-05-21 23:58         ` [PATCH 2/2] x2apic, cluster: use all the members of one cluster specified in the smp_affinity mask for the interrupt destination Suresh Siddha
2012-05-22  7:04           ` Ingo Molnar
2012-05-22  7:34             ` Cyrill Gorcunov
2012-05-22 17:21             ` Suresh Siddha
2012-05-22 17:39               ` Cyrill Gorcunov
2012-05-22 17:42                 ` Suresh Siddha
2012-05-22 17:45                   ` Cyrill Gorcunov
2012-05-22 20:03           ` Yinghai Lu
2012-06-06 15:04           ` [tip:x86/apic] x86/x2apic/cluster: Use all the members of one cluster specified in the smp_affinity mask for the interrupt destination tip-bot for Suresh Siddha
2012-06-06 22:21             ` Yinghai Lu
2012-06-06 23:14               ` Suresh Siddha
2012-06-06 15:03         ` [tip:x86/apic] x86/irq: Update irq_cfg domain unless the new affinity is a subset of the current domain tip-bot for Suresh Siddha
2012-08-07 15:31           ` Robert Richter
2012-08-07 15:41             ` do_IRQ: 1.55 No irq handler for vector (irq -1) Borislav Petkov
2012-08-07 16:24               ` Suresh Siddha
2012-08-07 17:28                 ` Robert Richter
2012-08-07 17:47                   ` Suresh Siddha
2012-08-07 17:45                 ` Eric W. Biederman
2012-08-07 20:57                   ` Borislav Petkov
2012-08-07 22:39                     ` Suresh Siddha
2012-08-08  8:58                       ` Robert Richter
2012-08-08 11:04                         ` Borislav Petkov
2012-08-08 19:16                           ` Suresh Siddha
2012-08-14 17:02                             ` [tip:x86/urgent] x86, apic: fix broken legacy interrupts in the logical apic mode tip-bot for Suresh Siddha
2012-06-06 17:20         ` [PATCH 1/2] x86, irq: update irq_cfg domain unless the new affinity is a subset of the current domain Alexander Gordeev
2012-06-06 23:02           ` Suresh Siddha
2012-06-16  0:25           ` Suresh Siddha
2012-06-18  9:17             ` Alexander Gordeev
2012-06-19  0:51               ` Suresh Siddha
2012-06-19 23:43                 ` [PATCH 1/2] x86, apic: optimize cpu traversal in __assign_irq_vector() using domain membership Suresh Siddha
2012-06-19 23:43                   ` [PATCH 2/2] x86, x2apic: limit the vector reservation to the user specified mask Suresh Siddha
2012-06-20  5:56                     ` Yinghai Lu
2012-06-21  9:04                     ` Alexander Gordeev
2012-06-21 21:51                       ` Suresh Siddha
2012-06-20  5:53                   ` [PATCH 1/2] x86, apic: optimize cpu traversal in __assign_irq_vector() using domain membership Yinghai Lu
2012-06-21  8:31                   ` Alexander Gordeev
2012-06-21 21:53                     ` Suresh Siddha
2012-06-20  0:18               ` [PATCH 1/2] x86, irq: update irq_cfg domain unless the new affinity is a subset of the current domain Suresh Siddha
2012-06-21 11:00                 ` Alexander Gordeev
2012-06-21 21:58                   ` Suresh Siddha
2012-05-22 10:12       ` [PATCH 2/3] x86: x2apic/cluster: Make use of lowest priority delivery mode Alexander Gordeev
2012-05-21  8:22 ` Ingo Molnar
2012-05-21  9:36   ` Alexander Gordeev
2012-05-21 12:40     ` Ingo Molnar
2012-05-21 14:48       ` Alexander Gordeev
2012-05-21 14:59         ` Ingo Molnar
2012-05-21 15:22           ` Alexander Gordeev
2012-05-21 15:34           ` Cyrill Gorcunov
2012-05-21 15:36           ` Linus Torvalds
2012-05-21 18:07             ` Suresh Siddha
2012-05-21 18:18               ` Linus Torvalds
2012-05-21 18:37                 ` Suresh Siddha
2012-05-21 19:30                   ` Ingo Molnar
2012-05-21 19:15             ` Ingo Molnar
2012-05-21 19:56               ` Suresh Siddha

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to='CAE9FiQXUM-+KSZ1OcxipRW-3XQED3Csyxs=0ke0hJZR7E_hrhg@mail.gmail.com' \
    --to=yinghai@kernel.org \
    --cc=agordeev@redhat.com \
    --cc=gorcunov@openvz.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=suresh.b.siddha@intel.com \
    --cc=x86@kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.