qemu-devel.nongnu.org archive mirror
From: David Gibson <david@gibson.dropbear.id.au>
To: Greg Kurz <groug@kaod.org>
Cc: aik@ozlabs.ru, qemu-ppc@nongnu.org, clg@kaod.org, qemu-devel@nongnu.org
Subject: Re: [RFC for-5.1 1/4] spapr: Refactor locating NVLink2 devices for device tree creation
Date: Fri, 27 Mar 2020 10:55:16 +1100
Message-ID: <20200326235516.GC456060@umbus.fritz.box>
In-Reply-To: <20200326125738.4df23c2b@bahia.lan>


On Thu, Mar 26, 2020 at 12:57:38PM +0100, Greg Kurz wrote:
> On Thu, 26 Mar 2020 16:40:06 +1100
> David Gibson <david@gibson.dropbear.id.au> wrote:
> 
> > Currently spapr_phb_nvgpu_populate_pcidev_dt() works a little cryptically.
> > It steps through all the NVLink2 GPUs and NPUs and if they match the device
> > we're called for, we generate the relevant device tree information.
> > 
> > Make this a little more obvious by introducing helpers to determine it a
> 
> ... to determine if a

Fixed, thanks.

> 
> > given PCI device is an NVLink2 GPU or NPU, returning the NVLink2 slot and
> > link number information as well.
> > 
> > Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
> > ---
> 
> LGTM
> 
> Reviewed-by: Greg Kurz <groug@kaod.org>
> 
> >  hw/ppc/spapr_pci_nvlink2.c | 115 +++++++++++++++++++++++++------------
> >  1 file changed, 79 insertions(+), 36 deletions(-)
> > 
> > diff --git a/hw/ppc/spapr_pci_nvlink2.c b/hw/ppc/spapr_pci_nvlink2.c
> > index 8332d5694e..7d3a685421 100644
> > --- a/hw/ppc/spapr_pci_nvlink2.c
> > +++ b/hw/ppc/spapr_pci_nvlink2.c
> > @@ -390,13 +390,12 @@ void spapr_phb_nvgpu_ram_populate_dt(SpaprPhbState *sphb, void *fdt)
> >  
> >  }
> >  
> > -void spapr_phb_nvgpu_populate_pcidev_dt(PCIDevice *dev, void *fdt, int offset,
> > -                                        SpaprPhbState *sphb)
> > +static bool is_nvgpu(PCIDevice *dev, SpaprPhbState *sphb, int *slot)
> >  {
> > -    int i, j;
> > +    int i;
> >  
> >      if (!sphb->nvgpus) {
> > -        return;
> > +        return false;
> >      }
> >  
> >      for (i = 0; i < sphb->nvgpus->num; ++i) {
> > @@ -406,47 +405,91 @@ void spapr_phb_nvgpu_populate_pcidev_dt(PCIDevice *dev, void *fdt, int offset,
> >          if (!nvslot->gpdev) {
> >              continue;
> >          }
> > +
> >          if (dev == nvslot->gpdev) {
> > -            uint32_t npus[nvslot->linknum];
> > +            if (slot) {
> > +                *slot = i;
> > +            }
> > +            return true;
> > +        }
> > +    }
> >  
> > -            for (j = 0; j < nvslot->linknum; ++j) {
> > -                PCIDevice *npdev = nvslot->links[j].npdev;
> > +    return false;
> > +}
> >  
> > -                npus[j] = cpu_to_be32(PHANDLE_PCIDEV(sphb, npdev));
> > -            }
> > -            _FDT(fdt_setprop(fdt, offset, "ibm,npu", npus,
> > -                             j * sizeof(npus[0])));
> > -            _FDT((fdt_setprop_cell(fdt, offset, "phandle",
> > -                                   PHANDLE_PCIDEV(sphb, dev))));
> > +static bool is_nvnpu(PCIDevice *dev, SpaprPhbState *sphb, int *slot, int *link)
> > +{
> > +    int i, j;
> > +
> > +    if (!sphb->nvgpus) {
> > +        return false;
> > +    }
> > +
> > +    for (i = 0; i < sphb->nvgpus->num; ++i) {
> > +        SpaprPhbPciNvGpuSlot *nvslot = &sphb->nvgpus->slots[i];
> > +
> > +        /* Skip "slot" without attached GPU */
> > +        if (!nvslot->gpdev) {
> >              continue;
> >          }
> >  
> >          for (j = 0; j < nvslot->linknum; ++j) {
> > -            if (dev != nvslot->links[j].npdev) {
> > -                continue;
> > +            if (dev == nvslot->links[j].npdev) {
> > +                if (slot) {
> > +                    *slot = i;
> > +                }
> > +                if (link) {
> > +                    *link = j;
> > +                }
> > +                return true;
> >              }
> > +        }
> > +    }
> >  
> > -            _FDT((fdt_setprop_cell(fdt, offset, "phandle",
> > -                                   PHANDLE_PCIDEV(sphb, dev))));
> > -            _FDT(fdt_setprop_cell(fdt, offset, "ibm,gpu",
> > -                                  PHANDLE_PCIDEV(sphb, nvslot->gpdev)));
> > -            _FDT((fdt_setprop_cell(fdt, offset, "ibm,nvlink",
> > -                                   PHANDLE_NVLINK(sphb, i, j))));
> > -            /*
> > -             * If we ever want to emulate GPU RAM at the same location as on
> > -             * the host - here is the encoding GPA->TGT:
> > -             *
> > -             * gta  = ((sphb->nv2_gpa >> 42) & 0x1) << 42;
> > -             * gta |= ((sphb->nv2_gpa >> 45) & 0x3) << 43;
> > -             * gta |= ((sphb->nv2_gpa >> 49) & 0x3) << 45;
> > -             * gta |= sphb->nv2_gpa & ((1UL << 43) - 1);
> > -             */
> > -            _FDT(fdt_setprop_cell(fdt, offset, "memory-region",
> > -                                  PHANDLE_GPURAM(sphb, i)));
> > -            _FDT(fdt_setprop_u64(fdt, offset, "ibm,device-tgt-addr",
> > -                                 nvslot->tgt));
> > -            _FDT(fdt_setprop_cell(fdt, offset, "ibm,nvlink-speed",
> > -                                  nvslot->links[j].link_speed));
> > +    return false;
> > +}
> > +
> > +void spapr_phb_nvgpu_populate_pcidev_dt(PCIDevice *dev, void *fdt, int offset,
> > +                                        SpaprPhbState *sphb)
> > +{
> > +    int slot, link;
> > +
> > +    if (is_nvgpu(dev, sphb, &slot)) {
> > +        SpaprPhbPciNvGpuSlot *nvslot = &sphb->nvgpus->slots[slot];
> > +        uint32_t npus[nvslot->linknum];
> > +
> > +        for (link = 0; link < nvslot->linknum; ++link) {
> > +            PCIDevice *npdev = nvslot->links[link].npdev;
> > +
> > +            npus[link] = cpu_to_be32(PHANDLE_PCIDEV(sphb, npdev));
> >          }
> > +        _FDT(fdt_setprop(fdt, offset, "ibm,npu", npus,
> > +                         link * sizeof(npus[0])));
> > +        _FDT((fdt_setprop_cell(fdt, offset, "phandle",
> > +                               PHANDLE_PCIDEV(sphb, dev))));
> > +    } else if (is_nvnpu(dev, sphb, &slot, &link)) {
> > +        SpaprPhbPciNvGpuSlot *nvslot = &sphb->nvgpus->slots[slot];
> > +
> > +        _FDT((fdt_setprop_cell(fdt, offset, "phandle",
> > +                               PHANDLE_PCIDEV(sphb, dev))));
> > +        _FDT(fdt_setprop_cell(fdt, offset, "ibm,gpu",
> > +                              PHANDLE_PCIDEV(sphb, nvslot->gpdev)));
> > +        _FDT((fdt_setprop_cell(fdt, offset, "ibm,nvlink",
> > +                               PHANDLE_NVLINK(sphb, slot, link))));
> > +        /*
> > +         * If we ever want to emulate GPU RAM at the same location as
> > +         * on the host - here is the encoding GPA->TGT:
> > +         *
> > +         * gta  = ((sphb->nv2_gpa >> 42) & 0x1) << 42;
> > +         * gta |= ((sphb->nv2_gpa >> 45) & 0x3) << 43;
> > +         * gta |= ((sphb->nv2_gpa >> 49) & 0x3) << 45;
> > +         * gta |= sphb->nv2_gpa & ((1UL << 43) - 1);
> > +         */
> > +        _FDT(fdt_setprop_cell(fdt, offset, "memory-region",
> > +                              PHANDLE_GPURAM(sphb, slot)));
> > +        _FDT(fdt_setprop_u64(fdt, offset, "ibm,device-tgt-addr",
> > +                             nvslot->tgt));
> > +        _FDT(fdt_setprop_cell(fdt, offset, "ibm,nvlink-speed",
> > +                              nvslot->links[link].link_speed));
> >      }
> >  }
> 
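For reference, patch 2/4 in this series ("spapr: Helper to determine if a
device is NVLink2 related") builds a combined check on top of these two
helpers.  A minimal sketch of what such a check could look like, given the
NULL-safe out-parameters above; the function name here is illustrative and
assumed, not necessarily what 2/4 actually uses:

static bool spapr_phb_nvgpu_dev_is_nvlink2(PCIDevice *dev,
                                           SpaprPhbState *sphb)
{
    /*
     * Illustrative sketch only (name assumed): an NVLink2-related
     * device is either a GPU occupying one of the nvgpus slots or
     * an NPU on one of that slot's links.  Both helpers tolerate
     * NULL out-parameters, so callers that only need a yes/no
     * answer can pass NULL for the slot and link indices.
     */
    return is_nvgpu(dev, sphb, NULL) || is_nvnpu(dev, sphb, NULL, NULL);
}

An unplug path (as in patch 4/4) could then reject hot unplug requests up
front for any device for which such a check returns true.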

-- 
David Gibson			| I'll have my music baroque, and my code
david AT gibson.dropbear.id.au	| minimalist, thank you.  NOT _the_ _other_
				| _way_ _around_!
http://www.ozlabs.org/~dgibson


Thread overview: 14+ messages
2020-03-26  5:40 [RFC for-5.1 0/4] Better handling of attempt NVLink2 unplug David Gibson
2020-03-26  5:40 ` [RFC for-5.1 1/4] spapr: Refactor locating NVLink2 devices for device tree creation David Gibson
2020-03-26 11:57   ` Greg Kurz
2020-03-26 23:55     ` David Gibson [this message]
2020-03-26  5:40 ` [RFC for-5.1 2/4] spapr: Helper to determine if a device is NVLink2 related David Gibson
2020-03-26 11:58   ` Greg Kurz
2020-03-26  5:40 ` [RFC for-5.1 3/4] spapr: Fix failure path for attempting to hot unplug PCI bridges David Gibson
2020-03-26 12:18   ` Greg Kurz
2020-03-26 23:54     ` David Gibson
2020-03-26  5:40 ` [RFC for-5.1 4/4] spapr: Don't allow unplug of NVLink2 devices David Gibson
2020-03-26 12:27   ` Greg Kurz
2020-03-26 23:56     ` David Gibson
2020-03-28 12:32   ` Alexey Kardashevskiy
2020-03-31  3:25     ` David Gibson
