From: Alexey Kardashevskiy <aik@ozlabs.ru>
To: Oliver O'Halloran <oohall@gmail.com>, linuxppc-dev@lists.ozlabs.org
Subject: Re: [PATCH 11/15] powerpc/powernv/sriov: Drop iov->pe_num_map[]
Date: Wed, 15 Jul 2020 13:31:00 +1000 [thread overview]
Message-ID: <833e07b0-3c55-ef51-8649-a5e244f72560@ozlabs.ru> (raw)
In-Reply-To: <20200710052340.737567-12-oohall@gmail.com>
On 10/07/2020 15:23, Oliver O'Halloran wrote:
> Currently the iov->pe_num_map[] does one of two things depending on
> whether single PE mode is being used or not. When it is, this contains an
> array which maps a vf_index to the corresponding PE number. When single PE
> mode is not being used this contains a scalar which is the base PE for the
> set of enabled VFs (for for VFn is base + n).
>
> The array was necessary because when calling pnv_ioda_alloc_pe() there is
> no guarantee that the allocated PEs would be contigious. We can now
s/contigious/contiguous/ here and below.
> allocate contigious blocks of PEs so this is no longer an issue. This
> allows us to drop the if (single_mode) {} .. else {} block scattered
> through the SR-IOV code which is a nice clean up.
>
> Signed-off-by: Oliver O'Halloran <oohall@gmail.com>
> ---
> arch/powerpc/platforms/powernv/pci-sriov.c | 109 +++++----------------
> arch/powerpc/platforms/powernv/pci.h | 4 +-
> 2 files changed, 25 insertions(+), 88 deletions(-)
>
> diff --git a/arch/powerpc/platforms/powernv/pci-sriov.c b/arch/powerpc/platforms/powernv/pci-sriov.c
> index d53a85ccb538..08f88187d65a 100644
> --- a/arch/powerpc/platforms/powernv/pci-sriov.c
> +++ b/arch/powerpc/platforms/powernv/pci-sriov.c
> @@ -456,11 +456,13 @@ static int pnv_pci_vf_assign_m64(struct pci_dev *pdev, u16 num_vfs)
>
>
> if (iov->m64_single_mode) {
> + int pe_num = iov->vf_pe_arr[j].pe_number;
> +
> size = pci_iov_resource_size(pdev,
> PCI_IOV_RESOURCES + i);
> start = res->start + size * j;
> rc = pnv_ioda_map_m64_single(phb, win,
> - iov->pe_num_map[j],
> + pe_num,
> start,
> size);
> } else {
> @@ -599,38 +601,24 @@ static int pnv_pci_vf_resource_shift(struct pci_dev *dev, int offset)
>
> static void pnv_pci_sriov_disable(struct pci_dev *pdev)
> {
> + u16 num_vfs, base_pe;
> struct pnv_phb *phb;
> - struct pnv_ioda_pe *pe;
> struct pnv_iov_data *iov;
> - u16 num_vfs, i;
>
> phb = pci_bus_to_pnvhb(pdev->bus);
> iov = pnv_iov_get(pdev);
> num_vfs = iov->num_vfs;
> + base_pe = iov->vf_pe_arr[0].pe_number;
>
> /* Release VF PEs */
> pnv_ioda_release_vf_PE(pdev);
>
> if (phb->type == PNV_PHB_IODA2) {
> if (!iov->m64_single_mode)
> - pnv_pci_vf_resource_shift(pdev, -*iov->pe_num_map);
> + pnv_pci_vf_resource_shift(pdev, -base_pe);
>
> /* Release M64 windows */
> pnv_pci_vf_release_m64(pdev, num_vfs);
> -
> - /* Release PE numbers */
> - if (iov->m64_single_mode) {
> - for (i = 0; i < num_vfs; i++) {
> - if (iov->pe_num_map[i] == IODA_INVALID_PE)
> - continue;
> -
> - pe = &phb->ioda.pe_array[iov->pe_num_map[i]];
> - pnv_ioda_free_pe(pe);
> - }
> - } else
> - bitmap_clear(phb->ioda.pe_alloc, *iov->pe_num_map, num_vfs);
> - /* Releasing pe_num_map */
> - kfree(iov->pe_num_map);
> }
> }
>
> @@ -656,13 +644,7 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
> int vf_bus = pci_iov_virtfn_bus(pdev, vf_index);
> struct pci_dn *vf_pdn;
>
> - if (iov->m64_single_mode)
> - pe_num = iov->pe_num_map[vf_index];
> - else
> - pe_num = *iov->pe_num_map + vf_index;
> -
> - pe = &phb->ioda.pe_array[pe_num];
> - pe->pe_number = pe_num;
> + pe = &iov->vf_pe_arr[vf_index];
> pe->phb = phb;
> pe->flags = PNV_IODA_PE_VF;
> pe->pbus = NULL;
> @@ -670,6 +652,7 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
> pe->mve_number = -1;
> pe->rid = (vf_bus << 8) | vf_devfn;
>
> + pe_num = pe->pe_number;
> pe_info(pe, "VF %04d:%02d:%02d.%d associated with PE#%x\n",
> pci_domain_nr(pdev->bus), pdev->bus->number,
> PCI_SLOT(vf_devfn), PCI_FUNC(vf_devfn), pe_num);
> @@ -701,9 +684,9 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
>
> static int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
> {
> + struct pnv_ioda_pe *base_pe;
> struct pnv_iov_data *iov;
> struct pnv_phb *phb;
> - struct pnv_ioda_pe *pe;
> int ret;
> u16 i;
>
> @@ -717,55 +700,14 @@ static int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
> return -ENOSPC;
> }
>
> - /*
> - * When M64 BARs functions in Single PE mode, the number of VFs
> - * could be enabled must be less than the number of M64 BARs.
> - */
> - if (iov->m64_single_mode && num_vfs > phb->ioda.m64_bar_idx) {
> - dev_info(&pdev->dev, "Not enough M64 BAR for VFs\n");
> + /* allocate a contigious block of PEs for our VFs */
> + base_pe = pnv_ioda_alloc_pe(phb, num_vfs);
> + if (!base_pe) {
> + pci_err(pdev, "Unable to allocate PEs for %d VFs\n", num_vfs);
> return -EBUSY;
> }
>
> - /* Allocating pe_num_map */
> - if (iov->m64_single_mode)
> - iov->pe_num_map = kmalloc_array(num_vfs,
> - sizeof(*iov->pe_num_map),
> - GFP_KERNEL);
> - else
> - iov->pe_num_map = kmalloc(sizeof(*iov->pe_num_map), GFP_KERNEL);
> -
> - if (!iov->pe_num_map)
> - return -ENOMEM;
> -
> - if (iov->m64_single_mode)
> - for (i = 0; i < num_vfs; i++)
> - iov->pe_num_map[i] = IODA_INVALID_PE;
> -
> - /* Calculate available PE for required VFs */
> - if (iov->m64_single_mode) {
> - for (i = 0; i < num_vfs; i++) {
> - pe = pnv_ioda_alloc_pe(phb);
> - if (!pe) {
> - ret = -EBUSY;
> - goto m64_failed;
> - }
> -
> - iov->pe_num_map[i] = pe->pe_number;
> - }
> - } else {
> - mutex_lock(&phb->ioda.pe_alloc_mutex);
> - *iov->pe_num_map = bitmap_find_next_zero_area(
> - phb->ioda.pe_alloc, phb->ioda.total_pe_num,
> - 0, num_vfs, 0);
> - if (*iov->pe_num_map >= phb->ioda.total_pe_num) {
> - mutex_unlock(&phb->ioda.pe_alloc_mutex);
> - dev_info(&pdev->dev, "Failed to enable VF%d\n", num_vfs);
> - kfree(iov->pe_num_map);
> - return -EBUSY;
> - }
> - bitmap_set(phb->ioda.pe_alloc, *iov->pe_num_map, num_vfs);
> - mutex_unlock(&phb->ioda.pe_alloc_mutex);
> - }
> + iov->vf_pe_arr = base_pe;
> iov->num_vfs = num_vfs;
>
> /* Assign M64 window accordingly */
> @@ -781,9 +723,10 @@ static int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
> * Otherwise, the PE# for the VF will conflict with others.
> */
> if (!iov->m64_single_mode) {
> - ret = pnv_pci_vf_resource_shift(pdev, *iov->pe_num_map);
> + ret = pnv_pci_vf_resource_shift(pdev,
> + base_pe->pe_number);
> if (ret)
> - goto m64_failed;
> + goto shift_failed;
> }
> }
>
> @@ -792,20 +735,12 @@ static int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
>
> return 0;
>
> -m64_failed:
> - if (iov->m64_single_mode) {
> - for (i = 0; i < num_vfs; i++) {
> - if (iov->pe_num_map[i] == IODA_INVALID_PE)
> - continue;
> -
> - pe = &phb->ioda.pe_array[iov->pe_num_map[i]];
> - pnv_ioda_free_pe(pe);
> - }
> - } else
> - bitmap_clear(phb->ioda.pe_alloc, *iov->pe_num_map, num_vfs);
> +shift_failed:
> + pnv_pci_vf_release_m64(pdev, num_vfs);
>
> - /* Releasing pe_num_map */
> - kfree(iov->pe_num_map);
> +m64_failed:
> + for (i = 0; i < num_vfs; i++)
> + pnv_ioda_free_pe(&iov->vf_pe_arr[i]);
>
> return ret;
> }
> diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
> index b4c9bdba7217..13555bc549f4 100644
> --- a/arch/powerpc/platforms/powernv/pci.h
> +++ b/arch/powerpc/platforms/powernv/pci.h
> @@ -238,7 +238,9 @@ struct pnv_iov_data {
>
> /* number of VFs enabled */
> u16 num_vfs;
> - unsigned int *pe_num_map; /* PE# for the first VF PE or array */
> +
> + /* pointer to the array of VF PEs. num_vfs long*/
I read the comment and for a second I thought that now you are storing
pnv_ioda_pe structs in pnv_iov_data which is not true: vf_pe_arr
actually points inside phb->ioda.pe_array[]. Maybe add this to the
comment, please.
Otherwise good,
Reviewed-by: Alexey Kardashevskiy <aik@ozlabs.ru>
> + struct pnv_ioda_pe *vf_pe_arr;
>
> /* Did we map the VF BARs with single-PE IODA BARs? */
> bool m64_single_mode;
>
--
Alexey
next prev parent reply other threads:[~2020-07-15 3:32 UTC|newest]
Thread overview: 55+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-07-10 5:23 PowerNV PCI & SR-IOV cleanups Oliver O'Halloran
2020-07-10 5:23 ` [PATCH 01/15] powernv/pci: Add pci_bus_to_pnvhb() helper Oliver O'Halloran
2020-07-13 8:28 ` Alexey Kardashevskiy
2020-07-10 5:23 ` [PATCH 02/15] powerpc/powernv/pci: Always tear down DMA windows on PE release Oliver O'Halloran
2020-07-13 8:30 ` Alexey Kardashevskiy
2020-07-10 5:23 ` [PATCH 03/15] powerpc/powernv/pci: Add explicit tracking of the DMA setup state Oliver O'Halloran
2020-07-14 5:37 ` Alexey Kardashevskiy
2020-07-14 5:58 ` Oliver O'Halloran
2020-07-14 7:21 ` Alexey Kardashevskiy
2020-07-15 0:23 ` Alexey Kardashevskiy
2020-07-15 1:38 ` Oliver O'Halloran
2020-07-15 3:33 ` Alexey Kardashevskiy
2020-07-15 7:05 ` Cédric Le Goater
2020-07-15 9:00 ` Oliver O'Halloran
2020-07-15 10:05 ` Cédric Le Goater
2020-07-10 5:23 ` [PATCH 04/15] powerpc/powernv/pci: Initialise M64 for IODA1 as a 1-1 window Oliver O'Halloran
2020-07-14 7:39 ` Alexey Kardashevskiy
2020-07-10 5:23 ` [PATCH 05/15] powerpc/powernv/sriov: Move SR-IOV into a seperate file Oliver O'Halloran
2020-07-14 9:16 ` Alexey Kardashevskiy
2020-07-22 5:01 ` Oliver O'Halloran
2020-07-22 9:53 ` Alexey Kardashevskiy
2020-07-10 5:23 ` [PATCH 06/15] powerpc/powernv/sriov: Explain how SR-IOV works on PowerNV Oliver O'Halloran
2020-07-15 0:40 ` Alexey Kardashevskiy
2020-07-10 5:23 ` [PATCH 07/15] powerpc/powernv/sriov: Rename truncate_iov Oliver O'Halloran
2020-07-15 0:46 ` Alexey Kardashevskiy
2020-07-10 5:23 ` [PATCH 08/15] powerpc/powernv/sriov: Simplify used window tracking Oliver O'Halloran
2020-07-15 1:34 ` Alexey Kardashevskiy
2020-07-15 1:41 ` Oliver O'Halloran
2020-07-10 5:23 ` [PATCH 09/15] powerpc/powernv/sriov: Factor out M64 BAR setup Oliver O'Halloran
2020-07-15 2:09 ` Alexey Kardashevskiy
2020-07-10 5:23 ` [PATCH 10/15] powerpc/powernv/pci: Refactor pnv_ioda_alloc_pe() Oliver O'Halloran
2020-07-15 2:29 ` Alexey Kardashevskiy
2020-07-15 2:53 ` Oliver O'Halloran
2020-07-15 3:15 ` Alexey Kardashevskiy
2020-07-10 5:23 ` [PATCH 11/15] powerpc/powernv/sriov: Drop iov->pe_num_map[] Oliver O'Halloran
2020-07-15 3:31 ` Alexey Kardashevskiy [this message]
2020-07-10 5:23 ` [PATCH 12/15] powerpc/powernv/sriov: De-indent setup and teardown Oliver O'Halloran
2020-07-15 4:00 ` Alexey Kardashevskiy
2020-07-15 4:21 ` Oliver O'Halloran
2020-07-15 4:41 ` Alexey Kardashevskiy
2020-07-15 4:46 ` Oliver O'Halloran
2020-07-15 4:58 ` Alexey Kardashevskiy
2020-07-10 5:23 ` [PATCH 13/15] powerpc/powernv/sriov: Move M64 BAR allocation into a helper Oliver O'Halloran
2020-07-15 4:02 ` Alexey Kardashevskiy
2020-07-10 5:23 ` [PATCH 14/15] powerpc/powernv/sriov: Refactor M64 BAR setup Oliver O'Halloran
2020-07-15 4:50 ` Alexey Kardashevskiy
2020-07-10 5:23 ` [PATCH 15/15] powerpc/powernv/sriov: Make single PE mode a per-BAR setting Oliver O'Halloran
2020-07-15 5:24 ` Alexey Kardashevskiy
2020-07-15 6:16 ` Oliver O'Halloran
2020-07-15 8:00 ` Alexey Kardashevskiy
2020-07-22 5:39 ` Oliver O'Halloran
2020-07-22 10:06 ` Alexey Kardashevskiy
2020-07-24 3:40 ` Oliver O'Halloran
2020-07-10 6:45 ` PowerNV PCI & SR-IOV cleanups Christoph Hellwig
2020-07-10 12:45 ` Oliver O'Halloran
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=833e07b0-3c55-ef51-8649-a5e244f72560@ozlabs.ru \
--to=aik@ozlabs.ru \
--cc=linuxppc-dev@lists.ozlabs.org \
--cc=oohall@gmail.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).