From: Andrew Murray <andrew.murray@arm.com>
To: Denis Efremov <efremov@linux.com>
Cc: Bjorn Helgaas <bhelgaas@google.com>,
linux-kernel@vger.kernel.org, linux-pci@vger.kernel.org,
kvm@vger.kernel.org, Cornelia Huck <cohuck@redhat.com>,
Alex Williamson <alex.williamson@redhat.com>
Subject: Re: [PATCH v3 17/26] vfio_pci: Loop using PCI_STD_NUM_BARS
Date: Thu, 19 Sep 2019 09:00:38 +0100 [thread overview]
Message-ID: <20190919080038.GH9720@e119886-lin.cambridge.arm.com> (raw)
In-Reply-To: <b2783460-1d70-f4f0-17fd-c7a901c41670@linux.com>
On Wed, Sep 18, 2019 at 05:31:33PM +0300, Denis Efremov wrote:
> On 9/18/19 12:17 PM, Andrew Murray wrote:
> > On Mon, Sep 16, 2019 at 11:41:49PM +0300, Denis Efremov wrote:
> >> Refactor loops to use idiomatic C style and avoid the fencepost error
> >> of using "i < PCI_STD_RESOURCE_END" when "i <= PCI_STD_RESOURCE_END"
> >> is required, e.g., commit 2f686f1d9bee ("PCI: Correct PCI_STD_RESOURCE_END
> >> usage").
> >>
> >> To iterate through all possible BARs, loop conditions changed to the
> >> *number* of BARs "i < PCI_STD_NUM_BARS", instead of the index of the last
> >> valid BAR "i <= PCI_STD_RESOURCE_END".
> >>
> >> Cc: Cornelia Huck <cohuck@redhat.com>
> >> Cc: Alex Williamson <alex.williamson@redhat.com>
> >> Signed-off-by: Denis Efremov <efremov@linux.com>
> >> ---
> >> drivers/vfio/pci/vfio_pci.c | 11 ++++++----
> >> drivers/vfio/pci/vfio_pci_config.c | 32 +++++++++++++++--------------
> >> drivers/vfio/pci/vfio_pci_private.h | 4 ++--
> >> 3 files changed, 26 insertions(+), 21 deletions(-)
> >>
> >> diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
> >> index 703948c9fbe1..cb7d220d3246 100644
> >> --- a/drivers/vfio/pci/vfio_pci.c
> >> +++ b/drivers/vfio/pci/vfio_pci.c
> >> @@ -110,13 +110,15 @@ static inline bool vfio_pci_is_vga(struct pci_dev *pdev)
> >> static void vfio_pci_probe_mmaps(struct vfio_pci_device *vdev)
> >> {
> >> struct resource *res;
> >> - int bar;
> >> + int i;
> >> struct vfio_pci_dummy_resource *dummy_res;
> >>
> >> INIT_LIST_HEAD(&vdev->dummy_resources_list);
> >>
> >> - for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
> >> - res = vdev->pdev->resource + bar;
> >> + for (i = 0; i < PCI_STD_NUM_BARS; i++) {
> >> + int bar = i + PCI_STD_RESOURCES;
> >> +
> >> + res = &vdev->pdev->resource[bar];
> >
> > Why can't we just drop PCI_STD_RESOURCES and replace it with 0. I understand
> > the abstraction here, but we don't do it elsewhere across the kernel. Is this
> > necessary?
>
> There was a discussion about this particular case:
> https://lkml.org/lkml/2019/8/12/999
>
> It was decided to save the original style for vfio drivers.
OK no problem.
Thanks,
Andrew Murray
>
> >
> > Thanks,
> >
> > Andrew Murray
> >
> >>
> >> if (!IS_ENABLED(CONFIG_VFIO_PCI_MMAP))
> >> goto no_mmap;
> >> @@ -399,7 +401,8 @@ static void vfio_pci_disable(struct vfio_pci_device *vdev)
> >>
> >> vfio_config_free(vdev);
> >>
> >> - for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
> >> + for (i = 0; i < PCI_STD_NUM_BARS; i++) {
> >> + bar = i + PCI_STD_RESOURCES;
> >> if (!vdev->barmap[bar])
> >> continue;
> >> pci_iounmap(pdev, vdev->barmap[bar]);
> >> diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
> >> index f0891bd8444c..90c0b80f8acf 100644
> >> --- a/drivers/vfio/pci/vfio_pci_config.c
> >> +++ b/drivers/vfio/pci/vfio_pci_config.c
> >> @@ -450,30 +450,32 @@ static void vfio_bar_fixup(struct vfio_pci_device *vdev)
> >> {
> >> struct pci_dev *pdev = vdev->pdev;
> >> int i;
> >> - __le32 *bar;
> >> + __le32 *vbar;
> >> u64 mask;
> >>
> >> - bar = (__le32 *)&vdev->vconfig[PCI_BASE_ADDRESS_0];
> >> + vbar = (__le32 *)&vdev->vconfig[PCI_BASE_ADDRESS_0];
> >>
> >> - for (i = PCI_STD_RESOURCES; i <= PCI_STD_RESOURCE_END; i++, bar++) {
> >> - if (!pci_resource_start(pdev, i)) {
> >> - *bar = 0; /* Unmapped by host = unimplemented to user */
> >> + for (i = 0; i < PCI_STD_NUM_BARS; i++, vbar++) {
> >> + int bar = i + PCI_STD_RESOURCES;
> >> +
> >> + if (!pci_resource_start(pdev, bar)) {
> >> + *vbar = 0; /* Unmapped by host = unimplemented to user */
> >> continue;
> >> }
> >>
> >> - mask = ~(pci_resource_len(pdev, i) - 1);
> >> + mask = ~(pci_resource_len(pdev, bar) - 1);
> >>
> >> - *bar &= cpu_to_le32((u32)mask);
> >> - *bar |= vfio_generate_bar_flags(pdev, i);
> >> + *vbar &= cpu_to_le32((u32)mask);
> >> + *vbar |= vfio_generate_bar_flags(pdev, bar);
> >>
> >> - if (*bar & cpu_to_le32(PCI_BASE_ADDRESS_MEM_TYPE_64)) {
> >> - bar++;
> >> - *bar &= cpu_to_le32((u32)(mask >> 32));
> >> + if (*vbar & cpu_to_le32(PCI_BASE_ADDRESS_MEM_TYPE_64)) {
> >> + vbar++;
> >> + *vbar &= cpu_to_le32((u32)(mask >> 32));
> >> i++;
> >> }
> >> }
> >>
> >> - bar = (__le32 *)&vdev->vconfig[PCI_ROM_ADDRESS];
> >> + vbar = (__le32 *)&vdev->vconfig[PCI_ROM_ADDRESS];
> >>
> >> /*
> >> * NB. REGION_INFO will have reported zero size if we weren't able
> >> @@ -483,14 +485,14 @@ static void vfio_bar_fixup(struct vfio_pci_device *vdev)
> >> if (pci_resource_start(pdev, PCI_ROM_RESOURCE)) {
> >> mask = ~(pci_resource_len(pdev, PCI_ROM_RESOURCE) - 1);
> >> mask |= PCI_ROM_ADDRESS_ENABLE;
> >> - *bar &= cpu_to_le32((u32)mask);
> >> + *vbar &= cpu_to_le32((u32)mask);
> >> } else if (pdev->resource[PCI_ROM_RESOURCE].flags &
> >> IORESOURCE_ROM_SHADOW) {
> >> mask = ~(0x20000 - 1);
> >> mask |= PCI_ROM_ADDRESS_ENABLE;
> >> - *bar &= cpu_to_le32((u32)mask);
> >> + *vbar &= cpu_to_le32((u32)mask);
> >> } else
> >> - *bar = 0;
> >> + *vbar = 0;
> >>
> >> vdev->bardirty = false;
> >> }
> >> diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
> >> index ee6ee91718a4..8a2c7607d513 100644
> >> --- a/drivers/vfio/pci/vfio_pci_private.h
> >> +++ b/drivers/vfio/pci/vfio_pci_private.h
> >> @@ -86,8 +86,8 @@ struct vfio_pci_reflck {
> >>
> >> struct vfio_pci_device {
> >> struct pci_dev *pdev;
> >> - void __iomem *barmap[PCI_STD_RESOURCE_END + 1];
> >> - bool bar_mmap_supported[PCI_STD_RESOURCE_END + 1];
> >> + void __iomem *barmap[PCI_STD_NUM_BARS];
> >> + bool bar_mmap_supported[PCI_STD_NUM_BARS];
> >> u8 *pci_config_map;
> >> u8 *vconfig;
> >> struct perm_bits *msi_perm;
> >> --
> >> 2.21.0
> >>
>
next prev parent reply other threads:[~2019-09-19 8:00 UTC|newest]
Thread overview: 49+ messages / expand[flat|nested] mbox.gz Atom feed top
[not found] <20190916204158.6889-1-efremov@linux.com>
2019-09-16 20:41 ` [PATCH v3 02/26] PCI: hv: Use PCI_STD_NUM_BARS Denis Efremov
2019-09-16 21:19 ` Haiyang Zhang
2019-09-26 22:05 ` Bjorn Helgaas
2019-09-27 12:43 ` Bjorn Helgaas
2019-09-27 23:40 ` [PATCH RESEND v3 00/26] Add definition for the number of standard PCI BARs Denis Efremov
2019-09-30 19:58 ` Bjorn Helgaas
2019-09-27 23:43 ` [PATCH RESEND v3 01/26] PCI: Add define " Denis Efremov
2019-09-16 20:41 ` [PATCH v3 03/26] PCI: dwc: Use PCI_STD_NUM_BARS Denis Efremov
2019-09-17 8:36 ` Gustavo Pimentel
2019-09-16 20:41 ` [PATCH v3 04/26] PCI: endpoint: " Denis Efremov
2019-09-18 9:19 ` Andrew Murray
2019-09-18 14:20 ` Denis Efremov
2019-09-16 20:41 ` [PATCH v3 05/26] misc: pci_endpoint_test: " Denis Efremov
2019-09-16 20:41 ` [PATCH v3 06/26] s390/pci: " Denis Efremov
2019-09-18 8:58 ` Andrew Murray
2019-09-18 14:26 ` Denis Efremov
2019-09-19 8:00 ` Andrew Murray
2019-09-30 19:47 ` Bjorn Helgaas
2019-09-16 20:41 ` [PATCH v3 07/26] x86/PCI: Loop using PCI_STD_NUM_BARS Denis Efremov
2019-09-16 20:41 ` [PATCH v3 08/26] alpha/PCI: Use PCI_STD_NUM_BARS Denis Efremov
2019-09-16 20:41 ` [PATCH v3 09/26] ia64: " Denis Efremov
2019-09-16 20:41 ` [PATCH v3 10/26] stmmac: pci: Loop using PCI_STD_NUM_BARS Denis Efremov
2019-09-16 20:41 ` [PATCH v3 11/26] net: dwc-xlgmac: " Denis Efremov
2019-09-16 20:41 ` [PATCH v3 12/26] ixgb: use PCI_STD_NUM_BARS Denis Efremov
2019-09-16 20:41 ` [PATCH v3 13/26] e1000: Use PCI_STD_NUM_BARS Denis Efremov
2019-09-18 9:05 ` Andrew Murray
2019-09-16 20:41 ` [PATCH v3 14/26] rapidio/tsi721: Loop using PCI_STD_NUM_BARS Denis Efremov
2019-09-16 20:41 ` [PATCH v3 15/26] efifb: " Denis Efremov
2019-09-16 20:41 ` [PATCH v3 16/26] fbmem: use PCI_STD_NUM_BARS Denis Efremov
2019-09-16 20:41 ` [PATCH v3 17/26] vfio_pci: Loop using PCI_STD_NUM_BARS Denis Efremov
2019-09-18 9:17 ` Andrew Murray
2019-09-18 14:31 ` Denis Efremov
2019-09-19 8:00 ` Andrew Murray [this message]
2019-09-16 20:41 ` [PATCH v3 18/26] scsi: pm80xx: Use PCI_STD_NUM_BARS Denis Efremov
2019-09-17 9:06 ` Jinpu Wang
2019-09-24 2:22 ` Martin K. Petersen
2019-09-24 9:44 ` Denis Efremov
2019-09-26 2:29 ` Bjorn Helgaas
2019-09-26 22:51 ` Martin K. Petersen
2019-09-16 20:41 ` [PATCH v3 19/26] ata: sata_nv: " Denis Efremov
2019-09-16 20:41 ` [PATCH v3 20/26] staging: gasket: " Denis Efremov
2019-09-16 20:41 ` [PATCH v3 21/26] serial: 8250_pci: " Denis Efremov
2019-09-16 20:41 ` [PATCH v3 22/26] pata_atp867x: " Denis Efremov
2019-09-16 20:41 ` [PATCH v3 23/26] memstick: use PCI_STD_NUM_BARS Denis Efremov
2019-09-20 7:42 ` Ulf Hansson
2019-09-20 8:05 ` Denis Efremov
2019-09-16 20:41 ` [PATCH v3 24/26] USB: core: Use PCI_STD_NUM_BARS Denis Efremov
2019-09-16 20:41 ` [PATCH v3 25/26] usb: pci-quirks: " Denis Efremov
2019-09-16 20:41 ` [PATCH v3 26/26] devres: use PCI_STD_NUM_BARS Denis Efremov
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20190919080038.GH9720@e119886-lin.cambridge.arm.com \
--to=andrew.murray@arm.com \
--cc=alex.williamson@redhat.com \
--cc=bhelgaas@google.com \
--cc=cohuck@redhat.com \
--cc=efremov@linux.com \
--cc=kvm@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-pci@vger.kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).