From: Kirti Wankhede <kwankhede@nvidia.com>
To: "Longpeng (Mike,
	Cloud Infrastructure Service Product Dept.)"
	<longpeng2@huawei.com>, <alex.williamson@redhat.com>
Cc: kevin.tian@intel.com, yi.l.liu@intel.com, yan.y.zhao@intel.com,
	felipe@nutanix.com, eskultet@redhat.com, ziye.yang@intel.com,
	Ken.Xue@amd.com, Zhengxiao.zx@Alibaba-inc.com,
	shuangtai.tst@alibaba-inc.com, qemu-devel@nongnu.org,
	dgilbert@redhat.com, pasic@linux.ibm.com, aik@ozlabs.ru,
	"Gonglei \(Arei\)" <arei.gonglei@huawei.com>,
	eauger@redhat.com, cohuck@redhat.com,
	jonathan.davies@nutanix.com, cjia@nvidia.com,
	mlevitsk@redhat.com, changpeng.liu@intel.com,
	zhi.a.wang@intel.com
Subject: Re: [PATCH v16 QEMU 04/16] vfio: Add save and load functions for VFIO PCI devices
Date: Tue, 5 May 2020 04:51:16 +0530
Message-ID: <8e0ed8dc-fd53-a440-b28d-2af35d569f66@nvidia.com>
In-Reply-To: <4b79c097-4a56-49df-2f2e-b9c58d6e406e@huawei.com>



On 4/7/2020 9:40 AM, Longpeng (Mike, Cloud Infrastructure Service Product Dept.) wrote:
> 
> 
> On 2020/3/25 5:09, Kirti Wankhede wrote:
>> These functions save and restore PCI device specific data, i.e. the
>> config space of the PCI device.
>> Save and restore were tested with both MSI and MSI-X interrupt types.
>>
>> Signed-off-by: Kirti Wankhede <kwankhede@nvidia.com>
>> Reviewed-by: Neo Jia <cjia@nvidia.com>
>> ---
>>   hw/vfio/pci.c                 | 163 ++++++++++++++++++++++++++++++++++++++++++
>>   include/hw/vfio/vfio-common.h |   2 +
>>   2 files changed, 165 insertions(+)
>>
>> diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c
>> index 6c77c12e44b9..8deb11e87ef7 100644
>> --- a/hw/vfio/pci.c
>> +++ b/hw/vfio/pci.c
>> @@ -41,6 +41,7 @@
>>   #include "trace.h"
>>   #include "qapi/error.h"
>>   #include "migration/blocker.h"
>> +#include "migration/qemu-file.h"
>>   
>>   #define TYPE_VFIO_PCI "vfio-pci"
>>   #define PCI_VFIO(obj)    OBJECT_CHECK(VFIOPCIDevice, obj, TYPE_VFIO_PCI)
>> @@ -1632,6 +1633,50 @@ static void vfio_bars_prepare(VFIOPCIDevice *vdev)
>>       }
>>   }
>>   
>> +static int vfio_bar_validate(VFIOPCIDevice *vdev, int nr)
>> +{
>> +    PCIDevice *pdev = &vdev->pdev;
>> +    VFIOBAR *bar = &vdev->bars[nr];
>> +    uint64_t addr;
>> +    uint32_t addr_lo, addr_hi = 0;
>> +
>> +    /* Skip unimplemented BARs and the upper half of 64-bit BARs. */
>> +    if (!bar->size) {
>> +        return 0;
>> +    }
>> +
>> +    addr_lo = pci_default_read_config(pdev, PCI_BASE_ADDRESS_0 + nr * 4, 4);
>> +
>> +    addr_lo = addr_lo & (bar->ioport ? PCI_BASE_ADDRESS_IO_MASK :
>> +                                       PCI_BASE_ADDRESS_MEM_MASK);
>> +    if (bar->type == PCI_BASE_ADDRESS_MEM_TYPE_64) {
>> +        addr_hi = pci_default_read_config(pdev,
>> +                                         PCI_BASE_ADDRESS_0 + (nr + 1) * 4, 4);
>> +    }
>> +
>> +    addr = ((uint64_t)addr_hi << 32) | addr_lo;
>> +
>> +    if (!QEMU_IS_ALIGNED(addr, bar->size)) {
>> +        return -EINVAL;
>> +    }
>> +
>> +    return 0;
>> +}
>> +
>> +static int vfio_bars_validate(VFIOPCIDevice *vdev)
>> +{
>> +    int i, ret;
>> +
>> +    for (i = 0; i < PCI_ROM_SLOT; i++) {
>> +        ret = vfio_bar_validate(vdev, i);
>> +        if (ret) {
>> +            error_report("vfio: BAR %d address validation failed", i);
>> +            return ret;
>> +        }
>> +    }
>> +    return 0;
>> +}
>> +
>>   static void vfio_bar_register(VFIOPCIDevice *vdev, int nr)
>>   {
>>       VFIOBAR *bar = &vdev->bars[nr];
>> @@ -2414,11 +2459,129 @@ static Object *vfio_pci_get_object(VFIODevice *vbasedev)
>>       return OBJECT(vdev);
>>   }
>>   
>> +static void vfio_pci_save_config(VFIODevice *vbasedev, QEMUFile *f)
>> +{
>> +    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
>> +    PCIDevice *pdev = &vdev->pdev;
>> +    uint16_t pci_cmd;
>> +    int i;
>> +
>> +    for (i = 0; i < PCI_ROM_SLOT; i++) {
>> +        uint32_t bar;
>> +
>> +        bar = pci_default_read_config(pdev, PCI_BASE_ADDRESS_0 + i * 4, 4);
>> +        qemu_put_be32(f, bar);
>> +    }
>> +
>> +    qemu_put_be32(f, vdev->interrupt);
>> +    if (vdev->interrupt == VFIO_INT_MSI) {
>> +        uint32_t msi_flags, msi_addr_lo, msi_addr_hi = 0, msi_data;
>> +        bool msi_64bit;
>> +
>> +        msi_flags = pci_default_read_config(pdev, pdev->msi_cap + PCI_MSI_FLAGS,
>> +                                            2);
>> +        msi_64bit = (msi_flags & PCI_MSI_FLAGS_64BIT);
>> +
>> +        msi_addr_lo = pci_default_read_config(pdev,
>> +                                         pdev->msi_cap + PCI_MSI_ADDRESS_LO, 4);
>> +        qemu_put_be32(f, msi_addr_lo);
>> +
>> +        if (msi_64bit) {
>> +            msi_addr_hi = pci_default_read_config(pdev,
>> +                                             pdev->msi_cap + PCI_MSI_ADDRESS_HI,
>> +                                             4);
>> +        }
>> +        qemu_put_be32(f, msi_addr_hi);
>> +
>> +        msi_data = pci_default_read_config(pdev,
>> +                pdev->msi_cap + (msi_64bit ? PCI_MSI_DATA_64 : PCI_MSI_DATA_32),
>> +                2);
>> +        qemu_put_be32(f, msi_data);
>> +    } else if (vdev->interrupt == VFIO_INT_MSIX) {
>> +        uint16_t offset;
>> +
>> +        /* save enable bit and maskall bit */
>> +        offset = pci_default_read_config(pdev,
>> +                                       pdev->msix_cap + PCI_MSIX_FLAGS + 1, 2);
>> +        qemu_put_be16(f, offset);
>> +        msix_save(pdev, f);
>> +    }
>> +    pci_cmd = pci_default_read_config(pdev, PCI_COMMAND, 2);
>> +    qemu_put_be16(f, pci_cmd);
>> +}
>> +
>> +static int vfio_pci_load_config(VFIODevice *vbasedev, QEMUFile *f)
>> +{
>> +    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
>> +    PCIDevice *pdev = &vdev->pdev;
>> +    uint32_t interrupt_type;
>> +    uint32_t msi_flags, msi_addr_lo, msi_addr_hi = 0, msi_data;
>> +    uint16_t pci_cmd;
>> +    bool msi_64bit;
>> +    int i, ret;
>> +
>> +    /* restore PCI BAR configuration */
>> +    pci_cmd = pci_default_read_config(pdev, PCI_COMMAND, 2);
>> +    /* Clear I/O and memory enable while the BARs are being restored. */
>> +    vfio_pci_write_config(pdev, PCI_COMMAND,
>> +                          pci_cmd & ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY), 2);
>> +    for (i = 0; i < PCI_ROM_SLOT; i++) {
>> +        uint32_t bar = qemu_get_be32(f);
>> +
>> +        vfio_pci_write_config(pdev, PCI_BASE_ADDRESS_0 + i * 4, bar, 4);
>> +    }
>> +
>> +    ret = vfio_bars_validate(vdev);
>> +    if (ret) {
>> +        return ret;
>> +    }
>> +
>> +    interrupt_type = qemu_get_be32(f);
>> +
>> +    if (interrupt_type == VFIO_INT_MSI) {
>> +        /* restore msi configuration */
>> +        msi_flags = pci_default_read_config(pdev,
>> +                                            pdev->msi_cap + PCI_MSI_FLAGS, 2);
>> +        msi_64bit = (msi_flags & PCI_MSI_FLAGS_64BIT);
>> +
>> +        vfio_pci_write_config(pdev, pdev->msi_cap + PCI_MSI_FLAGS,
>> +                              msi_flags & ~PCI_MSI_FLAGS_ENABLE, 2);
>> +
>> +        msi_addr_lo = qemu_get_be32(f);
>> +        vfio_pci_write_config(pdev, pdev->msi_cap + PCI_MSI_ADDRESS_LO,
>> +                              msi_addr_lo, 4);
>> +
>> +        msi_addr_hi = qemu_get_be32(f);
>> +        if (msi_64bit) {
>> +            vfio_pci_write_config(pdev, pdev->msi_cap + PCI_MSI_ADDRESS_HI,
>> +                                  msi_addr_hi, 4);
>> +        }
>> +        msi_data = qemu_get_be32(f);
>> +        vfio_pci_write_config(pdev,
>> +                pdev->msi_cap + (msi_64bit ? PCI_MSI_DATA_64 : PCI_MSI_DATA_32),
>> +                msi_data, 2);
>> +
>> +        vfio_pci_write_config(pdev, pdev->msi_cap + PCI_MSI_FLAGS,
>> +                              msi_flags | PCI_MSI_FLAGS_ENABLE, 2);
>> +    } else if (interrupt_type == VFIO_INT_MSIX) {
>> +        uint16_t offset = qemu_get_be16(f);
>> +
>> +        /* load enable bit and maskall bit */
>> +        vfio_pci_write_config(pdev, pdev->msix_cap + PCI_MSIX_FLAGS + 1,
>> +                              offset, 2);
>> +        msix_load(pdev, f);
> Hi Kirti, Alex
> 
> 'msix_load' here may increase the downtime. Our migration-capable device has
> 128 MSI-X interrupts and the guest OS enables all of them, so 'msix_load'
> costs a lot of time (nearly 1s), because in 'vfio_msix_vector_do_use' we need
> to disable all the old interrupts and then append a new one.
> 
> What's your opinion?
> 

This will be at the destination, so the device should not be running yet,
which means interrupts should already be disabled, right?
You only need to append the new ones.
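
If the per-vector setup cost still dominates downtime, one option worth
measuring is to program all vectors with a single VFIO_DEVICE_SET_IRQS
ioctl rather than a disable/enable cycle per vector. A rough sketch, not
part of this patch; device_fd, eventfds and nvec are assumed to be
supplied by the caller:

    #include <errno.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/vfio.h>

    static int set_msix_triggers_batched(int device_fd, int32_t *eventfds,
                                         int nvec)
    {
        struct vfio_irq_set *irq_set;
        size_t argsz = sizeof(*irq_set) + nvec * sizeof(int32_t);
        int ret;

        irq_set = calloc(1, argsz);
        if (!irq_set) {
            return -ENOMEM;
        }

        irq_set->argsz = argsz;
        irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                         VFIO_IRQ_SET_ACTION_TRIGGER;
        irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
        irq_set->start = 0;
        irq_set->count = nvec;
        memcpy(irq_set->data, eventfds, nvec * sizeof(int32_t));

        /* One ioctl attaches every vector's eventfd at once. */
        ret = ioctl(device_fd, VFIO_DEVICE_SET_IRQS, irq_set);
        free(irq_set);
        return ret < 0 ? -errno : 0;
    }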

Thanks,
Kirti

>> +    }
>> +    pci_cmd = qemu_get_be16(f);
>> +    vfio_pci_write_config(pdev, PCI_COMMAND, pci_cmd, 2);
>> +    return 0;
>> +}
>> +
>>   static VFIODeviceOps vfio_pci_ops = {
>>       .vfio_compute_needs_reset = vfio_pci_compute_needs_reset,
>>       .vfio_hot_reset_multi = vfio_pci_hot_reset_multi,
>>       .vfio_eoi = vfio_intx_eoi,
>>       .vfio_get_object = vfio_pci_get_object,
>> +    .vfio_save_config = vfio_pci_save_config,
>> +    .vfio_load_config = vfio_pci_load_config,
>>   };
>>   
>>   int vfio_populate_vga(VFIOPCIDevice *vdev, Error **errp)
>> diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
>> index 74261feaeac9..d69a7f3ae31e 100644
>> --- a/include/hw/vfio/vfio-common.h
>> +++ b/include/hw/vfio/vfio-common.h
>> @@ -120,6 +120,8 @@ struct VFIODeviceOps {
>>       int (*vfio_hot_reset_multi)(VFIODevice *vdev);
>>       void (*vfio_eoi)(VFIODevice *vdev);
>>       Object *(*vfio_get_object)(VFIODevice *vdev);
>> +    void (*vfio_save_config)(VFIODevice *vdev, QEMUFile *f);
>> +    int (*vfio_load_config)(VFIODevice *vdev, QEMUFile *f);
>>   };
>>   
>>   typedef struct VFIOGroup {
>>
> 
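
For reference, later patches in this series register SaveVMHandlers that
dispatch through these new VFIODeviceOps callbacks. A minimal sketch of the
load-side dispatch, illustrative only; the function below is a simplified
stand-in, not the series' actual migration code:

    static int vfio_load_device_config_state(QEMUFile *f, void *opaque)
    {
        VFIODevice *vbasedev = opaque;

        /* Hand the stream to the bus-specific loader (vfio-pci above). */
        if (vbasedev->ops && vbasedev->ops->vfio_load_config) {
            return vbasedev->ops->vfio_load_config(vbasedev, f);
        }
        return 0;
    }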

