QEMU-Devel Archive on lore.kernel.org
From: Alex Williamson <alex.williamson@redhat.com>
To: Yan Zhao <yan.y.zhao@intel.com>
Cc: "Zhengxiao.zx@Alibaba-inc.com" <Zhengxiao.zx@Alibaba-inc.com>,
	"Tian, Kevin" <kevin.tian@intel.com>,
	"Liu, Yi L" <yi.l.liu@intel.com>,
	"cjia@nvidia.com" <cjia@nvidia.com>,
	"kvm@vger.kernel.org" <kvm@vger.kernel.org>,
	"eskultet@redhat.com" <eskultet@redhat.com>,
	"Yang, Ziye" <ziye.yang@intel.com>,
	"qemu-devel@nongnu.org" <qemu-devel@nongnu.org>,
	"cohuck@redhat.com" <cohuck@redhat.com>,
	"shuangtai.tst@alibaba-inc.com" <shuangtai.tst@alibaba-inc.com>,
	"dgilbert@redhat.com" <dgilbert@redhat.com>,
	"Wang, Zhi A" <zhi.a.wang@intel.com>,
	"mlevitsk@redhat.com" <mlevitsk@redhat.com>,
	"pasic@linux.ibm.com" <pasic@linux.ibm.com>,
	"aik@ozlabs.ru" <aik@ozlabs.ru>,
	Kirti Wankhede <kwankhede@nvidia.com>,
	"eauger@redhat.com" <eauger@redhat.com>,
	"felipe@nutanix.com" <felipe@nutanix.com>,
	"jonathan.davies@nutanix.com" <jonathan.davies@nutanix.com>,
	"Liu, Changpeng" <changpeng.liu@intel.com>,
	"Ken.Xue@amd.com" <Ken.Xue@amd.com>
Subject: Re: [PATCH v16 Kernel 4/7] vfio iommu: Implementation of ioctl for dirty pages tracking.
Date: Mon, 30 Mar 2020 14:47:20 -0600
Message-ID: <20200330144720.18acf66d@w520.home>
In-Reply-To: <20200330020708.GB30683@joy-OptiPlex-7040>

On Sun, 29 Mar 2020 22:07:08 -0400
Yan Zhao <yan.y.zhao@intel.com> wrote:

> On Fri, Mar 27, 2020 at 01:07:38PM +0800, Kirti Wankhede wrote:
> > 
> > 
> > On 3/27/2020 6:00 AM, Yan Zhao wrote:  
> > > On Fri, Mar 27, 2020 at 05:39:01AM +0800, Kirti Wankhede wrote:  
> > >>
> > >>
> > >> On 3/25/2020 7:41 AM, Yan Zhao wrote:  
> > >>> On Wed, Mar 25, 2020 at 05:18:52AM +0800, Kirti Wankhede wrote:  
> > >>>> VFIO_IOMMU_DIRTY_PAGES ioctl performs three operations:
> > >>>> - Start dirty pages tracking while migration is active
> > >>>> - Stop dirty pages tracking.
> > >>>> - Get dirty pages bitmap. It is the user space application's responsibility to
> > >>>>     copy the content of dirty pages from source to destination during migration.
> > >>>>
> > >>>> To prevent DoS attack, memory for bitmap is allocated per vfio_dma
> > >>>> structure. Bitmap size is calculated considering smallest supported page
> > >>>> size. Bitmap is allocated for all vfio_dmas when dirty logging is enabled
> > >>>>
> > >>>> Bitmap is populated for already pinned pages when bitmap is allocated for
> > >>>> a vfio_dma with the smallest supported page size. Update bitmap from
> > >>>> pinning functions when tracking is enabled. When user application queries
> > >>>> bitmap, check if the requested page size is the same as the page size used to
> > >>>> populate the bitmap. If it is equal, copy the bitmap; if not, return an
> > >>>> error.
> > >>>>
> > >>>> Signed-off-by: Kirti Wankhede <kwankhede@nvidia.com>
> > >>>> Reviewed-by: Neo Jia <cjia@nvidia.com>
> > >>>> ---
> > >>>>    drivers/vfio/vfio_iommu_type1.c | 266 +++++++++++++++++++++++++++++++++++++++-
> > >>>>    1 file changed, 260 insertions(+), 6 deletions(-)
> > >>>>
> > >>>> diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
> > >>>> index 70aeab921d0f..874a1a7ae925 100644
> > >>>> --- a/drivers/vfio/vfio_iommu_type1.c
> > >>>> +++ b/drivers/vfio/vfio_iommu_type1.c
> > >>>> @@ -71,6 +71,7 @@ struct vfio_iommu {
> > >>>>    	unsigned int		dma_avail;
> > >>>>    	bool			v2;
> > >>>>    	bool			nesting;
> > >>>> +	bool			dirty_page_tracking;
> > >>>>    };
> > >>>>    
> > >>>>    struct vfio_domain {
> > >>>> @@ -91,6 +92,7 @@ struct vfio_dma {
> > >>>>    	bool			lock_cap;	/* capable(CAP_IPC_LOCK) */
> > >>>>    	struct task_struct	*task;
> > >>>>    	struct rb_root		pfn_list;	/* Ex-user pinned pfn list */
> > >>>> +	unsigned long		*bitmap;
> > >>>>    };
> > >>>>    
> > >>>>    struct vfio_group {
> > >>>> @@ -125,7 +127,21 @@ struct vfio_regions {
> > >>>>    #define IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu)	\
> > >>>>    					(!list_empty(&iommu->domain_list))
> > >>>>    
> > >>>> +#define DIRTY_BITMAP_BYTES(n)	(ALIGN(n, BITS_PER_TYPE(u64)) / BITS_PER_BYTE)
> > >>>> +
> > >>>> +/*
> > >>>> + * Input argument of number of bits to bitmap_set() is unsigned integer, which
> > >>>> + * further casts to signed integer for unaligned multi-bit operation,
> > >>>> + * __bitmap_set().
> > >>>> + * Then maximum bitmap size supported is 2^31 bits divided by 2^3 bits/byte,
> > >>>> + * that is 2^28 (256 MB) which maps to 2^31 * 2^12 = 2^43 (8TB) on 4K page
> > >>>> + * system.
> > >>>> + */
> > >>>> +#define DIRTY_BITMAP_PAGES_MAX	(uint64_t)(INT_MAX - 1)
> > >>>> +#define DIRTY_BITMAP_SIZE_MAX	 DIRTY_BITMAP_BYTES(DIRTY_BITMAP_PAGES_MAX)
> > >>>> +
> > >>>>    static int put_pfn(unsigned long pfn, int prot);
> > >>>> +static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu);
> > >>>>    
> > >>>>    /*
> > >>>>     * This code handles mapping and unmapping of user data buffers
> > >>>> @@ -175,6 +191,77 @@ static void vfio_unlink_dma(struct vfio_iommu *iommu, struct vfio_dma *old)
> > >>>>    	rb_erase(&old->node, &iommu->dma_list);
> > >>>>    }
> > >>>>    
> > >>>> +
> > >>>> +static int vfio_dma_bitmap_alloc(struct vfio_dma *dma, uint64_t pgsize)
> > >>>> +{
> > >>>> +	uint64_t npages = dma->size / pgsize;
> > >>>> +
> > >>>> +	if (npages > DIRTY_BITMAP_PAGES_MAX)
> > >>>> +		return -EINVAL;
> > >>>> +
> > >>>> +	dma->bitmap = kvzalloc(DIRTY_BITMAP_BYTES(npages), GFP_KERNEL);
> > >>>> +	if (!dma->bitmap)
> > >>>> +		return -ENOMEM;
> > >>>> +
> > >>>> +	return 0;
> > >>>> +}
> > >>>> +
> > >>>> +static void vfio_dma_bitmap_free(struct vfio_dma *dma)
> > >>>> +{
> > >>>> +	kfree(dma->bitmap);
> > >>>> +	dma->bitmap = NULL;
> > >>>> +}
> > >>>> +
> > >>>> +static void vfio_dma_populate_bitmap(struct vfio_dma *dma, uint64_t pgsize)
> > >>>> +{
> > >>>> +	struct rb_node *p;
> > >>>> +
> > >>>> +	if (RB_EMPTY_ROOT(&dma->pfn_list))
> > >>>> +		return;
> > >>>> +
> > >>>> +	for (p = rb_first(&dma->pfn_list); p; p = rb_next(p)) {
> > >>>> +		struct vfio_pfn *vpfn = rb_entry(p, struct vfio_pfn, node);
> > >>>> +
> > >>>> +		bitmap_set(dma->bitmap, (vpfn->iova - dma->iova) / pgsize, 1);
> > >>>> +	}
> > >>>> +}
> > >>>> +
> > >>>> +static int vfio_dma_bitmap_alloc_all(struct vfio_iommu *iommu, uint64_t pgsize)
> > >>>> +{
> > >>>> +	struct rb_node *n = rb_first(&iommu->dma_list);
> > >>>> +
> > >>>> +	for (; n; n = rb_next(n)) {
> > >>>> +		struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
> > >>>> +		int ret;
> > >>>> +
> > >>>> +		ret = vfio_dma_bitmap_alloc(dma, pgsize);
> > >>>> +		if (ret) {
> > >>>> +			struct rb_node *p = rb_prev(n);
> > >>>> +
> > >>>> +			for (; p; p = rb_prev(p)) {
> > >>>> +				struct vfio_dma *dma = rb_entry(p,
> > >>>> +							struct vfio_dma, node);
> > >>>> +
> > >>>> +				vfio_dma_bitmap_free(dma);
> > >>>> +			}
> > >>>> +			return ret;
> > >>>> +		}
> > >>>> +		vfio_dma_populate_bitmap(dma, pgsize);
> > >>>> +	}
> > >>>> +	return 0;
> > >>>> +}
> > >>>> +
> > >>>> +static void vfio_dma_bitmap_free_all(struct vfio_iommu *iommu)
> > >>>> +{
> > >>>> +	struct rb_node *n = rb_first(&iommu->dma_list);
> > >>>> +
> > >>>> +	for (; n; n = rb_next(n)) {
> > >>>> +		struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
> > >>>> +
> > >>>> +		vfio_dma_bitmap_free(dma);
> > >>>> +	}
> > >>>> +}
> > >>>> +
> > >>>>    /*
> > >>>>     * Helper Functions for host iova-pfn list
> > >>>>     */
> > >>>> @@ -567,6 +654,18 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
> > >>>>    			vfio_unpin_page_external(dma, iova, do_accounting);
> > >>>>    			goto pin_unwind;
> > >>>>    		}
> > >>>> +
> > >>>> +		if (iommu->dirty_page_tracking) {
> > >>>> +			unsigned long pgshift =
> > >>>> +					 __ffs(vfio_pgsize_bitmap(iommu));
> > >>>> +
> > >>>> +			/*
> > >>>> +			 * Bitmap populated with the smallest supported page
> > >>>> +			 * size
> > >>>> +			 */
> > >>>> +			bitmap_set(dma->bitmap,
> > >>>> +				   (vpfn->iova - dma->iova) >> pgshift, 1);
> > >>>> +		}
> > >>>>    	}
> > >>>>    
> > >>>>    	ret = i;
> > >>>> @@ -801,6 +900,7 @@ static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
> > >>>>    	vfio_unmap_unpin(iommu, dma, true);
> > >>>>    	vfio_unlink_dma(iommu, dma);
> > >>>>    	put_task_struct(dma->task);
> > >>>> +	vfio_dma_bitmap_free(dma);
> > >>>>    	kfree(dma);
> > >>>>    	iommu->dma_avail++;
> > >>>>    }
> > >>>> @@ -831,6 +931,57 @@ static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
> > >>>>    	return bitmap;
> > >>>>    }
> > >>>>    
> > >>>> +static int vfio_iova_dirty_bitmap(struct vfio_iommu *iommu, dma_addr_t iova,
> > >>>> +				  size_t size, uint64_t pgsize,
> > >>>> +				  u64 __user *bitmap)
> > >>>> +{
> > >>>> +	struct vfio_dma *dma;
> > >>>> +	unsigned long pgshift = __ffs(pgsize);
> > >>>> +	unsigned int npages, bitmap_size;
> > >>>> +
> > >>>> +	dma = vfio_find_dma(iommu, iova, 1);
> > >>>> +
> > >>>> +	if (!dma)
> > >>>> +		return -EINVAL;
> > >>>> +
> > >>>> +	if (dma->iova != iova || dma->size != size)
> > >>>> +		return -EINVAL;
> > >>>> +  
> > >>> I'm still not sure whether this is good practice.
> > >>> I looked at the QEMU implementation.
> > >>> QEMU just iterates over the whole IOVA address space;
> > >>> it needs to find an IOTLB entry for each IOVA:
> > >>> (1) if it can find an IOTLB entry for an IOVA, do the DIRTY_PAGES ioctl and
> > >>> increment the IOVA by (iotlb.addr_mask + 1);
> > >>>
> > >>> (2) if no existing IOTLB entry is found, imrc->translate needs to go search the
> > >>> shadow page table and try to generate one.
> > >>> If that still fails (the most probable case, as the IOMMU only maps a small part
> > >>> of its address space), increment the IOVA by 1 page.
> > >>>
> > >>> So, if the address space width is 39 bits and only one page is mapped, you
> > >>> still have to translate an IOVA around 2^27 times for each query. Isn't that
> > >>> too inefficient?
> > >>>  
> > >>
> > >> This is the QEMU-side implementation; let's discuss it on the QEMU patches.
> > >>  
> > > But the kernel has to support it first, right?
> > >   
> > 
> > The shadow page table will be in QEMU (?); as long as we support map and
> Yes, shadow page table in QEMU.
> 
> > unmap in kernel space, the QEMU part of the changes should work. That shouldn't
> > block the kernel-side patches.
> I'm not sure whether this assertion is right :)
> I just wanted to raise the issue.

And I think we need to make sure that we have a path to an efficient
userspace implementation.  Walking a shadow page table to unmap and
collect individual dirty bits is clearly better than blindly walking
every page of a 39 bit address space, but it would be an obvious
improvement if the QEMU code could zap entire levels at once.
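
Roughly, the walk Yan describes looks like the sketch below.  This is
illustrative only, not existing QEMU code: the query helper is a made-up
placeholder and the fixed 4K step stands in for the smallest supported page
size.

#include "qemu/osdep.h"
#include "exec/memory.h"

/* Hypothetical wrapper around the DIRTY_PAGES ioctl; not existing code. */
void vfio_query_dirty_range(hwaddr iova, hwaddr size);

/* Sketch of the per-IOVA walk described above. */
static void vfio_sync_viommu_dirty_sketch(IOMMUMemoryRegion *iommu_mr,
                                          hwaddr as_size)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
    const hwaddr pgsize = 4096;   /* smallest page size, for illustration */
    hwaddr iova = 0;

    while (iova < as_size) {
        /* May walk the guest/shadow page table if nothing is cached */
        IOMMUTLBEntry iotlb = imrc->translate(iommu_mr, iova, IOMMU_NONE, 0);

        if (iotlb.perm != IOMMU_NONE) {
            /* Mapped: query dirty bits, then skip the whole entry at once
             * (assuming iova stays aligned to the entry size) */
            vfio_query_dirty_range(iotlb.iova, iotlb.addr_mask + 1);
            iova += iotlb.addr_mask + 1;
        } else {
            /*
             * Unmapped (the common case in a 39-bit space): advance one
             * page at a time.  This is the inefficiency under discussion;
             * skipping a whole empty page-table level here would help.
             */
            iova += pgsize;
        }
    }
}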

The issues we raised before about combining multiple bitmaps are not
insurmountable; they're just complicated and potentially something that
we can defer for the initial implementation.  We can change the
implementation of the dirty bitmap without affecting the user, but we
would need to use another flag bit of the IOMMU_GET_INFO ioctl or expose
it via the CHECK_EXTENSION ioctl to indicate multi-mapping dirty bitmap
support.  In fact, the flags field on IOMMU_GET_INFO so far only
describes fields returned by the ioctl, not support for other ioctls.
Would the CHECK_EXTENSION ioctl be a better choice for both exposing
this initial support as well as a v2 when we have multi-mapping?
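
If we went the CHECK_EXTENSION route, the userspace probe might look like the
sketch below; the extension ID is invented purely for illustration and is not
a real UAPI value:

#include <stdbool.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Invented value for illustration only; not a real VFIO extension. */
#define VFIO_DIRTY_LOG_MULTI_MAPPING	42

static bool dirty_log_multi_mapping_supported(int container_fd)
{
	/* VFIO_CHECK_EXTENSION returns a positive value when supported. */
	return ioctl(container_fd, VFIO_CHECK_EXTENSION,
		     VFIO_DIRTY_LOG_MULTI_MAPPING) > 0;
}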

> > >>> So, IMHO, why we could not just save an rb tree specific for dirty pages, then generate
> > >>> a bitmap for each query?  

I'm curious to know how this might work.  I was strongly encouraging
that we must have a bitmap mechanism that supports copy_to_user(),
otherwise we don't have an efficient way to push the bits to the user.
We'd need to copy_from_user() a chunk of their bitmap, set bits, then
push it back with copy_to_user().  If you're thinking of an rb-tree, do
we have a node per dirty page?  The overhead of that seems excessive.
I think we could support multi-mapping dirty bits using __bitmap_and(),
__bitmap_or(), and __bitmap_shift_left/right() to extract the unaligned
portion of the bitmap, OR it into a previous segment, then shift the
remainder of the bitmap so that we can use copy_to_user() with it.
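
As a minimal kernel-side sketch of that stitching, assuming for brevity a
single staging buffer covering the whole requested range and only mappings
fully contained in that range (the function below is illustrative only, not
part of this patch):

/*
 * Illustrative sketch, not this patch's code: merge the dirty bits of
 * every vfio_dma inside a requested range into one staging bitmap and
 * push the result to userspace with a single copy_to_user().
 */
static int vfio_collect_dirty_sketch(struct vfio_iommu *iommu,
				     dma_addr_t range_iova,
				     unsigned long range_npages,
				     unsigned long pgshift,
				     u64 __user *ubitmap)
{
	unsigned long *staging, *widened, *shifted;
	struct rb_node *n;
	int ret = 0;

	staging = kvzalloc(DIRTY_BITMAP_BYTES(range_npages), GFP_KERNEL);
	widened = kvzalloc(DIRTY_BITMAP_BYTES(range_npages), GFP_KERNEL);
	shifted = kvzalloc(DIRTY_BITMAP_BYTES(range_npages), GFP_KERNEL);
	if (!staging || !widened || !shifted) {
		ret = -ENOMEM;
		goto out;
	}

	for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
		struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
		unsigned long offset, dma_npages = dma->size >> pgshift;

		/* sketch: only handle mappings fully inside the range */
		if (dma->iova < range_iova ||
		    dma->iova + dma->size >
		    range_iova + ((dma_addr_t)range_npages << pgshift))
			continue;

		/* bit position of this mapping within the requested range */
		offset = (dma->iova - range_iova) >> pgshift;

		/* widen to the range size, shift into place, OR into staging */
		bitmap_zero(widened, range_npages);
		bitmap_copy(widened, dma->bitmap, dma_npages);
		bitmap_shift_left(shifted, widened, offset, range_npages);
		bitmap_or(staging, staging, shifted, range_npages);
	}

	if (copy_to_user(ubitmap, staging, DIRTY_BITMAP_BYTES(range_npages)))
		ret = -EFAULT;
out:
	kvfree(shifted);
	kvfree(widened);
	kvfree(staging);
	return ret;
}

A real version would presumably work in bounded chunks rather than staging the
whole range, and would also handle mappings that straddle the range boundaries.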

> > >> This is looping back to the implementation in the v10 - v12 versions. There
> > >> are problems, discussed during v10 to v12 of the patches, with this approach:
> > >> - populating the dirty bitmap at query time adds more CPU cycles.
> > >> - if we want to save those CPU cycles, dirty pages need to be tracked when
> > >> they are pinned or dirtied by the CPU, which is why the per-vfio_dma
> > >> bitmap was introduced. If ranges are not vfio_dma aligned, then copying the
> > >> bitmap to user space becomes complicated and inefficient.
> > >>
> > >> So we decided to go with the approach implemented here.
> > > 
> > > I checked v12; it's not like what I'm proposing.
> > > In v12, bitmaps are generated per vfio_dma, and combining the
> > > bitmaps is required in order to generate one big bitmap suitable for the
> > > dirty query. That can cause problems when the offsets don't align.
> > > What I propose here is to generate an rb tree orthogonal to the tree
> > > of vfio_dmas.
> > > 
> > > As to saving CPU cycles, I don't think iterating/translating page by page
> > > achieves that purpose.
> > > 
> > >   
> > 
> > 
> >   
> > >   
> > >>>  
> > >>>> +	npages = dma->size >> pgshift;
> > >>>> +	bitmap_size = DIRTY_BITMAP_BYTES(npages);
> > >>>> +
> > >>>> +	/* mark all pages dirty if all pages are pinned and mapped. */
> > >>>> +	if (dma->iommu_mapped)
> > >>>> +		bitmap_set(dma->bitmap, 0, npages);
> > >>>> +
> > >>>> +	if (copy_to_user((void __user *)bitmap, dma->bitmap, bitmap_size))
> > >>>> +		return -EFAULT;
> > >>>> +
> > >>>> +	/*
> > >>>> +	 * Re-populate bitmap to include all pinned pages which are considered
> > >>>> +	 * as dirty but exclude pages which are unpinned and pages which are
> > >>>> +	 * marked dirty by vfio_dma_rw()
> > >>>> +	 */
> > >>>> +	bitmap_clear(dma->bitmap, 0, npages);
> > >>>> +	vfio_dma_populate_bitmap(dma, pgsize);  
> > >>> Will this also repopulate the bitmap for pinned pages set by pass-through
> > >>> devices in patch 07?
> > >>>  
> > >>
> > >> If the pass-through device's driver pins pages using vfio_pin_pages and all
> > >> devices in the group pin pages through vfio_pin_pages, then
> > >> iommu->pinned_page_dirty_scope is set true and the bitmap is repopulated.
> > >>
> > >>  
> > > Pass-through devices already have all guest memory pinned; they would have
> > > no reason to call vfio_pin_pages unless attempting to mark pages dirty.
> > > So if one calls vfio_pin_pages, it means "the pages have been accessed, please
> > > mark them dirty, and feel free to clear them once you've collected them",  
> > 
> > If you look at the vfio_dma_populate_bitmap() function: if vfio_pin_pages
> > is called, the dma->pfn_list rb_tree will be non-empty and the bitmap gets
> > populated according to the pinned pages.
> >   
> > > not "the pages will be accesses, please mark them dirty continuously"
> > >  
> > 
> > If vfio_pin_pages is not called, dma->pfn_list is empty and it returns
> > early.
> > Suppose there are 2 devices in the group, one an IOMMU-backed device
> > and the other a non-IOMMU mdev device. In that case, all pages are pinned,
> > iommu->pinned_page_dirty_scope is false, but dma->pfn_list is also not
> > empty since the non-IOMMU backed device pins pages using the external API. We
> > still have to populate the bitmap according to dma->pfn_list here, because
> > in the pre-copy phase, on the first bitmap query, the IOMMU-backed device
> > might pin pages using the external API - with that, iommu->pinned_page_dirty_scope
> > will get updated to 'true', which means during the next iteration only pages
> > pinned via the external API are reported.
> >  
> OK, I previously thought vfio_pin_pages for an IOMMU-backed device was meant to
> mark pages dirty after it had write access to them. It looks like your intention
> here is to presume pinned pages are dirty, so you have to keep re-filling them
> until they are unpinned.
> Maybe you can leave it as is, and we can add a mark-dirty interface later for
> the purpose I described above (mark dirty after write access).

Yes, just as with non-iommu backed devices, pinned pages are assumed to
be continuously dirtied.  A pin followed by unpin could be used by a
driver to indicate a transient dirty page, but I think we'd want to
think about a lower overhead interface when we have such a driver.
We'd essentially need vfio_dma_rw with only the portion that sets the
dirty bit on write.
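As a purely hypothetical sketch (nothing like this exists in the series; the
helper name, the locking, and the alignment and single-mapping assumptions are
all mine), it might boil down to a bitmap_set() over the written range:

/*
 * Purely hypothetical interface, not part of this series: a vendor driver
 * reports that it wrote [iova, iova + len) so the pages are marked dirty
 * once, without staying pinned for the rest of migration.  Assumes iova
 * and len are aligned to the smallest supported page size and that a
 * single vfio_dma covers the range.
 */
static void vfio_iommu_type1_mark_dirty(struct vfio_iommu *iommu,
					dma_addr_t iova, size_t len)
{
	struct vfio_dma *dma;
	unsigned long pgshift;

	mutex_lock(&iommu->lock);

	if (!iommu->dirty_page_tracking)
		goto unlock;

	pgshift = __ffs(vfio_pgsize_bitmap(iommu));

	dma = vfio_find_dma(iommu, iova, len);
	if (dma && dma->bitmap)
		bitmap_set(dma->bitmap, (iova - dma->iova) >> pgshift,
			   len >> pgshift);
unlock:
	mutex_unlock(&iommu->lock);
}

Thanks,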

Alex


