From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
To: Tianyu Lan <ltykernel@gmail.com>
Cc: kys@microsoft.com, haiyangz@microsoft.com,
	sthemmin@microsoft.com, wei.liu@kernel.org, tglx@linutronix.de,
	mingo@redhat.com, bp@alien8.de, x86@kernel.org, hpa@zytor.com,
	arnd@arndb.de, akpm@linux-foundation.org,
	gregkh@linuxfoundation.org, hch@lst.de, m.szyprowski@samsung.com,
	robin.murphy@arm.com, joro@8bytes.org, will@kernel.org,
	davem@davemloft.net, kuba@kernel.org, jejb@linux.ibm.com,
	martin.petersen@oracle.com, Tianyu Lan <Tianyu.Lan@microsoft.com>,
	iommu@lists.linux-foundation.org, linux-arch@vger.kernel.org,
	linux-hyperv@vger.kernel.org, linux-kernel@vger.kernel.org,
	linux-mm@kvack.org, linux-scsi@vger.kernel.org,
	netdev@vger.kernel.org, vkuznets@redhat.com,
	thomas.lendacky@amd.com, brijesh.singh@amd.com,
	sunilmut@microsoft.com
Subject: Re: [Resend RFC PATCH V2 07/12] HV/Vmbus: Initialize VMbus ring buffer for Isolation VM
Date: Thu, 15 Apr 2021 16:24:15 -0400	[thread overview]
Message-ID: <YHig78Xra5tEQhMD@dhcp-10-154-102-149.vpn.oracle.com> (raw)
In-Reply-To: <20210414144945.3460554-8-ltykernel@gmail.com>

On Wed, Apr 14, 2021 at 10:49:40AM -0400, Tianyu Lan wrote:
> From: Tianyu Lan <Tianyu.Lan@microsoft.com>
> 
> The VMbus ring buffer is shared with the host and needs to be
> accessed via the extra address space of an Isolation VM with SNP
> support. This patch maps the ring buffer address in the extra
> address space via ioremap(). The HV host

Why do you need to use ioremap()? Why not just use vmap?


> visibility hvcall smears data in the ring buffer, so reset the
> ring buffer memory to zero after calling the visibility hvcall.

So you are exposing these two:
 EXPORT_SYMBOL_GPL(get_vm_area);
 EXPORT_SYMBOL_GPL(ioremap_page_range);

But if you used vmap, wouldn't you get the same thing for free?
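
Something along these lines is what I have in mind -- an untested
sketch only. Since there are no struct pages above the shared GPA
boundary it would presumably have to be vmap_pfn() rather than plain
vmap(); the shared_gpa_boundary offset, the PAGE_KERNEL protection and
the hv_ring_map_snp() name are my assumptions taken from this patch,
not anything I have verified on Hyper-V (and I think it would need a
VMAP_PFN select):

/* Needs <linux/mm.h>, <linux/slab.h>, <linux/vmalloc.h>, <asm/mshyperv.h>. */
static int hv_ring_map_snp(struct hv_ring_buffer_info *ring_info,
			   struct page *pages, u32 page_cnt)
{
	unsigned long *pfns;
	unsigned long base_pfn;
	int i;

	/* PFN of the ring buffer, offset above the shared GPA boundary. */
	base_pfn = page_to_pfn(pages) +
		   (ms_hyperv.shared_gpa_boundary >> PAGE_SHIFT);

	/*
	 * Same wraparound layout as hv_ringbuffer_init(): the header
	 * page once, then the data pages mapped twice.
	 */
	pfns = kcalloc(page_cnt * 2 - 1, sizeof(*pfns), GFP_KERNEL);
	if (!pfns)
		return -ENOMEM;

	pfns[0] = base_pfn;
	for (i = 0; i < 2 * (page_cnt - 1); i++)
		pfns[i + 1] = base_pfn + i % (page_cnt - 1) + 1;

	ring_info->ring_buffer = (struct hv_ring_buffer *)
		vmap_pfn(pfns, page_cnt * 2 - 1, PAGE_KERNEL);
	kfree(pfns);

	return ring_info->ring_buffer ? 0 : -ENOMEM;
}

That would keep get_vm_area()/ioremap_page_range() private to mm/ and
give you the wraparound mapping in one call instead of stitching it
together from two ioremap_page_range() ranges.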

> 
> Signed-off-by: Tianyu Lan <Tianyu.Lan@microsoft.com>
> ---
>  drivers/hv/channel.c      | 10 +++++
>  drivers/hv/hyperv_vmbus.h |  2 +
>  drivers/hv/ring_buffer.c  | 83 +++++++++++++++++++++++++++++----------
>  mm/ioremap.c              |  1 +
>  mm/vmalloc.c              |  1 +
>  5 files changed, 76 insertions(+), 21 deletions(-)
> 
> diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
> index 407b74d72f3f..4a9fb7ad4c72 100644
> --- a/drivers/hv/channel.c
> +++ b/drivers/hv/channel.c
> @@ -634,6 +634,16 @@ static int __vmbus_open(struct vmbus_channel *newchannel,
>  	if (err)
>  		goto error_clean_ring;
>  
> +	err = hv_ringbuffer_post_init(&newchannel->outbound,
> +				      page, send_pages);
> +	if (err)
> +		goto error_free_gpadl;
> +
> +	err = hv_ringbuffer_post_init(&newchannel->inbound,
> +				      &page[send_pages], recv_pages);
> +	if (err)
> +		goto error_free_gpadl;
> +
>  	/* Create and init the channel open message */
>  	open_info = kzalloc(sizeof(*open_info) +
>  			   sizeof(struct vmbus_channel_open_channel),
> diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
> index 0778add21a9c..d78a04ad5490 100644
> --- a/drivers/hv/hyperv_vmbus.h
> +++ b/drivers/hv/hyperv_vmbus.h
> @@ -172,6 +172,8 @@ extern int hv_synic_cleanup(unsigned int cpu);
>  /* Interface */
>  
>  void hv_ringbuffer_pre_init(struct vmbus_channel *channel);
> +int hv_ringbuffer_post_init(struct hv_ring_buffer_info *ring_info,
> +		struct page *pages, u32 page_cnt);
>  
>  int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
>  		       struct page *pages, u32 pagecnt);
> diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
> index 35833d4d1a1d..c8b0f7b45158 100644
> --- a/drivers/hv/ring_buffer.c
> +++ b/drivers/hv/ring_buffer.c
> @@ -17,6 +17,8 @@
>  #include <linux/vmalloc.h>
>  #include <linux/slab.h>
>  #include <linux/prefetch.h>
> +#include <linux/io.h>
> +#include <asm/mshyperv.h>
>  
>  #include "hyperv_vmbus.h"
>  
> @@ -188,6 +190,44 @@ void hv_ringbuffer_pre_init(struct vmbus_channel *channel)
>  	mutex_init(&channel->outbound.ring_buffer_mutex);
>  }
>  
> +int hv_ringbuffer_post_init(struct hv_ring_buffer_info *ring_info,
> +		       struct page *pages, u32 page_cnt)
> +{
> +	struct vm_struct *area;
> +	u64 physic_addr = page_to_pfn(pages) << PAGE_SHIFT;
> +	unsigned long vaddr;
> +	int err = 0;
> +
> +	if (!hv_isolation_type_snp())
> +		return 0;
> +
> +	physic_addr += ms_hyperv.shared_gpa_boundary;
> +	area = get_vm_area((2 * page_cnt - 1) * PAGE_SIZE, VM_IOREMAP);
> +	if (!area || !area->addr)
> +		return -EFAULT;
> +
> +	vaddr = (unsigned long)area->addr;
> +	err = ioremap_page_range(vaddr, vaddr + page_cnt * PAGE_SIZE,
> +			   physic_addr, PAGE_KERNEL_IO);
> +	err |= ioremap_page_range(vaddr + page_cnt * PAGE_SIZE,
> +				  vaddr + (2 * page_cnt - 1) * PAGE_SIZE,
> +				  physic_addr + PAGE_SIZE, PAGE_KERNEL_IO);
> +	if (err) {
> +		vunmap((void *)vaddr);
> +		return -EFAULT;
> +	}
> +
> +	/* Clean memory after setting host visibility. */
> +	memset((void *)vaddr, 0x00, page_cnt * PAGE_SIZE);
> +
> +	ring_info->ring_buffer = (struct hv_ring_buffer *)vaddr;
> +	ring_info->ring_buffer->read_index = 0;
> +	ring_info->ring_buffer->write_index = 0;
> +	ring_info->ring_buffer->feature_bits.value = 1;
> +
> +	return 0;
> +}
> +
>  /* Initialize the ring buffer. */
>  int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
>  		       struct page *pages, u32 page_cnt)
> @@ -197,33 +237,34 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
>  
>  	BUILD_BUG_ON((sizeof(struct hv_ring_buffer) != PAGE_SIZE));
>  
> -	/*
> -	 * First page holds struct hv_ring_buffer, do wraparound mapping for
> -	 * the rest.
> -	 */
> -	pages_wraparound = kcalloc(page_cnt * 2 - 1, sizeof(struct page *),
> -				   GFP_KERNEL);
> -	if (!pages_wraparound)
> -		return -ENOMEM;
> -
> -	pages_wraparound[0] = pages;
> -	for (i = 0; i < 2 * (page_cnt - 1); i++)
> -		pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];
> +	if (!hv_isolation_type_snp()) {
> +		/*
> +		 * First page holds struct hv_ring_buffer, do wraparound mapping for
> +		 * the rest.
> +		 */
> +		pages_wraparound = kcalloc(page_cnt * 2 - 1, sizeof(struct page *),
> +					   GFP_KERNEL);
> +		if (!pages_wraparound)
> +			return -ENOMEM;
>  
> -	ring_info->ring_buffer = (struct hv_ring_buffer *)
> -		vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);
> +		pages_wraparound[0] = pages;
> +		for (i = 0; i < 2 * (page_cnt - 1); i++)
> +			pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];
>  
> -	kfree(pages_wraparound);
> +		ring_info->ring_buffer = (struct hv_ring_buffer *)
> +			vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);
>  
> +		kfree(pages_wraparound);
>  
> -	if (!ring_info->ring_buffer)
> -		return -ENOMEM;
> +		if (!ring_info->ring_buffer)
> +			return -ENOMEM;
>  
> -	ring_info->ring_buffer->read_index =
> -		ring_info->ring_buffer->write_index = 0;
> +		ring_info->ring_buffer->read_index =
> +			ring_info->ring_buffer->write_index = 0;
>  
> -	/* Set the feature bit for enabling flow control. */
> -	ring_info->ring_buffer->feature_bits.value = 1;
> +		/* Set the feature bit for enabling flow control. */
> +		ring_info->ring_buffer->feature_bits.value = 1;
> +	}
>  
>  	ring_info->ring_size = page_cnt << PAGE_SHIFT;
>  	ring_info->ring_size_div10_reciprocal =
> diff --git a/mm/ioremap.c b/mm/ioremap.c
> index 5fa1ab41d152..d63c4ba067f9 100644
> --- a/mm/ioremap.c
> +++ b/mm/ioremap.c
> @@ -248,6 +248,7 @@ int ioremap_page_range(unsigned long addr,
>  
>  	return err;
>  }
> +EXPORT_SYMBOL_GPL(ioremap_page_range);
>  
>  #ifdef CONFIG_GENERIC_IOREMAP
>  void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot)
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index e6f352bf0498..19724a8ebcb7 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -2131,6 +2131,7 @@ struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
>  				  NUMA_NO_NODE, GFP_KERNEL,
>  				  __builtin_return_address(0));
>  }
> +EXPORT_SYMBOL_GPL(get_vm_area);
>  
>  struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
>  				const void *caller)
> -- 
> 2.25.1
> 


Thread overview: 34+ messages
2021-04-14 14:49 [Resend RFC PATCH V2 00/12] x86/Hyper-V: Add Hyper-V Isolation VM support Tianyu Lan
2021-04-14 14:49 ` [Resend RFC PATCH V2 01/12] x86/HV: Initialize GHCB page in Isolation VM Tianyu Lan
2021-04-14 14:49 ` [Resend RFC PATCH V2 02/12] x86/HV: Initialize shared memory boundary " Tianyu Lan
2021-04-14 14:49 ` [Resend RFC PATCH V2 03/12] x86/Hyper-V: Add new hvcall guest address host visibility support Tianyu Lan
2021-04-14 15:40   ` Christoph Hellwig
2021-04-15  8:13     ` Tianyu Lan
2021-04-14 14:49 ` [Resend RFC PATCH V2 04/12] HV: Add Write/Read MSR registers via ghcb Tianyu Lan
2021-04-14 15:41   ` Christoph Hellwig
2021-04-15  8:19     ` Tianyu Lan
2021-04-15 18:11   ` Konrad Rzeszutek Wilk
2021-04-14 14:49 ` [Resend RFC PATCH V2 05/12] HV: Add ghcb hvcall support for SNP VM Tianyu Lan
2021-04-14 14:49 ` [Resend RFC PATCH V2 06/12] HV/Vmbus: Add SNP support for VMbus channel initiate message Tianyu Lan
2021-04-15 18:52   ` Konrad Rzeszutek Wilk
2021-04-14 14:49 ` [Resend RFC PATCH V2 07/12] HV/Vmbus: Initialize VMbus ring buffer for Isolation VM Tianyu Lan
2021-04-15 20:24   ` Konrad Rzeszutek Wilk [this message]
2021-04-19  6:36     ` Christoph Hellwig
2021-04-14 14:49 ` [Resend RFC PATCH V2 08/12] UIO/Hyper-V: Not load UIO HV driver in the isolation VM Tianyu Lan
2021-04-14 15:42   ` Christoph Hellwig
2021-04-14 15:45   ` Greg KH
2021-04-14 16:17     ` Stephen Hemminger
2021-04-15 12:54       ` Tianyu Lan
2021-04-15 13:09     ` Tianyu Lan
2021-04-14 14:49 ` [Resend RFC PATCH V2 09/12] swiotlb: Add bounce buffer remap address setting function Tianyu Lan
2021-04-15 20:28   ` Konrad Rzeszutek Wilk
2021-04-14 14:49 ` [Resend RFC PATCH V2 10/12] HV/IOMMU: Add Hyper-V dma ops support Tianyu Lan
2021-04-14 15:47   ` Christoph Hellwig
2021-05-12 16:01     ` Tianyu Lan
2021-05-12 17:29       ` Robin Murphy
2021-05-13  3:19       ` Lu Baolu
2021-04-14 14:49 ` [Resend RFC PATCH V2 11/12] HV/Netvsc: Add Isolation VM support for netvsc driver Tianyu Lan
2021-04-14 15:50   ` Christoph Hellwig
2021-04-15  8:39     ` Tianyu Lan
2021-04-14 14:49 ` [Resend RFC PATCH V2 12/12] HV/Storvsc: Add Isolation VM support for storvsc driver Tianyu Lan
2021-04-14 15:51   ` Christoph Hellwig
