From: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
To: Elliot Berman <quic_eberman@quicinc.com>,
	Alex Elder <elder@linaro.org>,
	Prakruthi Deepak Heragu <quic_pheragu@quicinc.com>
Cc: Murali Nalajala <quic_mnalajal@quicinc.com>,
	Trilok Soni <quic_tsoni@quicinc.com>,
	Srivatsa Vaddagiri <quic_svaddagi@quicinc.com>,
	Carl van Schaik <quic_cvanscha@quicinc.com>,
	Dmitry Baryshkov <dmitry.baryshkov@linaro.org>,
	Bjorn Andersson <andersson@kernel.org>,
	Konrad Dybcio <konrad.dybcio@linaro.org>,
	Arnd Bergmann <arnd@arndb.de>,
	Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
	Rob Herring <robh+dt@kernel.org>,
	Krzysztof Kozlowski <krzysztof.kozlowski+dt@linaro.org>,
	Jonathan Corbet <corbet@lwn.net>,
	Bagas Sanjaya <bagasdotme@gmail.com>,
	Catalin Marinas <catalin.marinas@arm.com>,
	Jassi Brar <jassisinghbrar@gmail.com>,
	linux-arm-msm@vger.kernel.org, devicetree@vger.kernel.org,
	linux-kernel@vger.kernel.org, linux-doc@vger.kernel.org,
	linux-arm-kernel@lists.infradead.org
Subject: Re: [PATCH v10 12/26] gunyah: vm_mgr: Add/remove user memory regions
Date: Tue, 21 Feb 2023 12:28:53 +0000	[thread overview]
Message-ID: <db397198-d079-faa5-691f-c4b06822d2f3@linaro.org> (raw)
In-Reply-To: <20230214212417.3315422-1-quic_eberman@quicinc.com>



On 14/02/2023 21:24, Elliot Berman wrote:
> 
> When launching a virtual machine, Gunyah userspace allocates memory for
> the guest and informs Gunyah about these memory regions through
> SET_USER_MEMORY_REGION ioctl.
> 
> Co-developed-by: Prakruthi Deepak Heragu <quic_pheragu@quicinc.com>
> Signed-off-by: Prakruthi Deepak Heragu <quic_pheragu@quicinc.com>
> Signed-off-by: Elliot Berman <quic_eberman@quicinc.com>
> ---
>   drivers/virt/gunyah/Makefile    |   2 +-
>   drivers/virt/gunyah/vm_mgr.c    |  44 ++++++
>   drivers/virt/gunyah/vm_mgr.h    |  25 ++++
>   drivers/virt/gunyah/vm_mgr_mm.c | 235 ++++++++++++++++++++++++++++++++
>   include/uapi/linux/gunyah.h     |  33 +++++
>   5 files changed, 338 insertions(+), 1 deletion(-)
>   create mode 100644 drivers/virt/gunyah/vm_mgr_mm.c
> 
> diff --git a/drivers/virt/gunyah/Makefile b/drivers/virt/gunyah/Makefile
> index 03951cf82023..ff8bc4925392 100644
> --- a/drivers/virt/gunyah/Makefile
> +++ b/drivers/virt/gunyah/Makefile
> @@ -2,5 +2,5 @@
>   
>   obj-$(CONFIG_GUNYAH) += gunyah.o
>   
> -gunyah_rsc_mgr-y += rsc_mgr.o rsc_mgr_rpc.o vm_mgr.o
> +gunyah_rsc_mgr-y += rsc_mgr.o rsc_mgr_rpc.o vm_mgr.o vm_mgr_mm.o
>   obj-$(CONFIG_GUNYAH) += gunyah_rsc_mgr.o
> diff --git a/drivers/virt/gunyah/vm_mgr.c b/drivers/virt/gunyah/vm_mgr.c
> index fd890a57172e..84102bac03cc 100644
> --- a/drivers/virt/gunyah/vm_mgr.c
> +++ b/drivers/virt/gunyah/vm_mgr.c
> @@ -18,8 +18,16 @@
>   static void gh_vm_free(struct work_struct *work)
>   {
>   	struct gh_vm *ghvm = container_of(work, struct gh_vm, free_work);
> +	struct gh_vm_mem *mapping, *tmp;
>   	int ret;
>   
> +	mutex_lock(&ghvm->mm_lock);
> +	list_for_each_entry_safe(mapping, tmp, &ghvm->memory_mappings, list) {
> +		gh_vm_mem_reclaim(ghvm, mapping);
> +		kfree(mapping);
> +	}
> +	mutex_unlock(&ghvm->mm_lock);
> +
>   	ret = gh_rm_dealloc_vmid(ghvm->rm, ghvm->vmid);
>   	if (ret)
>   		pr_warn("Failed to deallocate vmid: %d\n", ret);
> @@ -48,11 +56,46 @@ static __must_check struct gh_vm *gh_vm_alloc(struct gh_rm *rm)
>   	ghvm->vmid = vmid;
>   	ghvm->rm = rm;
>   
> +	mutex_init(&ghvm->mm_lock);
> +	INIT_LIST_HEAD(&ghvm->memory_mappings);
>   	INIT_WORK(&ghvm->free_work, gh_vm_free);
>   
>   	return ghvm;
>   }
>   
> +static long gh_vm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
> +{
> +	struct gh_vm *ghvm = filp->private_data;
> +	void __user *argp = (void __user *)arg;
> +	long r;
> +
> +	switch (cmd) {
> +	case GH_VM_SET_USER_MEM_REGION: {
> +		struct gh_userspace_memory_region region;
> +
> +		if (copy_from_user(&region, argp, sizeof(region)))
> +			return -EFAULT;
> +
> +		/* All other flag bits are reserved for future use */
> +		if (region.flags & ~(GH_MEM_ALLOW_READ | GH_MEM_ALLOW_WRITE | GH_MEM_ALLOW_EXEC |
> +			GH_MEM_LENT))
> +			return -EINVAL;
> +
> +
> +		if (region.memory_size)
> +			r = gh_vm_mem_alloc(ghvm, &region);
> +		else
> +			r = gh_vm_mem_free(ghvm, region.label);

Looks like we are repurposing GH_VM_SET_USER_MEM_REGION for both
allocation and freeing.

Should we have a corresponding GH_VM_UN_SET_USER_MEM_REGION instead for
freeing, given that label is the only relevant member of struct
gh_userspace_memory_region in the free case?

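Something along these lines, as a sketch only (the 0x2 request number
and the bare __u32 label payload are assumptions on my part, not part of
this patch):

	#define GH_VM_UNSET_USER_MEM_REGION	_IOW(GH_IOCTL_TYPE, 0x2, __u32)

and in gh_vm_ioctl():

	case GH_VM_UNSET_USER_MEM_REGION: {
		__u32 label;

		if (copy_from_user(&label, argp, sizeof(label)))
			return -EFAULT;

		r = gh_vm_mem_free(ghvm, label);
		break;
	}
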

> +		break;
> +	}
> +	default:
> +		r = -ENOTTY;
> +		break;
> +	}
> +
> +	return r;
> +}
> +
>   static int gh_vm_release(struct inode *inode, struct file *filp)
>   {
>   	struct gh_vm *ghvm = filp->private_data;
> @@ -65,6 +108,7 @@ static int gh_vm_release(struct inode *inode, struct file *filp)
>   }
>   
>   static const struct file_operations gh_vm_fops = {
> +	.unlocked_ioctl = gh_vm_ioctl,
>   	.release = gh_vm_release,
>   	.compat_ioctl	= compat_ptr_ioctl,
>   	.llseek = noop_llseek,
> diff --git a/drivers/virt/gunyah/vm_mgr.h b/drivers/virt/gunyah/vm_mgr.h
> index 76954da706e9..97bc00c34878 100644
> --- a/drivers/virt/gunyah/vm_mgr.h
> +++ b/drivers/virt/gunyah/vm_mgr.h
> @@ -7,16 +7,41 @@
>   #define _GH_PRIV_VM_MGR_H
>   
>   #include <linux/gunyah_rsc_mgr.h>
> +#include <linux/list.h>
> +#include <linux/miscdevice.h>
> +#include <linux/mutex.h>
>   
>   #include <uapi/linux/gunyah.h>
>   
>   long gh_dev_vm_mgr_ioctl(struct gh_rm *rm, unsigned int cmd, unsigned long arg);
>   
> +enum gh_vm_mem_share_type {
> +	VM_MEM_SHARE,
> +	VM_MEM_LEND,
> +};
> +
> +struct gh_vm_mem {
> +	struct list_head list;
> +	enum gh_vm_mem_share_type share_type;
> +	struct gh_rm_mem_parcel parcel;
> +
> +	__u64 guest_phys_addr;
> +	struct page **pages;
> +	unsigned long npages;
> +};
> +
>   struct gh_vm {
>   	u16 vmid;
>   	struct gh_rm *rm;
>   
>   	struct work_struct free_work;
> +	struct mutex mm_lock;
> +	struct list_head memory_mappings;
>   };
>   
> +int gh_vm_mem_alloc(struct gh_vm *ghvm, struct gh_userspace_memory_region *region);
> +void gh_vm_mem_reclaim(struct gh_vm *ghvm, struct gh_vm_mem *mapping);
> +int gh_vm_mem_free(struct gh_vm *ghvm, u32 label);
> +struct gh_vm_mem *gh_vm_mem_find(struct gh_vm *ghvm, u32 label);
> +
>   #endif
> diff --git a/drivers/virt/gunyah/vm_mgr_mm.c b/drivers/virt/gunyah/vm_mgr_mm.c
> new file mode 100644
> index 000000000000..03e71a36ea3b
> --- /dev/null
> +++ b/drivers/virt/gunyah/vm_mgr_mm.c
> @@ -0,0 +1,235 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +/*
> + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
> + */
> +
> +#define pr_fmt(fmt) "gh_vm_mgr: " fmt
> +
> +#include <linux/gunyah_rsc_mgr.h>
> +#include <linux/mm.h>
> +
> +#include <uapi/linux/gunyah.h>
> +
> +#include "vm_mgr.h"
> +
> +static inline bool page_contiguous(phys_addr_t p, phys_addr_t t)
> +{
> +	return t - p == PAGE_SIZE;
> +}
> +
> +static struct gh_vm_mem *__gh_vm_mem_find(struct gh_vm *ghvm, u32 label)
> +	__must_hold(&ghvm->mm_lock)
> +{
> +	struct gh_vm_mem *mapping;
> +
> +	list_for_each_entry(mapping, &ghvm->memory_mappings, list)
> +		if (mapping->parcel.label == label)
> +			return mapping;
> +
> +	return NULL;
> +}
> +
> +void gh_vm_mem_reclaim(struct gh_vm *ghvm, struct gh_vm_mem *mapping)
> +	__must_hold(&ghvm->mm_lock)
> +{
> +	int i, ret = 0;
> +
> +	if (mapping->parcel.mem_handle != GH_MEM_HANDLE_INVAL) {
> +		ret = gh_rm_mem_reclaim(ghvm->rm, &mapping->parcel);
> +		if (ret)
> +			pr_warn("Failed to reclaim memory parcel for label %d: %d\n",
> +				mapping->parcel.label, ret);

What is the behavior of the hypervisor if we fail to reclaim the pages?

> +	}
> +
> +	if (!ret)
So we will leave the user pages pinned if the hypervisor call fails, but
further down we free the mapping altogether.

I'm not 100% sure whether this has any side effects, but is it okay to
leave user pages pinned with no possibility of ever unpinning them in
such cases?

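One option, as an illustrative sketch only (making gh_vm_mem_reclaim()
return an error is my assumption, not something this patch does): report
reclaim failure to callers so the mapping, and its pinned pages, stay
tracked on the list rather than being freed:

	ret = gh_rm_mem_reclaim(ghvm->rm, &mapping->parcel);
	if (ret) {
		pr_warn("Failed to reclaim memory parcel for label %d: %d\n",
			mapping->parcel.label, ret);
		/* keep pages pinned and the mapping listed; caller skips kfree() */
		return ret;
	}
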

> +		for (i = 0; i < mapping->npages; i++)
> +			unpin_user_page(mapping->pages[i]);
> +
> +	kfree(mapping->pages);
> +	kfree(mapping->parcel.acl_entries);
> +	kfree(mapping->parcel.mem_entries);
> +
> +	list_del(&mapping->list);
> +}
> +
> +struct gh_vm_mem *gh_vm_mem_find(struct gh_vm *ghvm, u32 label)
> +{
> +	struct gh_vm_mem *mapping;
> +	int ret;
> +
> +	ret = mutex_lock_interruptible(&ghvm->mm_lock);
> +	if (ret)
> +		return ERR_PTR(ret);
A blank line would be nice here.

> +	mapping = __gh_vm_mem_find(ghvm, label);
> +	mutex_unlock(&ghvm->mm_lock);
A blank line would be nice here.

> +	return mapping ? : ERR_PTR(-ENODEV);
> +}
> +
> +int gh_vm_mem_alloc(struct gh_vm *ghvm, struct gh_userspace_memory_region *region)
> +{
> +	struct gh_vm_mem *mapping, *tmp_mapping;
> +	struct gh_rm_mem_entry *mem_entries;
> +	phys_addr_t curr_page, prev_page;
> +	struct gh_rm_mem_parcel *parcel;
> +	int i, j, pinned, ret = 0;
> +	size_t entry_size;
> +	u16 vmid;
> +
> +	if (!gh_api_has_feature(GH_API_FEATURE_MEMEXTENT))
> +		return -EOPNOTSUPP;

Should this not be the first thing done in the ioctl, before even
entering this function?

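i.e., something along these lines (sketch only, reusing the existing
names from this patch):

	case GH_VM_SET_USER_MEM_REGION: {
		struct gh_userspace_memory_region region;

		if (!gh_api_has_feature(GH_API_FEATURE_MEMEXTENT))
			return -EOPNOTSUPP;

		if (copy_from_user(&region, argp, sizeof(region)))
			return -EFAULT;
		...
	}
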
> +
> +	if (!region->memory_size || !PAGE_ALIGNED(region->memory_size) ||
> +		!PAGE_ALIGNED(region->userspace_addr) || !PAGE_ALIGNED(region->guest_phys_addr))
> +		return -EINVAL;
> +
> +	ret = mutex_lock_interruptible(&ghvm->mm_lock);
> +	if (ret)
> +		return ret;
A blank line here, please.

> +	mapping = __gh_vm_mem_find(ghvm, region->label);
> +	if (mapping) {
> +		mutex_unlock(&ghvm->mm_lock);
> +		return -EEXIST;
> +	}
> +
> +	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
> +	if (!mapping) {
> +		ret = -ENOMEM;
> +		goto free_mapping;

How about:

	mutex_unlock(&ghvm->mm_lock);
	return -ENOMEM;

since there is nothing to unwind at this point?

> +	}
> +
> +	mapping->parcel.label = region->label;
> +	mapping->guest_phys_addr = region->guest_phys_addr;
> +	mapping->npages = region->memory_size >> PAGE_SHIFT;
> +	parcel = &mapping->parcel;
> +	parcel->mem_handle = GH_MEM_HANDLE_INVAL; /* to be filled later by mem_share/mem_lend */
> +	parcel->mem_type = GH_RM_MEM_TYPE_NORMAL;
> +
> +	/* Check for overlap */
> +	list_for_each_entry(tmp_mapping, &ghvm->memory_mappings, list) {
> +		if (!((mapping->guest_phys_addr + (mapping->npages << PAGE_SHIFT) <=
> +			tmp_mapping->guest_phys_addr) ||
> +			(mapping->guest_phys_addr >=
> +			tmp_mapping->guest_phys_addr + (tmp_mapping->npages << PAGE_SHIFT)))) {
> +			ret = -EEXIST;
> +			goto free_mapping;
> +		}
> +	}
> +
> +	list_add(&mapping->list, &ghvm->memory_mappings);
> +
> +	mapping->pages = kcalloc(mapping->npages, sizeof(*mapping->pages), GFP_KERNEL);
> +	if (!mapping->pages) {
> +		ret = -ENOMEM;
> +		mapping->npages = 0; /* update npages for reclaim */
> +		goto reclaim;
> +	}
> +
> +	pinned = pin_user_pages_fast(region->userspace_addr, mapping->npages,
> +					FOLL_WRITE | FOLL_LONGTERM, mapping->pages);
> +	if (pinned < 0) {
> +		ret = pinned;
> +		mapping->npages = 0; /* update npages for reclaim */
> +		goto reclaim;
> +	} else if (pinned != mapping->npages) {
> +		ret = -EFAULT;
> +		mapping->npages = pinned; /* update npages for reclaim */
> +		goto reclaim;
> +	}
> +
> +	if (region->flags & GH_MEM_LENT) {
> +		parcel->n_acl_entries = 1;
> +		mapping->share_type = VM_MEM_LEND;
> +	} else {
> +		parcel->n_acl_entries = 2;
> +		mapping->share_type = VM_MEM_SHARE;
> +	}
> +	parcel->acl_entries = kcalloc(parcel->n_acl_entries, sizeof(*parcel->acl_entries),
> +					GFP_KERNEL);
> +	if (!parcel->acl_entries) {
> +		ret = -ENOMEM;
> +		goto reclaim;
> +	}
> +
> +	parcel->acl_entries[0].vmid = cpu_to_le16(ghvm->vmid);
A blank line here, please.
> +	if (region->flags & GH_MEM_ALLOW_READ)
> +		parcel->acl_entries[0].perms |= GH_RM_ACL_R;
> +	if (region->flags & GH_MEM_ALLOW_WRITE)
> +		parcel->acl_entries[0].perms |= GH_RM_ACL_W;
> +	if (region->flags & GH_MEM_ALLOW_EXEC)
> +		parcel->acl_entries[0].perms |= GH_RM_ACL_X;
> +
> +	if (mapping->share_type == VM_MEM_SHARE) {
> +		ret = gh_rm_get_vmid(ghvm->rm, &vmid);
> +		if (ret)
> +			goto reclaim;
> +
> +		parcel->acl_entries[1].vmid = cpu_to_le16(vmid);
> +		/* Host assumed to have all these permissions. Gunyah will not
> +		 * grant new permissions if host actually had less than RWX
> +		 */
> +		parcel->acl_entries[1].perms |= GH_RM_ACL_R | GH_RM_ACL_W | GH_RM_ACL_X;
> +	}
> +
> +	mem_entries = kcalloc(mapping->npages, sizeof(*mem_entries), GFP_KERNEL);
> +	if (!mem_entries) {
> +		ret = -ENOMEM;
> +		goto reclaim;
> +	}
> +
> +	/* reduce number of entries by combining contiguous pages into single memory entry */
> +	prev_page = page_to_phys(mapping->pages[0]);
> +	mem_entries[0].ipa_base = cpu_to_le64(prev_page);
> +	entry_size = PAGE_SIZE;
A blank line here, please.
> +	for (i = 1, j = 0; i < mapping->npages; i++) {
> +		curr_page = page_to_phys(mapping->pages[i]);
> +		if (page_contiguous(prev_page, curr_page)) {
> +			entry_size += PAGE_SIZE;
> +		} else {
> +			mem_entries[j].size = cpu_to_le64(entry_size);
> +			j++;
> +			mem_entries[j].ipa_base = cpu_to_le64(curr_page);
> +			entry_size = PAGE_SIZE;
> +		}
> +
> +		prev_page = curr_page;
> +	}
> +	mem_entries[j].size = cpu_to_le64(entry_size);
> +
> +	parcel->n_mem_entries = j + 1;
> +	parcel->mem_entries = kmemdup(mem_entries, sizeof(*mem_entries) * parcel->n_mem_entries,
> +					GFP_KERNEL);
> +	kfree(mem_entries);
> +	if (!parcel->mem_entries) {
> +		ret = -ENOMEM;
> +		goto reclaim;
> +	}
> +
> +	mutex_unlock(&ghvm->mm_lock);
> +	return 0;
> +reclaim:
> +	gh_vm_mem_reclaim(ghvm, mapping);
> +free_mapping:
> +	kfree(mapping);
> +	mutex_unlock(&ghvm->mm_lock);
> +	return ret;
> +}
> +
> +int gh_vm_mem_free(struct gh_vm *ghvm, u32 label)
> +{
> +	struct gh_vm_mem *mapping;
> +	int ret;
> +
> +	ret = mutex_lock_interruptible(&ghvm->mm_lock);
> +	if (ret)
> +		return ret;
> +
> +	mapping = __gh_vm_mem_find(ghvm, label);
> +	if (!mapping)
> +		goto out;
> +
> +	gh_vm_mem_reclaim(ghvm, mapping);
> +	kfree(mapping);
> +out:
> +	mutex_unlock(&ghvm->mm_lock);
> +	return ret;
> +}
> diff --git a/include/uapi/linux/gunyah.h b/include/uapi/linux/gunyah.h
> index 10ba32d2b0a6..d85d12119a48 100644
> --- a/include/uapi/linux/gunyah.h
> +++ b/include/uapi/linux/gunyah.h
> @@ -20,4 +20,37 @@
>    */
>   #define GH_CREATE_VM			_IO(GH_IOCTL_TYPE, 0x0) /* Returns a Gunyah VM fd */
>   
> +/*
> + * ioctls for VM fds
> + */
> +
> +/**
> + * struct gh_userspace_memory_region - Userspace memory descripion for GH_VM_SET_USER_MEM_REGION
> + * @label: Unique identifer to the region.
> + * @flags: Flags for memory parcel behavior
> + * @guest_phys_addr: Location of the memory region in guest's memory space (page-aligned)#

A note about overlapping here would be useful.

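Perhaps something like (illustrative wording only):

 * @guest_phys_addr: Location of the memory region in guest's memory space
 *                   (page-aligned). Must not overlap any other region's
 *                   guest address range.
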
> + * @memory_size: Size of the region (page-aligned)
> + * @userspace_addr: Location of the memory region in caller (userspace)'s memory
> + *
> + * See Documentation/virt/gunyah/vm-manager.rst for further details.
> + */
> +struct gh_userspace_memory_region {
> +	__u32 label;
> +#define GH_MEM_ALLOW_READ	(1UL << 0)
> +#define GH_MEM_ALLOW_WRITE	(1UL << 1)
> +#define GH_MEM_ALLOW_EXEC	(1UL << 2)
> +/*
> + * The guest will be lent the memory instead of shared.
> + * In other words, the guest has exclusive access to the memory region and the host loses access.
> + */
> +#define GH_MEM_LENT		(1UL << 3)
> +	__u32 flags;
> +	__u64 guest_phys_addr;
> +	__u64 memory_size;
> +	__u64 userspace_addr;
> +};
> +
> +#define GH_VM_SET_USER_MEM_REGION	_IOW(GH_IOCTL_TYPE, 0x1, \
> +						struct gh_userspace_memory_region)
> +
>   #endif

