* [PATCH v3 1/3] drivers: platform: goldfish: fix the checkpatch complaint in Kconfig
@ 2019-01-09  2:11 rkir
  2019-01-09  2:11 ` [PATCH v3 2/3] drivers: platform: goldfish: goldfish_address_space: add a driver rkir
  2019-01-09  2:11 ` [PATCH v3 3/3] drivers: platform: goldfish: goldfish_sync: " rkir
  0 siblings, 2 replies; 12+ messages in thread
From: rkir @ 2019-01-09  2:11 UTC (permalink / raw)
  To: gregkh; +Cc: linux-kernel, Roman Kiryanov

From: Roman Kiryanov <rkir@google.com>

Address the checkpatch warning: prefer 'help' over '---help---' for
new help texts.

Signed-off-by: Roman Kiryanov <rkir@google.com>
---
Changes in v3:
 - No changes.

Changes in v2:
 - New change, a minor cleanup.

 drivers/platform/goldfish/Kconfig | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/platform/goldfish/Kconfig b/drivers/platform/goldfish/Kconfig
index 479031aa4f88..74fdfa68d1f2 100644
--- a/drivers/platform/goldfish/Kconfig
+++ b/drivers/platform/goldfish/Kconfig
@@ -2,7 +2,7 @@ menuconfig GOLDFISH
 	bool "Platform support for Goldfish virtual devices"
 	depends on X86_32 || X86_64 || ARM || ARM64 || MIPS
 	depends on HAS_IOMEM
-	---help---
+	help
 	  Say Y here to get to see options for the Goldfish virtual platform.
 	  This option alone does not add any kernel code.
 
@@ -12,7 +12,7 @@ if GOLDFISH
 
 config GOLDFISH_PIPE
 	tristate "Goldfish virtual device for QEMU pipes"
-	---help---
+	help
 	  This is a virtual device to drive the QEMU pipe interface used by
 	  the Goldfish Android Virtual Device.
 
-- 
2.20.1.97.g81188d93c3-goog



* [PATCH v3 2/3] drivers: platform: goldfish: goldfish_address_space: add a driver
  2019-01-09  2:11 [PATCH v3 1/3] drivers: platform: goldfish: fix the checkpatch complaint in Kconfig rkir
@ 2019-01-09  2:11 ` rkir
  2019-01-09  8:31   ` Greg KH
  2019-01-22 11:07   ` Greg KH
  2019-01-09  2:11 ` [PATCH v3 3/3] drivers: platform: goldfish: goldfish_sync: " rkir
  1 sibling, 2 replies; 12+ messages in thread
From: rkir @ 2019-01-09  2:11 UTC (permalink / raw)
  To: gregkh; +Cc: linux-kernel, Roman Kiryanov

From: Roman Kiryanov <rkir@google.com>

A driver for the Goldfish Android emulator that occupies
address space to use it with the memory sharing device
on the QEMU side. The memory sharing device allocates
subranges and populates them with actual RAM.
This allows sharing the host's memory with the guest.

Signed-off-by: Roman Kiryanov <rkir@google.com>
---
Changes in v3:
 - No changes.

Changes in v2:
 - Removed WARN_ON.
 - Moved to drivers/platform/goldfish (from drivers/misc).

 drivers/platform/goldfish/Kconfig             |   9 +
 drivers/platform/goldfish/Makefile            |   1 +
 .../goldfish/goldfish_address_space.c         | 666 ++++++++++++++++++
 .../linux/goldfish/goldfish_address_space.h   |  27 +
 4 files changed, 703 insertions(+)
 create mode 100644 drivers/platform/goldfish/goldfish_address_space.c
 create mode 100644 include/uapi/linux/goldfish/goldfish_address_space.h

diff --git a/drivers/platform/goldfish/Kconfig b/drivers/platform/goldfish/Kconfig
index 74fdfa68d1f2..60ecec4a3c59 100644
--- a/drivers/platform/goldfish/Kconfig
+++ b/drivers/platform/goldfish/Kconfig
@@ -16,4 +16,13 @@ config GOLDFISH_PIPE
 	  This is a virtual device to drive the QEMU pipe interface used by
 	  the Goldfish Android Virtual Device.
 
+config GOLDFISH_ADDRESS_SPACE
+	tristate "A Goldfish driver that talks to the memory sharing device in QEMU"
+	depends on PCI
+	depends on GOLDFISH
+	help
+	  A Goldfish driver that allocates address space ranges in the guest to
+	  populate them later in the host. This allows sharing host's memory
+	  with the guest.
+
 endif # GOLDFISH
diff --git a/drivers/platform/goldfish/Makefile b/drivers/platform/goldfish/Makefile
index e0c202df9674..034abe0727b8 100644
--- a/drivers/platform/goldfish/Makefile
+++ b/drivers/platform/goldfish/Makefile
@@ -2,3 +2,4 @@
 # Makefile for Goldfish platform specific drivers
 #
 obj-$(CONFIG_GOLDFISH_PIPE)	+= goldfish_pipe.o
+obj-$(CONFIG_GOLDFISH_ADDRESS_SPACE) += goldfish_address_space.o
diff --git a/drivers/platform/goldfish/goldfish_address_space.c b/drivers/platform/goldfish/goldfish_address_space.c
new file mode 100644
index 000000000000..50513b98ddb7
--- /dev/null
+++ b/drivers/platform/goldfish/goldfish_address_space.c
@@ -0,0 +1,666 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/wait.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+
+#include <linux/device.h>
+#include <linux/pci_regs.h>
+#include <linux/pci_ids.h>
+#include <linux/pci.h>
+
+#include <uapi/linux/goldfish/goldfish_address_space.h>
+
+MODULE_DESCRIPTION("A driver for the Goldfish Android emulator that occupies "
+		   "address space to use it with the memory sharing device "
+		   "on the QEMU side. The memory sharing device allocates "
+		   "subranges and populates them with actual RAM. "
+		   "This allows sharing the host's memory with the guest.");
+MODULE_AUTHOR("Roman Kiryanov <rkir@google.com>");
+MODULE_LICENSE("GPL v2");
+
+enum as_register_id {
+	AS_REGISTER_COMMAND = 0,
+	AS_REGISTER_STATUS = 4,
+	AS_REGISTER_GUEST_PAGE_SIZE = 8,
+	AS_REGISTER_BLOCK_SIZE_LOW = 12,
+	AS_REGISTER_BLOCK_SIZE_HIGH = 16,
+	AS_REGISTER_BLOCK_OFFSET_LOW = 20,
+	AS_REGISTER_BLOCK_OFFSET_HIGH = 24,
+};
+
+enum as_command_id {
+	AS_COMMAND_ALLOCATE_BLOCK = 1,
+	AS_COMMAND_DEALLOCATE_BLOCK = 2,
+};
+
+#define AS_PCI_VENDOR_ID	0x607D
+#define AS_PCI_DEVICE_ID	0xF153
+#define AS_MAGIC_U32		(AS_PCI_VENDOR_ID << 16 | AS_PCI_DEVICE_ID)
+#define AS_ALLOCATED_BLOCKS_INITIAL_CAPACITY 32
+
+enum as_pci_bar_id {
+	AS_PCI_CONTROL_BAR_ID = 0,
+	AS_PCI_AREA_BAR_ID = 1,
+};
+
+struct as_driver_state;
+
+struct as_device_state {
+	u32	magic;
+
+	struct miscdevice	miscdevice;
+	struct pci_dev		*dev;
+	struct as_driver_state	*driver_state;
+
+	void __iomem		*io_registers;
+
+	void			*address_area;	/* to claim the address space */
+
+	/* physical address to allocate from */
+	unsigned long		address_area_phys_address;
+
+	struct mutex		registers_lock;	/* protects registers */
+
+	wait_queue_head_t	wake_queue;	/* to wait for the hardware */
+
+	int			hw_done;	/* to say hw is done */
+};
+
+struct as_block {
+	u64 offset;
+	u64 size;
+};
+
+struct as_allocated_blocks {
+	struct as_device_state *state;
+
+	struct as_block *blocks;  /* a dynamic array of allocated blocks */
+	int blocks_size;
+	int blocks_capacity;
+	struct mutex blocks_lock; /* protects operations with blocks */
+};
+
+static void __iomem *as_register_address(void __iomem *base,
+					 int offset)
+{
+	return ((char __iomem *)base) + offset;
+}
+
+static void as_write_register(void __iomem *registers,
+			      int offset,
+			      u32 value)
+{
+	writel(value, as_register_address(registers, offset));
+}
+
+static u32 as_read_register(void __iomem *registers, int offset)
+{
+	return readl(as_register_address(registers, offset));
+}
+
+static int
+as_talk_to_hardware(struct as_device_state *state, enum as_command_id cmd)
+{
+	state->hw_done = 0;
+	as_write_register(state->io_registers, AS_REGISTER_COMMAND, cmd);
+	wait_event(state->wake_queue, state->hw_done);
+	return -as_read_register(state->io_registers,
+				 AS_REGISTER_STATUS);
+}
+
+static long
+as_ioctl_allocate_block_locked_impl(struct as_device_state *state,
+				    u64 *size, u64 *offset)
+{
+	long res;
+
+	as_write_register(state->io_registers,
+			  AS_REGISTER_BLOCK_SIZE_LOW,
+			  lower_32_bits(*size));
+	as_write_register(state->io_registers,
+			  AS_REGISTER_BLOCK_SIZE_HIGH,
+			  upper_32_bits(*size));
+
+	res = as_talk_to_hardware(state, AS_COMMAND_ALLOCATE_BLOCK);
+	if (!res) {
+		u64 low = as_read_register(state->io_registers,
+					   AS_REGISTER_BLOCK_OFFSET_LOW);
+		u64 high = as_read_register(state->io_registers,
+					    AS_REGISTER_BLOCK_OFFSET_HIGH);
+		*offset = low | (high << 32);
+
+		low = as_read_register(state->io_registers,
+				       AS_REGISTER_BLOCK_SIZE_LOW);
+		high = as_read_register(state->io_registers,
+					AS_REGISTER_BLOCK_SIZE_HIGH);
+		*size = low | (high << 32);
+	}
+
+	return res;
+}
+
+static long
+as_ioctl_unallocate_block_locked_impl(struct as_device_state *state, u64 offset)
+{
+	as_write_register(state->io_registers,
+			  AS_REGISTER_BLOCK_OFFSET_LOW,
+			  lower_32_bits(offset));
+	as_write_register(state->io_registers,
+			  AS_REGISTER_BLOCK_OFFSET_HIGH,
+			  upper_32_bits(offset));
+
+	return as_talk_to_hardware(state, AS_COMMAND_DEALLOCATE_BLOCK);
+}
+
+static int as_blocks_grow_capacity(int old_capacity)
+{
+	return old_capacity + old_capacity;
+}
+
+static int
+as_blocks_insert(struct as_allocated_blocks *allocated_blocks,
+		 u64 offset,
+		 u64 size)
+{
+	int blocks_size;
+
+	if (mutex_lock_interruptible(&allocated_blocks->blocks_lock))
+		return -ERESTARTSYS;
+
+	blocks_size = allocated_blocks->blocks_size;
+
+	if (allocated_blocks->blocks_capacity == blocks_size) {
+		int new_capacity =
+			as_blocks_grow_capacity(
+				allocated_blocks->blocks_capacity);
+		struct as_block *new_blocks =
+			kcalloc(new_capacity,
+				sizeof(allocated_blocks->blocks[0]),
+				GFP_KERNEL);
+
+		if (!new_blocks) {
+			mutex_unlock(&allocated_blocks->blocks_lock);
+			return -ENOMEM;
+		}
+
+		memcpy(new_blocks, allocated_blocks->blocks,
+		       blocks_size * sizeof(allocated_blocks->blocks[0]));
+
+		kfree(allocated_blocks->blocks);
+		allocated_blocks->blocks = new_blocks;
+		allocated_blocks->blocks_capacity = new_capacity;
+	}
+
+	allocated_blocks->blocks[blocks_size] =
+		(struct as_block){ .offset = offset, .size = size };
+	allocated_blocks->blocks_size = blocks_size + 1;
+
+	mutex_unlock(&allocated_blocks->blocks_lock);
+	return 0;
+}
+
+static int
+as_blocks_remove(struct as_allocated_blocks *allocated_blocks, u64 offset)
+{
+	long res = -ENXIO;
+	struct as_block *blocks;
+	int blocks_size;
+	int i;
+
+	if (mutex_lock_interruptible(&allocated_blocks->blocks_lock))
+		return -ERESTARTSYS;
+
+	blocks = allocated_blocks->blocks;
+	blocks_size = allocated_blocks->blocks_size;
+
+	for (i = 0; i < blocks_size; ++i) {
+		if (offset == blocks[i].offset) {
+			int last = blocks_size - 1;
+
+			if (last > i)
+				blocks[i] = blocks[last];
+
+			--allocated_blocks->blocks_size;
+			res = 0;
+			break;
+		}
+	}
+
+	mutex_unlock(&allocated_blocks->blocks_lock);
+	return res;
+}
+
+static int
+as_blocks_check_if_mine(struct as_allocated_blocks *allocated_blocks,
+			u64 offset,
+			u64 size)
+{
+	const u64 end = offset + size;
+	int res = -EPERM;
+	struct as_block *block;
+	int blocks_size;
+
+	if (mutex_lock_interruptible(&allocated_blocks->blocks_lock))
+		return -ERESTARTSYS;
+
+	block = allocated_blocks->blocks;
+	blocks_size = allocated_blocks->blocks_size;
+
+	for (; blocks_size > 0; --blocks_size, ++block) {
+		u64 block_offset = block->offset;
+		u64 block_end = block_offset + block->size;
+
+		if (offset >= block_offset && end <= block_end) {
+			res = 0;
+			break;
+		}
+	}
+
+	mutex_unlock(&allocated_blocks->blocks_lock);
+	return res;
+}
+
+static int as_open(struct inode *inode, struct file *filp)
+{
+	struct as_allocated_blocks *allocated_blocks;
+
+	allocated_blocks = kzalloc(sizeof(*allocated_blocks), GFP_KERNEL);
+	if (!allocated_blocks)
+		return -ENOMEM;
+
+	allocated_blocks->state =
+		container_of(filp->private_data,
+			     struct as_device_state,
+			     miscdevice);
+
+	allocated_blocks->blocks =
+		kcalloc(AS_ALLOCATED_BLOCKS_INITIAL_CAPACITY,
+			sizeof(allocated_blocks->blocks[0]),
+			GFP_KERNEL);
+	if (!allocated_blocks->blocks) {
+		kfree(allocated_blocks);
+		return -ENOMEM;
+	}
+
+	allocated_blocks->blocks_size = 0;
+	allocated_blocks->blocks_capacity =
+		AS_ALLOCATED_BLOCKS_INITIAL_CAPACITY;
+	mutex_init(&allocated_blocks->blocks_lock);
+
+	filp->private_data = allocated_blocks;
+	return 0;
+}
+
+static int as_release(struct inode *inode, struct file *filp)
+{
+	struct as_allocated_blocks *allocated_blocks = filp->private_data;
+	struct as_device_state *state;
+	int blocks_size;
+	int i;
+
+	state = allocated_blocks->state;
+	blocks_size = allocated_blocks->blocks_size;
+
+	if (mutex_lock_interruptible(&state->registers_lock))
+		return -ERESTARTSYS;
+
+	for (i = 0; i < blocks_size; ++i) {
+		as_ioctl_unallocate_block_locked_impl(
+			state, allocated_blocks->blocks[i].offset);
+	}
+
+	mutex_unlock(&state->registers_lock);
+
+	kfree(allocated_blocks->blocks);
+	kfree(allocated_blocks);
+	return 0;
+}
+
+static int as_mmap_impl(struct as_device_state *state,
+			size_t size,
+			struct vm_area_struct *vma)
+{
+	unsigned long pfn = (state->address_area_phys_address >> PAGE_SHIFT) +
+		vma->vm_pgoff;
+
+	return remap_pfn_range(vma,
+			       vma->vm_start,
+			       pfn,
+			       size,
+			       vma->vm_page_prot);
+}
+
+static int as_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct as_allocated_blocks *allocated_blocks = filp->private_data;
+	size_t size = PAGE_ALIGN(vma->vm_end - vma->vm_start);
+	int res;
+
+	res = as_blocks_check_if_mine(allocated_blocks,
+				      vma->vm_pgoff << PAGE_SHIFT,
+				      size);
+
+	if (res)
+		return res;
+	else
+		return as_mmap_impl(allocated_blocks->state, size, vma);
+}
+
+static long as_ioctl_allocate_block_impl(
+	struct as_device_state *state,
+	struct goldfish_address_space_allocate_block *request)
+{
+	long res;
+
+	if (mutex_lock_interruptible(&state->registers_lock))
+		return -ERESTARTSYS;
+
+	res = as_ioctl_allocate_block_locked_impl(state,
+						  &request->size,
+						  &request->offset);
+	if (!res) {
+		request->phys_addr =
+			state->address_area_phys_address + request->offset;
+	}
+
+	mutex_unlock(&state->registers_lock);
+	return res;
+}
+
+static void
+as_ioctl_unallocate_block_impl(struct as_device_state *state, u64 offset)
+{
+	mutex_lock(&state->registers_lock);
+	as_ioctl_unallocate_block_locked_impl(state, offset);
+	mutex_unlock(&state->registers_lock);
+}
+
+static long
+as_ioctl_allocate_block(struct as_allocated_blocks *allocated_blocks,
+			void __user *ptr)
+{
+	long res;
+	struct as_device_state *state = allocated_blocks->state;
+	struct goldfish_address_space_allocate_block request;
+
+	if (copy_from_user(&request, ptr, sizeof(request)))
+		return -EFAULT;
+
+	res = as_ioctl_allocate_block_impl(state, &request);
+	if (!res) {
+		res = as_blocks_insert(allocated_blocks,
+				       request.offset,
+				       request.size);
+
+		if (res) {
+			as_ioctl_unallocate_block_impl(state, request.offset);
+		} else if (copy_to_user(ptr, &request, sizeof(request))) {
+			as_ioctl_unallocate_block_impl(state, request.offset);
+			res = -EFAULT;
+		}
+	}
+
+	return res;
+}
+
+static long
+as_ioctl_unallocate_block(struct as_allocated_blocks *allocated_blocks,
+			  void __user *ptr)
+{
+	long res;
+	u64 offset;
+
+	if (copy_from_user(&offset, ptr, sizeof(offset)))
+		return -EFAULT;
+
+	res = as_blocks_remove(allocated_blocks, offset);
+	if (!res)
+		as_ioctl_unallocate_block_impl(allocated_blocks->state, offset);
+
+	return res;
+}
+
+static long as_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	struct as_allocated_blocks *allocated_blocks = filp->private_data;
+
+	switch (cmd) {
+	case GOLDFISH_ADDRESS_SPACE_IOCTL_ALLOCATE_BLOCK:
+		return as_ioctl_allocate_block(allocated_blocks,
+					       (void __user *)arg);
+
+	case GOLDFISH_ADDRESS_SPACE_IOCTL_DEALLOCATE_BLOCK:
+		return as_ioctl_unallocate_block(allocated_blocks,
+						 (void __user *)arg);
+
+	default:
+		return -ENOTTY;
+	}
+}
+
+static const struct file_operations userspace_file_operations = {
+	.owner = THIS_MODULE,
+	.open = as_open,
+	.release = as_release,
+	.mmap = as_mmap,
+	.unlocked_ioctl = as_ioctl,
+	.compat_ioctl = as_ioctl,
+};
+
+static void __iomem __must_check *ioremap_pci_bar(struct pci_dev *dev,
+						  int bar_id)
+{
+	void __iomem *io;
+	unsigned long size = pci_resource_len(dev, bar_id);
+
+	if (!size)
+		return IOMEM_ERR_PTR(-ENXIO);
+
+	io = ioremap(pci_resource_start(dev, bar_id), size);
+	if (!io)
+		return IOMEM_ERR_PTR(-ENOMEM);
+
+	return io;
+}
+
+static void __must_check *memremap_pci_bar(struct pci_dev *dev,
+					   int bar_id,
+					   unsigned long flags)
+{
+	void *mem;
+	unsigned long size = pci_resource_len(dev, bar_id);
+
+	if (!size)
+		return ERR_PTR(-ENXIO);
+
+	mem = memremap(pci_resource_start(dev, bar_id), size, flags);
+	if (!mem)
+		return ERR_PTR(-ENOMEM);
+
+	return mem;
+}
+
+static irqreturn_t __must_check as_interrupt_impl(struct as_device_state *state)
+{
+	state->hw_done = 1;
+	wake_up_interruptible(&state->wake_queue);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t as_interrupt(int irq, void *dev_id)
+{
+	struct as_device_state *state = dev_id;
+
+	return (state->magic == AS_MAGIC_U32)
+		? as_interrupt_impl(state) : IRQ_NONE;
+}
+
+static void fill_miscdevice(struct miscdevice *miscdev)
+{
+	memset(miscdev, 0, sizeof(*miscdev));
+
+	miscdev->minor = MISC_DYNAMIC_MINOR;
+	miscdev->name = GOLDFISH_ADDRESS_SPACE_DEVICE_NAME;
+	miscdev->fops = &userspace_file_operations;
+}
+
+static int __must_check
+create_as_device(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	int res;
+	struct as_device_state *state;
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return -ENOMEM;
+
+	res = pci_request_region(dev,
+				 AS_PCI_CONTROL_BAR_ID,
+				 "Address space control");
+	if (res) {
+		pr_err("(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR%d",
+		       dev->bus->number,
+		       dev->devfn,
+		       AS_PCI_CONTROL_BAR_ID);
+		goto out_free_device_state;
+	}
+
+	res = pci_request_region(dev,
+				 AS_PCI_AREA_BAR_ID,
+				 "Address space area");
+	if (res) {
+		pr_err("(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR%d",
+		       dev->bus->number,
+		       dev->devfn,
+		       AS_PCI_AREA_BAR_ID);
+		goto out_release_control_bar;
+	}
+
+	fill_miscdevice(&state->miscdevice);
+	res = misc_register(&state->miscdevice);
+	if (res)
+		goto out_release_area_bar;
+
+	state->io_registers = ioremap_pci_bar(dev,
+					      AS_PCI_CONTROL_BAR_ID);
+	if (IS_ERR(state->io_registers)) {
+		res = PTR_ERR(state->io_registers);
+		goto out_misc_deregister;
+	}
+
+	state->address_area = memremap_pci_bar(dev,
+					       AS_PCI_AREA_BAR_ID,
+					       MEMREMAP_WB);
+	if (IS_ERR(state->address_area)) {
+		res = PTR_ERR(state->address_area);
+		goto out_iounmap;
+	}
+
+	state->address_area_phys_address =
+		pci_resource_start(dev, AS_PCI_AREA_BAR_ID);
+
+	res = request_irq(dev->irq,
+			  as_interrupt, IRQF_SHARED,
+			  KBUILD_MODNAME, state);
+	if (res)
+		goto out_memunmap;
+
+	as_write_register(state->io_registers,
+			  AS_REGISTER_GUEST_PAGE_SIZE,
+			  PAGE_SIZE);
+
+	state->magic = AS_MAGIC_U32;
+	state->dev = dev;
+	mutex_init(&state->registers_lock);
+	init_waitqueue_head(&state->wake_queue);
+	pci_set_drvdata(dev, state);
+
+	return 0;
+
+out_memunmap:
+	memunmap(state->address_area);
+out_iounmap:
+	iounmap(state->io_registers);
+out_misc_deregister:
+	misc_deregister(&state->miscdevice);
+out_release_area_bar:
+	pci_release_region(dev, AS_PCI_AREA_BAR_ID);
+out_release_control_bar:
+	pci_release_region(dev, AS_PCI_CONTROL_BAR_ID);
+out_free_device_state:
+	kzfree(state);
+
+	return res;
+}
+
+static void as_destroy_device(struct as_device_state *state)
+{
+	free_irq(state->dev->irq, state);
+	memunmap(state->address_area);
+	iounmap(state->io_registers);
+	misc_deregister(&state->miscdevice);
+	pci_release_region(state->dev, AS_PCI_AREA_BAR_ID);
+	pci_release_region(state->dev, AS_PCI_CONTROL_BAR_ID);
+	kfree(state);
+}
+
+static int __must_check as_pci_probe(struct pci_dev *dev,
+				     const struct pci_device_id *id)
+{
+	int res;
+	u8 hardware_revision;
+
+	res = pci_enable_device(dev);
+	if (res)
+		return res;
+
+	res = pci_read_config_byte(dev, PCI_REVISION_ID, &hardware_revision);
+	if (res)
+		goto out_disable_pci;
+
+	switch (hardware_revision) {
+	case 1:
+		res = create_as_device(dev, id);
+		break;
+
+	default:
+		res = -ENODEV;
+		goto out_disable_pci;
+	}
+
+	return 0;
+
+out_disable_pci:
+	pci_disable_device(dev);
+
+	return res;
+}
+
+static void as_pci_remove(struct pci_dev *dev)
+{
+	struct as_device_state *state = pci_get_drvdata(dev);
+
+	as_destroy_device(state);
+	pci_disable_device(dev);
+}
+
+static const struct pci_device_id as_pci_tbl[] = {
+	{ PCI_DEVICE(AS_PCI_VENDOR_ID, AS_PCI_DEVICE_ID), },
+	{ }
+};
+MODULE_DEVICE_TABLE(pci, as_pci_tbl);
+
+static struct pci_driver goldfish_address_space_driver = {
+	.name		= GOLDFISH_ADDRESS_SPACE_DEVICE_NAME,
+	.id_table	= as_pci_tbl,
+	.probe		= as_pci_probe,
+	.remove		= as_pci_remove,
+};
+
+module_pci_driver(goldfish_address_space_driver);
diff --git a/include/uapi/linux/goldfish/goldfish_address_space.h b/include/uapi/linux/goldfish/goldfish_address_space.h
new file mode 100644
index 000000000000..b1d11f95a4e1
--- /dev/null
+++ b/include/uapi/linux/goldfish/goldfish_address_space.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef UAPI_GOLDFISH_ADDRESS_SPACE_H
+#define UAPI_GOLDFISH_ADDRESS_SPACE_H
+
+#include <linux/types.h>
+
+#define GOLDFISH_ADDRESS_SPACE_DEVICE_NAME	"goldfish_address_space"
+
+struct goldfish_address_space_allocate_block {
+	__u64 size;
+	__u64 offset;
+	__u64 phys_addr;
+};
+
+#define GOLDFISH_ADDRESS_SPACE_IOCTL_MAGIC	'G'
+
+#define GOLDFISH_ADDRESS_SPACE_IOCTL_OP(OP, T)	\
+	_IOWR(GOLDFISH_ADDRESS_SPACE_IOCTL_MAGIC, OP, T)
+
+#define GOLDFISH_ADDRESS_SPACE_IOCTL_ALLOCATE_BLOCK \
+	GOLDFISH_ADDRESS_SPACE_IOCTL_OP(10, \
+		struct goldfish_address_space_allocate_block)
+
+#define GOLDFISH_ADDRESS_SPACE_IOCTL_DEALLOCATE_BLOCK \
+	GOLDFISH_ADDRESS_SPACE_IOCTL_OP(11, __u64)
+
+#endif /* UAPI_GOLDFISH_ADDRESS_SPACE_H */
-- 
2.20.1.97.g81188d93c3-goog
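
A minimal userspace sketch of the intended usage, based only on the
uapi above; the /dev/goldfish_address_space path (derived from the
miscdevice name) and the one-page size are assumptions made for
illustration, and error handling is abbreviated:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#include <linux/goldfish/goldfish_address_space.h>

int main(void)
{
	struct goldfish_address_space_allocate_block req;
	void *mem;
	int fd = open("/dev/goldfish_address_space", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.size = 4096;	/* one page; the device may adjust the size */

	/* On success the driver fills in req.offset and req.phys_addr. */
	if (ioctl(fd, GOLDFISH_ADDRESS_SPACE_IOCTL_ALLOCATE_BLOCK, &req) < 0)
		goto out_close;

	/* as_mmap() checks that the offset lies in a block we own. */
	mem = mmap(NULL, req.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, req.offset);
	if (mem != MAP_FAILED) {
		/* ... the host populates this range; both sides share it ... */
		munmap(mem, req.size);
	}

	/* The deallocate ioctl takes just the __u64 block offset. */
	ioctl(fd, GOLDFISH_ADDRESS_SPACE_IOCTL_DEALLOCATE_BLOCK, &req.offset);
out_close:
	close(fd);
	return 0;
}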



* [PATCH v3 3/3] drivers: platform: goldfish: goldfish_sync: add a driver
  2019-01-09  2:11 [PATCH v3 1/3] drivers: platform: goldfish: fix the checkpatch complaint in Kconfig rkir
  2019-01-09  2:11 ` [PATCH v3 2/3] drivers: platform: goldfish: goldfish_address_space: add a driver rkir
@ 2019-01-09  2:11 ` rkir
  2019-01-22 11:08   ` Greg KH
  1 sibling, 1 reply; 12+ messages in thread
From: rkir @ 2019-01-09  2:11 UTC (permalink / raw)
  To: gregkh; +Cc: linux-kernel, Roman Kiryanov

From: Roman Kiryanov <rkir@google.com>

The Goldfish sync driver is designed to provide an interface
between the underlying host's sync device and the kernel's
fence sync framework.

Signed-off-by: Roman Kiryanov <rkir@google.com>
---
Changes in v3:
 - Removed WARN_ON.

Changes in v2:
 - Added a missing include (mod_devicetable.h).
 - Put in one batch with goldfish_address_space.c to avoid merge conflicts.

 drivers/platform/goldfish/Kconfig           |   7 +
 drivers/platform/goldfish/Makefile          |   1 +
 drivers/platform/goldfish/goldfish_sync.c   | 827 ++++++++++++++++++++
 include/uapi/linux/goldfish/goldfish_sync.h |  28 +
 4 files changed, 863 insertions(+)
 create mode 100644 drivers/platform/goldfish/goldfish_sync.c
 create mode 100644 include/uapi/linux/goldfish/goldfish_sync.h

diff --git a/drivers/platform/goldfish/Kconfig b/drivers/platform/goldfish/Kconfig
index 60ecec4a3c59..841250235430 100644
--- a/drivers/platform/goldfish/Kconfig
+++ b/drivers/platform/goldfish/Kconfig
@@ -25,4 +25,11 @@ config GOLDFISH_ADDRESS_SPACE
 	  populate them later in the host. This allows sharing host's memory
 	  with the guest.
 
+config GOLDFISH_SYNC
+	tristate "Goldfish AVD Sync Driver"
+	depends on SW_SYNC
+	depends on SYNC_FILE
+	help
+	  Emulated sync fences for the Goldfish Android Virtual Device.
+
 endif # GOLDFISH
diff --git a/drivers/platform/goldfish/Makefile b/drivers/platform/goldfish/Makefile
index 034abe0727b8..bdff4d6a0ad9 100644
--- a/drivers/platform/goldfish/Makefile
+++ b/drivers/platform/goldfish/Makefile
@@ -3,3 +3,4 @@
 #
 obj-$(CONFIG_GOLDFISH_PIPE)	+= goldfish_pipe.o
 obj-$(CONFIG_GOLDFISH_ADDRESS_SPACE) += goldfish_address_space.o
+obj-$(CONFIG_GOLDFISH_SYNC)	+= goldfish_sync.o
diff --git a/drivers/platform/goldfish/goldfish_sync.c b/drivers/platform/goldfish/goldfish_sync.c
new file mode 100644
index 000000000000..3a68e2bb436c
--- /dev/null
+++ b/drivers/platform/goldfish/goldfish_sync.c
@@ -0,0 +1,827 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/* The Goldfish sync driver is designed to provide an interface
+ * between the underlying host's sync device and the kernel's
+ * fence sync framework.
+ *
+ * The purpose of the device/driver is to enable lightweight creation and
+ * signaling of timelines and fences in order to synchronize the guest with
+ * host-side graphics events.
+ *
+ * Each time the interrupt trips, the driver may perform a sync operation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/dma-fence.h>
+#include <linux/fdtable.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/sync_file.h>
+#include <linux/syscalls.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+
+#include <uapi/linux/goldfish/goldfish_sync.h>
+
+struct sync_pt {
+	struct dma_fence base;	/* must be the first field in this struct */
+	struct list_head active_list;	/* see active_list_head below */
+};
+
+struct goldfish_sync_state;
+
+struct goldfish_sync_timeline {
+	struct goldfish_sync_state *sync_state;
+
+	/* This object is owned by userspace from open() calls and also each
+	 * sync_pt refers to it.
+	 */
+	struct kref		kref;
+	char			name[32];	/* for debugging */
+
+	u64			context;
+	unsigned int		seqno;
+	/* list of active (unsignaled/errored) sync_pts */
+	struct list_head	active_list_head;
+	spinlock_t		lock;	/* protects the fields above */
+};
+
+/* The above definitions (command codes, register layout, ioctl definitions)
+ * need to be in sync with the following files:
+ *
+ * Host-side (emulator):
+ * external/qemu/android/emulation/goldfish_sync.h
+ * external/qemu-android/hw/misc/goldfish_sync.c
+ *
+ * Guest-side (system image):
+ * device/generic/goldfish-opengl/system/egl/goldfish_sync.h
+ * device/generic/goldfish/ueventd.ranchu.rc
+ * platform/build/target/board/generic/sepolicy/file_contexts
+ */
+struct goldfish_sync_hostcmd {
+	/* sorted for alignment */
+	u64 handle;
+	u64 hostcmd_handle;
+	u32 cmd;
+	u32 time_arg;
+};
+
+struct goldfish_sync_guestcmd {
+	u64 host_command; /* u64 for alignment */
+	u64 glsync_handle;
+	u64 thread_handle;
+	u64 guest_timeline_handle;
+};
+
+/* The host operations are: */
+enum cmd_id {
+	/* Ready signal - used to mark when irq should lower */
+	CMD_SYNC_READY			= 0,
+
+	/* Create a new timeline. writes timeline handle */
+	CMD_CREATE_SYNC_TIMELINE	= 1,
+
+	/* Create a fence object. reads timeline handle and time argument.
+	 * Writes fence fd to the SYNC_REG_HANDLE register.
+	 */
+	CMD_CREATE_SYNC_FENCE		= 2,
+
+	/* Increments timeline. reads timeline handle and time argument */
+	CMD_SYNC_TIMELINE_INC		= 3,
+
+	/* Destroys a timeline. reads timeline handle */
+	CMD_DESTROY_SYNC_TIMELINE	= 4,
+
+	/* Starts a wait on the host with the given glsync object and
+	 * sync thread handle.
+	 */
+	CMD_TRIGGER_HOST_WAIT		= 5,
+};
+
+/* The host register layout is: */
+enum sync_reg_id {
+	/* host->guest batch commands */
+	SYNC_REG_BATCH_COMMAND			= 0x00,
+
+	/* guest->host batch commands */
+	SYNC_REG_BATCH_GUESTCOMMAND		= 0x04,
+
+	/* communicate physical address of host->guest batch commands */
+	SYNC_REG_BATCH_COMMAND_ADDR		= 0x08,
+	SYNC_REG_BATCH_COMMAND_ADDR_HIGH	= 0x0C, /* 64-bit part */
+
+	/* communicate physical address of guest->host commands */
+	SYNC_REG_BATCH_GUESTCOMMAND_ADDR	= 0x10,
+	SYNC_REG_BATCH_GUESTCOMMAND_ADDR_HIGH	= 0x14, /* 64-bit part */
+
+	/* signals that the device has been probed */
+	SYNC_REG_INIT				= 0x18,
+};
+
+#define GOLDFISH_SYNC_MAX_CMDS 32
+
+/* The driver state: */
+struct goldfish_sync_state {
+	struct miscdevice miscdev;
+
+	char __iomem *reg_base;
+	int irq;
+
+	/* Used to generate unique names, see goldfish_sync_timeline::name. */
+	u64 id_counter;
+
+	/* |mutex_lock| protects all concurrent access
+	 * to timelines for both kernel and user space.
+	 */
+	struct mutex mutex_lock;
+
+	/* Buffer holding commands issued from host. */
+	struct goldfish_sync_hostcmd to_do[GOLDFISH_SYNC_MAX_CMDS];
+	u32 to_do_end;
+	/* Protects to_do and to_do_end */
+	spinlock_t to_do_lock;
+
+	/* Buffers for the reading or writing
+	 * of individual commands. The host can directly write
+	 * to |batch_hostcmd| (and then this driver immediately
+	 * copies contents to |to_do|). This driver either replies
+	 * through |batch_hostcmd| or simply issues a
+	 * guest->host command through |batch_guestcmd|.
+	 */
+	struct goldfish_sync_hostcmd batch_hostcmd;
+	struct goldfish_sync_guestcmd batch_guestcmd;
+
+	/* Used to give this struct itself to a work queue
+	 * function for executing actual sync commands.
+	 */
+	struct work_struct work_item;
+};
+
+static struct goldfish_sync_timeline
+*goldfish_dma_fence_parent(struct dma_fence *fence)
+{
+	return container_of(fence->lock, struct goldfish_sync_timeline, lock);
+}
+
+static struct sync_pt *goldfish_sync_fence_to_sync_pt(struct dma_fence *fence)
+{
+	return container_of(fence, struct sync_pt, base);
+}
+
+/* sync_state->mutex_lock must be locked. */
+struct goldfish_sync_timeline __must_check
+*goldfish_sync_timeline_create(struct goldfish_sync_state *sync_state)
+{
+	struct goldfish_sync_timeline *tl;
+
+	tl = kzalloc(sizeof(*tl), GFP_KERNEL);
+	if (!tl)
+		return NULL;
+
+	tl->sync_state = sync_state;
+	kref_init(&tl->kref);
+	snprintf(tl->name, sizeof(tl->name),
+		 "%s:%llu", GOLDFISH_SYNC_DEVICE_NAME,
+		 ++sync_state->id_counter);
+	tl->context = dma_fence_context_alloc(1);
+	tl->seqno = 0;
+	INIT_LIST_HEAD(&tl->active_list_head);
+	spin_lock_init(&tl->lock);
+
+	return tl;
+}
+
+static void goldfish_sync_timeline_free(struct kref *kref)
+{
+	struct goldfish_sync_timeline *tl =
+		container_of(kref, struct goldfish_sync_timeline, kref);
+
+	kfree(tl);
+}
+
+static void goldfish_sync_timeline_get(struct goldfish_sync_timeline *tl)
+{
+	kref_get(&tl->kref);
+}
+
+void goldfish_sync_timeline_put(struct goldfish_sync_timeline *tl)
+{
+	kref_put(&tl->kref, goldfish_sync_timeline_free);
+}
+
+void goldfish_sync_timeline_signal(struct goldfish_sync_timeline *tl,
+				   unsigned int inc)
+{
+	unsigned long flags;
+	struct sync_pt *pt, *next;
+
+	spin_lock_irqsave(&tl->lock, flags);
+	tl->seqno += inc;
+
+	list_for_each_entry_safe(pt, next, &tl->active_list_head, active_list) {
+		/* dma_fence_is_signaled_locked has side effects */
+		if (dma_fence_is_signaled_locked(&pt->base))
+			list_del_init(&pt->active_list);
+	}
+	spin_unlock_irqrestore(&tl->lock, flags);
+}
+
+static const struct dma_fence_ops goldfish_sync_timeline_fence_ops;
+
+static struct sync_pt __must_check
+*goldfish_sync_pt_create(struct goldfish_sync_timeline *tl,
+			 unsigned int value)
+{
+	struct sync_pt *pt = kzalloc(sizeof(*pt), GFP_KERNEL);
+
+	if (!pt)
+		return NULL;
+
+	dma_fence_init(&pt->base,
+		       &goldfish_sync_timeline_fence_ops,
+		       &tl->lock,
+		       tl->context,
+		       value);
+	INIT_LIST_HEAD(&pt->active_list);
+	goldfish_sync_timeline_get(tl);	/* pt refers to tl */
+
+	return pt;
+}
+
+static void goldfish_sync_pt_destroy(struct sync_pt *pt)
+{
+	struct goldfish_sync_timeline *tl =
+		goldfish_dma_fence_parent(&pt->base);
+	unsigned long flags;
+
+	spin_lock_irqsave(&tl->lock, flags);
+	if (!list_empty(&pt->active_list))
+		list_del(&pt->active_list);
+	spin_unlock_irqrestore(&tl->lock, flags);
+
+	goldfish_sync_timeline_put(tl);	/* unref pt from tl */
+	dma_fence_free(&pt->base);
+}
+
+static const char
+*goldfish_sync_timeline_fence_get_driver_name(struct dma_fence *fence)
+{
+	return "sw_sync";
+}
+
+static const char
+*goldfish_sync_timeline_fence_get_timeline_name(struct dma_fence *fence)
+{
+	struct goldfish_sync_timeline *tl = goldfish_dma_fence_parent(fence);
+
+	return tl->name;
+}
+
+static void goldfish_sync_timeline_fence_release(struct dma_fence *fence)
+{
+	goldfish_sync_pt_destroy(goldfish_sync_fence_to_sync_pt(fence));
+}
+
+static bool goldfish_sync_timeline_fence_signaled(struct dma_fence *fence)
+{
+	struct goldfish_sync_timeline *tl = goldfish_dma_fence_parent(fence);
+
+	return tl->seqno >= fence->seqno;
+}
+
+static bool
+goldfish_sync_timeline_fence_enable_signaling(struct dma_fence *fence)
+{
+	struct sync_pt *pt;
+	struct goldfish_sync_timeline *tl;
+
+	if (goldfish_sync_timeline_fence_signaled(fence))
+		return false;
+
+	pt = goldfish_sync_fence_to_sync_pt(fence);
+	tl = goldfish_dma_fence_parent(fence);
+	list_add_tail(&pt->active_list, &tl->active_list_head);
+	return true;
+}
+
+static void goldfish_sync_timeline_fence_value_str(struct dma_fence *fence,
+						   char *str, int size)
+{
+	snprintf(str, size, "%d", fence->seqno);
+}
+
+static void goldfish_sync_timeline_fence_timeline_value_str(
+				struct dma_fence *fence,
+				char *str, int size)
+{
+	struct goldfish_sync_timeline *tl = goldfish_dma_fence_parent(fence);
+
+	snprintf(str, size, "%d", tl->seqno);
+}
+
+static const struct dma_fence_ops goldfish_sync_timeline_fence_ops = {
+	.get_driver_name = goldfish_sync_timeline_fence_get_driver_name,
+	.get_timeline_name = goldfish_sync_timeline_fence_get_timeline_name,
+	.enable_signaling = goldfish_sync_timeline_fence_enable_signaling,
+	.signaled = goldfish_sync_timeline_fence_signaled,
+	.wait = dma_fence_default_wait,
+	.release = goldfish_sync_timeline_fence_release,
+	.fence_value_str = goldfish_sync_timeline_fence_value_str,
+	.timeline_value_str = goldfish_sync_timeline_fence_timeline_value_str,
+};
+
+static int __must_check
+goldfish_sync_fence_create(struct goldfish_sync_timeline *tl, u32 val)
+{
+	struct sync_pt *pt;
+	struct sync_file *sync_file_obj = NULL;
+	int fd;
+
+	pt = goldfish_sync_pt_create(tl, val);
+	if (!pt)
+		return -1;
+
+	fd = get_unused_fd_flags(O_CLOEXEC);
+	if (fd < 0)
+		goto err_cleanup_pt;
+
+	sync_file_obj = sync_file_create(&pt->base);
+	if (!sync_file_obj)
+		goto err_cleanup_fd_pt;
+
+	fd_install(fd, sync_file_obj->file);
+
+	dma_fence_put(&pt->base);	/* sync_file_obj now owns the fence */
+	return fd;
+
+err_cleanup_fd_pt:
+	put_unused_fd(fd);
+err_cleanup_pt:
+	goldfish_sync_pt_destroy(pt);
+
+	return -1;
+}
+
+static inline void
+goldfish_sync_cmd_queue(struct goldfish_sync_state *sync_state,
+			u32 cmd,
+			u64 handle,
+			u32 time_arg,
+			u64 hostcmd_handle)
+{
+	struct goldfish_sync_hostcmd *to_add =
+		&sync_state->to_do[sync_state->to_do_end];
+
+	to_add->cmd = cmd;
+	to_add->handle = handle;
+	to_add->time_arg = time_arg;
+	to_add->hostcmd_handle = hostcmd_handle;
+
+	++sync_state->to_do_end;
+}
+
+static inline void
+goldfish_sync_hostcmd_reply(struct goldfish_sync_state *sync_state,
+			    u32 cmd,
+			    u64 handle,
+			    u32 time_arg,
+			    u64 hostcmd_handle)
+{
+	unsigned long irq_flags;
+	struct goldfish_sync_hostcmd *batch_hostcmd =
+		&sync_state->batch_hostcmd;
+
+	spin_lock_irqsave(&sync_state->to_do_lock, irq_flags);
+
+	batch_hostcmd->cmd = cmd;
+	batch_hostcmd->handle = handle;
+	batch_hostcmd->time_arg = time_arg;
+	batch_hostcmd->hostcmd_handle = hostcmd_handle;
+	writel(0, sync_state->reg_base + SYNC_REG_BATCH_COMMAND);
+
+	spin_unlock_irqrestore(&sync_state->to_do_lock, irq_flags);
+}
+
+static inline void
+goldfish_sync_send_guestcmd(struct goldfish_sync_state *sync_state,
+			    u32 cmd,
+			    u64 glsync_handle,
+			    u64 thread_handle,
+			    u64 timeline_handle)
+{
+	unsigned long irq_flags;
+	struct goldfish_sync_guestcmd *batch_guestcmd =
+		&sync_state->batch_guestcmd;
+
+	spin_lock_irqsave(&sync_state->to_do_lock, irq_flags);
+
+	batch_guestcmd->host_command = cmd;
+	batch_guestcmd->glsync_handle = glsync_handle;
+	batch_guestcmd->thread_handle = thread_handle;
+	batch_guestcmd->guest_timeline_handle = timeline_handle;
+	writel(0, sync_state->reg_base + SYNC_REG_BATCH_GUESTCOMMAND);
+
+	spin_unlock_irqrestore(&sync_state->to_do_lock, irq_flags);
+}
+
+/* |goldfish_sync_interrupt| handles IRQ raises from the virtual device.
+ * In the context of OpenGL, this interrupt will fire whenever we need
+ * to signal a fence fd in the guest, with the command
+ * |CMD_SYNC_TIMELINE_INC|.
+ * However, because this function will be called in an interrupt context,
+ * it is necessary to do the actual work of signaling off of interrupt context.
+ * The shared work queue is used for this purpose. At the end when
+ * all pending commands are intercepted by the interrupt handler,
+ * we call |schedule_work|, which will later run the actual
+ * desired sync command in |goldfish_sync_work_item_fn|.
+ */
+static irqreturn_t
+goldfish_sync_interrupt_impl(struct goldfish_sync_state *sync_state)
+{
+	struct goldfish_sync_hostcmd *batch_hostcmd =
+			&sync_state->batch_hostcmd;
+
+	spin_lock(&sync_state->to_do_lock);
+	for (;;) {
+		u32 nextcmd;
+		u32 command_r;
+		u64 handle_rw;
+		u32 time_r;
+		u64 hostcmd_handle_rw;
+
+		readl(sync_state->reg_base + SYNC_REG_BATCH_COMMAND);
+		nextcmd = batch_hostcmd->cmd;
+
+		if (nextcmd == 0)
+			break;
+
+		command_r = nextcmd;
+		handle_rw = batch_hostcmd->handle;
+		time_r = batch_hostcmd->time_arg;
+		hostcmd_handle_rw = batch_hostcmd->hostcmd_handle;
+
+		goldfish_sync_cmd_queue(sync_state,
+					command_r,
+					handle_rw,
+					time_r,
+					hostcmd_handle_rw);
+	}
+	spin_unlock(&sync_state->to_do_lock);
+
+	schedule_work(&sync_state->work_item);
+	return IRQ_HANDLED;
+}
+
+static const struct file_operations goldfish_sync_fops;
+
+static irqreturn_t goldfish_sync_interrupt(int irq, void *dev_id)
+{
+	struct goldfish_sync_state *sync_state = dev_id;
+
+	return (sync_state->miscdev.fops == &goldfish_sync_fops) ?
+		goldfish_sync_interrupt_impl(sync_state) : IRQ_NONE;
+}
+
+/* We expect that commands will come in at a slow enough rate
+ * so that incoming items will not be more than
+ * GOLDFISH_SYNC_MAX_CMDS.
+ *
+ * This is because the way the sync device is used,
+ * it's only for managing buffer data transfers per frame,
+ * with a sequential dependency between putting things in
+ * to_do and taking them out. Once a set of commands is
+ * queued up in to_do, the user of the device waits for
+ * them to be processed before queuing additional commands,
+ * which limits the rate at which commands come in
+ * to the rate at which we take them out here.
+ *
+ * We also don't expect more than MAX_CMDS to be issued
+ * at once; there is a correspondence between
+ * which buffers need swapping to the (display / buffer queue)
+ * to particular commands, and we don't expect there to be
+ * enough display or buffer queues in operation at once
+ * to overrun GOLDFISH_SYNC_MAX_CMDS.
+ */
+static u32 __must_check
+goldfish_sync_grab_commands(struct goldfish_sync_state *sync_state,
+			    struct goldfish_sync_hostcmd *dst)
+{
+	u32 to_do_end;
+	u32 i;
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&sync_state->to_do_lock, irq_flags);
+
+	to_do_end = sync_state->to_do_end;
+	for (i = 0; i < to_do_end; i++)
+		dst[i] = sync_state->to_do[i];
+	sync_state->to_do_end = 0;
+
+	spin_unlock_irqrestore(&sync_state->to_do_lock, irq_flags);
+
+	return to_do_end;
+}
+
+void goldfish_sync_run_hostcmd(struct goldfish_sync_state *sync_state,
+			       struct goldfish_sync_hostcmd *todo)
+{
+	struct goldfish_sync_timeline *tl =
+		(struct goldfish_sync_timeline *)(uintptr_t)todo->handle;
+	int sync_fence_fd;
+
+	switch (todo->cmd) {
+	case CMD_SYNC_READY:
+		break;
+
+	case CMD_CREATE_SYNC_TIMELINE:
+		tl = goldfish_sync_timeline_create(sync_state);
+		goldfish_sync_hostcmd_reply(sync_state,
+					    CMD_CREATE_SYNC_TIMELINE,
+					    (uintptr_t)tl,
+					    0,
+					    todo->hostcmd_handle);
+		break;
+
+	case CMD_CREATE_SYNC_FENCE:
+		sync_fence_fd = goldfish_sync_fence_create(tl, todo->time_arg);
+		goldfish_sync_hostcmd_reply(sync_state,
+					    CMD_CREATE_SYNC_FENCE,
+					    sync_fence_fd,
+					    0,
+					    todo->hostcmd_handle);
+		break;
+
+	case CMD_SYNC_TIMELINE_INC:
+		goldfish_sync_timeline_signal(tl, todo->time_arg);
+		break;
+
+	case CMD_DESTROY_SYNC_TIMELINE:
+		goldfish_sync_timeline_put(tl);
+		break;
+	}
+}
+
+/* |goldfish_sync_work_item_fn| does the actual work of servicing
+ * host->guest sync commands. This function is triggered whenever
+ * the IRQ for the goldfish sync device is raised. Once it starts
+ * running, it grabs the contents of the buffer containing the
+ * commands it needs to execute (there may be multiple, because
+ * our IRQ is active high and not edge triggered), and then
+ * runs all of them one after the other.
+ */
+static void goldfish_sync_work_item_fn(struct work_struct *input)
+{
+	struct goldfish_sync_state *sync_state =
+		container_of(input, struct goldfish_sync_state, work_item);
+
+	struct goldfish_sync_hostcmd to_run[GOLDFISH_SYNC_MAX_CMDS];
+	u32 to_do_end;
+	u32 i;
+
+	mutex_lock(&sync_state->mutex_lock);
+
+	to_do_end = goldfish_sync_grab_commands(sync_state, to_run);
+
+	for (i = 0; i < to_do_end; i++)
+		goldfish_sync_run_hostcmd(sync_state, &to_run[i]);
+
+	mutex_unlock(&sync_state->mutex_lock);
+}
+
+static int goldfish_sync_open(struct inode *inode, struct file *filp)
+{
+	struct goldfish_sync_state *sync_state =
+		container_of(filp->private_data,
+			     struct goldfish_sync_state,
+			     miscdev);
+
+	if (mutex_lock_interruptible(&sync_state->mutex_lock))
+		return -ERESTARTSYS;
+
+	filp->private_data = goldfish_sync_timeline_create(sync_state);
+	mutex_unlock(&sync_state->mutex_lock);
+
+	return filp->private_data ? 0 : -ENOMEM;
+}
+
+static int goldfish_sync_release(struct inode *inode, struct file *filp)
+{
+	struct goldfish_sync_timeline *tl = filp->private_data;
+
+	goldfish_sync_timeline_put(tl);
+	return 0;
+}
+
+/* |goldfish_sync_ioctl| is the guest-facing interface of goldfish sync
+ * and is used in conjunction with eglCreateSyncKHR to queue up the
+ * actual work of waiting for the EGL sync command to complete,
+ * possibly returning a fence fd to the guest.
+ */
+static long
+goldfish_sync_ioctl_locked(struct goldfish_sync_timeline *tl,
+			   unsigned int cmd,
+			   unsigned long arg)
+{
+	struct goldfish_sync_ioctl_info ioctl_data;
+	int fd_out = -1;
+
+	switch (cmd) {
+	case GOLDFISH_SYNC_IOC_QUEUE_WORK:
+		if (copy_from_user(&ioctl_data,
+				   (void __user *)arg,
+				   sizeof(ioctl_data)))
+			return -EFAULT;
+
+		if (!ioctl_data.host_syncthread_handle_in)
+			return -EFAULT;
+
+		fd_out = goldfish_sync_fence_create(tl, tl->seqno + 1);
+		ioctl_data.fence_fd_out = fd_out;
+
+		if (copy_to_user((void __user *)arg,
+				 &ioctl_data,
+				 sizeof(ioctl_data))) {
+			ksys_close(fd_out);
+			return -EFAULT;
+		}
+
+		/* We are now about to trigger a host-side wait;
+		 * accumulate on |pending_waits|.
+		 */
+		goldfish_sync_send_guestcmd(tl->sync_state,
+				CMD_TRIGGER_HOST_WAIT,
+				ioctl_data.host_glsync_handle_in,
+				ioctl_data.host_syncthread_handle_in,
+				(u64)(uintptr_t)tl);
+		return 0;
+
+	default:
+		return -ENOTTY;
+	}
+}
+
+static long goldfish_sync_ioctl(struct file *filp,
+				unsigned int cmd,
+				unsigned long arg)
+{
+	struct goldfish_sync_timeline *tl = filp->private_data;
+	struct goldfish_sync_state *x = tl->sync_state;
+	long res;
+
+	if (mutex_lock_interruptible(&x->mutex_lock))
+		return -ERESTARTSYS;
+
+	res = goldfish_sync_ioctl_locked(tl, cmd, arg);
+	mutex_unlock(&x->mutex_lock);
+
+	return res;
+}
+
+static bool setup_verify_batch_cmd_addr(char *reg_base,
+					void *batch_addr,
+					u32 addr_offset,
+					u32 addr_offset_high)
+{
+	u64 batch_addr_phys;
+	u64 batch_addr_phys_test_lo;
+	u64 batch_addr_phys_test_hi;
+
+	batch_addr_phys = virt_to_phys(batch_addr);
+	writel(lower_32_bits(batch_addr_phys), reg_base + addr_offset);
+	writel(upper_32_bits(batch_addr_phys), reg_base + addr_offset_high);
+
+	batch_addr_phys_test_lo = readl(reg_base + addr_offset);
+	batch_addr_phys_test_hi = readl(reg_base + addr_offset_high);
+
+	batch_addr_phys = batch_addr_phys_test_lo |
+		(batch_addr_phys_test_hi << 32);
+
+	return virt_to_phys(batch_addr) == batch_addr_phys;
+}
+
+static const struct file_operations goldfish_sync_fops = {
+	.owner = THIS_MODULE,
+	.open = goldfish_sync_open,
+	.release = goldfish_sync_release,
+	.unlocked_ioctl = goldfish_sync_ioctl,
+	.compat_ioctl = goldfish_sync_ioctl,
+};
+
+static void fill_miscdevice(struct miscdevice *misc)
+{
+	misc->name = GOLDFISH_SYNC_DEVICE_NAME;
+	misc->minor = MISC_DYNAMIC_MINOR;
+	misc->fops = &goldfish_sync_fops;
+}
+
+static int goldfish_sync_probe(struct platform_device *pdev)
+{
+	struct goldfish_sync_state *sync_state;
+	struct resource *ioresource;
+	int result;
+
+	sync_state = devm_kzalloc(&pdev->dev, sizeof(*sync_state), GFP_KERNEL);
+	if (!sync_state)
+		return -ENOMEM;
+
+	spin_lock_init(&sync_state->to_do_lock);
+	mutex_init(&sync_state->mutex_lock);
+	INIT_WORK(&sync_state->work_item, goldfish_sync_work_item_fn);
+
+	ioresource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!ioresource)
+		return -ENODEV;
+
+	sync_state->reg_base =
+		devm_ioremap(&pdev->dev, ioresource->start, PAGE_SIZE);
+	if (!sync_state->reg_base)
+		return -ENOMEM;
+
+	result = platform_get_irq(pdev, 0);
+	if (result < 0)
+		return -ENODEV;
+
+	sync_state->irq = result;
+
+	result = devm_request_irq(&pdev->dev,
+				  sync_state->irq,
+				  goldfish_sync_interrupt,
+				  IRQF_SHARED,
+				  pdev->name,
+				  sync_state);
+	if (result)
+		return -ENODEV;
+
+	if (!setup_verify_batch_cmd_addr(sync_state->reg_base,
+				&sync_state->batch_hostcmd,
+				SYNC_REG_BATCH_COMMAND_ADDR,
+				SYNC_REG_BATCH_COMMAND_ADDR_HIGH))
+		return -ENODEV;
+
+	if (!setup_verify_batch_cmd_addr(sync_state->reg_base,
+				&sync_state->batch_guestcmd,
+				SYNC_REG_BATCH_GUESTCOMMAND_ADDR,
+				SYNC_REG_BATCH_GUESTCOMMAND_ADDR_HIGH))
+		return -ENODEV;
+
+	fill_miscdevice(&sync_state->miscdev);
+	result = misc_register(&sync_state->miscdev);
+	if (result)
+		return -ENODEV;
+
+	writel(0, sync_state->reg_base + SYNC_REG_INIT);
+
+	platform_set_drvdata(pdev, sync_state);
+
+	return 0;
+}
+
+static int goldfish_sync_remove(struct platform_device *pdev)
+{
+	struct goldfish_sync_state *sync_state = platform_get_drvdata(pdev);
+
+	misc_deregister(&sync_state->miscdev);
+	return 0;
+}
+
+static const struct of_device_id goldfish_sync_of_match[] = {
+	{ .compatible = "google,goldfish-sync", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, goldfish_sync_of_match);
+
+static const struct acpi_device_id goldfish_sync_acpi_match[] = {
+	{ "GFSH0006", 0 },
+	{ },
+};
+MODULE_DEVICE_TABLE(acpi, goldfish_sync_acpi_match);
+
+static struct platform_driver goldfish_sync = {
+	.probe = goldfish_sync_probe,
+	.remove = goldfish_sync_remove,
+	.driver = {
+		.name = GOLDFISH_SYNC_DEVICE_NAME,
+		.of_match_table = goldfish_sync_of_match,
+		.acpi_match_table = ACPI_PTR(goldfish_sync_acpi_match),
+	}
+};
+module_platform_driver(goldfish_sync);
+
+MODULE_AUTHOR("Google, Inc.");
+MODULE_DESCRIPTION("Android QEMU Sync Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("2.0");
diff --git a/include/uapi/linux/goldfish/goldfish_sync.h b/include/uapi/linux/goldfish/goldfish_sync.h
new file mode 100644
index 000000000000..01d762f77308
--- /dev/null
+++ b/include/uapi/linux/goldfish/goldfish_sync.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef UAPI_GOLDFISH_SYNC_H
+#define UAPI_GOLDFISH_SYNC_H
+
+#include <linux/types.h>
+
+#define GOLDFISH_SYNC_DEVICE_NAME "goldfish_sync"
+
+struct goldfish_sync_ioctl_info {
+	__u64 host_glsync_handle_in;
+	__u64 host_syncthread_handle_in;
+	__s32 fence_fd_out;
+};
+
+/* There is an ioctl associated with goldfish sync driver.
+ * Make it conflict with ioctls that are not likely to be used
+ * in the emulator.
+ *
+ * '@'	00-0F	linux/radeonfb.h		conflict!
+ * '@'	00-0F	drivers/video/aty/aty128fb.c	conflict!
+ */
+#define GOLDFISH_SYNC_IOC_MAGIC	'@'
+
+#define GOLDFISH_SYNC_IOC_QUEUE_WORK	\
+	_IOWR(GOLDFISH_SYNC_IOC_MAGIC, 0, struct goldfish_sync_ioctl_info)
+
+#endif /* UAPI_GOLDFISH_SYNC_H */
-- 
2.20.1.97.g81188d93c3-goog
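
A minimal userspace sketch of the QUEUE_WORK flow, based only on the
uapi above; the /dev/goldfish_sync path (derived from the miscdevice
name) is an assumption, and the two host-side handles normally come
from the guest EGL stack, so the values passed in here are
placeholders supplied by the caller:

#include <fcntl.h>
#include <poll.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <linux/goldfish/goldfish_sync.h>

/* Queue a host-side wait and block until the resulting fence signals. */
int wait_on_host_gpu_work(uint64_t glsync_handle, uint64_t syncthread_handle)
{
	struct goldfish_sync_ioctl_info info;
	struct pollfd pfd;
	int fd = open("/dev/goldfish_sync", O_RDWR);	/* creates a timeline */

	if (fd < 0)
		return -1;

	memset(&info, 0, sizeof(info));
	info.host_glsync_handle_in = glsync_handle;
	info.host_syncthread_handle_in = syncthread_handle; /* must be nonzero */

	/* Creates a fence fd and sends CMD_TRIGGER_HOST_WAIT to the host. */
	if (ioctl(fd, GOLDFISH_SYNC_IOC_QUEUE_WORK, &info) < 0) {
		close(fd);
		return -1;
	}

	/* The fence fd becomes readable once the host increments the
	 * timeline past the fence's seqno.
	 */
	pfd.fd = info.fence_fd_out;
	pfd.events = POLLIN;
	poll(&pfd, 1, -1);

	close(info.fence_fd_out);
	close(fd);	/* drops the timeline reference taken at open() */
	return 0;
}

Closing the device fd releases the timeline created at open(); the
fence fd keeps its own reference through the sync_pt, matching the
kref scheme in the driver above.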



* Re: [PATCH v3 2/3] drivers: platform: goldfish: goldfish_address_space: add a driver
  2019-01-09  2:11 ` [PATCH v3 2/3] drivers: platform: goldfish: goldfish_address_space: add a driver rkir
@ 2019-01-09  8:31   ` Greg KH
  2019-01-10  1:41     ` Roman Kiryanov
  2019-01-22 11:07   ` Greg KH
  1 sibling, 1 reply; 12+ messages in thread
From: Greg KH @ 2019-01-09  8:31 UTC (permalink / raw)
  To: rkir; +Cc: linux-kernel

On Tue, Jan 08, 2019 at 06:11:11PM -0800, rkir@google.com wrote:
> From: Roman Kiryanov <rkir@google.com>
> 
> A driver for the Goldfish Android emulator that occupies
> address space to use it with the memory sharing device
> on the QEMU side. The memory sharing device allocates
> subranges and populates them with actual RAM.
> This allows sharing the host's memory with the guest.
> 

Do you have a pointer to the QEMU commit that matches up with the device
that this driver is supposed to be supporting?  That would help in
trying to review the code to determine exactly what this driver is doing
and why the existing memory driver does not work well enough for you
(hint, you didn't say why you need this...)

thanks,

greg k-h


* Re: [PATCH v3 2/3] drivers: platform: goldfish: goldfish_address_space: add a driver
  2019-01-09  8:31   ` Greg KH
@ 2019-01-10  1:41     ` Roman Kiryanov
  2019-01-16  4:53       ` Roman Kiryanov
  0 siblings, 1 reply; 12+ messages in thread
From: Roman Kiryanov @ 2019-01-10  1:41 UTC (permalink / raw)
  To: Greg KH; +Cc: linux-kernel

> Do you have a pointer to the QEMU commit that matches up with the device
> that this driver is supposed to be supporting?

The device code is here:

https://android.googlesource.com/platform/external/qemu/+/emu-master-dev/hw/pci/goldfish_address_space.c

Regards,
Roman.


* Re: [PATCH v3 2/3] drivers: platform: goldfish: goldfish_address_space: add a driver
  2019-01-10  1:41     ` Roman Kiryanov
@ 2019-01-16  4:53       ` Roman Kiryanov
  0 siblings, 0 replies; 12+ messages in thread
From: Roman Kiryanov @ 2019-01-16  4:53 UTC (permalink / raw)
  To: Greg KH; +Cc: linux-kernel

> > Do you have a pointer to the QEMU commit that matches up with the device
> > that this driver is supposed to be supporting?
>
> The device code is here:
>
> https://android.googlesource.com/platform/external/qemu/+/emu-master-dev/hw/pci/goldfish_address_space.c

Hi Greg, do you need anything else to review my patch?

> why the existing memory driver does not work well enough for you

I tried to find an existing driver to share memory between QEMU and a
Linux guest but could not find one.


* Re: [PATCH v3 2/3] drivers: platform: goldfish: goldfish_address_space: add a driver
  2019-01-09  2:11 ` [PATCH v3 2/3] drivers: platform: goldfish: goldfish_address_space: add a driver rkir
  2019-01-09  8:31   ` Greg KH
@ 2019-01-22 11:07   ` Greg KH
  2019-01-29  8:32     ` Roman Kiryanov
  1 sibling, 1 reply; 12+ messages in thread
From: Greg KH @ 2019-01-22 11:07 UTC (permalink / raw)
  To: rkir; +Cc: linux-kernel

On Tue, Jan 08, 2019 at 06:11:11PM -0800, rkir@google.com wrote:
> From: Roman Kiryanov <rkir@google.com>
> 
> A driver for the Goldfish Android emulator that occupies
> address space to use it with the memory sharing device
> on the QEMU side. The memory sharing device allocates
> subranges and populates them with actual RAM.
> This allows sharing the host's memory with the guest.
> 
> Signed-off-by: Roman Kiryanov <rkir@google.com>
> ---
> Changes in v3:
>  - No changes.
> 
> Changes in v2:
>  - Removed WARN_ON.
>  - Moved to drivers/platform/goldfish (from drivers/misc).
> 
>  drivers/platform/goldfish/Kconfig             |   9 +
>  drivers/platform/goldfish/Makefile            |   1 +
>  .../goldfish/goldfish_address_space.c         | 666 ++++++++++++++++++
>  .../linux/goldfish/goldfish_address_space.h   |  27 +
>  4 files changed, 703 insertions(+)
>  create mode 100644 drivers/platform/goldfish/goldfish_address_space.c
>  create mode 100644 include/uapi/linux/goldfish/goldfish_address_space.h
> 
> diff --git a/drivers/platform/goldfish/Kconfig b/drivers/platform/goldfish/Kconfig
> index 74fdfa68d1f2..60ecec4a3c59 100644
> --- a/drivers/platform/goldfish/Kconfig
> +++ b/drivers/platform/goldfish/Kconfig
> @@ -16,4 +16,13 @@ config GOLDFISH_PIPE
>  	  This is a virtual device to drive the QEMU pipe interface used by
>  	  the Goldfish Android Virtual Device.
>  
> +config GOLDFISH_ADDRESS_SPACE
> +	tristate "A Goldfish driver that talks to the memory sharing device in QEMU"
> +	depends on PCI
> +	depends on GOLDFISH
> +	help
> +	  A Goldfish driver that allocates address space ranges in the guest to
> +	  populate them later in the host. This allows sharing host's memory
> +	  with the guest.

How does QEMU do this today?  There isn't a virtio or some other virtual
memory device that allows memory regions to be shared?  I can't believe
that there isn't one yet.  If not, then this should be some kind of
"generic" QEMU memory device, not a "goldfish" specific one, right?

Please work with the QEMU developers on this, I need their ack before I
can take something like this.

thanks,

greg k-h


* Re: [PATCH v3 3/3] drivers: platform: goldfish: goldfish_sync: add a driver
  2019-01-09  2:11 ` [PATCH v3 3/3] drivers: platform: goldfish: goldfish_sync: " rkir
@ 2019-01-22 11:08   ` Greg KH
  0 siblings, 0 replies; 12+ messages in thread
From: Greg KH @ 2019-01-22 11:08 UTC (permalink / raw)
  To: rkir; +Cc: linux-kernel

On Tue, Jan 08, 2019 at 06:11:12PM -0800, rkir@google.com wrote:
> From: Roman Kiryanov <rkir@google.com>
> 
> The Goldfish sync driver is designed to provide an interface
> between the underlying host's sync device and the kernel's
> fence sync framework.
> 
> Signed-off-by: Roman Kiryanov <rkir@google.com>
> ---
> Changes in v3:
>  - Removed WARN_ON.
> 
> Changes in v2:
>  - Added a missing include (mod_devicetable.h).
>  - Put in one batch with goldfish_address_space.c to avoid merge conflicts.
> 
>  drivers/platform/goldfish/Kconfig           |   7 +
>  drivers/platform/goldfish/Makefile          |   1 +
>  drivers/platform/goldfish/goldfish_sync.c   | 827 ++++++++++++++++++++
>  include/uapi/linux/goldfish/goldfish_sync.h |  28 +
>  4 files changed, 863 insertions(+)
>  create mode 100644 drivers/platform/goldfish/goldfish_sync.c
>  create mode 100644 include/uapi/linux/goldfish/goldfish_sync.h
> 

Please get this reviewed by the sync subsystem maintainers to ensure
that you are using the API properly, and that this really is needed.  If
it is needed, why is this a goldfish-only type device, why do not all
systems need this?

thanks,

greg k-h


* Re: [PATCH v3 2/3] drivers: platform: goldfish: goldfish_address_space: add a driver
  2019-01-22 11:07   ` Greg KH
@ 2019-01-29  8:32     ` Roman Kiryanov
  2019-01-29  9:37       ` Greg KH
  2019-01-30 14:44       ` Greg KH
  0 siblings, 2 replies; 12+ messages in thread
From: Roman Kiryanov @ 2019-01-29  8:32 UTC (permalink / raw)
  To: Greg KH; +Cc: linux-kernel

> How does QEMU do this today?  There isn't a virtio or some other virtual
> memory device that allows memory regions to be shared?  I can't believe
> that there isn't one yet.  If not, then this should be some kind of
> "generic" QEMU memory device, not a "goldfish" specific one, right?

I also thought this should not be something unique to us. So I asked
in our internal mailing list, I heard nothing back.

> Please work with the QEMU developers on this, I need their ack before I
> can take something like this.

This is a good point. I asked there (nothing since 1/24):

http://lists.nongnu.org/archive/html/qemu-discuss/2019-01/msg00055.html

When we were working on our device side, we had to change some QEMU
code and later noticed that HAXM also requires some fixes (we had
meetings with Intel). I suppose QEMU does not have this feature yet.

Could you please tell if we can proceed with upstreaming as is?

Regards,
Roman.


* Re: [PATCH v3 2/3] drivers: platform: goldfish: goldfish_address_space: add a driver
  2019-01-29  8:32     ` Roman Kiryanov
@ 2019-01-29  9:37       ` Greg KH
  2019-01-30 14:44       ` Greg KH
  1 sibling, 0 replies; 12+ messages in thread
From: Greg KH @ 2019-01-29  9:37 UTC (permalink / raw)
  To: Roman Kiryanov; +Cc: linux-kernel

On Tue, Jan 29, 2019 at 12:32:12AM -0800, Roman Kiryanov wrote:
> > How does QEMU do this today?  There isn't a virtio or some other virtual
> > memory device that allows memory regions to be shared?  I can't believe
> > that there isn't one yet.  If not, then this should be some kind of
> > "generic" QEMU memory device, not a "goldfish" specific one, right?
> 
> I also thought this should not be something unique to us. So I asked
> in our internal mailing list, I heard nothing back.

As we have no idea who is on your internal mailing list, that's really
not a good representation :)

> > Please work with the QEMU developers on this, I need their ack before I
> > can take something like this.
> 
> This is a good point. I asked there (nothing since 1/24):
> 
> http://lists.nongnu.org/archive/html/qemu-discuss/2019-01/msg00055.html

less than a week is short :)

> When we were working on our device side, we had to change some QEMU
> code and later noticed that HAXM also requires some fixes (we had
> meetings with Intel). I suppose QEMU does not have this feature yet.
> 
> Could you please tell if we can proceed with upstreaming as is?

I told you what the requirements would be to get this merged in the
previous email, that has not changed.

thanks,

greg k-h


* Re: [PATCH v3 2/3] drivers: platform: goldfish: goldfish_address_space: add a driver
  2019-01-29  8:32     ` Roman Kiryanov
  2019-01-29  9:37       ` Greg KH
@ 2019-01-30 14:44       ` Greg KH
  2019-01-31  3:56         ` Roman Kiryanov
  1 sibling, 1 reply; 12+ messages in thread
From: Greg KH @ 2019-01-30 14:44 UTC (permalink / raw)
  To: Roman Kiryanov; +Cc: linux-kernel

On Tue, Jan 29, 2019 at 12:32:12AM -0800, Roman Kiryanov wrote:
> > How does QEMU do this today?  There isn't a virtio or some other virtual
> > memory device that allows memory regions to be shared?  I can't believe
> > that there isn't one yet.  If not, then this should be some kind of
> > "generic" QEMU memory device, not a "goldfish" specific one, right?
> 
> I also thought this should not be something unique to us. So I asked
> in our internal mailing list, I heard nothing back.
> 
> > Please work with the QEMU developers on this, I need their ack before I
> > can take something like this.
> 
> This is a good point. I asked there (nothing since 1/24):
> 
> http://lists.nongnu.org/archive/html/qemu-discuss/2019-01/msg00055.html
> 
> When we were working on our device side, we had to change some QEMU
> code and later noticed that HAXM also requires some fixes (we had
> meetings with Intel). I suppose QEMU does not have this feature yet.
> 
> Could you please tell if we can proceed with upstreaming as is?

Also, why does the other Android "emulator", cuttlefish, not need
special drivers like this and the other goldfish drivers?  Shouldn't you
be using the same interfaces that they use that are already merged
upstream?

Actually, now that cuttlefish works on a mainline kernel, can't we just
delete the existing goldfish code?

thanks,

greg k-h


* Re: [PATCH v3 2/3] drivers: platform: goldfish: goldfish_address_space: add a driver
  2019-01-30 14:44       ` Greg KH
@ 2019-01-31  3:56         ` Roman Kiryanov
  0 siblings, 0 replies; 12+ messages in thread
From: Roman Kiryanov @ 2019-01-31  3:56 UTC (permalink / raw)
  To: Greg KH; +Cc: linux-kernel, Lingfeng Yang, Alistair Strachan

> Also, why does the other Android "emulator", cuttlefish, not need
> special drivers like this and the other goldfish drivers?  Shouldn't you
> be using the same interfaces that they use that are already merged
> upstream?
> Actually, now that cuttlefish works on a mainline kernel, can't we just
> delete the existing goldfish code?

cuttlefish is a separate emulator with different assumptions which do
not work for us.

Our emulator runs on Linux, Mac and Windows, it uses host's GPU
directly for rendering.

I am not sure how cuttlefish accesses the host's GPU memory (it might
not support this), but we need it to support coherent memory access
for Vulkan. We might also use it to access the host's camera pixels
to avoid copying them.

Please keep goldfish drivers.


End of thread (newest message: 2019-01-31  3:56 UTC).

Thread overview: 12 messages
2019-01-09  2:11 [PATCH v3 1/3] drivers: platform: goldfish: fix the checkpatch complaint in Kconfig rkir
2019-01-09  2:11 ` [PATCH v3 2/3] drivers: platform: goldfish: goldfish_address_space: add a driver rkir
2019-01-09  8:31   ` Greg KH
2019-01-10  1:41     ` Roman Kiryanov
2019-01-16  4:53       ` Roman Kiryanov
2019-01-22 11:07   ` Greg KH
2019-01-29  8:32     ` Roman Kiryanov
2019-01-29  9:37       ` Greg KH
2019-01-30 14:44       ` Greg KH
2019-01-31  3:56         ` Roman Kiryanov
2019-01-09  2:11 ` [PATCH v3 3/3] drivers: platform: goldfish: goldfish_sync: " rkir
2019-01-22 11:08   ` Greg KH
