All of lore.kernel.org
 help / color / mirror / Atom feed
From: Hans de Goede <hdegoede@redhat.com>
To: Arnd Bergmann <arnd@arndb.de>,
	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Hans de Goede <hdegoede@redhat.com>,
	Michael Thayer <michael.thayer@oracle.com>,
	"Knut St . Osmundsen" <knut.osmundsen@oracle.com>,
	Larry Finger <Larry.Finger@lwfinger.net>,
	linux-kernel@vger.kernel.org
Subject: [RFC v2 1/2] virt: Add vboxguest driver for Virtual Box Guest integration
Date: Fri, 25 Aug 2017 16:37:31 +0200	[thread overview]
Message-ID: <20170825143732.10836-2-hdegoede@redhat.com> (raw)
In-Reply-To: <20170825143732.10836-1-hdegoede@redhat.com>

This commit adds a driver for the Virtual Box Guest PCI device used in
Virtual Box virtual machines. Enabling this driver will add support for
Virtual Box Guest integration features such as copy-and-paste, seamless
mode and OpenGL pass-through.

This driver also offers vboxguest IPC functionality which is needed
for the vboxfs driver which offers folder sharing support.

Signed-off-by: Hans de Goede <hdegoede@redhat.com>
---
 drivers/virt/Kconfig                       |    1 +
 drivers/virt/Makefile                      |    1 +
 drivers/virt/vboxguest/Kconfig             |   16 +
 drivers/virt/vboxguest/Makefile            |    3 +
 drivers/virt/vboxguest/vboxguest_core.c    | 1580 +++++++++++++++++++++++++
 drivers/virt/vboxguest/vboxguest_core.h    |  194 ++++
 drivers/virt/vboxguest/vboxguest_linux.c   |  478 ++++++++
 drivers/virt/vboxguest/vboxguest_utils.c   | 1094 +++++++++++++++++
 drivers/virt/vboxguest/vboxguest_version.h |   18 +
 include/linux/vbox_utils.h                 |  107 ++
 include/linux/vbox_vmmdev.h                |  123 ++
 include/uapi/linux/vbox_err.h              |  178 +++
 include/uapi/linux/vbox_ostypes.h          |  153 +++
 include/uapi/linux/vbox_vmmdev.h           | 1745 ++++++++++++++++++++++++++++
 include/uapi/linux/vboxguest.h             |  359 ++++++
 15 files changed, 6050 insertions(+)
 create mode 100644 drivers/virt/vboxguest/Kconfig
 create mode 100644 drivers/virt/vboxguest/Makefile
 create mode 100644 drivers/virt/vboxguest/vboxguest_core.c
 create mode 100644 drivers/virt/vboxguest/vboxguest_core.h
 create mode 100644 drivers/virt/vboxguest/vboxguest_linux.c
 create mode 100644 drivers/virt/vboxguest/vboxguest_utils.c
 create mode 100644 drivers/virt/vboxguest/vboxguest_version.h
 create mode 100644 include/linux/vbox_utils.h
 create mode 100644 include/linux/vbox_vmmdev.h
 create mode 100644 include/uapi/linux/vbox_err.h
 create mode 100644 include/uapi/linux/vbox_ostypes.h
 create mode 100644 include/uapi/linux/vbox_vmmdev.h
 create mode 100644 include/uapi/linux/vboxguest.h

diff --git a/drivers/virt/Kconfig b/drivers/virt/Kconfig
index 99ebdde590f8..8d9cdfbd6bcc 100644
--- a/drivers/virt/Kconfig
+++ b/drivers/virt/Kconfig
@@ -30,4 +30,5 @@ config FSL_HV_MANAGER
           4) A kernel interface for receiving callbacks when a managed
 	     partition shuts down.
 
+source "drivers/virt/vboxguest/Kconfig"
 endif
diff --git a/drivers/virt/Makefile b/drivers/virt/Makefile
index c47f04dd343b..d3f7b2540890 100644
--- a/drivers/virt/Makefile
+++ b/drivers/virt/Makefile
@@ -3,3 +3,4 @@
 #
 
 obj-$(CONFIG_FSL_HV_MANAGER)	+= fsl_hypervisor.o
+obj-y				+= vboxguest/
diff --git a/drivers/virt/vboxguest/Kconfig b/drivers/virt/vboxguest/Kconfig
new file mode 100644
index 000000000000..e88ee46c31d4
--- /dev/null
+++ b/drivers/virt/vboxguest/Kconfig
@@ -0,0 +1,16 @@
+config VBOXGUEST
+	tristate "Virtual Box Guest integration support"
+	depends on X86 && PCI && INPUT
+	help
+	  This is a driver for the Virtual Box Guest PCI device used in
+	  Virtual Box virtual machines. Enabling this driver will add
+	  support for Virtual Box Guest integration features such as
+	  copy-and-paste, seamless mode and OpenGL pass-through.
+
+	  This driver also offers vboxguest IPC functionality which is needed
+	  for the vboxfs driver which offers folder sharing support.
+
+	  Although it is possible to build this module in, it is advised
+	  to build this driver as a module, so that it can be updated
+	  independently of the kernel. Select M to build this driver as a
+	  module.
diff --git a/drivers/virt/vboxguest/Makefile b/drivers/virt/vboxguest/Makefile
new file mode 100644
index 000000000000..203b8f465817
--- /dev/null
+++ b/drivers/virt/vboxguest/Makefile
@@ -0,0 +1,3 @@
+vboxguest-y := vboxguest_linux.o vboxguest_core.o vboxguest_utils.o
+
+obj-$(CONFIG_VBOXGUEST) += vboxguest.o
diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
new file mode 100644
index 000000000000..6310386a2ab9
--- /dev/null
+++ b/drivers/virt/vboxguest/vboxguest_core.c
@@ -0,0 +1,1580 @@
+/*
+ * vboxguest core guest-device handling code, VBoxGuest.cpp in upstream svn.
+ *
+ * Copyright (C) 2007-2016 Oracle Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, in which case the provisions of the CDDL are applicable
+ * instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/vbox_err.h>
+#include <linux/vbox_utils.h>
+#include <linux/vmalloc.h>
+#include "vboxguest_core.h"
+#include "vboxguest_version.h"
+
+#define GUEST_MAPPINGS_TRIES	5
+
+/**
+ * Reserves memory in which the VMM can relocate any guest mappings
+ * that are floating around.
+ *
+ * This operation is a little bit tricky since the VMM might not accept
+ * just any address because of address clashes between the three contexts
+ * it operates in, so we try several times.
+ *
+ * Failure to reserve the guest mappings is ignored.
+ *
+ * @param   gdev	The Guest extension device.
+ */
+static void vbg_guest_mappings_init(struct vbg_dev *gdev)
+{
+	VMMDevReqHypervisorInfo *req;
+	void *guest_mappings[GUEST_MAPPINGS_TRIES];
+	struct page **pages = NULL;
+	u32 size, hypervisor_size;
+	int i, rc;
+
+	/* Query the required space. */
+	req = vbg_req_alloc(sizeof(*req), VMMDevReq_GetHypervisorInfo);
+	if (!req)
+		return;
+
+	req->hypervisorStart = 0;
+	req->hypervisorSize = 0;
+	rc = vbg_req_perform(gdev, req);
+	if (rc < 0)
+		goto out;
+
+	/*
+	 * The VMM will report back if there is nothing it wants to map, like
+	 * for instance in VT-x and AMD-V mode.
+	 */
+	if (req->hypervisorSize == 0)
+		goto out;
+
+	/* Save the size; req is re-used below and the field overwritten. */
+	hypervisor_size = req->hypervisorSize;
+	/* Add 4M so that we can align the vmap to 4MiB as the host requires. */
+	size = PAGE_ALIGN(req->hypervisorSize) + SZ_4M;
+
+	/* kmalloc_array() checks the count * size multiply for overflow. */
+	pages = kmalloc_array(size >> PAGE_SHIFT, sizeof(*pages), GFP_KERNEL);
+	if (!pages)
+		goto out;
+
+	/*
+	 * The host only needs address space reserved, not actual memory, so
+	 * back the entire vmap with a single dummy page.
+	 */
+	gdev->guest_mappings_dummy_page = alloc_page(GFP_HIGHUSER);
+	if (!gdev->guest_mappings_dummy_page)
+		goto out;
+
+	for (i = 0; i < (size >> PAGE_SHIFT); i++)
+		pages[i] = gdev->guest_mappings_dummy_page;
+
+	/* Try several times, the host can be picky about certain addresses. */
+	for (i = 0; i < GUEST_MAPPINGS_TRIES; i++) {
+		guest_mappings[i] = vmap(pages, (size >> PAGE_SHIFT),
+					 VM_MAP, PAGE_KERNEL_RO);
+		if (!guest_mappings[i])
+			break;
+
+		req->header.requestType = VMMDevReq_SetHypervisorInfo;
+		req->header.rc = VERR_INTERNAL_ERROR;
+		req->hypervisorSize = hypervisor_size;
+		req->hypervisorStart =
+			(unsigned long)PTR_ALIGN(guest_mappings[i], SZ_4M);
+
+		rc = vbg_req_perform(gdev, req);
+		if (rc >= 0) {
+			gdev->guest_mappings = guest_mappings[i];
+			break;
+		}
+	}
+
+	/* Free vmap's from failed attempts. */
+	while (--i >= 0)
+		vunmap(guest_mappings[i]);
+
+	/* On failure free the dummy-page backing the vmap */
+	if (!gdev->guest_mappings) {
+		__free_page(gdev->guest_mappings_dummy_page);
+		gdev->guest_mappings_dummy_page = NULL;
+	}
+
+out:
+	kfree(req);
+	kfree(pages);
+}
+
+/**
+ * Undo what vbg_guest_mappings_init did.
+ *
+ * @param   gdev	The Guest extension device.
+ */
+static void vbg_guest_mappings_exit(struct vbg_dev *gdev)
+{
+	VMMDevReqHypervisorInfo *req;
+	int rc;
+
+	if (!gdev->guest_mappings)
+		return;
+
+	/*
+	 * Tell the host that we're going to free the memory we reserved for
+	 * it, then free it up. (Leak the memory if anything goes wrong here.)
+	 */
+	req = vbg_req_alloc(sizeof(*req), VMMDevReq_SetHypervisorInfo);
+	if (!req)
+		return;
+
+	/* A start/size of zero tells the host to drop its mappings. */
+	req->hypervisorStart = 0;
+	req->hypervisorSize = 0;
+
+	rc = vbg_req_perform(gdev, req);
+
+	kfree(req);
+
+	if (rc < 0) {
+		/* Host may still use the mapping, deliberately leak it. */
+		vbg_err("%s error: %d\n", __func__, rc);
+		return;
+	}
+
+	vunmap(gdev->guest_mappings);
+	gdev->guest_mappings = NULL;
+
+	__free_page(gdev->guest_mappings_dummy_page);
+	gdev->guest_mappings_dummy_page = NULL;
+}
+
+/**
+ * Report the guest information to the host.
+ *
+ * @returns 0 or negative errno value.
+ * @param   gdev	The Guest extension device.
+ */
+static int vbg_report_guest_info(struct vbg_dev *gdev)
+{
+	/*
+	 * Allocate and fill in the two guest info reports.
+	 */
+	VMMDevReportGuestInfo *req1 = NULL;
+	VMMDevReportGuestInfo2 *req2 = NULL;
+	int rc, ret = -ENOMEM;
+
+	req1 = vbg_req_alloc(sizeof(*req1), VMMDevReq_ReportGuestInfo);
+	req2 = vbg_req_alloc(sizeof(*req2), VMMDevReq_ReportGuestInfo2);
+	if (!req1 || !req2)
+		goto out_free;
+
+	req1->guestInfo.interfaceVersion = VMMDEV_VERSION;
+#ifdef CONFIG_X86_64
+	req1->guestInfo.osType = VBOXOSTYPE_Linux26_x64;
+#else
+	req1->guestInfo.osType = VBOXOSTYPE_Linux26;
+#endif
+
+	req2->guestInfo.additionsMajor = VBOX_VERSION_MAJOR;
+	req2->guestInfo.additionsMinor = VBOX_VERSION_MINOR;
+	req2->guestInfo.additionsBuild = VBOX_VERSION_BUILD;
+	req2->guestInfo.additionsRevision = VBOX_SVN_REV;
+	/* (no features defined yet) */
+	req2->guestInfo.additionsFeatures = 0;
+	strlcpy(req2->guestInfo.szName, VBOX_VERSION_STRING,
+		sizeof(req2->guestInfo.szName));
+
+	/*
+	 * There are two protocols here:
+	 *      1. Info2 + Info1. Supported by >=3.2.51.
+	 *      2. Info1 and optionally Info2. The old protocol.
+	 *
+	 * We try protocol 2 first.  It will fail with VERR_NOT_SUPPORTED
+	 * if not supported by the VMMDev (message ordering requirement).
+	 */
+	rc = vbg_req_perform(gdev, req2);
+	if (rc >= 0) {
+		rc = vbg_req_perform(gdev, req1);
+	} else if (rc == VERR_NOT_SUPPORTED || rc == VERR_NOT_IMPLEMENTED) {
+		/* Old protocol: Info1 first, then Info2 (which may be absent). */
+		rc = vbg_req_perform(gdev, req1);
+		if (rc >= 0) {
+			rc = vbg_req_perform(gdev, req2);
+			if (rc == VERR_NOT_IMPLEMENTED)
+				rc = VINF_SUCCESS;
+		}
+	}
+	ret = vbg_status_code_to_errno(rc);
+
+out_free:
+	/* kfree(NULL) is a no-op, so this is safe on the alloc-failure path. */
+	kfree(req2);
+	kfree(req1);
+	return ret;
+}
+
+/**
+ * Report the guest driver status to the host.
+ *
+ * @returns 0 or negative errno value.
+ * @param   gdev	The Guest extension device.
+ * @param   active	Flag whether the driver is now active or not.
+ */
+static int vbg_report_driver_status(struct vbg_dev *gdev, bool active)
+{
+	VMMDevReportGuestStatus *status_req;
+	int rc;
+
+	status_req = vbg_req_alloc(sizeof(*status_req),
+				   VMMDevReq_ReportGuestStatus);
+	if (!status_req)
+		return -ENOMEM;
+
+	status_req->guestStatus.facility = VBoxGuestFacilityType_VBoxGuestDriver;
+	status_req->guestStatus.flags = 0;
+	if (active)
+		status_req->guestStatus.status = VBoxGuestFacilityStatus_Active;
+	else
+		status_req->guestStatus.status = VBoxGuestFacilityStatus_Inactive;
+
+	rc = vbg_req_perform(gdev, status_req);
+	kfree(status_req);
+
+	/* Older hosts do not implement this request; treat that as success. */
+	if (rc == VERR_NOT_IMPLEMENTED)
+		rc = VINF_SUCCESS;
+
+	return vbg_status_code_to_errno(rc);
+}
+
+/** @name Memory Ballooning
+ * @{
+ */
+
+/**
+ * Inflate the balloon by one chunk.
+ *
+ * The caller owns the balloon mutex.
+ *
+ * @returns VBox status code
+ * @param   gdev	The Guest extension device.
+ * @param   chunk_idx	Index of the chunk.
+ */
+static int vbg_balloon_inflate(struct vbg_dev *gdev, u32 chunk_idx)
+{
+	VMMDevChangeMemBalloon *req = gdev->mem_balloon.change_req;
+	struct page **pages;
+	int i, rc;
+
+	/* kmalloc_array() checks the count * size multiply for overflow. */
+	pages = kmalloc_array(VMMDEV_MEMORY_BALLOON_CHUNK_PAGES,
+			      sizeof(*pages), GFP_KERNEL | __GFP_NOWARN);
+	if (!pages)
+		return VERR_NO_MEMORY;
+
+	req->header.size = sizeof(*req);
+	req->inflate = true;
+	req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
+
+	for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++) {
+		pages[i] = alloc_page(GFP_KERNEL | __GFP_NOWARN);
+		if (!pages[i]) {
+			rc = VERR_NO_MEMORY;
+			goto out_error;
+		}
+
+		req->phys_page[i] = page_to_phys(pages[i]);
+	}
+
+	rc = vbg_req_perform(gdev, req);
+	if (rc < 0) {
+		vbg_err("%s error: %d\n", __func__, rc);
+		goto out_error;
+	}
+
+	/* Keep the page pointers so vbg_balloon_deflate() can free them. */
+	gdev->mem_balloon.pages[chunk_idx] = pages;
+
+	return VINF_SUCCESS;
+
+out_error:
+	/* Frees only the pages allocated so far (i is the failure index). */
+	while (--i >= 0)
+		__free_page(pages[i]);
+	kfree(pages);
+
+	return rc;
+}
+
+/**
+ * Deflate the balloon by one chunk.
+ *
+ * The caller owns the balloon mutex.
+ *
+ * @returns VBox status code
+ * @param   gdev	The Guest extension device.
+ * @param   chunk_idx	Index of the chunk.
+ */
+static int vbg_balloon_deflate(struct vbg_dev *gdev, u32 chunk_idx)
+{
+	VMMDevChangeMemBalloon *req = gdev->mem_balloon.change_req;
+	struct page **pages = gdev->mem_balloon.pages[chunk_idx];
+	int i, rc;
+
+	req->header.size = sizeof(*req);
+	req->inflate = false;
+	req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
+
+	for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
+		req->phys_page[i] = page_to_phys(pages[i]);
+
+	/*
+	 * Ask the host to release the pages first; only free them back to
+	 * the kernel once the host has confirmed it no longer uses them.
+	 */
+	rc = vbg_req_perform(gdev, req);
+	if (rc < 0) {
+		vbg_err("%s error: %d\n", __func__, rc);
+		return rc;
+	}
+
+	for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
+		__free_page(pages[i]);
+	kfree(pages);
+	gdev->mem_balloon.pages[chunk_idx] = NULL;
+
+	return VINF_SUCCESS;
+}
+
+/**
+ * Respond to VMMDEV_EVENT_BALLOON_CHANGE_REQUEST events, query the size
+ * the host wants the balloon to be and adjust accordingly.
+ */
+static void vbg_balloon_work(struct work_struct *work)
+{
+	struct vbg_dev *gdev =
+		container_of(work, struct vbg_dev, mem_balloon.work);
+	VMMDevGetMemBalloonChangeRequest *req = gdev->mem_balloon.get_req;
+	u32 i, chunks;
+	int rc;
+
+	/*
+	 * Setting this bit means that we request the value from the host and
+	 * change the guest memory balloon according to the returned value.
+	 */
+	req->eventAck = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
+	rc = vbg_req_perform(gdev, req);
+	if (rc < 0) {
+		vbg_err("%s error: %d\n", __func__, rc);
+		return;
+	}
+
+	/*
+	 * The host always returns the same maximum amount of chunks, so
+	 * we do this once.
+	 */
+	if (!gdev->mem_balloon.max_chunks) {
+		gdev->mem_balloon.pages =
+			devm_kcalloc(gdev->dev, req->cPhysMemChunks,
+				     sizeof(struct page **), GFP_KERNEL);
+		if (!gdev->mem_balloon.pages)
+			return;
+
+		gdev->mem_balloon.max_chunks = req->cPhysMemChunks;
+	}
+
+	chunks = req->cBalloonChunks;
+	if (chunks > gdev->mem_balloon.max_chunks) {
+		vbg_err("%s: illegal balloon size %u (max=%u)\n",
+			__func__, chunks, gdev->mem_balloon.max_chunks);
+		return;
+	}
+
+	if (chunks > gdev->mem_balloon.chunks) {
+		/* inflate */
+		for (i = gdev->mem_balloon.chunks; i < chunks; i++) {
+			rc = vbg_balloon_inflate(gdev, i);
+			if (rc < 0)
+				return;
+
+			gdev->mem_balloon.chunks++;
+		}
+	} else {
+		/* deflate */
+		for (i = gdev->mem_balloon.chunks; i-- > chunks;) {
+			rc = vbg_balloon_deflate(gdev, i);
+			if (rc < 0)
+				return;
+
+			gdev->mem_balloon.chunks--;
+		}
+	}
+}
+
+/** @} */
+
+/** @name Heartbeat
+ * @{
+ */
+
+/**
+ * Callback for heartbeat timer; sends a heartbeat and re-arms itself.
+ */
+static void vbg_heartbeat_timer(unsigned long data)
+{
+	struct vbg_dev *gdev = (struct vbg_dev *)data;
+
+	vbg_req_perform(gdev, gdev->guest_heartbeat_req);
+	/*
+	 * mod_timer() takes an absolute expiry time in jiffies;
+	 * msecs_to_jiffies() alone only yields a relative delta.
+	 */
+	mod_timer(&gdev->heartbeat_timer,
+		  jiffies + msecs_to_jiffies(gdev->heartbeat_interval_ms));
+}
+
+/**
+ * Configure the host to check guest's heartbeat
+ * and get heartbeat interval from the host.
+ *
+ * @returns 0 or negative errno value.
+ * @param   gdev	The Guest extension device.
+ * @param   enabled	Set true to enable guest heartbeat checks on host.
+ */
+static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled)
+{
+	VMMDevReqHeartbeat *req;
+	int rc;
+
+	req = vbg_req_alloc(sizeof(*req), VMMDevReq_HeartbeatConfigure);
+	if (!req)
+		return -ENOMEM;
+
+	req->fEnabled = enabled;
+	/* Input is 0; on success the host fills in its heartbeat interval. */
+	req->cNsInterval = 0;
+	rc = vbg_req_perform(gdev, req);
+	/*
+	 * NOTE(review): the interval is converted and stored even when the
+	 * request failed; presumably cNsInterval is then still 0 - confirm
+	 * the host never partially fills this in on error.
+	 */
+	do_div(req->cNsInterval, 1000000); /* ns -> ms */
+	gdev->heartbeat_interval_ms = req->cNsInterval;
+	kfree(req);
+
+	return vbg_status_code_to_errno(rc);
+}
+
+/**
+ * Initializes the heartbeat timer.
+ *
+ * This feature may be disabled by the host.
+ *
+ * @returns 0 or negative errno value (ignored).
+ * @param   gdev	The Guest extension device.
+ */
+static int vbg_heartbeat_init(struct vbg_dev *gdev)
+{
+	int ret;
+
+	/* Make sure that heartbeat checking is disabled if we fail. */
+	ret = vbg_heartbeat_host_config(gdev, false);
+	if (ret < 0)
+		return ret;
+
+	ret = vbg_heartbeat_host_config(gdev, true);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * Preallocate the request to use it from the timer callback because:
+	 *    1) on Windows vbg_req_alloc must be called at IRQL <= APC_LEVEL
+	 *       and the timer callback runs at DISPATCH_LEVEL;
+	 *    2) avoid repeated allocations.
+	 */
+	gdev->guest_heartbeat_req = vbg_req_alloc(
+					sizeof(*gdev->guest_heartbeat_req),
+					VMMDevReq_GuestHeartbeat);
+	if (!gdev->guest_heartbeat_req) {
+		/*
+		 * Don't leave heartbeat checking enabled on the host when we
+		 * cannot send heartbeats, otherwise the host would wrongly
+		 * conclude that the guest has hung.
+		 */
+		vbg_heartbeat_host_config(gdev, false);
+		return -ENOMEM;
+	}
+
+	vbg_info("%s: Setting up heartbeat to trigger every %d milliseconds\n",
+		 __func__, gdev->heartbeat_interval_ms);
+	/* An expiry of 0 is in the past: the first heartbeat fires at once. */
+	mod_timer(&gdev->heartbeat_timer, 0);
+
+	return 0;
+}
+
+/**
+ * Cleanup heartbeat code, stop HB timer and disable host heartbeat checking.
+ * @param   gdev	The Guest extension device.
+ */
+static void vbg_heartbeat_exit(struct vbg_dev *gdev)
+{
+	/* Stop the timer first so it cannot re-use the request we free. */
+	del_timer_sync(&gdev->heartbeat_timer);
+	vbg_heartbeat_host_config(gdev, false);
+	kfree(gdev->guest_heartbeat_req);
+
+}
+
+/** @} */
+
+/** @name Guest Capabilities and Event Filter
+ * @{
+ */
+
+/**
+ * Applies a change to the bit usage tracker.
+ *
+ * @returns true if the mask changed, false if not.
+ * @param   tracker	The bit usage tracker.
+ * @param   changed	The bits to change.
+ * @param   previous	The previous value of the bits.
+ */
+static bool vbg_track_bit_usage(struct vbg_bit_usage_tracker *tracker,
+				u32 changed, u32 previous)
+{
+	bool mask_changed = false;
+	u32 bit, bitmask;
+
+	for (; changed; changed &= ~bitmask) {
+		bit = ffs(changed) - 1;
+		bitmask = BIT(bit);
+
+		if (previous & bitmask) {
+			/* A user of this bit went away. */
+			tracker->per_bit_usage[bit] -= 1;
+			if (tracker->per_bit_usage[bit] == 0) {
+				tracker->mask &= ~bitmask;
+				mask_changed = true;
+			}
+		} else {
+			/* First user of this bit. */
+			tracker->per_bit_usage[bit] += 1;
+			if (tracker->per_bit_usage[bit] == 1) {
+				tracker->mask |= bitmask;
+				mask_changed = true;
+			}
+		}
+	}
+
+	return mask_changed;
+}
+
+/**
+ * Init and termination worker for resetting the (host) event filter on the host
+ *
+ * @returns 0 or negative errno value.
+ * @param   gdev            The Guest extension device.
+ * @param   fixed_events    Fixed events (init time).
+ */
+static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
+				       u32 fixed_events)
+{
+	VMMDevCtlGuestFilterMask *filter_req;
+	int rc;
+
+	filter_req = vbg_req_alloc(sizeof(*filter_req),
+				   VMMDevReq_CtlGuestFilterMask);
+	if (!filter_req)
+		return -ENOMEM;
+
+	/* Keep only the fixed events enabled, clear everything else. */
+	filter_req->u32OrMask = fixed_events;
+	filter_req->u32NotMask = U32_MAX & ~fixed_events;
+
+	rc = vbg_req_perform(gdev, filter_req);
+	kfree(filter_req);
+
+	if (rc < 0)
+		vbg_err("%s error: %d\n", __func__, rc);
+
+	return vbg_status_code_to_errno(rc);
+}
+
+/**
+ * Changes the event filter mask for the given session.
+ *
+ * This is called in response to VBOXGUEST_IOCTL_CTL_FILTER_MASK as well as to
+ * do session cleanup.
+ *
+ * @returns VBox status code
+ * @param   gdev                The Guest extension device.
+ * @param   session             The session.
+ * @param   or_mask             The events to add.
+ * @param   not_mask            The events to remove.
+ * @param   session_termination Set if we're called by the session cleanup code.
+ *                              This tweaks the error handling so we perform
+ *                              proper session cleanup even if the host
+ *                              misbehaves.
+ *
+ * @remarks Takes the session spinlock.
+ */
+static int vbg_set_session_event_filter(struct vbg_dev *gdev,
+					struct vbg_session *session,
+					u32 or_mask, u32 not_mask,
+					bool session_termination)
+{
+	VMMDevCtlGuestFilterMask *req;
+	u32 changed, previous, host_mask;
+	unsigned long flags;
+	int rc = VINF_SUCCESS;
+
+	/* Allocate a request buffer before taking the spinlock */
+	req = vbg_req_alloc(sizeof(*req), VMMDevReq_CtlGuestFilterMask);
+	if (!req) {
+		if (!session_termination)
+			return VERR_NO_MEMORY;
+		/* Ignore failure, we must do session cleanup. */
+	}
+
+	spin_lock_irqsave(&gdev->session_spinlock, flags);
+
+	/* Apply the changes to the session mask. */
+	previous = session->event_filter;
+	session->event_filter |= or_mask;
+	session->event_filter &= ~not_mask;
+
+	/* If anything actually changed, update the global usage counters. */
+	changed = previous ^ session->event_filter;
+	if (!changed)
+		goto out;
+
+	vbg_track_bit_usage(&gdev->event_filter_tracker, changed, previous);
+	/*
+	 * Compute the new host mask into a local: req may be NULL here when
+	 * called for session termination and the allocation above failed,
+	 * so it must not be dereferenced before the !req check below.
+	 */
+	host_mask = gdev->fixed_events | gdev->event_filter_tracker.mask;
+
+	if (gdev->event_filter_host == host_mask || !req)
+		goto out;
+
+	gdev->event_filter_host = host_mask;
+	req->u32OrMask = host_mask;
+	req->u32NotMask = ~host_mask;
+	rc = vbg_req_perform(gdev, req);
+	if (rc < 0) {
+		/* Failed, roll back (unless it's session termination time). */
+		gdev->event_filter_host = U32_MAX;
+		if (session_termination)
+			goto out;
+
+		vbg_track_bit_usage(&gdev->event_filter_tracker, changed,
+				    session->event_filter);
+		session->event_filter = previous;
+	}
+
+out:
+	spin_unlock_irqrestore(&gdev->session_spinlock, flags);
+	kfree(req);
+
+	return rc;
+}
+
+/**
+ * Init and termination worker for set guest capabilities to zero on the host.
+ *
+ * @returns 0 or negative errno value.
+ * @param   gdev	The Guest extension device.
+ */
+static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
+{
+	VMMDevReqGuestCapabilities2 *caps_req;
+	int rc;
+
+	caps_req = vbg_req_alloc(sizeof(*caps_req),
+				 VMMDevReq_SetGuestCapabilities);
+	if (!caps_req)
+		return -ENOMEM;
+
+	/* Clear every capability bit, set none. */
+	caps_req->u32OrMask = 0;
+	caps_req->u32NotMask = U32_MAX;
+
+	rc = vbg_req_perform(gdev, caps_req);
+	kfree(caps_req);
+
+	if (rc < 0)
+		vbg_err("%s error: %d\n", __func__, rc);
+
+	return vbg_status_code_to_errno(rc);
+}
+
+/**
+ * Sets the guest capabilities for a session.
+ *
+ * @returns VBox status code
+ * @param   gdev                The Guest extension device.
+ * @param   session             The session.
+ * @param   or_mask             The capabilities to add.
+ * @param   not_mask            The capabilities to remove.
+ * @param   session_termination Set if we're called by the session cleanup code.
+ *                              This tweaks the error handling so we perform
+ *                              proper session cleanup even if the host
+ *                              misbehaves.
+ *
+ * @remarks Takes the session spinlock.
+ */
+static int vbg_set_session_capabilities(struct vbg_dev *gdev,
+					struct vbg_session *session,
+					u32 or_mask, u32 not_mask,
+					bool session_termination)
+{
+	VMMDevReqGuestCapabilities2 *req;
+	unsigned long flags;
+	int rc = VINF_SUCCESS;
+	u32 changed, previous, caps;
+
+	/* Allocate a request buffer before taking the spinlock */
+	req = vbg_req_alloc(sizeof(*req), VMMDevReq_SetGuestCapabilities);
+	if (!req) {
+		if (!session_termination)
+			return VERR_NO_MEMORY;
+		/* Ignore failure, we must do session cleanup. */
+	}
+
+	spin_lock_irqsave(&gdev->session_spinlock, flags);
+
+	/* Apply the changes to the session mask. */
+	previous = session->guest_caps;
+	session->guest_caps |= or_mask;
+	session->guest_caps &= ~not_mask;
+
+	/* If anything actually changed, update the global usage counters. */
+	changed = previous ^ session->guest_caps;
+	if (!changed)
+		goto out;
+
+	vbg_track_bit_usage(&gdev->guest_caps_tracker, changed, previous);
+	/*
+	 * Compute the new global mask into a local: req may be NULL here
+	 * when called for session termination and the allocation above
+	 * failed, so it must not be dereferenced before the !req check.
+	 */
+	caps = gdev->guest_caps_tracker.mask;
+
+	if (gdev->guest_caps_host == caps || !req)
+		goto out;
+
+	gdev->guest_caps_host = caps;
+	req->u32OrMask = caps;
+	req->u32NotMask = ~caps;
+	rc = vbg_req_perform(gdev, req);
+	if (rc < 0) {
+		/* Failed, roll back (unless it's session termination time). */
+		gdev->guest_caps_host = U32_MAX;
+		if (session_termination)
+			goto out;
+
+		vbg_track_bit_usage(&gdev->guest_caps_tracker, changed,
+				    session->guest_caps);
+		session->guest_caps = previous;
+	}
+
+out:
+	spin_unlock_irqrestore(&gdev->session_spinlock, flags);
+	kfree(req);
+
+	return rc;
+}
+
+/** @} */
+
+/**
+ * vbg_query_host_version try get the host feature mask and version information
+ * (vbg_host_version).
+ *
+ * @returns 0 or negative errno value (ignored).
+ * @param   gdev	The Guest extension device.
+ */
+static int vbg_query_host_version(struct vbg_dev *gdev)
+{
+	VMMDevReqHostVersion *req;
+	int rc, ret;
+
+	req = vbg_req_alloc(sizeof(*req), VMMDevReq_GetHostVersion);
+	if (!req)
+		return -ENOMEM;
+
+	rc = vbg_req_perform(gdev, req);
+	ret = vbg_status_code_to_errno(rc);
+	if (ret)
+		goto out;
+
+	/* Cache the version string and feature mask for later use. */
+	snprintf(gdev->host_version, sizeof(gdev->host_version), "%u.%u.%ur%u",
+		 req->major, req->minor, req->build, req->revision);
+	gdev->host_features = req->features;
+
+	vbg_info("vboxguest: host-version: %s %#x\n", gdev->host_version,
+		 gdev->host_features);
+
+	/* This driver requires the page-list variant of HGCM transfers. */
+	if (!(req->features & VMMDEV_HVF_HGCM_PHYS_PAGE_LIST)) {
+		vbg_err("vboxguest: Error host too old (does not support page-lists)\n");
+		ret = -ENODEV;
+	}
+
+out:
+	kfree(req);
+	return ret;
+}
+
+/**
+ * Initializes the VBoxGuest device extension when the
+ * device driver is loaded.
+ *
+ * The native code locates the VMMDev on the PCI bus and retrieve
+ * the MMIO and I/O port ranges, this function will take care of
+ * mapping the MMIO memory (if present). Upon successful return
+ * the native code should set up the interrupt handler.
+ *
+ * @returns 0 or negative errno value.
+ *
+ * @param   gdev           The Guest extension device.
+ * @param   fixed_events   Events that will be enabled upon init and no client
+ *                         will ever be allowed to mask.
+ */
+int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events)
+{
+	int ret = -ENOMEM;
+
+	gdev->fixed_events = fixed_events | VMMDEV_EVENT_HGCM;
+	gdev->event_filter_host = U32_MAX;	/* forces a report */
+	gdev->guest_caps_host = U32_MAX;	/* forces a report */
+
+	init_waitqueue_head(&gdev->event_wq);
+	init_waitqueue_head(&gdev->hgcm_wq);
+	INIT_LIST_HEAD(&gdev->session_list);
+	spin_lock_init(&gdev->event_spinlock);
+	spin_lock_init(&gdev->session_spinlock);
+	mutex_init(&gdev->cancel_req_mutex);
+	setup_timer(&gdev->heartbeat_timer, vbg_heartbeat_timer,
+		    (unsigned long)gdev);
+	INIT_WORK(&gdev->mem_balloon.work, vbg_balloon_work);
+
+	/*
+	 * Preallocate all host requests used from contexts where allocation
+	 * is inconvenient or not allowed (irq handler, timer, workqueue).
+	 */
+	gdev->mem_balloon.get_req =
+		vbg_req_alloc(sizeof(*gdev->mem_balloon.get_req),
+			      VMMDevReq_GetMemBalloonChangeRequest);
+	gdev->mem_balloon.change_req =
+		vbg_req_alloc(sizeof(*gdev->mem_balloon.change_req),
+			      VMMDevReq_ChangeMemBalloon);
+	gdev->cancel_req =
+		vbg_req_alloc(sizeof(*(gdev->cancel_req)),
+			      VMMDevReq_HGCMCancel2);
+	gdev->ack_events_req =
+		vbg_req_alloc(sizeof(*gdev->ack_events_req),
+			      VMMDevReq_AcknowledgeEvents);
+	gdev->mouse_status_req =
+		vbg_req_alloc(sizeof(*gdev->mouse_status_req),
+			      VMMDevReq_GetMouseStatus);
+
+	if (!gdev->mem_balloon.get_req || !gdev->mem_balloon.change_req ||
+	    !gdev->cancel_req || !gdev->ack_events_req ||
+	    !gdev->mouse_status_req)
+		goto err_free_reqs;
+
+	ret = vbg_query_host_version(gdev);
+	if (ret)
+		goto err_free_reqs;
+
+	ret = vbg_report_guest_info(gdev);
+	if (ret) {
+		vbg_err("vboxguest: VBoxReportGuestInfo error: %d\n", ret);
+		goto err_free_reqs;
+	}
+
+	ret = vbg_reset_host_event_filter(gdev, gdev->fixed_events);
+	if (ret) {
+		vbg_err("vboxguest: Error setting fixed event filter: %d\n",
+			ret);
+		goto err_free_reqs;
+	}
+
+	ret = vbg_reset_host_capabilities(gdev);
+	if (ret) {
+		vbg_err("vboxguest: Error clearing guest capabilities: %d\n",
+			ret);
+		goto err_free_reqs;
+	}
+
+	ret = vbg_core_set_mouse_status(gdev, 0);
+	if (ret) {
+		vbg_err("vboxguest: Error clearing mouse status: %d\n", ret);
+		goto err_free_reqs;
+	}
+
+	/* These may fail without requiring the driver init to fail. */
+	vbg_guest_mappings_init(gdev);
+	vbg_heartbeat_init(gdev);
+
+	/* All Done! */
+	/* NOTE(review): a failure here is only logged; init still succeeds. */
+	ret = vbg_report_driver_status(gdev, true);
+	if (ret < 0)
+		vbg_err("vboxguest: VBoxReportGuestDriverStatus error: %d\n",
+			ret);
+
+	return 0;
+
+err_free_reqs:
+	/* kfree(NULL) is a no-op, so requests that failed to alloc are fine */
+	kfree(gdev->mouse_status_req);
+	kfree(gdev->ack_events_req);
+	kfree(gdev->cancel_req);
+	kfree(gdev->mem_balloon.change_req);
+	kfree(gdev->mem_balloon.get_req);
+	return ret;
+}
+
+/**
+ * Call this on exit to clean-up vboxguest-core managed resources.
+ *
+ * The native code should call this before the driver is unloaded,
+ * but don't call this on shutdown.
+ *
+ * @param   gdev	The Guest extension device.
+ */
+void vbg_core_exit(struct vbg_dev *gdev)
+{
+	vbg_heartbeat_exit(gdev);
+	vbg_guest_mappings_exit(gdev);
+
+	/* Clear the host flags (mouse status etc). */
+	vbg_reset_host_event_filter(gdev, 0);
+	vbg_reset_host_capabilities(gdev);
+	vbg_core_set_mouse_status(gdev, 0);
+
+	/* Free the requests preallocated by vbg_core_init(). */
+	kfree(gdev->mouse_status_req);
+	kfree(gdev->ack_events_req);
+	kfree(gdev->cancel_req);
+	kfree(gdev->mem_balloon.change_req);
+	kfree(gdev->mem_balloon.get_req);
+}
+
+/**
+ * Creates a VBoxGuest user session.
+ *
+ * vboxguest_linux.c calls this when userspace opens the char-device.
+ *
+ * @returns 0 or negative errno value.
+ * @param   gdev          The Guest extension device.
+ * @param   session_ret   Where to store the session on success.
+ * @param   user_session  Set if this is a session for the vboxuser device.
+ */
+int vbg_core_open_session(struct vbg_dev *gdev,
+			  struct vbg_session **session_ret, bool user_session)
+{
+	struct vbg_session *new_session;
+	unsigned long flags;
+
+	new_session = kzalloc(sizeof(*new_session), GFP_KERNEL);
+	if (new_session == NULL)
+		return -ENOMEM;
+
+	new_session->gdev = gdev;
+	new_session->user_session = user_session;
+
+	/* Register the session so event and cleanup code can find it. */
+	spin_lock_irqsave(&gdev->session_spinlock, flags);
+	list_add(&new_session->list_node, &gdev->session_list);
+	spin_unlock_irqrestore(&gdev->session_spinlock, flags);
+
+	*session_ret = new_session;
+	return 0;
+}
+
+/**
+ * Closes a VBoxGuest session.
+ *
+ * @param   session	The session to close (and free).
+ */
+void vbg_core_close_session(struct vbg_session *session)
+{
+	struct vbg_dev *gdev = session->gdev;
+	unsigned long flags;
+	int i;
+
+	/* Unregister the session first so no new work can reference it. */
+	spin_lock_irqsave(&gdev->session_spinlock, flags);
+	list_del(&session->list_node);
+	spin_unlock_irqrestore(&gdev->session_spinlock, flags);
+
+	/*
+	 * Drop all capability / event-filter bits this session had set;
+	 * session_termination == true so cleanup proceeds even when the
+	 * host requests fail.
+	 */
+	vbg_set_session_capabilities(gdev, session, 0, U32_MAX, true);
+	vbg_set_session_event_filter(gdev, session, 0, U32_MAX, true);
+
+	/* Disconnect any HGCM clients the session left connected. */
+	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
+		if (session->hgcm_client_ids[i])
+			vbg_hgcm_disconnect(gdev, session->hgcm_client_ids[i]);
+	}
+
+	kfree(session);
+}
+
+/*
+ * Wake-up condition for vbg_ioctl_wait_event(): true when one of the
+ * events the caller waits for is pending, or when waits on this session
+ * have been cancelled. Takes the event spinlock.
+ */
+static bool vbg_wait_event_cond(struct vbg_dev *gdev,
+				struct vbg_session *session,
+				VBoxGuestWaitEventInfo *info)
+{
+	unsigned long flags;
+	bool wakeup;
+
+	spin_lock_irqsave(&gdev->event_spinlock, flags);
+	if (gdev->pending_events & info->u32EventMaskIn)
+		wakeup = true;
+	else
+		wakeup = session->cancel_waiters;
+	spin_unlock_irqrestore(&gdev->event_spinlock, flags);
+
+	return wakeup;
+}
+
+/* Must be called with the event_lock held */
+static u32 vbg_consume_events_locked(struct vbg_dev *gdev,
+				     struct vbg_session *session,
+				     VBoxGuestWaitEventInfo *info)
+{
+	u32 pending = gdev->pending_events;
+	u32 consumed = pending & info->u32EventMaskIn;
+
+	/* Clear the consumed events so other waiters don't see them again. */
+	gdev->pending_events = pending & ~consumed;
+	return consumed;
+}
+
+/**
+ * Handle VBOXGUEST_IOCTL_WAITEVENT, wait for events matching
+ * info->u32EventMaskIn to get posted by the host.
+ *
+ * On return info->u32Result holds VBOXGUEST_WAITEVENT_OK / _TIMEOUT /
+ * _INTERRUPTED and info->u32EventFlagsOut the consumed event bits (if any).
+ *
+ * @returns VBox status code
+ */
+static int vbg_ioctl_wait_event(struct vbg_dev *gdev,
+				struct vbg_session *session,
+				VBoxGuestWaitEventInfo *info)
+{
+	unsigned long flags;
+	long timeout;
+	int rc = VINF_SUCCESS;
+
+	/* A timeout of U32_MAX means wait indefinitely. */
+	if (info->u32TimeoutIn == U32_MAX)
+		timeout = MAX_SCHEDULE_TIMEOUT;
+	else
+		timeout = msecs_to_jiffies(info->u32TimeoutIn);
+
+	info->u32Result = VBOXGUEST_WAITEVENT_OK;
+	info->u32EventFlagsOut = 0;
+
+	do {
+		timeout = wait_event_interruptible_timeout(
+				gdev->event_wq,
+				vbg_wait_event_cond(gdev, session, info),
+				timeout);
+
+		spin_lock_irqsave(&gdev->event_spinlock, flags);
+
+		/* timeout < 0 means the wait was interrupted by a signal. */
+		if (timeout < 0 || session->cancel_waiters) {
+			info->u32Result = VBOXGUEST_WAITEVENT_INTERRUPTED;
+			rc = VERR_INTERRUPTED;
+		} else if (timeout == 0) {
+			info->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
+			rc = VERR_TIMEOUT;
+		} else {
+			info->u32EventFlagsOut =
+			    vbg_consume_events_locked(gdev, session, info);
+		}
+
+		spin_unlock_irqrestore(&gdev->event_spinlock, flags);
+
+		/*
+		 * Someone else may have consumed the event(s) first, in
+		 * which case we go back to waiting.
+		 */
+	} while (rc == VINF_SUCCESS && info->u32EventFlagsOut == 0);
+
+	return rc;
+}
+
+/* Flag all of this session's waiters as cancelled and wake them up. */
+static int vbg_ioctl_cancel_all_wait_events(struct vbg_dev *gdev,
+					    struct vbg_session *session)
+{
+	unsigned long flags;
+
+	/* Serialized against the checks in vbg_wait_event_cond(). */
+	spin_lock_irqsave(&gdev->event_spinlock, flags);
+	session->cancel_waiters = true;
+	spin_unlock_irqrestore(&gdev->event_spinlock, flags);
+
+	wake_up(&gdev->event_wq);
+	return VINF_SUCCESS;
+}
+
+/**
+ * Checks if the VMM request is allowed in the context of the given session.
+ *
+ * @returns VBox status code
+ * @param   gdev	The Guest extension device.
+ * @param   session	The calling session.
+ * @param   req		The request.
+ */
+static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session,
+			   VMMDevRequestHeader const *req)
+{
+	const VMMDevReportGuestStatus *guest_status;
+	bool trusted_apps_only;
+
+	switch (req->requestType) {
+	/* Trusted users apps only. */
+	case VMMDevReq_QueryCredentials:
+	case VMMDevReq_ReportCredentialsJudgement:
+	case VMMDevReq_RegisterSharedModule:
+	case VMMDevReq_UnregisterSharedModule:
+	case VMMDevReq_WriteCoreDump:
+	case VMMDevReq_GetCpuHotPlugRequest:
+	case VMMDevReq_SetCpuHotPlugStatus:
+	case VMMDevReq_CheckSharedModules:
+	case VMMDevReq_GetPageSharingStatus:
+	case VMMDevReq_DebugIsPageShared:
+	case VMMDevReq_ReportGuestStats:
+	case VMMDevReq_ReportGuestUserState:
+	case VMMDevReq_GetStatisticsChangeRequest:
+	case VMMDevReq_ChangeMemBalloon:
+		trusted_apps_only = true;
+		break;
+
+	/* Anyone. */
+	case VMMDevReq_GetMouseStatus:
+	case VMMDevReq_SetMouseStatus:
+	case VMMDevReq_SetPointerShape:
+	case VMMDevReq_GetHostVersion:
+	case VMMDevReq_Idle:
+	case VMMDevReq_GetHostTime:
+	case VMMDevReq_SetPowerStatus:
+	case VMMDevReq_AcknowledgeEvents:
+	case VMMDevReq_CtlGuestFilterMask:
+	case VMMDevReq_ReportGuestStatus:
+	case VMMDevReq_GetDisplayChangeRequest:
+	case VMMDevReq_VideoModeSupported:
+	case VMMDevReq_GetHeightReduction:
+	case VMMDevReq_GetDisplayChangeRequest2:
+	case VMMDevReq_VideoModeSupported2:
+	case VMMDevReq_VideoAccelEnable:
+	case VMMDevReq_VideoAccelFlush:
+	case VMMDevReq_VideoSetVisibleRegion:
+	case VMMDevReq_GetDisplayChangeRequestEx:
+	case VMMDevReq_GetSeamlessChangeRequest:
+	case VMMDevReq_GetVRDPChangeRequest:
+	case VMMDevReq_LogString:
+	case VMMDevReq_GetSessionId:
+		trusted_apps_only = false;
+		break;
+
+	/**
+	 * @todo this has to be changed into an I/O control and the facilities
+	 *    tracked in the session so they can automatically be failed when
+	 *    the session terminates without reporting the new status.
+	 *
+	 * The information presented by IGuest is not reliable without this!
+	 */
+	/* Depends on the request parameters... */
+	case VMMDevReq_ReportGuestCapabilities:
+		guest_status = (const VMMDevReportGuestStatus *)req;
+		switch (guest_status->guestStatus.facility) {
+		case VBoxGuestFacilityType_All:
+		case VBoxGuestFacilityType_VBoxGuestDriver:
+			/* These facilities are owned by the driver itself. */
+			vbg_err("Denying userspace vmm report guest cap. call facility %#08x\n",
+				guest_status->guestStatus.facility);
+			return VERR_PERMISSION_DENIED;
+		case VBoxGuestFacilityType_VBoxService:
+			trusted_apps_only = true;
+			break;
+		case VBoxGuestFacilityType_VBoxTrayClient:
+		case VBoxGuestFacilityType_Seamless:
+		case VBoxGuestFacilityType_Graphics:
+		default:
+			trusted_apps_only = false;
+			break;
+		}
+		break;
+
+	/* Anything else is not allowed. */
+	default:
+		vbg_err("Denying userspace vmm call type %#08x\n",
+			req->requestType);
+		return VERR_PERMISSION_DENIED;
+	}
+
+	/*
+	 * Sessions opened through the world-accessible vboxuser device node
+	 * may not use trusted-apps-only request types.
+	 */
+	if (trusted_apps_only && session->user_session) {
+		vbg_err("Denying userspace vmm call type %#08x through vboxuser device node\n",
+			req->requestType);
+		return VERR_PERMISSION_DENIED;
+	}
+
+	return VINF_SUCCESS;
+}
+
+/*
+ * Verify, permission-check and then perform a raw VMMDev request passed
+ * in from userspace through VBOXGUEST_IOCTL_VMMREQUEST.
+ */
+static int vbg_ioctl_vmmrequest(struct vbg_dev *gdev,
+				struct vbg_session *session,
+				VMMDevRequestHeader *req, size_t req_size,
+				size_t *req_size_ret)
+{
+	int rc;
+
+	rc = vbg_req_verify(req, req_size);
+	if (rc >= 0)
+		rc = vbg_req_allowed(gdev, session, req);
+	if (rc >= 0)
+		rc = vbg_req_perform(gdev, req);
+	if (rc >= 0) {
+		WARN_ON(rc == VINF_HGCM_ASYNC_EXECUTE);
+		*req_size_ret = req->size;
+	}
+
+	return rc;
+}
+
+/**
+ * Handle VBOXGUEST_IOCTL_HGCM_CONNECT, connect to a HGCM service.
+ *
+ * The connected client id is stored in session->hgcm_client_ids so that
+ * it gets disconnected automatically on session close.
+ *
+ * @returns VBox status code
+ */
+static int vbg_ioctl_hgcm_connect(struct vbg_dev *gdev,
+				  struct vbg_session *session,
+				  VBoxGuestHGCMConnectInfo *info)
+{
+	unsigned long flags;
+	u32 client_id;
+	int i, rc;
+
+	/* Find a free place in the sessions clients array and claim it */
+	spin_lock_irqsave(&gdev->session_spinlock, flags);
+	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
+		if (!session->hgcm_client_ids[i]) {
+			/* U32_MAX marks the slot claimed-but-connecting. */
+			session->hgcm_client_ids[i] = U32_MAX;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&gdev->session_spinlock, flags);
+
+	if (i >= ARRAY_SIZE(session->hgcm_client_ids))
+		return VERR_TOO_MANY_OPEN_FILES;
+
+	rc = vbg_hgcm_connect(gdev, &info->Loc, &client_id);
+
+	/* Store the real client id on success, release the slot on failure. */
+	spin_lock_irqsave(&gdev->session_spinlock, flags);
+	if (rc >= 0) {
+		info->result = VINF_SUCCESS;
+		info->u32ClientID = client_id;
+		session->hgcm_client_ids[i] = client_id;
+	} else {
+		session->hgcm_client_ids[i] = 0;
+	}
+	spin_unlock_irqrestore(&gdev->session_spinlock, flags);
+
+	return rc;
+}
+
+/**
+ * Handle VBOXGUEST_IOCTL_HGCM_DISCONNECT, disconnect a HGCM client.
+ *
+ * Only client ids which were connected through this session are accepted.
+ *
+ * @returns VBox status code
+ */
+static int vbg_ioctl_hgcm_disconnect(struct vbg_dev *gdev,
+				     struct vbg_session *session,
+				     VBoxGuestHGCMDisconnectInfo *info)
+{
+	unsigned long flags;
+	u32 client_id;
+	int i, rc;
+
+	/* 0 means free slot, U32_MAX means claimed slot; both are invalid. */
+	client_id = info->u32ClientID;
+	if (client_id == 0 || client_id == U32_MAX)
+		return VERR_INVALID_HANDLE;
+
+	/* Mark the slot busy (U32_MAX) while the disconnect is in flight. */
+	spin_lock_irqsave(&gdev->session_spinlock, flags);
+	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
+		if (session->hgcm_client_ids[i] == client_id) {
+			session->hgcm_client_ids[i] = U32_MAX;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&gdev->session_spinlock, flags);
+
+	if (i >= ARRAY_SIZE(session->hgcm_client_ids))
+		return VERR_INVALID_HANDLE;
+
+	rc = vbg_hgcm_disconnect(gdev, client_id);
+
+	/* Free the slot on success, restore the client id on failure. */
+	spin_lock_irqsave(&gdev->session_spinlock, flags);
+	if (rc >= 0) {
+		info->result = VINF_SUCCESS;
+		session->hgcm_client_ids[i] = 0;
+	} else {
+		session->hgcm_client_ids[i] = client_id;
+	}
+	spin_unlock_irqrestore(&gdev->session_spinlock, flags);
+
+	return rc;
+}
+
+/**
+ * Handle VBOXGUEST_IOCTL_HGCM_CALL(_32/_TIMED), make a HGCM call.
+ *
+ * @returns VBox status code
+ * @param   gdev           The Guest extension device.
+ * @param   session        The calling session.
+ * @param   timeout_ms     Call timeout in ms, U32_MAX means no timeout.
+ * @param   f32bit         Set for calls from 32 bit processes on 64 bit kernels.
+ * @param   info           The call info, directly followed by the parameters.
+ * @param   info_size      Size of the buffer at info.
+ * @param   info_size_ret  Where to store the amount of returned data.
+ */
+static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev,
+			       struct vbg_session *session,
+			       u32 timeout_ms, bool f32bit,
+			       VBoxGuestHGCMCallInfo *info, size_t info_size,
+			       size_t *info_size_ret)
+{
+	u32 client_id = info->u32ClientID;
+	unsigned long flags;
+	size_t actual_size;
+	int i, rc;
+
+	if (info_size < sizeof(VBoxGuestHGCMCallInfo))
+		return VERR_BUFFER_OVERFLOW;
+
+	/* Capping cParms also guards the size calculations below. */
+	if (info->cParms > VBOX_HGCM_MAX_PARMS)
+		return VERR_INVALID_PARAMETER;
+
+	if (client_id == 0 || client_id == U32_MAX)
+		return VERR_INVALID_HANDLE;
+
+	/* The function parameters directly follow the call info header. */
+	actual_size = sizeof(*info);
+	if (f32bit)
+		actual_size += info->cParms * sizeof(HGCMFunctionParameter32);
+	else
+		actual_size += info->cParms * sizeof(HGCMFunctionParameter);
+	if (info_size < actual_size) {
+		vbg_debug("VBOXGUEST_IOCTL_HGCM_CALL: info_size=%#zx (%zu) required size is %#zx (%zu)\n",
+			  info_size, info_size, actual_size, actual_size);
+		return VERR_INVALID_PARAMETER;
+	}
+
+	/*
+	 * Validate the client id.
+	 */
+	spin_lock_irqsave(&gdev->session_spinlock, flags);
+	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++)
+		if (session->hgcm_client_ids[i] == client_id)
+			break;
+	spin_unlock_irqrestore(&gdev->session_spinlock, flags);
+	if (i >= ARRAY_SIZE(session->hgcm_client_ids)) {
+		vbg_debug("VBOXGUEST_IOCTL_HGCM_CALL: Invalid handle. u32Client=%#08x\n",
+			  client_id);
+		return VERR_INVALID_HANDLE;
+	}
+
+	if (f32bit)
+		rc = vbg_hgcm_call32(gdev, info, info_size, timeout_ms, true);
+	else
+		rc = vbg_hgcm_call(gdev, info, info_size, timeout_ms, true);
+
+	if (rc >= 0) {
+		*info_size_ret = actual_size;
+	} else if (rc == VERR_INTERRUPTED || rc == VERR_TIMEOUT ||
+		   rc == VERR_OUT_OF_RANGE) {
+		/* Expected failure modes, log at debug level only. */
+		vbg_debug("VBOXGUEST_IOCTL_HGCM_CALL%s error: %d\n",
+			  f32bit ? "32" : "64", rc);
+	} else {
+		vbg_err("VBOXGUEST_IOCTL_HGCM_CALL%s error: %d\n",
+			f32bit ? "32" : "64", rc);
+	}
+	return rc;
+}
+
+/**
+ * Handle a request for writing a core dump of the guest on the host.
+ *
+ * @returns VBox status code
+ *
+ * @param   gdev	The Guest extension device.
+ * @param   info	The output buffer.
+ */
+static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
+				     VBoxGuestWriteCoreDump *info)
+{
+	VMMDevReqWriteCoreDump *req;
+	int rc;
+
+	req = vbg_req_alloc(sizeof(*req), VMMDevReq_WriteCoreDump);
+	if (!req)
+		return VERR_NO_MEMORY;
+
+	req->fFlags = info->fFlags;
+	rc = vbg_req_perform(gdev, req);
+
+	kfree(req);
+	return rc;
+}
+
+/**
+ * Handle VBOXGUEST_IOCTL_CTL_FILTER_MASK.
+ *
+ * @returns VBox status code
+ * @param   gdev	The Guest extension device.
+ * @param   session	The session.
+ * @param   info	The request.
+ */
+static int vbg_ioctl_ctl_filter_mask(struct vbg_dev *gdev,
+				     struct vbg_session *session,
+				     VBoxGuestFilterMaskInfo *info)
+{
+	u32 changed_bits = info->u32OrMask | info->u32NotMask;
+
+	/* Reject attempts to touch bits outside the valid event mask. */
+	if (changed_bits & ~VMMDEV_EVENT_VALID_EVENT_MASK)
+		return VERR_INVALID_PARAMETER;
+
+	return vbg_set_session_event_filter(gdev, session, info->u32OrMask,
+					    info->u32NotMask, false);
+}
+
+/**
+ * Report guest supported mouse-features to the host.
+ *
+ * @returns 0 or negative errno value (the VBox status code from the host
+ *          is translated through vbg_status_code_to_errno()).
+ * @param   gdev	The Guest extension device.
+ * @param   features	The set of features to report to the host.
+ */
+int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features)
+{
+	VMMDevReqMouseStatus *req;
+	int rc;
+
+	req = vbg_req_alloc(sizeof(*req), VMMDevReq_SetMouseStatus);
+	if (!req)
+		return -ENOMEM;
+
+	req->mouseFeatures = features;
+	/* The guest is only reporting features, not a pointer position. */
+	req->pointerXPos = 0;
+	req->pointerYPos = 0;
+
+	rc = vbg_req_perform(gdev, req);
+	if (rc < 0)
+		vbg_err("%s error: %d\n", __func__, rc);
+
+	kfree(req);
+	return vbg_status_code_to_errno(rc);
+}
+
+/**
+ * Handle VBOXGUEST_IOCTL_SET_GUEST_CAPABILITIES.
+ *
+ * @returns VBox status code
+ * @param   gdev	The Guest extension device.
+ * @param   session	The session.
+ * @param   info	The request.
+ */
+static int vbg_ioctl_set_capabilities(struct vbg_dev *gdev,
+				      struct vbg_session *session,
+				      VBoxGuestSetCapabilitiesInfo *info)
+{
+	u32 changed_bits = info->u32OrMask | info->u32NotMask;
+
+	/* Reject attempts to touch bits outside the capabilities mask. */
+	if (changed_bits & ~VMMDEV_GUEST_CAPABILITIES_MASK)
+		return VERR_INVALID_PARAMETER;
+
+	return vbg_set_session_capabilities(gdev, session, info->u32OrMask,
+					    info->u32NotMask, false);
+}
+
+/**
+ * Common IOCtl for user to kernel communication.
+ *
+ * This function only does the basic validation and then invokes
+ * worker functions that take care of each specific function.
+ *
+ * @returns VBox status code
+ * @param   session        The client session.
+ * @param   req            The requested function.
+ * @param   data           The input/output data buffer.
+ * @param   data_size      The max size of the data buffer.
+ * @param   data_size_ret  Where to store the amount of returned data.
+ */
+int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data,
+		   size_t data_size, size_t *data_size_ret)
+{
+	VBoxGuestHGCMCallInfoTimed *timed = data;
+	struct vbg_dev *gdev = session->gdev;
+	bool f32bit = false;
+	size_t offset;
+	int rc;
+
+	*data_size_ret = 0;
+
+	/* All our ioctls are type 'V' dir RW */
+	if (_IOC_TYPE(req) != 'V' || _IOC_DIR(req) != (_IOC_READ|_IOC_WRITE))
+		return VERR_NOT_SUPPORTED;
+
+	/* First check for variable sized ioctls by ioc number. */
+	switch (_IOC_NR(req)) {
+	case _IOC_NR(VBOXGUEST_IOCTL_VMMREQUEST(0)):
+		return vbg_ioctl_vmmrequest(gdev, session,
+					    data, data_size, data_size_ret);
+#ifdef CONFIG_X86_64
+	case _IOC_NR(VBOXGUEST_IOCTL_HGCM_CALL_32(0)):
+		f32bit = true;
+		/* Fall through */
+#endif
+	case _IOC_NR(VBOXGUEST_IOCTL_HGCM_CALL(0)):
+		return vbg_ioctl_hgcm_call(gdev, session, U32_MAX, f32bit,
+					   data, data_size, data_size_ret);
+#ifdef CONFIG_X86_64
+	case _IOC_NR(VBOXGUEST_IOCTL_HGCM_CALL_TIMED_32(0)):
+		f32bit = true;
+		/* Fall through */
+#endif
+	case _IOC_NR(VBOXGUEST_IOCTL_HGCM_CALL_TIMED(0)):
+		/* The timeout sits in a header before the regular call info. */
+		offset = offsetof(VBoxGuestHGCMCallInfoTimed, info);
+		rc = vbg_ioctl_hgcm_call(gdev, session,
+					 timed->u32Timeout, f32bit,
+					 data + offset, data_size - offset,
+					 data_size_ret);
+		*data_size_ret += offset;
+		return rc;
+	case _IOC_NR(VBOXGUEST_IOCTL_LOG(0)):
+		vbg_info("%.*s", (int)data_size, (char *)data);
+		return VINF_SUCCESS;
+	}
+
+	/* Handle fixed size requests. */
+	*data_size_ret = data_size;
+
+	switch (req) {
+	case VBOXGUEST_IOCTL_WAITEVENT:
+		return vbg_ioctl_wait_event(gdev, session, data);
+	case VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS:
+		return vbg_ioctl_cancel_all_wait_events(gdev, session);
+	case VBOXGUEST_IOCTL_CTL_FILTER_MASK:
+		return vbg_ioctl_ctl_filter_mask(gdev, session, data);
+	case VBOXGUEST_IOCTL_HGCM_CONNECT:
+#ifdef CONFIG_X86_64 /* Needed because these are identical on 32 bit builds */
+	case VBOXGUEST_IOCTL_HGCM_CONNECT_32:
+#endif
+		return vbg_ioctl_hgcm_connect(gdev, session, data);
+	case VBOXGUEST_IOCTL_HGCM_DISCONNECT:
+#ifdef CONFIG_X86_64
+	case VBOXGUEST_IOCTL_HGCM_DISCONNECT_32:
+#endif
+		return vbg_ioctl_hgcm_disconnect(gdev, session, data);
+	case VBOXGUEST_IOCTL_CHECK_BALLOON:
+		/*
+		 * Under Linux we handle VMMDEV_EVENT_BALLOON_CHANGE_REQUEST
+		 * events entirely in the kernel, see vbg_core_isr().
+		 */
+		return VINF_SUCCESS;
+	case VBOXGUEST_IOCTL_CHANGE_BALLOON:
+		/* Under Linux we always handle the balloon in R0. */
+		return VERR_PERMISSION_DENIED;
+	case VBOXGUEST_IOCTL_WRITE_CORE_DUMP:
+		return vbg_ioctl_write_core_dump(gdev, data);
+	case VBOXGUEST_IOCTL_SET_MOUSE_STATUS:
+		vbg_err("VGDrvCommonIoCtl: VBOXGUEST_IOCTL_SET_MOUSE_STATUS should not be used under Linux\n");
+		return VERR_NOT_SUPPORTED;
+	case VBOXGUEST_IOCTL_GUEST_CAPS_ACQUIRE:
+		vbg_err("VGDrvCommonIoCtl: VBOXGUEST_IOCTL_GUEST_CAPS_ACQUIRE should not be used under Linux\n");
+		return VERR_NOT_SUPPORTED;
+	case VBOXGUEST_IOCTL_SET_GUEST_CAPABILITIES:
+		return vbg_ioctl_set_capabilities(gdev, session, data);
+	}
+
+	vbg_debug("VGDrvCommonIoCtl: Unknown req %#08x\n", req);
+	return VERR_NOT_SUPPORTED;
+}
+
+/** Core interrupt service routine (registered as a shared IRQ handler). */
+irqreturn_t vbg_core_isr(int irq, void *dev_id)
+{
+	struct vbg_dev *gdev = dev_id;
+	VMMDevEvents *req = gdev->ack_events_req;
+	bool mouse_position_changed = false;
+	unsigned long flags;
+	u32 events = 0;
+	int rc;
+
+	/* The IRQ is shared; check whether this device actually raised it. */
+	if (!gdev->mmio->V.V1_04.fHaveEvents)
+		return IRQ_NONE;
+
+	/* Get and acknowledge events (pre-allocated req, we are in IRQ ctx). */
+	req->header.rc = VERR_INTERNAL_ERROR;
+	req->events = 0;
+	rc = vbg_req_perform(gdev, req);
+	if (rc < 0) {
+		vbg_err("Error performing VMMDevEvents req: %d\n", rc);
+		return IRQ_NONE;
+	}
+
+	events = req->events;
+
+	/* Mouse events get forwarded to the input device, outside the lock. */
+	if (events & VMMDEV_EVENT_MOUSE_POSITION_CHANGED) {
+		mouse_position_changed = true;
+		events &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
+	}
+
+	/* HGCM completion, wake threads waiting in vbg_hgcm_call(). */
+	if (events & VMMDEV_EVENT_HGCM) {
+		wake_up(&gdev->hgcm_wq);
+		events &= ~VMMDEV_EVENT_HGCM;
+	}
+
+	/* Balloon changes are handled entirely in the kernel, via a work. */
+	if (events & VMMDEV_EVENT_BALLOON_CHANGE_REQUEST) {
+		schedule_work(&gdev->mem_balloon.work);
+		events &= ~VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
+	}
+
+	/* Remaining events are handed to waiters of the WAITEVENT ioctl. */
+	if (events) {
+		spin_lock_irqsave(&gdev->event_spinlock, flags);
+		gdev->pending_events |= events;
+		spin_unlock_irqrestore(&gdev->event_spinlock, flags);
+
+		wake_up(&gdev->event_wq);
+	}
+
+	if (mouse_position_changed)
+		vbg_linux_mouse_event(gdev);
+
+	return IRQ_HANDLED;
+}
diff --git a/drivers/virt/vboxguest/vboxguest_core.h b/drivers/virt/vboxguest/vboxguest_core.h
new file mode 100644
index 000000000000..5a84ffce6104
--- /dev/null
+++ b/drivers/virt/vboxguest/vboxguest_core.h
@@ -0,0 +1,194 @@
+/*
+ * Copyright (C) 2010-2016 Oracle Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, in which case the provisions of the CDDL are applicable
+ * instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#ifndef __VBOXGUEST_CORE_H__
+#define __VBOXGUEST_CORE_H__
+
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <linux/vbox_vmmdev.h>
+#include <linux/vboxguest.h>
+
+struct vbg_session;
+
+/** VBox guest memory balloon. */
+struct vbg_mem_balloon {
+	/** Work handling VMMDEV_EVENT_BALLOON_CHANGE_REQUEST events */
+	struct work_struct work;
+	/** Pre-allocated VMMDevGetMemBalloonChangeRequest for query */
+	VMMDevGetMemBalloonChangeRequest *get_req;
+	/** Pre-allocated VMMDevChangeMemBalloon req for inflate / deflate */
+	VMMDevChangeMemBalloon *change_req;
+	/** The current number of chunks in the balloon. */
+	u32 chunks;
+	/** The maximum number of chunks in the balloon. */
+	u32 max_chunks;
+	/**
+	 * Array of pointers to page arrays. A page array is allocated for
+	 * each chunk when inflating, and freed when deflating.
+	 */
+	struct page ***pages;
+};
+
+/**
+ * Per bit usage tracker for a u32 mask.
+ *
+ * Used for optimal handling of the event filter and guest capabilities
+ * masks, which are shared between all sessions (see event_filter_tracker
+ * and guest_caps_tracker in struct vbg_dev below).
+ */
+struct vbg_bit_usage_tracker {
+	/** Per bit usage counters. */
+	u32 per_bit_usage[32];
+	/** The current mask according to per_bit_usage. */
+	u32 mask;
+};
+
+/** VBox guest device (data) extension. */
+struct vbg_dev {
+	struct device *dev;
+	/** The base of the adapter I/O ports. */
+	u16 io_port;
+	/** Pointer to the mapping of the VMMDev adapter memory. */
+	VMMDevMemory *mmio;
+	/** Host version */
+	char host_version[64];
+	/** Host features */
+	unsigned int host_features;
+	/**
+	 * Dummy page and vmap address for reserved kernel virtual-address
+	 * space for the guest mappings, only used on hosts lacking vtx.
+	 */
+	struct page *guest_mappings_dummy_page;
+	void *guest_mappings;
+	/** Spinlock protecting pending_events. */
+	spinlock_t event_spinlock;
+	/** Preallocated VMMDevEvents for the IRQ handler. */
+	VMMDevEvents *ack_events_req;
+	/** Wait-for-event list for threads waiting for multiple events. */
+	wait_queue_head_t event_wq;
+	/** Mask of pending events. */
+	u32 pending_events;
+	/** Wait-for-event list for threads waiting on HGCM async completion. */
+	wait_queue_head_t hgcm_wq;
+	/** Pre-allocated hgcm cancel2 req. for cancellation on timeout */
+	VMMDevHGCMCancel2 *cancel_req;
+	/** Mutex protecting cancel_req accesses */
+	struct mutex cancel_req_mutex;
+	/** Pre-allocated mouse-status request for the input-device handling. */
+	VMMDevReqMouseStatus *mouse_status_req;
+	/** Input device for reporting abs mouse coordinates to the guest. */
+	struct input_dev *input;
+
+	/** Spinlock protecting session_list and items in struct vbg_session. */
+	spinlock_t session_spinlock;
+	/** List of guest sessions, protected by session_spinlock. */
+	struct list_head session_list;
+	/** Memory balloon information. */
+	struct vbg_mem_balloon mem_balloon;
+
+	/**
+	 * @name Host Event Filtering
+	 * @{
+	 */
+
+	/** Events we won't permit anyone to filter out. */
+	u32 fixed_events;
+	/** Usage counters for the host events (excludes fixed events). */
+	struct vbg_bit_usage_tracker event_filter_tracker;
+	/** The event filter last reported to the host (or UINT32_MAX). */
+	u32 event_filter_host;
+	/** @} */
+
+	/**
+	 * @name Guest Capabilities
+	 * @{
+	 */
+
+	/**
+	 * Usage counters for guest capabilities. Indexed by capability bit
+	 * number, one count per session using a capability.
+	 */
+	struct vbg_bit_usage_tracker guest_caps_tracker;
+	/** The guest capabilities last reported to the host (or UINT32_MAX). */
+	u32 guest_caps_host;
+	/** @} */
+
+	/**
+	 * Heartbeat timer which fires every heartbeat_interval_ms; its
+	 * handler sends a VMMDevReq_GuestHeartbeat request to the VMMDev.
+	 */
+	struct timer_list heartbeat_timer;
+	/** Heartbeat timer interval in ms. */
+	int heartbeat_interval_ms;
+	/** Preallocated VMMDevReq_GuestHeartbeat request. */
+	VMMDevRequestHeader *guest_heartbeat_req;
+
+	/** "vboxguest" char-device */
+	struct miscdevice misc_device;
+	/** "vboxuser" char-device */
+	struct miscdevice misc_device_user;
+};
+
+/** The VBoxGuest per session data. */
+struct vbg_session {
+	/** The list node. */
+	struct list_head list_node;
+	/** Pointer to the device extension. */
+	struct vbg_dev *gdev;
+
+	/**
+	 * Array containing HGCM client IDs associated with this session.
+	 * These will be automatically disconnected when the session is closed.
+	 * 0 means a free slot, U32_MAX a claimed slot with connect /
+	 * disconnect in progress.
+	 */
+	u32 hgcm_client_ids[64];
+	/**
+	 * Host events requested by the session.
+	 * An event type requested in any guest session will be added to the
+	 * host filter. Protected by struct vbg_dev.session_spinlock.
+	 */
+	u32 event_filter;
+	/**
+	 * Guest capabilities for this session.
+	 * A capability claimed by any guest session will be reported to the
+	 * host. Protected by struct vbg_dev.session_spinlock.
+	 */
+	u32 guest_caps;
+	/** Does this session belong to a root process or a user one? */
+	bool user_session;
+	/** Set on CANCEL_ALL_WAITEVENTS, protected by the event_spinlock. */
+	bool cancel_waiters;
+};
+
+/* Interface between the os-independent core and the Linux glue code. */
+int  vbg_core_init(struct vbg_dev *gdev, u32 fixed_events);
+void vbg_core_exit(struct vbg_dev *gdev);
+int  vbg_core_open_session(struct vbg_dev *gdev,
+			   struct vbg_session **session_ret, bool user_session);
+void vbg_core_close_session(struct vbg_session *session);
+int  vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data,
+		    size_t data_size, size_t *data_size_ret);
+int  vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features);
+
+irqreturn_t vbg_core_isr(int irq, void *dev_id);
+
+void vbg_linux_mouse_event(struct vbg_dev *gdev);
+
+#endif
diff --git a/drivers/virt/vboxguest/vboxguest_linux.c b/drivers/virt/vboxguest/vboxguest_linux.c
new file mode 100644
index 000000000000..bc4268b161cc
--- /dev/null
+++ b/drivers/virt/vboxguest/vboxguest_linux.c
@@ -0,0 +1,478 @@
+/*
+ * vboxguest linux pci driver, char-dev and input-device code,
+ *
+ * Copyright (C) 2006-2016 Oracle Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/input.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/poll.h>
+#include <linux/vbox_utils.h>
+#include "vboxguest_core.h"
+
+/** The device name. */
+#define DEVICE_NAME		"vboxguest"
+/** The device name for the device node open to everyone. */
+#define DEVICE_NAME_USER	"vboxuser"
+/** VirtualBox PCI vendor ID. */
+#define VBOX_VENDORID		0x80ee
+/** VMMDev PCI card product ID. */
+#define VMMDEV_DEVICEID		0xcafe
+
+/** Mutex protecting the global vbg_gdev pointer used by vbg_get/put_gdev. */
+static DEFINE_MUTEX(vbg_gdev_mutex);
+/** Global vbg_gdev pointer used by vbg_get/put_gdev. */
+static struct vbg_dev *vbg_gdev;
+
+/**
+ * Open the /dev/vboxguest device node, creating a trusted (root) session.
+ *
+ * @returns 0 on success, negated errno on failure.
+ */
+static int vbg_misc_device_open(struct inode *inode, struct file *filp)
+{
+	struct vbg_session *session;
+	struct vbg_dev *gdev;
+	int ret;
+
+	/* misc_open sets filp->private_data to our misc device */
+	gdev = container_of(filp->private_data, struct vbg_dev, misc_device);
+
+	ret = vbg_core_open_session(gdev, &session, false);
+	if (ret)
+		return ret; /* propagate the real error, not hardcoded -ENOMEM */
+
+	filp->private_data = session;
+	return 0;
+}
+
+/*
+ * Open the world-accessible /dev/vboxuser device node; the resulting
+ * session is marked as an (untrusted) user session.
+ */
+static int vbg_misc_device_user_open(struct inode *inode, struct file *filp)
+{
+	struct vbg_session *session;
+	struct vbg_dev *gdev;
+	int ret;
+
+	/* misc_open sets filp->private_data to our misc device */
+	gdev = container_of(filp->private_data, struct vbg_dev,
+			    misc_device_user);
+
+	ret = vbg_core_open_session(gdev, &session, true);
+	if (ret == 0)
+		filp->private_data = session;
+
+	return ret;
+}
+
+/**
+ * Close device, freeing the session created on open.
+ *
+ * @param   inode	Pointer to inode info structure.
+ * @param   filp	Associated file pointer.
+ */
+static int vbg_misc_device_close(struct inode *inode, struct file *filp)
+{
+	struct vbg_session *session = filp->private_data;
+
+	filp->private_data = NULL;
+	vbg_core_close_session(session);
+	return 0;
+}
+
+/**
+ * Device I/O Control entry point.
+ *
+ * @returns -ENOMEM or -EFAULT for errors inside the ioctl callback; 0
+ * on success, or a positive VBox status code on vbox guest-device errors.
+ *
+ * @param   filp	Associated file pointer.
+ * @param   req		The request specified to ioctl().
+ * @param   arg		The argument specified to ioctl().
+ */
+static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
+				  unsigned long arg)
+{
+	struct vbg_session *session = filp->private_data;
+	size_t returned_data_size, size = _IOC_SIZE(req);
+	/* Properly __user-annotated view of arg for copy_from/to_user. */
+	void __user *uarg = (void __user *)arg;
+	void *buf, *buf_free = NULL;
+	u8 stack_buf[32];
+	int rc, ret = 0;
+
+	/*
+	 * The largest ioctl req is a hgcm-call with 32 function arguments,
+	 * which is 16 + 32 * 16 bytes. However the variable length buffer
+	 * passed to VMMDevReq_LogString may be longer. Allow logging up to
+	 * 1024 bytes (minus header size).
+	 */
+	if (size > 1024)
+		return -E2BIG;
+
+	/*
+	 * For small amounts of data being passed we use a stack based buffer
+	 * except for VMMREQUESTs where the data must not be on the stack.
+	 */
+	if (size <= sizeof(stack_buf) &&
+		    _IOC_NR(req) != _IOC_NR(VBOXGUEST_IOCTL_VMMREQUEST(0))) {
+		buf = stack_buf;
+	} else {
+		/* __GFP_DMA32 for VBOXGUEST_IOCTL_VMMREQUEST */
+		buf_free = buf = kmalloc(size, GFP_KERNEL | __GFP_DMA32);
+		if (!buf)
+			return -ENOMEM;
+	}
+
+	if (copy_from_user(buf, uarg, size)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	rc = vbg_core_ioctl(session, req, buf, size, &returned_data_size);
+	if (rc < 0) {
+		/* Negate the Vbox status code to make it positive. */
+		ret = -rc;
+		goto out;
+	}
+
+	/* Never copy back more than the caller's buffer can hold. */
+	if (returned_data_size > size) {
+		vbg_debug("%s: too much output data %zu > %zu\n",
+			  __func__, returned_data_size, size);
+		returned_data_size = size;
+	}
+	if (returned_data_size > 0) {
+		if (copy_to_user(uarg, buf, returned_data_size) != 0)
+			ret = -EFAULT;
+	}
+
+out:
+	kfree(buf_free);
+
+	return ret;
+}
+
+/** The file_operations structures. */
+static const struct file_operations vbg_misc_device_fops = {
+	.owner			= THIS_MODULE,
+	.open			= vbg_misc_device_open,
+	.release		= vbg_misc_device_close,
+	.unlocked_ioctl		= vbg_misc_device_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl		= vbg_misc_device_ioctl,
+#endif
+};
+/* Identical except for open(), which creates an (untrusted) user session. */
+static const struct file_operations vbg_misc_device_user_fops = {
+	.owner			= THIS_MODULE,
+	.open			= vbg_misc_device_user_open,
+	.release		= vbg_misc_device_close,
+	.unlocked_ioctl		= vbg_misc_device_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl		= vbg_misc_device_ioctl,
+#endif
+};
+
+/**
+ * Called when the input device is first opened.
+ *
+ * Asks the host to start sending absolute mouse coordinates.
+ */
+static int vbg_input_open(struct input_dev *input)
+{
+	struct vbg_dev *gdev = input_get_drvdata(input);
+	u32 feat = VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE | VMMDEV_MOUSE_NEW_PROTOCOL;
+
+	return vbg_core_set_mouse_status(gdev, feat);
+}
+
+/**
+ * Called when the last open handle to the input device is closed.
+ *
+ * Disables absolute mouse reporting again.
+ */
+static void vbg_input_close(struct input_dev *input)
+{
+	vbg_core_set_mouse_status(input_get_drvdata(input), 0);
+}
+
+/**
+ * Allocates and registers the input device used to report the absolute
+ * mouse coordinates received from the host.
+ *
+ * @returns 0 on success, negated errno on failure.
+ */
+static int vbg_create_input_device(struct vbg_dev *gdev)
+{
+	struct input_dev *input_dev;
+
+	input_dev = devm_input_allocate_device(gdev->dev);
+	if (!input_dev)
+		return -ENOMEM;
+
+	input_dev->name = "VirtualBox mouse integration";
+	input_dev->id.bustype = BUS_PCI;
+	input_dev->id.vendor = VBOX_VENDORID;
+	input_dev->id.product = VMMDEV_DEVICEID;
+	input_dev->dev.parent = gdev->dev;
+	input_dev->open = vbg_input_open;
+	input_dev->close = vbg_input_close;
+
+	input_set_capability(input_dev, EV_KEY, BTN_MOUSE);
+	input_set_abs_params(input_dev, ABS_X, VMMDEV_MOUSE_RANGE_MIN,
+			     VMMDEV_MOUSE_RANGE_MAX, 0, 0);
+	input_set_abs_params(input_dev, ABS_Y, VMMDEV_MOUSE_RANGE_MIN,
+			     VMMDEV_MOUSE_RANGE_MAX, 0, 0);
+	input_set_drvdata(input_dev, gdev);
+
+	gdev->input = input_dev;
+	return input_register_device(gdev->input);
+}
+
+/* sysfs attribute exposing the host (VirtualBox) version string. */
+static ssize_t host_version_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct vbg_dev *gdev = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%s\n", gdev->host_version);
+}
+
+/* sysfs attribute exposing the host feature mask in hex. */
+static ssize_t host_features_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct vbg_dev *gdev = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%#x\n", gdev->host_features);
+}
+
+static DEVICE_ATTR_RO(host_version);
+static DEVICE_ATTR_RO(host_features);
+
+/**
+ * Does the PCI detection and init of the device.
+ *
+ * The VMMDev PCI device has two resources: BAR0 is the I/O port used to
+ * submit requests to the host, BAR1 is the VMMDev shared memory region.
+ *
+ * @returns 0 on success, negated errno on failure.
+ */
+static int vbg_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
+{
+	struct device *dev = &pci->dev;
+	resource_size_t io, io_len, mmio, mmio_len;
+	VMMDevMemory *vmmdev;
+	struct vbg_dev *gdev;
+	int ret;
+
+	gdev = devm_kzalloc(dev, sizeof(*gdev), GFP_KERNEL);
+	if (!gdev)
+		return -ENOMEM;
+
+	ret = pci_enable_device(pci);
+	if (ret != 0) {
+		vbg_err("vboxguest: Error enabling device: %d\n", ret);
+		return ret;
+	}
+
+	ret = -ENODEV;
+
+	/* BAR0: the request submission I/O port. */
+	io = pci_resource_start(pci, 0);
+	io_len = pci_resource_len(pci, 0);
+	if (!io || !io_len) {
+		vbg_err("vboxguest: Error IO-port resource (0) is missing\n");
+		goto err_disable_pcidev;
+	}
+	if (devm_request_region(dev, io, io_len, DEVICE_NAME) == NULL) {
+		vbg_err("vboxguest: Error could not claim IO resource\n");
+		ret = -EBUSY;
+		goto err_disable_pcidev;
+	}
+
+	/* BAR1: the VMMDev shared memory region. */
+	mmio = pci_resource_start(pci, 1);
+	mmio_len = pci_resource_len(pci, 1);
+	if (!mmio || !mmio_len) {
+		vbg_err("vboxguest: Error MMIO resource (1) is missing\n");
+		goto err_disable_pcidev;
+	}
+
+	if (devm_request_mem_region(dev, mmio, mmio_len, DEVICE_NAME) == NULL) {
+		vbg_err("vboxguest: Error could not claim MMIO resource\n");
+		ret = -EBUSY;
+		goto err_disable_pcidev;
+	}
+
+	vmmdev = devm_ioremap(dev, mmio, mmio_len);
+	if (!vmmdev) {
+		vbg_err("vboxguest: Error ioremap failed; MMIO addr=%p size=%d\n",
+			(void *)mmio, (int)mmio_len);
+		goto err_disable_pcidev;
+	}
+
+	/* Validate MMIO region version and size. */
+	if (vmmdev->u32Version != VMMDEV_MEMORY_VERSION ||
+	    vmmdev->u32Size < 32 || vmmdev->u32Size > mmio_len) {
+		vbg_err("vboxguest: Bogus VMMDev memory; u32Version=%08x (expected %08x) u32Size=%d (expected <= %d)\n",
+			vmmdev->u32Version, VMMDEV_MEMORY_VERSION,
+			vmmdev->u32Size, (int)mmio_len);
+		goto err_disable_pcidev;
+	}
+
+	gdev->io_port = io;
+	gdev->mmio = vmmdev;
+	gdev->dev = dev;
+	gdev->misc_device.minor = MISC_DYNAMIC_MINOR;
+	gdev->misc_device.name = DEVICE_NAME;
+	gdev->misc_device.fops = &vbg_misc_device_fops;
+	gdev->misc_device_user.minor = MISC_DYNAMIC_MINOR;
+	gdev->misc_device_user.name = DEVICE_NAME_USER;
+	gdev->misc_device_user.fops = &vbg_misc_device_user_fops;
+
+	ret = vbg_core_init(gdev, VMMDEV_EVENT_MOUSE_POSITION_CHANGED);
+	if (ret)
+		goto err_disable_pcidev;
+
+	ret = vbg_create_input_device(gdev);
+	if (ret) {
+		vbg_err("vboxguest: Error creating input device: %d\n", ret);
+		goto err_vbg_core_exit;
+	}
+
+	ret = devm_request_irq(dev, pci->irq, vbg_core_isr, IRQF_SHARED,
+			       DEVICE_NAME, gdev);
+	if (ret) {
+		vbg_err("vboxguest: Error requesting irq: %d\n", ret);
+		goto err_vbg_core_exit;
+	}
+
+	ret = misc_register(&gdev->misc_device);
+	if (ret) {
+		vbg_err("vboxguest: Error misc_register %s failed: %d\n",
+			DEVICE_NAME, ret);
+		goto err_vbg_core_exit;
+	}
+
+	ret = misc_register(&gdev->misc_device_user);
+	if (ret) {
+		vbg_err("vboxguest: Error misc_register %s failed: %d\n",
+			DEVICE_NAME_USER, ret);
+		goto err_unregister_misc_device;
+	}
+
+	/* Only a single VMMDev PCI device may be bound at a time. */
+	mutex_lock(&vbg_gdev_mutex);
+	if (!vbg_gdev)
+		vbg_gdev = gdev;
+	else
+		ret = -EBUSY;
+	mutex_unlock(&vbg_gdev_mutex);
+
+	if (ret) {
+		vbg_err("vboxguest: Error more than 1 vbox guest pci device\n");
+		goto err_unregister_misc_device_user;
+	}
+
+	pci_set_drvdata(pci, gdev);
+	/* Sysfs attribute creation failure is deliberately not treated as fatal. */
+	device_create_file(dev, &dev_attr_host_version);
+	device_create_file(dev, &dev_attr_host_features);
+
+	vbg_info("vboxguest: misc device minor %d, IRQ %d, I/O port %x, MMIO at %p (size %d)\n",
+		 gdev->misc_device.minor, pci->irq, gdev->io_port,
+		 (void *)mmio, (int)mmio_len);
+
+	return 0;
+
+err_unregister_misc_device_user:
+	misc_deregister(&gdev->misc_device_user);
+err_unregister_misc_device:
+	misc_deregister(&gdev->misc_device);
+err_vbg_core_exit:
+	vbg_core_exit(gdev);
+err_disable_pcidev:
+	pci_disable_device(pci);
+
+	return ret;
+}
+
+/* PCI remove callback, undoes everything vbg_pci_probe() set up. */
+static void vbg_pci_remove(struct pci_dev *pci)
+{
+	struct vbg_dev *gdev = pci_get_drvdata(pci);
+
+	/*
+	 * Clear the global pointer first; vbg_get_gdev() holds this mutex
+	 * while the device is in use, so once we get past here no new
+	 * users can appear and tearing the device down is safe.
+	 */
+	mutex_lock(&vbg_gdev_mutex);
+	vbg_gdev = NULL;
+	mutex_unlock(&vbg_gdev_mutex);
+
+	device_remove_file(gdev->dev, &dev_attr_host_features);
+	device_remove_file(gdev->dev, &dev_attr_host_version);
+	misc_deregister(&gdev->misc_device_user);
+	misc_deregister(&gdev->misc_device);
+	vbg_core_exit(gdev);
+	pci_disable_device(pci);
+}
+
+/**
+ * Gets a reference to the one-and-only vboxguest device, for use by e.g.
+ * the vboxsf driver.
+ *
+ * @returns The device, or ERR_PTR(-ENODEV) when no device is bound.
+ */
+struct vbg_dev *vbg_get_gdev(void)
+{
+	mutex_lock(&vbg_gdev_mutex);
+
+	/*
+	 * Note on success we keep the mutex locked until vbg_put_gdev(),
+	 * this stops vbg_pci_remove from removing the device from underneath
+	 * vboxsf. vboxsf will only hold a reference for a short while.
+	 */
+	if (vbg_gdev)
+		return vbg_gdev;
+
+	mutex_unlock(&vbg_gdev_mutex);
+	return ERR_PTR(-ENODEV);
+}
+EXPORT_SYMBOL(vbg_get_gdev);
+
+/* Drops the reference taken by vbg_get_gdev(), releasing vbg_gdev_mutex. */
+void vbg_put_gdev(struct vbg_dev *gdev)
+{
+	WARN_ON(gdev != vbg_gdev);
+	mutex_unlock(&vbg_gdev_mutex);
+}
+EXPORT_SYMBOL(vbg_put_gdev);
+
+/**
+ * Callback for mouse events.
+ *
+ * This is called at the end of the ISR, after leaving the event spinlock, if
+ * VMMDEV_EVENT_MOUSE_POSITION_CHANGED was raised by the host.
+ *
+ * @param   gdev	The device extension.
+ */
+void vbg_linux_mouse_event(struct vbg_dev *gdev)
+{
+	int rc;
+
+	/* Report events to the kernel input device */
+	/*
+	 * Clear the pre-allocated request fields, then let the host fill in
+	 * the current absolute pointer position via vbg_req_perform().
+	 */
+	gdev->mouse_status_req->mouseFeatures = 0;
+	gdev->mouse_status_req->pointerXPos = 0;
+	gdev->mouse_status_req->pointerYPos = 0;
+	rc = vbg_req_perform(gdev, gdev->mouse_status_req);
+	if (rc >= 0) {
+		input_report_abs(gdev->input, ABS_X,
+				 gdev->mouse_status_req->pointerXPos);
+		input_report_abs(gdev->input, ABS_Y,
+				 gdev->mouse_status_req->pointerYPos);
+		input_sync(gdev->input);
+	}
+}
+
+/* PCI ids of the VirtualBox Guest (VMMDev) PCI device this driver binds. */
+static const struct pci_device_id vbg_pci_ids[] = {
+	{ .vendor = VBOX_VENDORID, .device = VMMDEV_DEVICEID },
+	{}
+};
+MODULE_DEVICE_TABLE(pci,  vbg_pci_ids);
+
+/* PCI driver glue; module init/exit are generated by module_pci_driver(). */
+static struct pci_driver vbg_pci_driver = {
+	.name		= DEVICE_NAME,
+	.id_table	= vbg_pci_ids,
+	.probe		= vbg_pci_probe,
+	.remove		= vbg_pci_remove,
+};
+
+module_pci_driver(vbg_pci_driver);
+
+MODULE_AUTHOR("Oracle Corporation");
+MODULE_DESCRIPTION("Oracle VM VirtualBox Guest Additions for Linux Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c
new file mode 100644
index 000000000000..4d5a0b64f312
--- /dev/null
+++ b/drivers/virt/vboxguest/vboxguest_utils.c
@@ -0,0 +1,1094 @@
+/*
+ * vboxguest vmm-req and hgcm-call code, GenericRequest.c, HGCMInternal.c
+ * and RTErrConvertToErrno.cpp in vbox upstream svn.
+ *
+ * Copyright (C) 2006-2016 Oracle Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, in which case the provisions of the CDDL are applicable
+ * instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/vbox_err.h>
+#include <linux/vbox_utils.h>
+#include "vboxguest_core.h"
+
+/** The max parameter buffer size for a user request. */
+#define VBGLR0_MAX_HGCM_USER_PARM	(24 * SZ_1M)
+/** The max parameter buffer size for a kernel request. */
+#define VBGLR0_MAX_HGCM_KERNEL_PARM	(16 * SZ_1M)
+
+/* I/O port used by the VBG_LOG() functions to emit debug-log bytes. */
+#define VBG_DEBUG_PORT			0x504
+
+/* This protects vbg_log_buf and serializes VBG_DEBUG_PORT accesses */
+static DEFINE_SPINLOCK(vbg_log_lock);
+static char vbg_log_buf[128];
+
+/*
+ * Generates an exported printf-like logging function which sends each
+ * formatted message both byte-by-byte to the VBG_DEBUG_PORT I/O port and
+ * to the kernel log via the given pr_* function. Messages longer than
+ * sizeof(vbg_log_buf) - 1 are truncated by vscnprintf().
+ */
+#define VBG_LOG(name, pr_func) \
+void name(const char *fmt, ...)						\
+{									\
+	unsigned long flags;						\
+	va_list args;							\
+	int i, count;							\
+									\
+	va_start(args, fmt);						\
+	spin_lock_irqsave(&vbg_log_lock, flags);			\
+									\
+	count = vscnprintf(vbg_log_buf, sizeof(vbg_log_buf), fmt, args);\
+	for (i = 0; i < count; i++)					\
+		outb(vbg_log_buf[i], VBG_DEBUG_PORT);			\
+									\
+	pr_func("%s", vbg_log_buf);					\
+									\
+	spin_unlock_irqrestore(&vbg_log_lock, flags);			\
+	va_end(args);							\
+}									\
+EXPORT_SYMBOL(name)
+
+VBG_LOG(vbg_info, pr_info);
+VBG_LOG(vbg_warn, pr_warn);
+VBG_LOG(vbg_err, pr_err);
+#if defined(DEBUG) && !defined(CONFIG_DYNAMIC_DEBUG)
+VBG_LOG(vbg_debug, pr_debug);
+#endif
+
+/**
+ * Helper to determine the minimum request size for the given request.
+ * Returns 0 if the given operation is not handled and/or supported.
+ *
+ * Note: for variable size requests (e.g. VMMDevReq_LogString) this is the
+ * size of the fixed part only; vbg_req_verify() allows larger buffers.
+ *
+ * @returns Size.
+ * @param   req		The VMMDev request to get the size for.
+ */
+static size_t vbg_req_get_min_size(const VMMDevRequestHeader *req)
+{
+	switch (req->requestType) {
+	case VMMDevReq_GetMouseStatus:
+	case VMMDevReq_SetMouseStatus:
+		return sizeof(VMMDevReqMouseStatus);
+	case VMMDevReq_SetPointerShape:
+		return sizeof(VMMDevReqMousePointer);
+	case VMMDevReq_GetHostVersion:
+		return sizeof(VMMDevReqHostVersion);
+	case VMMDevReq_Idle:
+		return sizeof(VMMDevReqIdle);
+	case VMMDevReq_GetHostTime:
+		return sizeof(VMMDevReqHostTime);
+	case VMMDevReq_GetHypervisorInfo:
+	case VMMDevReq_SetHypervisorInfo:
+		return sizeof(VMMDevReqHypervisorInfo);
+	case VMMDevReq_RegisterPatchMemory:
+	case VMMDevReq_DeregisterPatchMemory:
+		return sizeof(VMMDevReqPatchMemory);
+	case VMMDevReq_SetPowerStatus:
+		return sizeof(VMMDevPowerStateRequest);
+	case VMMDevReq_AcknowledgeEvents:
+		return sizeof(VMMDevEvents);
+	case VMMDevReq_ReportGuestInfo:
+		return sizeof(VMMDevReportGuestInfo);
+	case VMMDevReq_ReportGuestInfo2:
+		return sizeof(VMMDevReportGuestInfo2);
+	case VMMDevReq_ReportGuestStatus:
+		return sizeof(VMMDevReportGuestStatus);
+	case VMMDevReq_ReportGuestUserState:
+		return sizeof(VMMDevReportGuestUserState);
+	case VMMDevReq_GetDisplayChangeRequest:
+		return sizeof(VMMDevDisplayChangeRequest);
+	case VMMDevReq_GetDisplayChangeRequest2:
+		return sizeof(VMMDevDisplayChangeRequest2);
+	case VMMDevReq_GetDisplayChangeRequestEx:
+		return sizeof(VMMDevDisplayChangeRequestEx);
+	case VMMDevReq_VideoModeSupported:
+		return sizeof(VMMDevVideoModeSupportedRequest);
+	case VMMDevReq_GetHeightReduction:
+		return sizeof(VMMDevGetHeightReductionRequest);
+	case VMMDevReq_ReportGuestCapabilities:
+		return sizeof(VMMDevReqGuestCapabilities);
+	case VMMDevReq_SetGuestCapabilities:
+		return sizeof(VMMDevReqGuestCapabilities2);
+	case VMMDevReq_HGCMConnect:
+		return sizeof(VMMDevHGCMConnect);
+	case VMMDevReq_HGCMDisconnect:
+		return sizeof(VMMDevHGCMDisconnect);
+	case VMMDevReq_HGCMCall32:
+		return sizeof(VMMDevHGCMCall);
+	case VMMDevReq_HGCMCall64:
+		return sizeof(VMMDevHGCMCall);
+	case VMMDevReq_HGCMCancel:
+		return sizeof(VMMDevHGCMCancel);
+	case VMMDevReq_VideoAccelEnable:
+		return sizeof(VMMDevVideoAccelEnable);
+	case VMMDevReq_VideoAccelFlush:
+		return sizeof(VMMDevVideoAccelFlush);
+	case VMMDevReq_VideoSetVisibleRegion:
+		/*
+		 * The original protocol didn't consider a guest with NO
+		 * visible windows.
+		 */
+		return sizeof(VMMDevVideoSetVisibleRegion) - sizeof(RTRECT);
+	case VMMDevReq_GetSeamlessChangeRequest:
+		return sizeof(VMMDevSeamlessChangeRequest);
+	case VMMDevReq_QueryCredentials:
+		return sizeof(VMMDevCredentials);
+	case VMMDevReq_ReportGuestStats:
+		return sizeof(VMMDevReportGuestStats);
+	case VMMDevReq_GetMemBalloonChangeRequest:
+		return sizeof(VMMDevGetMemBalloonChangeRequest);
+	case VMMDevReq_GetStatisticsChangeRequest:
+		return sizeof(VMMDevGetStatisticsChangeRequest);
+	case VMMDevReq_ChangeMemBalloon:
+		return sizeof(VMMDevChangeMemBalloon);
+	case VMMDevReq_GetVRDPChangeRequest:
+		return sizeof(VMMDevVRDPChangeRequest);
+	case VMMDevReq_LogString:
+		return sizeof(VMMDevReqLogString);
+	case VMMDevReq_CtlGuestFilterMask:
+		return sizeof(VMMDevCtlGuestFilterMask);
+	case VMMDevReq_GetCpuHotPlugRequest:
+		return sizeof(VMMDevGetCpuHotPlugRequest);
+	case VMMDevReq_SetCpuHotPlugStatus:
+		return sizeof(VMMDevCpuHotPlugStatusRequest);
+	case VMMDevReq_RegisterSharedModule:
+		return sizeof(VMMDevSharedModuleRegistrationRequest);
+	case VMMDevReq_UnregisterSharedModule:
+		return sizeof(VMMDevSharedModuleUnregistrationRequest);
+	case VMMDevReq_CheckSharedModules:
+		return sizeof(VMMDevSharedModuleCheckRequest);
+	case VMMDevReq_GetPageSharingStatus:
+		return sizeof(VMMDevPageSharingStatusRequest);
+	case VMMDevReq_DebugIsPageShared:
+		return sizeof(VMMDevPageIsSharedRequest);
+	case VMMDevReq_GetSessionId:
+		return sizeof(VMMDevReqSessionId);
+	case VMMDevReq_HeartbeatConfigure:
+		return sizeof(VMMDevReqHeartbeat);
+	case VMMDevReq_GuestHeartbeat:
+		return sizeof(VMMDevRequestHeader);
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/**
+ * Verifies that a VMMDev request is consistent with the buffer it lives in,
+ * i.e. that the declared size matches the request type and fits the buffer.
+ *
+ * @returns VBox status code (VINF_SUCCESS or VERR_*).
+ * @param   req		The request header to verify.
+ * @param   buffer_size	The size of the buffer holding the request.
+ */
+int vbg_req_verify(const VMMDevRequestHeader *req, size_t buffer_size)
+{
+	size_t min_size;
+
+	if (!req || buffer_size < sizeof(VMMDevRequestHeader)) {
+		vbg_debug("VbglGRVerify: Invalid parameter: req = %p, buffer_size = %zu\n",
+			  req, buffer_size);
+		return VERR_INVALID_PARAMETER;
+	}
+
+	if (req->size > buffer_size) {
+		vbg_debug("VbglGRVerify: request size %u > buffer size %zu\n",
+			  req->size, buffer_size);
+		return VERR_INVALID_PARAMETER;
+	}
+
+	/* The request size must correspond to the request type. */
+	min_size = vbg_req_get_min_size(req);
+
+	if (buffer_size < min_size) {
+		vbg_debug("VbglGRVerify: buffer size %zu < expected size %zu\n",
+			  buffer_size, min_size);
+		return VERR_INVALID_PARAMETER;
+	}
+
+	if (req->size < min_size) {
+		vbg_debug("VbglGRVerify: header size %u < expected size %zu\n",
+			  req->size, min_size);
+		return VERR_INVALID_PARAMETER;
+	}
+
+	if (buffer_size == min_size) {
+		/*
+		 * This is most likely a fixed size request, and in this case
+		 * the request size must be also equal to the expected size.
+		 */
+		if (req->size != min_size) {
+			vbg_debug("VbglGRVerify: request size %u != expected size %zu\n",
+				  req->size, min_size);
+			return VERR_INVALID_PARAMETER;
+		}
+
+		return VINF_SUCCESS;
+	}
+
+	/*
+	 * This can be a variable size request. Check the request type and limit
+	 * the size to VMMDEV_MAX_VMMDEVREQ_SIZE, which is max size supported by
+	 * the host.
+	 *
+	 * Note: Keep this list sorted for easier human lookup!
+	 */
+	if (req->requestType == VMMDevReq_ChangeMemBalloon ||
+	    req->requestType == VMMDevReq_HGCMCall32 ||
+	    req->requestType == VMMDevReq_HGCMCall64 ||
+	    req->requestType == VMMDevReq_RegisterSharedModule ||
+	    req->requestType == VMMDevReq_ReportGuestUserState ||
+	    req->requestType == VMMDevReq_LogString ||
+	    req->requestType == VMMDevReq_SetPointerShape ||
+	    req->requestType == VMMDevReq_VideoSetVisibleRegion) {
+		if (buffer_size > VMMDEV_MAX_VMMDEVREQ_SIZE) {
+			/*
+			 * Not only LogString requests end up here; do not name
+			 * a specific request type in the message.
+			 */
+			vbg_debug("VbglGRVerify: variable size request: buffer size %zu too big\n",
+				  buffer_size);
+			return VERR_BUFFER_OVERFLOW;
+		}
+	} else {
+		vbg_debug("VbglGRVerify: unknown request-type %#08x\n",
+			  req->requestType);
+		return VERR_IO_BAD_LENGTH; /* ??? */
+	}
+
+	return VINF_SUCCESS;
+}
+
+/**
+ * Allocates and initializes a VMMDev request of the given type and size.
+ *
+ * @returns The new request, or NULL on allocation failure.
+ */
+void *vbg_req_alloc(size_t len, VMMDevRequestType req_type)
+{
+	VMMDevRequestHeader *req;
+
+	/*
+	 * The request is handed to the host by physical address (see
+	 * vbg_req_perform() which outl()s it through a 32-bit I/O port),
+	 * __GFP_DMA32 keeps the buffer below 4G so the address fits.
+	 */
+	req = kmalloc(len, GFP_KERNEL | __GFP_DMA32);
+	if (!req)
+		return NULL;
+
+	/* Fill with 0xaa so uninitialized fields stand out. */
+	memset(req, 0xaa, len);
+
+	req->size = len;
+	req->version = VMMDEV_REQUEST_HEADER_VERSION;
+	req->requestType = req_type;
+	req->rc = VERR_GENERAL_FAILURE;
+	req->reserved1 = 0;
+	req->reserved2 = 0;
+
+	return req;
+}
+
+/* Note this function returns a VBox status code, not a negative errno!! */
+int vbg_req_perform(struct vbg_dev *gdev, void *req)
+{
+	unsigned long phys_req = virt_to_phys(req);
+
+	/* Hand the request's physical address to the host. */
+	outl(phys_req, gdev->io_port + VMMDEV_PORT_OFF_REQUEST);
+	/*
+	 * The host changes the request as a result of the outl, make sure
+	 * the outl and any reads of the req happen in the correct order.
+	 */
+	mb();
+
+	return ((VMMDevRequestHeader *)req)->rc;
+}
+
+/*
+ * Checks whether the host has marked an HGCM request as completed.
+ * The ISR updates fu32Flags under event_spinlock, so sample it there.
+ */
+static bool hgcm_req_done(struct vbg_dev *gdev,
+			  VMMDevHGCMRequestHeader *header)
+{
+	bool completed;
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&gdev->event_spinlock, irq_flags);
+	completed = (header->fu32Flags & VBOX_HGCM_REQ_DONE) != 0;
+	spin_unlock_irqrestore(&gdev->event_spinlock, irq_flags);
+
+	return completed;
+}
+
+/**
+ * Connects to an HGCM service on the host.
+ *
+ * @returns VBox status code.
+ * @param   gdev	The device extension.
+ * @param   loc		The service to connect to.
+ * @param   client_id	Where to store the client id on success.
+ */
+int vbg_hgcm_connect(struct vbg_dev *gdev, HGCMServiceLocation *loc,
+		     u32 *client_id)
+{
+	VMMDevHGCMConnect *hgcm_connect = NULL;
+	int rc;
+
+	hgcm_connect = vbg_req_alloc(sizeof(*hgcm_connect),
+				     VMMDevReq_HGCMConnect);
+	if (!hgcm_connect)
+		return VERR_NO_MEMORY;
+
+	hgcm_connect->header.fu32Flags = 0;
+	memcpy(&hgcm_connect->loc, loc, sizeof(*loc));
+	hgcm_connect->u32ClientID = 0;
+
+	rc = vbg_req_perform(gdev, hgcm_connect);
+
+	/* The host may complete the request asynchronously; wait for it. */
+	if (rc == VINF_HGCM_ASYNC_EXECUTE)
+		wait_event(gdev->hgcm_wq,
+			   hgcm_req_done(gdev, &hgcm_connect->header));
+
+	if (rc >= 0) {
+		*client_id = hgcm_connect->u32ClientID;
+		rc = hgcm_connect->header.result;
+	}
+
+	kfree(hgcm_connect);
+
+	return rc;
+}
+EXPORT_SYMBOL(vbg_hgcm_connect);
+
+/**
+ * Disconnects from an HGCM service on the host.
+ *
+ * @returns VBox status code.
+ * @param   gdev	The device extension.
+ * @param   client_id	The client id returned by vbg_hgcm_connect().
+ */
+int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id)
+{
+	VMMDevHGCMDisconnect *hgcm_disconnect = NULL;
+	int rc;
+
+	hgcm_disconnect = vbg_req_alloc(sizeof(*hgcm_disconnect),
+					VMMDevReq_HGCMDisconnect);
+	if (!hgcm_disconnect)
+		return VERR_NO_MEMORY;
+
+	hgcm_disconnect->header.fu32Flags = 0;
+	hgcm_disconnect->u32ClientID = client_id;
+
+	rc = vbg_req_perform(gdev, hgcm_disconnect);
+
+	/* The host may complete the request asynchronously; wait for it. */
+	if (rc == VINF_HGCM_ASYNC_EXECUTE)
+		wait_event(gdev->hgcm_wq,
+			   hgcm_req_done(gdev, &hgcm_disconnect->header));
+
+	if (rc >= 0)
+		rc = hgcm_disconnect->header.result;
+
+	kfree(hgcm_disconnect);
+
+	return rc;
+}
+EXPORT_SYMBOL(vbg_hgcm_disconnect);
+
+/* Number of pages spanned by a buffer of len bytes starting at buf. */
+static u32 hgcm_call_buf_size_in_pages(void *buf, u32 len)
+{
+	u32 page_offset = (unsigned long)buf & ~PAGE_MASK;
+
+	/* Round [page_offset, page_offset + len) up to whole pages. */
+	return PAGE_ALIGN(page_offset + len) >> PAGE_SHIFT;
+}
+
+/* Adds the size of a page-list describing buf/len to *pcb_extra. */
+static void hgcm_call_inc_pcb_extra(void *buf, u32 len, size_t *pcb_extra)
+{
+	u32 page_count = hgcm_call_buf_size_in_pages(buf, len);
+
+	*pcb_extra += offsetof(HGCMPageListInfo, aPages[page_count]);
+}
+
+/* Kernel mode use only, use WARN_ON for sanity checks. */
+/*
+ * Validates an embedded page-list parameter (offset/size bounds, page count
+ * vs size, flags, page address sanity) and accounts for the extra request
+ * space it needs in *pcb_extra. Returns a VBox status code.
+ */
+static int hgcm_call_check_pagelist(const HGCMFunctionParameter *src_parm,
+	const VBoxGuestHGCMCallInfo *info, u32 info_size, size_t *pcb_extra)
+{
+	HGCMPageListInfo *pg_lst;
+	u32 u, offset, size;
+
+	offset = src_parm->u.PageList.offset;
+	size = src_parm->u.PageList.size;
+	if (!size)
+		return VINF_SUCCESS;
+
+	if (WARN_ON(size > VBGLR0_MAX_HGCM_KERNEL_PARM))
+		return VERR_OUT_OF_RANGE;
+
+	/* The page list must live after the parameters, within info. */
+	if (WARN_ON(offset < info->cParms * sizeof(HGCMFunctionParameter) ||
+		    offset > info_size - sizeof(HGCMPageListInfo)))
+		return VERR_INVALID_PARAMETER;
+
+	pg_lst = (HGCMPageListInfo *)((u8 *)info + offset);
+
+	/* The variable-length aPages[] array must also fit within info. */
+	u = offset + offsetof(HGCMPageListInfo, aPages[pg_lst->cPages]);
+	if (WARN_ON(u > info_size))
+		return VERR_INVALID_PARAMETER;
+
+	if (WARN_ON(pg_lst->offFirstPage >= PAGE_SIZE))
+		return VERR_INVALID_PARAMETER;
+
+	/* The page count must match the size + first-page offset. */
+	u = PAGE_ALIGN(pg_lst->offFirstPage + size) >> PAGE_SHIFT;
+	if (WARN_ON(u != pg_lst->cPages))
+		return VERR_INVALID_PARAMETER;
+
+	if (WARN_ON(!VBOX_HGCM_F_PARM_ARE_VALID(pg_lst->flags)))
+		return VERR_INVALID_PARAMETER;
+
+	/* Reject entries that are not page aligned or above 2^52. */
+	for (u = 0; u < pg_lst->cPages; u++) {
+		if (WARN_ON(pg_lst->aPages[u] &
+			    (0xfff0000000000000ULL | ~PAGE_MASK)))
+			return VERR_INVALID_PARAMETER;
+	}
+
+	*pcb_extra += offsetof(HGCMPageListInfo, aPages[pg_lst->cPages]);
+
+	return VINF_SUCCESS;
+}
+
+/**
+ * Preprocesses a linear-address parameter: range-checks it and, for user
+ * requests, allocates a bounce buffer and copies the user data in (for
+ * in/both directions). Accounts for the page-list space in *pcb_extra.
+ *
+ * @returns VBox status code.
+ * @param   src_parm	The parameter to preprocess.
+ * @param   is_user	Is this a user or a kernel request.
+ * @param   bounce_buf_ret	Where to store the allocated bounce buffer;
+ *			always set before any failure that follows the
+ *			allocation, so the caller's cleanup frees it.
+ * @param   pcb_extra	Running total of extra request space needed.
+ */
+static int hgcm_call_preprocess_linaddr(const HGCMFunctionParameter *src_parm,
+					bool is_user, void **bounce_buf_ret,
+					size_t *pcb_extra)
+{
+	void *buf, *bounce_buf;
+	bool copy_in;
+	u32 len;
+	int ret;
+
+	buf = (void *)src_parm->u.Pointer.u.linearAddr;
+	len = src_parm->u.Pointer.size;
+	copy_in = src_parm->type != VMMDevHGCMParmType_LinAddr_Out;
+
+	if (!is_user) {
+		/* Kernel buffers are used directly, no bounce buffer. */
+		if (WARN_ON(len > VBGLR0_MAX_HGCM_KERNEL_PARM))
+			return VERR_OUT_OF_RANGE;
+
+		hgcm_call_inc_pcb_extra(buf, len, pcb_extra);
+		return VINF_SUCCESS;
+	}
+
+	if (len > VBGLR0_MAX_HGCM_USER_PARM)
+		return VERR_OUT_OF_RANGE;
+
+	bounce_buf = kvmalloc(len, GFP_KERNEL);
+	if (!bounce_buf)
+		return VERR_NO_MEMORY;
+
+	/*
+	 * Store the buffer before attempting the copy, so that the caller's
+	 * error path frees it; returning without storing it would leak it.
+	 */
+	*bounce_buf_ret = bounce_buf;
+
+	if (copy_in) {
+		ret = copy_from_user(bounce_buf, (void __user *)buf, len);
+		if (ret)
+			return VERR_ACCESS_DENIED;
+	} else {
+		memset(bounce_buf, 0, len);
+	}
+
+	hgcm_call_inc_pcb_extra(bounce_buf, len, pcb_extra);
+	return VINF_SUCCESS;
+}
+
+/**
+ * Preprocesses the HGCM call, validate parameters, alloc bounce buffers and
+ * figure out how much extra storage we need for page lists.
+ *
+ * @returns VBox status code
+ *
+ * @param   info             The call info.
+ * @param   info_size        The size of the call info structure.
+ * @param   is_user          Is it a user request or kernel request.
+ * @param   bounce_bufs_ret  Where to return the allocated bouncebuffer array
+ * @param   pcb_extra        Where to return the extra request space needed for
+ *                           physical page lists.
+ */
+static int hgcm_call_preprocess(const VBoxGuestHGCMCallInfo *info,
+	u32 info_size, bool is_user, void ***bounce_bufs_ret, size_t *pcb_extra)
+{
+	const HGCMFunctionParameter *src_parm = VBOXGUEST_HGCM_CALL_PARMS(info);
+	u32 i, parms = info->cParms;
+	void **bounce_bufs = NULL;
+	int rc;
+
+	*bounce_bufs_ret = NULL;
+	*pcb_extra = 0;
+
+	for (i = 0; i < parms; i++, src_parm++) {
+		switch (src_parm->type) {
+		case VMMDevHGCMParmType_32bit:
+		case VMMDevHGCMParmType_64bit:
+			break;
+
+		case VMMDevHGCMParmType_PageList:
+			/* Page-list parameters are kernel-only. */
+			if (is_user)
+				return VERR_INVALID_PARAMETER;
+
+			rc = hgcm_call_check_pagelist(src_parm, info, info_size,
+						      pcb_extra);
+			if (rc)
+				return rc;
+
+			break;
+
+		case VMMDevHGCMParmType_LinAddr_In:
+		case VMMDevHGCMParmType_LinAddr_Out:
+		case VMMDevHGCMParmType_LinAddr:
+			/* Lazily allocate the bounce buffer array (user only). */
+			if (is_user && !bounce_bufs) {
+				bounce_bufs = kcalloc(parms, sizeof(void *),
+						      GFP_KERNEL);
+				if (!bounce_bufs)
+					return VERR_NO_MEMORY;
+
+				*bounce_bufs_ret = bounce_bufs;
+			}
+
+			/*
+			 * NOTE(review): for kernel calls bounce_bufs is NULL
+			 * here, so &bounce_bufs[i] computes an offset from a
+			 * NULL pointer; it is never dereferenced in that case
+			 * (see hgcm_call_preprocess_linaddr) but is still
+			 * technically undefined behavior -- worth confirming.
+			 */
+			rc = hgcm_call_preprocess_linaddr(src_parm, is_user,
+							  &bounce_bufs[i],
+							  pcb_extra);
+			if (rc)
+				return rc;
+
+			break;
+
+		default:
+			return VERR_INVALID_PARAMETER;
+		}
+	}
+
+	return VINF_SUCCESS;
+}
+
+/**
+ * Translates linear address types to page list direction flags.
+ *
+ * @returns page list flags.
+ * @param   type	The type.
+ */
+static u32 hgcm_call_linear_addr_type_to_pagelist_flags(
+	HGCMFunctionParameterType type)
+{
+	switch (type) {
+	case VMMDevHGCMParmType_LinAddr_In:
+		return VBOX_HGCM_F_PARM_DIRECTION_TO_HOST;
+
+	case VMMDevHGCMParmType_LinAddr_Out:
+		return VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST;
+
+	default:
+		WARN_ON(1);
+		/* Fall through, treat unknown types as bidirectional. */
+	case VMMDevHGCMParmType_LinAddr:
+		return VBOX_HGCM_F_PARM_DIRECTION_BOTH;
+	}
+}
+
+/*
+ * Copies a (already validated) page-list parameter from the caller's info
+ * into the request we send to the host, placing the page list itself in the
+ * extra space after the parameters and advancing *off_extra past it.
+ */
+static void hgcm_call_init_pagelist(VMMDevHGCMCall *call,
+				    const VBoxGuestHGCMCallInfo *info,
+				    HGCMFunctionParameter *dst_parm,
+				    const HGCMFunctionParameter *src_parm,
+				    u32 *off_extra)
+{
+	const HGCMPageListInfo *src_pg_list;
+	HGCMPageListInfo *dst_pg_list;
+	u32 i, pages;
+
+	dst_parm->type = VMMDevHGCMParmType_PageList;
+	dst_parm->u.PageList.size = src_parm->u.PageList.size;
+
+	/* An empty page list needs no extra space. */
+	if (src_parm->u.PageList.size == 0) {
+		dst_parm->u.PageList.offset = 0;
+		return;
+	}
+
+	src_pg_list = (void *)info + src_parm->u.PageList.offset;
+	dst_pg_list = (void *)call + *off_extra;
+	pages = src_pg_list->cPages;
+
+	dst_parm->u.PageList.offset = *off_extra;
+	dst_pg_list->flags = src_pg_list->flags;
+	dst_pg_list->offFirstPage = src_pg_list->offFirstPage;
+	dst_pg_list->cPages = pages;
+
+	for (i = 0; i < pages; i++)
+		dst_pg_list->aPages[i] = src_pg_list->aPages[i];
+
+	*off_extra += offsetof(HGCMPageListInfo, aPages[pages]);
+}
+
+/*
+ * Converts a linear-address parameter into a physical page list in the
+ * request sent to the host, so the host never sees guest virtual addresses.
+ * The page list is placed in the extra space after the parameters and
+ * *off_extra is advanced past it.
+ */
+static void hgcm_call_init_linaddr(VMMDevHGCMCall *call,
+				   HGCMFunctionParameter *dst_parm,
+				   void *buf, u32 len,
+				   HGCMFunctionParameterType type,
+				   u32 *off_extra)
+{
+	HGCMPageListInfo *dst_pg_lst;
+	struct page *page;
+	bool is_vmalloc;
+	u32 i, pages;
+
+	dst_parm->type = type;
+
+	if (len == 0) {
+		dst_parm->u.Pointer.size = 0;
+		dst_parm->u.Pointer.u.linearAddr = 0;
+		return;
+	}
+
+	dst_pg_lst = (void *)call + *off_extra;
+	pages = hgcm_call_buf_size_in_pages(buf, len);
+	is_vmalloc = is_vmalloc_addr(buf);
+
+	dst_parm->type = VMMDevHGCMParmType_PageList;
+	dst_parm->u.PageList.size = len;
+	dst_parm->u.PageList.offset = *off_extra;
+	dst_pg_lst->flags = hgcm_call_linear_addr_type_to_pagelist_flags(type);
+	dst_pg_lst->offFirstPage = (unsigned long)buf & ~PAGE_MASK;
+	dst_pg_lst->cPages = pages;
+
+	/* vmalloc memory is not physically contiguous, look up each page. */
+	for (i = 0; i < pages; i++) {
+		if (is_vmalloc)
+			page = vmalloc_to_page(buf);
+		else
+			page = virt_to_page(buf);
+
+		dst_pg_lst->aPages[i] = page_to_phys(page);
+		buf += PAGE_SIZE;
+	}
+
+	*off_extra += offsetof(HGCMPageListInfo, aPages[pages]);
+}
+
+/**
+ * Initializes the call request that we're sending to the host.
+ *
+ * @param   call            The call to initialize.
+ * @param   info            The call info.
+ * @param   info_size       The size of the call info structure.
+ * @param   bounce_bufs     The bouncebuffer array.
+ */
+static void hgcm_call_init_call(VMMDevHGCMCall *call,
+				const VBoxGuestHGCMCallInfo *info,
+				u32 info_size, void **bounce_bufs)
+{
+	const HGCMFunctionParameter *src_parm = VBOXGUEST_HGCM_CALL_PARMS(info);
+	HGCMFunctionParameter *dst_parm = VMMDEV_HGCM_CALL_PARMS(call);
+	u32 i, parms = info->cParms;
+	/* Extra data (page lists) starts right after the parameter array. */
+	u32 off_extra = (uintptr_t)(dst_parm + parms) - (uintptr_t)call;
+	void *buf;
+
+	call->header.fu32Flags = 0;
+	call->header.result = VINF_SUCCESS;
+	call->u32ClientID = info->u32ClientID;
+	call->u32Function = info->u32Function;
+	call->cParms = parms;
+
+	for (i = 0; i < parms; i++, src_parm++, dst_parm++) {
+		switch (src_parm->type) {
+		case VMMDevHGCMParmType_32bit:
+		case VMMDevHGCMParmType_64bit:
+			*dst_parm = *src_parm;
+			break;
+
+		case VMMDevHGCMParmType_PageList:
+			hgcm_call_init_pagelist(call, info, dst_parm, src_parm,
+						&off_extra);
+			break;
+
+		case VMMDevHGCMParmType_LinAddr_In:
+		case VMMDevHGCMParmType_LinAddr_Out:
+		case VMMDevHGCMParmType_LinAddr:
+			/* User requests use the bounce buffer, if any. */
+			if (bounce_bufs && bounce_bufs[i])
+				buf = bounce_bufs[i];
+			else
+				buf = (void *)src_parm->u.Pointer.u.linearAddr;
+
+			hgcm_call_init_linaddr(call, dst_parm, buf,
+					       src_parm->u.Pointer.size,
+					       src_parm->type, &off_extra);
+			break;
+
+		default:
+			/* Unreachable, hgcm_call_preprocess() rejects these. */
+			WARN_ON(1);
+			dst_parm->type = VMMDevHGCMParmType_Invalid;
+		}
+	}
+}
+
+/**
+ * Tries to cancel a pending HGCM call.
+ *
+ * @returns VBox status code
+ * @param   gdev	The device extension.
+ * @param   call	The HGCM call to cancel.
+ */
+static int hgcm_cancel_call(struct vbg_dev *gdev, VMMDevHGCMCall *call)
+{
+	int rc;
+
+	/*
+	 * We use a pre-allocated request for cancellations, which is
+	 * protected by cancel_req_mutex. This means that all cancellations
+	 * get serialized, this should be fine since they should be rare.
+	 */
+	mutex_lock(&gdev->cancel_req_mutex);
+	gdev->cancel_req->physReqToCancel = virt_to_phys(call);
+	rc = vbg_req_perform(gdev, gdev->cancel_req);
+	mutex_unlock(&gdev->cancel_req_mutex);
+
+	/** @todo ADDVER: Remove this on next minor version change. */
+	if (rc == VERR_NOT_IMPLEMENTED) {
+		/* Fall back to the old-style cancel for older hosts. */
+		call->header.fu32Flags |= VBOX_HGCM_REQ_CANCELLED;
+		call->header.header.requestType = VMMDevReq_HGCMCancel;
+
+		rc = vbg_req_perform(gdev, call);
+		if (rc == VERR_INVALID_PARAMETER)
+			rc = VERR_NOT_FOUND;
+	}
+
+	if (rc >= 0)
+		call->header.fu32Flags |= VBOX_HGCM_REQ_CANCELLED;
+
+	return rc;
+}
+
+/**
+ * Performs the call and completion wait.
+ *
+ * @returns VBox status code
+ *
+ * @param   gdev	The VBoxGuest device extension.
+ * @param   call	The HGCM call info.
+ * @param   timeout_ms	Timeout in ms.
+ * @param   is_user	Is this an in kernel call or from userspace ?
+ * @param   leak_it	Where to return the leak it / free it,
+ *			indicator. Cancellation fun.
+ */
+static int vbg_hgcm_do_call(struct vbg_dev *gdev, VMMDevHGCMCall *call,
+			    u32 timeout_ms, bool is_user, bool *leak_it)
+{
+	long timeout;
+	int rc, cancel_rc;
+
+	*leak_it = false;
+
+	rc = vbg_req_perform(gdev, call);
+
+	/*
+	 * If the call failed, then pretend success.
+	 * Upper layers will interpret the result code in the packet.
+	 */
+	if (rc < 0) {
+		WARN_ON(!(call->header.fu32Flags & VBOX_HGCM_REQ_DONE));
+		return VINF_SUCCESS;
+	}
+
+	if (rc != VINF_HGCM_ASYNC_EXECUTE)
+		return rc;
+
+	/* Host decided to process the request asynchronously, wait for it */
+	if (timeout_ms == U32_MAX)
+		timeout = MAX_SCHEDULE_TIMEOUT;
+	else
+		timeout = msecs_to_jiffies(timeout_ms);
+
+	/* Only userspace callers may be interrupted by signals. */
+	if (is_user) {
+		timeout = wait_event_interruptible_timeout(gdev->hgcm_wq,
+							   hgcm_req_done
+							   (gdev,
+							    &call->header),
+							   timeout);
+	} else {
+		timeout = wait_event_timeout(gdev->hgcm_wq,
+					     hgcm_req_done(gdev,
+							   &call->header),
+					     timeout);
+	}
+
+	/* timeout > 0 means hgcm_req_done has returned true, so success */
+	if (timeout > 0)
+		return VINF_SUCCESS;
+
+	if (timeout == 0)
+		rc = VERR_TIMEOUT;
+	else
+		rc = VERR_INTERRUPTED;
+
+	/* Cancel the request */
+	cancel_rc = hgcm_cancel_call(gdev, call);
+	if (cancel_rc >= 0)
+		return rc;
+
+	/*
+	 * Failed to cancel, this should mean that the cancel has lost the
+	 * race with normal completion, wait while the host completes it.
+	 */
+	if (cancel_rc == VERR_NOT_FOUND || cancel_rc == VERR_SEM_DESTROYED)
+		timeout = msecs_to_jiffies(500);
+	else
+		timeout = msecs_to_jiffies(2000);
+
+	timeout = wait_event_timeout(gdev->hgcm_wq,
+				     hgcm_req_done(gdev, &call->header),
+				     timeout);
+
+	if (WARN_ON(timeout == 0)) {
+		/* We really should never get here */
+		vbg_err("%s: Call timedout and cancellation failed, leaking the request\n",
+			__func__);
+		/* The host may still write to the request; do not free it. */
+		*leak_it = true;
+		return rc;
+	}
+
+	/* The call has completed normally after all */
+	return VINF_SUCCESS;
+}
+
+/**
+ * Copies the result of the call back to the caller info structure and user
+ * buffers.
+ *
+ * @returns VBox status code
+ * @param   info                Call info structure to update.
+ * @param   call                HGCM call request.
+ * @param   bounce_bufs         The bouncebuffer array.
+ */
+static int hgcm_call_copy_back_result(VBoxGuestHGCMCallInfo *info,
+				      const VMMDevHGCMCall *call,
+				      void **bounce_bufs)
+{
+	const HGCMFunctionParameter *src_parm = VMMDEV_HGCM_CALL_PARMS(call);
+	HGCMFunctionParameter *dst_parm = VBOXGUEST_HGCM_CALL_PARMS(info);
+	u32 i, parms = info->cParms;
+	void __user *userp;
+	int ret;
+
+	/* The call result. */
+	info->result = call->header.result;
+
+	/* Copy back parameters. */
+	for (i = 0; i < parms; i++, src_parm++, dst_parm++) {
+		switch (dst_parm->type) {
+		case VMMDevHGCMParmType_32bit:
+		case VMMDevHGCMParmType_64bit:
+			*dst_parm = *src_parm;
+			break;
+
+		case VMMDevHGCMParmType_PageList:
+			dst_parm->u.PageList.size = src_parm->u.PageList.size;
+			break;
+
+		case VMMDevHGCMParmType_LinAddr_In:
+			/* Input-only, no data to copy back. */
+			dst_parm->u.Pointer.size = src_parm->u.Pointer.size;
+			break;
+
+		case VMMDevHGCMParmType_LinAddr_Out:
+		case VMMDevHGCMParmType_LinAddr:
+			dst_parm->u.Pointer.size = src_parm->u.Pointer.size;
+			if (!bounce_bufs)
+				break; /* In kernel call */
+
+			/* Copy the bounce buffer back to userspace, bounded
+			 * by both the host-returned and caller sizes. */
+			userp = (void __user *)dst_parm->u.Pointer.u.linearAddr;
+			ret = copy_to_user(userp, bounce_bufs[i],
+					   min(src_parm->u.Pointer.size,
+					       dst_parm->u.Pointer.size));
+			if (ret)
+				return VERR_ACCESS_DENIED;
+			break;
+
+		default:
+			WARN_ON(1);
+			return VERR_INTERNAL_ERROR_4;
+		}
+	}
+
+	return VINF_SUCCESS;
+}
+
+/**
+ * Makes an HGCM call to the host.
+ *
+ * @returns VBox status code.
+ * @param   gdev	The device extension.
+ * @param   info	The call info; its parameters and result field are
+ *			updated in place on success.
+ * @param   info_size	The size of the call info structure.
+ * @param   timeout_ms	Timeout in ms, U32_MAX means wait indefinitely.
+ * @param   is_user	Is this a userspace request (parameter pointers are
+ *			then __user and get bounce-buffered) or a kernel one.
+ */
+int vbg_hgcm_call(struct vbg_dev *gdev, VBoxGuestHGCMCallInfo *info,
+		  u32 info_size, u32 timeout_ms, bool is_user)
+{
+	VMMDevHGCMCall *call;
+	void **bounce_bufs;
+	size_t extra_size;
+	bool leak_it;
+	int i, rc;
+
+	/*
+	 * Validate, lock and buffer the parameters for the call.
+	 * This will calculate the amount of extra space for physical page list.
+	 */
+	rc = hgcm_call_preprocess(info, info_size, is_user,
+				  &bounce_bufs, &extra_size);
+	if (rc) {
+		/* Even on error bounce bufs may still have been allocated */
+		goto free_bounce_bufs;
+	}
+
+	call = vbg_req_alloc(sizeof(VMMDevHGCMCall) + info->cParms *
+				sizeof(HGCMFunctionParameter) + extra_size,
+			     VMMDevReq_HGCMCall);
+	if (!call) {
+		rc = VERR_NO_MEMORY;
+		goto free_bounce_bufs;
+	}
+
+	hgcm_call_init_call(call, info, info_size, bounce_bufs);
+
+	rc = vbg_hgcm_do_call(gdev, call, timeout_ms, is_user, &leak_it);
+	if (rc >= 0)
+		rc = hgcm_call_copy_back_result(info, call, bounce_bufs);
+
+	/* leak_it is set when the host may still write to the request. */
+	if (!leak_it)
+		kfree(call);
+
+free_bounce_bufs:
+	if (bounce_bufs) {
+		for (i = 0; i < info->cParms; i++)
+			kvfree(bounce_bufs[i]);
+		kfree(bounce_bufs);
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(vbg_hgcm_call);
+
+#ifdef CONFIG_X86_64
+/*
+ * Compat wrapper for 32-bit userspace on a 64-bit kernel: converts the
+ * 32-bit parameter layout to the 64-bit one, performs the call through
+ * vbg_hgcm_call(), and converts the results back.
+ * Returns a VBox status code.
+ */
+int vbg_hgcm_call32(struct vbg_dev *gdev, VBoxGuestHGCMCallInfo *info,
+		    u32 info_size, u32 timeout_ms, bool is_user)
+{
+	VBoxGuestHGCMCallInfo *info64 = NULL;
+	HGCMFunctionParameter *parm64 = NULL;
+	HGCMFunctionParameter32 *parm32 = NULL;
+	u32 i, info64_size, parms = info->cParms;
+	int rc = VINF_SUCCESS;
+
+	/* KISS allocate a temporary request and convert the parameters. */
+	info64_size = sizeof(*info64) + parms * sizeof(HGCMFunctionParameter);
+	info64 = kzalloc(info64_size, GFP_KERNEL);
+	if (!info64)
+		return VERR_NO_MEMORY;
+
+	*info64 = *info;
+	parm32 = VBOXGUEST_HGCM_CALL_PARMS32(info);
+	parm64 = VBOXGUEST_HGCM_CALL_PARMS(info64);
+	for (i = 0; i < parms; i++, parm32++, parm64++) {
+		switch (parm32->type) {
+		case VMMDevHGCMParmType_32bit:
+			parm64->type = VMMDevHGCMParmType_32bit;
+			parm64->u.value32 = parm32->u.value32;
+			break;
+
+		case VMMDevHGCMParmType_64bit:
+			parm64->type = VMMDevHGCMParmType_64bit;
+			parm64->u.value64 = parm32->u.value64;
+			break;
+
+		case VMMDevHGCMParmType_LinAddr_Out:
+		case VMMDevHGCMParmType_LinAddr:
+		case VMMDevHGCMParmType_LinAddr_In:
+			parm64->type = parm32->type;
+			parm64->u.Pointer.size = parm32->u.Pointer.size;
+			parm64->u.Pointer.u.linearAddr =
+			    parm32->u.Pointer.u.linearAddr;
+			break;
+
+		default:
+			rc = VERR_INVALID_PARAMETER;
+		}
+		if (rc < 0)
+			goto out_free;
+	}
+
+	rc = vbg_hgcm_call(gdev, info64, info64_size, timeout_ms, is_user);
+	if (rc < 0)
+		goto out_free;
+
+	/* Copy back. */
+	*info = *info64;
+	parm32 = VBOXGUEST_HGCM_CALL_PARMS32(info);
+	parm64 = VBOXGUEST_HGCM_CALL_PARMS(info64);
+	for (i = 0; i < parms; i++, parm32++, parm64++) {
+		switch (parm64->type) {
+		case VMMDevHGCMParmType_32bit:
+			parm32->u.value32 = parm64->u.value32;
+			break;
+
+		case VMMDevHGCMParmType_64bit:
+			parm32->u.value64 = parm64->u.value64;
+			break;
+
+		case VMMDevHGCMParmType_LinAddr_Out:
+		case VMMDevHGCMParmType_LinAddr:
+		case VMMDevHGCMParmType_LinAddr_In:
+			parm32->u.Pointer.size = parm64->u.Pointer.size;
+			break;
+
+		default:
+			WARN_ON(1);
+			rc = VERR_INTERNAL_ERROR_3;
+		}
+	}
+
+out_free:
+	kfree(info64);
+	return rc;
+}
+#endif
+
+/**
+ * Convert a VirtualBox status code to a standard Linux kernel return value.
+ *
+ * Unknown error codes are logged (with a warning) and mapped to -EPROTO.
+ *
+ * @returns 0 (for any rc >= 0) or a negative errno value.
+ * @param   rc		VirtualBox status code to convert.
+ */
+int vbg_status_code_to_errno(int rc)
+{
+	/* All non-negative VBox status codes indicate success. */
+	if (rc >= 0)
+		return 0;
+
+	switch (rc) {
+	case VERR_ACCESS_DENIED:                    return -EPERM;
+	case VERR_FILE_NOT_FOUND:                   return -ENOENT;
+	case VERR_PROCESS_NOT_FOUND:                return -ESRCH;
+	case VERR_INTERRUPTED:                      return -EINTR;
+	case VERR_DEV_IO_ERROR:                     return -EIO;
+	case VERR_TOO_MUCH_DATA:                    return -E2BIG;
+	case VERR_BAD_EXE_FORMAT:                   return -ENOEXEC;
+	case VERR_INVALID_HANDLE:                   return -EBADF;
+	case VERR_TRY_AGAIN:                        return -EAGAIN;
+	case VERR_NO_MEMORY:                        return -ENOMEM;
+	case VERR_INVALID_POINTER:                  return -EFAULT;
+	case VERR_RESOURCE_BUSY:                    return -EBUSY;
+	case VERR_ALREADY_EXISTS:                   return -EEXIST;
+	case VERR_NOT_SAME_DEVICE:                  return -EXDEV;
+	case VERR_NOT_A_DIRECTORY:                  /* fallthrough */
+	case VERR_PATH_NOT_FOUND:                   return -ENOTDIR;
+	case VERR_IS_A_DIRECTORY:                   return -EISDIR;
+	case VERR_INVALID_PARAMETER:                return -EINVAL;
+	case VERR_TOO_MANY_OPEN_FILES:              return -ENFILE;
+	case VERR_INVALID_FUNCTION:                 return -ENOTTY;
+	case VERR_SHARING_VIOLATION:                return -ETXTBSY;
+	case VERR_FILE_TOO_BIG:                     return -EFBIG;
+	case VERR_DISK_FULL:                        return -ENOSPC;
+	case VERR_SEEK_ON_DEVICE:                   return -ESPIPE;
+	case VERR_WRITE_PROTECT:                    return -EROFS;
+	case VERR_BROKEN_PIPE:                      return -EPIPE;
+	case VERR_DEADLOCK:                         return -EDEADLK;
+	case VERR_FILENAME_TOO_LONG:                return -ENAMETOOLONG;
+	case VERR_FILE_LOCK_FAILED:                 return -ENOLCK;
+	case VERR_NOT_IMPLEMENTED:                  /* fallthrough */
+	case VERR_NOT_SUPPORTED:                    return -ENOSYS;
+	case VERR_DIR_NOT_EMPTY:                    return -ENOTEMPTY;
+	case VERR_TOO_MANY_SYMLINKS:                return -ELOOP;
+	case VERR_NO_DATA:                          return -ENODATA;
+	case VERR_NET_NO_NETWORK:                   return -ENONET;
+	case VERR_NET_NOT_UNIQUE_NAME:              return -ENOTUNIQ;
+	case VERR_NO_TRANSLATION:                   return -EILSEQ;
+	case VERR_NET_NOT_SOCKET:                   return -ENOTSOCK;
+	case VERR_NET_DEST_ADDRESS_REQUIRED:        return -EDESTADDRREQ;
+	case VERR_NET_MSG_SIZE:                     return -EMSGSIZE;
+	case VERR_NET_PROTOCOL_TYPE:                return -EPROTOTYPE;
+	case VERR_NET_PROTOCOL_NOT_AVAILABLE:       return -ENOPROTOOPT;
+	case VERR_NET_PROTOCOL_NOT_SUPPORTED:       return -EPROTONOSUPPORT;
+	case VERR_NET_SOCKET_TYPE_NOT_SUPPORTED:    return -ESOCKTNOSUPPORT;
+	case VERR_NET_OPERATION_NOT_SUPPORTED:      return -EOPNOTSUPP;
+	case VERR_NET_PROTOCOL_FAMILY_NOT_SUPPORTED: return -EPFNOSUPPORT;
+	case VERR_NET_ADDRESS_FAMILY_NOT_SUPPORTED: return -EAFNOSUPPORT;
+	case VERR_NET_ADDRESS_IN_USE:               return -EADDRINUSE;
+	case VERR_NET_ADDRESS_NOT_AVAILABLE:        return -EADDRNOTAVAIL;
+	case VERR_NET_DOWN:                         return -ENETDOWN;
+	case VERR_NET_UNREACHABLE:                  return -ENETUNREACH;
+	case VERR_NET_CONNECTION_RESET:             return -ENETRESET;
+	case VERR_NET_CONNECTION_ABORTED:           return -ECONNABORTED;
+	case VERR_NET_CONNECTION_RESET_BY_PEER:     return -ECONNRESET;
+	case VERR_NET_NO_BUFFER_SPACE:              return -ENOBUFS;
+	case VERR_NET_ALREADY_CONNECTED:            return -EISCONN;
+	case VERR_NET_NOT_CONNECTED:                return -ENOTCONN;
+	case VERR_NET_SHUTDOWN:                     return -ESHUTDOWN;
+	case VERR_NET_TOO_MANY_REFERENCES:          return -ETOOMANYREFS;
+	case VERR_TIMEOUT:                          return -ETIMEDOUT;
+	case VERR_NET_CONNECTION_REFUSED:           return -ECONNREFUSED;
+	case VERR_NET_HOST_DOWN:                    return -EHOSTDOWN;
+	case VERR_NET_HOST_UNREACHABLE:             return -EHOSTUNREACH;
+	case VERR_NET_ALREADY_IN_PROGRESS:          return -EALREADY;
+	case VERR_NET_IN_PROGRESS:                  return -EINPROGRESS;
+	case VERR_MEDIA_NOT_PRESENT:                return -ENOMEDIUM;
+	case VERR_MEDIA_NOT_RECOGNIZED:             return -EMEDIUMTYPE;
+	default:
+		vbg_warn("%s: Unhandled err %d\n", __func__, rc);
+		return -EPROTO;
+	}
+}
+EXPORT_SYMBOL(vbg_status_code_to_errno);
diff --git a/drivers/virt/vboxguest/vboxguest_version.h b/drivers/virt/vboxguest/vboxguest_version.h
new file mode 100644
index 000000000000..47a53151fcd0
--- /dev/null
+++ b/drivers/virt/vboxguest/vboxguest_version.h
@@ -0,0 +1,18 @@
+/*
+ * VBox Guest additions version info, this is used by the host to determine
+ * supported guest-addition features in some cases. So this will need to be
+ * synced with vbox upstream's versioning scheme when we implement / port
+ * new features from the upstream out-of-tree vboxguest driver.
+ */
+
+#ifndef __VBOX_VERSION_H__
+#define __VBOX_VERSION_H__
+
+/* Last synced July 12th 2017 */
+#define VBOX_VERSION_MAJOR 5
+#define VBOX_VERSION_MINOR 1
+#define VBOX_VERSION_BUILD 51
+#define VBOX_SVN_REV 67325
+#define VBOX_VERSION_STRING "5.1.51"
+
+#endif
diff --git a/include/linux/vbox_utils.h b/include/linux/vbox_utils.h
new file mode 100644
index 000000000000..81aadd77c8f8
--- /dev/null
+++ b/include/linux/vbox_utils.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2006-2016 Oracle Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, in which case the provisions of the CDDL are applicable
+ * instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#ifndef __VBOX_UTILS_H__
+#define __VBOX_UTILS_H__
+
+#include <linux/printk.h>
+#include <linux/vbox_vmmdev.h>
+#include <linux/vboxguest.h>
+
+struct vbg_dev;
+
+/**
+ * vboxguest logging functions, these log both to the backdoor and call
+ * the equivalent kernel pr_foo function.
+ */
+__printf(1, 2) void vbg_info(const char *fmt, ...);
+__printf(1, 2) void vbg_warn(const char *fmt, ...);
+__printf(1, 2) void vbg_err(const char *fmt, ...);
+
+/* Only use backdoor logging for non-dynamic debug builds */
+#if defined(DEBUG) && !defined(CONFIG_DYNAMIC_DEBUG)
+__printf(1, 2) void vbg_debug(const char *fmt, ...);
+#else
+#define vbg_debug pr_debug
+#endif
+
+/** @name Generic request functions.
+ * @{
+ */
+
+/**
+ * Allocate memory for generic request and initialize the request header.
+ *
+ * @returns the allocated memory
+ * @param   len		Size of memory block required for the request.
+ * @param   req_type	The generic request type.
+ */
+void *vbg_req_alloc(size_t len, VMMDevRequestType req_type);
+
+/**
+ * Perform a generic request.
+ *
+ * @returns VBox status code
+ * @param   gdev	The Guest extension device.
+ * @param   req		Pointer to the request structure.
+ */
+int vbg_req_perform(struct vbg_dev *gdev, void *req);
+
+/**
+ * Verify the generic request header.
+ *
+ * @returns VBox status code
+ * @param   req		Pointer to the request header structure.
+ * @param   buffer_size Size of the request memory block. It should be equal to
+ *			the request size for fixed size requests. It can be
+ *			greater for variable size requests.
+ */
+int vbg_req_verify(const VMMDevRequestHeader *req, size_t buffer_size);
+/** @} */
+
+int vbg_hgcm_connect(struct vbg_dev *gdev, HGCMServiceLocation *loc,
+		     u32 *client_id);
+
+int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id);
+
+int vbg_hgcm_call(struct vbg_dev *gdev,
+		  VBoxGuestHGCMCallInfo *pCallInfo, u32 cbCallInfo,
+		  u32 timeout_ms, bool is_user);
+
+int vbg_hgcm_call32(struct vbg_dev *gdev,
+		    VBoxGuestHGCMCallInfo *pCallInfo, u32 cbCallInfo,
+		    u32 timeout_ms, bool is_user);
+
+/**
+ * Convert a VirtualBox status code to a standard Linux kernel return value.
+ * @returns 0 or negative errno value.
+ * @param   rc		VirtualBox status code to convert.
+ */
+int vbg_status_code_to_errno(int rc);
+
+/**
+ * Helper for the vboxsf driver to get a reference to the guest device.
+ * @returns a pointer to the gdev; or an ERR_PTR value on error.
+ */
+struct vbg_dev *vbg_get_gdev(void);
+
+/**
+ * Helper for the vboxsf driver to put a guest device reference.
+ * @param   gdev	Reference returned by vbg_get_gdev to put.
+ */
+void vbg_put_gdev(struct vbg_dev *gdev);
+
+#endif
diff --git a/include/linux/vbox_vmmdev.h b/include/linux/vbox_vmmdev.h
new file mode 100644
index 000000000000..10d6cd335446
--- /dev/null
+++ b/include/linux/vbox_vmmdev.h
@@ -0,0 +1,123 @@
+/*
+ * Virtual Device for Guest <-> VMM/Host communication (ADD,DEV).
+ *
+ * Copyright (C) 2006-2016 Oracle Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, in which case the provisions of the CDDL are applicable
+ * instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#ifndef __VBOX_VMMDEV_H__
+#define __VBOX_VMMDEV_H__
+
+#include <linux/sizes.h>
+#include <uapi/linux/vbox_vmmdev.h>
+
+/**
+ * @name VBVA ring defines.
+ *
+ * The VBVA ring buffer is suitable for transferring large (< 2GB) amount of
+ * data. For example big bitmaps which do not fit to the buffer.
+ *
+ * Guest starts writing to the buffer by initializing a record entry in the
+ * aRecords queue. VBVA_F_RECORD_PARTIAL indicates that the record is being
+ * written. As data is written to the ring buffer, the guest increases off32End
+ * for the record.
+ *
+ * The host reads the aRecords on flushes and processes all completed records.
+ * When the host encounters a situation where only a partial record is
+ * present and cbRecord & ~VBVA_F_RECORD_PARTIAL >= VBVA_RING_BUFFER_SIZE -
+ * VBVA_RING_BUFFER_THRESHOLD, the host fetches all record data and updates
+ * off32Head. After that on each flush the host continues fetching the data
+ * until the record is completed.
+ *
+ * @{
+ */
+#define VMMDEV_VBVA_RING_BUFFER_SIZE        (SZ_4M - SZ_1K)
+#define VMMDEV_VBVA_RING_BUFFER_THRESHOLD   (SZ_4K)
+
+#define VMMDEV_VBVA_MAX_RECORDS (64)
+/** @} */
+
+/**
+ * VBVA record, an entry in VBVAMEMORY::aRecords.
+ *
+ * Per the VBVA ring notes above, cbRecord may have VBVA_F_RECORD_PARTIAL
+ * set while the guest is still writing the record's data.
+ */
+typedef struct VMMDEVVBVARECORD {
+	/** The length of the record. Changed by guest. */
+	u32 cbRecord;
+} VMMDEVVBVARECORD;
+VMMDEV_ASSERT_SIZE(VMMDEVVBVARECORD, 4);
+
+/**
+ * VBVA memory layout.
+ *
+ * This is a subsection of the VMMDevMemory structure.
+ */
+typedef struct VBVAMEMORY {
+	/** VBVA_F_MODE_*. */
+	u32 fu32ModeFlags;
+
+	/** The offset where the data start in the buffer. */
+	u32 off32Data;
+	/** The offset where next data must be placed in the buffer. */
+	u32 off32Free;
+
+	/** The ring buffer for data. */
+	u8  au8RingBuffer[VMMDEV_VBVA_RING_BUFFER_SIZE];
+
+	/** The queue of record descriptions. */
+	VMMDEVVBVARECORD aRecords[VMMDEV_VBVA_MAX_RECORDS];
+	/*
+	 * Head/tail indices into aRecords: presumably the host consumes from
+	 * indexRecordFirst while the guest appends at indexRecordFree --
+	 * NOTE(review): confirm against the host-side VBVA code.
+	 */
+	u32 indexRecordFirst;
+	u32 indexRecordFree;
+
+	/**
+	 * RDP orders supported by the client. The guest reports only them
+	 * and falls back to DIRTY rects for not supported ones.
+	 *
+	 * (1 << VBVA_VRDP_*)
+	 */
+	u32 fu32SupportedOrders;
+
+} VBVAMEMORY;
+VMMDEV_ASSERT_SIZE(VBVAMEMORY, 12 + (SZ_4M-SZ_1K) + 4*64 + 12);
+
+/**
+ * The layout of VMMDEV RAM region that contains information for guest.
+ *
+ * The V union carries the event signalling fields; which variant applies
+ * depends on the interface version negotiated with the host (V1_03 vs
+ * V1_04 layouts).
+ */
+typedef struct VMMDevMemory {
+	/** The size of this structure. */
+	u32 u32Size;
+	/** The structure version. (VMMDEV_MEMORY_VERSION) */
+	u32 u32Version;
+
+	union {
+		struct {
+			/** Flag telling that VMMDev has events pending. */
+			bool fHaveEvents;
+		} V1_04;
+
+		struct {
+			/** Pending events flags, set by host. */
+			u32 u32HostEvents;
+			/** Mask of events the guest wants, set by guest. */
+			u32 u32GuestEventMask;
+		} V1_03;
+	} V;
+
+	VBVAMEMORY vbvaMemory;
+
+} VMMDevMemory;
+VMMDEV_ASSERT_SIZE(VMMDevMemory, 8 + 8 + sizeof(VBVAMEMORY));
+VMMDEV_ASSERT_MEMBER_OFFSET(VMMDevMemory, vbvaMemory, 16);
+
+/** Version of VMMDevMemory structure (VMMDevMemory::u32Version). */
+#define VMMDEV_MEMORY_VERSION   (1)
+
+#endif
diff --git a/include/uapi/linux/vbox_err.h b/include/uapi/linux/vbox_err.h
new file mode 100644
index 000000000000..e6e7ba835e36
--- /dev/null
+++ b/include/uapi/linux/vbox_err.h
@@ -0,0 +1,178 @@
+/*
+ * Copyright (C) 2017 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __UAPI_VBOX_ERR_H__
+#define __UAPI_VBOX_ERR_H__
+
+/**
+ * @name VirtualBox virtual-hardware error macros
+ * @{
+ */
+
+#define VINF_SUCCESS                        0
+#define VERR_GENERAL_FAILURE                (-1)
+#define VERR_INVALID_PARAMETER              (-2)
+#define VERR_INVALID_MAGIC                  (-3)
+#define VERR_INVALID_HANDLE                 (-4)
+#define VERR_LOCK_FAILED                    (-5)
+#define VERR_INVALID_POINTER                (-6)
+#define VERR_IDT_FAILED                     (-7)
+#define VERR_NO_MEMORY                      (-8)
+#define VERR_ALREADY_LOADED                 (-9)
+#define VERR_PERMISSION_DENIED              (-10)
+#define VERR_VERSION_MISMATCH               (-11)
+#define VERR_NOT_IMPLEMENTED                (-12)
+#define VERR_INVALID_FLAGS                  (-13)
+
+#define VERR_NOT_EQUAL                      (-18)
+#define VERR_NOT_SYMLINK                    (-19)
+#define VERR_NO_TMP_MEMORY                  (-20)
+#define VERR_INVALID_FMODE                  (-21)
+#define VERR_WRONG_ORDER                    (-22)
+#define VERR_NO_TLS_FOR_SELF                (-23)
+#define VERR_FAILED_TO_SET_SELF_TLS         (-24)
+#define VERR_NO_CONT_MEMORY                 (-26)
+#define VERR_NO_PAGE_MEMORY                 (-27)
+#define VERR_THREAD_IS_DEAD                 (-29)
+#define VERR_THREAD_NOT_WAITABLE            (-30)
+#define VERR_PAGE_TABLE_NOT_PRESENT         (-31)
+#define VERR_INVALID_CONTEXT                (-32)
+#define VERR_TIMER_BUSY                     (-33)
+#define VERR_ADDRESS_CONFLICT               (-34)
+#define VERR_UNRESOLVED_ERROR               (-35)
+#define VERR_INVALID_FUNCTION               (-36)
+#define VERR_NOT_SUPPORTED                  (-37)
+#define VERR_ACCESS_DENIED                  (-38)
+#define VERR_INTERRUPTED                    (-39)
+#define VERR_TIMEOUT                        (-40)
+#define VERR_BUFFER_OVERFLOW                (-41)
+#define VERR_TOO_MUCH_DATA                  (-42)
+#define VERR_MAX_THRDS_REACHED              (-43)
+#define VERR_MAX_PROCS_REACHED              (-44)
+#define VERR_SIGNAL_REFUSED                 (-45)
+#define VERR_SIGNAL_PENDING                 (-46)
+#define VERR_SIGNAL_INVALID                 (-47)
+#define VERR_STATE_CHANGED                  (-48)
+#define VERR_INVALID_UUID_FORMAT            (-49)
+#define VERR_PROCESS_NOT_FOUND              (-50)
+#define VERR_PROCESS_RUNNING                (-51)
+#define VERR_TRY_AGAIN                      (-52)
+#define VERR_PARSE_ERROR                    (-53)
+#define VERR_OUT_OF_RANGE                   (-54)
+#define VERR_NUMBER_TOO_BIG                 (-55)
+#define VERR_NO_DIGITS                      (-56)
+#define VERR_NEGATIVE_UNSIGNED              (-57)
+#define VERR_NO_TRANSLATION                 (-58)
+
+#define VERR_NOT_FOUND                      (-78)
+#define VERR_INVALID_STATE                  (-79)
+#define VERR_OUT_OF_RESOURCES               (-80)
+
+#define VERR_FILE_NOT_FOUND                 (-102)
+#define VERR_PATH_NOT_FOUND                 (-103)
+#define VERR_INVALID_NAME                   (-104)
+#define VERR_ALREADY_EXISTS                 (-105)
+#define VERR_TOO_MANY_OPEN_FILES            (-106)
+#define VERR_SEEK                           (-107)
+#define VERR_NEGATIVE_SEEK                  (-108)
+#define VERR_SEEK_ON_DEVICE                 (-109)
+#define VERR_EOF                            (-110)
+#define VERR_READ_ERROR                     (-111)
+#define VERR_WRITE_ERROR                    (-112)
+#define VERR_WRITE_PROTECT                  (-113)
+#define VERR_SHARING_VIOLATION              (-114)
+#define VERR_FILE_LOCK_FAILED               (-115)
+#define VERR_FILE_LOCK_VIOLATION            (-116)
+#define VERR_CANT_CREATE                    (-117)
+#define VERR_CANT_DELETE_DIRECTORY          (-118)
+#define VERR_NOT_SAME_DEVICE                (-119)
+#define VERR_FILENAME_TOO_LONG              (-120)
+#define VERR_MEDIA_NOT_PRESENT              (-121)
+#define VERR_MEDIA_NOT_RECOGNIZED           (-122)
+#define VERR_FILE_NOT_LOCKED                (-123)
+#define VERR_FILE_LOCK_LOST                 (-124)
+#define VERR_DIR_NOT_EMPTY                  (-125)
+#define VERR_NOT_A_DIRECTORY                (-126)
+#define VERR_IS_A_DIRECTORY                 (-127)
+#define VERR_FILE_TOO_BIG                   (-128)
+
+#define VERR_NET_IO_ERROR                       (-400)
+#define VERR_NET_OUT_OF_RESOURCES               (-401)
+#define VERR_NET_HOST_NOT_FOUND                 (-402)
+#define VERR_NET_PATH_NOT_FOUND                 (-403)
+#define VERR_NET_PRINT_ERROR                    (-404)
+#define VERR_NET_NO_NETWORK                     (-405)
+#define VERR_NET_NOT_UNIQUE_NAME                (-406)
+
+#define VERR_NET_IN_PROGRESS                    (-436)
+#define VERR_NET_ALREADY_IN_PROGRESS            (-437)
+#define VERR_NET_NOT_SOCKET                     (-438)
+#define VERR_NET_DEST_ADDRESS_REQUIRED          (-439)
+#define VERR_NET_MSG_SIZE                       (-440)
+#define VERR_NET_PROTOCOL_TYPE                  (-441)
+#define VERR_NET_PROTOCOL_NOT_AVAILABLE         (-442)
+#define VERR_NET_PROTOCOL_NOT_SUPPORTED         (-443)
+#define VERR_NET_SOCKET_TYPE_NOT_SUPPORTED      (-444)
+#define VERR_NET_OPERATION_NOT_SUPPORTED        (-445)
+#define VERR_NET_PROTOCOL_FAMILY_NOT_SUPPORTED  (-446)
+#define VERR_NET_ADDRESS_FAMILY_NOT_SUPPORTED   (-447)
+#define VERR_NET_ADDRESS_IN_USE                 (-448)
+#define VERR_NET_ADDRESS_NOT_AVAILABLE          (-449)
+#define VERR_NET_DOWN                           (-450)
+#define VERR_NET_UNREACHABLE                    (-451)
+#define VERR_NET_CONNECTION_RESET               (-452)
+#define VERR_NET_CONNECTION_ABORTED             (-453)
+#define VERR_NET_CONNECTION_RESET_BY_PEER       (-454)
+#define VERR_NET_NO_BUFFER_SPACE                (-455)
+#define VERR_NET_ALREADY_CONNECTED              (-456)
+#define VERR_NET_NOT_CONNECTED                  (-457)
+#define VERR_NET_SHUTDOWN                       (-458)
+#define VERR_NET_TOO_MANY_REFERENCES            (-459)
+#define VERR_NET_CONNECTION_TIMED_OUT           (-460)
+#define VERR_NET_CONNECTION_REFUSED             (-461)
+#define VERR_NET_HOST_DOWN                      (-464)
+#define VERR_NET_HOST_UNREACHABLE               (-465)
+#define VERR_NET_PROTOCOL_ERROR                 (-466)
+#define VERR_NET_INCOMPLETE_TX_PACKET           (-467)
+
+/* misc. unsorted codes */
+#define VERR_RESOURCE_BUSY                      (-138)
+#define VERR_DISK_FULL                          (-152)
+#define VERR_TOO_MANY_SYMLINKS                  (-156)
+#define VERR_NO_MORE_FILES                      (-201)
+#define VERR_INTERNAL_ERROR                     (-225)
+#define VERR_INTERNAL_ERROR_2                   (-226)
+#define VERR_INTERNAL_ERROR_3                   (-227)
+#define VERR_INTERNAL_ERROR_4                   (-228)
+#define VERR_DEV_IO_ERROR                       (-250)
+#define VERR_IO_BAD_LENGTH                      (-255)
+#define VERR_BROKEN_PIPE                        (-301)
+#define VERR_NO_DATA                            (-304)
+#define VERR_SEM_DESTROYED                      (-363)
+#define VERR_DEADLOCK                           (-365)
+#define VERR_BAD_EXE_FORMAT                     (-608)
+#define VINF_HGCM_ASYNC_EXECUTE                 (2903)
+
+#define RT_SUCCESS(rc)      ((rc) >= 0)
+#define RT_FAILURE(rc)      ((rc) < 0)
+
+#endif
diff --git a/include/uapi/linux/vbox_ostypes.h b/include/uapi/linux/vbox_ostypes.h
new file mode 100644
index 000000000000..44a8561e8892
--- /dev/null
+++ b/include/uapi/linux/vbox_ostypes.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2006-2016 Oracle Corporation
+ *
+ * VirtualBox - Global Guest Operating System definition.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, in which case the provisions of the CDDL are applicable
+ * instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#ifndef __UAPI_VBOX_OSTYPES_H__
+#define __UAPI_VBOX_OSTYPES_H__
+
+/** The bit number which indicates 64-bit or 32-bit. */
+#define VBOXOSTYPE_x64_BIT       8
+
+/**
+ * Global list of guest operating system types.
+ *
+ * They are grouped into families. A family identifier always has
+ * mod 0x10000 == 0. New entries can be added, however other components
+ * depend on the values (e.g. the Qt GUI and guest additions) so the
+ * existing values MUST stay the same.
+ *
+ * Note: distinguish between 32 & 64 bits guest OSes by checking bit 8.
+ */
+typedef enum VBOXOSTYPE {
+	VBOXOSTYPE_Unknown          = 0,
+	VBOXOSTYPE_Unknown_x64      = 0x00100,
+	/* DOS / Windows 3.x */
+	VBOXOSTYPE_DOS              = 0x10000,
+	VBOXOSTYPE_Win31            = 0x15000,
+	/* Windows 9x family */
+	VBOXOSTYPE_Win9x            = 0x20000,
+	VBOXOSTYPE_Win95            = 0x21000,
+	VBOXOSTYPE_Win98            = 0x22000,
+	VBOXOSTYPE_WinMe            = 0x23000,
+	/* Windows NT family */
+	VBOXOSTYPE_WinNT            = 0x30000,
+	VBOXOSTYPE_WinNT_x64        = 0x30100,
+	VBOXOSTYPE_WinNT4           = 0x31000,
+	VBOXOSTYPE_Win2k            = 0x32000,
+	VBOXOSTYPE_WinXP            = 0x33000,
+	VBOXOSTYPE_WinXP_x64        = 0x33100,
+	VBOXOSTYPE_Win2k3           = 0x34000,
+	VBOXOSTYPE_Win2k3_x64       = 0x34100,
+	VBOXOSTYPE_WinVista         = 0x35000,
+	VBOXOSTYPE_WinVista_x64     = 0x35100,
+	VBOXOSTYPE_Win2k8           = 0x36000,
+	VBOXOSTYPE_Win2k8_x64       = 0x36100,
+	VBOXOSTYPE_Win7             = 0x37000,
+	VBOXOSTYPE_Win7_x64         = 0x37100,
+	VBOXOSTYPE_Win8             = 0x38000,
+	VBOXOSTYPE_Win8_x64         = 0x38100,
+	VBOXOSTYPE_Win2k12_x64      = 0x39100,
+	VBOXOSTYPE_Win81            = 0x3A000,
+	VBOXOSTYPE_Win81_x64        = 0x3A100,
+	VBOXOSTYPE_Win10            = 0x3B000,
+	VBOXOSTYPE_Win10_x64        = 0x3B100,
+	VBOXOSTYPE_Win2k16_x64      = 0x3C100,
+	/* OS/2 family */
+	VBOXOSTYPE_OS2              = 0x40000,
+	VBOXOSTYPE_OS2Warp3         = 0x41000,
+	VBOXOSTYPE_OS2Warp4         = 0x42000,
+	VBOXOSTYPE_OS2Warp45        = 0x43000,
+	VBOXOSTYPE_ECS              = 0x44000,
+	VBOXOSTYPE_OS21x            = 0x48000,
+	/* Linux family */
+	VBOXOSTYPE_Linux            = 0x50000,
+	VBOXOSTYPE_Linux_x64        = 0x50100,
+	VBOXOSTYPE_Linux22          = 0x51000,
+	VBOXOSTYPE_Linux24          = 0x52000,
+	VBOXOSTYPE_Linux24_x64      = 0x52100,
+	VBOXOSTYPE_Linux26          = 0x53000,
+	VBOXOSTYPE_Linux26_x64      = 0x53100,
+	VBOXOSTYPE_ArchLinux        = 0x54000,
+	VBOXOSTYPE_ArchLinux_x64    = 0x54100,
+	VBOXOSTYPE_Debian           = 0x55000,
+	VBOXOSTYPE_Debian_x64       = 0x55100,
+	VBOXOSTYPE_OpenSUSE         = 0x56000,
+	VBOXOSTYPE_OpenSUSE_x64     = 0x56100,
+	VBOXOSTYPE_FedoraCore       = 0x57000,
+	VBOXOSTYPE_FedoraCore_x64   = 0x57100,
+	VBOXOSTYPE_Gentoo           = 0x58000,
+	VBOXOSTYPE_Gentoo_x64       = 0x58100,
+	VBOXOSTYPE_Mandriva         = 0x59000,
+	VBOXOSTYPE_Mandriva_x64     = 0x59100,
+	VBOXOSTYPE_RedHat           = 0x5A000,
+	VBOXOSTYPE_RedHat_x64       = 0x5A100,
+	VBOXOSTYPE_Turbolinux       = 0x5B000,
+	VBOXOSTYPE_Turbolinux_x64   = 0x5B100,
+	VBOXOSTYPE_Ubuntu           = 0x5C000,
+	VBOXOSTYPE_Ubuntu_x64       = 0x5C100,
+	VBOXOSTYPE_Xandros          = 0x5D000,
+	VBOXOSTYPE_Xandros_x64      = 0x5D100,
+	VBOXOSTYPE_Oracle           = 0x5E000,
+	VBOXOSTYPE_Oracle_x64       = 0x5E100,
+	/* BSD family */
+	VBOXOSTYPE_FreeBSD          = 0x60000,
+	VBOXOSTYPE_FreeBSD_x64      = 0x60100,
+	VBOXOSTYPE_OpenBSD          = 0x61000,
+	VBOXOSTYPE_OpenBSD_x64      = 0x61100,
+	VBOXOSTYPE_NetBSD           = 0x62000,
+	VBOXOSTYPE_NetBSD_x64       = 0x62100,
+	/* Other families */
+	VBOXOSTYPE_Netware          = 0x70000,
+	VBOXOSTYPE_Solaris          = 0x80000,
+	VBOXOSTYPE_Solaris_x64      = 0x80100,
+	VBOXOSTYPE_OpenSolaris      = 0x81000,
+	VBOXOSTYPE_OpenSolaris_x64  = 0x81100,
+	VBOXOSTYPE_Solaris11_x64    = 0x82100,
+	VBOXOSTYPE_L4               = 0x90000,
+	VBOXOSTYPE_QNX              = 0xA0000,
+	VBOXOSTYPE_MacOS            = 0xB0000,
+	VBOXOSTYPE_MacOS_x64        = 0xB0100,
+	VBOXOSTYPE_MacOS106         = 0xB2000,
+	VBOXOSTYPE_MacOS106_x64     = 0xB2100,
+	VBOXOSTYPE_MacOS107_x64     = 0xB3100,
+	VBOXOSTYPE_MacOS108_x64     = 0xB4100,
+	VBOXOSTYPE_MacOS109_x64     = 0xB5100,
+	VBOXOSTYPE_MacOS1010_x64    = 0xB6100,
+	VBOXOSTYPE_MacOS1011_x64    = 0xB7100,
+	VBOXOSTYPE_JRockitVE        = 0xC0000,
+	VBOXOSTYPE_Haiku            = 0xD0000,
+	VBOXOSTYPE_Haiku_x64        = 0xD0100,
+	VBOXOSTYPE_VBoxBS_x64       = 0xE0100,
+	/** The mask which indicates 64-bit. */
+	VBOXOSTYPE_x64              = 1 << VBOXOSTYPE_x64_BIT,
+	/** The usual 32-bit hack. */
+	VBOXOSTYPE_32BIT_HACK       = 0x7fffffff
+} VBOXOSTYPE;
+
+/**
+ * Global list of guest OS families.
+ *
+ * Like VBOXOSTYPE, these values are part of the host/guest interface and
+ * MUST stay stable; only append new entries.
+ */
+typedef enum VBOXOSFAMILY {
+	VBOXOSFAMILY_Unknown          = 0,
+	VBOXOSFAMILY_Windows32        = 1,
+	VBOXOSFAMILY_Windows64        = 2,
+	VBOXOSFAMILY_Linux32          = 3,
+	VBOXOSFAMILY_Linux64          = 4,
+	VBOXOSFAMILY_FreeBSD32        = 5,
+	VBOXOSFAMILY_FreeBSD64        = 6,
+	VBOXOSFAMILY_Solaris32        = 7,
+	VBOXOSFAMILY_Solaris64        = 8,
+	VBOXOSFAMILY_MacOSX32         = 9,
+	VBOXOSFAMILY_MacOSX64         = 10,
+	/** The usual 32-bit hack. */
+	VBOXOSFAMILY_32BIT_HACK       = 0x7fffffff
+} VBOXOSFAMILY;
+
+#endif
diff --git a/include/uapi/linux/vbox_vmmdev.h b/include/uapi/linux/vbox_vmmdev.h
new file mode 100644
index 000000000000..ce82b8f499de
--- /dev/null
+++ b/include/uapi/linux/vbox_vmmdev.h
@@ -0,0 +1,1745 @@
+/*
+ * Virtual Device for Guest <-> VMM/Host communication (ADD,DEV).
+ *
+ * Copyright (C) 2006-2016 Oracle Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, in which case the provisions of the CDDL are applicable
+ * instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#ifndef __UAPI_VBOX_VMMDEV_H__
+#define __UAPI_VBOX_VMMDEV_H__
+
+#include <asm/bitsperlong.h>
+#include <linux/types.h>
+#include <linux/vbox_ostypes.h>
+
+/*
+ * We cannot use Linux's compiletime_assert here because it expects to be used
+ * inside a function only. Use a typedef to a char array with a negative size.
+ */
+#define VMMDEV_ASSERT_SIZE(type, size) \
+	typedef char type ## _assert_size[1 - 2*!!(sizeof(type) != (size))]
+#define VMMDEV_ASSERT_MEMBER_OFFSET(type, member, offset) \
+	typedef char type ## _ ## member ## _assert_member_offset \
+	[1 - 2*!!(offsetof(type, member) != (offset))]
+
+/*
+ * The host expects dwords / 32 bit packing. Using __aligned(4) everywhere is
+ * not really practical and also does not seem to work. Specifically I've been
+ * unable to get some structs (HGCMFunctionParameter32|64) to compile to the
+ * right size using __aligned(), so we're sticking with pragma pack(4) here.
+ */
+#pragma pack(4)
+
+/**
+ * @defgroup grp_vmmdev    VMM Device
+ *
+ * @note This interface cannot be changed, it can only be extended!
+ *
+ * @{
+ */
+
+/** Port for generic request interface (relative offset). */
+#define VMMDEV_PORT_OFF_REQUEST                             0
+
+/**
+ * @name VMMDev events.
+ *
+ * Used mainly by VMMDevReq_AcknowledgeEvents/VMMDevEvents and version 1.3 of
+ * VMMDevMemory.
+ *
+ * @{
+ */
+/** Host mouse capabilities have been changed. */
+#define VMMDEV_EVENT_MOUSE_CAPABILITIES_CHANGED             BIT(0)
+/** HGCM event. */
+#define VMMDEV_EVENT_HGCM                                   BIT(1)
+/** A display change request has been issued. */
+#define VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST                 BIT(2)
+/** Credentials are available for judgement. */
+#define VMMDEV_EVENT_JUDGE_CREDENTIALS                      BIT(3)
+/** The guest has been restored. */
+#define VMMDEV_EVENT_RESTORED                               BIT(4)
+/** Seamless mode state changed. */
+#define VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST           BIT(5)
+/** Memory balloon size changed. */
+#define VMMDEV_EVENT_BALLOON_CHANGE_REQUEST                 BIT(6)
+/** Statistics interval changed. */
+#define VMMDEV_EVENT_STATISTICS_INTERVAL_CHANGE_REQUEST     BIT(7)
+/** VRDP status changed. */
+#define VMMDEV_EVENT_VRDP                                   BIT(8)
+/** New mouse position data available. */
+#define VMMDEV_EVENT_MOUSE_POSITION_CHANGED                 BIT(9)
+/** CPU hotplug event occurred. */
+#define VMMDEV_EVENT_CPU_HOTPLUG                            BIT(10)
+/** The mask of valid events, for sanity checking. */
+#define VMMDEV_EVENT_VALID_EVENT_MASK                       0x000007ffU
+/** @} */
+
+/** @defgroup grp_vmmdev_req    VMMDev Generic Request Interface
+ * @{
+ */
+
+/** @name Current version of the VMMDev interface.
+ *
+ * Additions are allowed to work only if
+ * additions_major == vmmdev_current && additions_minor <= vmmdev_current.
+ * Additions version is reported to host (VMMDev) by VMMDevReq_ReportGuestInfo.
+ *
+ * @remarks These defines also live in the 16-bit and assembly versions of this
+ *          header.
+ */
+#define VMMDEV_VERSION                      0x00010004
+#define VMMDEV_VERSION_MAJOR                (VMMDEV_VERSION >> 16)
+#define VMMDEV_VERSION_MINOR                (VMMDEV_VERSION & 0xffff)
+/** @} */
+
+/** Maximum request packet size. */
+#define VMMDEV_MAX_VMMDEVREQ_SIZE           1048576
+/** Maximum number of HGCM parameters. */
+#define VMMDEV_MAX_HGCM_PARMS               1024
+/** Maximum total size of hgcm buffers in one call. */
+#define VMMDEV_MAX_HGCM_DATA_SIZE           0x7fffffffU
+
+/**
+ * VMMDev request types.
+ * @note when updating this, adjust vmmdevGetRequestSize() as well
+ */
+typedef enum {
+	VMMDevReq_InvalidRequest             =  0,
+	VMMDevReq_GetMouseStatus             =  1,
+	VMMDevReq_SetMouseStatus             =  2,
+	VMMDevReq_SetPointerShape            =  3,
+	VMMDevReq_GetHostVersion             =  4,
+	VMMDevReq_Idle                       =  5,
+	VMMDevReq_GetHostTime                = 10,
+	VMMDevReq_GetHypervisorInfo          = 20,
+	VMMDevReq_SetHypervisorInfo          = 21,
+	VMMDevReq_RegisterPatchMemory        = 22, /* since version 3.0.6 */
+	VMMDevReq_DeregisterPatchMemory      = 23, /* since version 3.0.6 */
+	VMMDevReq_SetPowerStatus             = 30,
+	VMMDevReq_AcknowledgeEvents          = 41,
+	VMMDevReq_CtlGuestFilterMask         = 42,
+	VMMDevReq_ReportGuestInfo            = 50,
+	VMMDevReq_ReportGuestInfo2           = 58, /* since version 3.2.0 */
+	VMMDevReq_ReportGuestStatus          = 59, /* since version 3.2.8 */
+	VMMDevReq_ReportGuestUserState       = 74, /* since version 4.3 */
+	/**
+	 * Retrieve a display resize request sent by the host using
+	 * @a IDisplay::setVideoModeHint.  Deprecated.
+	 *
+	 * Similar to @a VMMDevReq_GetDisplayChangeRequest2, except that it only
+	 * considers host requests sent for the first virtual display. This
+	 * guest-req should not be used in new guest code, and the results are
+	 * undefined if a guest mixes calls to this and
+	 * @a VMMDevReq_GetDisplayChangeRequest2.
+	 */
+	VMMDevReq_GetDisplayChangeRequest    = 51,
+	VMMDevReq_VideoModeSupported         = 52,
+	VMMDevReq_GetHeightReduction         = 53,
+	/**
+	 * Retrieve a display resize request sent by the host using
+	 * @a IDisplay::setVideoModeHint.
+	 *
+	 * Queries a display resize request sent from the host.  If the
+	 * @a eventAck member is set to true and there is an unqueried request
+	 * available for one of the virtual displays then that request will
+	 * be returned.  If several displays have unqueried requests the lowest
+	 * numbered display will be chosen first.  Only the most recent unseen
+	 * request for each display is remembered.
+	 * If @a eventAck is set to false, the last host request queried with
+	 * @a eventAck set is resent, or failing that the most recent received
+	 * from the host.  If no host request was ever received then all zeros
+	 * are returned.
+	 */
+	VMMDevReq_GetDisplayChangeRequest2   = 54,
+	VMMDevReq_ReportGuestCapabilities    = 55,
+	VMMDevReq_SetGuestCapabilities       = 56,
+	VMMDevReq_VideoModeSupported2        = 57, /* since version 3.2.0 */
+	VMMDevReq_GetDisplayChangeRequestEx  = 80, /* since version 4.2.4 */
+	VMMDevReq_HGCMConnect                = 60,
+	VMMDevReq_HGCMDisconnect             = 61,
+	VMMDevReq_HGCMCall32                 = 62,
+	VMMDevReq_HGCMCall64                 = 63,
+	VMMDevReq_HGCMCancel                 = 64,
+	VMMDevReq_HGCMCancel2                = 65,
+	VMMDevReq_VideoAccelEnable           = 70,
+	VMMDevReq_VideoAccelFlush            = 71,
+	VMMDevReq_VideoSetVisibleRegion      = 72,
+	VMMDevReq_GetSeamlessChangeRequest   = 73,
+	VMMDevReq_QueryCredentials           = 100,
+	VMMDevReq_ReportCredentialsJudgement = 101,
+	VMMDevReq_ReportGuestStats           = 110,
+	VMMDevReq_GetMemBalloonChangeRequest = 111,
+	VMMDevReq_GetStatisticsChangeRequest = 112,
+	VMMDevReq_ChangeMemBalloon           = 113,
+	VMMDevReq_GetVRDPChangeRequest       = 150,
+	VMMDevReq_LogString                  = 200,
+	VMMDevReq_GetCpuHotPlugRequest       = 210,
+	VMMDevReq_SetCpuHotPlugStatus        = 211,
+	VMMDevReq_RegisterSharedModule       = 212,
+	VMMDevReq_UnregisterSharedModule     = 213,
+	VMMDevReq_CheckSharedModules         = 214,
+	VMMDevReq_GetPageSharingStatus       = 215,
+	VMMDevReq_DebugIsPageShared          = 216,
+	VMMDevReq_GetSessionId               = 217, /* since version 3.2.8 */
+	VMMDevReq_WriteCoreDump              = 218,
+	VMMDevReq_GuestHeartbeat             = 219,
+	VMMDevReq_HeartbeatConfigure         = 220,
+	VMMDevReq_SizeHack                   = 0x7fffffff
+} VMMDevRequestType;
+
+#if __BITS_PER_LONG == 64
+#define VMMDevReq_HGCMCall VMMDevReq_HGCMCall64
+#else
+#define VMMDevReq_HGCMCall VMMDevReq_HGCMCall32
+#endif
+
+/** Version of VMMDevRequestHeader structure. */
+#define VMMDEV_REQUEST_HEADER_VERSION (0x10001)
+
+/**
+ * Generic VMMDev request header.
+ */
+typedef struct {
+	/** IN: Size of the structure in bytes (including body). */
+	__u32 size;
+	/** IN: Version of the structure.  */
+	__u32 version;
+	/** IN: Type of the request. */
+	VMMDevRequestType requestType;
+	/** OUT: Return code. */
+	__s32 rc;
+	/** Reserved field no.1. Must be zero (MBZ). */
+	__u32 reserved1;
+	/** Reserved field no.2. Must be zero (MBZ). */
+	__u32 reserved2;
+} VMMDevRequestHeader;
+VMMDEV_ASSERT_SIZE(VMMDevRequestHeader, 24);
+
+/**
+ * Mouse status request structure.
+ *
+ * Used by VMMDevReq_GetMouseStatus and VMMDevReq_SetMouseStatus.
+ */
+typedef struct {
+	/** header */
+	VMMDevRequestHeader header;
+	/** Mouse feature mask. See VMMDEV_MOUSE_*. */
+	__u32 mouseFeatures;
+	/** Mouse x position. */
+	__s32 pointerXPos;
+	/** Mouse y position. */
+	__s32 pointerYPos;
+} VMMDevReqMouseStatus;
+VMMDEV_ASSERT_SIZE(VMMDevReqMouseStatus, 24+12);
+
+/**
+ * @name Mouse capability bits (VMMDevReqMouseStatus::mouseFeatures).
+ * @{
+ */
+/** The guest can (== wants to) handle absolute coordinates.  */
+#define VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE                     BIT(0)
+/**
+ * The host can (== wants to) send absolute coordinates.
+ * (Input not captured.)
+ */
+#define VMMDEV_MOUSE_HOST_WANTS_ABSOLUTE                    BIT(1)
+/**
+ * The guest can *NOT* switch to software cursor and therefore depends on the
+ * host cursor.
+ *
+ * When guest additions are installed and the host has promised to display the
+ * cursor itself, the guest installs a hardware mouse driver. Don't ask the
+ * guest to switch to a software cursor then.
+ */
+#define VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR                BIT(2)
+/** The host does NOT provide support for drawing the cursor itself. */
+#define VMMDEV_MOUSE_HOST_CANNOT_HWPOINTER                  BIT(3)
+/** The guest can read VMMDev events to find out about pointer movement */
+#define VMMDEV_MOUSE_NEW_PROTOCOL                           BIT(4)
+/**
+ * If the guest changes the status of the VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR
+ * bit, the host will honour this.
+ */
+#define VMMDEV_MOUSE_HOST_RECHECKS_NEEDS_HOST_CURSOR        BIT(5)
+/**
+ * The host supplies an absolute pointing device.  The Guest Additions may
+ * wish to use this to decide whether to install their own driver.
+ */
+#define VMMDEV_MOUSE_HOST_HAS_ABS_DEV                       BIT(6)
+/** The mask of all VMMDEV_MOUSE_* flags */
+#define VMMDEV_MOUSE_MASK                                   0x0000007fU
+/**
+ * The mask of guest capability changes for which notification events should
+ * be sent.
+ */
+#define VMMDEV_MOUSE_NOTIFY_HOST_MASK \
+	(VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE | VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR)
+/** The mask of all capabilities which the guest can legitimately change */
+#define VMMDEV_MOUSE_GUEST_MASK \
+	(VMMDEV_MOUSE_NOTIFY_HOST_MASK | VMMDEV_MOUSE_NEW_PROTOCOL)
+/**
+ * The mask of host capability changes for which notification events should
+ * be sent.
+ */
+#define VMMDEV_MOUSE_NOTIFY_GUEST_MASK \
+	VMMDEV_MOUSE_HOST_WANTS_ABSOLUTE
+/** The mask of all capabilities which the host can legitimately change */
+#define VMMDEV_MOUSE_HOST_MASK \
+	(VMMDEV_MOUSE_NOTIFY_GUEST_MASK |\
+	 VMMDEV_MOUSE_HOST_CANNOT_HWPOINTER |\
+	 VMMDEV_MOUSE_HOST_RECHECKS_NEEDS_HOST_CURSOR| \
+	 VMMDEV_MOUSE_HOST_HAS_ABS_DEV)
+/** @} */
+
+/**
+ * @name Absolute mouse reporting range
+ * @{
+ */
+/** @todo Should these be here?  They are needed by both host and guest. */
+/** The minimum value our pointing device can return. */
+#define VMMDEV_MOUSE_RANGE_MIN 0
+/** The maximum value our pointing device can return. */
+#define VMMDEV_MOUSE_RANGE_MAX 0xFFFF
+/** The full range our pointing device can return. */
+#define VMMDEV_MOUSE_RANGE (VMMDEV_MOUSE_RANGE_MAX - VMMDEV_MOUSE_RANGE_MIN)
+/** @} */
+
+/**
+ * Mouse pointer shape/visibility change request.
+ *
+ * Used by VMMDevReq_SetPointerShape. The size is variable.
+ */
+typedef struct VMMDevReqMousePointer {
+	/** Header. */
+	VMMDevRequestHeader header;
+	/** VBOX_MOUSE_POINTER_* bit flags from VBox/Graphics/VBoxVideo.h. */
+	__u32 fFlags;
+	/** x coordinate of hot spot. */
+	__u32 xHot;
+	/** y coordinate of hot spot. */
+	__u32 yHot;
+	/** Width of the pointer in pixels. */
+	__u32 width;
+	/** Height of the pointer in scanlines. */
+	__u32 height;
+	/**
+	 * Pointer data.
+	 *
+	 ****
+	 * The data consists of 1 bpp AND mask followed by 32 bpp XOR (color)
+	 * mask.
+	 *
+	 * For pointers without alpha channel the XOR mask pixels are 32 bit
+	 * values: (lsb)BGR0(msb).
+	 * For pointers with alpha channel the XOR mask consists of
+	 * (lsb)BGRA(msb) 32 bit values.
+	 *
+	 * Guest driver must create the AND mask for pointers with alpha
+	 * channel, so if host does not support alpha, the pointer could be
+	 * displayed as a normal color pointer. The AND mask can be constructed
+	 * from alpha values. For example alpha value >= 0xf0 means bit 0 in
+	 * the AND mask.
+	 *
+	 * The AND mask is 1 bpp bitmap with byte aligned scanlines. Size of AND
+	 * mask, therefore, is cbAnd = (width + 7) / 8 * height. The padding
+	 * bits at the end of any scanline are undefined.
+	 *
+	 * The XOR mask follows the AND mask on the next 4 bytes aligned offset:
+	 * u8 *pXor = pAnd + ((cbAnd + 3) & ~3)
+	 * Bytes in the gap between the AND and the XOR mask are undefined.
+	 * XOR mask scanlines have no gap between them and size of XOR mask is:
+	 * cXor = width * 4 * height.
+	 ****
+	 *
+	 * Preallocate 4 bytes for accessing actual data as p->pointerData.
+	 */
+	char pointerData[4];
+} VMMDevReqMousePointer;
+VMMDEV_ASSERT_SIZE(VMMDevReqMousePointer, 24+24);
+
+/**
+ * String log request structure.
+ *
+ * Used by VMMDevReq_LogString.
+ * @deprecated  Use the IPRT logger or VbglR3WriteLog instead.
+ */
+typedef struct {
+	/** header */
+	VMMDevRequestHeader header;
+	/** variable length string data */
+	char szString[1];
+} VMMDevReqLogString;
+VMMDEV_ASSERT_SIZE(VMMDevReqLogString, 24+4);
+
+/**
+ * VirtualBox host version request structure.
+ *
+ * Used by VMMDevReq_GetHostVersion.
+ *
+ * @remarks VBGL uses this to detect the presence of new features in the
+ *          interface.
+ */
+typedef struct {
+	/** Header. */
+	VMMDevRequestHeader header;
+	/** Major version. */
+	__u16 major;
+	/** Minor version. */
+	__u16 minor;
+	/** Build number. */
+	__u32 build;
+	/** SVN revision. */
+	__u32 revision;
+	/** Feature mask. */
+	__u32 features;
+} VMMDevReqHostVersion;
+VMMDEV_ASSERT_SIZE(VMMDevReqHostVersion, 24+16);
+
+/**
+ * @name VMMDevReqHostVersion::features
+ * @{
+ */
+/** Physical page lists are supported by HGCM. */
+#define VMMDEV_HVF_HGCM_PHYS_PAGE_LIST  BIT(0)
+/** @} */
+
+/**
+ * Guest capabilities structure.
+ *
+ * Used by VMMDevReq_ReportGuestCapabilities.
+ */
+typedef struct {
+	/** Header. */
+	VMMDevRequestHeader header;
+	/** Capabilities (VMMDEV_GUEST_*). */
+	__u32 caps;
+} VMMDevReqGuestCapabilities;
+VMMDEV_ASSERT_SIZE(VMMDevReqGuestCapabilities, 24+4);
+
+/**
+ * Guest capabilities structure, version 2.
+ *
+ * Used by VMMDevReq_SetGuestCapabilities.
+ */
+typedef struct {
+	/** Header. */
+	VMMDevRequestHeader header;
+	/** Mask of capabilities to be added. */
+	__u32 u32OrMask;
+	/** Mask of capabilities to be removed. */
+	__u32 u32NotMask;
+} VMMDevReqGuestCapabilities2;
+VMMDEV_ASSERT_SIZE(VMMDevReqGuestCapabilities2, 24+8);
+
+/**
+ * @name Guest capability bits.
+ * Used by VMMDevReq_ReportGuestCapabilities and VMMDevReq_SetGuestCapabilities.
+ * @{
+ */
+/** The guest supports seamless display rendering. */
+#define VMMDEV_GUEST_SUPPORTS_SEAMLESS                      BIT(0)
+/** The guest supports mapping guest to host windows. */
+#define VMMDEV_GUEST_SUPPORTS_GUEST_HOST_WINDOW_MAPPING     BIT(1)
+/**
+ * The guest graphical additions are active.
+ * Used for fast activation and deactivation of certain graphical operations
+ * (e.g. resizing & seamless). The legacy VMMDevReq_ReportGuestCapabilities
+ * request sets this automatically, but VMMDevReq_SetGuestCapabilities does
+ * not.
+ */
+#define VMMDEV_GUEST_SUPPORTS_GRAPHICS                      BIT(2)
+/** The mask of valid events, for sanity checking. */
+#define VMMDEV_GUEST_CAPABILITIES_MASK                      0x00000007U
+/** @} */
+
+/**
+ * Idle request structure.
+ *
+ * Used by VMMDevReq_Idle.
+ */
+typedef struct {
+	/** Header. */
+	VMMDevRequestHeader header;
+} VMMDevReqIdle;
+VMMDEV_ASSERT_SIZE(VMMDevReqIdle, 24);
+
+/**
+ * Host time request structure.
+ *
+ * Used by VMMDevReq_GetHostTime.
+ */
+typedef struct {
+	/** Header */
+	VMMDevRequestHeader header;
+	/** OUT: Time in milliseconds since unix epoch. */
+	__u64 time;
+} VMMDevReqHostTime;
+VMMDEV_ASSERT_SIZE(VMMDevReqHostTime, 24+8);
+
+/**
+ * Hypervisor info structure.
+ *
+ * Used by VMMDevReq_GetHypervisorInfo and VMMDevReq_SetHypervisorInfo.
+ */
+typedef struct {
+	/** Header. */
+	VMMDevRequestHeader header;
+	/**
+	 * Guest virtual address of proposed hypervisor start.
+	 * Not used by VMMDevReq_GetHypervisorInfo.
+	 * @todo Make this 64-bit compatible?
+	 */
+	__u32 hypervisorStart;
+	/** Hypervisor size in bytes. */
+	__u32 hypervisorSize;
+} VMMDevReqHypervisorInfo;
+VMMDEV_ASSERT_SIZE(VMMDevReqHypervisorInfo, 24+8);
+
+/**
+ * @name Default patch memory size.
+ * Used by VMMDevReq_RegisterPatchMemory and VMMDevReq_DeregisterPatchMemory.
+ * @{
+ */
+#define VMMDEV_GUEST_DEFAULT_PATCHMEM_SIZE          8192
+/** @} */
+
+/**
+ * Patching memory structure. (locked executable & read-only page from the
+ * guest's perspective)
+ *
+ * Used by VMMDevReq_RegisterPatchMemory and VMMDevReq_DeregisterPatchMemory
+ */
+typedef struct {
+	/** Header. */
+	VMMDevRequestHeader header;
+	/** Guest virtual address of the patching page(s). */
+	__u64 pPatchMem;
+	/** Patch page size in bytes. */
+	__u32 cbPatchMem;
+} VMMDevReqPatchMemory;
+VMMDEV_ASSERT_SIZE(VMMDevReqPatchMemory, 24+12);
+
+/**
+ * Guest power requests.
+ *
+ * See VMMDevReq_SetPowerStatus and VMMDevPowerStateRequest.
+ */
+typedef enum {
+	VMMDevPowerState_Invalid   = 0,
+	VMMDevPowerState_Pause     = 1,
+	VMMDevPowerState_PowerOff  = 2,
+	VMMDevPowerState_SaveState = 3,
+	VMMDevPowerState_SizeHack = 0x7fffffff
+} VMMDevPowerState;
+VMMDEV_ASSERT_SIZE(VMMDevPowerState, 4);
+
+/**
+ * VM power status structure.
+ *
+ * Used by VMMDevReq_SetPowerStatus.
+ */
+typedef struct {
+	/** Header. */
+	VMMDevRequestHeader header;
+	/** Power state request. */
+	VMMDevPowerState powerState;
+} VMMDevPowerStateRequest;
+VMMDEV_ASSERT_SIZE(VMMDevPowerStateRequest, 24+4);
+
+/**
+ * Pending events structure.
+ *
+ * Used by VMMDevReq_AcknowledgeEvents.
+ */
+typedef struct {
+	/** Header. */
+	VMMDevRequestHeader header;
+	/** OUT: Pending event mask. */
+	__u32 events;
+} VMMDevEvents;
+VMMDEV_ASSERT_SIZE(VMMDevEvents, 24+4);
+
+/**
+ * Guest event filter mask control.
+ *
+ * Used by VMMDevReq_CtlGuestFilterMask.
+ */
+typedef struct {
+	/** Header. */
+	VMMDevRequestHeader header;
+	/** Mask of events to be added to the filter. */
+	__u32 u32OrMask;
+	/** Mask of events to be removed from the filter. */
+	__u32 u32NotMask;
+} VMMDevCtlGuestFilterMask;
+VMMDEV_ASSERT_SIZE(VMMDevCtlGuestFilterMask, 24+8);
+
+/**
+ * Guest information structure.
+ *
+ * Used by VMMDevReportGuestInfo and PDMIVMMDEVCONNECTOR::pfnUpdateGuestVersion.
+ */
+typedef struct VBoxGuestInfo {
+	/**
+	 * The VMMDev interface version expected by additions.
+	 * *Deprecated*, do not use anymore! Will be removed.
+	 */
+	__u32 interfaceVersion;
+	/** Guest OS type. */
+	VBOXOSTYPE osType;
+} VBoxGuestInfo;
+VMMDEV_ASSERT_SIZE(VBoxGuestInfo, 8);
+
+/**
+ * Guest information report.
+ *
+ * Used by VMMDevReq_ReportGuestInfo.
+ */
+typedef struct {
+	/** Header. */
+	VMMDevRequestHeader header;
+	/** Guest information. */
+	VBoxGuestInfo guestInfo;
+} VMMDevReportGuestInfo;
+VMMDEV_ASSERT_SIZE(VMMDevReportGuestInfo, 24+8);
+
+/**
+ * Guest information structure, version 2.
+ *
+ * Used by VMMDevReportGuestInfo2.
+ */
+typedef struct VBoxGuestInfo2 {
+	/** Major version. */
+	__u16 additionsMajor;
+	/** Minor version. */
+	__u16 additionsMinor;
+	/** Build number. */
+	__u32 additionsBuild;
+	/** SVN revision. */
+	__u32 additionsRevision;
+	/** Feature mask, currently unused. */
+	__u32 additionsFeatures;
+	/**
+	 * The intentional meaning of this field was:
+	 * Some additional information, for example 'Beta 1' or something like
+	 * that.
+	 *
+	 * The way it was actually implemented: VBOX_VERSION_STRING.
+	 *
+	 * This means the first three members are duplicated in this field (if
+	 * the guest build config is sane). So, the user must check this and
+	 * chop it off before usage. There is, because of the Main code's blind
+	 * trust in the field's content, no way back.
+	 */
+	char szName[128];
+} VBoxGuestInfo2;
+VMMDEV_ASSERT_SIZE(VBoxGuestInfo2, 144);
+
+/**
+ * Guest information report, version 2.
+ *
+ * Used by VMMDevReq_ReportGuestInfo2.
+ */
+typedef struct {
+	/** Header. */
+	VMMDevRequestHeader header;
+	/** Guest information. */
+	VBoxGuestInfo2 guestInfo;
+} VMMDevReportGuestInfo2;
+VMMDEV_ASSERT_SIZE(VMMDevReportGuestInfo2, 24+144);
+
+/**
+ * The guest facility.
+ * This needs to be kept in sync with AdditionsFacilityType of the Main API!
+ */
+typedef enum {
+	VBoxGuestFacilityType_Unknown         = 0,
+	VBoxGuestFacilityType_VBoxGuestDriver = 20,
+	/* VBoxGINA / VBoxCredProv / pam_vbox. */
+	VBoxGuestFacilityType_AutoLogon       = 90,
+	VBoxGuestFacilityType_VBoxService     = 100,
+	/* VBoxTray (Windows), VBoxClient (Linux, Unix). */
+	VBoxGuestFacilityType_VBoxTrayClient  = 101,
+	VBoxGuestFacilityType_Seamless        = 1000,
+	VBoxGuestFacilityType_Graphics        = 1100,
+	VBoxGuestFacilityType_All             = 0x7ffffffe,
+	VBoxGuestFacilityType_SizeHack        = 0x7fffffff
+} VBoxGuestFacilityType;
+VMMDEV_ASSERT_SIZE(VBoxGuestFacilityType, 4);
+
+/**
+ * The current guest status of a facility.
+ * This needs to be kept in sync with AdditionsFacilityStatus of the Main API!
+ *
+ * @remarks r=bird: Pretty please, for future types like this, simply do a
+ *          linear allocation without any gaps. This stuff is impossible to work
+ *          efficiently with, let alone validate.  Applies to the other facility
+ *          enums too.
+ */
+typedef enum {
+	VBoxGuestFacilityStatus_Inactive    = 0,
+	VBoxGuestFacilityStatus_Paused      = 1,
+	VBoxGuestFacilityStatus_PreInit     = 20,
+	VBoxGuestFacilityStatus_Init        = 30,
+	VBoxGuestFacilityStatus_Active      = 50,
+	VBoxGuestFacilityStatus_Terminating = 100,
+	VBoxGuestFacilityStatus_Terminated  = 101,
+	VBoxGuestFacilityStatus_Failed      = 800,
+	VBoxGuestFacilityStatus_Unknown     = 999,
+	VBoxGuestFacilityStatus_SizeHack    = 0x7fffffff
+} VBoxGuestFacilityStatus;
+VMMDEV_ASSERT_SIZE(VBoxGuestFacilityStatus, 4);
+
+/**
+ * The facility class.
+ * This needs to be kept in sync with AdditionsFacilityClass of the Main API!
+ */
+typedef enum {
+	VBoxGuestFacilityClass_None       = 0,
+	VBoxGuestFacilityClass_Driver     = 10,
+	VBoxGuestFacilityClass_Service    = 30,
+	VBoxGuestFacilityClass_Program    = 50,
+	VBoxGuestFacilityClass_Feature    = 100,
+	VBoxGuestFacilityClass_ThirdParty = 999,
+	VBoxGuestFacilityClass_All        = 0x7ffffffe,
+	VBoxGuestFacilityClass_SizeHack   = 0x7fffffff
+} VBoxGuestFacilityClass;
+VMMDEV_ASSERT_SIZE(VBoxGuestFacilityClass, 4);
+
+/**
+ * Guest status structure.
+ *
+ * Used by VMMDevReqGuestStatus.
+ */
+typedef struct VBoxGuestStatus {
+	/** Facility the status is indicated for. */
+	VBoxGuestFacilityType facility;
+	/** Current guest status. */
+	VBoxGuestFacilityStatus status;
+	/** Flags, not used at the moment. */
+	__u32 flags;
+} VBoxGuestStatus;
+VMMDEV_ASSERT_SIZE(VBoxGuestStatus, 12);
+
+/**
+ * Guest Additions status structure.
+ *
+ * Used by VMMDevReq_ReportGuestStatus.
+ */
+typedef struct {
+	/** Header. */
+	VMMDevRequestHeader header;
+	/** Guest information. */
+	VBoxGuestStatus guestStatus;
+} VMMDevReportGuestStatus;
+VMMDEV_ASSERT_SIZE(VMMDevReportGuestStatus, 24+12);
+
+/**
+ * The current status of specific guest user.
+ * This needs to be kept in sync with GuestUserState of the Main API!
+ */
+typedef enum VBoxGuestUserState {
+	VBoxGuestUserState_Unknown            = 0,
+	VBoxGuestUserState_LoggedIn           = 1,
+	VBoxGuestUserState_LoggedOut          = 2,
+	VBoxGuestUserState_Locked             = 3,
+	VBoxGuestUserState_Unlocked           = 4,
+	VBoxGuestUserState_Disabled           = 5,
+	VBoxGuestUserState_Idle               = 6,
+	VBoxGuestUserState_InUse              = 7,
+	VBoxGuestUserState_Created            = 8,
+	VBoxGuestUserState_Deleted            = 9,
+	VBoxGuestUserState_SessionChanged     = 10,
+	VBoxGuestUserState_CredentialsChanged = 11,
+	VBoxGuestUserState_RoleChanged        = 12,
+	VBoxGuestUserState_GroupAdded         = 13,
+	VBoxGuestUserState_GroupRemoved       = 14,
+	VBoxGuestUserState_Elevated           = 15,
+	VBoxGuestUserState_SizeHack           = 0x7fffffff
+} VBoxGuestUserState;
+VMMDEV_ASSERT_SIZE(VBoxGuestUserState, 4);
+
+/**
+ * Guest user status updates.
+ */
+typedef struct VBoxGuestUserStatus {
+	/** The guest user state to send. */
+	VBoxGuestUserState state;
+	/** Size (in bytes) of szUser. */
+	__u32 cbUser;
+	/** Size (in bytes) of szDomain. */
+	__u32 cbDomain;
+	/** Size (in bytes) of aDetails. */
+	__u32 cbDetails;
+	/** Note: Here begins the dynamically allocated region. */
+	/** Guest user to report state for. */
+	char szUser[1];
+	/** Domain the guest user is bound to. */
+	char szDomain[1];
+	/** Optional details of the state. */
+	__u8 aDetails[1];
+} VBoxGuestUserStatus;
+VMMDEV_ASSERT_SIZE(VBoxGuestUserStatus, 20);
+
+/**
+ * Guest user status structure.
+ *
+ * Used by VMMDevReq_ReportGuestUserStatus.
+ */
+typedef struct {
+	/** Header. */
+	VMMDevRequestHeader header;
+	/** Guest user status. */
+	VBoxGuestUserStatus status;
+} VMMDevReportGuestUserState;
+VMMDEV_ASSERT_SIZE(VMMDevReportGuestUserState, 24+20);
+
+/**
+ * Guest statistics structure.
+ *
+ * Used by VMMDevReportGuestStats and PDMIVMMDEVCONNECTOR::pfnReportStatistics.
+ */
+typedef struct VBoxGuestStatistics {
+	/** Virtual CPU ID. */
+	__u32 u32CpuId;
+	/** Reported statistics. */
+	__u32 u32StatCaps;
+	/** Idle CPU load (0-100) for last interval. */
+	__u32 u32CpuLoad_Idle;
+	/** Kernel CPU load (0-100) for last interval. */
+	__u32 u32CpuLoad_Kernel;
+	/** User CPU load (0-100) for last interval. */
+	__u32 u32CpuLoad_User;
+	/** Nr of threads. */
+	__u32 u32Threads;
+	/** Nr of processes. */
+	__u32 u32Processes;
+	/** Nr of handles. */
+	__u32 u32Handles;
+	/** Memory load (0-100). */
+	__u32 u32MemoryLoad;
+	/** Page size of guest system. */
+	__u32 u32PageSize;
+	/** Total physical memory (in 4KB pages). */
+	__u32 u32PhysMemTotal;
+	/** Available physical memory (in 4KB pages). */
+	__u32 u32PhysMemAvail;
+	/** Ballooned physical memory (in 4KB pages). */
+	__u32 u32PhysMemBalloon;
+	/** Total committed memory (not necessarily in-use) (in 4KB pages). */
+	__u32 u32MemCommitTotal;
+	/** Total amount of memory used by the kernel (in 4KB pages). */
+	__u32 u32MemKernelTotal;
+	/** Total amount of paged memory used by the kernel (in 4KB pages). */
+	__u32 u32MemKernelPaged;
+	/** Total amount of nonpaged memory used by the kernel (4KB pages). */
+	__u32 u32MemKernelNonPaged;
+	/** Total amount of memory used for the system cache (in 4KB pages). */
+	__u32 u32MemSystemCache;
+	/** Pagefile size (in 4KB pages). */
+	__u32 u32PageFileSize;
+} VBoxGuestStatistics;
+VMMDEV_ASSERT_SIZE(VBoxGuestStatistics, 19*4);
+
+/**
+ * @name Guest statistics values (VBoxGuestStatistics::u32StatCaps).
+ * @{
+ */
+#define VBOX_GUEST_STAT_CPU_LOAD_IDLE       BIT(0)
+#define VBOX_GUEST_STAT_CPU_LOAD_KERNEL     BIT(1)
+#define VBOX_GUEST_STAT_CPU_LOAD_USER       BIT(2)
+#define VBOX_GUEST_STAT_THREADS             BIT(3)
+#define VBOX_GUEST_STAT_PROCESSES           BIT(4)
+#define VBOX_GUEST_STAT_HANDLES             BIT(5)
+#define VBOX_GUEST_STAT_MEMORY_LOAD         BIT(6)
+#define VBOX_GUEST_STAT_PHYS_MEM_TOTAL      BIT(7)
+#define VBOX_GUEST_STAT_PHYS_MEM_AVAIL      BIT(8)
+#define VBOX_GUEST_STAT_PHYS_MEM_BALLOON    BIT(9)
+#define VBOX_GUEST_STAT_MEM_COMMIT_TOTAL    BIT(10)
+#define VBOX_GUEST_STAT_MEM_KERNEL_TOTAL    BIT(11)
+#define VBOX_GUEST_STAT_MEM_KERNEL_PAGED    BIT(12)
+#define VBOX_GUEST_STAT_MEM_KERNEL_NONPAGED BIT(13)
+#define VBOX_GUEST_STAT_MEM_SYSTEM_CACHE    BIT(14)
+#define VBOX_GUEST_STAT_PAGE_FILE_SIZE      BIT(15)
+/** @} */
+
+/**
+ * Guest statistics command structure.
+ *
+ * Used by VMMDevReq_ReportGuestStats.
+ */
+typedef struct {
+	/** Header. */
+	VMMDevRequestHeader header;
+	/** Guest information. */
+	VBoxGuestStatistics guestStats;
+} VMMDevReportGuestStats;
+VMMDEV_ASSERT_SIZE(VMMDevReportGuestStats, 24+19*4);
+
+/**
+ * @name The ballooning chunk size which VMMDev works at.
+ * @{
+ */
+#define VMMDEV_MEMORY_BALLOON_CHUNK_SIZE             (1048576)
+#define VMMDEV_MEMORY_BALLOON_CHUNK_PAGES            (1048576 / 4096)
+/** @} */
+
+/**
+ * Poll for ballooning change request.
+ *
+ * Used by VMMDevReq_GetMemBalloonChangeRequest.
+ */
+typedef struct {
+	/** Header. */
+	VMMDevRequestHeader header;
+	/** Balloon size in chunks of VMMDEV_MEMORY_BALLOON_CHUNK_SIZE (1 MB). */
+	__u32 cBalloonChunks;
+	/** Guest RAM size in chunks (1 chunk = 1 MB). */
+	__u32 cPhysMemChunks;
+	/**
+	 * Setting this to VMMDEV_EVENT_BALLOON_CHANGE_REQUEST indicates that
+	 * the request is a response to that event.
+	 * (Don't confuse this with VMMDevReq_AcknowledgeEvents.)
+	 */
+	__u32 eventAck;
+} VMMDevGetMemBalloonChangeRequest;
+VMMDEV_ASSERT_SIZE(VMMDevGetMemBalloonChangeRequest, 24+12);
+
+/**
+ * Change the size of the balloon.
+ *
+ * Used by VMMDevReq_ChangeMemBalloon.
+ */
+typedef struct {
+	/** Header. */
+	VMMDevRequestHeader header;
+	/** The number of pages in the array. */
+	__u32 pages;
+	/** true = inflate, false = deflate.  */
+	__u32 inflate;
+	/** Physical address (u64) of each page. */
+	__u64 phys_page[VMMDEV_MEMORY_BALLOON_CHUNK_PAGES];
+} VMMDevChangeMemBalloon;
+
+/**
+ * Guest statistics interval change request structure.
+ *
+ * Used by VMMDevReq_GetStatisticsChangeRequest.
+ */
+typedef struct {
+	/** Header. */
+	VMMDevRequestHeader header;
+	/** The interval in seconds. */
+	__u32 u32StatInterval;
+	/**
+	 * Setting this to VMMDEV_EVENT_STATISTICS_INTERVAL_CHANGE_REQUEST
+	 * indicates that the request is a response to that event.
+	 * (Don't confuse this with VMMDevReq_AcknowledgeEvents.)
+	 */
+	__u32 eventAck;
+} VMMDevGetStatisticsChangeRequest;
+VMMDEV_ASSERT_SIZE(VMMDevGetStatisticsChangeRequest, 24+8);
+
+/**
+ * The size of a string field in the credentials request (including '\\0').
+ * @see VMMDevCredentials
+ */
+#define VMMDEV_CREDENTIALS_SZ_SIZE          128
+
+/**
+ * Credentials request structure.
+ *
+ * Used by VMMDevReq_QueryCredentials.
+ */
+typedef struct {
+	/** Header. */
+	VMMDevRequestHeader header;
+	/** IN/OUT: Request flags. */
+	__u32 u32Flags;
+	/** OUT: User name (UTF-8). */
+	char szUserName[VMMDEV_CREDENTIALS_SZ_SIZE];
+	/** OUT: Password (UTF-8). */
+	char szPassword[VMMDEV_CREDENTIALS_SZ_SIZE];
+	/** OUT: Domain name (UTF-8). */
+	char szDomain[VMMDEV_CREDENTIALS_SZ_SIZE];
+} VMMDevCredentials;
+VMMDEV_ASSERT_SIZE(VMMDevCredentials, 24+4+3*128);
+
/**
 * @name Credentials request flags (VMMDevCredentials::u32Flags)
 *
 * NOTE: this is a userspace-visible (uapi) header, so the kernel-internal
 * BIT() macro from <linux/bits.h> must not be used here; the flags are
 * therefore defined with plain shift expressions.
 * @{
 */
/** query from host whether credentials are present */
#define VMMDEV_CREDENTIALS_QUERYPRESENCE     (1U << 1)
/** read credentials from host (can be combined with clear) */
#define VMMDEV_CREDENTIALS_READ              (1U << 2)
/** clear credentials on host (can be combined with read) */
#define VMMDEV_CREDENTIALS_CLEAR             (1U << 3)
/** read credentials for judgement in the guest */
#define VMMDEV_CREDENTIALS_READJUDGE         (1U << 8)
/** clear credentials for judgement on the host */
#define VMMDEV_CREDENTIALS_CLEARJUDGE        (1U << 9)
/** report credentials acceptance by guest */
#define VMMDEV_CREDENTIALS_JUDGE_OK          (1U << 10)
/** report credentials denial by guest */
#define VMMDEV_CREDENTIALS_JUDGE_DENY        (1U << 11)
/** report that no judgement could be made by guest */
#define VMMDEV_CREDENTIALS_JUDGE_NOJUDGEMENT (1U << 12)

/** flag telling the guest that credentials are present */
#define VMMDEV_CREDENTIALS_PRESENT           (1U << 16)
/** flag telling guest that local logons should be prohibited */
#define VMMDEV_CREDENTIALS_NOLOCALLOGON      (1U << 17)
/** @} */
+
+/**
+ * Seamless mode.
+ *
+ * Used by VbglR3SeamlessWaitEvent
+ *
+ * @ingroup grp_vmmdev_req
+ *
+ * @todo DARN! DARN! DARN! Who forgot to do the 32-bit hack here???
+ *       FIXME! XXX!
+ *
+ *       We will now have to carefully check how our compilers have treated this
+ *       flag. If any are compressing it into a byte type, we'll have to check
+ *       how the request memory is initialized. If we are 104% sure it's ok to
+ *       expand it, we'll expand it. If not, we must redefine the field to a
+ *       u8 and a 3 byte padding.
+ */
+typedef enum {
+	/** normal mode; entire guest desktop displayed. */
+	VMMDev_Seamless_Disabled         = 0,
+	/** visible region mode; only top-level guest windows displayed. */
+	VMMDev_Seamless_Visible_Region   = 1,
+	/**
+	 * windowed mode; each top-level guest window is represented in a
+	 * host window.
+	 */
+	VMMDev_Seamless_Host_Window      = 2
+} VMMDevSeamlessMode;
+
+/**
+ * Seamless mode change request structure.
+ *
+ * Used by VMMDevReq_GetSeamlessChangeRequest.
+ */
+typedef struct {
+	/** Header. */
+	VMMDevRequestHeader header;
+
+	/** New seamless mode. */
+	VMMDevSeamlessMode mode;
+	/**
+	 * Setting this to VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST indicates
+	 * that the request is a response to that event.
+	 * (Don't confuse this with VMMDevReq_AcknowledgeEvents.)
+	 */
+	__u32 eventAck;
+} VMMDevSeamlessChangeRequest;
+VMMDEV_ASSERT_SIZE(VMMDevSeamlessChangeRequest, 24+8);
+VMMDEV_ASSERT_MEMBER_OFFSET(VMMDevSeamlessChangeRequest, eventAck, 24+4);
+
+/**
+ * Display change request structure.
+ *
+ * Used by VMMDevReq_GetDisplayChangeRequest.
+ */
+typedef struct {
+	/** Header. */
+	VMMDevRequestHeader header;
+	/** Horizontal pixel resolution (0 = do not change). */
+	__u32 xres;
+	/** Vertical pixel resolution (0 = do not change). */
+	__u32 yres;
+	/** Bits per pixel (0 = do not change). */
+	__u32 bpp;
+	/**
+	 * Setting this to VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST indicates
+	 * that the request is a response to that event.
+	 * (Don't confuse this with VMMDevReq_AcknowledgeEvents.)
+	 */
+	__u32 eventAck;
+} VMMDevDisplayChangeRequest;
+VMMDEV_ASSERT_SIZE(VMMDevDisplayChangeRequest, 24+16);
+
+/**
+ * Display change request structure, version 2.
+ *
+ * Used by VMMDevReq_GetDisplayChangeRequest2.
+ */
+typedef struct {
+	/** Header. */
+	VMMDevRequestHeader header;
+	/** Horizontal pixel resolution (0 = do not change). */
+	__u32 xres;
+	/** Vertical pixel resolution (0 = do not change). */
+	__u32 yres;
+	/** Bits per pixel (0 = do not change). */
+	__u32 bpp;
+	/**
+	 * Setting this to VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST indicates
+	 * that the request is a response to that event.
+	 * (Don't confuse this with VMMDevReq_AcknowledgeEvents.)
+	 */
+	__u32 eventAck;
+	/** 0 for primary display, 1 for the first secondary, etc. */
+	__u32 display;
+} VMMDevDisplayChangeRequest2;
+VMMDEV_ASSERT_SIZE(VMMDevDisplayChangeRequest2, 24+20);
+
+/**
+ * Display change request structure, version Extended.
+ *
+ * Used by VMMDevReq_GetDisplayChangeRequestEx.
+ */
+typedef struct {
+	/** Header. */
+	VMMDevRequestHeader header;
+	/** Horizontal pixel resolution (0 = do not change). */
+	__u32 xres;
+	/** Vertical pixel resolution (0 = do not change). */
+	__u32 yres;
+	/** Bits per pixel (0 = do not change). */
+	__u32 bpp;
+	/**
+	 * Setting this to VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST indicates
+	 * that the request is a response to that event.
+	 * (Don't confuse this with VMMDevReq_AcknowledgeEvents.)
+	 */
+	__u32 eventAck;
+	/** 0 for primary display, 1 for the first secondary, etc. */
+	__u32 display;
+	/** New OriginX of secondary virtual screen */
+	__u32 cxOrigin;
+	/** New OriginY of secondary virtual screen  */
+	__u32 cyOrigin;
+	/** Change in origin of the secondary virtual screen is required */
+	u8 fChangeOrigin;
+	/** Secondary virtual screen enabled or disabled */
+	u8 fEnabled;
+	/** Alignment */
+	u8 alignment[2];
+} VMMDevDisplayChangeRequestEx;
+VMMDEV_ASSERT_SIZE(VMMDevDisplayChangeRequestEx, 24+32);
+
+/**
+ * Video mode supported request structure.
+ *
+ * Used by VMMDevReq_VideoModeSupported.
+ */
+typedef struct {
+	/** Header. */
+	VMMDevRequestHeader header;
+	/** IN: Horizontal pixel resolution. */
+	__u32 width;
+	/** IN: Vertical pixel resolution. */
+	__u32 height;
+	/** IN: Bits per pixel. */
+	__u32 bpp;
+	/** OUT: Support indicator. */
+	u8 fSupported;
+	/** Alignment */
+	u8 alignment[3];
+} VMMDevVideoModeSupportedRequest;
+VMMDEV_ASSERT_SIZE(VMMDevVideoModeSupportedRequest, 24+16);
+
+/**
+ * Video mode supported request structure for a specific display.
+ *
+ * Used by VMMDevReq_VideoModeSupported2.
+ */
+typedef struct {
+	/** Header. */
+	VMMDevRequestHeader header;
+	/** IN: The guest display number. */
+	__u32 display;
+	/** IN: Horizontal pixel resolution. */
+	__u32 width;
+	/** IN: Vertical pixel resolution. */
+	__u32 height;
+	/** IN: Bits per pixel. */
+	__u32 bpp;
+	/** OUT: Support indicator. */
+	u8 fSupported;
+	/** Alignment */
+	u8 alignment[3];
+} VMMDevVideoModeSupportedRequest2;
+VMMDEV_ASSERT_SIZE(VMMDevVideoModeSupportedRequest2, 24+20);
+
+/**
+ * Video modes height reduction request structure.
+ *
+ * Used by VMMDevReq_GetHeightReduction.
+ */
+typedef struct {
+	/** Header. */
+	VMMDevRequestHeader header;
+	/** OUT: Height reduction in pixels. */
+	__u32 heightReduction;
+} VMMDevGetHeightReductionRequest;
+VMMDEV_ASSERT_SIZE(VMMDevGetHeightReductionRequest, 24+4);
+
+/**
+ * VRDP change request structure.
+ *
+ * Used by VMMDevReq_GetVRDPChangeRequest.
+ */
+typedef struct {
+	/** Header */
+	VMMDevRequestHeader header;
+	/** Whether VRDP is active or not. */
+	__u8 u8VRDPActive;
+	/** The configured experience level for active VRDP. */
+	__u32 u32VRDPExperienceLevel;
+} VMMDevVRDPChangeRequest;
+VMMDEV_ASSERT_SIZE(VMMDevVRDPChangeRequest, 24+8);
+VMMDEV_ASSERT_MEMBER_OFFSET(VMMDevVRDPChangeRequest, u8VRDPActive, 24);
+VMMDEV_ASSERT_MEMBER_OFFSET(VMMDevVRDPChangeRequest, u32VRDPExperienceLevel,
+			    24+4);
+
+/**
+ * @name VRDP Experience level (VMMDevVRDPChangeRequest::u32VRDPExperienceLevel)
+ * @{
+ */
+#define VRDP_EXPERIENCE_LEVEL_ZERO     0 /**< Theming disabled. */
+#define VRDP_EXPERIENCE_LEVEL_LOW      1 /**< Full win drag + wallpaper dis. */
+#define VRDP_EXPERIENCE_LEVEL_MEDIUM   2 /**< Font smoothing, gradients. */
+#define VRDP_EXPERIENCE_LEVEL_HIGH     3 /**< Animation effects disabled. */
+#define VRDP_EXPERIENCE_LEVEL_FULL     4 /**< Everything enabled. */
+/** @} */
+
+/**
+ * VBVA enable request structure.
+ *
+ * Used by VMMDevReq_VideoAccelEnable.
+ */
+typedef struct {
+	/** Header. */
+	VMMDevRequestHeader header;
+	/** 0 - disable, !0 - enable. */
+	__u32 u32Enable;
+	/**
+	 * The size of VBVAMEMORY::au8RingBuffer expected by driver.
+	 *  The host will refuse to enable VBVA if the size is not equal to
+	 *  VBVA_RING_BUFFER_SIZE.
+	 */
+	__u32 cbRingBuffer;
+	/**
+	 * Guest initializes the status to 0. Host sets appropriate
+	 * VBVA_F_STATUS_ flags.
+	 */
+	__u32 fu32Status;
+} VMMDevVideoAccelEnable;
+VMMDEV_ASSERT_SIZE(VMMDevVideoAccelEnable, 24+12);
+
+/**
+ * @name VMMDevVideoAccelEnable::fu32Status.
+ * @{
+ */
+#define VBVA_F_STATUS_ACCEPTED (0x01)
+#define VBVA_F_STATUS_ENABLED  (0x02)
+/** @} */
+
+/**
+ * VBVA flush request structure.
+ *
+ * Used by VMMDevReq_VideoAccelFlush.
+ */
+typedef struct {
+	/** Header. */
+	VMMDevRequestHeader header;
+} VMMDevVideoAccelFlush;
+VMMDEV_ASSERT_SIZE(VMMDevVideoAccelFlush, 24);
+
+/**
+ * Rectangle data type, double point.
+ */
+typedef struct RTRECT {
+	/** left X coordinate. */
+	__s32 xLeft;
+	/** top Y coordinate. */
+	__s32 yTop;
+	/** right X coordinate. (exclusive) */
+	__s32 xRight;
+	/** bottom Y coordinate. (exclusive) */
+	__s32 yBottom;
+} RTRECT;
+
+/**
+ * VBVA set visible region request structure.
+ *
+ * Used by VMMDevReq_VideoSetVisibleRegion.
+ */
+typedef struct {
+	/** Header. */
+	VMMDevRequestHeader header;
+	/** Number of rectangles */
+	__u32 cRect;
+	/**
+	 * Rectangle array.
+	 * @todo array is spelled aRects[1].
+	 */
+	RTRECT Rect;
+} VMMDevVideoSetVisibleRegion;
+VMMDEV_ASSERT_SIZE(RTRECT, 16);
+VMMDEV_ASSERT_SIZE(VMMDevVideoSetVisibleRegion, 24+4+16);
+
+/**
+ * CPU event types.
+ */
+typedef enum {
+	VMMDevCpuStatusType_Invalid  = 0,
+	VMMDevCpuStatusType_Disable  = 1,
+	VMMDevCpuStatusType_Enable   = 2,
+	VMMDevCpuStatusType_SizeHack = 0x7fffffff
+} VMMDevCpuStatusType;
+
+/**
+ * CPU hotplug event status request.
+ */
+typedef struct {
+	/** Header. */
+	VMMDevRequestHeader header;
+	/** Status type */
+	VMMDevCpuStatusType enmStatusType;
+} VMMDevCpuHotPlugStatusRequest;
+VMMDEV_ASSERT_SIZE(VMMDevCpuHotPlugStatusRequest, 24+4);
+
+/**
+ * CPU event types.
+ *
+ * Used by VbglR3CpuHotplugWaitForEvent
+ *
+ * @ingroup grp_vmmdev_req
+ */
+typedef enum {
+	VMMDevCpuEventType_Invalid  = 0,
+	VMMDevCpuEventType_None     = 1,
+	VMMDevCpuEventType_Plug     = 2,
+	VMMDevCpuEventType_Unplug   = 3,
+	VMMDevCpuEventType_SizeHack = 0x7fffffff
+} VMMDevCpuEventType;
+
+/**
+ * Get the ID of the changed CPU and event type.
+ */
+typedef struct {
+	/** Header. */
+	VMMDevRequestHeader header;
+	/** Event type */
+	VMMDevCpuEventType enmEventType;
+	/** core id of the CPU changed */
+	__u32 idCpuCore;
+	/** package id of the CPU changed */
+	__u32 idCpuPackage;
+} VMMDevGetCpuHotPlugRequest;
+VMMDEV_ASSERT_SIZE(VMMDevGetCpuHotPlugRequest, 24+4+4+4);
+
+/**
+ * Shared region description
+ */
+typedef struct VMMDEVSHAREDREGIONDESC {
+	__u64 GCRegionAddr;
+	__u32 cbRegion;
+	__u32 alignment;
+} VMMDEVSHAREDREGIONDESC;
+VMMDEV_ASSERT_SIZE(VMMDEVSHAREDREGIONDESC, 16);
+
+#define VMMDEVSHAREDREGIONDESC_MAX          32
+
+/**
+ * Shared module registration
+ */
+typedef struct {
+	/** Header. */
+	VMMDevRequestHeader header;
+	/** Shared module size. */
+	__u32 cbModule;
+	/** Number of included region descriptors */
+	__u32 cRegions;
+	/** Base address of the shared module. */
+	__u64 GCBaseAddr;
+	/** Guest OS type. */
+	VBOXOSFAMILY enmGuestOS;
+	/** Alignment. */
+	__u32 alignment;
+	/** Module name */
+	char szName[128];
+	/** Module version */
+	char szVersion[16];
+	/** Shared region descriptor(s). */
+	VMMDEVSHAREDREGIONDESC aRegions[1];
+} VMMDevSharedModuleRegistrationRequest;
+VMMDEV_ASSERT_SIZE(VMMDevSharedModuleRegistrationRequest,
+		   24+4+4+8+4+4+128+16+16);
+
+/**
+ * Shared module unregistration
+ */
+typedef struct {
+	/** Header. */
+	VMMDevRequestHeader header;
+	/** Shared module size. */
+	__u32 cbModule;
+	/** Align at 8 byte boundary. */
+	__u32 alignment;
+	/** Base address of the shared module. */
+	__u64 GCBaseAddr;
+	/** Module name */
+	char szName[128];
+	/** Module version */
+	char szVersion[16];
+} VMMDevSharedModuleUnregistrationRequest;
+VMMDEV_ASSERT_SIZE(VMMDevSharedModuleUnregistrationRequest, 24+4+4+8+128+16);
+
+/**
+ * Shared module periodic check
+ */
+typedef struct {
+	/** Header. */
+	VMMDevRequestHeader header;
+} VMMDevSharedModuleCheckRequest;
+VMMDEV_ASSERT_SIZE(VMMDevSharedModuleCheckRequest, 24);
+
+/**
+ * Paging sharing enabled query
+ */
+typedef struct {
+	/** Header. */
+	VMMDevRequestHeader header;
+	/** Enabled flag (out) */
+	u8 fEnabled;
+	/** Alignment */
+	u8 alignment[3];
+} VMMDevPageSharingStatusRequest;
+VMMDEV_ASSERT_SIZE(VMMDevPageSharingStatusRequest, 24+4);
+
+/**
+ * Page sharing status query (debug build only)
+ */
+typedef struct {
+	/** Header. */
+	VMMDevRequestHeader header;
+	/** Page address, 32 bits on 32 bit builds, 64 bit on 64 bit builds */
+	unsigned long GCPtrPage;
+	/** Page flags. */
+	__u64 uPageFlags;
+	/** Shared flag (out) */
+	u8 fShared;
+	/** Alignment */
+	u8 alignment[3];
+} VMMDevPageIsSharedRequest;
+
+/**
+ * Session id request structure.
+ *
+ * Used by VMMDevReq_GetSessionId.
+ */
+typedef struct {
+	/** Header */
+	VMMDevRequestHeader header;
+	/**
+	 * OUT: unique session id; the id will be different after each start,
+	 * reset or restore of the VM.
+	 */
+	__u64 idSession;
+} VMMDevReqSessionId;
+VMMDEV_ASSERT_SIZE(VMMDevReqSessionId, 24+8);
+
+/**
+ * Write Core Dump request.
+ *
+ * Used by VMMDevReq_WriteCoreDump.
+ */
+typedef struct {
+	/** Header. */
+	VMMDevRequestHeader header;
+	/** Flags (reserved, MBZ). */
+	__u32 fFlags;
+} VMMDevReqWriteCoreDump;
+VMMDEV_ASSERT_SIZE(VMMDevReqWriteCoreDump, 24+4);
+
+/** Heart beat check state structure. Used by VMMDevReq_HeartbeatConfigure. */
+typedef struct {
+	/** Header. */
+	VMMDevRequestHeader header;
+	/** OUT: Guest heartbeat interval in nanosec. */
+	__u64 cNsInterval;
+	/** Heartbeat check flag. */
+	u8 fEnabled;
+	/** Alignment */
+	u8 alignment[3];
+} VMMDevReqHeartbeat;
+VMMDEV_ASSERT_SIZE(VMMDevReqHeartbeat, 24+12);
+
/**
 * @name HGCM request status flags (VMMDevHGCMRequestHeader::fu32Flags).
 *
 * NOTE: userspace-visible (uapi) header, so the kernel-internal BIT()
 * macro must not be used; the flag is built with a plain shift instead.
 * @{
 */
/** Bit number of the "request done" flag, for test-and-set/clear use. */
#define VBOX_HGCM_REQ_DONE_BIT  0
/** Set by the host once it has completed the request. */
#define VBOX_HGCM_REQ_DONE      (1U << VBOX_HGCM_REQ_DONE_BIT)
/** Set when the request has been cancelled by the guest. */
#define VBOX_HGCM_REQ_CANCELLED (0x2)
/** @} */
+
+/**
+ * HGCM request header.
+ */
+typedef struct VMMDevHGCMRequestHeader {
+	/** Request header. */
+	VMMDevRequestHeader header;
+
+	/** HGCM flags. */
+	__u32 fu32Flags;
+
+	/** Result code. */
+	__s32 result;
+} VMMDevHGCMRequestHeader;
+VMMDEV_ASSERT_SIZE(VMMDevHGCMRequestHeader, 24+8);
+
+/**
+ * HGCM service location types.
+ * @ingroup grp_vmmdev_req
+ */
+typedef enum {
+	VMMDevHGCMLoc_Invalid    = 0,
+	VMMDevHGCMLoc_LocalHost  = 1,
+	VMMDevHGCMLoc_LocalHost_Existing = 2,
+	VMMDevHGCMLoc_SizeHack   = 0x7fffffff
+} HGCMServiceLocationType;
+VMMDEV_ASSERT_SIZE(HGCMServiceLocationType, 4);
+
+/**
+ * HGCM host service location.
+ * @ingroup grp_vmmdev_req
+ */
+typedef struct {
+	char achName[128]; /**< This is really szName. */
+} HGCMServiceLocationHost;
+VMMDEV_ASSERT_SIZE(HGCMServiceLocationHost, 128);
+
+/**
+ * HGCM service location.
+ * @ingroup grp_vmmdev_req
+ */
+typedef struct HGCMSERVICELOCATION {
+	/** Type of the location. */
+	HGCMServiceLocationType type;
+
+	union {
+		HGCMServiceLocationHost host;
+	} u;
+} HGCMServiceLocation;
+VMMDEV_ASSERT_SIZE(HGCMServiceLocation, 128+4);
+
+/**
+ * HGCM connect request structure.
+ *
+ * Used by VMMDevReq_HGCMConnect.
+ */
+typedef struct {
+	/** HGCM request header. */
+	VMMDevHGCMRequestHeader header;
+
+	/** IN: Description of service to connect to. */
+	HGCMServiceLocation loc;
+
+	/** OUT: Client identifier assigned by local instance of HGCM. */
+	__u32 u32ClientID;
+} VMMDevHGCMConnect;
+VMMDEV_ASSERT_SIZE(VMMDevHGCMConnect, 32+132+4);
+
+/**
+ * HGCM disconnect request structure.
+ *
+ * Used by VMMDevReq_HGCMDisconnect.
+ */
+typedef struct {
+	/** HGCM request header. */
+	VMMDevHGCMRequestHeader header;
+
+	/** IN: Client identifier. */
+	__u32 u32ClientID;
+} VMMDevHGCMDisconnect;
+VMMDEV_ASSERT_SIZE(VMMDevHGCMDisconnect, 32+4);
+
+/**
+ * HGCM parameter type.
+ */
+typedef enum {
+	VMMDevHGCMParmType_Invalid            = 0,
+	VMMDevHGCMParmType_32bit              = 1,
+	VMMDevHGCMParmType_64bit              = 2,
+	/** @deprecated Doesn't work, use PageList. */
+	VMMDevHGCMParmType_PhysAddr           = 3,
+	/** In and Out */
+	VMMDevHGCMParmType_LinAddr            = 4,
+	/** In  (read;  host<-guest) */
+	VMMDevHGCMParmType_LinAddr_In         = 5,
+	/** Out (write; host->guest) */
+	VMMDevHGCMParmType_LinAddr_Out        = 6,
+	/* 7 - 9 VMMDevHGCMParmType_LinAddr_Locked*, non Linux R0 usage only */
+	/** Physical addresses of locked pages for a buffer. */
+	VMMDevHGCMParmType_PageList           = 10,
+	VMMDevHGCMParmType_SizeHack           = 0x7fffffff
+} HGCMFunctionParameterType;
+VMMDEV_ASSERT_SIZE(HGCMFunctionParameterType, 4);
+
+/**
+ * HGCM function parameter, 32-bit client.
+ */
+typedef struct HGCMFunctionParameter32 {
+	HGCMFunctionParameterType type;
+	union {
+		__u32 value32;
+		__u64 value64;
+		struct {
+			__u32 size;
+			union {
+				__u32 physAddr;
+				__u32 linearAddr;
+			} u;
+		} Pointer;
+		struct {
+			/** Size of the buffer described by the page list. */
+			__u32 size;
+			/** Relative to the request header. */
+			__u32 offset;
+		} PageList;
+	} u;
+} HGCMFunctionParameter32;
+VMMDEV_ASSERT_SIZE(HGCMFunctionParameter32, 4+8);
+
+/**
+ * HGCM function parameter, 64-bit client.
+ */
+typedef struct HGCMFunctionParameter64 {
+	HGCMFunctionParameterType type;
+	union {
+		__u32 value32;
+		__u64 value64;
+		struct {
+			__u32 size;
+			union {
+				__u64 physAddr;
+				__u64 linearAddr;
+			} u;
+		} Pointer;
+		struct {
+			/** Size of the buffer described by the page list. */
+			__u32 size;
+			/** Relative to the request header. */
+			__u32 offset;
+		} PageList;
+	} u;
+} HGCMFunctionParameter64;
+VMMDEV_ASSERT_SIZE(HGCMFunctionParameter64, 4+12);
+
+#if __BITS_PER_LONG == 64
+#define HGCMFunctionParameter HGCMFunctionParameter64
+#else
+#define HGCMFunctionParameter HGCMFunctionParameter32
+#endif
+
+/**
+ * HGCM call request structure.
+ *
+ * Used by VMMDevReq_HGCMCall32 and VMMDevReq_HGCMCall64.
+ */
+typedef struct {
+	/* request header */
+	VMMDevHGCMRequestHeader header;
+
+	/** IN: Client identifier. */
+	__u32 u32ClientID;
+	/** IN: Service function number. */
+	__u32 u32Function;
+	/** IN: Number of parameters. */
+	__u32 cParms;
+	/** Parameters follow in form: HGCMFunctionParameter32|64 aParms[X]; */
+} VMMDevHGCMCall;
+VMMDEV_ASSERT_SIZE(VMMDevHGCMCall, 32+12);
+
+/**
/**
 * @name Direction of data transfer (HGCMPageListInfo::flags). Bit flags.
 * @{
 */
#define VBOX_HGCM_F_PARM_DIRECTION_NONE      0x00000000U
#define VBOX_HGCM_F_PARM_DIRECTION_TO_HOST   0x00000001U
#define VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST 0x00000002U
#define VBOX_HGCM_F_PARM_DIRECTION_BOTH      0x00000003U
/**
 * Macro for validating that the specified flags are valid.
 * Exactly one direction must be given; NONE and BOTH are both rejected.
 */
#define VBOX_HGCM_F_PARM_ARE_VALID(fFlags) \
	((fFlags) == VBOX_HGCM_F_PARM_DIRECTION_TO_HOST || \
	 (fFlags) == VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST)
/** @} */
+
+/**
+ * VMMDevHGCMParmType_PageList points to this structure to actually describe
+ * the buffer.
+ */
+typedef struct {
+	__u32 flags;        /**< VBOX_HGCM_F_PARM_*. */
+	__u16 offFirstPage; /**< Offset in the first page where data begins. */
+	__u16 cPages;       /**< Number of pages. */
+	__u64 aPages[1];    /**< Page addresses. */
+} HGCMPageListInfo;
+VMMDEV_ASSERT_SIZE(HGCMPageListInfo, 4+2+2+8);
+
+/** Get the pointer to the first parameter of a HGCM call request.  */
+#define VMMDEV_HGCM_CALL_PARMS(a) \
+	((HGCMFunctionParameter *)((__u8 *)(a) + sizeof(VMMDevHGCMCall)))
+
+#define VBOX_HGCM_MAX_PARMS 32
+
+/**
+ * HGCM cancel request structure.
+ *
+ * The Cancel request is issued using the same physical memory address as was
+ * used for the corresponding initial HGCMCall.
+ *
+ * Used by VMMDevReq_HGCMCancel.
+ */
+typedef struct {
+	/** Header. */
+	VMMDevHGCMRequestHeader header;
+} VMMDevHGCMCancel;
+VMMDEV_ASSERT_SIZE(VMMDevHGCMCancel, 32);
+
+/**
+ * HGCM cancel request structure, version 2.
+ *
+ * Used by VMMDevReq_HGCMCancel2.
+ *
+ * VINF_SUCCESS when cancelled.
+ * VERR_NOT_FOUND if the specified request cannot be found.
+ * VERR_INVALID_PARAMETER if the address is invalid.
+ */
+typedef struct {
+	/** Header. */
+	VMMDevRequestHeader header;
+	/** The physical address of the request to cancel. */
+	__u32 physReqToCancel;
+} VMMDevHGCMCancel2;
+VMMDEV_ASSERT_SIZE(VMMDevHGCMCancel2, 24+4);
+
+/** @} */
+
+#pragma pack()
+
+#endif
diff --git a/include/uapi/linux/vboxguest.h b/include/uapi/linux/vboxguest.h
new file mode 100644
index 000000000000..7fa83c8b9054
--- /dev/null
+++ b/include/uapi/linux/vboxguest.h
@@ -0,0 +1,359 @@
+/*
+ * VBoxGuest - VirtualBox Guest Additions Driver Interface. (ADD,DEV)
+ *
+ * Copyright (C) 2006-2016 Oracle Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, in which case the provisions of the CDDL are applicable
+ * instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#ifndef __UAPI_VBOXGUEST_H__
+#define __UAPI_VBOXGUEST_H__
+
+#include <asm/bitsperlong.h>
+#include <linux/ioctl.h>
+#include <linux/vbox_vmmdev.h> /* For HGCMServiceLocation */
+
+/**
+ * @defgroup grp_vboxguest  VirtualBox Guest Additions Device Driver
+ *
+ * Also known as VBoxGuest.
+ *
+ * @{
+ */
+
+/**
+ * @defgroup grp_vboxguest_ioc  VirtualBox Guest Additions Driver Interface
+ * @{
+ */
+
+/**
+ * @name VBoxGuest IOCTL codes and structures.
+ *
+ * The range 0..15 is for basic driver communication.
+ * The range 16..31 is for HGCM communication.
+ * The range 32..47 is reserved for future use.
+ * The range 48..63 is for OS specific communication.
+ * The 7th bit is reserved for future hacks.
+ * The 8th bit is reserved for distinguishing between 32-bit and 64-bit
+ * processes in future 64-bit guest additions.
+ * @{
+ */
+#if __BITS_PER_LONG == 64
+#define VBOXGUEST_IOCTL_FLAG		128
+#else
+#define VBOXGUEST_IOCTL_FLAG		0
+#endif
+/** @} */
+
+#define VBOXGUEST_IOCTL_CODE_(function, size) \
+	_IOC(_IOC_READ|_IOC_WRITE, 'V', (function), (size))
+
+#define VBOXGUEST_IOCTL_CODE(function, size) \
+	VBOXGUEST_IOCTL_CODE_((function) | VBOXGUEST_IOCTL_FLAG, (size))
+/* Define 32 bit codes to support 32 bit applications in 64 bit guest driver. */
+#define VBOXGUEST_IOCTL_CODE_32(function, size) \
+	VBOXGUEST_IOCTL_CODE_((function), (size))
+
+/** IOCTL to VBoxGuest to wait for a VMMDev host notification */
+#define VBOXGUEST_IOCTL_WAITEVENT \
+	VBOXGUEST_IOCTL_CODE_(2, sizeof(VBoxGuestWaitEventInfo))
+
+/**
+ * @name Result codes for VBoxGuestWaitEventInfo::u32Result
+ * @{
+ */
+/** Successful completion, an event occurred. */
+#define VBOXGUEST_WAITEVENT_OK          (0)
+/** Successful completion, timed out. */
+#define VBOXGUEST_WAITEVENT_TIMEOUT     (1)
+/** Wait was interrupted. */
+#define VBOXGUEST_WAITEVENT_INTERRUPTED (2)
+/** An error occurred while processing the request. */
+#define VBOXGUEST_WAITEVENT_ERROR       (3)
+/** @} */
+
+/** Input and output buffer layout of the VBOXGUEST_IOCTL_WAITEVENT. */
+typedef struct VBoxGuestWaitEventInfo {
+	/** timeout in milliseconds */
+	__u32 u32TimeoutIn;
+	/** events to wait for */
+	__u32 u32EventMaskIn;
+	/** result code */
+	__u32 u32Result;
+	/** events occurred */
+	__u32 u32EventFlagsOut;
+} VBoxGuestWaitEventInfo;
+VMMDEV_ASSERT_SIZE(VBoxGuestWaitEventInfo, 16);
+
+
+/**
+ * IOCTL to VBoxGuest to perform a VMM request
+ * @remark  The data buffer for this IOCtl has a variable size, keep this in
+ *          mind on systems where this matters.
+ */
+#define VBOXGUEST_IOCTL_VMMREQUEST(size) \
+	VBOXGUEST_IOCTL_CODE_(3, (size))
+
+
+/** IOCTL to VBoxGuest to control event filter mask. */
+#define VBOXGUEST_IOCTL_CTL_FILTER_MASK \
+	VBOXGUEST_IOCTL_CODE_(4, sizeof(VBoxGuestFilterMaskInfo))
+
+/** Input and output buffer layout of the VBOXGUEST_IOCTL_CTL_FILTER_MASK. */
+typedef struct VBoxGuestFilterMaskInfo {
+	__u32 u32OrMask;
+	__u32 u32NotMask;
+} VBoxGuestFilterMaskInfo;
+VMMDEV_ASSERT_SIZE(VBoxGuestFilterMaskInfo, 8);
+
+/**
+ * IOCTL to VBoxGuest to interrupt (cancel) any pending WAITEVENTs and return.
+ * Handled inside the guest additions and not seen by the host at all.
+ * After calling this, VBOXGUEST_IOCTL_WAITEVENT should no longer be called in
+ * the same session. Any VBOXGUEST_IOCTL_WAITEVENT calls in the same session
+ * done after calling this will directly exit with VERR_INTERRUPTED.
+ * @see VBOXGUEST_IOCTL_WAITEVENT
+ */
+#define VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS \
+	VBOXGUEST_IOCTL_CODE_(5, 0)
+
+/**
+ * IOCTL to VBoxGuest to perform backdoor logging.
+ * The argument is a string buffer of the specified size.
+ */
+#define VBOXGUEST_IOCTL_LOG(size) \
+	VBOXGUEST_IOCTL_CODE_(6, (size))
+
+/**
+ * IOCTL to VBoxGuest to check memory ballooning.  The guest kernel module /
+ * device driver will ask the host for the current size of the balloon and
+ * adjust the size. Or it will set fHandledInR0 = false and R3 is responsible
+ * for allocating memory and calling R0 (VBOXGUEST_IOCTL_CHANGE_BALLOON).
+ */
+#define VBOXGUEST_IOCTL_CHECK_BALLOON \
+	VBOXGUEST_IOCTL_CODE_(7, sizeof(VBoxGuestCheckBalloonInfo))
+
+/** Output buffer layout of the VBOXGUEST_IOCTL_CHECK_BALLOON. */
+typedef struct VBoxGuestCheckBalloonInfo {
+	/** The size of the balloon in chunks of 1MB. */
+	__u32 cBalloonChunks;
+	/**
+	 * false = handled in R0, no further action required.
+	 * true = allocate balloon memory in R3.
+	 */
+	__u32 fHandleInR3;
+} VBoxGuestCheckBalloonInfo;
+VMMDEV_ASSERT_SIZE(VBoxGuestCheckBalloonInfo, 8);
+
+/**
+ * IOCTL to VBoxGuest to supply or revoke one chunk for ballooning.
+ * The guest kernel module / device driver will lock down supplied memory or
+ * unlock reclaimed memory and then forward the physical addresses of the
+ * changed balloon chunk to the host.
+ */
+#define VBOXGUEST_IOCTL_CHANGE_BALLOON \
+	VBOXGUEST_IOCTL_CODE_(8, sizeof(VBoxGuestChangeBalloonInfo))
+
+/**
+ * Input buffer layout of the VBOXGUEST_IOCTL_CHANGE_BALLOON request.
+ * Information about a memory chunk used to inflate or deflate the balloon.
+ */
+typedef struct VBoxGuestChangeBalloonInfo {
+	/** Address of the chunk. */
+	__u64 u64ChunkAddr;
+	/** true = inflate, false = deflate. */
+	__u32 fInflate;
+	/** Alignment padding. */
+	__u32 u32Align;
+} VBoxGuestChangeBalloonInfo;
+VMMDEV_ASSERT_SIZE(VBoxGuestChangeBalloonInfo, 16);
+
+/** IOCTL to VBoxGuest to write guest core. */
+#define VBOXGUEST_IOCTL_WRITE_CORE_DUMP \
+	VBOXGUEST_IOCTL_CODE(9, sizeof(VBoxGuestWriteCoreDump))
+
+/** Input and output buffer layout of the VBOXGUEST_IOCTL_WRITE_CORE_DUMP request. */
+typedef struct VBoxGuestWriteCoreDump {
+	/** Flags (reserved, MBZ). */
+	__u32 fFlags;
+} VBoxGuestWriteCoreDump;
+VMMDEV_ASSERT_SIZE(VBoxGuestWriteCoreDump, 4);
+
+/** IOCTL to VBoxGuest to update the mouse status features. */
+#define VBOXGUEST_IOCTL_SET_MOUSE_STATUS \
+	VBOXGUEST_IOCTL_CODE_(10, sizeof(__u32))
+
+/** IOCTL to VBoxGuest to connect to a HGCM service. */
+#define VBOXGUEST_IOCTL_HGCM_CONNECT \
+	VBOXGUEST_IOCTL_CODE(16, sizeof(VBoxGuestHGCMConnectInfo))
+
+/**
+ * HGCM connect info structure.
+ *
+ * This is used by VBOXGUEST_IOCTL_HGCM_CONNECT.
+ */
+struct VBoxGuestHGCMConnectInfo {
+	__s32 result;               /**< OUT */
+	HGCMServiceLocation Loc;  /**< IN */
+	__u32 u32ClientID;          /**< OUT */
+} __packed;
+typedef struct VBoxGuestHGCMConnectInfo VBoxGuestHGCMConnectInfo;
+VMMDEV_ASSERT_SIZE(VBoxGuestHGCMConnectInfo, 4+4+128+4);
+
+/** IOCTL to VBoxGuest to disconnect from a HGCM service. */
+#define VBOXGUEST_IOCTL_HGCM_DISCONNECT \
+	VBOXGUEST_IOCTL_CODE(17, sizeof(VBoxGuestHGCMDisconnectInfo))
+
+/**
+ * HGCM disconnect info structure.
+ *
+ * This is used by VBOXGUEST_IOCTL_HGCM_DISCONNECT.
+ */
+typedef struct VBoxGuestHGCMDisconnectInfo {
+	__s32 result;          /**< OUT */
+	__u32 u32ClientID;     /**< IN */
+} VBoxGuestHGCMDisconnectInfo;
+VMMDEV_ASSERT_SIZE(VBoxGuestHGCMDisconnectInfo, 8);
+
+/**
+ * IOCTL to VBoxGuest to make a call to a HGCM service.
+ * @see VBoxGuestHGCMCallInfo
+ */
+#define VBOXGUEST_IOCTL_HGCM_CALL(size) \
+	VBOXGUEST_IOCTL_CODE(18, (size))
+
+/**
+ * HGCM call info structure.
+ *
+ * This is used by VBOXGUEST_IOCTL_HGCM_CALL.
+ */
+typedef struct VBoxGuestHGCMCallInfo {
+	__s32 result;          /**< OUT Host HGCM return code.*/
+	__u32 u32ClientID;     /**< IN  The id of the caller. */
+	__u32 u32Function;     /**< IN  Function number. */
+	__u32 cParms;          /**< IN  How many parms. */
+	/* Parameters follow in form HGCMFunctionParameter aParms[cParms] */
+} VBoxGuestHGCMCallInfo;
+VMMDEV_ASSERT_SIZE(VBoxGuestHGCMCallInfo, 16);
+
+/** IOCTL to VBoxGuest to make a timed call to a HGCM service. */
+#define VBOXGUEST_IOCTL_HGCM_CALL_TIMED(size) \
+	VBOXGUEST_IOCTL_CODE(20, (size))
+
+/**
+ * HGCM call info structure.
+ *
+ * This is used by VBOXGUEST_IOCTL_HGCM_CALL_TIMED.
+ */
+struct VBoxGuestHGCMCallInfoTimed {
+	/** IN  How long to wait for completion before cancelling the call. */
+	__u32 u32Timeout;
+	/** IN  Is this request interruptible? */
+	__u32 fInterruptible;
+	/**
+	 * IN/OUT The rest of the call information.  Placed after the timeout
+	 * so that the parameters follow as they would for a normal call.
+	 */
+	VBoxGuestHGCMCallInfo info;
+	/* Parameters follow in form HGCMFunctionParameter aParms[cParms] */
+} __packed;
+typedef struct VBoxGuestHGCMCallInfoTimed VBoxGuestHGCMCallInfoTimed;
+VMMDEV_ASSERT_SIZE(VBoxGuestHGCMCallInfoTimed, 8+16);
+
+/**
+ * @name IOCTL numbers that 32-bit clients, like the Windows OpenGL guest
+ *        driver, will use when talking to a 64-bit driver.
+ * @remarks These are only used by the driver implementation!
+ * @{
+ */
+#define VBOXGUEST_IOCTL_HGCM_CONNECT_32 \
+	VBOXGUEST_IOCTL_CODE_32(16, sizeof(VBoxGuestHGCMConnectInfo))
+#define VBOXGUEST_IOCTL_HGCM_DISCONNECT_32 \
+	VBOXGUEST_IOCTL_CODE_32(17, sizeof(VBoxGuestHGCMDisconnectInfo))
+#define VBOXGUEST_IOCTL_HGCM_CALL_32(size) \
+	VBOXGUEST_IOCTL_CODE_32(18, (size))
+#define VBOXGUEST_IOCTL_HGCM_CALL_TIMED_32(size) \
+	VBOXGUEST_IOCTL_CODE_32(20, (size))
+/** @} */
+
+/** Get the pointer to the first HGCM parameter.  */
+#define VBOXGUEST_HGCM_CALL_PARMS(a) \
+    ((HGCMFunctionParameter *)((__u8 *)(a) + sizeof(VBoxGuestHGCMCallInfo)))
+/** Get the pointer to the first HGCM parameter in a 32-bit request.  */
+#define VBOXGUEST_HGCM_CALL_PARMS32(a) \
+    ((HGCMFunctionParameter32 *)((__u8 *)(a) + sizeof(VBoxGuestHGCMCallInfo)))
+
+typedef enum VBOXGUESTCAPSACQUIRE_FLAGS {
+	VBOXGUESTCAPSACQUIRE_FLAGS_NONE = 0,
+	/*
+	 * Configures VBoxGuest to use the specified caps in Acquire mode, w/o
+	 * making any caps acquisition/release. so far it is only possible to
+	 * set acquire mode for caps, but not clear it, so u32NotMask is
+	 * ignored for this request.
+	 */
+	VBOXGUESTCAPSACQUIRE_FLAGS_CONFIG_ACQUIRE_MODE,
+	/* To ensure enum is 32bit */
+	VBOXGUESTCAPSACQUIRE_FLAGS_32bit = 0x7fffffff
+} VBOXGUESTCAPSACQUIRE_FLAGS;
+
+typedef struct VBoxGuestCapsAquire {
+	/*
+	 * result status
+	 * VINF_SUCCESS - on success
+	 * VERR_RESOURCE_BUSY - some caps in the u32OrMask are acquired by some
+	 *	other VBoxGuest connection. NOTE: no u32NotMask caps are cleaned
+	 *	in this case, No modifications are done on failure.
+	 * VERR_INVALID_PARAMETER - invalid Caps are specified with either
+	 *	u32OrMask or u32NotMask. No modifications are done on failure.
+	 */
+	__s32 rc;
+	/* Acquire command */
+	VBOXGUESTCAPSACQUIRE_FLAGS enmFlags;
+	/* caps to acquire, OR-ed VMMDEV_GUEST_SUPPORTS_XXX flags */
+	__u32 u32OrMask;
+	/* caps to release, OR-ed VMMDEV_GUEST_SUPPORTS_XXX flags */
+	__u32 u32NotMask;
+} VBoxGuestCapsAquire;
+
+/**
+ * IOCTL for acquiring/releasing guest capabilities.
+ * This is used for multiple purposes:
+ * 1. By doing Acquire r3 client application (e.g. VBoxTray) claims it will use
+ *    the given connection for performing operations like Auto-resize, or
+ *    Seamless. If the application terminates, the driver will automatically
+ *    cleanup the caps reported to host, so that host knows guest does not
+ *    support them anymore.
+ * 2. In a multi-user environment this will not allow r3 applications (like
+ *    VBoxTray) running in different user sessions simultaneously to interfere
+ *    with each other. An r3 client application (like VBoxTray) is responsible
+ *    for Acquiring/Releasing caps properly as needed.
+ **/
+#define VBOXGUEST_IOCTL_GUEST_CAPS_ACQUIRE \
+	VBOXGUEST_IOCTL_CODE(32, sizeof(VBoxGuestCapsAquire))
+
+/** IOCTL to VBoxGuest to set guest capabilities. */
+#define VBOXGUEST_IOCTL_SET_GUEST_CAPABILITIES \
+	VBOXGUEST_IOCTL_CODE_(33, sizeof(VBoxGuestSetCapabilitiesInfo))
+
+/** Input/output buffer layout for VBOXGUEST_IOCTL_SET_GUEST_CAPABILITIES. */
+typedef struct VBoxGuestSetCapabilitiesInfo {
+	__u32 u32OrMask;
+	__u32 u32NotMask;
+} VBoxGuestSetCapabilitiesInfo;
+VMMDEV_ASSERT_SIZE(VBoxGuestSetCapabilitiesInfo, 8);
+
+/** @} */
+
+/** @} */
+
+#endif
-- 
2.13.4

  reply	other threads:[~2017-08-25 14:37 UTC|newest]

Thread overview: 11+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2017-08-25 14:37 [RFC v2 0/2] Add Virtual Box vboxguest and vboxsf guest drivers to the mainline kernel Hans de Goede
2017-08-25 14:37 ` Hans de Goede [this message]
2017-08-25 14:58   ` [RFC v2 1/2] virt: Add vboxguest driver for Virtual Box Guest integration Christoph Hellwig
2017-08-25 15:00     ` Hans de Goede
2017-08-25 15:03       ` Christoph Hellwig
2017-08-25 15:06         ` Hans de Goede
2017-08-25 15:09           ` Christoph Hellwig
2017-08-25 15:13             ` Hans de Goede
2017-08-25 15:19               ` Christoph Hellwig
2017-08-25 14:37 ` [RFC v2 2/2] fs: Add VirtualBox guest shared folder (vboxsf) support Hans de Goede
2017-08-25 15:02   ` Christoph Hellwig

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20170825143732.10836-2-hdegoede@redhat.com \
    --to=hdegoede@redhat.com \
    --cc=Larry.Finger@lwfinger.net \
    --cc=arnd@arndb.de \
    --cc=gregkh@linuxfoundation.org \
    --cc=knut.osmundsen@oracle.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=michael.thayer@oracle.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.