From: Marc Zyngier <marc.zyngier@arm.com>
To: linux-kernel@vger.kernel.org,
	linux-arm-kernel@lists.infradead.org,
	kvmarm@lists.cs.columbia.edu
Cc: Christoffer Dall <christoffer.dall@linaro.org>,
	Thomas Gleixner <tglx@linutronix.de>,
	Jason Cooper <jason@lakedaemon.net>,
	Eric Auger <eric.auger@redhat.com>,
	Shanker Donthineni <shankerd@codeaurora.org>,
	Mark Rutland <mark.rutland@arm.com>
Subject: [PATCH v2 17/52] irqchip/gic-v3-its: Add VLPI map/unmap operations
Date: Wed, 28 Jun 2017 16:03:36 +0100
Message-ID: <20170628150411.15846-18-marc.zyngier@arm.com>
In-Reply-To: <20170628150411.15846-1-marc.zyngier@arm.com>

In order to let a VLPI be injected into a guest, the VLPI must
be mapped using the VMAPTI command. When it is moved to a different
vcpu, it must be moved with the VMOVI command.

These commands are issued via the irq_set_vcpu_affinity method,
which makes sure the corresponding host LPI is unmapped first.

The reverse is also done when the VLPI is unmapped from the guest.
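
As an illustration (not part of the patch), here is a minimal sketch of
how a GICv4-aware user, such as the KVM vgic code added later in this
series, could drive this hook. The structure and field names
(its_cmd_info, its_vlpi_map, MAP_VLPI, vm/vintid/vpe_idx/db_enabled) and
the arm-gic-v4.h header are assumed to match the definitions introduced
earlier in the series, and the example_* helpers are hypothetical rather
than actual KVM call sites:

#include <linux/irq.h>
#include <linux/irqchip/arm-gic-v4.h>

/* Forward a host LPI into a guest as a VLPI (hypothetical helper) */
static int example_forward_vlpi(unsigned int host_irq, struct its_vm *vm,
                                u32 vintid, u16 vpe_idx)
{
        struct its_vlpi_map map = {
                .vm         = vm,       /* VM owning the VLPI */
                .vintid     = vintid,   /* virtual INTID seen by the guest */
                .vpe_idx    = vpe_idx,  /* index into vm->vpes[] */
                .db_enabled = true,     /* doorbell when the vPE is not resident */
        };
        struct its_cmd_info info = {
                .cmd_type = MAP_VLPI,
                .map      = &map,
        };

        /* Ends up in its_irq_set_vcpu_affinity() for LPIs owned by a v4 ITS */
        return irq_set_vcpu_affinity(host_irq, &info);
}

/* A NULL vcpu_info is the unmap request: the physical LPI is restored */
static void example_restore_lpi(unsigned int host_irq)
{
        irq_set_vcpu_affinity(host_irq, NULL);
}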

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
---
 drivers/irqchip/irq-gic-v3-its.c | 250 ++++++++++++++++++++++++++++++++++++++-
 1 file changed, 247 insertions(+), 3 deletions(-)

diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index c02e1e6cd169..527e4e95e10d 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -117,11 +117,17 @@ struct event_lpi_map {
 	u16			*col_map;
 	irq_hw_number_t		lpi_base;
 	int			nr_lpis;
+	struct mutex		vlpi_lock;
+	struct its_vm		*vm;
+	struct its_vlpi_map	*vlpi_maps;
+	int			nr_vlpis;
 };
 
 /*
- * The ITS view of a device - belongs to an ITS, a collection, owns an
- * interrupt translation table, and a list of interrupts.
+ * The ITS view of a device - belongs to an ITS, owns an interrupt
+ * translation table, and a list of interrupts.  If some of its
+ * LPIs are injected into a guest (GICv4), the event_map.vm field
+ * indicates which one.
  */
 struct its_device {
 	struct list_head	entry;
@@ -207,6 +213,21 @@ struct its_cmd_desc {
 		struct {
 			struct its_collection *col;
 		} its_invall_cmd;
+
+		struct {
+			struct its_vpe *vpe;
+			struct its_device *dev;
+			u32 virt_id;
+			u32 event_id;
+			bool db_enabled;
+		} its_vmapti_cmd;
+
+		struct {
+			struct its_vpe *vpe;
+			struct its_device *dev;
+			u32 event_id;
+			bool db_enabled;
+		} its_vmovi_cmd;
 	};
 };
 
@@ -223,6 +244,9 @@ struct its_cmd_block {
 typedef struct its_collection *(*its_cmd_builder_t)(struct its_cmd_block *,
 						    struct its_cmd_desc *);
 
+typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_cmd_block *,
+					      struct its_cmd_desc *);
+
 static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
 {
 	u64 mask = GENMASK_ULL(h, l);
@@ -275,6 +299,26 @@ static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
 	its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
 }
 
+static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
+{
+	its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
+}
+
+static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
+{
+	its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
+}
+
+static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
+{
+	its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
+}
+
+static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
+{
+	its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
+}
+
 static inline void its_fixup_cmd(struct its_cmd_block *cmd)
 {
 	/* Let's fixup BE commands */
@@ -433,6 +477,50 @@ static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd,
 	return NULL;
 }
 
+static struct its_vpe *its_build_vmapti_cmd(struct its_cmd_block *cmd,
+					    struct its_cmd_desc *desc)
+{
+	u32 db;
+
+	if (desc->its_vmapti_cmd.db_enabled)
+		db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
+	else
+		db = 1023;
+
+	its_encode_cmd(cmd, GITS_CMD_VMAPTI);
+	its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
+	its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
+	its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
+	its_encode_db_phys_id(cmd, db);
+	its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);
+
+	its_fixup_cmd(cmd);
+
+	return desc->its_vmapti_cmd.vpe;
+}
+
+static struct its_vpe *its_build_vmovi_cmd(struct its_cmd_block *cmd,
+					   struct its_cmd_desc *desc)
+{
+	u32 db;
+
+	if (desc->its_vmovi_cmd.db_enabled)
+		db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
+	else
+		db = 1023;
+
+	its_encode_cmd(cmd, GITS_CMD_VMOVI);
+	its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
+	its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
+	its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
+	its_encode_db_phys_id(cmd, db);
+	its_encode_db_valid(cmd, true);
+
+	its_fixup_cmd(cmd);
+
+	return desc->its_vmovi_cmd.vpe;
+}
+
 static u64 its_cmd_ptr_to_offset(struct its_node *its,
 				 struct its_cmd_block *ptr)
 {
@@ -578,6 +666,18 @@ static void its_build_sync_cmd(struct its_cmd_block *sync_cmd,
 static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
 			     struct its_collection, its_build_sync_cmd)
 
+static void its_build_vsync_cmd(struct its_cmd_block *sync_cmd,
+				struct its_vpe *sync_vpe)
+{
+	its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
+	its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);
+
+	its_fixup_cmd(sync_cmd);
+}
+
+static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
+			     struct its_vpe, its_build_vsync_cmd)
+
 static void its_send_int(struct its_device *dev, u32 event_id)
 {
 	struct its_cmd_desc desc;
@@ -671,6 +771,33 @@ static void its_send_invall(struct its_node *its, struct its_collection *col)
 	its_send_single_command(its, its_build_invall_cmd, &desc);
 }
 
+static void its_send_vmapti(struct its_device *dev, u32 id)
+{
+	struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
+	struct its_cmd_desc desc;
+
+	desc.its_vmapti_cmd.vpe = map->vm->vpes[map->vpe_idx];
+	desc.its_vmapti_cmd.dev = dev;
+	desc.its_vmapti_cmd.virt_id = map->vintid;
+	desc.its_vmapti_cmd.event_id = id;
+	desc.its_vmapti_cmd.db_enabled = map->db_enabled;
+
+	its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
+}
+
+static void its_send_vmovi(struct its_device *dev, u32 id)
+{
+	struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
+	struct its_cmd_desc desc;
+
+	desc.its_vmovi_cmd.vpe = map->vm->vpes[map->vpe_idx];
+	desc.its_vmovi_cmd.dev = dev;
+	desc.its_vmovi_cmd.event_id = id;
+	desc.its_vmovi_cmd.db_enabled = map->db_enabled;
+
+	its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
+}
+
 /*
  * irqchip functions - assumes MSI, mostly.
  */
@@ -780,19 +907,135 @@ static int its_irq_set_irqchip_state(struct irq_data *d,
 	return 0;
 }
 
+static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
+{
+	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+	u32 event = its_get_event_id(d);
+	int ret = 0;
+
+	if (!info->map)
+		return -EINVAL;
+
+	mutex_lock(&its_dev->event_map.vlpi_lock);
+
+	if (!its_dev->event_map.vm) {
+		struct its_vlpi_map *maps;
+
+		maps = kzalloc(sizeof(*maps) * its_dev->event_map.nr_lpis,
+			       GFP_KERNEL);
+		if (!maps) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		its_dev->event_map.vm = info->map->vm;
+		its_dev->event_map.vlpi_maps = maps;
+	} else if (its_dev->event_map.vm != info->map->vm) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* Get our private copy of the mapping information */
+	its_dev->event_map.vlpi_maps[event] = *info->map;
+
+	if (irqd_is_forwarded_to_vcpu(d)) {
+		/* Already mapped, move it around */
+		its_send_vmovi(its_dev, event);
+	} else {
+		/* Drop the physical mapping */
+		its_send_discard(its_dev, event);
+
+		/* and install the virtual one */
+		its_send_vmapti(its_dev, event);
+		irqd_set_forwarded_to_vcpu(d);
+
+		/* Increment the number of VLPIs */
+		its_dev->event_map.nr_vlpis++;
+	}
+
+out:
+	mutex_unlock(&its_dev->event_map.vlpi_lock);
+	return ret;
+}
+
+static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
+{
+	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+	u32 event = its_get_event_id(d);
+	int ret = 0;
+
+	mutex_lock(&its_dev->event_map.vlpi_lock);
+
+	if (!its_dev->event_map.vm ||
+	    !its_dev->event_map.vlpi_maps[event].vm) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* Copy our mapping information to the incoming request */
+	*info->map = its_dev->event_map.vlpi_maps[event];
+
+out:
+	mutex_unlock(&its_dev->event_map.vlpi_lock);
+	return ret;
+}
+
+static int its_vlpi_unmap(struct irq_data *d)
+{
+	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+	u32 event = its_get_event_id(d);
+	int ret = 0;
+
+	mutex_lock(&its_dev->event_map.vlpi_lock);
+
+	if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* Drop the virtual mapping */
+	its_send_discard(its_dev, event);
+
+	/* and restore the physical one */
+	irqd_clr_forwarded_to_vcpu(d);
+	its_send_mapti(its_dev, d->hwirq, event);
+	lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO |
+				    LPI_PROP_ENABLED |
+				    LPI_PROP_GROUP1));
+
+	/*
+	 * Drop the refcount and make the device available again if
+	 * this was the last VLPI.
+	 */
+	if (!--its_dev->event_map.nr_vlpis) {
+		its_dev->event_map.vm = NULL;
+		kfree(its_dev->event_map.vlpi_maps);
+	}
+
+out:
+	mutex_unlock(&its_dev->event_map.vlpi_lock);
+	return ret;
+}
+
 static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
 {
 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
 	struct its_cmd_info *info = vcpu_info;
 
 	/* Need a v4 ITS */
-	if (!its_dev->its->is_v4 || !info)
+	if (!its_dev->its->is_v4)
 		return -EINVAL;
 
+	/* Unmap request? */
+	if (!info)
+		return its_vlpi_unmap(d);
+
 	switch (info->cmd_type) {
 	case MAP_VLPI:
+		return its_vlpi_map(d, info);
 
 	case GET_VLPI:
+		return its_vlpi_get(d, info);
 
 	case PROP_UPDATE_VLPI:
 	case PROP_UPDATE_AND_INV_VLPI:
@@ -1509,6 +1752,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
 	dev->event_map.col_map = col_map;
 	dev->event_map.lpi_base = lpi_base;
 	dev->event_map.nr_lpis = nr_lpis;
+	mutex_init(&dev->event_map.vlpi_lock);
 	dev->device_id = dev_id;
 	INIT_LIST_HEAD(&dev->entry);
 
-- 
2.11.0

Thread overview: 57+ messages
2017-06-28 15:03 [PATCH v2 00/52] irqchip: KVM: Add support for GICv4 Marc Zyngier
2017-06-28 15:03 ` [PATCH v2 01/52] genirq: Let irq_set_vcpu_affinity() iterate over hierarchy Marc Zyngier
2017-07-04 21:15   ` Thomas Gleixner
2017-06-28 15:03 ` [PATCH v2 02/52] irqchip/gic-v3: Add redistributor iterator Marc Zyngier
2017-06-28 15:03 ` [PATCH v2 03/52] irqchip/gic-v3: Add VLPI/DirectLPI discovery Marc Zyngier
2017-06-28 15:03 ` [PATCH v2 04/52] irqchip/gic-v3-its: Move LPI definitions around Marc Zyngier
2017-06-28 15:03 ` [PATCH v2 05/52] irqchip/gic-v3-its: Add probing for VLPI properties Marc Zyngier
2017-06-28 15:03 ` [PATCH v2 06/52] irqchip/gic-v3-its: Macro-ize its_send_single_command Marc Zyngier
2017-06-28 15:03 ` [PATCH v2 07/52] irqchip/gic-v3-its: Implement irq_set_irqchip_state for pending state Marc Zyngier
2017-06-28 15:03 ` [PATCH v2 08/52] irqchip/gic-v3-its: Split out property table allocation Marc Zyngier
2017-06-28 15:03 ` [PATCH v2 09/52] irqchip/gic-v3-its: Allow use of indirect VCPU tables Marc Zyngier
2017-06-28 15:03 ` [PATCH v2 10/52] irqchip/gic-v3-its: Split out pending table allocation Marc Zyngier
2017-06-28 15:03 ` [PATCH v2 11/52] irqchip/gic-v3-its: Rework LPI freeing Marc Zyngier
2017-06-28 15:03 ` [PATCH v2 12/52] irqchip/gic-v3-its: Generalize device table allocation Marc Zyngier
2017-06-28 15:03 ` [PATCH v2 13/52] irqchip/gic-v3-its: Generalize LPI configuration Marc Zyngier
2017-06-28 15:03 ` [PATCH v2 14/52] irqchip/gic-v4: Add management structure definitions Marc Zyngier
2017-06-28 15:03 ` [PATCH v2 15/52] irqchip/gic-v3-its: Add GICv4 ITS command definitions Marc Zyngier
2017-06-28 15:03 ` [PATCH v2 16/52] irqchip/gic-v3-its: Add VLPI configuration hook Marc Zyngier
2017-06-28 15:03 ` Marc Zyngier [this message]
2017-06-28 15:03 ` [PATCH v2 18/52] irqchip/gic-v3-its: Add VLPI configuration handling Marc Zyngier
2017-06-28 15:03 ` [PATCH v2 19/52] irqchip/gic-v3-its: Add VPE domain infrastructure Marc Zyngier
2017-06-28 15:03 ` [PATCH v2 20/52] irqchip/gic-v3-its: Add VPE irq domain allocation/teardown Marc Zyngier
2017-06-28 15:03 ` [PATCH v2 21/52] irqchip/gic-v3-its: Add VPE irq domain [de]activation Marc Zyngier
2017-06-28 15:03 ` [PATCH v2 22/52] irqchip/gic-v3-its: Add VPENDBASER/VPROPBASER accessors Marc Zyngier
2017-06-28 15:03 ` [PATCH v2 23/52] irqchip/gic-v3-its: Add VPE scheduling Marc Zyngier
2017-06-28 15:03 ` [PATCH v2 24/52] irqchip/gic-v3-its: Add VPE invalidation hook Marc Zyngier
2017-06-28 15:03 ` [PATCH v2 25/52] irqchip/gic-v3-its: Add VPE affinity changes Marc Zyngier
2017-06-28 15:03 ` [PATCH v2 26/52] irqchip/gic-v3-its: Add VPE interrupt masking Marc Zyngier
2017-06-28 15:03 ` [PATCH v2 27/52] irqchip/gic-v3-its: Support VPE doorbell invalidation even when !DirectLPI Marc Zyngier
2017-06-28 15:03 ` [PATCH v2 28/52] irqchip/gic-v3-its: Set implementation defined bit to enable VLPIs Marc Zyngier
2017-06-28 15:03 ` [PATCH v2 29/52] irqchip/gic-v4: Add per-VM VPE domain creation Marc Zyngier
2017-06-28 15:03 ` [PATCH v2 30/52] irqchip/gic-v4: Add VPE command interface Marc Zyngier
2017-06-28 15:03 ` [PATCH v2 31/52] irqchip/gic-v4: Add VLPI configuration interface Marc Zyngier
2017-06-28 15:03 ` [PATCH v2 32/52] irqchip/gic-v4: Add some basic documentation Marc Zyngier
2017-06-28 15:03 ` [PATCH v2 33/52] irqchip/gic-v4: Enable low-level GICv4 operations Marc Zyngier
2017-06-28 15:03 ` [PATCH v2 34/52] irqchip/gic-v3: Advertise GICv4 support to KVM Marc Zyngier
2017-06-28 15:03 ` [PATCH v2 35/52] KVM: arm/arm64: vgic: Move kvm_vgic_destroy call around Marc Zyngier
2017-06-28 15:03 ` [PATCH v2 36/52] KVM: arm/arm64: vITS: Add MSI translation helpers Marc Zyngier
2017-06-28 15:03 ` [PATCH v2 37/52] KVM: arm/arm64: GICv4: Add init and teardown of the vPE irq domain Marc Zyngier
2017-06-28 15:03 ` [PATCH v2 38/52] KVM: arm/arm64: GICv4: Wire init/teardown of per-VM support Marc Zyngier
2017-07-08 11:26   ` Shanker Donthineni
2017-07-10 16:34     ` Marc Zyngier
2017-06-28 15:03 ` [PATCH v2 39/52] KVM: arm/arm64: GICv4: Wire mapping/unmapping of VLPIs in VFIO irq bypass Marc Zyngier
2017-06-28 15:03 ` [PATCH v2 40/52] KVM: arm/arm64: GICv4: Handle INT command applied to a VLPI Marc Zyngier
2017-06-28 15:04 ` [PATCH v2 41/52] KVM: arm/arm64: GICv4: Unmap VLPI when freeing an LPI Marc Zyngier
2017-06-28 15:04 ` [PATCH v2 42/52] KVM: arm/arm64: GICv4: Handle MOVI applied to a VLPI Marc Zyngier
2017-06-28 15:04 ` [PATCH v2 43/52] KVM: arm/arm64: GICv4: Handle CLEAR " Marc Zyngier
2017-06-28 15:04 ` [PATCH v2 44/52] KVM: arm/arm64: GICv4: Handle MOVALL applied to a vPE Marc Zyngier
2017-06-28 15:04 ` [PATCH v2 45/52] KVM: arm/arm64: GICv4: Propagate property updates to VLPIs Marc Zyngier
2017-06-28 15:04 ` [PATCH v2 46/52] KVM: arm/arm64: GICv4: Handle INVALL applied to a vPE Marc Zyngier
2017-06-28 15:04 ` [PATCH v2 47/52] KVM: arm/arm64: GICv4: Propagate VLPI properties at map time Marc Zyngier
2017-06-28 15:04 ` [PATCH v2 48/52] KVM: arm/arm64: GICv4: Add doorbell interrupt handling Marc Zyngier
2017-06-28 15:04 ` [PATCH v2 49/52] KVM: arm/arm64: GICv4: Hook vPE scheduling into vgic flush/sync Marc Zyngier
2017-06-28 15:04 ` [PATCH v2 50/52] KVM: arm/arm64: GICv4: Enable virtual cpuif if VLPIs can be delivered Marc Zyngier
2017-06-28 15:04 ` [PATCH v2 51/52] KVM: arm/arm64: GICv4: Use pending_last as a scheduling hint Marc Zyngier
2017-06-28 15:04 ` [PATCH v2 52/52] KVM: arm/arm64: GICv4: Enable VLPI support Marc Zyngier
2017-07-01 14:54 ` [PATCH v2 00/52] irqchip: KVM: Add support for GICv4 Shanker Donthineni
