From: Maya Nakamura <m.maya.nakamura@gmail.com>
To: lorenzo.pieralisi@arm.com, bhelgaas@google.com,
	linux-pci@vger.kernel.org, kys@microsoft.com,
	sthemmin@microsoft.com, olaf@aepfle.de, apw@canonical.com,
	jasowang@redhat.com, mikelley@microsoft.com,
	Alexander.Levin@microsoft.com
Cc: linux-kernel@vger.kernel.org, linux-hyperv@vger.kernel.org,
	haiyangz@microsoft.com, vkuznets@redhat.com,
	marcelo.cerri@canonical.com
Subject: [PATCH v5 2/3] PCI: hv: Replace hv_vp_set with hv_vpset
Date: Fri, 1 Mar 2019 06:59:02 +0000
Message-ID: <d18b5c492df84fcf4a55954ffc508dd937a44eed.1551421809.git.m.maya.nakamura@gmail.com>
In-Reply-To: <cover.1551421809.git.m.maya.nakamura@gmail.com>

Remove the duplicate VP set definition (hv_vp_set) and use the common
definition (hv_vpset) that is already used elsewhere.

Change the order of the members in struct hv_pcibus_device so that
retarget_msi_interrupt_params is the last member. Struct hv_vpset, which
ends in a flexible array, is nested two levels deep in struct
hv_pcibus_device via retarget_msi_interrupt_params.

Add a comment that retarget_msi_interrupt_params should be the last
member of struct hv_pcibus_device.
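
For reference, the shared definition being adopted looks roughly like this
(a sketch based on the Hyper-V TLFS header of this era; exact attributes
and comments may differ):

	/* Sparse VP set: valid_bank_mask selects which 64-CPU banks follow. */
	struct hv_vpset {
		u64 format;		/* 0 (HvGenericSetSparse4k) */
		u64 valid_bank_mask;
		u64 bank_contents[];	/* flexible array member */
	};

Because bank_contents[] is a flexible array member, hv_vpset can only be
embedded as the final member of an enclosing structure, and that constraint
propagates up through retarget_msi_interrupt to struct hv_pcibus_device;
hence the reordering described above.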

Signed-off-by: Maya Nakamura <m.maya.nakamura@gmail.com>
Reviewed-by: Michael Kelley <mikelley@microsoft.com>
Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Tested-by: Vitaly Kuznetsov <vkuznets@redhat.com>
---
Changes in v5:
 - Remove the code added in v4.
 - Delete the v4 code change related comment from the commit message.
 - Add the Reviewed-by and Tested-by tags.

Changes in v4:
 - Add __aligned(8) to struct retarget_msi_interrupt.
 - Update the commit message.

Change in v3:
 - Correct the v2 change log.

Change in v2:
 - Update the commit message.

 drivers/pci/controller/pci-hyperv.c | 25 ++++++++++++-------------
 1 file changed, 12 insertions(+), 13 deletions(-)

diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 73862eef09ec..d71695db1ba0 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -393,12 +393,6 @@ struct hv_interrupt_entry {
 
 #define HV_VP_SET_BANK_COUNT_MAX	5 /* current implementation limit */
 
-struct hv_vp_set {
-	u64	format;			/* 0 (HvGenericSetSparse4k) */
-	u64	valid_banks;
-	u64	masks[HV_VP_SET_BANK_COUNT_MAX];
-};
-
 /*
  * flags for hv_device_interrupt_target.flags
  */
@@ -410,7 +404,7 @@ struct hv_device_interrupt_target {
 	u32	flags;
 	union {
 		u64		 vp_mask;
-		struct hv_vp_set vp_set;
+		struct hv_vpset vp_set;
 	};
 };
 
@@ -460,12 +454,16 @@ struct hv_pcibus_device {
 	struct msi_controller msi_chip;
 	struct irq_domain *irq_domain;
 
-	/* hypercall arg, must not cross page boundary */
-	struct retarget_msi_interrupt retarget_msi_interrupt_params;
-
 	spinlock_t retarget_msi_interrupt_lock;
 
 	struct workqueue_struct *wq;
+
+	/* hypercall arg, must not cross page boundary */
+	struct retarget_msi_interrupt retarget_msi_interrupt_params;
+
+	/*
+	 * Don't put anything here: retarget_msi_interrupt_params must be last
+	 */
 };
 
 /*
@@ -955,12 +953,13 @@ static void hv_irq_unmask(struct irq_data *data)
 		 */
 		params->int_target.flags |=
 			HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET;
-		params->int_target.vp_set.valid_banks =
+		params->int_target.vp_set.valid_bank_mask =
 			(1ull << HV_VP_SET_BANK_COUNT_MAX) - 1;
 
 		/*
 		 * var-sized hypercall, var-size starts after vp_mask (thus
-		 * vp_set.format does not count, but vp_set.valid_banks does).
+		 * vp_set.format does not count, but vp_set.valid_bank_mask
+		 * does).
 		 */
 		var_size = 1 + HV_VP_SET_BANK_COUNT_MAX;
 
@@ -974,7 +973,7 @@ static void hv_irq_unmask(struct irq_data *data)
 				goto exit_unlock;
 			}
 
-			params->int_target.vp_set.masks[cpu_vmbus / 64] |=
+			params->int_target.vp_set.bank_contents[cpu_vmbus / 64]	|=
 				(1ULL << (cpu_vmbus & 63));
 		}
 	} else {
-- 
2.17.1
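
An illustrative note on the sizing in the hv_irq_unmask() hunk above (not
part of the patch): the variable-sized portion of the retarget hypercall
input starts after vp_mask, so it covers valid_bank_mask plus the
bank_contents[] words, while vp_set.format overlays vp_mask in the union
and therefore belongs to the fixed-size input:

	/* 1 qword for valid_bank_mask + HV_VP_SET_BANK_COUNT_MAX (5) bank words */
	var_size = 1 + HV_VP_SET_BANK_COUNT_MAX;	/* = 6 qwords */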


Thread overview: 5+ messages
2019-03-01  6:54 [PATCH v5 0/3] PCI: hv: Refactor hv_irq_unmask() to use hv_vpset and cpumask_to_vpset() Maya Nakamura
2019-03-01  6:56 ` [PATCH v5 1/3] PCI: hv: Add __aligned(8) to struct retarget_msi_interrupt Maya Nakamura
2019-03-01  6:59 ` Maya Nakamura [this message]
2019-03-01  7:04 ` [PATCH v5 3/3] PCI: hv: Refactor hv_irq_unmask() to use cpumask_to_vpset() Maya Nakamura
2019-03-01 11:55 ` [PATCH v5 0/3] PCI: hv: Refactor hv_irq_unmask() to use hv_vpset and cpumask_to_vpset() Lorenzo Pieralisi
