All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH v2] KVM: Introduce direct MSI message injection for in-kernel irqchips
@ 2012-03-28 17:47 Jan Kiszka
  2012-03-28 17:52 ` Jan Kiszka
                   ` (3 more replies)
  0 siblings, 4 replies; 43+ messages in thread
From: Jan Kiszka @ 2012-03-28 17:47 UTC (permalink / raw)
  To: Avi Kivity, Marcelo Tosatti; +Cc: kvm, Michael S. Tsirkin

Currently, MSI messages can only be injected to in-kernel irqchips by
defining a corresponding IRQ route for each message. This is not only
unhandy if the MSI messages are generated "on the fly" by user space,
IRQ routes are a limited resource that user space has to manage
carefully.

By providing a direct injection path, we can both avoid using up limited
resources and simplify the necessary steps for user land.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
---

Changes in v2:
 - rebased
 - dropped KVM_MSI_FLAG_RAISE (but the IOCTL is still extensible)
 - check for in-kernel irqchip presence added

 Documentation/virtual/kvm/api.txt |   21 +++++++++++++++++++++
 arch/x86/kvm/Kconfig              |    1 +
 include/linux/kvm.h               |   11 +++++++++++
 virt/kvm/Kconfig                  |    3 +++
 virt/kvm/kvm_main.c               |   21 +++++++++++++++++++++
 5 files changed, 57 insertions(+), 0 deletions(-)

diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 81ff39f..dd6377e 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -1482,6 +1482,27 @@ See KVM_ASSIGN_DEV_IRQ for the data structure.  The target device is specified
 by assigned_dev_id.  In the flags field, only KVM_DEV_ASSIGN_MASK_INTX is
 evaluated.
 
+4.61 KVM_SET_MSI
+
+Capability: KVM_CAP_SET_MSI
+Architectures: x86
+Type: vm ioctl
+Parameters: struct kvm_msi (in)
+Returns: 0 on success, -1 on error
+
+Directly inject a MSI message. Only valid with in-kernel irqchip that handles
+MSI messages.
+
+struct kvm_msi {
+	__u32 address_lo;
+	__u32 address_hi;
+	__u32 data;
+	__u32 flags;
+	__u8  pad[16];
+};
+
+No flags are defined so far. The corresponding field must be 0.
+
 4.62 KVM_CREATE_SPAPR_TCE
 
 Capability: KVM_CAP_SPAPR_TCE
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 1a7fe86..a28f338 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -36,6 +36,7 @@ config KVM
 	select TASKSTATS
 	select TASK_DELAY_ACCT
 	select PERF_EVENTS
+	select HAVE_KVM_MSI
 	---help---
 	  Support hosting fully virtualized guest machines using hardware
 	  virtualization extensions.  You will need a fairly recent
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 7a9dd4b..2dc24ac 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -590,6 +590,7 @@ struct kvm_ppc_pvinfo {
 #define KVM_CAP_SYNC_REGS 74
 #define KVM_CAP_PCI_2_3 75
 #define KVM_CAP_KVMCLOCK_CTRL 76
+#define KVM_CAP_SET_MSI 77
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -715,6 +716,14 @@ struct kvm_one_reg {
 	__u64 addr;
 };
 
+struct kvm_msi {
+	__u32 address_lo;
+	__u32 address_hi;
+	__u32 data;
+	__u32 flags;
+	__u8  pad[16];
+};
+
 /*
  * ioctls for VM fds
  */
@@ -789,6 +798,8 @@ struct kvm_s390_ucas_mapping {
 /* Available with KVM_CAP_PCI_2_3 */
 #define KVM_ASSIGN_SET_INTX_MASK  _IOW(KVMIO,  0xa4, \
 				       struct kvm_assigned_pci_dev)
+/* Available with KVM_CAP_SET_MSI */
+#define KVM_SET_MSI               _IOW(KVMIO,  0xa5, struct kvm_msi)
 
 /*
  * ioctls for vcpu fds
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index f63ccb0..28694f4 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -18,3 +18,6 @@ config KVM_MMIO
 
 config KVM_ASYNC_PF
        bool
+
+config HAVE_KVM_MSI
+       bool
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index a612bc8..ea9887e 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2063,6 +2063,24 @@ static long kvm_vm_ioctl(struct file *filp,
 		mutex_unlock(&kvm->lock);
 		break;
 #endif
+#ifdef CONFIG_HAVE_KVM_MSI
+	case KVM_SET_MSI: {
+		struct kvm_kernel_irq_routing_entry route;
+		struct kvm_msi msi;
+
+		r = -EFAULT;
+		if (copy_from_user(&msi, argp, sizeof msi))
+			goto out;
+		r = -EINVAL;
+		if (!irqchip_in_kernel(kvm) || msi.flags != 0)
+			goto out;
+		route.msi.address_lo = msi.address_lo;
+		route.msi.address_hi = msi.address_hi;
+		route.msi.data = msi.data;
+		r =  kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1);
+		break;
+	}
+#endif
 	default:
 		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
 		if (r == -ENOTTY)
@@ -2191,6 +2209,9 @@ static long kvm_dev_ioctl_check_extension_generic(long arg)
 	case KVM_CAP_SET_BOOT_CPU_ID:
 #endif
 	case KVM_CAP_INTERNAL_ERROR_DATA:
+#ifdef CONFIG_HAVE_KVM_MSI
+	case KVM_CAP_SET_MSI:
+#endif
 		return 1;
 #ifdef CONFIG_HAVE_KVM_IRQCHIP
 	case KVM_CAP_IRQ_ROUTING:
-- 
1.7.3.4

^ permalink raw reply related	[flat|nested] 43+ messages in thread

* Re: [PATCH v2] KVM: Introduce direct MSI message injection for in-kernel irqchips
  2012-03-28 17:47 [PATCH v2] KVM: Introduce direct MSI message injection for in-kernel irqchips Jan Kiszka
@ 2012-03-28 17:52 ` Jan Kiszka
  2012-03-28 19:58 ` Eric Northup
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 43+ messages in thread
From: Jan Kiszka @ 2012-03-28 17:52 UTC (permalink / raw)
  To: Avi Kivity, Marcelo Tosatti; +Cc: kvm, Michael S. Tsirkin

On 2012-03-28 19:47, Jan Kiszka wrote:
> Currently, MSI messages can only be injected to in-kernel irqchips by
> defining a corresponding IRQ route for each message. This is not only
> unhandy if the MSI messages are generated "on the fly" by user space,
> IRQ routes are a limited resource that user space as to manage
> carefully.
> 
> By providing a direct injection with, we can both avoid using up limited

"By providing a direct injection path, [...]". Will fix if another round
is required, please add manually otherwise.

Jan

> resources and simplify the necessary steps for user land.
> 
> Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
> ---
> 
> Changes in v2:
>  - rebased
>  - dropped KVM_MSI_FLAG_RAISE (but the IOCTL is still extensible)
>  - check for in-kernel irqchip presence added
> 
>  Documentation/virtual/kvm/api.txt |   21 +++++++++++++++++++++
>  arch/x86/kvm/Kconfig              |    1 +
>  include/linux/kvm.h               |   11 +++++++++++
>  virt/kvm/Kconfig                  |    3 +++
>  virt/kvm/kvm_main.c               |   21 +++++++++++++++++++++
>  5 files changed, 57 insertions(+), 0 deletions(-)
> 
> diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
> index 81ff39f..dd6377e 100644
> --- a/Documentation/virtual/kvm/api.txt
> +++ b/Documentation/virtual/kvm/api.txt
> @@ -1482,6 +1482,27 @@ See KVM_ASSIGN_DEV_IRQ for the data structure.  The target device is specified
>  by assigned_dev_id.  In the flags field, only KVM_DEV_ASSIGN_MASK_INTX is
>  evaluated.
>  
> +4.61 KVM_SET_MSI
> +
> +Capability: KVM_CAP_SET_MSI
> +Architectures: x86
> +Type: vm ioctl
> +Parameters: struct kvm_msi (in)
> +Returns: 0 on success, -1 on error
> +
> +Directly inject a MSI message. Only valid with in-kernel irqchip that handles
> +MSI messages.
> +
> +struct kvm_msi {
> +	__u32 address_lo;
> +	__u32 address_hi;
> +	__u32 data;
> +	__u32 flags;
> +	__u8  pad[16];
> +};
> +
> +No flags are defined so far. The corresponding field must be 0.
> +
>  4.62 KVM_CREATE_SPAPR_TCE
>  
>  Capability: KVM_CAP_SPAPR_TCE
> diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
> index 1a7fe86..a28f338 100644
> --- a/arch/x86/kvm/Kconfig
> +++ b/arch/x86/kvm/Kconfig
> @@ -36,6 +36,7 @@ config KVM
>  	select TASKSTATS
>  	select TASK_DELAY_ACCT
>  	select PERF_EVENTS
> +	select HAVE_KVM_MSI
>  	---help---
>  	  Support hosting fully virtualized guest machines using hardware
>  	  virtualization extensions.  You will need a fairly recent
> diff --git a/include/linux/kvm.h b/include/linux/kvm.h
> index 7a9dd4b..2dc24ac 100644
> --- a/include/linux/kvm.h
> +++ b/include/linux/kvm.h
> @@ -590,6 +590,7 @@ struct kvm_ppc_pvinfo {
>  #define KVM_CAP_SYNC_REGS 74
>  #define KVM_CAP_PCI_2_3 75
>  #define KVM_CAP_KVMCLOCK_CTRL 76
> +#define KVM_CAP_SET_MSI 77
>  
>  #ifdef KVM_CAP_IRQ_ROUTING
>  
> @@ -715,6 +716,14 @@ struct kvm_one_reg {
>  	__u64 addr;
>  };
>  
> +struct kvm_msi {
> +	__u32 address_lo;
> +	__u32 address_hi;
> +	__u32 data;
> +	__u32 flags;
> +	__u8  pad[16];
> +};
> +
>  /*
>   * ioctls for VM fds
>   */
> @@ -789,6 +798,8 @@ struct kvm_s390_ucas_mapping {
>  /* Available with KVM_CAP_PCI_2_3 */
>  #define KVM_ASSIGN_SET_INTX_MASK  _IOW(KVMIO,  0xa4, \
>  				       struct kvm_assigned_pci_dev)
> +/* Available with KVM_CAP_SET_MSI */
> +#define KVM_SET_MSI               _IOW(KVMIO,  0xa5, struct kvm_msi)
>  
>  /*
>   * ioctls for vcpu fds
> diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
> index f63ccb0..28694f4 100644
> --- a/virt/kvm/Kconfig
> +++ b/virt/kvm/Kconfig
> @@ -18,3 +18,6 @@ config KVM_MMIO
>  
>  config KVM_ASYNC_PF
>         bool
> +
> +config HAVE_KVM_MSI
> +       bool
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index a612bc8..ea9887e 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -2063,6 +2063,24 @@ static long kvm_vm_ioctl(struct file *filp,
>  		mutex_unlock(&kvm->lock);
>  		break;
>  #endif
> +#ifdef CONFIG_HAVE_KVM_MSI
> +	case KVM_SET_MSI: {
> +		struct kvm_kernel_irq_routing_entry route;
> +		struct kvm_msi msi;
> +
> +		r = -EFAULT;
> +		if (copy_from_user(&msi, argp, sizeof msi))
> +			goto out;
> +		r = -EINVAL;
> +		if (!irqchip_in_kernel(kvm) || msi.flags != 0)
> +			goto out;
> +		route.msi.address_lo = msi.address_lo;
> +		route.msi.address_hi = msi.address_hi;
> +		route.msi.data = msi.data;
> +		r =  kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1);
> +		break;
> +	}
> +#endif
>  	default:
>  		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
>  		if (r == -ENOTTY)
> @@ -2191,6 +2209,9 @@ static long kvm_dev_ioctl_check_extension_generic(long arg)
>  	case KVM_CAP_SET_BOOT_CPU_ID:
>  #endif
>  	case KVM_CAP_INTERNAL_ERROR_DATA:
> +#ifdef CONFIG_HAVE_KVM_MSI
> +	case KVM_CAP_SET_MSI:
> +#endif
>  		return 1;
>  #ifdef CONFIG_HAVE_KVM_IRQCHIP
>  	case KVM_CAP_IRQ_ROUTING:

-- 
Siemens AG, Corporate Technology, CT T DE IT 1
Corporate Competence Center Embedded Linux

^ permalink raw reply	[flat|nested] 43+ messages in thread

* Re: [PATCH v2] KVM: Introduce direct MSI message injection for in-kernel irqchips
  2012-03-28 17:47 [PATCH v2] KVM: Introduce direct MSI message injection for in-kernel irqchips Jan Kiszka
  2012-03-28 17:52 ` Jan Kiszka
@ 2012-03-28 19:58 ` Eric Northup
  2012-03-28 20:21   ` Jan Kiszka
  2012-03-29 15:39 ` Michael S. Tsirkin
  2012-03-29 16:15 ` [PATCH v3] " Jan Kiszka
  3 siblings, 1 reply; 43+ messages in thread
From: Eric Northup @ 2012-03-28 19:58 UTC (permalink / raw)
  To: Jan Kiszka; +Cc: Avi Kivity, Marcelo Tosatti, kvm, Michael S. Tsirkin

On Wed, Mar 28, 2012 at 10:47 AM, Jan Kiszka <jan.kiszka@siemens.com> wrote:
[...]
> +4.61 KVM_SET_MSI
> +
> +Capability: KVM_CAP_SET_MSI
> +Architectures: x86
> +Type: vm ioctl
> +Parameters: struct kvm_msi (in)
> +Returns: 0 on success, -1 on error

Is this the actual behavior?  It looked to me like the successful
return value ended up getting set by __apic_accept_irq(), which claims
to "Return 1 if successfully added and 0 if discarded".

^ permalink raw reply	[flat|nested] 43+ messages in thread

* Re: [PATCH v2] KVM: Introduce direct MSI message injection for in-kernel irqchips
  2012-03-28 19:58 ` Eric Northup
@ 2012-03-28 20:21   ` Jan Kiszka
  0 siblings, 0 replies; 43+ messages in thread
From: Jan Kiszka @ 2012-03-28 20:21 UTC (permalink / raw)
  To: Eric Northup; +Cc: Avi Kivity, Marcelo Tosatti, kvm, Michael S. Tsirkin

[-- Attachment #1: Type: text/plain, Size: 711 bytes --]

On 2012-03-28 21:58, Eric Northup wrote:
> On Wed, Mar 28, 2012 at 10:47 AM, Jan Kiszka <jan.kiszka@siemens.com> wrote:
> [...]
>> +4.61 KVM_SET_MSI
>> +
>> +Capability: KVM_CAP_SET_MSI
>> +Architectures: x86
>> +Type: vm ioctl
>> +Parameters: struct kvm_msi (in)
>> +Returns: 0 on success, -1 on error
> 
> Is this the actual behavior?  It looked to me like the successful
> return value ended up getting set by __apic_accept_irq(), which claims
> to "Return 1 if successfully added and 0 if discarded".

True. And I think we want this information, e.g. to implement IRQ
decoalescing for periodic HPET timers that use MSIs.

Will adjust the documentations.

Thanks for reviewing,
Jan



[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 262 bytes --]

^ permalink raw reply	[flat|nested] 43+ messages in thread

* Re: [PATCH v2] KVM: Introduce direct MSI message injection for in-kernel irqchips
  2012-03-28 17:47 [PATCH v2] KVM: Introduce direct MSI message injection for in-kernel irqchips Jan Kiszka
  2012-03-28 17:52 ` Jan Kiszka
  2012-03-28 19:58 ` Eric Northup
@ 2012-03-29 15:39 ` Michael S. Tsirkin
  2012-03-29 15:43   ` Jan Kiszka
  2012-03-29 16:15 ` [PATCH v3] " Jan Kiszka
  3 siblings, 1 reply; 43+ messages in thread
From: Michael S. Tsirkin @ 2012-03-29 15:39 UTC (permalink / raw)
  To: Jan Kiszka; +Cc: Avi Kivity, Marcelo Tosatti, kvm

On Wed, Mar 28, 2012 at 07:47:31PM +0200, Jan Kiszka wrote:
> Currently, MSI messages can only be injected to in-kernel irqchips by
> defining a corresponding IRQ route for each message. This is not only
> unhandy if the MSI messages are generated "on the fly" by user space,
> IRQ routes are a limited resource that user space as to manage
> carefully.
> 
> By providing a direct injection with, we can both avoid using up limited
> resources and simplify the necessary steps for user land.
> 
> Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>

Looks straight-forward to me. Others noted some
documentation nits, so I know you are going
to repost, anyway. When you do how about renaming
SET_MSI -> SEND_MSI or SIGNAL_MSI ?
We don't set anything, as such ...

I know we have kvm_set_msi internally but this is
more or less a misnomer.


> ---
> 
> Changes in v2:
>  - rebased
>  - dropped KVM_MSI_FLAG_RAISE (but the IOCTL is still extensible)
>  - check for in-kernel irqchip presence added
> 
>  Documentation/virtual/kvm/api.txt |   21 +++++++++++++++++++++
>  arch/x86/kvm/Kconfig              |    1 +
>  include/linux/kvm.h               |   11 +++++++++++
>  virt/kvm/Kconfig                  |    3 +++
>  virt/kvm/kvm_main.c               |   21 +++++++++++++++++++++
>  5 files changed, 57 insertions(+), 0 deletions(-)
> 
> diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
> index 81ff39f..dd6377e 100644
> --- a/Documentation/virtual/kvm/api.txt
> +++ b/Documentation/virtual/kvm/api.txt
> @@ -1482,6 +1482,27 @@ See KVM_ASSIGN_DEV_IRQ for the data structure.  The target device is specified
>  by assigned_dev_id.  In the flags field, only KVM_DEV_ASSIGN_MASK_INTX is
>  evaluated.
>  
> +4.61 KVM_SET_MSI
> +
> +Capability: KVM_CAP_SET_MSI
> +Architectures: x86
> +Type: vm ioctl
> +Parameters: struct kvm_msi (in)
> +Returns: 0 on success, -1 on error
> +
> +Directly inject a MSI message. Only valid with in-kernel irqchip that handles
> +MSI messages.
> +
> +struct kvm_msi {
> +	__u32 address_lo;
> +	__u32 address_hi;
> +	__u32 data;
> +	__u32 flags;
> +	__u8  pad[16];
> +};
> +
> +No flags are defined so far. The corresponding field must be 0.
> +
>  4.62 KVM_CREATE_SPAPR_TCE
>  
>  Capability: KVM_CAP_SPAPR_TCE
> diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
> index 1a7fe86..a28f338 100644
> --- a/arch/x86/kvm/Kconfig
> +++ b/arch/x86/kvm/Kconfig
> @@ -36,6 +36,7 @@ config KVM
>  	select TASKSTATS
>  	select TASK_DELAY_ACCT
>  	select PERF_EVENTS
> +	select HAVE_KVM_MSI
>  	---help---
>  	  Support hosting fully virtualized guest machines using hardware
>  	  virtualization extensions.  You will need a fairly recent
> diff --git a/include/linux/kvm.h b/include/linux/kvm.h
> index 7a9dd4b..2dc24ac 100644
> --- a/include/linux/kvm.h
> +++ b/include/linux/kvm.h
> @@ -590,6 +590,7 @@ struct kvm_ppc_pvinfo {
>  #define KVM_CAP_SYNC_REGS 74
>  #define KVM_CAP_PCI_2_3 75
>  #define KVM_CAP_KVMCLOCK_CTRL 76
> +#define KVM_CAP_SET_MSI 77
>  
>  #ifdef KVM_CAP_IRQ_ROUTING
>  
> @@ -715,6 +716,14 @@ struct kvm_one_reg {
>  	__u64 addr;
>  };
>  
> +struct kvm_msi {
> +	__u32 address_lo;
> +	__u32 address_hi;
> +	__u32 data;
> +	__u32 flags;
> +	__u8  pad[16];
> +};
> +
>  /*
>   * ioctls for VM fds
>   */
> @@ -789,6 +798,8 @@ struct kvm_s390_ucas_mapping {
>  /* Available with KVM_CAP_PCI_2_3 */
>  #define KVM_ASSIGN_SET_INTX_MASK  _IOW(KVMIO,  0xa4, \
>  				       struct kvm_assigned_pci_dev)
> +/* Available with KVM_CAP_SET_MSI */
> +#define KVM_SET_MSI               _IOW(KVMIO,  0xa5, struct kvm_msi)
>  
>  /*
>   * ioctls for vcpu fds
> diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
> index f63ccb0..28694f4 100644
> --- a/virt/kvm/Kconfig
> +++ b/virt/kvm/Kconfig
> @@ -18,3 +18,6 @@ config KVM_MMIO
>  
>  config KVM_ASYNC_PF
>         bool
> +
> +config HAVE_KVM_MSI
> +       bool
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index a612bc8..ea9887e 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -2063,6 +2063,24 @@ static long kvm_vm_ioctl(struct file *filp,
>  		mutex_unlock(&kvm->lock);
>  		break;
>  #endif
> +#ifdef CONFIG_HAVE_KVM_MSI
> +	case KVM_SET_MSI: {
> +		struct kvm_kernel_irq_routing_entry route;
> +		struct kvm_msi msi;
> +
> +		r = -EFAULT;
> +		if (copy_from_user(&msi, argp, sizeof msi))
> +			goto out;
> +		r = -EINVAL;
> +		if (!irqchip_in_kernel(kvm) || msi.flags != 0)
> +			goto out;
> +		route.msi.address_lo = msi.address_lo;
> +		route.msi.address_hi = msi.address_hi;
> +		route.msi.data = msi.data;
> +		r =  kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1);
> +		break;
> +	}
> +#endif
>  	default:
>  		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
>  		if (r == -ENOTTY)
> @@ -2191,6 +2209,9 @@ static long kvm_dev_ioctl_check_extension_generic(long arg)
>  	case KVM_CAP_SET_BOOT_CPU_ID:
>  #endif
>  	case KVM_CAP_INTERNAL_ERROR_DATA:
> +#ifdef CONFIG_HAVE_KVM_MSI
> +	case KVM_CAP_SET_MSI:
> +#endif
>  		return 1;
>  #ifdef CONFIG_HAVE_KVM_IRQCHIP
>  	case KVM_CAP_IRQ_ROUTING:
> -- 
> 1.7.3.4

^ permalink raw reply	[flat|nested] 43+ messages in thread

* Re: [PATCH v2] KVM: Introduce direct MSI message injection for in-kernel irqchips
  2012-03-29 15:39 ` Michael S. Tsirkin
@ 2012-03-29 15:43   ` Jan Kiszka
  0 siblings, 0 replies; 43+ messages in thread
From: Jan Kiszka @ 2012-03-29 15:43 UTC (permalink / raw)
  To: Michael S. Tsirkin; +Cc: Avi Kivity, Marcelo Tosatti, kvm

On 2012-03-29 17:39, Michael S. Tsirkin wrote:
> On Wed, Mar 28, 2012 at 07:47:31PM +0200, Jan Kiszka wrote:
>> Currently, MSI messages can only be injected to in-kernel irqchips by
>> defining a corresponding IRQ route for each message. This is not only
>> unhandy if the MSI messages are generated "on the fly" by user space,
>> IRQ routes are a limited resource that user space as to manage
>> carefully.
>>
>> By providing a direct injection with, we can both avoid using up limited
>> resources and simplify the necessary steps for user land.
>>
>> Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
> 
> Looks straight-forward to me. Others noted some
> documentation nits, so I know you are going
> to repost, anyway. When you do how about renaming
> SET_MSI -> SEND_MSI or SIGNAL_MSI ?
> We don't set anything, as such ...
> 
> I know we have kvm_set_msi internally but this is
> more or less a misnomer.

KVM_SET_MSI dates back to the idea to revoke an unfinished injection.
But I can also call it SIGNAL_MSI. Update will follow.

Jan

-- 
Siemens AG, Corporate Technology, CT T DE IT 1
Corporate Competence Center Embedded Linux

^ permalink raw reply	[flat|nested] 43+ messages in thread

* [PATCH v3] KVM: Introduce direct MSI message injection for in-kernel irqchips
  2012-03-28 17:47 [PATCH v2] KVM: Introduce direct MSI message injection for in-kernel irqchips Jan Kiszka
                   ` (2 preceding siblings ...)
  2012-03-29 15:39 ` Michael S. Tsirkin
@ 2012-03-29 16:15 ` Jan Kiszka
  2012-03-29 16:46   ` Michael S. Tsirkin
                     ` (3 more replies)
  3 siblings, 4 replies; 43+ messages in thread
From: Jan Kiszka @ 2012-03-29 16:15 UTC (permalink / raw)
  To: Avi Kivity, Marcelo Tosatti; +Cc: kvm, Michael S. Tsirkin, Eric Northup

Currently, MSI messages can only be injected to in-kernel irqchips by
defining a corresponding IRQ route for each message. This is not only
unhandy if the MSI messages are generated "on the fly" by user space,
IRQ routes are a limited resource that user space has to manage
carefully.

By providing a direct injection path, we can both avoid using up limited
resources and simplify the necessary steps for user land.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
---

Changes in v3:
 - align return code doc to reality
 - rename SET_MSI -> SIGNAL_MSI

 Documentation/virtual/kvm/api.txt |   21 +++++++++++++++++++++
 arch/x86/kvm/Kconfig              |    1 +
 include/linux/kvm.h               |   11 +++++++++++
 virt/kvm/Kconfig                  |    3 +++
 virt/kvm/kvm_main.c               |   21 +++++++++++++++++++++
 5 files changed, 57 insertions(+), 0 deletions(-)

diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 81ff39f..ed27d1b 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -1482,6 +1482,27 @@ See KVM_ASSIGN_DEV_IRQ for the data structure.  The target device is specified
 by assigned_dev_id.  In the flags field, only KVM_DEV_ASSIGN_MASK_INTX is
 evaluated.
 
+4.61 KVM_SIGNAL_MSI
+
+Capability: KVM_CAP_SIGNAL_MSI
+Architectures: x86
+Type: vm ioctl
+Parameters: struct kvm_msi (in)
+Returns: >0 on delivery, 0 if guest blocked the MSI, and -1 on error
+
+Directly inject a MSI message. Only valid with in-kernel irqchip that handles
+MSI messages.
+
+struct kvm_msi {
+	__u32 address_lo;
+	__u32 address_hi;
+	__u32 data;
+	__u32 flags;
+	__u8  pad[16];
+};
+
+No flags are defined so far. The corresponding field must be 0.
+
 4.62 KVM_CREATE_SPAPR_TCE
 
 Capability: KVM_CAP_SPAPR_TCE
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 1a7fe86..a28f338 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -36,6 +36,7 @@ config KVM
 	select TASKSTATS
 	select TASK_DELAY_ACCT
 	select PERF_EVENTS
+	select HAVE_KVM_MSI
 	---help---
 	  Support hosting fully virtualized guest machines using hardware
 	  virtualization extensions.  You will need a fairly recent
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 7a9dd4b..225b452 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -590,6 +590,7 @@ struct kvm_ppc_pvinfo {
 #define KVM_CAP_SYNC_REGS 74
 #define KVM_CAP_PCI_2_3 75
 #define KVM_CAP_KVMCLOCK_CTRL 76
+#define KVM_CAP_SIGNAL_MSI 77
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -715,6 +716,14 @@ struct kvm_one_reg {
 	__u64 addr;
 };
 
+struct kvm_msi {
+	__u32 address_lo;
+	__u32 address_hi;
+	__u32 data;
+	__u32 flags;
+	__u8  pad[16];
+};
+
 /*
  * ioctls for VM fds
  */
@@ -789,6 +798,8 @@ struct kvm_s390_ucas_mapping {
 /* Available with KVM_CAP_PCI_2_3 */
 #define KVM_ASSIGN_SET_INTX_MASK  _IOW(KVMIO,  0xa4, \
 				       struct kvm_assigned_pci_dev)
+/* Available with KVM_CAP_SIGNAL_MSI */
+#define KVM_SIGNAL_MSI            _IOW(KVMIO,  0xa5, struct kvm_msi)
 
 /*
  * ioctls for vcpu fds
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index f63ccb0..28694f4 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -18,3 +18,6 @@ config KVM_MMIO
 
 config KVM_ASYNC_PF
        bool
+
+config HAVE_KVM_MSI
+       bool
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index a612bc8..3aeb7ab 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2063,6 +2063,24 @@ static long kvm_vm_ioctl(struct file *filp,
 		mutex_unlock(&kvm->lock);
 		break;
 #endif
+#ifdef CONFIG_HAVE_KVM_MSI
+	case KVM_SIGNAL_MSI: {
+		struct kvm_kernel_irq_routing_entry route;
+		struct kvm_msi msi;
+
+		r = -EFAULT;
+		if (copy_from_user(&msi, argp, sizeof msi))
+			goto out;
+		r = -EINVAL;
+		if (!irqchip_in_kernel(kvm) || msi.flags != 0)
+			goto out;
+		route.msi.address_lo = msi.address_lo;
+		route.msi.address_hi = msi.address_hi;
+		route.msi.data = msi.data;
+		r =  kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1);
+		break;
+	}
+#endif
 	default:
 		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
 		if (r == -ENOTTY)
@@ -2191,6 +2209,9 @@ static long kvm_dev_ioctl_check_extension_generic(long arg)
 	case KVM_CAP_SET_BOOT_CPU_ID:
 #endif
 	case KVM_CAP_INTERNAL_ERROR_DATA:
+#ifdef CONFIG_HAVE_KVM_MSI
+	case KVM_CAP_SIGNAL_MSI:
+#endif
 		return 1;
 #ifdef CONFIG_HAVE_KVM_IRQCHIP
 	case KVM_CAP_IRQ_ROUTING:
-- 
1.7.3.4

^ permalink raw reply related	[flat|nested] 43+ messages in thread

* Re: [PATCH v3] KVM: Introduce direct MSI message injection for in-kernel irqchips
  2012-03-29 16:15 ` [PATCH v3] " Jan Kiszka
@ 2012-03-29 16:46   ` Michael S. Tsirkin
  2012-03-29 16:50     ` Jan Kiszka
  2012-03-29 18:25   ` Jan Kiszka
                     ` (2 subsequent siblings)
  3 siblings, 1 reply; 43+ messages in thread
From: Michael S. Tsirkin @ 2012-03-29 16:46 UTC (permalink / raw)
  To: Jan Kiszka; +Cc: Avi Kivity, Marcelo Tosatti, kvm, Eric Northup

On Thu, Mar 29, 2012 at 06:15:41PM +0200, Jan Kiszka wrote:
> Currently, MSI messages can only be injected to in-kernel irqchips by
> defining a corresponding IRQ route for each message. This is not only
> unhandy if the MSI messages are generated "on the fly" by user space,
> IRQ routes are a limited resource that user space as to manage


s/as/has/

> carefully.
> 
> By providing a direct injection path, we can both avoid using up limited
> resources and simplify the necessary steps for user land.
> 
> Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>

Further, we can ensure better, deterministic worst-case performance
for real-time guests with large number of vectors.

Acked-by: Michael S. Tsirkin <mst@redhat.com>

> ---
> 
> Changes in v3:
>  - align return code doc to reality
>  - rename SET_MSI -> SIGNAL_MSI
> 
>  Documentation/virtual/kvm/api.txt |   21 +++++++++++++++++++++
>  arch/x86/kvm/Kconfig              |    1 +
>  include/linux/kvm.h               |   11 +++++++++++
>  virt/kvm/Kconfig                  |    3 +++
>  virt/kvm/kvm_main.c               |   21 +++++++++++++++++++++
>  5 files changed, 57 insertions(+), 0 deletions(-)
> 
> diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
> index 81ff39f..ed27d1b 100644
> --- a/Documentation/virtual/kvm/api.txt
> +++ b/Documentation/virtual/kvm/api.txt
> @@ -1482,6 +1482,27 @@ See KVM_ASSIGN_DEV_IRQ for the data structure.  The target device is specified
>  by assigned_dev_id.  In the flags field, only KVM_DEV_ASSIGN_MASK_INTX is
>  evaluated.
>  
> +4.61 KVM_SIGNAL_MSI
> +
> +Capability: KVM_CAP_SIGNAL_MSI
> +Architectures: x86
> +Type: vm ioctl
> +Parameters: struct kvm_msi (in)
> +Returns: >0 on delivery, 0 if guest blocked the MSI, and -1 on error
> +
> +Directly inject a MSI message. Only valid with in-kernel irqchip that handles
> +MSI messages.
> +
> +struct kvm_msi {
> +	__u32 address_lo;
> +	__u32 address_hi;
> +	__u32 data;
> +	__u32 flags;
> +	__u8  pad[16];
> +};
> +
> +No flags are defined so far. The corresponding field must be 0.
> +
>  4.62 KVM_CREATE_SPAPR_TCE
>  
>  Capability: KVM_CAP_SPAPR_TCE
> diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
> index 1a7fe86..a28f338 100644
> --- a/arch/x86/kvm/Kconfig
> +++ b/arch/x86/kvm/Kconfig
> @@ -36,6 +36,7 @@ config KVM
>  	select TASKSTATS
>  	select TASK_DELAY_ACCT
>  	select PERF_EVENTS
> +	select HAVE_KVM_MSI
>  	---help---
>  	  Support hosting fully virtualized guest machines using hardware
>  	  virtualization extensions.  You will need a fairly recent
> diff --git a/include/linux/kvm.h b/include/linux/kvm.h
> index 7a9dd4b..225b452 100644
> --- a/include/linux/kvm.h
> +++ b/include/linux/kvm.h
> @@ -590,6 +590,7 @@ struct kvm_ppc_pvinfo {
>  #define KVM_CAP_SYNC_REGS 74
>  #define KVM_CAP_PCI_2_3 75
>  #define KVM_CAP_KVMCLOCK_CTRL 76
> +#define KVM_CAP_SIGNAL_MSI 77
>  
>  #ifdef KVM_CAP_IRQ_ROUTING
>  
> @@ -715,6 +716,14 @@ struct kvm_one_reg {
>  	__u64 addr;
>  };
>  
> +struct kvm_msi {
> +	__u32 address_lo;
> +	__u32 address_hi;
> +	__u32 data;
> +	__u32 flags;
> +	__u8  pad[16];
> +};
> +
>  /*
>   * ioctls for VM fds
>   */
> @@ -789,6 +798,8 @@ struct kvm_s390_ucas_mapping {
>  /* Available with KVM_CAP_PCI_2_3 */
>  #define KVM_ASSIGN_SET_INTX_MASK  _IOW(KVMIO,  0xa4, \
>  				       struct kvm_assigned_pci_dev)
> +/* Available with KVM_CAP_SIGNAL_MSI */
> +#define KVM_SIGNAL_MSI            _IOW(KVMIO,  0xa5, struct kvm_msi)
>  
>  /*
>   * ioctls for vcpu fds
> diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
> index f63ccb0..28694f4 100644
> --- a/virt/kvm/Kconfig
> +++ b/virt/kvm/Kconfig
> @@ -18,3 +18,6 @@ config KVM_MMIO
>  
>  config KVM_ASYNC_PF
>         bool
> +
> +config HAVE_KVM_MSI
> +       bool
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index a612bc8..3aeb7ab 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -2063,6 +2063,24 @@ static long kvm_vm_ioctl(struct file *filp,
>  		mutex_unlock(&kvm->lock);
>  		break;
>  #endif
> +#ifdef CONFIG_HAVE_KVM_MSI
> +	case KVM_SIGNAL_MSI: {
> +		struct kvm_kernel_irq_routing_entry route;
> +		struct kvm_msi msi;
> +
> +		r = -EFAULT;
> +		if (copy_from_user(&msi, argp, sizeof msi))
> +			goto out;
> +		r = -EINVAL;
> +		if (!irqchip_in_kernel(kvm) || msi.flags != 0)
> +			goto out;
> +		route.msi.address_lo = msi.address_lo;
> +		route.msi.address_hi = msi.address_hi;
> +		route.msi.data = msi.data;
> +		r =  kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1);
> +		break;
> +	}
> +#endif
>  	default:
>  		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
>  		if (r == -ENOTTY)
> @@ -2191,6 +2209,9 @@ static long kvm_dev_ioctl_check_extension_generic(long arg)
>  	case KVM_CAP_SET_BOOT_CPU_ID:
>  #endif
>  	case KVM_CAP_INTERNAL_ERROR_DATA:
> +#ifdef CONFIG_HAVE_KVM_MSI
> +	case KVM_CAP_SIGNAL_MSI:
> +#endif
>  		return 1;
>  #ifdef CONFIG_HAVE_KVM_IRQCHIP
>  	case KVM_CAP_IRQ_ROUTING:
> -- 
> 1.7.3.4

^ permalink raw reply	[flat|nested] 43+ messages in thread

* Re: [PATCH v3] KVM: Introduce direct MSI message injection for in-kernel irqchips
  2012-03-29 16:46   ` Michael S. Tsirkin
@ 2012-03-29 16:50     ` Jan Kiszka
  0 siblings, 0 replies; 43+ messages in thread
From: Jan Kiszka @ 2012-03-29 16:50 UTC (permalink / raw)
  To: Michael S. Tsirkin; +Cc: Avi Kivity, Marcelo Tosatti, kvm, Eric Northup

On 2012-03-29 18:46, Michael S. Tsirkin wrote:
> On Thu, Mar 29, 2012 at 06:15:41PM +0200, Jan Kiszka wrote:
>> Currently, MSI messages can only be injected to in-kernel irqchips by
>> defining a corresponding IRQ route for each message. This is not only
>> unhandy if the MSI messages are generated "on the fly" by user space,
>> IRQ routes are a limited resource that user space as to manage
> 
> 
> s/as/has/

Yeah, should be fixable on merge.

> 
>> carefully.
>>
>> By providing a direct injection path, we can both avoid using up limited
>> resources and simplify the necessary steps for user land.
>>
>> Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
> 
> Further, we can ensure better, deterministic worst-case performance
> for real-time guests with large number of vectors.

Good to hear that this is on your radar. :)

> 
> Acked-by: Michael S. Tsirkin <mst@redhat.com>
> 

Thanks,
Jan

-- 
Siemens AG, Corporate Technology, CT T DE IT 1
Corporate Competence Center Embedded Linux

^ permalink raw reply	[flat|nested] 43+ messages in thread

* Re: [PATCH v3] KVM: Introduce direct MSI message injection for in-kernel irqchips
  2012-03-29 16:15 ` [PATCH v3] " Jan Kiszka
  2012-03-29 16:46   ` Michael S. Tsirkin
@ 2012-03-29 18:25   ` Jan Kiszka
  2012-03-29 19:14   ` [PATCH v4] " Jan Kiszka
  2012-04-11 22:10   ` [PATCH v3] " Marcelo Tosatti
  3 siblings, 0 replies; 43+ messages in thread
From: Jan Kiszka @ 2012-03-29 18:25 UTC (permalink / raw)
  To: Avi Kivity, Marcelo Tosatti; +Cc: kvm, Michael S. Tsirkin, Eric Northup

On 2012-03-29 18:15, Jan Kiszka wrote:
> Currently, MSI messages can only be injected to in-kernel irqchips by
> defining a corresponding IRQ route for each message. This is not only
> unhandy if the MSI messages are generated "on the fly" by user space,
> IRQ routes are a limited resource that user space has to manage
> carefully.
> 
> By providing a direct injection path, we can both avoid using up limited
> resources and simplify the necessary steps for user land.
> 
> Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
> ---
> 
> Changes in v3:
>  - align return code doc to reality
>  - rename SET_MSI -> SIGNAL_MSI
> 
>  Documentation/virtual/kvm/api.txt |   21 +++++++++++++++++++++
>  arch/x86/kvm/Kconfig              |    1 +
>  include/linux/kvm.h               |   11 +++++++++++
>  virt/kvm/Kconfig                  |    3 +++
>  virt/kvm/kvm_main.c               |   21 +++++++++++++++++++++
>  5 files changed, 57 insertions(+), 0 deletions(-)
> 
> diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
> index 81ff39f..ed27d1b 100644
> --- a/Documentation/virtual/kvm/api.txt
> +++ b/Documentation/virtual/kvm/api.txt
> @@ -1482,6 +1482,27 @@ See KVM_ASSIGN_DEV_IRQ for the data structure.  The target device is specified
>  by assigned_dev_id.  In the flags field, only KVM_DEV_ASSIGN_MASK_INTX is
>  evaluated.
>  
> +4.61 KVM_SIGNAL_MSI
> +
> +Capability: KVM_CAP_SIGNAL_MSI
> +Architectures: x86
> +Type: vm ioctl
> +Parameters: struct kvm_msi (in)
> +Returns: >0 on delivery, 0 if guest blocked the MSI, and -1 on error
> +
> +Directly inject a MSI message. Only valid with in-kernel irqchip that handles
> +MSI messages.
> +
> +struct kvm_msi {
> +	__u32 address_lo;
> +	__u32 address_hi;
> +	__u32 data;
> +	__u32 flags;
> +	__u8  pad[16];
> +};
> +
> +No flags are defined so far. The corresponding field must be 0.
> +
>  4.62 KVM_CREATE_SPAPR_TCE
>  
>  Capability: KVM_CAP_SPAPR_TCE
> diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
> index 1a7fe86..a28f338 100644
> --- a/arch/x86/kvm/Kconfig
> +++ b/arch/x86/kvm/Kconfig
> @@ -36,6 +36,7 @@ config KVM
>  	select TASKSTATS
>  	select TASK_DELAY_ACCT
>  	select PERF_EVENTS
> +	select HAVE_KVM_MSI
>  	---help---
>  	  Support hosting fully virtualized guest machines using hardware
>  	  virtualization extensions.  You will need a fairly recent
> diff --git a/include/linux/kvm.h b/include/linux/kvm.h
> index 7a9dd4b..225b452 100644
> --- a/include/linux/kvm.h
> +++ b/include/linux/kvm.h
> @@ -590,6 +590,7 @@ struct kvm_ppc_pvinfo {
>  #define KVM_CAP_SYNC_REGS 74
>  #define KVM_CAP_PCI_2_3 75
>  #define KVM_CAP_KVMCLOCK_CTRL 76
> +#define KVM_CAP_SIGNAL_MSI 77
>  
>  #ifdef KVM_CAP_IRQ_ROUTING
>  
> @@ -715,6 +716,14 @@ struct kvm_one_reg {
>  	__u64 addr;
>  };
>  
> +struct kvm_msi {
> +	__u32 address_lo;
> +	__u32 address_hi;
> +	__u32 data;
> +	__u32 flags;
> +	__u8  pad[16];
> +};
> +
>  /*
>   * ioctls for VM fds
>   */
> @@ -789,6 +798,8 @@ struct kvm_s390_ucas_mapping {
>  /* Available with KVM_CAP_PCI_2_3 */
>  #define KVM_ASSIGN_SET_INTX_MASK  _IOW(KVMIO,  0xa4, \
>  				       struct kvm_assigned_pci_dev)
> +/* Available with KVM_CAP_SIGNAL_MSI */
> +#define KVM_SIGNAL_MSI            _IOW(KVMIO,  0xa5, struct kvm_msi)
>  
>  /*
>   * ioctls for vcpu fds
> diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
> index f63ccb0..28694f4 100644
> --- a/virt/kvm/Kconfig
> +++ b/virt/kvm/Kconfig
> @@ -18,3 +18,6 @@ config KVM_MMIO
>  
>  config KVM_ASYNC_PF
>         bool
> +
> +config HAVE_KVM_MSI
> +       bool
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index a612bc8..3aeb7ab 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -2063,6 +2063,24 @@ static long kvm_vm_ioctl(struct file *filp,
>  		mutex_unlock(&kvm->lock);
>  		break;
>  #endif
> +#ifdef CONFIG_HAVE_KVM_MSI
> +	case KVM_SIGNAL_MSI: {
> +		struct kvm_kernel_irq_routing_entry route;
> +		struct kvm_msi msi;
> +
> +		r = -EFAULT;
> +		if (copy_from_user(&msi, argp, sizeof msi))
> +			goto out;
> +		r = -EINVAL;
> +		if (!irqchip_in_kernel(kvm) || msi.flags != 0)

Ouch, this doesn't build if you have the proper setup.

There is no arch-agnostic test for active irqchip support yet. I guess
I'll refactor irqchip_in_kernel to kvm_arch_irqchip_in_kernel.

Jan

> +			goto out;
> +		route.msi.address_lo = msi.address_lo;
> +		route.msi.address_hi = msi.address_hi;
> +		route.msi.data = msi.data;
> +		r =  kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1);
> +		break;
> +	}
> +#endif
>  	default:
>  		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
>  		if (r == -ENOTTY)
> @@ -2191,6 +2209,9 @@ static long kvm_dev_ioctl_check_extension_generic(long arg)
>  	case KVM_CAP_SET_BOOT_CPU_ID:
>  #endif
>  	case KVM_CAP_INTERNAL_ERROR_DATA:
> +#ifdef CONFIG_HAVE_KVM_MSI
> +	case KVM_CAP_SIGNAL_MSI:
> +#endif
>  		return 1;
>  #ifdef CONFIG_HAVE_KVM_IRQCHIP
>  	case KVM_CAP_IRQ_ROUTING:

-- 
Siemens AG, Corporate Technology, CT T DE IT 1
Corporate Competence Center Embedded Linux

^ permalink raw reply	[flat|nested] 43+ messages in thread

* [PATCH v4] KVM: Introduce direct MSI message injection for in-kernel irqchips
  2012-03-29 16:15 ` [PATCH v3] " Jan Kiszka
  2012-03-29 16:46   ` Michael S. Tsirkin
  2012-03-29 18:25   ` Jan Kiszka
@ 2012-03-29 19:14   ` Jan Kiszka
  2012-03-29 19:41     ` Michael S. Tsirkin
                       ` (2 more replies)
  2012-04-11 22:10   ` [PATCH v3] " Marcelo Tosatti
  3 siblings, 3 replies; 43+ messages in thread
From: Jan Kiszka @ 2012-03-29 19:14 UTC (permalink / raw)
  To: Avi Kivity, Marcelo Tosatti; +Cc: kvm, Michael S. Tsirkin, Eric Northup

Currently, MSI messages can only be injected to in-kernel irqchips by
defining a corresponding IRQ route for each message. This is not only
unhandy if the MSI messages are generated "on the fly" by user space,
IRQ routes are a limited resource that user space has to manage
carefully.

By providing a direct injection path, we can both avoid using up limited
resources and simplify the necessary steps for user land.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
---

Changes in v4:
 - Fix the build by factoring out kvm_send_userspace_msi:
   irqchip_in_kernel is not generically available. But abstracting it
   for all arch is tricky and therefore left to the poor people who have
   to introduce non-x86 irqchip support to this x86-focused corner.

Lesson (probably not) learned: Never underestimate the complexity of
trivial changes.

 Documentation/virtual/kvm/api.txt |   21 +++++++++++++++++++++
 arch/x86/kvm/Kconfig              |    1 +
 include/linux/kvm.h               |   11 +++++++++++
 include/linux/kvm_host.h          |    2 ++
 virt/kvm/Kconfig                  |    3 +++
 virt/kvm/irq_comm.c               |   14 ++++++++++++++
 virt/kvm/kvm_main.c               |   14 ++++++++++++++
 7 files changed, 66 insertions(+), 0 deletions(-)

diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 81ff39f..ed27d1b 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -1482,6 +1482,27 @@ See KVM_ASSIGN_DEV_IRQ for the data structure.  The target device is specified
 by assigned_dev_id.  In the flags field, only KVM_DEV_ASSIGN_MASK_INTX is
 evaluated.
 
+4.61 KVM_SIGNAL_MSI
+
+Capability: KVM_CAP_SIGNAL_MSI
+Architectures: x86
+Type: vm ioctl
+Parameters: struct kvm_msi (in)
+Returns: >0 on delivery, 0 if guest blocked the MSI, and -1 on error
+
+Directly inject a MSI message. Only valid with in-kernel irqchip that handles
+MSI messages.
+
+struct kvm_msi {
+	__u32 address_lo;
+	__u32 address_hi;
+	__u32 data;
+	__u32 flags;
+	__u8  pad[16];
+};
+
+No flags are defined so far. The corresponding field must be 0.
+
 4.62 KVM_CREATE_SPAPR_TCE
 
 Capability: KVM_CAP_SPAPR_TCE
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 1a7fe86..a28f338 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -36,6 +36,7 @@ config KVM
 	select TASKSTATS
 	select TASK_DELAY_ACCT
 	select PERF_EVENTS
+	select HAVE_KVM_MSI
 	---help---
 	  Support hosting fully virtualized guest machines using hardware
 	  virtualization extensions.  You will need a fairly recent
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 7a9dd4b..225b452 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -590,6 +590,7 @@ struct kvm_ppc_pvinfo {
 #define KVM_CAP_SYNC_REGS 74
 #define KVM_CAP_PCI_2_3 75
 #define KVM_CAP_KVMCLOCK_CTRL 76
+#define KVM_CAP_SIGNAL_MSI 77
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -715,6 +716,14 @@ struct kvm_one_reg {
 	__u64 addr;
 };
 
+struct kvm_msi {
+	__u32 address_lo;
+	__u32 address_hi;
+	__u32 data;
+	__u32 flags;
+	__u8  pad[16];
+};
+
 /*
  * ioctls for VM fds
  */
@@ -789,6 +798,8 @@ struct kvm_s390_ucas_mapping {
 /* Available with KVM_CAP_PCI_2_3 */
 #define KVM_ASSIGN_SET_INTX_MASK  _IOW(KVMIO,  0xa4, \
 				       struct kvm_assigned_pci_dev)
+/* Available with KVM_CAP_SIGNAL_MSI */
+#define KVM_SIGNAL_MSI            _IOW(KVMIO,  0xa5, struct kvm_msi)
 
 /*
  * ioctls for vcpu fds
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 6cf158c..35c69d6 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -774,6 +774,8 @@ int kvm_set_irq_routing(struct kvm *kvm,
 			unsigned flags);
 void kvm_free_irq_routing(struct kvm *kvm);
 
+int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
+
 #else
 
 static inline void kvm_free_irq_routing(struct kvm *kvm) {}
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index f63ccb0..28694f4 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -18,3 +18,6 @@ config KVM_MMIO
 
 config KVM_ASYNC_PF
        bool
+
+config HAVE_KVM_MSI
+       bool
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index 9f614b4..a6a0365 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -138,6 +138,20 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
 	return kvm_irq_delivery_to_apic(kvm, NULL, &irq);
 }
 
+int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
+{
+	struct kvm_kernel_irq_routing_entry route;
+
+	if (!irqchip_in_kernel(kvm) || msi->flags != 0)
+		return -EINVAL;
+
+	route.msi.address_lo = msi->address_lo;
+	route.msi.address_hi = msi->address_hi;
+	route.msi.data = msi->data;
+
+	return kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1);
+}
+
 /*
  * Return value:
  *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index a612bc8..5addfa8 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2063,6 +2063,17 @@ static long kvm_vm_ioctl(struct file *filp,
 		mutex_unlock(&kvm->lock);
 		break;
 #endif
+#ifdef CONFIG_HAVE_KVM_MSI
+	case KVM_SIGNAL_MSI: {
+		struct kvm_msi msi;
+
+		r = -EFAULT;
+		if (copy_from_user(&msi, argp, sizeof msi))
+			goto out;
+		r = kvm_send_userspace_msi(kvm, &msi);
+		break;
+	}
+#endif
 	default:
 		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
 		if (r == -ENOTTY)
@@ -2191,6 +2202,9 @@ static long kvm_dev_ioctl_check_extension_generic(long arg)
 	case KVM_CAP_SET_BOOT_CPU_ID:
 #endif
 	case KVM_CAP_INTERNAL_ERROR_DATA:
+#ifdef CONFIG_HAVE_KVM_MSI
+	case KVM_CAP_SIGNAL_MSI:
+#endif
 		return 1;
 #ifdef CONFIG_HAVE_KVM_IRQCHIP
 	case KVM_CAP_IRQ_ROUTING:
-- 
1.7.3.4

^ permalink raw reply related	[flat|nested] 43+ messages in thread

* Re: [PATCH v4] KVM: Introduce direct MSI message injection for in-kernel irqchips
  2012-03-29 19:14   ` [PATCH v4] " Jan Kiszka
@ 2012-03-29 19:41     ` Michael S. Tsirkin
  2012-03-30  7:45       ` Jan Kiszka
  2012-04-03 16:27     ` Avi Kivity
  2012-04-24 11:57     ` [PATCH v4] KVM: Introduce direct MSI message " Avi Kivity
  2 siblings, 1 reply; 43+ messages in thread
From: Michael S. Tsirkin @ 2012-03-29 19:41 UTC (permalink / raw)
  To: Jan Kiszka; +Cc: Avi Kivity, Marcelo Tosatti, kvm, Eric Northup

On Thu, Mar 29, 2012 at 09:14:12PM +0200, Jan Kiszka wrote:
> Currently, MSI messages can only be injected to in-kernel irqchips by
> defining a corresponding IRQ route for each message. This is not only
> unhandy if the MSI messages are generated "on the fly" by user space,
> IRQ routes are a limited resource that user space has to manage
> carefully.
> 
> By providing a direct injection path, we can both avoid using up limited
> resources and simplify the necessary steps for user land.
> 
> Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
> ---
> 
> Changes in v4:
>  - Fix the build by factoring out kvm_send_userspace_msi:
>    irqchip_in_kernel is not generically available. But abstracting it
>    for all arch is tricky and therefore left to the poor people who have
>    to introduce non-x86 irqchip support to this x86-focused corner.
> 
> Lesson (probably not) learned: Never underestimate the complexity of
> trivial changes.
> 
>  Documentation/virtual/kvm/api.txt |   21 +++++++++++++++++++++
>  arch/x86/kvm/Kconfig              |    1 +
>  include/linux/kvm.h               |   11 +++++++++++
>  include/linux/kvm_host.h          |    2 ++
>  virt/kvm/Kconfig                  |    3 +++
>  virt/kvm/irq_comm.c               |   14 ++++++++++++++
>  virt/kvm/kvm_main.c               |   14 ++++++++++++++
>  7 files changed, 66 insertions(+), 0 deletions(-)
> 
> diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
> index 81ff39f..ed27d1b 100644
> --- a/Documentation/virtual/kvm/api.txt
> +++ b/Documentation/virtual/kvm/api.txt
> @@ -1482,6 +1482,27 @@ See KVM_ASSIGN_DEV_IRQ for the data structure.  The target device is specified
>  by assigned_dev_id.  In the flags field, only KVM_DEV_ASSIGN_MASK_INTX is
>  evaluated.
>  
> +4.61 KVM_SIGNAL_MSI
> +
> +Capability: KVM_CAP_SIGNAL_MSI
> +Architectures: x86
> +Type: vm ioctl
> +Parameters: struct kvm_msi (in)
> +Returns: >0 on delivery, 0 if guest blocked the MSI, and -1 on error
> +
> +Directly inject a MSI message. Only valid with in-kernel irqchip that handles
> +MSI messages.
> +
> +struct kvm_msi {
> +	__u32 address_lo;
> +	__u32 address_hi;
> +	__u32 data;
> +	__u32 flags;
> +	__u8  pad[16];
> +};
> +
> +No flags are defined so far. The corresponding field must be 0.
> +
>  4.62 KVM_CREATE_SPAPR_TCE
>  
>  Capability: KVM_CAP_SPAPR_TCE
> diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
> index 1a7fe86..a28f338 100644
> --- a/arch/x86/kvm/Kconfig
> +++ b/arch/x86/kvm/Kconfig
> @@ -36,6 +36,7 @@ config KVM
>  	select TASKSTATS
>  	select TASK_DELAY_ACCT
>  	select PERF_EVENTS
> +	select HAVE_KVM_MSI
>  	---help---
>  	  Support hosting fully virtualized guest machines using hardware
>  	  virtualization extensions.  You will need a fairly recent
> diff --git a/include/linux/kvm.h b/include/linux/kvm.h
> index 7a9dd4b..225b452 100644
> --- a/include/linux/kvm.h
> +++ b/include/linux/kvm.h
> @@ -590,6 +590,7 @@ struct kvm_ppc_pvinfo {
>  #define KVM_CAP_SYNC_REGS 74
>  #define KVM_CAP_PCI_2_3 75
>  #define KVM_CAP_KVMCLOCK_CTRL 76
> +#define KVM_CAP_SIGNAL_MSI 77
>  
>  #ifdef KVM_CAP_IRQ_ROUTING
>  
> @@ -715,6 +716,14 @@ struct kvm_one_reg {
>  	__u64 addr;
>  };
>  
> +struct kvm_msi {
> +	__u32 address_lo;
> +	__u32 address_hi;
> +	__u32 data;
> +	__u32 flags;
> +	__u8  pad[16];
> +};
> +
>  /*
>   * ioctls for VM fds
>   */
> @@ -789,6 +798,8 @@ struct kvm_s390_ucas_mapping {
>  /* Available with KVM_CAP_PCI_2_3 */
>  #define KVM_ASSIGN_SET_INTX_MASK  _IOW(KVMIO,  0xa4, \
>  				       struct kvm_assigned_pci_dev)
> +/* Available with KVM_CAP_SIGNAL_MSI */
> +#define KVM_SIGNAL_MSI            _IOW(KVMIO,  0xa5, struct kvm_msi)
>  
>  /*
>   * ioctls for vcpu fds
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index 6cf158c..35c69d6 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -774,6 +774,8 @@ int kvm_set_irq_routing(struct kvm *kvm,
>  			unsigned flags);
>  void kvm_free_irq_routing(struct kvm *kvm);
>  
> +int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
> +
>  #else
>  
>  static inline void kvm_free_irq_routing(struct kvm *kvm) {}
> diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
> index f63ccb0..28694f4 100644
> --- a/virt/kvm/Kconfig
> +++ b/virt/kvm/Kconfig
> @@ -18,3 +18,6 @@ config KVM_MMIO
>  
>  config KVM_ASYNC_PF
>         bool
> +
> +config HAVE_KVM_MSI
> +       bool
> diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
> index 9f614b4..a6a0365 100644
> --- a/virt/kvm/irq_comm.c
> +++ b/virt/kvm/irq_comm.c
> @@ -138,6 +138,20 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
>  	return kvm_irq_delivery_to_apic(kvm, NULL, &irq);
>  }
>  
> +int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
> +{
> +	struct kvm_kernel_irq_routing_entry route;
> +
> +	if (!irqchip_in_kernel(kvm) || msi->flags != 0)
> +		return -EINVAL;
> +
> +	route.msi.address_lo = msi->address_lo;
> +	route.msi.address_hi = msi->address_hi;
> +	route.msi.data = msi->data;
> +
> +	return kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1);
> +}
> +
>  /*
>   * Return value:
>   *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index a612bc8..5addfa8 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -2063,6 +2063,17 @@ static long kvm_vm_ioctl(struct file *filp,
>  		mutex_unlock(&kvm->lock);
>  		break;
>  #endif
> +#ifdef CONFIG_HAVE_KVM_MSI
> +	case KVM_SIGNAL_MSI: {
> +		struct kvm_msi msi;
> +
> +		r = -EFAULT;
> +		if (copy_from_user(&msi, argp, sizeof msi))
> +			goto out;

Wait a second, we should validate flags: you say
"this field should be 0" so we should check it.

> +		r = kvm_send_userspace_msi(kvm, &msi);
> +		break;
> +	}
> +#endif
>  	default:
>  		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
>  		if (r == -ENOTTY)
> @@ -2191,6 +2202,9 @@ static long kvm_dev_ioctl_check_extension_generic(long arg)
>  	case KVM_CAP_SET_BOOT_CPU_ID:
>  #endif
>  	case KVM_CAP_INTERNAL_ERROR_DATA:
> +#ifdef CONFIG_HAVE_KVM_MSI
> +	case KVM_CAP_SIGNAL_MSI:
> +#endif
>  		return 1;
>  #ifdef CONFIG_HAVE_KVM_IRQCHIP
>  	case KVM_CAP_IRQ_ROUTING:
> -- 
> 1.7.3.4

^ permalink raw reply	[flat|nested] 43+ messages in thread

* Re: [PATCH v4] KVM: Introduce direct MSI message injection for in-kernel irqchips
  2012-03-29 19:41     ` Michael S. Tsirkin
@ 2012-03-30  7:45       ` Jan Kiszka
  2012-03-30 12:45         ` Michael S. Tsirkin
  0 siblings, 1 reply; 43+ messages in thread
From: Jan Kiszka @ 2012-03-30  7:45 UTC (permalink / raw)
  To: Michael S. Tsirkin; +Cc: Avi Kivity, Marcelo Tosatti, kvm, Eric Northup

On 2012-03-29 21:41, Michael S. Tsirkin wrote:
> On Thu, Mar 29, 2012 at 09:14:12PM +0200, Jan Kiszka wrote:
>> Currently, MSI messages can only be injected to in-kernel irqchips by
>> defining a corresponding IRQ route for each message. This is not only
>> unhandy if the MSI messages are generated "on the fly" by user space,
>> IRQ routes are a limited resource that user space has to manage
>> carefully.
>>
>> By providing a direct injection path, we can both avoid using up limited
>> resources and simplify the necessary steps for user land.
>>
>> Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
>> ---
>>
>> Changes in v4:
>>  - Fix the build by factoring out kvm_send_userspace_msi:
>>    irqchip_in_kernel is not generically available. But abstracting it
>>    for all arch is tricky and therefore left to the poor people who have
>>    to introduce non-x86 irqchip support to this x86-focused corner.
>>
>> Lesson (probably not) learned: Never underestimate the complexity of
>> trivial changes.
>>
>>  Documentation/virtual/kvm/api.txt |   21 +++++++++++++++++++++
>>  arch/x86/kvm/Kconfig              |    1 +
>>  include/linux/kvm.h               |   11 +++++++++++
>>  include/linux/kvm_host.h          |    2 ++
>>  virt/kvm/Kconfig                  |    3 +++
>>  virt/kvm/irq_comm.c               |   14 ++++++++++++++
>>  virt/kvm/kvm_main.c               |   14 ++++++++++++++
>>  7 files changed, 66 insertions(+), 0 deletions(-)
>>
>> diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
>> index 81ff39f..ed27d1b 100644
>> --- a/Documentation/virtual/kvm/api.txt
>> +++ b/Documentation/virtual/kvm/api.txt
>> @@ -1482,6 +1482,27 @@ See KVM_ASSIGN_DEV_IRQ for the data structure.  The target device is specified
>>  by assigned_dev_id.  In the flags field, only KVM_DEV_ASSIGN_MASK_INTX is
>>  evaluated.
>>  
>> +4.61 KVM_SIGNAL_MSI
>> +
>> +Capability: KVM_CAP_SIGNAL_MSI
>> +Architectures: x86
>> +Type: vm ioctl
>> +Parameters: struct kvm_msi (in)
>> +Returns: >0 on delivery, 0 if guest blocked the MSI, and -1 on error
>> +
>> +Directly inject a MSI message. Only valid with in-kernel irqchip that handles
>> +MSI messages.
>> +
>> +struct kvm_msi {
>> +	__u32 address_lo;
>> +	__u32 address_hi;
>> +	__u32 data;
>> +	__u32 flags;
>> +	__u8  pad[16];
>> +};
>> +
>> +No flags are defined so far. The corresponding field must be 0.
>> +
>>  4.62 KVM_CREATE_SPAPR_TCE
>>  
>>  Capability: KVM_CAP_SPAPR_TCE
>> diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
>> index 1a7fe86..a28f338 100644
>> --- a/arch/x86/kvm/Kconfig
>> +++ b/arch/x86/kvm/Kconfig
>> @@ -36,6 +36,7 @@ config KVM
>>  	select TASKSTATS
>>  	select TASK_DELAY_ACCT
>>  	select PERF_EVENTS
>> +	select HAVE_KVM_MSI
>>  	---help---
>>  	  Support hosting fully virtualized guest machines using hardware
>>  	  virtualization extensions.  You will need a fairly recent
>> diff --git a/include/linux/kvm.h b/include/linux/kvm.h
>> index 7a9dd4b..225b452 100644
>> --- a/include/linux/kvm.h
>> +++ b/include/linux/kvm.h
>> @@ -590,6 +590,7 @@ struct kvm_ppc_pvinfo {
>>  #define KVM_CAP_SYNC_REGS 74
>>  #define KVM_CAP_PCI_2_3 75
>>  #define KVM_CAP_KVMCLOCK_CTRL 76
>> +#define KVM_CAP_SIGNAL_MSI 77
>>  
>>  #ifdef KVM_CAP_IRQ_ROUTING
>>  
>> @@ -715,6 +716,14 @@ struct kvm_one_reg {
>>  	__u64 addr;
>>  };
>>  
>> +struct kvm_msi {
>> +	__u32 address_lo;
>> +	__u32 address_hi;
>> +	__u32 data;
>> +	__u32 flags;
>> +	__u8  pad[16];
>> +};
>> +
>>  /*
>>   * ioctls for VM fds
>>   */
>> @@ -789,6 +798,8 @@ struct kvm_s390_ucas_mapping {
>>  /* Available with KVM_CAP_PCI_2_3 */
>>  #define KVM_ASSIGN_SET_INTX_MASK  _IOW(KVMIO,  0xa4, \
>>  				       struct kvm_assigned_pci_dev)
>> +/* Available with KVM_CAP_SIGNAL_MSI */
>> +#define KVM_SIGNAL_MSI            _IOW(KVMIO,  0xa5, struct kvm_msi)
>>  
>>  /*
>>   * ioctls for vcpu fds
>> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
>> index 6cf158c..35c69d6 100644
>> --- a/include/linux/kvm_host.h
>> +++ b/include/linux/kvm_host.h
>> @@ -774,6 +774,8 @@ int kvm_set_irq_routing(struct kvm *kvm,
>>  			unsigned flags);
>>  void kvm_free_irq_routing(struct kvm *kvm);
>>  
>> +int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
>> +
>>  #else
>>  
>>  static inline void kvm_free_irq_routing(struct kvm *kvm) {}
>> diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
>> index f63ccb0..28694f4 100644
>> --- a/virt/kvm/Kconfig
>> +++ b/virt/kvm/Kconfig
>> @@ -18,3 +18,6 @@ config KVM_MMIO
>>  
>>  config KVM_ASYNC_PF
>>         bool
>> +
>> +config HAVE_KVM_MSI
>> +       bool
>> diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
>> index 9f614b4..a6a0365 100644
>> --- a/virt/kvm/irq_comm.c
>> +++ b/virt/kvm/irq_comm.c
>> @@ -138,6 +138,20 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
>>  	return kvm_irq_delivery_to_apic(kvm, NULL, &irq);
>>  }
>>  
>> +int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
>> +{
>> +	struct kvm_kernel_irq_routing_entry route;
>> +
>> +	if (!irqchip_in_kernel(kvm) || msi->flags != 0)
>> +		return -EINVAL;
>> +
>> +	route.msi.address_lo = msi->address_lo;
>> +	route.msi.address_hi = msi->address_hi;
>> +	route.msi.data = msi->data;
>> +
>> +	return kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1);
>> +}
>> +
>>  /*
>>   * Return value:
>>   *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
>> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
>> index a612bc8..5addfa8 100644
>> --- a/virt/kvm/kvm_main.c
>> +++ b/virt/kvm/kvm_main.c
>> @@ -2063,6 +2063,17 @@ static long kvm_vm_ioctl(struct file *filp,
>>  		mutex_unlock(&kvm->lock);
>>  		break;
>>  #endif
>> +#ifdef CONFIG_HAVE_KVM_MSI
>> +	case KVM_SIGNAL_MSI: {
>> +		struct kvm_msi msi;
>> +
>> +		r = -EFAULT;
>> +		if (copy_from_user(&msi, argp, sizeof msi))
>> +			goto out;
> 
> Wait a second, we should validate flags: you say
> "this field should be 0" so we should check it.

Yep, see kvm_send_userspace_msi.

Jan

> 
>> +		r = kvm_send_userspace_msi(kvm, &msi);
>> +		break;
>> +	}
>> +#endif
>>  	default:
>>  		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
>>  		if (r == -ENOTTY)
>> @@ -2191,6 +2202,9 @@ static long kvm_dev_ioctl_check_extension_generic(long arg)
>>  	case KVM_CAP_SET_BOOT_CPU_ID:
>>  #endif
>>  	case KVM_CAP_INTERNAL_ERROR_DATA:
>> +#ifdef CONFIG_HAVE_KVM_MSI
>> +	case KVM_CAP_SIGNAL_MSI:
>> +#endif
>>  		return 1;
>>  #ifdef CONFIG_HAVE_KVM_IRQCHIP
>>  	case KVM_CAP_IRQ_ROUTING:
>> -- 
>> 1.7.3.4

-- 
Siemens AG, Corporate Technology, CT T DE IT 1
Corporate Competence Center Embedded Linux

^ permalink raw reply	[flat|nested] 43+ messages in thread

* Re: [PATCH v4] KVM: Introduce direct MSI message injection for in-kernel irqchips
  2012-03-30  7:45       ` Jan Kiszka
@ 2012-03-30 12:45         ` Michael S. Tsirkin
  0 siblings, 0 replies; 43+ messages in thread
From: Michael S. Tsirkin @ 2012-03-30 12:45 UTC (permalink / raw)
  To: Jan Kiszka; +Cc: Avi Kivity, Marcelo Tosatti, kvm, Eric Northup

On Fri, Mar 30, 2012 at 09:45:35AM +0200, Jan Kiszka wrote:
> On 2012-03-29 21:41, Michael S. Tsirkin wrote:
> > On Thu, Mar 29, 2012 at 09:14:12PM +0200, Jan Kiszka wrote:
> >> Currently, MSI messages can only be injected to in-kernel irqchips by
> >> defining a corresponding IRQ route for each message. This is not only
> >> unhandy if the MSI messages are generated "on the fly" by user space,
> >> IRQ routes are a limited resource that user space has to manage
> >> carefully.
> >>
> >> By providing a direct injection path, we can both avoid using up limited
> >> resources and simplify the necessary steps for user land.
> >>
> >> Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
> >> ---
> >>
> >> Changes in v4:
> >>  - Fix the build by factoring out kvm_send_userspace_msi:
> >>    irqchip_in_kernel is not generically available. But abstracting it
> >>    for all arch is tricky and therefore left to the poor people who have
> >>    to introduce non-x86 irqchip support to this x86-focused corner.
> >>
> >> Lesson (probably not) learned: Never underestimate the complexity of
> >> trivial changes.
> >>
> >>  Documentation/virtual/kvm/api.txt |   21 +++++++++++++++++++++
> >>  arch/x86/kvm/Kconfig              |    1 +
> >>  include/linux/kvm.h               |   11 +++++++++++
> >>  include/linux/kvm_host.h          |    2 ++
> >>  virt/kvm/Kconfig                  |    3 +++
> >>  virt/kvm/irq_comm.c               |   14 ++++++++++++++
> >>  virt/kvm/kvm_main.c               |   14 ++++++++++++++
> >>  7 files changed, 66 insertions(+), 0 deletions(-)
> >>
> >> diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
> >> index 81ff39f..ed27d1b 100644
> >> --- a/Documentation/virtual/kvm/api.txt
> >> +++ b/Documentation/virtual/kvm/api.txt
> >> @@ -1482,6 +1482,27 @@ See KVM_ASSIGN_DEV_IRQ for the data structure.  The target device is specified
> >>  by assigned_dev_id.  In the flags field, only KVM_DEV_ASSIGN_MASK_INTX is
> >>  evaluated.
> >>  
> >> +4.61 KVM_SIGNAL_MSI
> >> +
> >> +Capability: KVM_CAP_SIGNAL_MSI
> >> +Architectures: x86
> >> +Type: vm ioctl
> >> +Parameters: struct kvm_msi (in)
> >> +Returns: >0 on delivery, 0 if guest blocked the MSI, and -1 on error
> >> +
> >> +Directly inject a MSI message. Only valid with in-kernel irqchip that handles
> >> +MSI messages.
> >> +
> >> +struct kvm_msi {
> >> +	__u32 address_lo;
> >> +	__u32 address_hi;
> >> +	__u32 data;
> >> +	__u32 flags;
> >> +	__u8  pad[16];
> >> +};
> >> +
> >> +No flags are defined so far. The corresponding field must be 0.
> >> +
> >>  4.62 KVM_CREATE_SPAPR_TCE
> >>  
> >>  Capability: KVM_CAP_SPAPR_TCE
> >> diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
> >> index 1a7fe86..a28f338 100644
> >> --- a/arch/x86/kvm/Kconfig
> >> +++ b/arch/x86/kvm/Kconfig
> >> @@ -36,6 +36,7 @@ config KVM
> >>  	select TASKSTATS
> >>  	select TASK_DELAY_ACCT
> >>  	select PERF_EVENTS
> >> +	select HAVE_KVM_MSI
> >>  	---help---
> >>  	  Support hosting fully virtualized guest machines using hardware
> >>  	  virtualization extensions.  You will need a fairly recent
> >> diff --git a/include/linux/kvm.h b/include/linux/kvm.h
> >> index 7a9dd4b..225b452 100644
> >> --- a/include/linux/kvm.h
> >> +++ b/include/linux/kvm.h
> >> @@ -590,6 +590,7 @@ struct kvm_ppc_pvinfo {
> >>  #define KVM_CAP_SYNC_REGS 74
> >>  #define KVM_CAP_PCI_2_3 75
> >>  #define KVM_CAP_KVMCLOCK_CTRL 76
> >> +#define KVM_CAP_SIGNAL_MSI 77
> >>  
> >>  #ifdef KVM_CAP_IRQ_ROUTING
> >>  
> >> @@ -715,6 +716,14 @@ struct kvm_one_reg {
> >>  	__u64 addr;
> >>  };
> >>  
> >> +struct kvm_msi {
> >> +	__u32 address_lo;
> >> +	__u32 address_hi;
> >> +	__u32 data;
> >> +	__u32 flags;
> >> +	__u8  pad[16];
> >> +};
> >> +
> >>  /*
> >>   * ioctls for VM fds
> >>   */
> >> @@ -789,6 +798,8 @@ struct kvm_s390_ucas_mapping {
> >>  /* Available with KVM_CAP_PCI_2_3 */
> >>  #define KVM_ASSIGN_SET_INTX_MASK  _IOW(KVMIO,  0xa4, \
> >>  				       struct kvm_assigned_pci_dev)
> >> +/* Available with KVM_CAP_SIGNAL_MSI */
> >> +#define KVM_SIGNAL_MSI            _IOW(KVMIO,  0xa5, struct kvm_msi)
> >>  
> >>  /*
> >>   * ioctls for vcpu fds
> >> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> >> index 6cf158c..35c69d6 100644
> >> --- a/include/linux/kvm_host.h
> >> +++ b/include/linux/kvm_host.h
> >> @@ -774,6 +774,8 @@ int kvm_set_irq_routing(struct kvm *kvm,
> >>  			unsigned flags);
> >>  void kvm_free_irq_routing(struct kvm *kvm);
> >>  
> >> +int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
> >> +
> >>  #else
> >>  
> >>  static inline void kvm_free_irq_routing(struct kvm *kvm) {}
> >> diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
> >> index f63ccb0..28694f4 100644
> >> --- a/virt/kvm/Kconfig
> >> +++ b/virt/kvm/Kconfig
> >> @@ -18,3 +18,6 @@ config KVM_MMIO
> >>  
> >>  config KVM_ASYNC_PF
> >>         bool
> >> +
> >> +config HAVE_KVM_MSI
> >> +       bool
> >> diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
> >> index 9f614b4..a6a0365 100644
> >> --- a/virt/kvm/irq_comm.c
> >> +++ b/virt/kvm/irq_comm.c
> >> @@ -138,6 +138,20 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
> >>  	return kvm_irq_delivery_to_apic(kvm, NULL, &irq);
> >>  }
> >>  
> >> +int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
> >> +{
> >> +	struct kvm_kernel_irq_routing_entry route;
> >> +
> >> +	if (!irqchip_in_kernel(kvm) || msi->flags != 0)
> >> +		return -EINVAL;
> >> +
> >> +	route.msi.address_lo = msi->address_lo;
> >> +	route.msi.address_hi = msi->address_hi;
> >> +	route.msi.data = msi->data;
> >> +
> >> +	return kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1);
> >> +}
> >> +
> >>  /*
> >>   * Return value:
> >>   *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
> >> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> >> index a612bc8..5addfa8 100644
> >> --- a/virt/kvm/kvm_main.c
> >> +++ b/virt/kvm/kvm_main.c
> >> @@ -2063,6 +2063,17 @@ static long kvm_vm_ioctl(struct file *filp,
> >>  		mutex_unlock(&kvm->lock);
> >>  		break;
> >>  #endif
> >> +#ifdef CONFIG_HAVE_KVM_MSI
> >> +	case KVM_SIGNAL_MSI: {
> >> +		struct kvm_msi msi;
> >> +
> >> +		r = -EFAULT;
> >> +		if (copy_from_user(&msi, argp, sizeof msi))
> >> +			goto out;
> > 
> > Wait a second, we should validate flags: you say
> > "this field should be 0" so we should check it.
> 
> Yep, see kvm_send_userspace_msi.
> 
> Jan

Right, missed this. Thanks.

> > 
> >> +		r = kvm_send_userspace_msi(kvm, &msi);
> >> +		break;
> >> +	}
> >> +#endif
> >>  	default:
> >>  		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
> >>  		if (r == -ENOTTY)
> >> @@ -2191,6 +2202,9 @@ static long kvm_dev_ioctl_check_extension_generic(long arg)
> >>  	case KVM_CAP_SET_BOOT_CPU_ID:
> >>  #endif
> >>  	case KVM_CAP_INTERNAL_ERROR_DATA:
> >> +#ifdef CONFIG_HAVE_KVM_MSI
> >> +	case KVM_CAP_SIGNAL_MSI:
> >> +#endif
> >>  		return 1;
> >>  #ifdef CONFIG_HAVE_KVM_IRQCHIP
> >>  	case KVM_CAP_IRQ_ROUTING:
> >> -- 
> >> 1.7.3.4
> 
> -- 
> Siemens AG, Corporate Technology, CT T DE IT 1
> Corporate Competence Center Embedded Linux

^ permalink raw reply	[flat|nested] 43+ messages in thread

* Re: [PATCH v4] KVM: Introduce direct MSI message injection for in-kernel irqchips
  2012-03-29 19:14   ` [PATCH v4] " Jan Kiszka
  2012-03-29 19:41     ` Michael S. Tsirkin
@ 2012-04-03 16:27     ` Avi Kivity
  2012-04-03 16:47       ` Jan Kiszka
  2012-04-10 18:30       ` [PATCH] KVM: Introduce generic interrupt " Jan Kiszka
  2012-04-24 11:57     ` [PATCH v4] KVM: Introduce direct MSI message " Avi Kivity
  2 siblings, 2 replies; 43+ messages in thread
From: Avi Kivity @ 2012-04-03 16:27 UTC (permalink / raw)
  To: Jan Kiszka; +Cc: Marcelo Tosatti, kvm, Michael S. Tsirkin, Eric Northup

On 03/29/2012 09:14 PM, Jan Kiszka wrote:
> Currently, MSI messages can only be injected to in-kernel irqchips by
> defining a corresponding IRQ route for each message. This is not only
> unhandy if the MSI messages are generated "on the fly" by user space,
> IRQ routes are a limited resource that user space has to manage
> carefully.
>
> By providing a direct injection path, we can both avoid using up limited
> resources and simplify the necessary steps for user land.
>
> diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
> index 81ff39f..ed27d1b 100644
> --- a/Documentation/virtual/kvm/api.txt
> +++ b/Documentation/virtual/kvm/api.txt
> @@ -1482,6 +1482,27 @@ See KVM_ASSIGN_DEV_IRQ for the data structure.  The target device is specified
>  by assigned_dev_id.  In the flags field, only KVM_DEV_ASSIGN_MASK_INTX is
>  evaluated.
>  
> +4.61 KVM_SIGNAL_MSI
> +
> +Capability: KVM_CAP_SIGNAL_MSI
> +Architectures: x86
> +Type: vm ioctl
> +Parameters: struct kvm_msi (in)
> +Returns: >0 on delivery, 0 if guest blocked the MSI, and -1 on error
> +
> +Directly inject a MSI message. Only valid with in-kernel irqchip that handles
> +MSI messages.
> +
> +struct kvm_msi {
> +	__u32 address_lo;
> +	__u32 address_hi;
> +	__u32 data;
> +	__u32 flags;
> +	__u8  pad[16];
> +};
> +
> +No flags are defined so far. The corresponding field must be 0.
>

There are two ways in which this can be generalized:

struct kvm_general_irq {
      __u32 type; // line | MSI
      __u32 op;  // raise/lower/trigger
      union {
                 ... line;
                 struct kvm_msi msi;
      }
};

so we have a single ioctl for all interrupt handling.  This allows
eventual removal of the line-oriented ioctls.

The other alternative is to have a dma interface, similar to the kvm_run
mmio interface but with the kernel acting as destination.  The advantage
here is that we can handle dma from a device to any kernel-emulated
device, not just the APIC MSI range.  A downside is that we can't return
values related to interrupt coalescing.

A performance note: delivering an interrupt needs to search all vcpus
for an APIC ID match.  The previous plan was to cache (or pre-calculate)
this lookup in the irq routing table.  Now it looks like we'll need a
separate cache for this.

(yes, I said on the call I don't anticipate objections but preparing to
apply a patch always triggers more critical thinking)

-- 
error compiling committee.c: too many arguments to function


^ permalink raw reply	[flat|nested] 43+ messages in thread

* Re: [PATCH v4] KVM: Introduce direct MSI message injection for in-kernel irqchips
  2012-04-03 16:27     ` Avi Kivity
@ 2012-04-03 16:47       ` Jan Kiszka
  2012-04-03 16:54         ` Avi Kivity
  2012-04-04  8:38         ` Michael S. Tsirkin
  2012-04-10 18:30       ` [PATCH] KVM: Introduce generic interrupt " Jan Kiszka
  1 sibling, 2 replies; 43+ messages in thread
From: Jan Kiszka @ 2012-04-03 16:47 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Marcelo Tosatti, kvm, Michael S. Tsirkin, Eric Northup

On 2012-04-03 18:27, Avi Kivity wrote:
> On 03/29/2012 09:14 PM, Jan Kiszka wrote:
>> Currently, MSI messages can only be injected to in-kernel irqchips by
>> defining a corresponding IRQ route for each message. This is not only
>> unhandy if the MSI messages are generated "on the fly" by user space,
>> IRQ routes are a limited resource that user space has to manage
>> carefully.
>>
>> By providing a direct injection path, we can both avoid using up limited
>> resources and simplify the necessary steps for user land.
>>
>> diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
>> index 81ff39f..ed27d1b 100644
>> --- a/Documentation/virtual/kvm/api.txt
>> +++ b/Documentation/virtual/kvm/api.txt
>> @@ -1482,6 +1482,27 @@ See KVM_ASSIGN_DEV_IRQ for the data structure.  The target device is specified
>>  by assigned_dev_id.  In the flags field, only KVM_DEV_ASSIGN_MASK_INTX is
>>  evaluated.
>>  
>> +4.61 KVM_SIGNAL_MSI
>> +
>> +Capability: KVM_CAP_SIGNAL_MSI
>> +Architectures: x86
>> +Type: vm ioctl
>> +Parameters: struct kvm_msi (in)
>> +Returns: >0 on delivery, 0 if guest blocked the MSI, and -1 on error
>> +
>> +Directly inject a MSI message. Only valid with in-kernel irqchip that handles
>> +MSI messages.
>> +
>> +struct kvm_msi {
>> +	__u32 address_lo;
>> +	__u32 address_hi;
>> +	__u32 data;
>> +	__u32 flags;
>> +	__u8  pad[16];
>> +};
>> +
>> +No flags are defined so far. The corresponding field must be 0.
>>
> 
> There are two ways in which this can be generalized:
> 
> struct kvm_general_irq {
>       __u32 type; // line | MSI
>       __u32 op;  // raise/lower/trigger
>       union {
>                  ... line;
>                  struct kvm_msi msi;
>       }
> };
> 
> so we have a single ioctl for all interrupt handling.  This allows
> eventual removal of the line-oriented ioctls.
> 
> The other alternative is to have a dma interface, similar to the kvm_run
> mmio interface but with the kernel acting as destination.  The advantage
> here is that we can handle dma from a device to any kernel-emulated
> device, not just the APIC MSI range.  A downside is that we can't return
> values related to interrupt coalescing.

Due to lacking injection feedback, I'm in favor of option 1. Will have a
look.

> 
> A performance note: delivering an interrupt needs to search all vcpus
> for an APIC ID match.  The previous plan was to cache (or pre-calculate)
> this lookup in the irq routing table.  Now it looks like we'll need a
> separate cache for this.

As this is non-existent until today, we don't regress here. And it can
still be added on top later on, transparently.

> 
> (yes, I said on the call I don't anticipate objections but preparing to
> apply a patch always triggers more critical thinking)
> 

Well, we make progress, though slower than I was hoping. :)

Jan

-- 
Siemens AG, Corporate Technology, CT T DE IT 1
Corporate Competence Center Embedded Linux

^ permalink raw reply	[flat|nested] 43+ messages in thread

* Re: [PATCH v4] KVM: Introduce direct MSI message injection for in-kernel irqchips
  2012-04-03 16:47       ` Jan Kiszka
@ 2012-04-03 16:54         ` Avi Kivity
  2012-04-03 17:24           ` Jan Kiszka
  2012-04-04  8:38         ` Michael S. Tsirkin
  1 sibling, 1 reply; 43+ messages in thread
From: Avi Kivity @ 2012-04-03 16:54 UTC (permalink / raw)
  To: Jan Kiszka; +Cc: Marcelo Tosatti, kvm, Michael S. Tsirkin, Eric Northup

On 04/03/2012 07:47 PM, Jan Kiszka wrote:
> > 
> > so we have a single ioctl for all interrupt handling.  This allows
> > eventual removal of the line-oriented ioctls.
> > 
> > The other alternative is to have a dma interface, similar to the kvm_run
> > mmio interface but with the kernel acting as destination.  The advantage
> > here is that we can handle dma from a device to any kernel-emulated
> > device, not just the APIC MSI range.  A downside is that we can't return
> > values related to interrupt coalescing.
>
> Due to lacking injection feedback, I'm in favor of option 1. Will have a
> look.

I wonder if we can create a side channel for it.  Lack of a kernel DMA
API is a hole in the current code, though we haven't been bitten by it
yet.  An example is a guest that is swapping its own page tables; right
now the shadow mmu doesn't notice those writes (when the page tables are
swapped in) and will deliver incorrect results.  Of course no guest does
that, so it doesn't happen in practice.

> > 
> > A performance note: delivering an interrupt needs to search all vcpus
> > for an APIC ID match.  The previous plan was to cache (or pre-calculate)
> > this lookup in the irq routing table.  Now it looks like we'll need a
> > separate cache for this.
>
> As this is non-existent until today, we don't regress here. And it can
> still be added on top later on, transparently.

Yes, it's just a note, not an objection.  The cache lookup will be
slower than the gsi lookup (hash table vs. array) but still O(1) vs. the
current O(n).

-- 
error compiling committee.c: too many arguments to function


^ permalink raw reply	[flat|nested] 43+ messages in thread

* Re: [PATCH v4] KVM: Introduce direct MSI message injection for in-kernel irqchips
  2012-04-03 16:54         ` Avi Kivity
@ 2012-04-03 17:24           ` Jan Kiszka
  2012-04-04  8:47             ` Avi Kivity
  0 siblings, 1 reply; 43+ messages in thread
From: Jan Kiszka @ 2012-04-03 17:24 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Marcelo Tosatti, kvm, Michael S. Tsirkin, Eric Northup

On 2012-04-03 18:54, Avi Kivity wrote:
> On 04/03/2012 07:47 PM, Jan Kiszka wrote:
>>>
>>> so we have a single ioctl for all interrupt handling.  This allows
>>> eventual removal of the line-oriented ioctls.
>>>
>>> The other alternative is to have a dma interface, similar to the kvm_run
>>> mmio interface but with the kernel acting as destination.  The advantage
>>> here is that we can handle dma from a device to any kernel-emulated
>>> device, not just the APIC MSI range.  A downside is that we can't return
>>> values related to interrupt coalescing.
>>
>> Due to lacking injection feedback, I'm in favor of option 1. Will have a
>> look.
> 
> I wonder if we can create a side channel for it.  Lack of a kernel DMA
> API is a hole in the current code, though we haven't been bitten by it
> yet.  An example is a guest that is swapping its own page tables; right
> now the shadow mmu doesn't notice those writes (when the page tables are
> swapped in) and will deliver incorrect results.  Of course no guest does
> that, so it doesn't happen in practice.
> 
>>>
>>> A performance note: delivering an interrupt needs to search all vcpus
>>> for an APIC ID match.  The previous plan was to cache (or pre-calculate)
>>> this lookup in the irq routing table.  Now it looks like we'll need a
>>> separate cache for this.
>>
>> As this is non-existent until today, we don't regress here. And it can
>> still be added on top later on, transparently.
> 
> Yes, it's just a note, not an objection.  The cache lookup will be
> slower than the gsi lookup (hash table vs. array) but still O(1) vs. the
> current O(n).

If you are concerned about performance in this path, wouldn't a DMA
interface for MSI injection be counterproductive?

Jan

-- 
Siemens AG, Corporate Technology, CT T DE IT 1
Corporate Competence Center Embedded Linux

^ permalink raw reply	[flat|nested] 43+ messages in thread

* Re: [PATCH v4] KVM: Introduce direct MSI message injection for in-kernel irqchips
  2012-04-03 16:47       ` Jan Kiszka
  2012-04-03 16:54         ` Avi Kivity
@ 2012-04-04  8:38         ` Michael S. Tsirkin
  2012-04-04  8:44           ` Avi Kivity
  1 sibling, 1 reply; 43+ messages in thread
From: Michael S. Tsirkin @ 2012-04-04  8:38 UTC (permalink / raw)
  To: Jan Kiszka; +Cc: Avi Kivity, Marcelo Tosatti, kvm, Eric Northup

On Tue, Apr 03, 2012 at 06:47:49PM +0200, Jan Kiszka wrote:
> On 2012-04-03 18:27, Avi Kivity wrote:
> > On 03/29/2012 09:14 PM, Jan Kiszka wrote:
> >> Currently, MSI messages can only be injected to in-kernel irqchips by
> >> defining a corresponding IRQ route for each message. This is not only
> >> unhandy if the MSI messages are generated "on the fly" by user space,
> >> IRQ routes are a limited resource that user space has to manage
> >> carefully.
> >>
> >> By providing a direct injection path, we can both avoid using up limited
> >> resources and simplify the necessary steps for user land.
> >>
> >> diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
> >> index 81ff39f..ed27d1b 100644
> >> --- a/Documentation/virtual/kvm/api.txt
> >> +++ b/Documentation/virtual/kvm/api.txt
> >> @@ -1482,6 +1482,27 @@ See KVM_ASSIGN_DEV_IRQ for the data structure.  The target device is specified
> >>  by assigned_dev_id.  In the flags field, only KVM_DEV_ASSIGN_MASK_INTX is
> >>  evaluated.
> >>  
> >> +4.61 KVM_SIGNAL_MSI
> >> +
> >> +Capability: KVM_CAP_SIGNAL_MSI
> >> +Architectures: x86
> >> +Type: vm ioctl
> >> +Parameters: struct kvm_msi (in)
> >> +Returns: >0 on delivery, 0 if guest blocked the MSI, and -1 on error
> >> +
> >> +Directly inject a MSI message. Only valid with in-kernel irqchip that handles
> >> +MSI messages.
> >> +
> >> +struct kvm_msi {
> >> +	__u32 address_lo;
> >> +	__u32 address_hi;
> >> +	__u32 data;
> >> +	__u32 flags;
> >> +	__u8  pad[16];
> >> +};
> >> +
> >> +No flags are defined so far. The corresponding field must be 0.
> >>
> > 
> > There are two ways in which this can be generalized:
> > 
> > struct kvm_general_irq {
> >       __u32 type; // line | MSI
> >       __u32 op;  // raise/lower/trigger
> >       union {
> >                  ... line;
> >                  struct kvm_msi msi;
> >       }
> > };
> > 
> > so we have a single ioctl for all interrupt handling.  This allows
> > eventual removal of the line-oriented ioctls.
> > 
> > The other alternative is to have a dma interface, similar to the kvm_run
> > mmio interface but with the kernel acting as destination.  The advantage
> > here is that we can handle dma from a device to any kernel-emulated
> > device, not just the APIC MSI range.  A downside is that we can't return
> > values related to interrupt coalescing.
> 
> Due to lacking injection feedback, I'm in favor of option 1. Will have a
> look.
> 
> > 
> > A performance note: delivering an interrupt needs to search all vcpus
> > for an APIC ID match.  The previous plan was to cache (or pre-calculate)
> > this lookup in the irq routing table.  Now it looks like we'll need a
> > separate cache for this.
> 
> As this is non-existent until today, we don't regress here. And it can
> still be added on top later on, transparently.

I always worry about hash collisions and the cost of
calculating good hash functions.

We could instead return an index in the cache on injection, maintain in
userspace and use it for fast path on the next injection.
Will make it easy to use an array index instead of a hash here,
and fall back to a slower ID lookup on mismatch.

Until we do have this fast path we can just fill this value with zeros,
so kernel patch (almost) does not need to change for this -
just the header.

> > 
> > (yes, I said on the call I don't anticipate objections but preparing to
> > apply a patch always triggers more critical thinking)
> > 
> 
> Well, we make progress, though slower than I was hoping. :)
> 
> Jan
> 
> -- 
> Siemens AG, Corporate Technology, CT T DE IT 1
> Corporate Competence Center Embedded Linux

^ permalink raw reply	[flat|nested] 43+ messages in thread

* Re: [PATCH v4] KVM: Introduce direct MSI message injection for in-kernel irqchips
  2012-04-04  8:38         ` Michael S. Tsirkin
@ 2012-04-04  8:44           ` Avi Kivity
  2012-04-04  8:53             ` Michael S. Tsirkin
  0 siblings, 1 reply; 43+ messages in thread
From: Avi Kivity @ 2012-04-04  8:44 UTC (permalink / raw)
  To: Michael S. Tsirkin; +Cc: Jan Kiszka, Marcelo Tosatti, kvm, Eric Northup

On 04/04/2012 11:38 AM, Michael S. Tsirkin wrote:
> > 
> > > 
> > > A performance note: delivering an interrupt needs to search all vcpus
> > > for an APIC ID match.  The previous plan was to cache (or pre-calculate)
> > > this lookup in the irq routing table.  Now it looks like we'll need a
> > > separate cache for this.
> > 
> > As this is non-existent until today, we don't regress here. And it can
> > still be added on top later on, transparently.
>
> I always worry about hash collisions and the cost of
> calculating good hash functions.
>
> We could instead return an index in the cache on injection, maintain in
> userspace and use it for fast path on the next injection.

Ahem, that is almost the existing routing table to a T.

> Will make it easy to use an array index instead of a hash here,
> and fallback to a slower ID lookup on mismatch.

Need a free ioctl so we can reuse IDs.

> Until we do have this fast path we can just fill this value with zeros,
> so kernel patch (almost) does not need to change for this -
> just the header.

Partially implemented interfaces invite breakage.


-- 
I have a truly marvellous patch that fixes the bug which this
signature is too narrow to contain.


^ permalink raw reply	[flat|nested] 43+ messages in thread

* Re: [PATCH v4] KVM: Introduce direct MSI message injection for in-kernel irqchips
  2012-04-03 17:24           ` Jan Kiszka
@ 2012-04-04  8:47             ` Avi Kivity
  0 siblings, 0 replies; 43+ messages in thread
From: Avi Kivity @ 2012-04-04  8:47 UTC (permalink / raw)
  To: Jan Kiszka; +Cc: Marcelo Tosatti, kvm, Michael S. Tsirkin, Eric Northup

On 04/03/2012 08:24 PM, Jan Kiszka wrote:
> > 
> >>>
> >>> A performance note: delivering an interrupt needs to search all vcpus
> >>> for an APIC ID match.  The previous plan was to cache (or pre-calculate)
> >>> this lookup in the irq routing table.  Now it looks like we'll need a
> >>> separate cache for this.
> >>
> >> As this is non-existent until today, we don't regress here. And it can
> >> still be added on top later on, transparently.
> > 
> > Yes, it's just a note, not an objection.  The cache lookup will be
> > slower than the gsi lookup (hash table vs. array) but still O(1) vs. the
> > current O(n).
>
> If you are concerned about performance in this path, wouldn't a DMA
> interface for MSI injection be counterproductive?

Yes, it would.  The lack of coalescing reporting support is also
problematic.  I just mentioned this idea as food for thought.

-- 
I have a truly marvellous patch that fixes the bug which this
signature is too narrow to contain.


^ permalink raw reply	[flat|nested] 43+ messages in thread

* Re: [PATCH v4] KVM: Introduce direct MSI message injection for in-kernel irqchips
  2012-04-04  8:44           ` Avi Kivity
@ 2012-04-04  8:53             ` Michael S. Tsirkin
  2012-04-04  9:22               ` Jan Kiszka
  0 siblings, 1 reply; 43+ messages in thread
From: Michael S. Tsirkin @ 2012-04-04  8:53 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Jan Kiszka, Marcelo Tosatti, kvm, Eric Northup

On Wed, Apr 04, 2012 at 11:44:23AM +0300, Avi Kivity wrote:
> On 04/04/2012 11:38 AM, Michael S. Tsirkin wrote:
> > > 
> > > > 
> > > > A performance note: delivering an interrupt needs to search all vcpus
> > > > for an APIC ID match.  The previous plan was to cache (or pre-calculate)
> > > > this lookup in the irq routing table.  Now it looks like we'll need a
> > > > separate cache for this.
> > > 
> > > As this is non-existent until today, we don't regress here. And it can
> > > still be added on top later on, transparently.
> >
> > I always worry about hash collisions and the cost of
> > calculating good hash functions.
> >
> > We could instead return an index in the cache on injection, maintain in
> > userspace and use it for fast path on the next injection.
> 
> Ahem, that is almost the existing routing table to a T.
> 
> > Will make it easy to use an array index instead of a hash here,
> > and fallback to a slower ID lookup on mismatch.
> 
> Need a free ioctl so we can reuse IDs.

No, it could be kernel controlled not userspace controlled. We get both
an address and an index:

if (table[u.i].addr == u.addr && table[u.i].data == u.data) {
	return table[u.i].id;
}

u.i = find_lru_idx(&table);
table[u.i].addr = u.addr;
table[u.i].data = u.data;
table[u.i].id = find_id(u.addr, u.data);
return table[u.i].id;


> > Until we do have this fast path we can just fill this value with zeros,
> > so kernel patch (almost) does not need to change for this -
> > just the header.
> 
> Partially implemented interfaces invite breakage.

Hmm true. OK scrap this idea then, it's not clear
whether we are going to optimize this anyway.

> 
> -- 
> I have a truly marvellous patch that fixes the bug which this
> signature is too narrow to contain.

^ permalink raw reply	[flat|nested] 43+ messages in thread

* Re: [PATCH v4] KVM: Introduce direct MSI message injection for in-kernel irqchips
  2012-04-04  8:53             ` Michael S. Tsirkin
@ 2012-04-04  9:22               ` Jan Kiszka
  2012-04-04  9:36                 ` Avi Kivity
  0 siblings, 1 reply; 43+ messages in thread
From: Jan Kiszka @ 2012-04-04  9:22 UTC (permalink / raw)
  To: Michael S. Tsirkin; +Cc: Avi Kivity, Marcelo Tosatti, kvm, Eric Northup

On 2012-04-04 10:53, Michael S. Tsirkin wrote:
> On Wed, Apr 04, 2012 at 11:44:23AM +0300, Avi Kivity wrote:
>> On 04/04/2012 11:38 AM, Michael S. Tsirkin wrote:
>>>>
>>>>>
>>>>> A performance note: delivering an interrupt needs to search all vcpus
>>>>> for an APIC ID match.  The previous plan was to cache (or pre-calculate)
>>>>> this lookup in the irq routing table.  Now it looks like we'll need a
>>>>> separate cache for this.
>>>>
>>>> As this is non-existent until today, we don't regress here. And it can
>>>> still be added on top later on, transparently.
>>>
>>> I always worry about hash collisions and the cost of
>>> calculating good hash functions.
>>>
>>> We could instead return an index in the cache on injection, maintain in
>>> userspace and use it for fast path on the next injection.
>>
>> Ahem, that is almost the existing routing table to a T.
>>
>>> Will make it easy to use an array index instead of a hash here,
>>> and fallback to a slower ID lookup on mismatch.
>>
>> Need a free ioctl so we can reuse IDs.
> 
> No, it could be kernel controlled not userspace controlled. We get both
> and address and an index:
> 
> if (table[u.i].addr == u.addr && table[u.i].data == u.data) {
> 	return table[u.i].id;
> }
> 
> u.i = find_lru_idx(&table);
> table[u.i].addr = u.addr;
> table[u.i].data = u.data;
> table[u.i].id = find_id(u.addr, u.data);
> return table[u.i].id;
> 
> 
>>> Until we do have this fast path we can just fill this value with zeros,
>>> so kernel patch (almost) does not need to change for this -
>>> just the header.
>>
>> Partially implemented interfaces invite breakage.
> 
> Hmm true. OK scrap this idea then, it's not clear
> whether we are going to optimize this anyway.
> 

Also, the problem is that keeping that ID in userspace requires an
infrastructure like the MSIRoutingCache that I proposed originally. Not
much gained w.r.t. invasiveness there. So we should really do the routing
optimization in the kernel - one day.

Jan

-- 
Siemens AG, Corporate Technology, CT T DE IT 1
Corporate Competence Center Embedded Linux

^ permalink raw reply	[flat|nested] 43+ messages in thread

* Re: [PATCH v4] KVM: Introduce direct MSI message injection for in-kernel irqchips
  2012-04-04  9:22               ` Jan Kiszka
@ 2012-04-04  9:36                 ` Avi Kivity
  2012-04-04  9:38                   ` Jan Kiszka
  0 siblings, 1 reply; 43+ messages in thread
From: Avi Kivity @ 2012-04-04  9:36 UTC (permalink / raw)
  To: Jan Kiszka; +Cc: Michael S. Tsirkin, Marcelo Tosatti, kvm, Eric Northup

On 04/04/2012 12:22 PM, Jan Kiszka wrote:
> > 
> >>> Until we do have this fast path we can just fill this value with zeros,
> >>> so kernel patch (almost) does not need to change for this -
> >>> just the header.
> >>
> >> Partially implemented interfaces invite breakage.
> > 
> > Hmm true. OK scrap this idea then, it's not clear
> > whether we are going to optimize this anyway.
> > 
>
> Also, the problem is that keeping that ID in userspace requires an
> infrastructure like the MSIRoutingCache that I proposed originally. Not
> much won /wrt invasiveness there. 

Internal qemu refactorings are not a driver for kvm interface changes.

> So we should really do the routing
> optimization in the kernel - one day.

No, we need to make a choice:

explicit handles: array lookup, more expensive setup
no handles: hash lookup, more expensive, but no setup, and no artificial
limits

-- 
I have a truly marvellous patch that fixes the bug which this
signature is too narrow to contain.


^ permalink raw reply	[flat|nested] 43+ messages in thread

* Re: [PATCH v4] KVM: Introduce direct MSI message injection for in-kernel irqchips
  2012-04-04  9:36                 ` Avi Kivity
@ 2012-04-04  9:38                   ` Jan Kiszka
  2012-04-04  9:55                     ` Avi Kivity
  0 siblings, 1 reply; 43+ messages in thread
From: Jan Kiszka @ 2012-04-04  9:38 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Michael S. Tsirkin, Marcelo Tosatti, kvm, Eric Northup

On 2012-04-04 11:36, Avi Kivity wrote:
> On 04/04/2012 12:22 PM, Jan Kiszka wrote:
>>>
>>>>> Until we do have this fast path we can just fill this value with zeros,
>>>>> so kernel patch (almost) does not need to change for this -
>>>>> just the header.
>>>>
>>>> Partially implemented interfaces invite breakage.
>>>
>>> Hmm true. OK scrap this idea then, it's not clear
>>> whether we are going to optimize this anyway.
>>>
>>
>> Also, the problem is that keeping that ID in userspace requires an
>> infrastructure like the MSIRoutingCache that I proposed originally. Not
>> much won /wrt invasiveness there. 
> 
> Internal qemu refactorings are not a driver for kvm interface changes.

No, but qemu demonstrates the applicability and handiness of the kernel
interfaces.

> 
>> So we should really do the routing
>> optimization in the kernel - one day.
> 
> No, we need to make a choice:
> 
> explicit handles: array lookup, more expensive setup
> no handles: hash loopup, more expensive, but no setup, and no artificial
> limits

...and I think we should head for option 2.

Jan

-- 
Siemens AG, Corporate Technology, CT T DE IT 1
Corporate Competence Center Embedded Linux

^ permalink raw reply	[flat|nested] 43+ messages in thread

* Re: [PATCH v4] KVM: Introduce direct MSI message injection for in-kernel irqchips
  2012-04-04  9:38                   ` Jan Kiszka
@ 2012-04-04  9:55                     ` Avi Kivity
  2012-04-04 10:48                       ` Jan Kiszka
  0 siblings, 1 reply; 43+ messages in thread
From: Avi Kivity @ 2012-04-04  9:55 UTC (permalink / raw)
  To: Jan Kiszka; +Cc: Michael S. Tsirkin, Marcelo Tosatti, kvm, Eric Northup

On 04/04/2012 12:38 PM, Jan Kiszka wrote:
> On 2012-04-04 11:36, Avi Kivity wrote:
> > On 04/04/2012 12:22 PM, Jan Kiszka wrote:
> >>>
> >>>>> Until we do have this fast path we can just fill this value with zeros,
> >>>>> so kernel patch (almost) does not need to change for this -
> >>>>> just the header.
> >>>>
> >>>> Partially implemented interfaces invite breakage.
> >>>
> >>> Hmm true. OK scrap this idea then, it's not clear
> >>> whether we are going to optimize this anyway.
> >>>
> >>
> >> Also, the problem is that keeping that ID in userspace requires an
> >> infrastructure like the MSIRoutingCache that I proposed originally. Not
> >> much won /wrt invasiveness there. 
> > 
> > Internal qemu refactorings are not a driver for kvm interface changes.
>
> No, but qemu demonstrates the applicability and handiness of the kernel
> interfaces.

True.

> > 
> >> So we should really do the routing
> >> optimization in the kernel - one day.
> > 
> > No, we need to make a choice:
> > 
> > explicit handles: array lookup, more expensive setup
> > no handles: hash loopup, more expensive, but no setup, and no artificial
> > limits
>
> ...and I think we should head for option 2.

I'm not so sure anymore.  Sorry about the U turn, but remind me why?  In
the long term it will be slower.

-- 
I have a truly marvellous patch that fixes the bug which this
signature is too narrow to contain.


^ permalink raw reply	[flat|nested] 43+ messages in thread

* Re: [PATCH v4] KVM: Introduce direct MSI message injection for in-kernel irqchips
  2012-04-04  9:55                     ` Avi Kivity
@ 2012-04-04 10:48                       ` Jan Kiszka
  2012-04-04 11:50                         ` Avi Kivity
  0 siblings, 1 reply; 43+ messages in thread
From: Jan Kiszka @ 2012-04-04 10:48 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Michael S. Tsirkin, Marcelo Tosatti, kvm, Eric Northup

On 2012-04-04 11:55, Avi Kivity wrote:
> On 04/04/2012 12:38 PM, Jan Kiszka wrote:
>> On 2012-04-04 11:36, Avi Kivity wrote:
>>> On 04/04/2012 12:22 PM, Jan Kiszka wrote:
>>>>>
>>>>>>> Until we do have this fast path we can just fill this value with zeros,
>>>>>>> so kernel patch (almost) does not need to change for this -
>>>>>>> just the header.
>>>>>>
>>>>>> Partially implemented interfaces invite breakage.
>>>>>
>>>>> Hmm true. OK scrap this idea then, it's not clear
>>>>> whether we are going to optimize this anyway.
>>>>>
>>>>
>>>> Also, the problem is that keeping that ID in userspace requires an
>>>> infrastructure like the MSIRoutingCache that I proposed originally. Not
>>>> much won /wrt invasiveness there. 
>>>
>>> Internal qemu refactorings are not a driver for kvm interface changes.
>>
>> No, but qemu demonstrates the applicability and handiness of the kernel
>> interfaces.
> 
> True.
> 
>>>
>>>> So we should really do the routing
>>>> optimization in the kernel - one day.
>>>
>>> No, we need to make a choice:
>>>
>>> explicit handles: array lookup, more expensive setup
>>> no handles: hash loopup, more expensive, but no setup, and no artificial
>>> limits
>>
>> ...and I think we should head for option 2.
> 
> I'm not so sure anymore.  Sorry about the U turn, but remind me why?  In
> the long term it will be slower.

Likely not measurably slower. If you look at a message through the arch
glasses, you can usually spot the destination directly, specifically if
a message targets a single processor - no need for hashing and table
lookups in the common case.

In contrast, the maintenance costs for the current explicit route based
model are significant as we see now.

Jan

-- 
Siemens AG, Corporate Technology, CT T DE IT 1
Corporate Competence Center Embedded Linux

^ permalink raw reply	[flat|nested] 43+ messages in thread

* Re: [PATCH v4] KVM: Introduce direct MSI message injection for in-kernel irqchips
  2012-04-04 10:48                       ` Jan Kiszka
@ 2012-04-04 11:50                         ` Avi Kivity
  2012-04-04 12:01                           ` Jan Kiszka
  0 siblings, 1 reply; 43+ messages in thread
From: Avi Kivity @ 2012-04-04 11:50 UTC (permalink / raw)
  To: Jan Kiszka; +Cc: Michael S. Tsirkin, Marcelo Tosatti, kvm, Eric Northup

On 04/04/2012 01:48 PM, Jan Kiszka wrote:
> > 
> > I'm not so sure anymore.  Sorry about the U turn, but remind me why?  In
> > the long term it will be slower.
>
> Likely not measurably slower. If you look at a message through the arch
> glasses, you can usually spot the destination directly, specifically if
> a message targets a single processor - no need for hashing and table
> lookups in the common case.

Not on x86.  The APIC ID is guest-provided.  In x2apic mode it can be
quite large.

> In contrast, the maintenance costs for the current explicit route based
> model are significant as we see now.
>

You mean in amount of code in userspace?  That doesn't get solved since
we need to keep compatibility.

-- 
I have a truly marvellous patch that fixes the bug which this
signature is too narrow to contain.


^ permalink raw reply	[flat|nested] 43+ messages in thread

* Re: [PATCH v4] KVM: Introduce direct MSI message injection for in-kernel irqchips
  2012-04-04 11:50                         ` Avi Kivity
@ 2012-04-04 12:01                           ` Jan Kiszka
  0 siblings, 0 replies; 43+ messages in thread
From: Jan Kiszka @ 2012-04-04 12:01 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Michael S. Tsirkin, Marcelo Tosatti, kvm, Eric Northup

On 2012-04-04 13:50, Avi Kivity wrote:
> On 04/04/2012 01:48 PM, Jan Kiszka wrote:
>>>
>>> I'm not so sure anymore.  Sorry about the U turn, but remind me why?  In
>>> the long term it will be slower.
>>
>> Likely not measurably slower. If you look at a message through the arch
>> glasses, you can usually spot the destination directly, specifically if
>> a message targets a single processor - no need for hashing and table
>> lookups in the common case.
> 
> Not on x86.  The APIC ID is guest-provided.

...but is still a rather stable mapping on the physical ID.

>  In x2apic mode it can be
> quite large.

Yes, but then you can at least hash/search/cache inside that group only,
with a smaller scope.

> 
>> In contrast, the maintenance costs for the current explicit route based
>> model are significant as we see now.
>>
> 
> You mean in amount of code in userspace?  That doesn't get solved since
> we need to keep compatibility.

We do not need to track MSI origins to correlate them with routes (with
the exception of 3 special devices: vhost-based virtio, kvm device
assignment, and vfio device assignment). We emulate this centrally with
a hand full of LOC in the kvm layer, and we bypass it with the advent of
a direct injection API. Compare this to my original series that
introduced MSIRoutingCaches to cope with the current kernel API.

Jan

-- 
Siemens AG, Corporate Technology, CT T DE IT 1
Corporate Competence Center Embedded Linux

^ permalink raw reply	[flat|nested] 43+ messages in thread

* [PATCH] KVM: Introduce generic interrupt injection for in-kernel irqchips
  2012-04-03 16:27     ` Avi Kivity
  2012-04-03 16:47       ` Jan Kiszka
@ 2012-04-10 18:30       ` Jan Kiszka
  2012-04-23 14:44         ` Jan Kiszka
  2012-04-23 15:32         ` Avi Kivity
  1 sibling, 2 replies; 43+ messages in thread
From: Jan Kiszka @ 2012-04-10 18:30 UTC (permalink / raw)
  To: Avi Kivity, Marcelo Tosatti; +Cc: kvm, Michael S. Tsirkin, Eric Northup

Currently, MSI messages can only be injected to in-kernel irqchips by
defining a corresponding IRQ route for each message. This is not only
unhandy if the MSI messages are generated "on the fly" by user space,
IRQ routes are a limited resource that user space has to manage
carefully.

By providing a direct injection path, we can both avoid using up limited
resources and simplify the necessary steps for user land. This path is
provided in a way that allows for use with other interrupt sources as
well. Besides MSIs, external interrupt lines can also be manipulated
through this interface, obsoleting KVM_IRQ_LINE_STATUS.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
---

This picks up Avi's first suggestion as I still think it is the better
option to provide a direct MSI injection channel.

 Documentation/virtual/kvm/api.txt |   46 +++++++++++++++++++++++++++++++++++++
 include/linux/kvm.h               |   26 +++++++++++++++++++++
 include/linux/kvm_host.h          |    2 +
 virt/kvm/irq_comm.c               |   29 +++++++++++++++++++++++
 virt/kvm/kvm_main.c               |   20 ++++++++++++++++
 5 files changed, 123 insertions(+), 0 deletions(-)

diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 81ff39f..c70be58 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -1482,6 +1482,52 @@ See KVM_ASSIGN_DEV_IRQ for the data structure.  The target device is specified
 by assigned_dev_id.  In the flags field, only KVM_DEV_ASSIGN_MASK_INTX is
 evaluated.
 
+4.61 KVM_GENERAL_IRQ
+
+Capability: KVM_CAP_GENERAL_IRQ
+Architectures: x86
+Type: vm ioctl
+Parameters: struct kvm_general_irq (in/out)
+Returns: 0 on success, <0 on error
+
+Inject an interrupt event to the guest. Only valid if in-kernel irqchip is
+enabled.
+
+struct kvm_general_irq {
+	__u32 type;
+	__u32 op;
+	__s32 status;
+	__u32 pad;
+	union {
+		__u32 line;
+		struct {
+			__u32 address_lo;
+			__u32 address_hi;
+			__u32 data;
+		} msi;
+		__u8 pad[32];
+	} u;
+};
+
+Supported IRQ types are:
+
+#define KVM_IRQTYPE_EXTERNAL_LINE	0
+#define KVM_IRQTYPE_MSI			1
+
+Available operations are:
+
+#define KVM_IRQOP_LOWER			0
+#define KVM_IRQOP_RAISE			1
+#define KVM_IRQOP_TRIGGER		2
+
+The level of an external interrupt line can either be raised or lowered; an
+MSI can only be triggered.
+
+If 0 is returned from the IOCTL, the status field was updated as well to
+reflect the injection result. It will be >0 on interrupt delivery, 0 if the
+interrupt was coalesced with an already pending one, and <0 if the guest
+blocked the delivery or some delivery error occurred.
+
 4.62 KVM_CREATE_SPAPR_TCE
 
 Capability: KVM_CAP_SPAPR_TCE
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 7a9dd4b..cb3afaf 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -590,6 +590,7 @@ struct kvm_ppc_pvinfo {
 #define KVM_CAP_SYNC_REGS 74
 #define KVM_CAP_PCI_2_3 75
 #define KVM_CAP_KVMCLOCK_CTRL 76
+#define KVM_CAP_GENERAL_IRQ 77
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -715,6 +716,29 @@ struct kvm_one_reg {
 	__u64 addr;
 };
 
+#define KVM_IRQTYPE_EXTERNAL_LINE	0
+#define KVM_IRQTYPE_MSI			1
+
+#define KVM_IRQOP_LOWER			0
+#define KVM_IRQOP_RAISE			1
+#define KVM_IRQOP_TRIGGER		2
+
+struct kvm_general_irq {
+	__u32 type;
+	__u32 op;
+	__s32 status;
+	__u32 pad;
+	union {
+		__u32 line;
+		struct {
+			__u32 address_lo;
+			__u32 address_hi;
+			__u32 data;
+		} msi;
+		__u8 pad[32];
+	} u;
+};
+
 /*
  * ioctls for VM fds
  */
@@ -789,6 +813,8 @@ struct kvm_s390_ucas_mapping {
 /* Available with KVM_CAP_PCI_2_3 */
 #define KVM_ASSIGN_SET_INTX_MASK  _IOW(KVMIO,  0xa4, \
 				       struct kvm_assigned_pci_dev)
+/* Available with KVM_CAP_GENERAL_IRQ */
+#define KVM_GENERAL_IRQ           _IOWR(KVMIO,  0xa5, struct kvm_general_irq)
 
 /*
  * ioctls for vcpu fds
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 49c2f2f..31d3b44 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -773,6 +773,8 @@ int kvm_set_irq_routing(struct kvm *kvm,
 			unsigned flags);
 void kvm_free_irq_routing(struct kvm *kvm);
 
+int kvm_general_irq(struct kvm *kvm, struct kvm_general_irq *irq);
+
 #else
 
 static inline void kvm_free_irq_routing(struct kvm *kvm) {}
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index 9f614b4..e487d3f 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -138,6 +138,35 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
 	return kvm_irq_delivery_to_apic(kvm, NULL, &irq);
 }
 
+int kvm_general_irq(struct kvm *kvm, struct kvm_general_irq *irq)
+{
+	struct kvm_kernel_irq_routing_entry route;
+
+	if (!irqchip_in_kernel(kvm))
+		return -EINVAL;
+
+	switch (irq->type) {
+	case KVM_IRQTYPE_EXTERNAL_LINE:
+		if (irq->op > KVM_IRQOP_RAISE)
+			return -EINVAL;
+		irq->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
+					  irq->u.line, irq->op);
+		break;
+	case KVM_IRQTYPE_MSI:
+		if (irq->op != KVM_IRQOP_TRIGGER)
+			return -EINVAL;
+		route.msi.address_lo = irq->u.msi.address_lo;
+		route.msi.address_hi = irq->u.msi.address_hi;
+		route.msi.data = irq->u.msi.data;
+		irq->status = kvm_set_msi(&route, kvm,
+					  KVM_USERSPACE_IRQ_SOURCE_ID, 1);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
 /*
  * Return value:
  *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 6bd34a6..95dffec 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2059,6 +2059,23 @@ static long kvm_vm_ioctl(struct file *filp,
 		mutex_unlock(&kvm->lock);
 		break;
 #endif
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
+	case KVM_GENERAL_IRQ: {
+		struct kvm_general_irq irq;
+
+		r = -EFAULT;
+		if (copy_from_user(&irq, argp, sizeof(irq)))
+			goto out;
+		r = kvm_general_irq(kvm, &irq);
+		if (r < 0)
+			goto out;
+		if (copy_to_user(argp +
+				 offsetof(struct kvm_general_irq, status),
+				 &irq.status, sizeof(irq.status)))
+			r = -EFAULT;
+		break;
+	}
+#endif
 	default:
 		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
 		if (r == -ENOTTY)
@@ -2187,6 +2204,9 @@ static long kvm_dev_ioctl_check_extension_generic(long arg)
 	case KVM_CAP_SET_BOOT_CPU_ID:
 #endif
 	case KVM_CAP_INTERNAL_ERROR_DATA:
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
+	case KVM_CAP_GENERAL_IRQ:
+#endif
 		return 1;
 #ifdef CONFIG_HAVE_KVM_IRQCHIP
 	case KVM_CAP_IRQ_ROUTING:
-- 
1.7.3.4

^ permalink raw reply related	[flat|nested] 43+ messages in thread

* Re: [PATCH v3] KVM: Introduce direct MSI message injection for in-kernel irqchips
  2012-03-29 16:15 ` [PATCH v3] " Jan Kiszka
                     ` (2 preceding siblings ...)
  2012-03-29 19:14   ` [PATCH v4] " Jan Kiszka
@ 2012-04-11 22:10   ` Marcelo Tosatti
  2012-04-12  9:28     ` Jan Kiszka
  3 siblings, 1 reply; 43+ messages in thread
From: Marcelo Tosatti @ 2012-04-11 22:10 UTC (permalink / raw)
  To: Jan Kiszka; +Cc: Avi Kivity, kvm, Michael S. Tsirkin, Eric Northup

On Thu, Mar 29, 2012 at 06:15:41PM +0200, Jan Kiszka wrote:
> Currently, MSI messages can only be injected to in-kernel irqchips by
> defining a corresponding IRQ route for each message. This is not only
> unhandy if the MSI messages are generated "on the fly" by user space,

The MSI message format is configured on device configuration, and once
it's settled, does not change. This should be an infrequent operation,
no? (I am trying to understand what you mean by "on the fly" here).

If that is the case, the real problem is that irq routing tables do not
handle large numbers of vectors? And isn't that limitation also an issue
if you'd like to add more IOAPICs, for example?

> IRQ routes are a limited resource that user space as to manage
> carefully.
> 
> By providing a direct injection path, we can both avoid using up limited
> resources and simplify the necessary steps for user land.
> 
> Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
> ---
> 
> Changes in v3:
>  - align return code doc to reality
>  - rename SET_MSI -> SIGNAL_MSI
> 
>  Documentation/virtual/kvm/api.txt |   21 +++++++++++++++++++++
>  arch/x86/kvm/Kconfig              |    1 +
>  include/linux/kvm.h               |   11 +++++++++++
>  virt/kvm/Kconfig                  |    3 +++
>  virt/kvm/kvm_main.c               |   21 +++++++++++++++++++++
>  5 files changed, 57 insertions(+), 0 deletions(-)
> 
> diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
> index 81ff39f..ed27d1b 100644
> --- a/Documentation/virtual/kvm/api.txt
> +++ b/Documentation/virtual/kvm/api.txt
> @@ -1482,6 +1482,27 @@ See KVM_ASSIGN_DEV_IRQ for the data structure.  The target device is specified
>  by assigned_dev_id.  In the flags field, only KVM_DEV_ASSIGN_MASK_INTX is
>  evaluated.
>  
> +4.61 KVM_SIGNAL_MSI
> +
> +Capability: KVM_CAP_SIGNAL_MSI
> +Architectures: x86
> +Type: vm ioctl
> +Parameters: struct kvm_msi (in)
> +Returns: >0 on delivery, 0 if guest blocked the MSI, and -1 on error
> +
> +Directly inject a MSI message. Only valid with in-kernel irqchip that handles
> +MSI messages.
> +
> +struct kvm_msi {
> +	__u32 address_lo;
> +	__u32 address_hi;
> +	__u32 data;
> +	__u32 flags;
> +	__u8  pad[16];
> +};
> +
> +No flags are defined so far. The corresponding field must be 0.
> +
>  4.62 KVM_CREATE_SPAPR_TCE
>  
>  Capability: KVM_CAP_SPAPR_TCE
> diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
> index 1a7fe86..a28f338 100644
> --- a/arch/x86/kvm/Kconfig
> +++ b/arch/x86/kvm/Kconfig
> @@ -36,6 +36,7 @@ config KVM
>  	select TASKSTATS
>  	select TASK_DELAY_ACCT
>  	select PERF_EVENTS
> +	select HAVE_KVM_MSI
>  	---help---
>  	  Support hosting fully virtualized guest machines using hardware
>  	  virtualization extensions.  You will need a fairly recent
> diff --git a/include/linux/kvm.h b/include/linux/kvm.h
> index 7a9dd4b..225b452 100644
> --- a/include/linux/kvm.h
> +++ b/include/linux/kvm.h
> @@ -590,6 +590,7 @@ struct kvm_ppc_pvinfo {
>  #define KVM_CAP_SYNC_REGS 74
>  #define KVM_CAP_PCI_2_3 75
>  #define KVM_CAP_KVMCLOCK_CTRL 76
> +#define KVM_CAP_SIGNAL_MSI 77
>  
>  #ifdef KVM_CAP_IRQ_ROUTING
>  
> @@ -715,6 +716,14 @@ struct kvm_one_reg {
>  	__u64 addr;
>  };
>  
> +struct kvm_msi {
> +	__u32 address_lo;
> +	__u32 address_hi;
> +	__u32 data;
> +	__u32 flags;
> +	__u8  pad[16];
> +};
> +
>  /*
>   * ioctls for VM fds
>   */
> @@ -789,6 +798,8 @@ struct kvm_s390_ucas_mapping {
>  /* Available with KVM_CAP_PCI_2_3 */
>  #define KVM_ASSIGN_SET_INTX_MASK  _IOW(KVMIO,  0xa4, \
>  				       struct kvm_assigned_pci_dev)
> +/* Available with KVM_CAP_SIGNAL_MSI */
> +#define KVM_SIGNAL_MSI            _IOW(KVMIO,  0xa5, struct kvm_msi)
>  
>  /*
>   * ioctls for vcpu fds
> diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
> index f63ccb0..28694f4 100644
> --- a/virt/kvm/Kconfig
> +++ b/virt/kvm/Kconfig
> @@ -18,3 +18,6 @@ config KVM_MMIO
>  
>  config KVM_ASYNC_PF
>         bool
> +
> +config HAVE_KVM_MSI
> +       bool
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index a612bc8..3aeb7ab 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -2063,6 +2063,24 @@ static long kvm_vm_ioctl(struct file *filp,
>  		mutex_unlock(&kvm->lock);
>  		break;
>  #endif
> +#ifdef CONFIG_HAVE_KVM_MSI
> +	case KVM_SIGNAL_MSI: {
> +		struct kvm_kernel_irq_routing_entry route;
> +		struct kvm_msi msi;

Zero them (future proof).

> +
> +		r = -EFAULT;
> +		if (copy_from_user(&msi, argp, sizeof msi))
> +			goto out;
> +		r = -EINVAL;
> +		if (!irqchip_in_kernel(kvm) || msi.flags != 0)
> +			goto out;
> +		route.msi.address_lo = msi.address_lo;
> +		route.msi.address_hi = msi.address_hi;
> +		route.msi.data = msi.data;
> +		r =  kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1);

extra whitespace.

> +		break;
> +	}

Have you checked that direct MSI injection does not make use of 
IRQ routing data structures, such as for acking? 

irqchip_in_kernel(kvm) returns true before kvm->irq_routing is 
actually in place. With kvm_set_irq there is no problem, but now
there is another path into injection.

The real purpose of this is not entirely clear (and as Avi mentioned two
interfaces should be avoided if possible).


^ permalink raw reply	[flat|nested] 43+ messages in thread

* Re: [PATCH v3] KVM: Introduce direct MSI message injection for in-kernel irqchips
  2012-04-11 22:10   ` [PATCH v3] " Marcelo Tosatti
@ 2012-04-12  9:28     ` Jan Kiszka
  2012-04-12 22:38       ` Marcelo Tosatti
  0 siblings, 1 reply; 43+ messages in thread
From: Jan Kiszka @ 2012-04-12  9:28 UTC (permalink / raw)
  To: Marcelo Tosatti; +Cc: Avi Kivity, kvm, Michael S. Tsirkin, Eric Northup

On 2012-04-12 00:10, Marcelo Tosatti wrote:
> On Thu, Mar 29, 2012 at 06:15:41PM +0200, Jan Kiszka wrote:
>> Currently, MSI messages can only be injected to in-kernel irqchips by
>> defining a corresponding IRQ route for each message. This is not only
>> unhandy if the MSI messages are generated "on the fly" by user space,
> 
> The MSI message format is configured on device configuration, and once
> its settled, does not change. This should be an unfrequent operation,
> no? (i am trying to understand what you mean by "on the fly" here).

The point is that you need to track those changes and/or provide
mechanism to cache routing information related to a specific MSI source.
That either means patching the full path from the source to the sink or
paying some price on injection.

> 
> If that is the case, the real problem is that irq routing tables do not
> handle large numbers of vectors? And isnt that limitation also an issue
> if you'd like to add more IOAPICs, for example?

For sure, more IRQ lines will also contribute to the shortage of GSIs.
Yet another reason to avoid wasting them on userspace generated MSIs.

> 
>> IRQ routes are a limited resource that user space as to manage
>> carefully.
>>
>> By providing a direct injection path, we can both avoid using up limited
>> resources and simplify the necessary steps for user land.
>>
>> Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
>> ---
>>
>> Changes in v3:
>>  - align return code doc to reality
>>  - rename SET_MSI -> SIGNAL_MSI
>>
>>  Documentation/virtual/kvm/api.txt |   21 +++++++++++++++++++++
>>  arch/x86/kvm/Kconfig              |    1 +
>>  include/linux/kvm.h               |   11 +++++++++++
>>  virt/kvm/Kconfig                  |    3 +++
>>  virt/kvm/kvm_main.c               |   21 +++++++++++++++++++++
>>  5 files changed, 57 insertions(+), 0 deletions(-)
>>
>> diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
>> index 81ff39f..ed27d1b 100644
>> --- a/Documentation/virtual/kvm/api.txt
>> +++ b/Documentation/virtual/kvm/api.txt
>> @@ -1482,6 +1482,27 @@ See KVM_ASSIGN_DEV_IRQ for the data structure.  The target device is specified
>>  by assigned_dev_id.  In the flags field, only KVM_DEV_ASSIGN_MASK_INTX is
>>  evaluated.
>>  
>> +4.61 KVM_SIGNAL_MSI
>> +
>> +Capability: KVM_CAP_SIGNAL_MSI
>> +Architectures: x86
>> +Type: vm ioctl
>> +Parameters: struct kvm_msi (in)
>> +Returns: >0 on delivery, 0 if guest blocked the MSI, and -1 on error
>> +
>> +Directly inject a MSI message. Only valid with in-kernel irqchip that handles
>> +MSI messages.
>> +
>> +struct kvm_msi {
>> +	__u32 address_lo;
>> +	__u32 address_hi;
>> +	__u32 data;
>> +	__u32 flags;
>> +	__u8  pad[16];
>> +};
>> +
>> +No flags are defined so far. The corresponding field must be 0.
>> +
>>  4.62 KVM_CREATE_SPAPR_TCE
>>  
>>  Capability: KVM_CAP_SPAPR_TCE
>> diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
>> index 1a7fe86..a28f338 100644
>> --- a/arch/x86/kvm/Kconfig
>> +++ b/arch/x86/kvm/Kconfig
>> @@ -36,6 +36,7 @@ config KVM
>>  	select TASKSTATS
>>  	select TASK_DELAY_ACCT
>>  	select PERF_EVENTS
>> +	select HAVE_KVM_MSI
>>  	---help---
>>  	  Support hosting fully virtualized guest machines using hardware
>>  	  virtualization extensions.  You will need a fairly recent
>> diff --git a/include/linux/kvm.h b/include/linux/kvm.h
>> index 7a9dd4b..225b452 100644
>> --- a/include/linux/kvm.h
>> +++ b/include/linux/kvm.h
>> @@ -590,6 +590,7 @@ struct kvm_ppc_pvinfo {
>>  #define KVM_CAP_SYNC_REGS 74
>>  #define KVM_CAP_PCI_2_3 75
>>  #define KVM_CAP_KVMCLOCK_CTRL 76
>> +#define KVM_CAP_SIGNAL_MSI 77
>>  
>>  #ifdef KVM_CAP_IRQ_ROUTING
>>  
>> @@ -715,6 +716,14 @@ struct kvm_one_reg {
>>  	__u64 addr;
>>  };
>>  
>> +struct kvm_msi {
>> +	__u32 address_lo;
>> +	__u32 address_hi;
>> +	__u32 data;
>> +	__u32 flags;
>> +	__u8  pad[16];
>> +};
>> +
>>  /*
>>   * ioctls for VM fds
>>   */
>> @@ -789,6 +798,8 @@ struct kvm_s390_ucas_mapping {
>>  /* Available with KVM_CAP_PCI_2_3 */
>>  #define KVM_ASSIGN_SET_INTX_MASK  _IOW(KVMIO,  0xa4, \
>>  				       struct kvm_assigned_pci_dev)
>> +/* Available with KVM_CAP_SIGNAL_MSI */
>> +#define KVM_SIGNAL_MSI            _IOW(KVMIO,  0xa5, struct kvm_msi)
>>  
>>  /*
>>   * ioctls for vcpu fds
>> diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
>> index f63ccb0..28694f4 100644
>> --- a/virt/kvm/Kconfig
>> +++ b/virt/kvm/Kconfig
>> @@ -18,3 +18,6 @@ config KVM_MMIO
>>  
>>  config KVM_ASYNC_PF
>>         bool
>> +
>> +config HAVE_KVM_MSI
>> +       bool
>> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
>> index a612bc8..3aeb7ab 100644
>> --- a/virt/kvm/kvm_main.c
>> +++ b/virt/kvm/kvm_main.c
>> @@ -2063,6 +2063,24 @@ static long kvm_vm_ioctl(struct file *filp,
>>  		mutex_unlock(&kvm->lock);
>>  		break;
>>  #endif
>> +#ifdef CONFIG_HAVE_KVM_MSI
>> +	case KVM_SIGNAL_MSI: {
>> +		struct kvm_kernel_irq_routing_entry route;
>> +		struct kvm_msi msi;
> 
> Zero them (future proof).
> 
>> +
>> +		r = -EFAULT;
>> +		if (copy_from_user(&msi, argp, sizeof msi))
>> +			goto out;
>> +		r = -EINVAL;
>> +		if (!irqchip_in_kernel(kvm) || msi.flags != 0)
>> +			goto out;
>> +		route.msi.address_lo = msi.address_lo;
>> +		route.msi.address_hi = msi.address_hi;
>> +		route.msi.data = msi.data;
>> +		r =  kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1);
> 
> extra whitespace.
> 
>> +		break;
>> +	}
> 
> Have you checked that direct MSI injection does not make use of 
> IRQ routing data structures, such as for acking? 

See kvm_set_msi: The routing structure is only read in the context of
that function, no reference is kept.

> 
> irqchip_in_kernel(kvm) returns true before kvm->irq_routing is 
> actually in place. With kvm_set_irq there is no problem, but now
> there is another path into injection.
> 
> The real purpose of this is not entirely clear (and as Avi mentioned two
> interfaces should be avoided if possible).

See [1] for an implementation of one of Avi's proposals.

Jan

[1] http://thread.gmane.org/gmane.comp.emulators.kvm.devel/89379

^ permalink raw reply	[flat|nested] 43+ messages in thread

* Re: [PATCH v3] KVM: Introduce direct MSI message injection for in-kernel irqchips
  2012-04-12  9:28     ` Jan Kiszka
@ 2012-04-12 22:38       ` Marcelo Tosatti
  2012-04-13 13:45         ` Jan Kiszka
  0 siblings, 1 reply; 43+ messages in thread
From: Marcelo Tosatti @ 2012-04-12 22:38 UTC (permalink / raw)
  To: Jan Kiszka; +Cc: Avi Kivity, kvm, Michael S. Tsirkin, Eric Northup

On Thu, Apr 12, 2012 at 11:28:09AM +0200, Jan Kiszka wrote:
> On 2012-04-12 00:10, Marcelo Tosatti wrote:
> > On Thu, Mar 29, 2012 at 06:15:41PM +0200, Jan Kiszka wrote:
> >> Currently, MSI messages can only be injected to in-kernel irqchips by
> >> defining a corresponding IRQ route for each message. This is not only
> >> unhandy if the MSI messages are generated "on the fly" by user space,
> > 
> > The MSI message format is configured on device configuration, and once
> > its settled, does not change. This should be an unfrequent operation,
> > no? (i am trying to understand what you mean by "on the fly" here).
> 
> The point is that you need to track those changes and/or provide
> mechanism to cache routing information related to a specific MSI source.
> That either means patching the full path from the source to the sink or
> paying some price on injection.
> 
> > 
> > If that is the case, the real problem is that irq routing tables do not
> > handle large numbers of vectors? And isnt that limitation also an issue
> > if you'd like to add more IOAPICs, for example?
> 
> For sure, more IRQ lines will also contribute to the shortage of GSIs.
> Yet another reason to avoid wasting them on userspace generated MSIs.
> 
> > 
> >> IRQ routes are a limited resource that user space as to manage
> >> carefully.
> >>
> >> By providing a direct injection path, we can both avoid using up limited
> >> resources and simplify the necessary steps for user land.
> >>
> >> Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
> >> ---
> >>
> >> Changes in v3:
> >>  - align return code doc to reality
> >>  - rename SET_MSI -> SIGNAL_MSI
> >>
> >>  Documentation/virtual/kvm/api.txt |   21 +++++++++++++++++++++
> >>  arch/x86/kvm/Kconfig              |    1 +
> >>  include/linux/kvm.h               |   11 +++++++++++
> >>  virt/kvm/Kconfig                  |    3 +++
> >>  virt/kvm/kvm_main.c               |   21 +++++++++++++++++++++
> >>  5 files changed, 57 insertions(+), 0 deletions(-)
> >>
> >> diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
> >> index 81ff39f..ed27d1b 100644
> >> --- a/Documentation/virtual/kvm/api.txt
> >> +++ b/Documentation/virtual/kvm/api.txt
> >> @@ -1482,6 +1482,27 @@ See KVM_ASSIGN_DEV_IRQ for the data structure.  The target device is specified
> >>  by assigned_dev_id.  In the flags field, only KVM_DEV_ASSIGN_MASK_INTX is
> >>  evaluated.
> >>  
> >> +4.61 KVM_SIGNAL_MSI
> >> +
> >> +Capability: KVM_CAP_SIGNAL_MSI
> >> +Architectures: x86
> >> +Type: vm ioctl
> >> +Parameters: struct kvm_msi (in)
> >> +Returns: >0 on delivery, 0 if guest blocked the MSI, and -1 on error
> >> +
> >> +Directly inject a MSI message. Only valid with in-kernel irqchip that handles
> >> +MSI messages.
> >> +
> >> +struct kvm_msi {
> >> +	__u32 address_lo;
> >> +	__u32 address_hi;
> >> +	__u32 data;
> >> +	__u32 flags;
> >> +	__u8  pad[16];
> >> +};
> >> +
> >> +No flags are defined so far. The corresponding field must be 0.
> >> +
> >>  4.62 KVM_CREATE_SPAPR_TCE
> >>  
> >>  Capability: KVM_CAP_SPAPR_TCE
> >> diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
> >> index 1a7fe86..a28f338 100644
> >> --- a/arch/x86/kvm/Kconfig
> >> +++ b/arch/x86/kvm/Kconfig
> >> @@ -36,6 +36,7 @@ config KVM
> >>  	select TASKSTATS
> >>  	select TASK_DELAY_ACCT
> >>  	select PERF_EVENTS
> >> +	select HAVE_KVM_MSI
> >>  	---help---
> >>  	  Support hosting fully virtualized guest machines using hardware
> >>  	  virtualization extensions.  You will need a fairly recent
> >> diff --git a/include/linux/kvm.h b/include/linux/kvm.h
> >> index 7a9dd4b..225b452 100644
> >> --- a/include/linux/kvm.h
> >> +++ b/include/linux/kvm.h
> >> @@ -590,6 +590,7 @@ struct kvm_ppc_pvinfo {
> >>  #define KVM_CAP_SYNC_REGS 74
> >>  #define KVM_CAP_PCI_2_3 75
> >>  #define KVM_CAP_KVMCLOCK_CTRL 76
> >> +#define KVM_CAP_SIGNAL_MSI 77
> >>  
> >>  #ifdef KVM_CAP_IRQ_ROUTING
> >>  
> >> @@ -715,6 +716,14 @@ struct kvm_one_reg {
> >>  	__u64 addr;
> >>  };
> >>  
> >> +struct kvm_msi {
> >> +	__u32 address_lo;
> >> +	__u32 address_hi;
> >> +	__u32 data;
> >> +	__u32 flags;
> >> +	__u8  pad[16];
> >> +};
> >> +
> >>  /*
> >>   * ioctls for VM fds
> >>   */
> >> @@ -789,6 +798,8 @@ struct kvm_s390_ucas_mapping {
> >>  /* Available with KVM_CAP_PCI_2_3 */
> >>  #define KVM_ASSIGN_SET_INTX_MASK  _IOW(KVMIO,  0xa4, \
> >>  				       struct kvm_assigned_pci_dev)
> >> +/* Available with KVM_CAP_SIGNAL_MSI */
> >> +#define KVM_SIGNAL_MSI            _IOW(KVMIO,  0xa5, struct kvm_msi)
> >>  
> >>  /*
> >>   * ioctls for vcpu fds
> >> diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
> >> index f63ccb0..28694f4 100644
> >> --- a/virt/kvm/Kconfig
> >> +++ b/virt/kvm/Kconfig
> >> @@ -18,3 +18,6 @@ config KVM_MMIO
> >>  
> >>  config KVM_ASYNC_PF
> >>         bool
> >> +
> >> +config HAVE_KVM_MSI
> >> +       bool
> >> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> >> index a612bc8..3aeb7ab 100644
> >> --- a/virt/kvm/kvm_main.c
> >> +++ b/virt/kvm/kvm_main.c
> >> @@ -2063,6 +2063,24 @@ static long kvm_vm_ioctl(struct file *filp,
> >>  		mutex_unlock(&kvm->lock);
> >>  		break;
> >>  #endif
> >> +#ifdef CONFIG_HAVE_KVM_MSI
> >> +	case KVM_SIGNAL_MSI: {
> >> +		struct kvm_kernel_irq_routing_entry route;
> >> +		struct kvm_msi msi;
> > 
> > Zero them (future proof).
> > 
> >> +
> >> +		r = -EFAULT;
> >> +		if (copy_from_user(&msi, argp, sizeof msi))
> >> +			goto out;
> >> +		r = -EINVAL;
> >> +		if (!irqchip_in_kernel(kvm) || msi.flags != 0)
> >> +			goto out;
> >> +		route.msi.address_lo = msi.address_lo;
> >> +		route.msi.address_hi = msi.address_hi;
> >> +		route.msi.data = msi.data;
> >> +		r =  kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1);
> > 
> > extra whitespace.
> > 
> >> +		break;
> >> +	}
> > 
> > Have you checked that direct MSI injection does not make use of 
> > IRQ routing data structures, such as for acking? 
> 
> See kvm_set_msi: The routing structure is only read in the context of
> that function, no reference is kept.

I was thinking more along the lines of inconsistent state
due to races.

- set MSI for IOAPIC handled vector before kvm->irq_routing 
is assigned.
- IOAPIC EOI for that vector.
- EOI handler expects kvm->irq_routing present.

> > irqchip_in_kernel(kvm) returns true before kvm->irq_routing is 
> > actually in place. With kvm_set_irq there is no problem, but now
> > there is another path into injection.
> > 
> > The real purpose of this is not entirely clear (and as Avi mentioned two
> > interfaces should be avoided if possible).
> 
> See [1] for an implementation of one of Avi's proposals.

Just now that i start to appreciate KVM_SIGNAL_MSI.

"so we have a single ioctl for all interrupt handling.  This allows
eventual removal of the line-oriented ioctls." 

So you move from one interface that handles both MSI/INTx, to _another_
interface that handles both. KVM_SIGNAL_MSI with address/data is clean
and obvious.


^ permalink raw reply	[flat|nested] 43+ messages in thread

* Re: [PATCH v3] KVM: Introduce direct MSI message injection for in-kernel irqchips
  2012-04-12 22:38       ` Marcelo Tosatti
@ 2012-04-13 13:45         ` Jan Kiszka
  0 siblings, 0 replies; 43+ messages in thread
From: Jan Kiszka @ 2012-04-13 13:45 UTC (permalink / raw)
  To: Marcelo Tosatti; +Cc: Avi Kivity, kvm, Michael S. Tsirkin, Eric Northup

On 2012-04-13 00:38, Marcelo Tosatti wrote:
>>> Have you checked that direct MSI injection does not make use of 
>>> IRQ routing data structures, such as for acking? 
>>
>> See kvm_set_msi: The routing structure is only read in the context of
>> that function, no reference is kept.
> 
> I was thinking more along the lines of inconsistent state
> due to races.
> 
> - set MSI for IOAPIC handled vector before kvm->irq_routing 
> is assigned.
> - IOAPIC EOI for that vector.
> - EOI handler expects kvm->irq_routing present.

OK, starting to understand your worries. But isn't kvm->irq_routing
always pointing to some traversable table after KVM_CREATE_IRQCHIP? If
not, that race would have been exploitable before.

> 
>>> irqchip_in_kernel(kvm) returns true before kvm->irq_routing is 
>>> actually in place. With kvm_set_irq there is no problem, but now
>>> there is another path into injection.
>>>
>>> The real purpose of this is not entirely clear (and as Avi mentioned two
>>> interfaces should be avoided if possible).
>>
>> See [1] for an implementation of one of Avi's proposals.
> 
> Just now that i start to appreciate KVM_SIGNAL_MSI.
> 
> "so we have a single ioctl for all interrupt handling.  This allows
> eventual removal of the line-oriented ioctls." 
> 
> So you move from one interface that handles both MSI/INTx, to _another_
> interface that handles both. KVM_SIGNAL_MSI with address/data is clean
> and obvious.

Well, I'm just looking for a MSI message injection mechanism. So I'm
fine with both KVM_GENERAL_IRQ and KVM_SIGNAL_MSI.

KVM_GENERAL_IRQ /may/ be helpful for kernel irqchips of upcoming arch if
they need to provide IRQ injection paths that do not match well on the
existing ones. However, it will surely take a long while until we can
drop KVM_IRQ_LINE/KVM_IRQ_LINE_STATUS.

Jan

-- 
Siemens AG, Corporate Technology, CT T DE IT 1
Corporate Competence Center Embedded Linux

^ permalink raw reply	[flat|nested] 43+ messages in thread

* Re: [PATCH] KVM: Introduce generic interrupt injection for in-kernel irqchips
  2012-04-10 18:30       ` [PATCH] KVM: Introduce generic interrupt " Jan Kiszka
@ 2012-04-23 14:44         ` Jan Kiszka
  2012-04-23 15:17           ` Avi Kivity
  2012-04-23 15:32         ` Avi Kivity
  1 sibling, 1 reply; 43+ messages in thread
From: Jan Kiszka @ 2012-04-23 14:44 UTC (permalink / raw)
  To: Avi Kivity, Marcelo Tosatti; +Cc: kvm, Michael S. Tsirkin, Eric Northup

On 2012-04-10 20:30, Jan Kiszka wrote:
> Currently, MSI messages can only be injected to in-kernel irqchips by
> defining a corresponding IRQ route for each message. This is not only
> unhandy if the MSI messages are generated "on the fly" by user space,
> IRQ routes are a limited resource that user space has to manage
> carefully.
> 
> By providing a direct injection path, we can both avoid using up limited
> resources and simplify the necessary steps for user land. This path is
> provide in a way that allows for use with other interrupt sources as
> well. Besides MSIs also external interrupt lines can be manipulated
> through this interface, obsoleting KVM_IRQ_LINE_STATUS.
> 
> Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
> ---
> 
> This picks up Avi's first suggestion as I still think it is the better
> option to provide a direct MSI injection channel.

Ping. What's now the preferred approach? I'd like to make some progress
with this topic.

Jan

-- 
Siemens AG, Corporate Technology, CT T DE IT 1
Corporate Competence Center Embedded Linux

^ permalink raw reply	[flat|nested] 43+ messages in thread

* Re: [PATCH] KVM: Introduce generic interrupt injection for in-kernel irqchips
  2012-04-23 14:44         ` Jan Kiszka
@ 2012-04-23 15:17           ` Avi Kivity
  0 siblings, 0 replies; 43+ messages in thread
From: Avi Kivity @ 2012-04-23 15:17 UTC (permalink / raw)
  To: Jan Kiszka; +Cc: Marcelo Tosatti, kvm, Michael S. Tsirkin, Eric Northup

On 04/23/2012 05:44 PM, Jan Kiszka wrote:
> On 2012-04-10 20:30, Jan Kiszka wrote:
> > Currently, MSI messages can only be injected to in-kernel irqchips by
> > defining a corresponding IRQ route for each message. This is not only
> > unhandy if the MSI messages are generated "on the fly" by user space,
> > IRQ routes are a limited resource that user space has to manage
> > carefully.
> > 
> > By providing a direct injection path, we can both avoid using up limited
> > resources and simplify the necessary steps for user land. This path is
> > provided in a way that allows for use with other interrupt sources as
> > well. Besides MSIs also external interrupt lines can be manipulated
> > through this interface, obsoleting KVM_IRQ_LINE_STATUS.
> > 
> > Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
> > ---
> > 
> > This picks up Avi's first suggestion as I still think it is the better
> > option to provide a direct MSI injection channel.
>
> Ping. What's now the preferred approach? I'd like to make some progress
> with this topic.
>

Sorry, I haven't seen it.  Will review now.

-- 
error compiling committee.c: too many arguments to function


^ permalink raw reply	[flat|nested] 43+ messages in thread

* Re: [PATCH] KVM: Introduce generic interrupt injection for in-kernel irqchips
  2012-04-10 18:30       ` [PATCH] KVM: Introduce generic interrupt " Jan Kiszka
  2012-04-23 14:44         ` Jan Kiszka
@ 2012-04-23 15:32         ` Avi Kivity
  2012-04-23 15:55           ` Jan Kiszka
  1 sibling, 1 reply; 43+ messages in thread
From: Avi Kivity @ 2012-04-23 15:32 UTC (permalink / raw)
  To: Jan Kiszka; +Cc: Marcelo Tosatti, kvm, Michael S. Tsirkin, Eric Northup

On 04/10/2012 09:30 PM, Jan Kiszka wrote:
> Currently, MSI messages can only be injected to in-kernel irqchips by
> defining a corresponding IRQ route for each message. This is not only
> unhandy if the MSI messages are generated "on the fly" by user space,
> IRQ routes are a limited resource that user space has to manage
> carefully.
>
> By providing a direct injection path, we can both avoid using up limited
> resources and simplify the necessary steps for user land. This path is
> provided in a way that allows for use with other interrupt sources as
> well. Besides MSIs also external interrupt lines can be manipulated
> through this interface, obsoleting KVM_IRQ_LINE_STATUS.
>
> Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
> ---
>
> This picks up Avi's first suggestion as I still think it is the better
> option to provide a direct MSI injection channel.

My main objection to the previous patch was that it's not needed; qemu
has to work with older kernels that have neither this nor the other
patch.  Given that, why do anything further?  It won't be cleaner
because the ugly path will remain for compatibility.

Is there any concrete problem that this solves, that cannot be solved by
a pure (but ugly) user space solution?

wrt the patch itself, it seems fine.  I have to agree that the
single-purpose ioctl looks cleaner (but may be less suitable for a
syscall based API).

Just to add some confusion: is this future proof wrt iommu/interrupt
remapping emulation?  If you have a single iommu that intercepts all of
the MSI space then there's no problem, but if there are multiple iommus,
or if some devices are "in front of" the iommu, then we need to identify
the source of the message as well.  So we'd need an MBZ field for MSI
injection, which can later be filled with the source ID.

-- 
error compiling committee.c: too many arguments to function


^ permalink raw reply	[flat|nested] 43+ messages in thread

* Re: [PATCH] KVM: Introduce generic interrupt injection for in-kernel irqchips
  2012-04-23 15:32         ` Avi Kivity
@ 2012-04-23 15:55           ` Jan Kiszka
  2012-04-24 11:54             ` Avi Kivity
  0 siblings, 1 reply; 43+ messages in thread
From: Jan Kiszka @ 2012-04-23 15:55 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Marcelo Tosatti, kvm, Michael S. Tsirkin, Eric Northup

On 2012-04-23 17:32, Avi Kivity wrote:
> On 04/10/2012 09:30 PM, Jan Kiszka wrote:
>> Currently, MSI messages can only be injected to in-kernel irqchips by
>> defining a corresponding IRQ route for each message. This is not only
>> unhandy if the MSI messages are generated "on the fly" by user space,
>> IRQ routes are a limited resource that user space has to manage
>> carefully.
>>
>> By providing a direct injection path, we can both avoid using up limited
>> resources and simplify the necessary steps for user land. This path is
>> provided in a way that allows for use with other interrupt sources as
>> well. Besides MSIs also external interrupt lines can be manipulated
>> through this interface, obsoleting KVM_IRQ_LINE_STATUS.
>>
>> Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
>> ---
>>
>> This picks up Avi's first suggestion as I still think it is the better
>> option to provide a direct MSI injection channel.
> 
> My main objection to the previous patch was that it's not needed; qemu
> has to work with older kernels that have neither this nor the other
> patch.  Given that, why do anything further?  It won't be cleaner
> because the ugly path will remain for compatibility.
> 
> Is there any concrete problem that this solves, that cannot be solved by
> a pure (but ugly) user space solution?

As I explained a few times: We avoid taking the lengthy path in
userspace, GSI exhaustion, sporadic routing flushes etc. when running on
a modern kernel. The "ugly" part in userspace is rather small as we can
refrain from optimizing it. Specifically we do not need to touch QEMU
all over the MSI path just for KVM.

> 
> wrt the patch itself, it seems fine.  I have to agree that the
> single-purpose ioctl looks cleaner (but may be less suitable for a
> syscall based API).

Why would some MSI-only injection IOCTL be problematic for a syscall
model? I rather suspect the generic IOCTL may have some limitations as
it /might/ be used for mixing broadcasts with solely VCPU-targeted IRQ
injections one day, on some arch.

> 
> Just to add some confusion: is this future proof wrt iommu/interrupt
> remapping emulation?  If you have a single iommu that intercepts all of
> the MSI space then there's no problem, but if there are multiple iommus,
> or if some devices are "in front of" the iommu, then we need to identify
> the source of the message as well.  So we'd need an MBZ field for MSI
> injection, which can later be filled with the source ID.

We will need hierarchical dispatching, using additional source
information. But that information is totally unrelated to the pseudo GSI
currently used by KVM for the final delivery step to the APIC bus. In
fact, the whole message content can be unrelated as it can be modified
and even coalesced with other sources along its way to the KVM injection
API.

Jan

-- 
Siemens AG, Corporate Technology, CT T DE IT 1
Corporate Competence Center Embedded Linux

^ permalink raw reply	[flat|nested] 43+ messages in thread

* Re: [PATCH] KVM: Introduce generic interrupt injection for in-kernel irqchips
  2012-04-23 15:55           ` Jan Kiszka
@ 2012-04-24 11:54             ` Avi Kivity
  0 siblings, 0 replies; 43+ messages in thread
From: Avi Kivity @ 2012-04-24 11:54 UTC (permalink / raw)
  To: Jan Kiszka; +Cc: Marcelo Tosatti, kvm, Michael S. Tsirkin, Eric Northup

On 04/23/2012 06:55 PM, Jan Kiszka wrote:
> On 2012-04-23 17:32, Avi Kivity wrote:
> > On 04/10/2012 09:30 PM, Jan Kiszka wrote:
> >> Currently, MSI messages can only be injected to in-kernel irqchips by
> >> defining a corresponding IRQ route for each message. This is not only
> >> unhandy if the MSI messages are generated "on the fly" by user space,
> >> IRQ routes are a limited resource that user space has to manage
> >> carefully.
> >>
> >> By providing a direct injection path, we can both avoid using up limited
> >> resources and simplify the necessary steps for user land. This path is
> >> provided in a way that allows for use with other interrupt sources as
> >> well. Besides MSIs also external interrupt lines can be manipulated
> >> through this interface, obsoleting KVM_IRQ_LINE_STATUS.
> >>
> >> Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
> >> ---
> >>
> >> This picks up Avi's first suggestion as I still think it is the better
> >> option to provide a direct MSI injection channel.
> > 
> > My main objection to the previous patch was that it's not needed; qemu
> > has to work with older kernels that have neither this nor the other
> > patch.  Given that, why do anything further?  It won't be cleaner
> > because the ugly path will remain for compatibility.
> > 
> > Is there any concrete problem that this solves, that cannot be solved by
> > a pure (but ugly) user space solution?
>
> As I explained a few times: We avoid taking the lengthy path in
> userspace, GSI exhaustion, sporadic routing flushes etc. when running on
> a modern kernel. The "ugly" part in userspace is rather small as we can
> refrain from optimizing it. Specifically we do not need to touch QEMU
> all over the MSI path just for KVM.

Okay, I guess waiting for a grace period in order to queue an interrupt
is excessive.

> > 
> > wrt the patch itself, it seems fine.  I have to agree that the
> > single-purpose ioctl looks cleaner (but may be less suitable for a
> > syscall based API).
>
> Why would some MSI-only injection IOCTL be problematic for a syscall
> model? 

It increases the number of syscalls, and doesn't bound it if new
interrupt types (like MSI with source information) become available.

However, this thinking shouldn't influence the ioctl interface too much.

> I rather suspect the generic IOCTL may have some limitations as
> it /might/ be used for mixing broadcasts with solely VCPU-targeted IRQ
> injections one day, on some arch.

Well we can do that now, with either interface, since MSIs can be
unicast or multicast.

> > 
> > Just to add some confusion: is this future proof wrt iommu/interrupt
> > remapping emulation?  If you have a single iommu that intercepts all of
> > the MSI space then there's no problem, but if there are multiple iommus,
> > or if some devices are "in front of" the iommu, then we need to identify
> > the source of the message as well.  So we'd need an MBZ field for MSI
> > injection, which can later be filled with the source ID.
>
> We will need hierarchical dispatching, using additional source
> information. But that information is totally unrelated to the pseudo GSI
> currently used by KVM for the final delivery step to the APIC bus. In
> fact, the whole message content can be unrelated as it can be modified
> and even coalesced with other sources along its way to the KVM injection
> API.

Depends on whether we emulate interrupt remapping in the kernel or
userspace.

-- 
error compiling committee.c: too many arguments to function


^ permalink raw reply	[flat|nested] 43+ messages in thread

* Re: [PATCH v4] KVM: Introduce direct MSI message injection for in-kernel irqchips
  2012-03-29 19:14   ` [PATCH v4] " Jan Kiszka
  2012-03-29 19:41     ` Michael S. Tsirkin
  2012-04-03 16:27     ` Avi Kivity
@ 2012-04-24 11:57     ` Avi Kivity
  2012-04-24 12:07       ` Jan Kiszka
  2 siblings, 1 reply; 43+ messages in thread
From: Avi Kivity @ 2012-04-24 11:57 UTC (permalink / raw)
  To: Jan Kiszka; +Cc: Marcelo Tosatti, kvm, Michael S. Tsirkin, Eric Northup

On 03/29/2012 09:14 PM, Jan Kiszka wrote:
> Currently, MSI messages can only be injected to in-kernel irqchips by
> defining a corresponding IRQ route for each message. This is not only
> unhandy if the MSI messages are generated "on the fly" by user space,
> IRQ routes are a limited resource that user space has to manage
> carefully.
>
> By providing a direct injection path, we can both avoid using up limited
> resources and simplify the necessary steps for user land.
>
>

Applied to queue (for 3.5).

Thanks for your patience.

-- 
error compiling committee.c: too many arguments to function


^ permalink raw reply	[flat|nested] 43+ messages in thread

* Re: [PATCH v4] KVM: Introduce direct MSI message injection for in-kernel irqchips
  2012-04-24 11:57     ` [PATCH v4] KVM: Introduce direct MSI message " Avi Kivity
@ 2012-04-24 12:07       ` Jan Kiszka
  2012-04-24 12:59         ` Avi Kivity
  0 siblings, 1 reply; 43+ messages in thread
From: Jan Kiszka @ 2012-04-24 12:07 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Marcelo Tosatti, kvm, Michael S. Tsirkin, Eric Northup

On 2012-04-24 13:57, Avi Kivity wrote:
> On 03/29/2012 09:14 PM, Jan Kiszka wrote:
>> Currently, MSI messages can only be injected to in-kernel irqchips by
>> defining a corresponding IRQ route for each message. This is not only
>> unhandy if the MSI messages are generated "on the fly" by user space,
>> IRQ routes are a limited resource that user space has to manage
>> carefully.
>>
>> By providing a direct injection path, we can both avoid using up limited
>> resources and simplify the necessary steps for user land.
>>
>>
> 
> Applied to queue (for 3.5).
> 
> Thanks for your patience.

Oops, that was now unexpectedly fast.

Extending and slightly reformatting the API docs I noticed some
inconsistency. Will send fixes soon. Can you fold this into my patch, or
just apply it on top?

Thanks,
Jan

--------8<--------

KVM: Reorder KVM_SIGNAL_MSI API documentation

4.61 is not free as two earlier sections share the same number.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
---
 Documentation/virtual/kvm/api.txt |   42 ++++++++++++++++++------------------
 1 files changed, 21 insertions(+), 21 deletions(-)

diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index ed27d1b..a155221 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -1482,27 +1482,6 @@ See KVM_ASSIGN_DEV_IRQ for the data structure.  The target device is specified
 by assigned_dev_id.  In the flags field, only KVM_DEV_ASSIGN_MASK_INTX is
 evaluated.
 
-4.61 KVM_SIGNAL_MSI
-
-Capability: KVM_CAP_SIGNAL_MSI
-Architectures: x86
-Type: vm ioctl
-Parameters: struct kvm_msi (in)
-Returns: >0 on delivery, 0 if guest blocked the MSI, and -1 on error
-
-Directly inject a MSI message. Only valid with in-kernel irqchip that handles
-MSI messages.
-
-struct kvm_msi {
-	__u32 address_lo;
-	__u32 address_hi;
-	__u32 data;
-	__u32 flags;
-	__u8  pad[16];
-};
-
-No flags are defined so far. The corresponding field must be 0.
-
 4.62 KVM_CREATE_SPAPR_TCE
 
 Capability: KVM_CAP_SPAPR_TCE
@@ -1710,6 +1689,27 @@ where the guest will clear the flag: when the soft lockup watchdog timer resets
 itself or when a soft lockup is detected.  This ioctl can be called any time
 after pausing the vcpu, but before it is resumed.
 
+4.71 KVM_SIGNAL_MSI
+
+Capability: KVM_CAP_SIGNAL_MSI
+Architectures: x86
+Type: vm ioctl
+Parameters: struct kvm_msi (in)
+Returns: >0 on delivery, 0 if guest blocked the MSI, and -1 on error
+
+Directly inject a MSI message. Only valid with in-kernel irqchip that handles
+MSI messages.
+
+struct kvm_msi {
+	__u32 address_lo;
+	__u32 address_hi;
+	__u32 data;
+	__u32 flags;
+	__u8  pad[16];
+};
+
+No flags are defined so far. The corresponding field must be 0.
+
 5. The kvm_run structure
 
 Application code obtains a pointer to the kvm_run structure by
-- 
1.7.3.4

^ permalink raw reply related	[flat|nested] 43+ messages in thread

* Re: [PATCH v4] KVM: Introduce direct MSI message injection for in-kernel irqchips
  2012-04-24 12:07       ` Jan Kiszka
@ 2012-04-24 12:59         ` Avi Kivity
  2012-04-24 13:24           ` Jan Kiszka
  0 siblings, 1 reply; 43+ messages in thread
From: Avi Kivity @ 2012-04-24 12:59 UTC (permalink / raw)
  To: Jan Kiszka; +Cc: Marcelo Tosatti, kvm, Michael S. Tsirkin, Eric Northup

On 04/24/2012 03:07 PM, Jan Kiszka wrote:
> On 2012-04-24 13:57, Avi Kivity wrote:
> > On 03/29/2012 09:14 PM, Jan Kiszka wrote:
> >> Currently, MSI messages can only be injected to in-kernel irqchips by
> >> defining a corresponding IRQ route for each message. This is not only
> >> unhandy if the MSI messages are generated "on the fly" by user space,
> >> IRQ routes are a limited resource that user space has to manage
> >> carefully.
> >>
> >> By providing a direct injection path, we can both avoid using up limited
> >> resources and simplify the necessary steps for user land.
> >>
> >>
> > 
> > Applied to queue (for 3.5).
> > 
> > Thanks for your patience.
>
> Oops, that was now unexpectedly fast.

I hope you don't mean the ~ 1 month timeframe for the whole thing.

> Extending and slightly reformatting the API docs I noticed some
> inconsistency. Will send fixes soon. Can you fold this into my patch, or
> just apply it on top?
>
>

Since it's just in queue, not next, will fold into parent patch.

-- 
error compiling committee.c: too many arguments to function


^ permalink raw reply	[flat|nested] 43+ messages in thread

* Re: [PATCH v4] KVM: Introduce direct MSI message injection for in-kernel irqchips
  2012-04-24 12:59         ` Avi Kivity
@ 2012-04-24 13:24           ` Jan Kiszka
  0 siblings, 0 replies; 43+ messages in thread
From: Jan Kiszka @ 2012-04-24 13:24 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Marcelo Tosatti, kvm, Michael S. Tsirkin, Eric Northup

On 2012-04-24 14:59, Avi Kivity wrote:
> On 04/24/2012 03:07 PM, Jan Kiszka wrote:
>> On 2012-04-24 13:57, Avi Kivity wrote:
>>> On 03/29/2012 09:14 PM, Jan Kiszka wrote:
>>>> Currently, MSI messages can only be injected to in-kernel irqchips by
>>>> defining a corresponding IRQ route for each message. This is not only
>>>> unhandy if the MSI messages are generated "on the fly" by user space,
>>>> IRQ routes are a limited resource that user space has to manage
>>>> carefully.
>>>>
>>>> By providing a direct injection path, we can both avoid using up limited
>>>> resources and simplify the necessary steps for user land.
>>>>
>>>>
>>>
>>> Applied to queue (for 3.5).
>>>
>>> Thanks for your patience.
>>
>> Oops, that was now unexpectedly fast.
> 
> I hope you don't mean the ~ 1 month timeframe for the whole thing.

Really the last phase. I was preparing for another round of discussions.

> 
>> Extending and slightly reformatting the API docs I noticed some
>> inconsistency. Will send fixes soon. Can you fold this into my patch, or
>> just apply it on top?
>>
>>
> 
> Since it's just in queue, not next, will fold into parent patch.
> 

Thanks,
Jan

-- 
Siemens AG, Corporate Technology, CT T DE IT 1
Corporate Competence Center Embedded Linux

^ permalink raw reply	[flat|nested] 43+ messages in thread

end of thread, other threads:[~2012-04-24 13:24 UTC | newest]

Thread overview: 43+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2012-03-28 17:47 [PATCH v2] KVM: Introduce direct MSI message injection for in-kernel irqchips Jan Kiszka
2012-03-28 17:52 ` Jan Kiszka
2012-03-28 19:58 ` Eric Northup
2012-03-28 20:21   ` Jan Kiszka
2012-03-29 15:39 ` Michael S. Tsirkin
2012-03-29 15:43   ` Jan Kiszka
2012-03-29 16:15 ` [PATCH v3] " Jan Kiszka
2012-03-29 16:46   ` Michael S. Tsirkin
2012-03-29 16:50     ` Jan Kiszka
2012-03-29 18:25   ` Jan Kiszka
2012-03-29 19:14   ` [PATCH v4] " Jan Kiszka
2012-03-29 19:41     ` Michael S. Tsirkin
2012-03-30  7:45       ` Jan Kiszka
2012-03-30 12:45         ` Michael S. Tsirkin
2012-04-03 16:27     ` Avi Kivity
2012-04-03 16:47       ` Jan Kiszka
2012-04-03 16:54         ` Avi Kivity
2012-04-03 17:24           ` Jan Kiszka
2012-04-04  8:47             ` Avi Kivity
2012-04-04  8:38         ` Michael S. Tsirkin
2012-04-04  8:44           ` Avi Kivity
2012-04-04  8:53             ` Michael S. Tsirkin
2012-04-04  9:22               ` Jan Kiszka
2012-04-04  9:36                 ` Avi Kivity
2012-04-04  9:38                   ` Jan Kiszka
2012-04-04  9:55                     ` Avi Kivity
2012-04-04 10:48                       ` Jan Kiszka
2012-04-04 11:50                         ` Avi Kivity
2012-04-04 12:01                           ` Jan Kiszka
2012-04-10 18:30       ` [PATCH] KVM: Introduce generic interrupt " Jan Kiszka
2012-04-23 14:44         ` Jan Kiszka
2012-04-23 15:17           ` Avi Kivity
2012-04-23 15:32         ` Avi Kivity
2012-04-23 15:55           ` Jan Kiszka
2012-04-24 11:54             ` Avi Kivity
2012-04-24 11:57     ` [PATCH v4] KVM: Introduce direct MSI message " Avi Kivity
2012-04-24 12:07       ` Jan Kiszka
2012-04-24 12:59         ` Avi Kivity
2012-04-24 13:24           ` Jan Kiszka
2012-04-11 22:10   ` [PATCH v3] " Marcelo Tosatti
2012-04-12  9:28     ` Jan Kiszka
2012-04-12 22:38       ` Marcelo Tosatti
2012-04-13 13:45         ` Jan Kiszka

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.