From: Julien Grall <julien.grall@citrix.com> To: xen-devel@lists.xensource.com Cc: Stefano.Stabellini@eu.citrix.com, qemu-devel@nongnu.org, julian.pidancet@citrix.com Subject: [Qemu-devel] [XEN][RFC PATCH 05/15] hvm: Modify hvm_op Date: Thu, 22 Mar 2012 15:59:26 +0000 [thread overview] Message-ID: <2b109c735483d07e494b0dc14256d1f93b150595.1332430810.git.julien.grall@citrix.com> (raw) In-Reply-To: <cover.1332430810.git.julien.grall@citrix.com> This patch remove useless hvm_param due to structure modification and bind the new hypercalls to handle ioreq servers and pci. Signed-off-by: Julien Grall <julien.grall@citrix.com> --- xen/arch/x86/hvm/hvm.c | 127 ++++++++++++++++++++++++++++++------------------ 1 files changed, 80 insertions(+), 47 deletions(-) diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c index 1b38762..3117ae1 100644 --- a/xen/arch/x86/hvm/hvm.c +++ b/xen/arch/x86/hvm/hvm.c @@ -4009,7 +4009,6 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg) case HVMOP_get_param: { struct xen_hvm_param a; - struct hvm_ioreq_page *iorp; struct domain *d; struct vcpu *v; @@ -4037,21 +4036,14 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg) switch ( a.index ) { - case HVM_PARAM_IOREQ_PFN: - iorp = &d->arch.hvm_domain.ioreq; - if ( (rc = hvm_set_ioreq_page(d, iorp, a.value)) != 0 ) - break; - spin_lock(&iorp->lock); - if ( iorp->va != NULL ) - /* Initialise evtchn port info if VCPUs already created. 
*/ - for_each_vcpu ( d, v ) - get_ioreq(v)->vp_eport = v->arch.hvm_vcpu.xen_port; - spin_unlock(&iorp->lock); - break; - case HVM_PARAM_BUFIOREQ_PFN: - iorp = &d->arch.hvm_domain.buf_ioreq; - rc = hvm_set_ioreq_page(d, iorp, a.value); + case HVM_PARAM_IO_PFN_FIRST: + rc = hvm_set_ioreq_page(d, &d->arch.hvm_domain.ioreq, a.value); + gdprintk(XENLOG_DEBUG, "Pfn first = 0x%lx\n", a.value); + gdprintk(XENLOG_DEBUG, "va = %p\n", d->arch.hvm_domain.ioreq.va); break; + case HVM_PARAM_IO_PFN_LAST: + if ( (d->arch.hvm_domain.params[HVM_PARAM_IO_PFN_LAST]) ) + rc = -EINVAL; case HVM_PARAM_CALLBACK_IRQ: hvm_set_callback_via(d, a.value); hvm_latch_shinfo_size(d); @@ -4096,38 +4088,6 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg) domctl_lock_release(); break; - case HVM_PARAM_DM_DOMAIN: - /* Not reflexive, as we must domain_pause(). */ - rc = -EPERM; - if ( curr_d == d ) - break; - - if ( a.value == DOMID_SELF ) - a.value = curr_d->domain_id; - - rc = 0; - domain_pause(d); /* safe to change per-vcpu xen_port */ - iorp = &d->arch.hvm_domain.ioreq; - for_each_vcpu ( d, v ) - { - int old_port, new_port; - new_port = alloc_unbound_xen_event_channel( - v, a.value, NULL); - if ( new_port < 0 ) - { - rc = new_port; - break; - } - /* xchg() ensures that only we free_xen_event_channel() */ - old_port = xchg(&v->arch.hvm_vcpu.xen_port, new_port); - free_xen_event_channel(v, old_port); - spin_lock(&iorp->lock); - if ( iorp->va != NULL ) - get_ioreq(v)->vp_eport = v->arch.hvm_vcpu.xen_port; - spin_unlock(&iorp->lock); - } - domain_unpause(d); - break; case HVM_PARAM_ACPI_S_STATE: /* Not reflexive, as we must domain_pause(). 
*/ rc = -EPERM; @@ -4650,6 +4610,79 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg) break; } + case HVMOP_register_ioreq_server: + { + struct xen_hvm_register_ioreq_server a; + + if ( copy_from_guest(&a, arg, 1) ) + return -EFAULT; + + rc = hvmop_register_ioreq_server(&a); + if ( rc != 0 ) + return rc; + + rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0; + break; + } + + case HVMOP_get_ioreq_server_buf_channel: + { + struct xen_hvm_get_ioreq_server_buf_channel a; + + if ( copy_from_guest(&a, arg, 1) ) + return -EFAULT; + + rc = hvmop_get_ioreq_server_buf_channel(&a); + if ( rc != 0 ) + return rc; + + rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0; + + break; + } + + case HVMOP_map_io_range_to_ioreq_server: + { + struct xen_hvm_map_io_range_to_ioreq_server a; + + if ( copy_from_guest(&a, arg, 1) ) + return -EFAULT; + + rc = hvmop_map_io_range_to_ioreq_server(&a); + if ( rc != 0 ) + return rc; + + break; + } + + case HVMOP_unmap_io_range_from_ioreq_server: + { + struct xen_hvm_unmap_io_range_from_ioreq_server a; + + if ( copy_from_guest(&a, arg, 1) ) + return -EFAULT; + + rc = hvmop_unmap_io_range_from_ioreq_server(&a); + if ( rc != 0 ) + return rc; + + break; + } + + case HVMOP_register_pcidev: + { + struct xen_hvm_register_pcidev a; + + if ( copy_from_guest(&a, arg, 1) ) + return -EFAULT; + + rc = hvm_register_pcidev(a.domid, a.id, a.bdf); + if ( rc != 0 ) + return rc; + + break; + } + default: { gdprintk(XENLOG_DEBUG, "Bad HVM op %ld.\n", op); -- Julien Grall
WARNING: multiple messages have this Message-ID (diff)
From: Julien Grall <julien.grall@citrix.com> To: xen-devel@lists.xensource.com Cc: Stefano.Stabellini@eu.citrix.com, qemu-devel@nongnu.org, julian.pidancet@citrix.com Subject: [XEN][RFC PATCH 05/15] hvm: Modify hvm_op Date: Thu, 22 Mar 2012 15:59:26 +0000 [thread overview] Message-ID: <2b109c735483d07e494b0dc14256d1f93b150595.1332430810.git.julien.grall@citrix.com> (raw) In-Reply-To: <cover.1332430810.git.julien.grall@citrix.com> This patch remove useless hvm_param due to structure modification and bind the new hypercalls to handle ioreq servers and pci. Signed-off-by: Julien Grall <julien.grall@citrix.com> --- xen/arch/x86/hvm/hvm.c | 127 ++++++++++++++++++++++++++++++------------------ 1 files changed, 80 insertions(+), 47 deletions(-) diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c index 1b38762..3117ae1 100644 --- a/xen/arch/x86/hvm/hvm.c +++ b/xen/arch/x86/hvm/hvm.c @@ -4009,7 +4009,6 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg) case HVMOP_get_param: { struct xen_hvm_param a; - struct hvm_ioreq_page *iorp; struct domain *d; struct vcpu *v; @@ -4037,21 +4036,14 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg) switch ( a.index ) { - case HVM_PARAM_IOREQ_PFN: - iorp = &d->arch.hvm_domain.ioreq; - if ( (rc = hvm_set_ioreq_page(d, iorp, a.value)) != 0 ) - break; - spin_lock(&iorp->lock); - if ( iorp->va != NULL ) - /* Initialise evtchn port info if VCPUs already created. 
*/ - for_each_vcpu ( d, v ) - get_ioreq(v)->vp_eport = v->arch.hvm_vcpu.xen_port; - spin_unlock(&iorp->lock); - break; - case HVM_PARAM_BUFIOREQ_PFN: - iorp = &d->arch.hvm_domain.buf_ioreq; - rc = hvm_set_ioreq_page(d, iorp, a.value); + case HVM_PARAM_IO_PFN_FIRST: + rc = hvm_set_ioreq_page(d, &d->arch.hvm_domain.ioreq, a.value); + gdprintk(XENLOG_DEBUG, "Pfn first = 0x%lx\n", a.value); + gdprintk(XENLOG_DEBUG, "va = %p\n", d->arch.hvm_domain.ioreq.va); break; + case HVM_PARAM_IO_PFN_LAST: + if ( (d->arch.hvm_domain.params[HVM_PARAM_IO_PFN_LAST]) ) + rc = -EINVAL; case HVM_PARAM_CALLBACK_IRQ: hvm_set_callback_via(d, a.value); hvm_latch_shinfo_size(d); @@ -4096,38 +4088,6 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg) domctl_lock_release(); break; - case HVM_PARAM_DM_DOMAIN: - /* Not reflexive, as we must domain_pause(). */ - rc = -EPERM; - if ( curr_d == d ) - break; - - if ( a.value == DOMID_SELF ) - a.value = curr_d->domain_id; - - rc = 0; - domain_pause(d); /* safe to change per-vcpu xen_port */ - iorp = &d->arch.hvm_domain.ioreq; - for_each_vcpu ( d, v ) - { - int old_port, new_port; - new_port = alloc_unbound_xen_event_channel( - v, a.value, NULL); - if ( new_port < 0 ) - { - rc = new_port; - break; - } - /* xchg() ensures that only we free_xen_event_channel() */ - old_port = xchg(&v->arch.hvm_vcpu.xen_port, new_port); - free_xen_event_channel(v, old_port); - spin_lock(&iorp->lock); - if ( iorp->va != NULL ) - get_ioreq(v)->vp_eport = v->arch.hvm_vcpu.xen_port; - spin_unlock(&iorp->lock); - } - domain_unpause(d); - break; case HVM_PARAM_ACPI_S_STATE: /* Not reflexive, as we must domain_pause(). 
*/ rc = -EPERM; @@ -4650,6 +4610,79 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg) break; } + case HVMOP_register_ioreq_server: + { + struct xen_hvm_register_ioreq_server a; + + if ( copy_from_guest(&a, arg, 1) ) + return -EFAULT; + + rc = hvmop_register_ioreq_server(&a); + if ( rc != 0 ) + return rc; + + rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0; + break; + } + + case HVMOP_get_ioreq_server_buf_channel: + { + struct xen_hvm_get_ioreq_server_buf_channel a; + + if ( copy_from_guest(&a, arg, 1) ) + return -EFAULT; + + rc = hvmop_get_ioreq_server_buf_channel(&a); + if ( rc != 0 ) + return rc; + + rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0; + + break; + } + + case HVMOP_map_io_range_to_ioreq_server: + { + struct xen_hvm_map_io_range_to_ioreq_server a; + + if ( copy_from_guest(&a, arg, 1) ) + return -EFAULT; + + rc = hvmop_map_io_range_to_ioreq_server(&a); + if ( rc != 0 ) + return rc; + + break; + } + + case HVMOP_unmap_io_range_from_ioreq_server: + { + struct xen_hvm_unmap_io_range_from_ioreq_server a; + + if ( copy_from_guest(&a, arg, 1) ) + return -EFAULT; + + rc = hvmop_unmap_io_range_from_ioreq_server(&a); + if ( rc != 0 ) + return rc; + + break; + } + + case HVMOP_register_pcidev: + { + struct xen_hvm_register_pcidev a; + + if ( copy_from_guest(&a, arg, 1) ) + return -EFAULT; + + rc = hvm_register_pcidev(a.domid, a.id, a.bdf); + if ( rc != 0 ) + return rc; + + break; + } + default: { gdprintk(XENLOG_DEBUG, "Bad HVM op %ld.\n", op); -- Julien Grall
next prev parent reply other threads:[~2012-03-22 16:02 UTC|newest] Thread overview: 80+ messages / expand[flat|nested] mbox.gz Atom feed top 2012-03-22 15:59 [Qemu-devel] [XEN][RFC PATCH 00/15] QEMU disaggregation Julien Grall 2012-03-22 15:59 ` Julien Grall 2012-03-22 15:59 ` [Qemu-devel] [XEN][RFC PATCH 01/15] hvm: Modify interface to support multiple ioreq server Julien Grall 2012-03-22 15:59 ` Julien Grall 2012-03-23 8:18 ` [Qemu-devel] [Xen-devel] " Jan Beulich 2012-03-23 8:18 ` Jan Beulich 2012-03-26 12:32 ` [Qemu-devel] [Xen-devel] " Julien Grall 2012-03-26 12:53 ` Jan Beulich 2012-03-26 12:53 ` [Qemu-devel] [Xen-devel] " Jan Beulich 2012-03-26 12:32 ` Julien Grall 2012-03-23 11:33 ` [Qemu-devel] [Xen-devel] " Ian Campbell 2012-03-23 11:33 ` Ian Campbell 2012-04-12 19:33 ` [Xen-devel] " Julien Grall 2012-04-02 17:12 ` [Qemu-devel] " Ian Jackson 2012-04-02 17:12 ` Ian Jackson 2012-03-22 15:59 ` [Qemu-devel] [XEN][RFC PATCH 02/15] hvm: Add functions to handle ioreq servers Julien Grall 2012-03-22 15:59 ` Julien Grall 2012-03-22 15:59 ` [Qemu-devel] [XEN][RFC PATCH 03/15] hvm-pci: Handle PCI config space in Xen Julien Grall 2012-03-22 15:59 ` Julien Grall 2012-03-23 8:29 ` [Qemu-devel] [Xen-devel] " Jan Beulich 2012-03-23 8:29 ` Jan Beulich 2012-03-26 12:20 ` Julien Grall 2012-03-26 12:20 ` [Qemu-devel] [Xen-devel] " Julien Grall 2012-03-26 12:52 ` Jan Beulich 2012-03-26 12:52 ` [Qemu-devel] [Xen-devel] " Jan Beulich 2012-03-22 15:59 ` [Qemu-devel] [XEN][RFC PATCH 04/15] hvm: Change initialization/destruction of an hvm Julien Grall 2012-03-22 15:59 ` Julien Grall 2012-03-22 15:59 ` Julien Grall [this message] 2012-03-22 15:59 ` [XEN][RFC PATCH 05/15] hvm: Modify hvm_op Julien Grall 2012-04-26 17:50 ` Christian Limpach 2012-03-22 15:59 ` [Qemu-devel] [XEN][RFC PATCH 06/15] hvm-io: IO refactoring with ioreq server Julien Grall 2012-03-22 15:59 ` Julien Grall 2012-03-22 15:59 ` [Qemu-devel] [XEN][RFC PATCH 07/15] hvm-io: send invalidate map cache to each 
registered servers Julien Grall 2012-03-22 15:59 ` Julien Grall 2012-03-22 15:59 ` [Qemu-devel] [XEN][RFC PATCH 08/15] hvm-io: Handle server in buffered IO Julien Grall 2012-03-22 15:59 ` Julien Grall 2012-03-22 15:59 ` [Qemu-devel] [XEN][RFC PATCH 09/15] xc: Add the hypercall for multiple servers Julien Grall 2012-03-22 15:59 ` Julien Grall 2012-03-23 11:37 ` [Qemu-devel] [Xen-devel] " Ian Campbell 2012-03-23 11:37 ` Ian Campbell 2012-03-22 15:59 ` [Qemu-devel] [XEN][RFC PATCH 10/15] xc: Add argument to allocate more special pages Julien Grall 2012-03-22 15:59 ` Julien Grall 2012-03-23 11:39 ` [Qemu-devel] [Xen-devel] " Ian Campbell 2012-03-23 11:39 ` Ian Campbell 2012-03-22 15:59 ` [Qemu-devel] [XEN][RFC PATCH 11/15] xc: Fix python build Julien Grall 2012-03-22 15:59 ` Julien Grall 2012-03-23 11:39 ` [Qemu-devel] [Xen-devel] " Ian Campbell 2012-03-23 11:39 ` Ian Campbell 2012-03-22 15:59 ` [Qemu-devel] [XEN][RFC PATCH 12/15] xl: Add interface to handle multiple device models Julien Grall 2012-03-22 15:59 ` Julien Grall 2012-03-23 11:47 ` [Qemu-devel] [Xen-devel] " Ian Campbell 2012-03-23 11:47 ` Ian Campbell 2012-03-23 13:06 ` [Qemu-devel] [Xen-devel] " Julien Grall 2012-03-23 13:06 ` Julien Grall 2012-03-23 13:55 ` [Qemu-devel] " Ian Campbell 2012-03-23 13:55 ` Ian Campbell 2012-03-22 15:59 ` [Qemu-devel] [XEN][RFC PATCH 13/15] xl-qmp: add device model id to qmp function Julien Grall 2012-03-22 15:59 ` Julien Grall 2012-03-22 15:59 ` [Qemu-devel] [XEN][RFC PATCH 14/15] xl-parsing: Parse the new option device_models Julien Grall 2012-03-22 15:59 ` Julien Grall 2012-04-02 17:11 ` [Qemu-devel] [Xen-devel] " Ian Jackson 2012-04-02 17:11 ` Ian Jackson 2012-04-03 10:05 ` [Qemu-devel] " Stefano Stabellini 2012-04-03 10:05 ` Stefano Stabellini 2012-04-03 13:31 ` [Qemu-devel] " Ian Jackson 2012-04-03 13:31 ` Ian Jackson 2012-04-03 13:54 ` [Qemu-devel] " Julien Grall 2012-04-03 13:54 ` Julien Grall 2012-04-03 14:02 ` [Qemu-devel] [Xen-devel] " Ian Jackson 2012-04-03 14:02 
` Ian Jackson 2012-04-03 14:16 ` [Qemu-devel] [Xen-devel] " Stefano Stabellini 2012-04-03 14:16 ` Stefano Stabellini 2012-04-03 14:23 ` [Qemu-devel] " Ian Jackson 2012-04-03 14:23 ` Ian Jackson 2012-03-22 15:59 ` [Qemu-devel] [XEN][RFC PATCH 15/15] xl: Launch and destroy all device models Julien Grall 2012-03-22 15:59 ` Julien Grall 2012-03-22 16:59 ` [Qemu-devel] [Xen-devel] [XEN][RFC PATCH 00/15] QEMU disaggregation Tim Deegan 2012-03-22 16:59 ` Tim Deegan 2012-03-23 13:44 ` [Qemu-devel] [Xen-devel] " Julien Grall 2012-03-23 13:44 ` Julien Grall
Reply instructions: You may reply publicly to this message via plain-text email using any one of the following methods: * Save the following mbox file, import it into your mail client, and reply-to-all from there: mbox Avoid top-posting and favor interleaved quoting: https://en.wikipedia.org/wiki/Posting_style#Interleaved_style * Reply using the --to, --cc, and --in-reply-to switches of git-send-email(1): git send-email \ --in-reply-to=2b109c735483d07e494b0dc14256d1f93b150595.1332430810.git.julien.grall@citrix.com \ --to=julien.grall@citrix.com \ --cc=Stefano.Stabellini@eu.citrix.com \ --cc=julian.pidancet@citrix.com \ --cc=qemu-devel@nongnu.org \ --cc=xen-devel@lists.xensource.com \ /path/to/YOUR_REPLY https://kernel.org/pub/software/scm/git/docs/git-send-email.html * If your mail client supports setting the In-Reply-To header via mailto: links, try the mailto: link. Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes; see the mirroring instructions on how to clone and mirror all data and code used by this external index.