From: Julien Grall
Date: Thu, 22 Mar 2012 15:59:26 +0000
Message-ID: <2b109c735483d07e494b0dc14256d1f93b150595.1332430810.git.julien.grall@citrix.com>
Subject: [XEN][RFC PATCH 05/15] hvm: Modify hvm_op
To: xen-devel@lists.xensource.com
Cc: Stefano.Stabellini@eu.citrix.com, qemu-devel@nongnu.org, julian.pidancet@citrix.com

This patch removes the HVM params made useless by the structure
modifications, and wires up the new hypercalls that handle ioreq
servers and PCI device registration.

Signed-off-by: Julien Grall
---
 xen/arch/x86/hvm/hvm.c | 127 ++++++++++++++++++++++++++++++------------------
 1 files changed, 80 insertions(+), 47 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 1b38762..3117ae1 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4009,7 +4009,6 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
     case HVMOP_get_param:
     {
         struct xen_hvm_param a;
-        struct hvm_ioreq_page *iorp;
         struct domain *d;
         struct vcpu *v;

@@ -4037,21 +4036,14 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
         switch ( a.index )
         {
-        case HVM_PARAM_IOREQ_PFN:
-            iorp = &d->arch.hvm_domain.ioreq;
-            if ( (rc = hvm_set_ioreq_page(d, iorp, a.value)) != 0 )
-                break;
-            spin_lock(&iorp->lock);
-            if ( iorp->va != NULL )
-                /* Initialise evtchn port info if VCPUs already created. */
-                for_each_vcpu ( d, v )
-                    get_ioreq(v)->vp_eport = v->arch.hvm_vcpu.xen_port;
-            spin_unlock(&iorp->lock);
-            break;
-        case HVM_PARAM_BUFIOREQ_PFN:
-            iorp = &d->arch.hvm_domain.buf_ioreq;
-            rc = hvm_set_ioreq_page(d, iorp, a.value);
+        case HVM_PARAM_IO_PFN_FIRST:
+            rc = hvm_set_ioreq_page(d, &d->arch.hvm_domain.ioreq, a.value);
+            gdprintk(XENLOG_DEBUG, "Pfn first = 0x%lx\n", a.value);
+            gdprintk(XENLOG_DEBUG, "va = %p\n", d->arch.hvm_domain.ioreq.va);
             break;
+        case HVM_PARAM_IO_PFN_LAST:
+            if ( (d->arch.hvm_domain.params[HVM_PARAM_IO_PFN_LAST]) )
+                rc = -EINVAL;
+            break;
         case HVM_PARAM_CALLBACK_IRQ:
             hvm_set_callback_via(d, a.value);
             hvm_latch_shinfo_size(d);
@@ -4096,38 +4088,6 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
             domctl_lock_release();
             break;

-        case HVM_PARAM_DM_DOMAIN:
-            /* Not reflexive, as we must domain_pause(). */
-            rc = -EPERM;
-            if ( curr_d == d )
-                break;
-
-            if ( a.value == DOMID_SELF )
-                a.value = curr_d->domain_id;
-
-            rc = 0;
-            domain_pause(d); /* safe to change per-vcpu xen_port */
-            iorp = &d->arch.hvm_domain.ioreq;
-            for_each_vcpu ( d, v )
-            {
-                int old_port, new_port;
-                new_port = alloc_unbound_xen_event_channel(
-                    v, a.value, NULL);
-                if ( new_port < 0 )
-                {
-                    rc = new_port;
-                    break;
-                }
-                /* xchg() ensures that only we free_xen_event_channel() */
-                old_port = xchg(&v->arch.hvm_vcpu.xen_port, new_port);
-                free_xen_event_channel(v, old_port);
-                spin_lock(&iorp->lock);
-                if ( iorp->va != NULL )
-                    get_ioreq(v)->vp_eport = v->arch.hvm_vcpu.xen_port;
-                spin_unlock(&iorp->lock);
-            }
-            domain_unpause(d);
-            break;
         case HVM_PARAM_ACPI_S_STATE:
             /* Not reflexive, as we must domain_pause(). */
             rc = -EPERM;
@@ -4650,6 +4610,79 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
         break;
     }

+    case HVMOP_register_ioreq_server:
+    {
+        struct xen_hvm_register_ioreq_server a;
+
+        if ( copy_from_guest(&a, arg, 1) )
+            return -EFAULT;
+
+        rc = hvmop_register_ioreq_server(&a);
+        if ( rc != 0 )
+            return rc;
+
+        rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
+        break;
+    }
+
+    case HVMOP_get_ioreq_server_buf_channel:
+    {
+        struct xen_hvm_get_ioreq_server_buf_channel a;
+
+        if ( copy_from_guest(&a, arg, 1) )
+            return -EFAULT;
+
+        rc = hvmop_get_ioreq_server_buf_channel(&a);
+        if ( rc != 0 )
+            return rc;
+
+        rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
+
+        break;
+    }
+
+    case HVMOP_map_io_range_to_ioreq_server:
+    {
+        struct xen_hvm_map_io_range_to_ioreq_server a;
+
+        if ( copy_from_guest(&a, arg, 1) )
+            return -EFAULT;
+
+        rc = hvmop_map_io_range_to_ioreq_server(&a);
+        if ( rc != 0 )
+            return rc;
+
+        break;
+    }
+
+    case HVMOP_unmap_io_range_from_ioreq_server:
+    {
+        struct xen_hvm_unmap_io_range_from_ioreq_server a;
+
+        if ( copy_from_guest(&a, arg, 1) )
+            return -EFAULT;
+
+        rc = hvmop_unmap_io_range_from_ioreq_server(&a);
+        if ( rc != 0 )
+            return rc;
+
+        break;
+    }
+
+    case HVMOP_register_pcidev:
+    {
+        struct xen_hvm_register_pcidev a;
+
+        if ( copy_from_guest(&a, arg, 1) )
+            return -EFAULT;
+
+        rc = hvm_register_pcidev(a.domid, a.id, a.bdf);
+        if ( rc != 0 )
+            return rc;
+
+        break;
+    }
+
     default:
     {
         gdprintk(XENLOG_DEBUG, "Bad HVM op %ld.\n", op);

--
Julien Grall
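
For reference, a device model would drive the new operations roughly as
follows. This is only a sketch, not part of the patch: it assumes the
environment's usual HYPERVISOR_hvm_op hypercall wrapper, and the field
names of the xen_hvm_* argument structures (domid, id, is_mmio, start,
end) are guesses inferred from how this patch uses them, not the
definitive ABI.

    #include <stdint.h>
    #include <xen/xen.h>
    #include <xen/hvm/hvm_op.h>
    /* HYPERVISOR_hvm_op() comes from the environment's hypercall header. */

    /*
     * Sketch: register an ioreq server for domain 'dom', then claim one
     * MMIO range for it.  All fields marked (assumed) are hypothetical.
     */
    static int demo_setup_ioreq_server(domid_t dom, uint64_t first,
                                       uint64_t last)
    {
        struct xen_hvm_register_ioreq_server reg;
        struct xen_hvm_map_io_range_to_ioreq_server range;
        int rc;

        reg.domid = dom;                  /* (assumed) target domain      */

        /* On success the hypervisor copies the new server id back. */
        rc = HYPERVISOR_hvm_op(HVMOP_register_ioreq_server, &reg);
        if ( rc != 0 )
            return rc;

        range.domid   = dom;              /* (assumed)                    */
        range.id      = reg.id;           /* (assumed) server handle      */
        range.is_mmio = 1;                /* (assumed) MMIO vs. port I/O  */
        range.start   = first;            /* (assumed) first address      */
        range.end     = last;             /* (assumed) last address       */

        return HYPERVISOR_hvm_op(HVMOP_map_io_range_to_ioreq_server, &range);
    }

HVMOP_unmap_io_range_from_ioreq_server and HVMOP_register_pcidev follow
the same copy-in/copy-out pattern in the hypervisor, so error handling
on the device-model side reduces to checking the hypercall's return
value in each case.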