From: Julien Grall
Date: Thu, 22 Mar 2012 15:59:27 +0000
Subject: [Qemu-devel] [XEN][RFC PATCH 06/15] hvm-io: IO refactoring with ioreq server
Message-ID: <95848bd2077d52d98a9a2e09875c9552db12ad41.1332430810.git.julien.grall@citrix.com>
To: xen-devel@lists.xensource.com
Cc: Stefano.Stabellini@eu.citrix.com, qemu-devel@nongnu.org, julian.pidancet@citrix.com

This patch modifies several parts of the I/O handling. Each vcpu now
contains a pointer to the current I/O shared page. A default shared page is
created for the I/O handled by Xen itself. Each time Xen receives an ioreq,
it starts with the default shared page and switches to the right shared page
once it knows which server will handle the request. Moreover, any I/O that
can be handled neither by Xen nor by a server is discarded directly by Xen.

Signed-off-by: Julien Grall
---
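(Editor's note, not part of the patch.) As a concrete illustration of the
dispatch-and-discard policy described above, here is a minimal,
self-contained sketch: an ioreq is matched against each server's claimed
MMIO or port-I/O ranges, and if no server claims it, a read completes with
all-ones while a write is silently dropped. The types and helpers below
(io_range, ioreq_server, find_server, discard_io) are simplified stand-ins
for illustration only; they are not the real Xen structures or the code
added by this patch.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the structures the patch works with. */
struct io_range { uint64_t s, e; struct io_range *next; };

struct ioreq_server {
    struct io_range *mmio_range_list;
    struct io_range *portio_range_list;
    struct ioreq_server *next;
};

struct ioreq { uint64_t addr; int is_mmio; int is_write; uint32_t data; };

/* Return the first server whose claimed range covers the request address. */
static struct ioreq_server *find_server(struct ioreq_server *list,
                                        const struct ioreq *p)
{
    for ( struct ioreq_server *s = list; s; s = s->next )
    {
        struct io_range *x = p->is_mmio ? s->mmio_range_list
                                        : s->portio_range_list;
        for ( ; x; x = x->next )
            if ( p->addr >= x->s && p->addr <= x->e )
                return s;
    }
    return NULL;
}

/*
 * Fallback when no server handles the request: a read returns all-ones,
 * a write is silently ignored.
 */
static void discard_io(struct ioreq *p)
{
    if ( !p->is_write )
        p->data = ~0u;
}

int main(void)
{
    struct io_range vga = { .s = 0x3c0, .e = 0x3df };         /* VGA ports */
    struct ioreq_server qemu = { .portio_range_list = &vga };
    struct ioreq req = { .addr = 0x500, .is_mmio = 0, .is_write = 0 };

    if ( find_server(&qemu, &req) == NULL )
        discard_io(&req);                    /* port 0x500 is claimed by nobody */

    printf("unclaimed port read returns 0x%08x\n", (unsigned int)req.data);
    return 0;
}

The same fallback is what hvmemul_prepare_assist() in the first hunk below
applies when its server scan finds no matching range.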
 xen/arch/x86/hvm/emulate.c        |   56 +++++++++++++++++++++++++++++++++++++
 xen/arch/x86/hvm/hvm.c            |    5 ++-
 xen/include/asm-x86/hvm/support.h |   26 ++++++++++++++--
 3 files changed, 81 insertions(+), 6 deletions(-)

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 82efd1a..284c8b2 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -49,6 +49,55 @@ static void hvmtrace_io_assist(int is_mmio, ioreq_t *p)
     trace_var(event, 0/*!cycles*/, size, buffer);
 }
 
+static int hvmemul_prepare_assist(ioreq_t *p)
+{
+    struct vcpu *v = current;
+    struct hvm_ioreq_server *s;
+    int i;
+    int sign;
+    uint32_t data = ~0;
+
+    if ( p->type == IOREQ_TYPE_PCI_CONFIG )
+        return X86EMUL_UNHANDLEABLE;
+
+    spin_lock(&v->domain->arch.hvm_domain.ioreq_server_lock);
+    for ( s = v->domain->arch.hvm_domain.ioreq_server_list; s; s = s->next )
+    {
+        struct hvm_io_range *x = (p->type == IOREQ_TYPE_COPY)
+            ? s->mmio_range_list : s->portio_range_list;
+
+        for ( ; x; x = x->next )
+        {
+            if ( (p->addr >= x->s) && (p->addr <= x->e) )
+                goto done_server_scan;
+        }
+    }
+
+    spin_unlock(&v->domain->arch.hvm_domain.ioreq_server_lock);
+
+    sign = p->df ? -1 : 1;
+
+    if ( p->dir != IOREQ_WRITE )
+    {
+        if ( !p->data_is_ptr )
+            p->data = ~0;
+        else
+        {
+            for ( i = 0; i < p->count; i++ )
+                hvm_copy_to_guest_phys(p->data + sign * i * p->size, &data,
+                                       p->size);
+        }
+    }
+
+    return X86EMUL_OKAY;
+
+  done_server_scan:
+    set_ioreq(v, &s->ioreq, p);
+    spin_unlock(&v->domain->arch.hvm_domain.ioreq_server_lock);
+
+    return X86EMUL_UNHANDLEABLE;
+}
+
 static int hvmemul_do_io(
     int is_mmio, paddr_t addr, unsigned long *reps, int size,
     paddr_t ram_gpa, int dir, int df, void *p_data)
@@ -182,6 +231,10 @@ static int hvmemul_do_io(
         (p_data == NULL) ? HVMIO_dispatched : HVMIO_awaiting_completion;
     vio->io_size = size;
 
+    /* Use the default shared page */
+    current->arch.hvm_vcpu.ioreq = &curr->domain->arch.hvm_domain.ioreq;
+    p = get_ioreq(current);
+
     p->dir = dir;
     p->data_is_ptr = value_is_ptr;
     p->type = is_mmio ? IOREQ_TYPE_COPY : IOREQ_TYPE_PIO;
@@ -204,6 +257,9 @@ static int hvmemul_do_io(
         rc = hvm_portio_intercept(p);
     }
 
+    if ( rc == X86EMUL_UNHANDLEABLE )
+        rc = hvmemul_prepare_assist(p);
+
     switch ( rc )
     {
     case X86EMUL_OKAY:
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 3117ae1..e8ea42e 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1204,14 +1204,15 @@ bool_t hvm_send_assist_req(struct vcpu *v)
         return 0;
     }
 
-    prepare_wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port);
+    prepare_wait_on_xen_event_channel(p->vp_eport);
 
     /*
     * Following happens /after/ blocking and setting up ioreq contents.
     * prepare_wait_on_xen_event_channel() is an implicit barrier.
     */
     p->state = STATE_IOREQ_READY;
-    notify_via_xen_event_channel(v->domain, v->arch.hvm_vcpu.xen_port);
+
+    notify_via_xen_event_channel(v->domain, p->vp_eport);
 
     return 1;
 }
diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
index f9b102f..44acd37 100644
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -29,13 +29,31 @@
 
 static inline ioreq_t *get_ioreq(struct vcpu *v)
 {
-    struct domain *d = v->domain;
-    shared_iopage_t *p = d->arch.hvm_domain.ioreq.va;
-    ASSERT((v == current) || spin_is_locked(&d->arch.hvm_domain.ioreq.lock));
-    ASSERT(d->arch.hvm_domain.ioreq.va != NULL);
+    shared_iopage_t *p = v->arch.hvm_vcpu.ioreq->va;
+    ASSERT((v == current) || spin_is_locked(&v->arch.hvm_vcpu.ioreq->lock));
+    ASSERT(v->arch.hvm_vcpu.ioreq->va != NULL);
     return &p->vcpu_ioreq[v->vcpu_id];
 }
 
+static inline void set_ioreq(struct vcpu *v, struct hvm_ioreq_page *page,
+                             ioreq_t *p)
+{
+    ioreq_t *np;
+
+    v->arch.hvm_vcpu.ioreq = page;
+    spin_lock(&v->arch.hvm_vcpu.ioreq->lock);
+    np = get_ioreq(v);
+    np->dir = p->dir;
+    np->data_is_ptr = p->data_is_ptr;
+    np->type = p->type;
+    np->size = p->size;
+    np->addr = p->addr;
+    np->count = p->count;
+    np->df = p->df;
+    np->data = p->data;
+    spin_unlock(&v->arch.hvm_vcpu.ioreq->lock);
+}
+
 #define HVM_DELIVER_NO_ERROR_CODE -1
 
 #ifndef NDEBUG
--
Julien Grall