From mboxrd@z Thu Jan 1 00:00:00 1970
From: Julien Grall
Date: Thu, 22 Mar 2012 15:59:28 +0000
Message-ID: <0d58b7fdcdacf584e9538f8967593dffcacbc84f.1332430810.git.julien.grall@citrix.com>
MIME-Version: 1.0
Content-Type: text/plain
Subject: [Qemu-devel] [XEN][RFC PATCH 07/15] hvm-io: send invalidate map cache to each registered servers
To: xen-devel@lists.xensource.com
Cc: Stefano.Stabellini@eu.citrix.com, qemu-devel@nongnu.org, julian.pidancet@citrix.com

When a mapcache invalidation occurs, Xen needs to send an
IOREQ_TYPE_INVALIDATE request to each registered server and wait until
all of them have completed the I/O.

We introduce a new function, hvm_wait_on_io, to wait until an I/O
request is completed.

Signed-off-by: Julien Grall
---
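For illustration only (not part of the patch), here is roughly what each
registered ioreq server does on its side of this handshake: pick the
request up from its shared ioreq page, drop its map cache, move the slot
to STATE_IORESP_READY, and notify the event channel that hvm_wait_on_io()
blocks on. The sketch assumes the public ioreq.h layout and the libxc
event-channel API of this era; handle_invalidate() and the
mapcache_invalidate callback are invented names for the example (QEMU
does the equivalent in its Xen ioreq handling).

#include <xenctrl.h>
#include <xen/hvm/ioreq.h>

/* Device-model side: complete one IOREQ_TYPE_INVALIDATE for one vcpu. */
static void handle_invalidate(xc_evtchn *xce, shared_iopage_t *iopage,
                              int vcpu, evtchn_port_t port,
                              void (*mapcache_invalidate)(void))
{
    ioreq_t *req = &iopage->vcpu_ioreq[vcpu];

    if ( req->state != STATE_IOREQ_READY )
        return;                      /* nothing posted for this vcpu */
    xen_rmb();                       /* read the request body after its state */

    if ( req->type == IOREQ_TYPE_INVALIDATE )
        mapcache_invalidate();       /* throw away cached guest RAM mappings */

    xen_wmb();                       /* publish any response data first */
    req->state = STATE_IORESP_READY; /* the state hvm_wait_on_io() waits for */
    xc_evtchn_notify(xce, port);     /* kick the vcpu back into the hypervisor */
}

The final notify is what completes the wait_on_xen_event_channel() call
on p->vp_eport for that server in the patch below.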
 xen/arch/x86/hvm/hvm.c |   41 ++++++++++++++++++++++++++++++++---------
 xen/arch/x86/hvm/io.c  |   15 +++++++++++++--
 2 files changed, 45 insertions(+), 11 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index e8ea42e..f57e3aa 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -291,16 +291,9 @@ void hvm_migrate_pirqs(struct vcpu *v)
     spin_unlock(&d->event_lock);
 }
 
-void hvm_do_resume(struct vcpu *v)
+static void hvm_wait_on_io(struct vcpu *v, ioreq_t *p)
 {
-    ioreq_t *p;
-
-    pt_restore_timer(v);
-
-    check_wakeup_from_wait();
-
     /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
-    p = get_ioreq(v);
     while ( p->state != STATE_IOREQ_NONE )
     {
         switch ( p->state )
@@ -310,7 +303,7 @@ void hvm_do_resume(struct vcpu *v)
             break;
         case STATE_IOREQ_READY:  /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
         case STATE_IOREQ_INPROCESS:
-            wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port,
+            wait_on_xen_event_channel(p->vp_eport,
                                       (p->state != STATE_IOREQ_READY) &&
                                       (p->state != STATE_IOREQ_INPROCESS));
             break;
@@ -320,6 +313,36 @@ void hvm_do_resume(struct vcpu *v)
             return; /* bail */
         }
     }
+}
+
+void hvm_do_resume(struct vcpu *v)
+{
+    ioreq_t *p;
+    struct hvm_ioreq_server *s;
+    shared_iopage_t *page;
+
+    pt_restore_timer(v);
+
+    check_wakeup_from_wait();
+
+    p = get_ioreq(v);
+
+    if ( p->type == IOREQ_TYPE_INVALIDATE )
+    {
+        spin_lock(&v->domain->arch.hvm_domain.ioreq_server_lock);
+        /* Wait all servers */
+        for ( s = v->domain->arch.hvm_domain.ioreq_server_list; s; s = s->next )
+        {
+            page = s->ioreq.va;
+            ASSERT((v == current) || spin_is_locked(&s->ioreq.lock));
+            ASSERT(s->ioreq.va != NULL);
+            v->arch.hvm_vcpu.ioreq = &s->ioreq;
+            hvm_wait_on_io(v, &page->vcpu_ioreq[v->vcpu_id]);
+        }
+        spin_unlock(&v->domain->arch.hvm_domain.ioreq_server_lock);
+    }
+    else
+        hvm_wait_on_io(v, p);
 
     /* Inject pending hw/sw trap */
     if (v->arch.hvm_vcpu.inject_trap != -1)
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index 41a2ede..cd89ff6 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -150,7 +150,8 @@ void send_timeoffset_req(unsigned long timeoff)
 void send_invalidate_req(void)
 {
     struct vcpu *v = current;
-    ioreq_t *p = get_ioreq(v);
+    ioreq_t p[1];
+    struct hvm_ioreq_server *s;
 
     if ( p->state != STATE_IOREQ_NONE )
     {
@@ -164,8 +165,18 @@ void send_invalidate_req(void)
     p->size = 4;
     p->dir = IOREQ_WRITE;
     p->data = ~0UL; /* flush all */
+    p->count = 0;
+    p->addr = 0;
+
+    spin_lock(&v->domain->arch.hvm_domain.ioreq_server_lock);
+    for (s = v->domain->arch.hvm_domain.ioreq_server_list; s; s = s->next)
+    {
+        set_ioreq(v, &s->ioreq, p);
+        (void)hvm_send_assist_req(v);
+    }
+    spin_unlock(&v->domain->arch.hvm_domain.ioreq_server_lock);
 
-    (void)hvm_send_assist_req(v);
+    set_ioreq(v, &v->domain->arch.hvm_domain.ioreq, p);
 }
 
 int handle_mmio(void)
-- 
Julien Grall