From mboxrd@z Thu Jan 1 00:00:00 1970
Received: from eggs.gnu.org ([208.118.235.92]:57261) by lists.gnu.org with esmtp (Exim 4.71) (envelope-from ) id 1SAkSw-0006q3-C6 for qemu-devel@nongnu.org; Thu, 22 Mar 2012 12:02:22 -0400
Received: from Debian-exim by eggs.gnu.org with spam-scanned (Exim 4.71) (envelope-from ) id 1SAkSm-0004j1-9C for qemu-devel@nongnu.org; Thu, 22 Mar 2012 12:02:17 -0400
Received: from smtp02.citrix.com ([66.165.176.63]:53633) by eggs.gnu.org with esmtp (Exim 4.71) (envelope-from ) id 1SAkSm-0004eg-5K for qemu-devel@nongnu.org; Thu, 22 Mar 2012 12:02:08 -0400
From: Julien Grall
Date: Thu, 22 Mar 2012 15:59:29 +0000
Message-ID: 
In-Reply-To: 
References: 
MIME-Version: 1.0
Content-Type: text/plain
Subject: [Qemu-devel] [XEN][RFC PATCH 08/15] hvm-io: Handle server in buffered IO
List-Id: 
List-Unsubscribe: ,
List-Archive: 
List-Post: 
List-Help: 
List-Subscribe: ,
To: xen-devel@lists.xensource.com
Cc: Stefano.Stabellini@eu.citrix.com, qemu-devel@nongnu.org, julian.pidancet@citrix.com

As for normal IO, Xen walks the ranges to find which server is able to
handle the IO. There is a special case for IOREQ_TYPE_TIMEOFFSET: this
IO must be sent to all servers. For that, we introduce a new function,
hvm_buffered_io_send_to_server, which sends an IO to a specific server.

Signed-off-by: Julien Grall

---
 xen/arch/x86/hvm/io.c |   85 ++++++++++++++++++++++++++++++++++++-------------
 1 files changed, 63 insertions(+), 22 deletions(-)

diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index cd89ff6..d9df913 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -46,28 +46,17 @@
 #include 
 #include 
 
-int hvm_buffered_io_send(ioreq_t *p)
+static int hvm_buffered_io_send_to_server(ioreq_t *p, struct hvm_ioreq_server *s)
 {
     struct vcpu *v = current;
-    struct hvm_ioreq_page *iorp = &v->domain->arch.hvm_domain.buf_ioreq;
-    buffered_iopage_t *pg = iorp->va;
+    struct hvm_ioreq_page *iorp;
+    buffered_iopage_t *pg;
     buf_ioreq_t bp;
     /* Timeoffset sends 64b data, but no address. Use two consecutive slots. */
     int qw = 0;
 
-    /* Ensure buffered_iopage fits in a page */
-    BUILD_BUG_ON(sizeof(buffered_iopage_t) > PAGE_SIZE);
-
-    /*
-     * Return 0 for the cases we can't deal with:
-     *  - 'addr' is only a 20-bit field, so we cannot address beyond 1MB
-     *  - we cannot buffer accesses to guest memory buffers, as the guest
-     *    may expect the memory buffer to be synchronously accessed
-     *  - the count field is usually used with data_is_ptr and since we don't
-     *    support data_is_ptr we do not waste space for the count field either
-     */
-    if ( (p->addr > 0xffffful) || p->data_is_ptr || (p->count != 1) )
-        return 0;
+    iorp = &s->buf_ioreq;
+    pg = iorp->va;
 
     bp.type = p->type;
     bp.dir  = p->dir;
@@ -90,10 +79,10 @@ int hvm_buffered_io_send(ioreq_t *p)
         gdprintk(XENLOG_WARNING, "unexpected ioreq size: %u\n", p->size);
         return 0;
     }
-    
+
     bp.data = p->data;
     bp.addr = p->addr;
-    
+
     spin_lock(&iorp->lock);
 
     if ( (pg->write_pointer - pg->read_pointer) >=
@@ -103,10 +92,10 @@
         spin_unlock(&iorp->lock);
         return 0;
     }
-    
+
     memcpy(&pg->buf_ioreq[pg->write_pointer % IOREQ_BUFFER_SLOT_NUM],
            &bp, sizeof(bp));
-    
+
     if ( qw )
     {
         bp.data = p->data >> 32;
@@ -119,12 +108,64 @@
     pg->write_pointer += qw ? 2 : 1;
     notify_via_xen_event_channel(v->domain,
-            v->domain->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_EVTCHN]);
+                                 s->buf_ioreq_evtchn);
 
     spin_unlock(&iorp->lock);
-    
+
     return 1;
 }
 
+int hvm_buffered_io_send(ioreq_t *p)
+{
+    struct vcpu *v = current;
+    struct hvm_ioreq_server *s;
+    int rc = 1;
+
+    /* Ensure buffered_iopage fits in a page */
+    BUILD_BUG_ON(sizeof(buffered_iopage_t) > PAGE_SIZE);
+
+    /*
+     * Return 0 for the cases we can't deal with:
+     *  - 'addr' is only a 20-bit field, so we cannot address beyond 1MB
+     *  - we cannot buffer accesses to guest memory buffers, as the guest
+     *    may expect the memory buffer to be synchronously accessed
+     *  - the count field is usually used with data_is_ptr and since we don't
+     *    support data_is_ptr we do not waste space for the count field either
+     */
+    if ( (p->addr > 0xffffful) || p->data_is_ptr || (p->count != 1) )
+        return 0;
+
+    spin_lock(&v->domain->arch.hvm_domain.ioreq_server_lock);
+    if ( p->type == IOREQ_TYPE_TIMEOFFSET )
+    {
+        /* Send TIME OFFSET to all servers */
+        for ( s = v->domain->arch.hvm_domain.ioreq_server_list; s; s = s->next )
+            rc = hvm_buffered_io_send_to_server(p, s) && rc;
+    }
+    else
+    {
+        for ( s = v->domain->arch.hvm_domain.ioreq_server_list; s; s = s->next )
+        {
+            struct hvm_io_range *x = (p->type == IOREQ_TYPE_COPY)
+                ? s->mmio_range_list : s->portio_range_list;
+            for ( ; x; x = x->next )
+            {
+                if ( (p->addr >= x->s) && (p->addr <= x->e) )
+                {
+                    rc = hvm_buffered_io_send_to_server(p, s);
+                    spin_unlock(&v->domain->arch.hvm_domain.ioreq_server_lock);
+
+                    return rc;
+                }
+            }
+        }
+        rc = 0;
+    }
+
+    spin_unlock(&v->domain->arch.hvm_domain.ioreq_server_lock);
+
+    return rc;
+}
+
 void send_timeoffset_req(unsigned long timeoff)
 {
     ioreq_t p[1];
-- 
Julien Grall
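
As a companion to the diff above, here is a minimal, self-contained C
sketch of the dispatch policy the patch implements. Every name below
(buffered_io_send, ioreq_server, io_range, send_to_server) is a
simplified stand-in invented for illustration, not a real Xen type;
send_to_server abbreviates what hvm_buffered_io_send_to_server does
(enqueue into the server's buffered-IO page and notify it). The policy:
IOREQ_TYPE_TIMEOFFSET is broadcast to every registered server, anything
else goes to the first server whose range list covers the address.

#include <stdint.h>

enum io_type { IOREQ_TYPE_PIO, IOREQ_TYPE_COPY, IOREQ_TYPE_TIMEOFFSET };

struct io_range {
    uint64_t s, e;                      /* inclusive bounds, as in the patch */
    struct io_range *next;
};

struct ioreq_server {
    struct io_range *mmio_range_list;   /* consulted for IOREQ_TYPE_COPY */
    struct io_range *portio_range_list; /* consulted for port IO */
    struct ioreq_server *next;
};

struct ioreq {
    enum io_type type;
    uint64_t addr;
};

/* Stand-in for hvm_buffered_io_send_to_server(): enqueue + notify. */
static int send_to_server(struct ioreq *p, struct ioreq_server *s)
{
    (void)p; (void)s;
    return 1;                           /* pretend the enqueue succeeded */
}

static int buffered_io_send(struct ioreq *p, struct ioreq_server *head)
{
    struct ioreq_server *s;
    int rc = 1;

    if ( p->type == IOREQ_TYPE_TIMEOFFSET )
    {
        /* The time offset is global state: every server must see it. */
        for ( s = head; s; s = s->next )
            rc = send_to_server(p, s) && rc;
        return rc;
    }

    /* Otherwise deliver to the first server whose ranges cover the
     * address; MMIO and port IO are looked up in separate lists. */
    for ( s = head; s; s = s->next )
    {
        struct io_range *x = (p->type == IOREQ_TYPE_COPY)
            ? s->mmio_range_list : s->portio_range_list;
        for ( ; x; x = x->next )
            if ( p->addr >= x->s && p->addr <= x->e )
                return send_to_server(p, s);
    }

    return 0;                           /* no server claims this IO */
}

Note that first match wins, so overlapping ranges are effectively
resolved by registration order, as in the patch's loop.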
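
The per-server ring write inside hvm_buffered_io_send_to_server can be
sketched the same way. The types and sizes here (buf_ring, buf_slot,
SLOT_NUM, ring_send) are again made-up stand-ins for buffered_iopage_t
and IOREQ_BUFFER_SLOT_NUM. The sketch preserves two points from the
patch: the fullness test reserves two slots when the payload is 64-bit
(qw), and the read/write pointers wrap only when used as indices, so
their difference is always the fill level.

#include <stdint.h>
#include <string.h>

#define SLOT_NUM 80                 /* stand-in for IOREQ_BUFFER_SLOT_NUM */

struct buf_slot { uint32_t meta; uint32_t data; };

struct buf_ring {
    uint32_t read_pointer;          /* advanced by the consumer (server) */
    uint32_t write_pointer;         /* advanced by the producer (Xen) */
    struct buf_slot buf[SLOT_NUM];
};

/* Enqueue a request; a 64-bit payload (qw) takes two consecutive slots,
 * low half first, as the patch does for the timeoffset case. */
static int ring_send(struct buf_ring *pg, uint32_t meta, uint64_t data, int qw)
{
    struct buf_slot bp;

    /* Pointers wrap only on access, so this difference is the fill level;
     * reserve an extra slot when the payload needs two. */
    if ( (pg->write_pointer - pg->read_pointer) >= (uint32_t)(SLOT_NUM - qw) )
        return 0;                   /* the ring is (almost) full */

    bp.meta = meta;
    bp.data = (uint32_t)data;
    memcpy(&pg->buf[pg->write_pointer % SLOT_NUM], &bp, sizeof(bp));

    if ( qw )
    {
        bp.data = (uint32_t)(data >> 32);
        memcpy(&pg->buf[(pg->write_pointer + 1) % SLOT_NUM], &bp, sizeof(bp));
    }

    /* Publish both slots at once; in Xen this runs under iorp->lock and
     * is followed by notify_via_xen_event_channel(). */
    pg->write_pointer += qw ? 2 : 1;
    return 1;
}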