From: Paul Durrant
Subject: [PATCH v3 05/18] x86/hvm: remove multiple open coded 'chunking' loops
Date: Tue, 23 Jun 2015 11:39:44 +0100
Message-ID: <1435055997-30017-6-git-send-email-paul.durrant@citrix.com>
References: <1435055997-30017-1-git-send-email-paul.durrant@citrix.com>
In-Reply-To: <1435055997-30017-1-git-send-email-paul.durrant@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: Andrew Cooper, Paul Durrant, Keir Fraser, Jan Beulich
List-Id: xen-devel@lists.xenproject.org

...in hvmemul_read/write()

Add hvmemul_phys_mmio_access() and hvmemul_linear_mmio_access() functions
to reduce code duplication.

Signed-off-by: Paul Durrant
Cc: Keir Fraser
Cc: Jan Beulich
Cc: Andrew Cooper
---
 xen/arch/x86/hvm/emulate.c | 232 ++++++++++++++++++++++++--------------------
 1 file changed, 126 insertions(+), 106 deletions(-)

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index f3372fc..02796d0 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -540,6 +540,115 @@ static int hvmemul_virtual_to_linear(
     return X86EMUL_EXCEPTION;
 }
 
+static int hvmemul_phys_mmio_access(
+    paddr_t gpa, unsigned int size, uint8_t dir, uint8_t *buffer,
+    unsigned int *off)
+{
+    unsigned long one_rep = 1;
+    unsigned int chunk;
+    int rc = 0;
+
+    /* Accesses must fall within a page */
+    if ( (gpa & (PAGE_SIZE - 1)) + size > PAGE_SIZE )
+        return X86EMUL_UNHANDLEABLE;
+
+    /*
+     * hvmemul_do_io() cannot handle non-power-of-2 accesses or
+     * accesses larger than sizeof(long), so choose the highest power
+     * of 2 not exceeding sizeof(long) as the 'chunk' size.
+     */
+    chunk = 1 << (fls(size) - 1);
+    if ( chunk > sizeof (long) )
+        chunk = sizeof (long);
+
+    while ( size != 0 )
+    {
+        rc = hvmemul_do_mmio_buffer(gpa, &one_rep, chunk, dir, 0,
+                                    &buffer[*off]);
+        if ( rc != X86EMUL_OKAY )
+            break;
+
+        /* Advance to the next chunk */
+        gpa += chunk;
+        *off += chunk;
+        size -= chunk;
+
+        /*
+         * If the chunk now exceeds the remaining size, choose the next
+         * lowest power of 2 that will fit.
+         */
+        while ( chunk > size )
+            chunk >>= 1;
+    }
+
+    return rc;
+}
+
+static inline int hvmemul_phys_mmio_read(
+    paddr_t gpa, unsigned int size, uint8_t *buffer, unsigned int *off)
+{
+    return hvmemul_phys_mmio_access(gpa, size, IOREQ_READ, buffer,
+                                    off);
+}
+
+static inline int hvmemul_phys_mmio_write(
+    paddr_t gpa, unsigned int size, uint8_t *buffer, unsigned int *off)
+{
+    return hvmemul_phys_mmio_access(gpa, size, IOREQ_WRITE, buffer,
+                                    off);
+}
+
+static int hvmemul_linear_mmio_access(
+    unsigned long gla, unsigned int size, uint8_t dir, void *buffer,
+    uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt)
+{
+    unsigned long page_off = gla & (PAGE_SIZE - 1);
+    unsigned int chunk, buffer_off = 0;
+    paddr_t gpa;
+    unsigned long one_rep = 1;
+    int rc;
+
+    chunk = min_t(unsigned int, size, PAGE_SIZE - page_off);
+    rc = hvmemul_linear_to_phys(gla, &gpa, chunk, &one_rep, pfec,
+                                hvmemul_ctxt);
+    while ( rc == X86EMUL_OKAY )
+    {
+        rc = hvmemul_phys_mmio_access(gpa, chunk, dir, buffer,
+                                      &buffer_off);
+        if ( rc != X86EMUL_OKAY )
+            break;
+
+        gla += chunk;
+        size -= chunk;
+
+        if ( size == 0 )
+            break;
+
+        ASSERT((gla & (PAGE_SIZE - 1)) == 0);
+        chunk = min_t(unsigned int, size, PAGE_SIZE);
+        rc = hvmemul_linear_to_phys(gla, &gpa, chunk, &one_rep, pfec,
+                                    hvmemul_ctxt);
+    }
+
+    return rc;
+}
+
+static inline int hvmemul_linear_mmio_read(
+    unsigned long gla, unsigned int size, void *buffer,
+    uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt)
+{
+    return hvmemul_linear_mmio_access(gla, size, IOREQ_READ, buffer,
+                                      pfec, hvmemul_ctxt);
+}
+
+static inline int hvmemul_linear_mmio_write(
+    unsigned long gla, unsigned int size, void *buffer,
+    uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt)
+{
+    return hvmemul_linear_mmio_access(gla, size, IOREQ_WRITE, buffer,
+                                      pfec, hvmemul_ctxt);
+}
+
 static int __hvmemul_read(
     enum x86_segment seg,
     unsigned long offset,
@@ -549,52 +658,26 @@ static int __hvmemul_read(
     struct hvm_emulate_ctxt *hvmemul_ctxt)
 {
     struct vcpu *curr = current;
-    unsigned long addr, reps = 1;
-    unsigned int off, chunk = min(bytes, 1U << LONG_BYTEORDER);
+    unsigned long addr, one_rep = 1;
     uint32_t pfec = PFEC_page_present;
     struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
-    paddr_t gpa;
     int rc;
 
     rc = hvmemul_virtual_to_linear(
-        seg, offset, bytes, &reps, access_type, hvmemul_ctxt, &addr);
+        seg, offset, bytes, &one_rep, access_type, hvmemul_ctxt, &addr);
     if ( rc != X86EMUL_OKAY )
         return rc;
-    off = addr & (PAGE_SIZE - 1);
-    /*
-     * We only need to handle sizes actual instruction operands can have. All
-     * such sizes are either powers of 2 or the sum of two powers of 2. Thus
-     * picking as initial chunk size the largest power of 2 not greater than
-     * the total size will always result in only power-of-2 size requests
-     * issued to hvmemul_do_mmio() (hvmemul_do_io() rejects non-powers-of-2).
-     */
-    while ( chunk & (chunk - 1) )
-        chunk &= chunk - 1;
-    if ( off + bytes > PAGE_SIZE )
-        while ( off & (chunk - 1) )
-            chunk >>= 1;
 
     if ( ((access_type != hvm_access_insn_fetch
            ? vio->mmio_access.read_access
           : vio->mmio_access.insn_fetch)) &&
         (vio->mmio_gva == (addr & PAGE_MASK)) )
     {
-        gpa = (((paddr_t)vio->mmio_gpfn << PAGE_SHIFT) | off);
-        while ( (off + chunk) <= PAGE_SIZE )
-        {
-            rc = hvmemul_do_mmio_buffer(gpa, &reps, chunk, IOREQ_READ, 0,
-                                        p_data);
-            if ( rc != X86EMUL_OKAY || bytes == chunk )
-                return rc;
-            off += chunk;
-            gpa += chunk;
-            p_data += chunk;
-            bytes -= chunk;
-            if ( bytes < chunk )
-                chunk = bytes;
-        }
+        unsigned int off = 0;
+        paddr_t gpa = (((paddr_t)vio->mmio_gpfn << PAGE_SHIFT) |
+                       (addr & (PAGE_SIZE - 1)));
 
-        return X86EMUL_UNHANDLEABLE;
+        return hvmemul_phys_mmio_read(gpa, bytes, p_data, &off);
     }
 
     if ( (seg != x86_seg_none) &&
@@ -614,30 +697,9 @@ static int __hvmemul_read(
     case HVMCOPY_bad_gfn_to_mfn:
         if ( access_type == hvm_access_insn_fetch )
             return X86EMUL_UNHANDLEABLE;
-        rc = hvmemul_linear_to_phys(addr, &gpa, chunk, &reps, pfec,
-                                    hvmemul_ctxt);
-        while ( rc == X86EMUL_OKAY )
-        {
-            rc = hvmemul_do_mmio_buffer(gpa, &reps, chunk, IOREQ_READ, 0,
-                                        p_data);
-            if ( rc != X86EMUL_OKAY || bytes == chunk )
-                break;
-            addr += chunk;
-            off += chunk;
-            p_data += chunk;
-            bytes -= chunk;
-            if ( bytes < chunk )
-                chunk = bytes;
-            if ( off < PAGE_SIZE )
-                gpa += chunk;
-            else
-            {
-                rc = hvmemul_linear_to_phys(addr, &gpa, chunk, &reps, pfec,
-                                            hvmemul_ctxt);
-                off = 0;
-            }
-        }
-        return rc;
+
+        return hvmemul_linear_mmio_read(addr, bytes, p_data,
+                                        pfec, hvmemul_ctxt);
     case HVMCOPY_gfn_paged_out:
     case HVMCOPY_gfn_shared:
         return X86EMUL_RETRY;
@@ -702,44 +764,24 @@ static int hvmemul_write(
     struct hvm_emulate_ctxt *hvmemul_ctxt =
         container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
     struct vcpu *curr = current;
-    unsigned long addr, reps = 1;
-    unsigned int off, chunk = min(bytes, 1U << LONG_BYTEORDER);
+    unsigned long addr, one_rep = 1;
     uint32_t pfec = PFEC_page_present | PFEC_write_access;
     struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
-    paddr_t gpa;
     int rc;
 
     rc = hvmemul_virtual_to_linear(
-        seg, offset, bytes, &reps, hvm_access_write, hvmemul_ctxt, &addr);
+        seg, offset, bytes, &one_rep, hvm_access_write, hvmemul_ctxt, &addr);
     if ( rc != X86EMUL_OKAY )
         return rc;
-    off = addr & (PAGE_SIZE - 1);
-    /* See the respective comment in __hvmemul_read(). */
-    while ( chunk & (chunk - 1) )
-        chunk &= chunk - 1;
-    if ( off + bytes > PAGE_SIZE )
-        while ( off & (chunk - 1) )
-            chunk >>= 1;
 
     if ( vio->mmio_access.write_access &&
          (vio->mmio_gva == (addr & PAGE_MASK)) )
     {
-        gpa = (((paddr_t)vio->mmio_gpfn << PAGE_SHIFT) | off);
-        while ( (off + chunk) <= PAGE_SIZE )
-        {
-            rc = hvmemul_do_mmio_buffer(gpa, &reps, chunk, IOREQ_WRITE, 0,
-                                        p_data);
-            if ( rc != X86EMUL_OKAY || bytes == chunk )
-                return rc;
-            off += chunk;
-            gpa += chunk;
-            p_data += chunk;
-            bytes -= chunk;
-            if ( bytes < chunk )
-                chunk = bytes;
-        }
+        unsigned int off = 0;
+        paddr_t gpa = (((paddr_t)vio->mmio_gpfn << PAGE_SHIFT) |
+                       (addr & (PAGE_SIZE - 1)));
 
-        return X86EMUL_UNHANDLEABLE;
+        return hvmemul_phys_mmio_write(gpa, bytes, p_data, &off);
     }
 
     if ( (seg != x86_seg_none) &&
@@ -755,30 +797,8 @@ static int hvmemul_write(
     case HVMCOPY_bad_gva_to_gfn:
         return X86EMUL_EXCEPTION;
     case HVMCOPY_bad_gfn_to_mfn:
-        rc = hvmemul_linear_to_phys(addr, &gpa, chunk, &reps, pfec,
-                                    hvmemul_ctxt);
-        while ( rc == X86EMUL_OKAY )
-        {
-            rc = hvmemul_do_mmio_buffer(gpa, &reps, chunk, IOREQ_WRITE, 0,
-                                        p_data);
-            if ( rc != X86EMUL_OKAY || bytes == chunk )
-                break;
-            addr += chunk;
-            off += chunk;
-            p_data += chunk;
-            bytes -= chunk;
-            if ( bytes < chunk )
-                chunk = bytes;
-            if ( off < PAGE_SIZE )
-                gpa += chunk;
-            else
-            {
-                rc = hvmemul_linear_to_phys(addr, &gpa, chunk, &reps, pfec,
-                                            hvmemul_ctxt);
-                off = 0;
-            }
-        }
-        return rc;
+        return hvmemul_linear_mmio_write(addr, bytes, p_data,
+                                         pfec, hvmemul_ctxt);
     case HVMCOPY_gfn_paged_out:
     case HVMCOPY_gfn_shared:
         return X86EMUL_RETRY;
-- 
1.7.10.4
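
For illustration only (not part of the patch): the chunk-size walk that
hvmemul_phys_mmio_access() performs can be sketched as a small,
self-contained C program. The local fls() below is a plain-C stand-in for
Xen's fls(), printf() stands in for the hvmemul_do_mmio_buffer() call, and
the gpa/size values in main() are arbitrary examples chosen for the sketch.

#include <stdio.h>

/* 1-based index of the highest set bit; 0 when x == 0. */
static unsigned int fls(unsigned int x)
{
    unsigned int r = 0;

    while ( x )
    {
        x >>= 1;
        r++;
    }

    return r;
}

/* Mimic the chunking loop for a 'size'-byte access at 'gpa' (size > 0). */
static void walk(unsigned long gpa, unsigned int size)
{
    /* Highest power of 2 not exceeding size... */
    unsigned int chunk = 1u << (fls(size) - 1);

    /* ...and not exceeding sizeof(long), as hvmemul_do_io() requires. */
    if ( chunk > sizeof(long) )
        chunk = sizeof(long);

    while ( size != 0 )
    {
        printf("access %u byte(s) at %#lx\n", chunk, gpa);

        gpa += chunk;
        size -= chunk;

        /* Step down to the next power of 2 that still fits. */
        while ( chunk > size )
            chunk >>= 1;
    }
}

int main(void)
{
    walk(0xfed000f1UL, 6);
    return 0;
}

With these example values the 6-byte access is issued as a 4-byte chunk
followed by a 2-byte chunk, i.e. the "highest power of 2, then step down"
behaviour that the new helper relies on; splitting at page boundaries is
handled separately by hvmemul_linear_mmio_access() in the patch.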