From: "Jan Beulich" <JBeulich@suse.com>
To: xen-devel <xen-devel@lists.xenproject.org>
Cc: Paul Durrant <paul.durrant@citrix.com>
Subject: [PATCH 2/2] x86/HVM: use available linear->phys translations in REP MOVS/STOS handling
Date: Wed, 08 Jun 2016 07:10:11 -0600 [thread overview]
Message-ID: <5758355302000078000F30FE@prv-mh.provo.novell.com> (raw)
In-Reply-To: <575833E402000078000F30E7@prv-mh.provo.novell.com>
[-- Attachment #1: Type: text/plain, Size: 4287 bytes --]
If we have the translation result available already, we should also use
it here. In my tests with Linux guests this eliminates all calls to
hvmemul_linear_to_phys() out of the two functions being changed.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1156,6 +1156,7 @@ static int hvmemul_rep_movs(
{
struct hvm_emulate_ctxt *hvmemul_ctxt =
container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
+ struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
unsigned long saddr, daddr, bytes;
paddr_t sgpa, dgpa;
uint32_t pfec = PFEC_page_present;
@@ -1178,16 +1179,43 @@ static int hvmemul_rep_movs(
if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3 )
pfec |= PFEC_user_mode;
- rc = hvmemul_linear_to_phys(
- saddr, &sgpa, bytes_per_rep, reps, pfec, hvmemul_ctxt);
- if ( rc != X86EMUL_OKAY )
- return rc;
+ bytes = PAGE_SIZE - (saddr & ~PAGE_MASK);
+ if ( vio->mmio_access.read_access &&
+ (vio->mmio_gva == (saddr & PAGE_MASK)) &&
+ bytes >= bytes_per_rep )
+ {
+ sgpa = pfn_to_paddr(vio->mmio_gpfn) | (saddr & ~PAGE_MASK);
+ if ( *reps * bytes_per_rep > bytes )
+ *reps = bytes / bytes_per_rep;
+ }
+ else
+ {
+ rc = hvmemul_linear_to_phys(saddr, &sgpa, bytes_per_rep, reps, pfec,
+ hvmemul_ctxt);
+ if ( rc != X86EMUL_OKAY )
+ return rc;
- rc = hvmemul_linear_to_phys(
- daddr, &dgpa, bytes_per_rep, reps,
- pfec | PFEC_write_access, hvmemul_ctxt);
- if ( rc != X86EMUL_OKAY )
- return rc;
+ latch_linear_to_phys(vio, saddr, sgpa, 0);
+ }
+
+ bytes = PAGE_SIZE - (daddr & ~PAGE_MASK);
+ if ( vio->mmio_access.write_access &&
+ (vio->mmio_gva == (daddr & PAGE_MASK)) &&
+ bytes >= bytes_per_rep )
+ {
+ dgpa = pfn_to_paddr(vio->mmio_gpfn) | (daddr & ~PAGE_MASK);
+ if ( *reps * bytes_per_rep > bytes )
+ *reps = bytes / bytes_per_rep;
+ }
+ else
+ {
+ rc = hvmemul_linear_to_phys(daddr, &dgpa, bytes_per_rep, reps,
+ pfec | PFEC_write_access, hvmemul_ctxt);
+ if ( rc != X86EMUL_OKAY )
+ return rc;
+
+ latch_linear_to_phys(vio, daddr, dgpa, 1);
+ }
/* Check for MMIO ops */
(void) get_gfn_query_unlocked(current->domain, sgpa >> PAGE_SHIFT, &sp2mt);
@@ -1279,25 +1307,40 @@ static int hvmemul_rep_stos(
{
struct hvm_emulate_ctxt *hvmemul_ctxt =
container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
- unsigned long addr;
+ struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
+ unsigned long addr, bytes;
paddr_t gpa;
p2m_type_t p2mt;
bool_t df = !!(ctxt->regs->eflags & X86_EFLAGS_DF);
int rc = hvmemul_virtual_to_linear(seg, offset, bytes_per_rep, reps,
hvm_access_write, hvmemul_ctxt, &addr);
- if ( rc == X86EMUL_OKAY )
+ if ( rc != X86EMUL_OKAY )
+ return rc;
+
+ bytes = PAGE_SIZE - (addr & ~PAGE_MASK);
+ if ( vio->mmio_access.write_access &&
+ (vio->mmio_gva == (addr & PAGE_MASK)) &&
+ bytes >= bytes_per_rep )
+ {
+ gpa = pfn_to_paddr(vio->mmio_gpfn) | (addr & ~PAGE_MASK);
+ if ( *reps * bytes_per_rep > bytes )
+ *reps = bytes / bytes_per_rep;
+ }
+ else
{
uint32_t pfec = PFEC_page_present | PFEC_write_access;
if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3 )
pfec |= PFEC_user_mode;
- rc = hvmemul_linear_to_phys(
- addr, &gpa, bytes_per_rep, reps, pfec, hvmemul_ctxt);
+ rc = hvmemul_linear_to_phys(addr, &gpa, bytes_per_rep, reps, pfec,
+ hvmemul_ctxt);
+ if ( rc != X86EMUL_OKAY )
+ return rc;
+
+ latch_linear_to_phys(vio, addr, gpa, 1);
}
- if ( rc != X86EMUL_OKAY )
- return rc;
/* Check for MMIO op */
(void)get_gfn_query_unlocked(current->domain, gpa >> PAGE_SHIFT, &p2mt);
[-- Attachment #2: x86-HVM-emul-REP-use-addresses.patch --]
[-- Type: text/plain, Size: 4361 bytes --]
x86/HVM: use available linear->phys translations in REP MOVS/STOS handling
If we have the translation result available already, we should also use
it here. In my tests with Linux guests this eliminates all calls to
hvmemul_linear_to_phys() out of the two functions being changed.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1156,6 +1156,7 @@ static int hvmemul_rep_movs(
{
struct hvm_emulate_ctxt *hvmemul_ctxt =
container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
+ struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
unsigned long saddr, daddr, bytes;
paddr_t sgpa, dgpa;
uint32_t pfec = PFEC_page_present;
@@ -1178,16 +1179,43 @@ static int hvmemul_rep_movs(
if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3 )
pfec |= PFEC_user_mode;
- rc = hvmemul_linear_to_phys(
- saddr, &sgpa, bytes_per_rep, reps, pfec, hvmemul_ctxt);
- if ( rc != X86EMUL_OKAY )
- return rc;
+ bytes = PAGE_SIZE - (saddr & ~PAGE_MASK);
+ if ( vio->mmio_access.read_access &&
+ (vio->mmio_gva == (saddr & PAGE_MASK)) &&
+ bytes >= bytes_per_rep )
+ {
+ sgpa = pfn_to_paddr(vio->mmio_gpfn) | (saddr & ~PAGE_MASK);
+ if ( *reps * bytes_per_rep > bytes )
+ *reps = bytes / bytes_per_rep;
+ }
+ else
+ {
+ rc = hvmemul_linear_to_phys(saddr, &sgpa, bytes_per_rep, reps, pfec,
+ hvmemul_ctxt);
+ if ( rc != X86EMUL_OKAY )
+ return rc;
- rc = hvmemul_linear_to_phys(
- daddr, &dgpa, bytes_per_rep, reps,
- pfec | PFEC_write_access, hvmemul_ctxt);
- if ( rc != X86EMUL_OKAY )
- return rc;
+ latch_linear_to_phys(vio, saddr, sgpa, 0);
+ }
+
+ bytes = PAGE_SIZE - (daddr & ~PAGE_MASK);
+ if ( vio->mmio_access.write_access &&
+ (vio->mmio_gva == (daddr & PAGE_MASK)) &&
+ bytes >= bytes_per_rep )
+ {
+ dgpa = pfn_to_paddr(vio->mmio_gpfn) | (daddr & ~PAGE_MASK);
+ if ( *reps * bytes_per_rep > bytes )
+ *reps = bytes / bytes_per_rep;
+ }
+ else
+ {
+ rc = hvmemul_linear_to_phys(daddr, &dgpa, bytes_per_rep, reps,
+ pfec | PFEC_write_access, hvmemul_ctxt);
+ if ( rc != X86EMUL_OKAY )
+ return rc;
+
+ latch_linear_to_phys(vio, daddr, dgpa, 1);
+ }
/* Check for MMIO ops */
(void) get_gfn_query_unlocked(current->domain, sgpa >> PAGE_SHIFT, &sp2mt);
@@ -1279,25 +1307,40 @@ static int hvmemul_rep_stos(
{
struct hvm_emulate_ctxt *hvmemul_ctxt =
container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
- unsigned long addr;
+ struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
+ unsigned long addr, bytes;
paddr_t gpa;
p2m_type_t p2mt;
bool_t df = !!(ctxt->regs->eflags & X86_EFLAGS_DF);
int rc = hvmemul_virtual_to_linear(seg, offset, bytes_per_rep, reps,
hvm_access_write, hvmemul_ctxt, &addr);
- if ( rc == X86EMUL_OKAY )
+ if ( rc != X86EMUL_OKAY )
+ return rc;
+
+ bytes = PAGE_SIZE - (addr & ~PAGE_MASK);
+ if ( vio->mmio_access.write_access &&
+ (vio->mmio_gva == (addr & PAGE_MASK)) &&
+ bytes >= bytes_per_rep )
+ {
+ gpa = pfn_to_paddr(vio->mmio_gpfn) | (addr & ~PAGE_MASK);
+ if ( *reps * bytes_per_rep > bytes )
+ *reps = bytes / bytes_per_rep;
+ }
+ else
{
uint32_t pfec = PFEC_page_present | PFEC_write_access;
if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3 )
pfec |= PFEC_user_mode;
- rc = hvmemul_linear_to_phys(
- addr, &gpa, bytes_per_rep, reps, pfec, hvmemul_ctxt);
+ rc = hvmemul_linear_to_phys(addr, &gpa, bytes_per_rep, reps, pfec,
+ hvmemul_ctxt);
+ if ( rc != X86EMUL_OKAY )
+ return rc;
+
+ latch_linear_to_phys(vio, addr, gpa, 1);
}
- if ( rc != X86EMUL_OKAY )
- return rc;
/* Check for MMIO op */
(void)get_gfn_query_unlocked(current->domain, gpa >> PAGE_SHIFT, &p2mt);
[-- Attachment #3: Type: text/plain, Size: 126 bytes --]
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel
next prev parent reply other threads:[~2016-06-08 13:10 UTC|newest]
Thread overview: 10+ messages / expand[flat|nested] mbox.gz Atom feed top
2016-06-08 13:04 [PATCH 0/2] x86/HVM: avoid full linear->phys translations more frequently Jan Beulich
2016-06-08 13:09 ` [PATCH 1/2] x86/HVM: latch linear->phys translation results Jan Beulich
2016-06-09 11:54 ` Andrew Cooper
2016-06-09 12:13 ` Jan Beulich
2016-06-14 10:29 ` Andrew Cooper
2016-06-20 13:12 ` Tim Deegan
2016-06-20 13:44 ` Andrew Cooper
2016-06-10 15:17 ` Paul Durrant
2016-06-08 13:10 ` Jan Beulich [this message]
2016-06-10 15:17 ` [PATCH 2/2] x86/HVM: use available linear->phys translations in REP MOVS/STOS handling Paul Durrant
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=5758355302000078000F30FE@prv-mh.provo.novell.com \
--to=jbeulich@suse.com \
--cc=paul.durrant@citrix.com \
--cc=xen-devel@lists.xenproject.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).