* [Xen-devel] [PATCH] x86/hvm: make hvmemul_virtual_to_linear()'s reps parameter optional
From: Jan Beulich @ 2019-07-01 11:59 UTC
To: xen-devel
Cc: Andrew Cooper, Paul Durrant, Wei Liu, Roger Pau Monne
A majority of callers want just a single iteration handled. Allow them to
express this by passing in a NULL pointer, instead of setting up a local
variable just to hold the "1" to pass in.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
Note that this conflicts with additions/changes made by "x86emul:
further work". Whatever goes in later will need re-basing.
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -788,14 +788,14 @@ static int hvmemul_virtual_to_linear(
enum x86_segment seg,
unsigned long offset,
unsigned int bytes_per_rep,
- unsigned long *reps,
+ unsigned long *reps_p,
enum hvm_access_type access_type,
struct hvm_emulate_ctxt *hvmemul_ctxt,
unsigned long *linear)
{
struct segment_register *reg;
int okay;
- unsigned long max_reps = 4096;
+ unsigned long reps = 1;
if ( seg == x86_seg_none )
{
@@ -803,62 +803,72 @@ static int hvmemul_virtual_to_linear(
return X86EMUL_OKAY;
}
- /*
- * If introspection has been enabled for this domain, and we're emulating
- * becase a vm_reply asked us to (i.e. not doing regular IO) reps should
- * be at most 1, since optimization might otherwise cause a single
- * vm_event being triggered for repeated writes to a whole page.
- */
- if ( unlikely(current->domain->arch.mem_access_emulate_each_rep) &&
- current->arch.vm_event->emulate_flags != 0 )
- max_reps = 1;
+ if ( reps_p )
+ {
+ unsigned long max_reps = 4096;
- /*
- * Clip repetitions to avoid overflow when multiplying by @bytes_per_rep.
- * The chosen maximum is very conservative but it's what we use in
- * hvmemul_linear_to_phys() so there is no point in using a larger value.
- */
- *reps = min_t(unsigned long, *reps, max_reps);
+ /*
+ * If introspection has been enabled for this domain, and we're
+ * emulating because a vm_reply asked us to (i.e. not doing regular IO)
+ * reps should be at most 1, since optimization might otherwise cause a
+ * single vm_event being triggered for repeated writes to a whole page.
+ */
+ if ( unlikely(current->domain->arch.mem_access_emulate_each_rep) &&
+ current->arch.vm_event->emulate_flags != 0 )
+ max_reps = 1;
+
+ /*
+ * Clip repetitions to avoid overflow when multiplying by
+ * @bytes_per_rep. The chosen maximum is very conservative but it's
+ * what we use in hvmemul_linear_to_phys() so there is no point in
+ * using a larger value.
+ */
+ reps = *reps_p = min_t(unsigned long, *reps_p, max_reps);
+ }
reg = hvmemul_get_seg_reg(seg, hvmemul_ctxt);
if ( IS_ERR(reg) )
return -PTR_ERR(reg);
- if ( (hvmemul_ctxt->ctxt.regs->eflags & X86_EFLAGS_DF) && (*reps > 1) )
+ if ( (hvmemul_ctxt->ctxt.regs->eflags & X86_EFLAGS_DF) && (reps > 1) )
{
/*
* x86_emulate() clips the repetition count to ensure we don't wrap
* the effective-address index register. Hence this assertion holds.
*/
- ASSERT(offset >= ((*reps - 1) * bytes_per_rep));
+ ASSERT(offset >= ((reps - 1) * bytes_per_rep));
okay = hvm_virtual_to_linear_addr(
- seg, reg, offset - (*reps - 1) * bytes_per_rep,
- *reps * bytes_per_rep, access_type,
+ seg, reg, offset - (reps - 1) * bytes_per_rep,
+ reps * bytes_per_rep, access_type,
hvmemul_get_seg_reg(x86_seg_cs, hvmemul_ctxt), linear);
- *linear += (*reps - 1) * bytes_per_rep;
+ *linear += (reps - 1) * bytes_per_rep;
if ( hvmemul_ctxt->ctxt.addr_size != 64 )
*linear = (uint32_t)*linear;
}
else
{
okay = hvm_virtual_to_linear_addr(
- seg, reg, offset, *reps * bytes_per_rep, access_type,
+ seg, reg, offset, reps * bytes_per_rep, access_type,
hvmemul_get_seg_reg(x86_seg_cs, hvmemul_ctxt), linear);
}
if ( okay )
return X86EMUL_OKAY;
- /* If this is a string operation, emulate each iteration separately. */
- if ( *reps != 1 )
- return X86EMUL_UNHANDLEABLE;
+ if ( reps_p )
+ {
+ /* If this is a string operation, emulate each iteration separately. */
+ if ( reps != 1 )
+ return X86EMUL_UNHANDLEABLE;
+
+ *reps_p = 0;
+ }
/*
* Leave exception injection to the caller for non-user segments: We
* neither know the exact error code to be used, nor can we easily
* determine the kind of exception (#GP or #TS) in that case.
*/
- *reps = 0;
if ( is_x86_user_segment(seg) )
x86_emul_hw_exception((seg == x86_seg_ss)
? TRAP_stack_error
@@ -1201,7 +1211,7 @@ static int __hvmemul_read(
enum hvm_access_type access_type,
struct hvm_emulate_ctxt *hvmemul_ctxt)
{
- unsigned long addr, reps = 1;
+ unsigned long addr;
uint32_t pfec = PFEC_page_present;
int rc;
@@ -1213,7 +1223,7 @@ static int __hvmemul_read(
pfec |= PFEC_insn_fetch;
rc = hvmemul_virtual_to_linear(
- seg, offset, bytes, &reps, access_type, hvmemul_ctxt, &addr);
+ seg, offset, bytes, NULL, access_type, hvmemul_ctxt, &addr);
if ( rc != X86EMUL_OKAY || !bytes )
return rc;
@@ -1294,7 +1304,7 @@ static int hvmemul_write(
{
struct hvm_emulate_ctxt *hvmemul_ctxt =
container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
- unsigned long addr, reps = 1;
+ unsigned long addr;
uint32_t pfec = PFEC_page_present | PFEC_write_access;
int rc;
void *mapping = NULL;
@@ -1305,7 +1315,7 @@ static int hvmemul_write(
pfec |= PFEC_user_mode;
rc = hvmemul_virtual_to_linear(
- seg, offset, bytes, &reps, hvm_access_write, hvmemul_ctxt, &addr);
+ seg, offset, bytes, NULL, hvm_access_write, hvmemul_ctxt, &addr);
if ( rc != X86EMUL_OKAY || !bytes )
return rc;
@@ -1336,13 +1346,13 @@ static int hvmemul_rmw(
{
struct hvm_emulate_ctxt *hvmemul_ctxt =
container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
- unsigned long addr, reps = 1;
+ unsigned long addr;
uint32_t pfec = PFEC_page_present | PFEC_write_access;
int rc;
void *mapping = NULL;
rc = hvmemul_virtual_to_linear(
- seg, offset, bytes, &reps, hvm_access_write, hvmemul_ctxt, &addr);
+ seg, offset, bytes, NULL, hvm_access_write, hvmemul_ctxt, &addr);
if ( rc != X86EMUL_OKAY || !bytes )
return rc;
@@ -1491,14 +1501,14 @@ static int hvmemul_cmpxchg(
struct hvm_emulate_ctxt *hvmemul_ctxt =
container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
struct vcpu *curr = current;
- unsigned long addr, reps = 1;
+ unsigned long addr;
uint32_t pfec = PFEC_page_present | PFEC_write_access;
struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
int rc;
void *mapping = NULL;
rc = hvmemul_virtual_to_linear(
- seg, offset, bytes, &reps, hvm_access_write, hvmemul_ctxt, &addr);
+ seg, offset, bytes, NULL, hvm_access_write, hvmemul_ctxt, &addr);
if ( rc != X86EMUL_OKAY )
return rc;
@@ -2292,11 +2302,11 @@ static int hvmemul_invlpg(
{
struct hvm_emulate_ctxt *hvmemul_ctxt =
container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
- unsigned long addr, reps = 1;
+ unsigned long addr;
int rc;
rc = hvmemul_virtual_to_linear(
- seg, offset, 1, &reps, hvm_access_none, hvmemul_ctxt, &addr);
+ seg, offset, 1, NULL, hvm_access_none, hvmemul_ctxt, &addr);
if ( rc == X86EMUL_EXCEPTION )
{
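For reference, a minimal compilable sketch (not Xen code; the names and
the stubbed translation are hypothetical) of the convention the patch
establishes: a NULL reps pointer means "exactly one iteration", while a
non-NULL pointer is both input (requested count) and output (clipped
count); the failure path, where the count is zeroed, is elided here.

    #include <stdio.h>

    #define MAX_REPS 4096UL   /* mirrors the conservative cap above */

    static int virt_to_linear(unsigned long offset,
                              unsigned int bytes_per_rep,
                              unsigned long *reps_p,
                              unsigned long *linear)
    {
        unsigned long reps = 1;

        if ( reps_p )
        {
            /* Clip to avoid overflow when multiplying by bytes_per_rep. */
            if ( *reps_p > MAX_REPS )
                *reps_p = MAX_REPS;
            reps = *reps_p;
        }

        /* Stand-in for the real segmented-to-linear translation. */
        *linear = offset + (reps - 1) * bytes_per_rep;
        return 0;
    }

    int main(void)
    {
        unsigned long addr, reps = 8;

        virt_to_linear(0x1000, 4, NULL, &addr);   /* single iteration */
        printf("single: addr=%#lx\n", addr);

        virt_to_linear(0x1000, 4, &reps, &addr);  /* string operation */
        printf("string: addr=%#lx reps=%lu\n", addr, reps);
        return 0;
    }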
* Re: [Xen-devel] [PATCH] x86/hvm: make hvmemul_virtual_to_linear()'s reps parameter optional
From: Alexandru Stefan ISAILA @ 2019-07-01 12:50 UTC
To: Jan Beulich, xen-devel
Cc: Andrew Cooper, Paul Durrant, Wei Liu, Roger Pau Monne
Useful patch, glad to have it on the table.
On 01.07.2019 14:59, Jan Beulich wrote:
> A majority of callers want just a single iteration handled. Allow them to
> express this by passing in a NULL pointer, instead of setting up a local
> variable just to hold the "1" to pass in.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Alexandru Isaila <aisaila@bitdefender.com>
> ---
> Note that this conflicts with additions/changes made by "x86emul:
> further work". Whatever goes in later will need re-basing.
>
* [Xen-devel] Ping: [PATCH] x86/hvm: make hvmemul_virtual_to_linear()'s reps parameter optional
From: Jan Beulich @ 2019-07-15 8:30 UTC
To: Paul Durrant; +Cc: xen-devel, Roger Pau Monne, Wei Liu, Andrew Cooper
On 01.07.2019 13:59, Jan Beulich wrote:
> A majority of callers want just a single iteration handled. Allow them to
> express this by passing in a NULL pointer, instead of setting up a local
> variable just to hold the "1" to pass in.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> ---
> Note that this conflicts with additions/changes made by "x86emul:
> further work". Whatever goes in later will need re-basing.
>
> [...]
* Re: [Xen-devel] [PATCH] x86/hvm: make hvmemul_virtual_to_linear()'s reps parameter optional
From: Paul Durrant @ 2019-07-15 8:51 UTC
To: 'Jan Beulich', xen-devel
Cc: Andrew Cooper, Wei Liu, Roger Pau Monne
> -----Original Message-----
> From: Jan Beulich <JBeulich@suse.com>
> Sent: 01 July 2019 13:00
> To: xen-devel@lists.xenproject.org
> Cc: Andrew Cooper <Andrew.Cooper3@citrix.com>; Wei Liu <wl@xen.org>; Roger Pau Monne
> <roger.pau@citrix.com>; Paul Durrant <Paul.Durrant@citrix.com>
> Subject: [PATCH] x86/hvm: make hvmemul_virtual_to_linear()'s reps parameter optional
>
> A majority of callers want just a single iteration handled. Allow them to
> express this by passing in a NULL pointer, instead of setting up a local
> variable just to hold the "1" to pass in.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Paul Durrant <paul.durrant@citrix.com>
> ---
> Note that this conflicts with additions/changes made by "x86emul:
> further work". Whatever goes in later will need re-basing.
>
> [...]