* [PATCH] x86/HVM: rename mmio_gva field to mmio_gla
@ 2016-06-14 14:44 Jan Beulich
2016-06-14 14:48 ` Andrew Cooper
2016-06-14 14:50 ` Paul Durrant
0 siblings, 2 replies; 3+ messages in thread
From: Jan Beulich @ 2016-06-14 14:44 UTC (permalink / raw)
To: xen-devel; +Cc: Andrew Cooper, Paul Durrant
[-- Attachment #1: Type: text/plain, Size: 4557 bytes --]
... to correctly reflect its purpose. To make things consistent also
rename handle_mmio_with_translation()'s respective parameter (but don't
touch sh_page_fault(), as renaming its parameter would require quite a
few more changes there).
Suggested-by: Andrew Cooper <andrew.cooper3@citrix.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -684,7 +684,7 @@ static void latch_linear_to_phys(struct
if ( vio->mmio_access.gla_valid )
return;
- vio->mmio_gva = gla & PAGE_MASK;
+ vio->mmio_gla = gla & PAGE_MASK;
vio->mmio_gpfn = PFN_DOWN(gpa);
vio->mmio_access = (struct npfec){ .gla_valid = 1,
.read_access = 1,
@@ -782,7 +782,7 @@ static int __hvmemul_read(
if ( ((access_type != hvm_access_insn_fetch
? vio->mmio_access.read_access
: vio->mmio_access.insn_fetch)) &&
- (vio->mmio_gva == (addr & PAGE_MASK)) )
+ (vio->mmio_gla == (addr & PAGE_MASK)) )
return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec, hvmemul_ctxt, 1);
if ( (seg != x86_seg_none) &&
@@ -889,7 +889,7 @@ static int hvmemul_write(
return rc;
if ( vio->mmio_access.write_access &&
- (vio->mmio_gva == (addr & PAGE_MASK)) )
+ (vio->mmio_gla == (addr & PAGE_MASK)) )
return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec, hvmemul_ctxt, 1);
if ( (seg != x86_seg_none) &&
@@ -1181,7 +1181,7 @@ static int hvmemul_rep_movs(
bytes = PAGE_SIZE - (saddr & ~PAGE_MASK);
if ( vio->mmio_access.read_access &&
- (vio->mmio_gva == (saddr & PAGE_MASK)) &&
+ (vio->mmio_gla == (saddr & PAGE_MASK)) &&
bytes >= bytes_per_rep )
{
sgpa = pfn_to_paddr(vio->mmio_gpfn) | (saddr & ~PAGE_MASK);
@@ -1200,7 +1200,7 @@ static int hvmemul_rep_movs(
bytes = PAGE_SIZE - (daddr & ~PAGE_MASK);
if ( vio->mmio_access.write_access &&
- (vio->mmio_gva == (daddr & PAGE_MASK)) &&
+ (vio->mmio_gla == (daddr & PAGE_MASK)) &&
bytes >= bytes_per_rep )
{
dgpa = pfn_to_paddr(vio->mmio_gpfn) | (daddr & ~PAGE_MASK);
@@ -1320,7 +1320,7 @@ static int hvmemul_rep_stos(
bytes = PAGE_SIZE - (addr & ~PAGE_MASK);
if ( vio->mmio_access.write_access &&
- (vio->mmio_gva == (addr & PAGE_MASK)) &&
+ (vio->mmio_gla == (addr & PAGE_MASK)) &&
bytes >= bytes_per_rep )
{
gpa = pfn_to_paddr(vio->mmio_gpfn) | (addr & ~PAGE_MASK);
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -114,7 +114,7 @@ int handle_mmio(void)
return 1;
}
-int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn,
+int handle_mmio_with_translation(unsigned long gla, unsigned long gpfn,
struct npfec access)
{
struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
@@ -122,7 +122,7 @@ int handle_mmio_with_translation(unsigne
vio->mmio_access = access.gla_valid &&
access.kind == npfec_kind_with_gla
? access : (struct npfec){};
- vio->mmio_gva = gva & PAGE_MASK;
+ vio->mmio_gla = gla & PAGE_MASK;
vio->mmio_gpfn = gpfn;
return handle_mmio();
}
--- a/xen/include/asm-x86/hvm/io.h
+++ b/xen/include/asm-x86/hvm/io.h
@@ -119,7 +119,7 @@ void relocate_portio_handler(
void send_timeoffset_req(unsigned long timeoff);
void send_invalidate_req(void);
int handle_mmio(void);
-int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn,
+int handle_mmio_with_translation(unsigned long gla, unsigned long gpfn,
struct npfec);
int handle_pio(uint16_t port, unsigned int size, int dir);
void hvm_interrupt_post(struct vcpu *v, int vector, int type);
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -60,12 +60,12 @@ struct hvm_vcpu_io {
/*
* HVM emulation:
- * Virtual address @mmio_gva maps to MMIO physical frame @mmio_gpfn.
+ * Linear address @mmio_gla maps to MMIO physical frame @mmio_gpfn.
* The latter is known to be an MMIO frame (not RAM).
* This translation is only valid for accesses as per @mmio_access.
*/
struct npfec mmio_access;
- unsigned long mmio_gva;
+ unsigned long mmio_gla;
unsigned long mmio_gpfn;
/*
[-- Attachment #2: x86-HVM-mmio_gla.patch --]
[-- Type: text/plain, Size: 4599 bytes --]
x86/HVM: rename mmio_gva field to mmio_gla
... to correctly reflect its purpose. To make things consistent also
rename handle_mmio_with_translation()'s respective parameter (but don't
touch sh_page_fault(), as renaming its parameter would require quite a
few more changes there).
Suggested-by: Andrew Cooper <andrew.cooper3@citrix.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -684,7 +684,7 @@ static void latch_linear_to_phys(struct
if ( vio->mmio_access.gla_valid )
return;
- vio->mmio_gva = gla & PAGE_MASK;
+ vio->mmio_gla = gla & PAGE_MASK;
vio->mmio_gpfn = PFN_DOWN(gpa);
vio->mmio_access = (struct npfec){ .gla_valid = 1,
.read_access = 1,
@@ -782,7 +782,7 @@ static int __hvmemul_read(
if ( ((access_type != hvm_access_insn_fetch
? vio->mmio_access.read_access
: vio->mmio_access.insn_fetch)) &&
- (vio->mmio_gva == (addr & PAGE_MASK)) )
+ (vio->mmio_gla == (addr & PAGE_MASK)) )
return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec, hvmemul_ctxt, 1);
if ( (seg != x86_seg_none) &&
@@ -889,7 +889,7 @@ static int hvmemul_write(
return rc;
if ( vio->mmio_access.write_access &&
- (vio->mmio_gva == (addr & PAGE_MASK)) )
+ (vio->mmio_gla == (addr & PAGE_MASK)) )
return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec, hvmemul_ctxt, 1);
if ( (seg != x86_seg_none) &&
@@ -1181,7 +1181,7 @@ static int hvmemul_rep_movs(
bytes = PAGE_SIZE - (saddr & ~PAGE_MASK);
if ( vio->mmio_access.read_access &&
- (vio->mmio_gva == (saddr & PAGE_MASK)) &&
+ (vio->mmio_gla == (saddr & PAGE_MASK)) &&
bytes >= bytes_per_rep )
{
sgpa = pfn_to_paddr(vio->mmio_gpfn) | (saddr & ~PAGE_MASK);
@@ -1200,7 +1200,7 @@ static int hvmemul_rep_movs(
bytes = PAGE_SIZE - (daddr & ~PAGE_MASK);
if ( vio->mmio_access.write_access &&
- (vio->mmio_gva == (daddr & PAGE_MASK)) &&
+ (vio->mmio_gla == (daddr & PAGE_MASK)) &&
bytes >= bytes_per_rep )
{
dgpa = pfn_to_paddr(vio->mmio_gpfn) | (daddr & ~PAGE_MASK);
@@ -1320,7 +1320,7 @@ static int hvmemul_rep_stos(
bytes = PAGE_SIZE - (addr & ~PAGE_MASK);
if ( vio->mmio_access.write_access &&
- (vio->mmio_gva == (addr & PAGE_MASK)) &&
+ (vio->mmio_gla == (addr & PAGE_MASK)) &&
bytes >= bytes_per_rep )
{
gpa = pfn_to_paddr(vio->mmio_gpfn) | (addr & ~PAGE_MASK);
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -114,7 +114,7 @@ int handle_mmio(void)
return 1;
}
-int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn,
+int handle_mmio_with_translation(unsigned long gla, unsigned long gpfn,
struct npfec access)
{
struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
@@ -122,7 +122,7 @@ int handle_mmio_with_translation(unsigne
vio->mmio_access = access.gla_valid &&
access.kind == npfec_kind_with_gla
? access : (struct npfec){};
- vio->mmio_gva = gva & PAGE_MASK;
+ vio->mmio_gla = gla & PAGE_MASK;
vio->mmio_gpfn = gpfn;
return handle_mmio();
}
--- a/xen/include/asm-x86/hvm/io.h
+++ b/xen/include/asm-x86/hvm/io.h
@@ -119,7 +119,7 @@ void relocate_portio_handler(
void send_timeoffset_req(unsigned long timeoff);
void send_invalidate_req(void);
int handle_mmio(void);
-int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn,
+int handle_mmio_with_translation(unsigned long gla, unsigned long gpfn,
struct npfec);
int handle_pio(uint16_t port, unsigned int size, int dir);
void hvm_interrupt_post(struct vcpu *v, int vector, int type);
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -60,12 +60,12 @@ struct hvm_vcpu_io {
/*
* HVM emulation:
- * Virtual address @mmio_gva maps to MMIO physical frame @mmio_gpfn.
+ * Linear address @mmio_gla maps to MMIO physical frame @mmio_gpfn.
* The latter is known to be an MMIO frame (not RAM).
* This translation is only valid for accesses as per @mmio_access.
*/
struct npfec mmio_access;
- unsigned long mmio_gva;
+ unsigned long mmio_gla;
unsigned long mmio_gpfn;
/*
[-- Attachment #3: Type: text/plain, Size: 126 bytes --]
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 3+ messages in thread
* Re: [PATCH] x86/HVM: rename mmio_gva field to mmio_gla
2016-06-14 14:44 [PATCH] x86/HVM: rename mmio_gva field to mmio_gla Jan Beulich
@ 2016-06-14 14:48 ` Andrew Cooper
2016-06-14 14:50 ` Paul Durrant
1 sibling, 0 replies; 3+ messages in thread
From: Andrew Cooper @ 2016-06-14 14:48 UTC (permalink / raw)
To: Jan Beulich, xen-devel; +Cc: Paul Durrant
On 14/06/16 15:44, Jan Beulich wrote:
> ... to correctly reflect its purpose. To make things consistent also
> rename handle_mmio_with_translation()'s respective parameter (but don't
> touch sh_page_fault(), as renaming its parameter would require quite a
> few more changes there).
>
> Suggested-by: Andrew Cooper <andrew.cooper3@citrix.com>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 3+ messages in thread
* Re: [PATCH] x86/HVM: rename mmio_gva field to mmio_gla
2016-06-14 14:44 [PATCH] x86/HVM: rename mmio_gva field to mmio_gla Jan Beulich
2016-06-14 14:48 ` Andrew Cooper
@ 2016-06-14 14:50 ` Paul Durrant
1 sibling, 0 replies; 3+ messages in thread
From: Paul Durrant @ 2016-06-14 14:50 UTC (permalink / raw)
To: Jan Beulich, xen-devel; +Cc: Andrew Cooper
> -----Original Message-----
> From: Jan Beulich [mailto:JBeulich@suse.com]
> Sent: 14 June 2016 15:45
> To: xen-devel
> Cc: Andrew Cooper; Paul Durrant
> Subject: [PATCH] x86/HVM: rename mmio_gva field to mmio_gla
>
> ... to correctly reflect its purpose. To make things consistent also
> rename handle_mmio_with_translation()'s respective parameter (but don't
> touch sh_page_fault(), as renaming its parameter would require quite a
> few more changes there).
>
> Suggested-by: Andrew Cooper <andrew.cooper3@citrix.com>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>
Reviewed-by: Paul Durrant <paul.durrant@citrix.com>
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -684,7 +684,7 @@ static void latch_linear_to_phys(struct
> if ( vio->mmio_access.gla_valid )
> return;
>
> - vio->mmio_gva = gla & PAGE_MASK;
> + vio->mmio_gla = gla & PAGE_MASK;
> vio->mmio_gpfn = PFN_DOWN(gpa);
> vio->mmio_access = (struct npfec){ .gla_valid = 1,
> .read_access = 1,
> @@ -782,7 +782,7 @@ static int __hvmemul_read(
> if ( ((access_type != hvm_access_insn_fetch
> ? vio->mmio_access.read_access
> : vio->mmio_access.insn_fetch)) &&
> - (vio->mmio_gva == (addr & PAGE_MASK)) )
> + (vio->mmio_gla == (addr & PAGE_MASK)) )
> return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec,
> hvmemul_ctxt, 1);
>
> if ( (seg != x86_seg_none) &&
> @@ -889,7 +889,7 @@ static int hvmemul_write(
> return rc;
>
> if ( vio->mmio_access.write_access &&
> - (vio->mmio_gva == (addr & PAGE_MASK)) )
> + (vio->mmio_gla == (addr & PAGE_MASK)) )
> return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec,
> hvmemul_ctxt, 1);
>
> if ( (seg != x86_seg_none) &&
> @@ -1181,7 +1181,7 @@ static int hvmemul_rep_movs(
>
> bytes = PAGE_SIZE - (saddr & ~PAGE_MASK);
> if ( vio->mmio_access.read_access &&
> - (vio->mmio_gva == (saddr & PAGE_MASK)) &&
> + (vio->mmio_gla == (saddr & PAGE_MASK)) &&
> bytes >= bytes_per_rep )
> {
> sgpa = pfn_to_paddr(vio->mmio_gpfn) | (saddr & ~PAGE_MASK);
> @@ -1200,7 +1200,7 @@ static int hvmemul_rep_movs(
>
> bytes = PAGE_SIZE - (daddr & ~PAGE_MASK);
> if ( vio->mmio_access.write_access &&
> - (vio->mmio_gva == (daddr & PAGE_MASK)) &&
> + (vio->mmio_gla == (daddr & PAGE_MASK)) &&
> bytes >= bytes_per_rep )
> {
> dgpa = pfn_to_paddr(vio->mmio_gpfn) | (daddr & ~PAGE_MASK);
> @@ -1320,7 +1320,7 @@ static int hvmemul_rep_stos(
>
> bytes = PAGE_SIZE - (addr & ~PAGE_MASK);
> if ( vio->mmio_access.write_access &&
> - (vio->mmio_gva == (addr & PAGE_MASK)) &&
> + (vio->mmio_gla == (addr & PAGE_MASK)) &&
> bytes >= bytes_per_rep )
> {
> gpa = pfn_to_paddr(vio->mmio_gpfn) | (addr & ~PAGE_MASK);
> --- a/xen/arch/x86/hvm/io.c
> +++ b/xen/arch/x86/hvm/io.c
> @@ -114,7 +114,7 @@ int handle_mmio(void)
> return 1;
> }
>
> -int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn,
> +int handle_mmio_with_translation(unsigned long gla, unsigned long gpfn,
> struct npfec access)
> {
> struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
> @@ -122,7 +122,7 @@ int handle_mmio_with_translation(unsigne
> vio->mmio_access = access.gla_valid &&
> access.kind == npfec_kind_with_gla
> ? access : (struct npfec){};
> - vio->mmio_gva = gva & PAGE_MASK;
> + vio->mmio_gla = gla & PAGE_MASK;
> vio->mmio_gpfn = gpfn;
> return handle_mmio();
> }
> --- a/xen/include/asm-x86/hvm/io.h
> +++ b/xen/include/asm-x86/hvm/io.h
> @@ -119,7 +119,7 @@ void relocate_portio_handler(
> void send_timeoffset_req(unsigned long timeoff);
> void send_invalidate_req(void);
> int handle_mmio(void);
> -int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn,
> +int handle_mmio_with_translation(unsigned long gla, unsigned long gpfn,
> struct npfec);
> int handle_pio(uint16_t port, unsigned int size, int dir);
> void hvm_interrupt_post(struct vcpu *v, int vector, int type);
> --- a/xen/include/asm-x86/hvm/vcpu.h
> +++ b/xen/include/asm-x86/hvm/vcpu.h
> @@ -60,12 +60,12 @@ struct hvm_vcpu_io {
>
> /*
> * HVM emulation:
> - * Virtual address @mmio_gva maps to MMIO physical frame
> @mmio_gpfn.
> + * Linear address @mmio_gla maps to MMIO physical frame
> @mmio_gpfn.
> * The latter is known to be an MMIO frame (not RAM).
> * This translation is only valid for accesses as per @mmio_access.
> */
> struct npfec mmio_access;
> - unsigned long mmio_gva;
> + unsigned long mmio_gla;
> unsigned long mmio_gpfn;
>
> /*
>
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 3+ messages in thread
end of thread, other threads:[~2016-06-14 15:06 UTC | newest]
Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-06-14 14:44 [PATCH] x86/HVM: rename mmio_gva field to mmio_gla Jan Beulich
2016-06-14 14:48 ` Andrew Cooper
2016-06-14 14:50 ` Paul Durrant
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).