* [PATCH 1/3] x86/mmuext: tighten TLB flush address checks
2016-01-20 14:00 [PATCH 0/3] x86: XSA-168 follow-up Jan Beulich
@ 2016-01-20 14:05 ` Jan Beulich
2016-01-20 14:23 ` Andrew Cooper
2016-01-20 14:06 ` [PATCH 2/3] x86/PV: relax LDT address check Jan Beulich
2016-01-20 14:07 ` [PATCH 3/3] x86/paging: invlpg() hook returns boolean Jan Beulich
2 siblings, 1 reply; 12+ messages in thread
From: Jan Beulich @ 2016-01-20 14:05 UTC (permalink / raw)
To: xen-devel; +Cc: Andrew Cooper, Keir Fraser
[-- Attachment #1: Type: text/plain, Size: 2178 bytes --]
Addresses passed by PV guests should be subjected to __addr_ok(),
avoiding undue TLB flushes.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -3268,8 +3268,9 @@ long do_mmuext_op(
case MMUEXT_INVLPG_LOCAL:
if ( unlikely(d != pg_owner) )
rc = -EPERM;
- else if ( !paging_mode_enabled(d) ||
- paging_invlpg(curr, op.arg1.linear_addr) != 0 )
+ else if ( !paging_mode_enabled(d)
+ ? __addr_ok(op.arg1.linear_addr)
+ : paging_invlpg(curr, op.arg1.linear_addr) )
flush_tlb_one_local(op.arg1.linear_addr);
break;
@@ -3290,7 +3291,7 @@ long do_mmuext_op(
if ( op.cmd == MMUEXT_TLB_FLUSH_MULTI )
flush_tlb_mask(&pmask);
- else
+ else if ( __addr_ok(op.arg1.linear_addr) )
flush_tlb_one_mask(&pmask, op.arg1.linear_addr);
break;
}
@@ -3303,10 +3304,10 @@ long do_mmuext_op(
break;
case MMUEXT_INVLPG_ALL:
- if ( likely(d == pg_owner) )
- flush_tlb_one_mask(d->domain_dirty_cpumask, op.arg1.linear_addr);
- else
+ if ( unlikely(d != pg_owner) )
rc = -EPERM;
+ else if ( __addr_ok(op.arg1.linear_addr) )
+ flush_tlb_one_mask(d->domain_dirty_cpumask, op.arg1.linear_addr);
break;
case MMUEXT_FLUSH_CACHE:
--- a/xen/include/asm-x86/paging.h
+++ b/xen/include/asm-x86/paging.h
@@ -245,7 +245,9 @@ paging_fault(unsigned long va, struct cp
* or 0 if it's safe not to do so. */
static inline int paging_invlpg(struct vcpu *v, unsigned long va)
{
- return is_canonical_address(va) && paging_get_hostmode(v)->invlpg(v, va);
+ return (paging_mode_external(v->domain) ? is_canonical_address(va)
+ : __addr_ok(va)) &&
+ paging_get_hostmode(v)->invlpg(v, va);
}
/* Translate a guest virtual address to the frame number that the
[-- Attachment #2: x86-mmuext-check-addr.patch --]
[-- Type: text/plain, Size: 2220 bytes --]
x86/mmuext: tighten TLB flush address checks
Addresses passed by PV guests should be subjected to __addr_ok(),
avoiding undue TLB flushes.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -3268,8 +3268,9 @@ long do_mmuext_op(
case MMUEXT_INVLPG_LOCAL:
if ( unlikely(d != pg_owner) )
rc = -EPERM;
- else if ( !paging_mode_enabled(d) ||
- paging_invlpg(curr, op.arg1.linear_addr) != 0 )
+ else if ( !paging_mode_enabled(d)
+ ? __addr_ok(op.arg1.linear_addr)
+ : paging_invlpg(curr, op.arg1.linear_addr) )
flush_tlb_one_local(op.arg1.linear_addr);
break;
@@ -3290,7 +3291,7 @@ long do_mmuext_op(
if ( op.cmd == MMUEXT_TLB_FLUSH_MULTI )
flush_tlb_mask(&pmask);
- else
+ else if ( __addr_ok(op.arg1.linear_addr) )
flush_tlb_one_mask(&pmask, op.arg1.linear_addr);
break;
}
@@ -3303,10 +3304,10 @@ long do_mmuext_op(
break;
case MMUEXT_INVLPG_ALL:
- if ( likely(d == pg_owner) )
- flush_tlb_one_mask(d->domain_dirty_cpumask, op.arg1.linear_addr);
- else
+ if ( unlikely(d != pg_owner) )
rc = -EPERM;
+ else if ( __addr_ok(op.arg1.linear_addr) )
+ flush_tlb_one_mask(d->domain_dirty_cpumask, op.arg1.linear_addr);
break;
case MMUEXT_FLUSH_CACHE:
--- a/xen/include/asm-x86/paging.h
+++ b/xen/include/asm-x86/paging.h
@@ -245,7 +245,9 @@ paging_fault(unsigned long va, struct cp
* or 0 if it's safe not to do so. */
static inline int paging_invlpg(struct vcpu *v, unsigned long va)
{
- return is_canonical_address(va) && paging_get_hostmode(v)->invlpg(v, va);
+ return (paging_mode_external(v->domain) ? is_canonical_address(va)
+ : __addr_ok(va)) &&
+ paging_get_hostmode(v)->invlpg(v, va);
}
/* Translate a guest virtual address to the frame number that the
[-- Attachment #3: Type: text/plain, Size: 126 bytes --]
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 12+ messages in thread
* [PATCH 2/3] x86/PV: relax LDT address check
2016-01-20 14:00 [PATCH 0/3] x86: XSA-168 follow-up Jan Beulich
2016-01-20 14:05 ` [PATCH 1/3] x86/mmuext: tighten TLB flush address checks Jan Beulich
@ 2016-01-20 14:06 ` Jan Beulich
2016-01-20 14:27 ` Andrew Cooper
2016-01-20 14:07 ` [PATCH 3/3] x86/paging: invlpg() hook returns boolean Jan Beulich
2 siblings, 1 reply; 12+ messages in thread
From: Jan Beulich @ 2016-01-20 14:06 UTC (permalink / raw)
To: xen-devel; +Cc: Andrew Cooper, Keir Fraser
[-- Attachment #1: Type: text/plain, Size: 1097 bytes --]
There's no point placing restrictions on its address when the LDT size
is zero.
Also convert a local variable to a slightly more efficient type.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -3348,8 +3348,8 @@ long do_mmuext_op(
case MMUEXT_SET_LDT:
{
- unsigned long ptr = op.arg1.linear_addr;
- unsigned long ents = op.arg2.nr_ents;
+ unsigned int ents = op.arg2.nr_ents;
+ unsigned long ptr = ents ? op.arg1.linear_addr : 0;
if ( unlikely(d != pg_owner) )
rc = -EPERM;
@@ -3361,7 +3361,7 @@ long do_mmuext_op(
else if ( ((ptr & (PAGE_SIZE - 1)) != 0) || !__addr_ok(ptr) ||
(ents > 8192) )
{
- MEM_LOG("Bad args to SET_LDT: ptr=%lx, ents=%lx", ptr, ents);
+ MEM_LOG("Bad args to SET_LDT: ptr=%lx, ents=%x", ptr, ents);
rc = -EINVAL;
}
else if ( (curr->arch.pv_vcpu.ldt_ents != ents) ||
[-- Attachment #2: x86-mmuext-LDT-relax.patch --]
[-- Type: text/plain, Size: 1126 bytes --]
x86/PV: relax LDT address check
There's no point placing restrictions on its address when the LDT size
is zero.
Also convert a local variable to a slightly more efficient type.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -3348,8 +3348,8 @@ long do_mmuext_op(
case MMUEXT_SET_LDT:
{
- unsigned long ptr = op.arg1.linear_addr;
- unsigned long ents = op.arg2.nr_ents;
+ unsigned int ents = op.arg2.nr_ents;
+ unsigned long ptr = ents ? op.arg1.linear_addr : 0;
if ( unlikely(d != pg_owner) )
rc = -EPERM;
@@ -3361,7 +3361,7 @@ long do_mmuext_op(
else if ( ((ptr & (PAGE_SIZE - 1)) != 0) || !__addr_ok(ptr) ||
(ents > 8192) )
{
- MEM_LOG("Bad args to SET_LDT: ptr=%lx, ents=%lx", ptr, ents);
+ MEM_LOG("Bad args to SET_LDT: ptr=%lx, ents=%x", ptr, ents);
rc = -EINVAL;
}
else if ( (curr->arch.pv_vcpu.ldt_ents != ents) ||
[-- Attachment #3: Type: text/plain, Size: 126 bytes --]
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 12+ messages in thread
* [PATCH 3/3] x86/paging: invlpg() hook returns boolean
2016-01-20 14:00 [PATCH 0/3] x86: XSA-168 follow-up Jan Beulich
2016-01-20 14:05 ` [PATCH 1/3] x86/mmuext: tighten TLB flush address checks Jan Beulich
2016-01-20 14:06 ` [PATCH 2/3] x86/PV: relax LDT address check Jan Beulich
@ 2016-01-20 14:07 ` Jan Beulich
2016-01-20 14:30 ` Andrew Cooper
` (2 more replies)
2 siblings, 3 replies; 12+ messages in thread
From: Jan Beulich @ 2016-01-20 14:07 UTC (permalink / raw)
To: xen-devel; +Cc: Andrew Cooper, Keir Fraser
[-- Attachment #1: Type: text/plain, Size: 3137 bytes --]
... so make its return type reflect this.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -680,7 +680,7 @@ static int hap_page_fault(struct vcpu *v
* HAP guests can handle invlpg without needing any action from Xen, so
* should not be intercepting it.
*/
-static int hap_invlpg(struct vcpu *v, unsigned long va)
+static bool_t hap_invlpg(struct vcpu *v, unsigned long va)
{
if (nestedhvm_enabled(v->domain)) {
/* Emulate INVLPGA:
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -3510,11 +3510,12 @@ propagate:
}
-static int
-sh_invlpg(struct vcpu *v, unsigned long va)
-/* Called when the guest requests an invlpg. Returns 1 if the invlpg
+/*
+ * Called when the guest requests an invlpg. Returns 1 if the invlpg
* instruction should be issued on the hardware, or 0 if it's safe not
- * to do so. */
+ * to do so.
+ */
+static bool_t sh_invlpg(struct vcpu *v, unsigned long va)
{
mfn_t sl1mfn;
shadow_l2e_t sl2e;
--- a/xen/arch/x86/mm/shadow/none.c
+++ b/xen/arch/x86/mm/shadow/none.c
@@ -32,10 +32,10 @@ static int _page_fault(struct vcpu *v, u
return 0;
}
-static int _invlpg(struct vcpu *v, unsigned long va)
+static bool_t _invlpg(struct vcpu *v, unsigned long va)
{
ASSERT_UNREACHABLE();
- return -EOPNOTSUPP;
+ return 1;
}
static unsigned long _gva_to_gfn(struct vcpu *v, struct p2m_domain *p2m,
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -4392,8 +4392,7 @@ static int __do_update_va_mapping(
switch ( (bmap_ptr = flags & ~UVMF_FLUSHTYPE_MASK) )
{
case UVMF_LOCAL:
- if ( !paging_mode_enabled(d) ||
- (paging_invlpg(v, va) != 0) )
+ if ( !paging_mode_enabled(d) || paging_invlpg(v, va) )
flush_tlb_one_local(va);
break;
case UVMF_ALL:
--- a/xen/include/asm-x86/paging.h
+++ b/xen/include/asm-x86/paging.h
@@ -104,7 +104,7 @@ struct shadow_paging_mode {
struct paging_mode {
int (*page_fault )(struct vcpu *v, unsigned long va,
struct cpu_user_regs *regs);
- int (*invlpg )(struct vcpu *v, unsigned long va);
+ bool_t (*invlpg )(struct vcpu *v, unsigned long va);
unsigned long (*gva_to_gfn )(struct vcpu *v,
struct p2m_domain *p2m,
unsigned long va,
@@ -243,7 +243,7 @@ paging_fault(unsigned long va, struct cp
/* Handle invlpg requests on vcpus.
* Returns 1 if the invlpg instruction should be issued on the hardware,
* or 0 if it's safe not to do so. */
-static inline int paging_invlpg(struct vcpu *v, unsigned long va)
+static inline bool_t paging_invlpg(struct vcpu *v, unsigned long va)
{
return (paging_mode_external(v->domain) ? is_canonical_address(va)
: __addr_ok(va)) &&
[-- Attachment #2: x86-paging_invlpg-bool.patch --]
[-- Type: text/plain, Size: 3176 bytes --]
x86/paging: invlpg() hook returns boolean
... so make its return type reflect this.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -680,7 +680,7 @@ static int hap_page_fault(struct vcpu *v
* HAP guests can handle invlpg without needing any action from Xen, so
* should not be intercepting it.
*/
-static int hap_invlpg(struct vcpu *v, unsigned long va)
+static bool_t hap_invlpg(struct vcpu *v, unsigned long va)
{
if (nestedhvm_enabled(v->domain)) {
/* Emulate INVLPGA:
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -3510,11 +3510,12 @@ propagate:
}
-static int
-sh_invlpg(struct vcpu *v, unsigned long va)
-/* Called when the guest requests an invlpg. Returns 1 if the invlpg
+/*
+ * Called when the guest requests an invlpg. Returns 1 if the invlpg
* instruction should be issued on the hardware, or 0 if it's safe not
- * to do so. */
+ * to do so.
+ */
+static bool_t sh_invlpg(struct vcpu *v, unsigned long va)
{
mfn_t sl1mfn;
shadow_l2e_t sl2e;
--- a/xen/arch/x86/mm/shadow/none.c
+++ b/xen/arch/x86/mm/shadow/none.c
@@ -32,10 +32,10 @@ static int _page_fault(struct vcpu *v, u
return 0;
}
-static int _invlpg(struct vcpu *v, unsigned long va)
+static bool_t _invlpg(struct vcpu *v, unsigned long va)
{
ASSERT_UNREACHABLE();
- return -EOPNOTSUPP;
+ return 1;
}
static unsigned long _gva_to_gfn(struct vcpu *v, struct p2m_domain *p2m,
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -4392,8 +4392,7 @@ static int __do_update_va_mapping(
switch ( (bmap_ptr = flags & ~UVMF_FLUSHTYPE_MASK) )
{
case UVMF_LOCAL:
- if ( !paging_mode_enabled(d) ||
- (paging_invlpg(v, va) != 0) )
+ if ( !paging_mode_enabled(d) || paging_invlpg(v, va) )
flush_tlb_one_local(va);
break;
case UVMF_ALL:
--- a/xen/include/asm-x86/paging.h
+++ b/xen/include/asm-x86/paging.h
@@ -104,7 +104,7 @@ struct shadow_paging_mode {
struct paging_mode {
int (*page_fault )(struct vcpu *v, unsigned long va,
struct cpu_user_regs *regs);
- int (*invlpg )(struct vcpu *v, unsigned long va);
+ bool_t (*invlpg )(struct vcpu *v, unsigned long va);
unsigned long (*gva_to_gfn )(struct vcpu *v,
struct p2m_domain *p2m,
unsigned long va,
@@ -243,7 +243,7 @@ paging_fault(unsigned long va, struct cp
/* Handle invlpg requests on vcpus.
* Returns 1 if the invlpg instruction should be issued on the hardware,
* or 0 if it's safe not to do so. */
-static inline int paging_invlpg(struct vcpu *v, unsigned long va)
+static inline bool_t paging_invlpg(struct vcpu *v, unsigned long va)
{
return (paging_mode_external(v->domain) ? is_canonical_address(va)
: __addr_ok(va)) &&
[-- Attachment #3: Type: text/plain, Size: 126 bytes --]
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 12+ messages in thread