* [PATCH 0/2] x86: physdev-op handling adjustments
@ 2015-01-19 15:30 Jan Beulich
2015-01-19 15:36 ` [PATCH 1/2] x86: slightly simplify PHYSDEVOP_pirq_eoi_gmfn_v* handling Jan Beulich
2015-01-19 15:36 ` [PATCH 2/2] x86: latch current->domain in do_physdev_op() Jan Beulich
0 siblings, 2 replies; 5+ messages in thread
From: Jan Beulich @ 2015-01-19 15:30 UTC (permalink / raw)
To: xen-devel; +Cc: Andrew Cooper, Keir Fraser
1: slightly simplify PHYSDEVOP_pirq_eoi_gmfn_v* handling
2: latch current->domain in do_physdev_op()
Signed-off-by: Jan Beulich <jbeulich@suse.com>
^ permalink raw reply [flat|nested] 5+ messages in thread
* [PATCH 1/2] x86: slightly simplify PHYSDEVOP_pirq_eoi_gmfn_v* handling
2015-01-19 15:30 [PATCH 0/2] x86: physdev-op handling adjustments Jan Beulich
@ 2015-01-19 15:36 ` Jan Beulich
2015-01-19 15:40 ` Andrew Cooper
2015-01-19 15:36 ` [PATCH 2/2] x86: latch current->domain in do_physdev_op() Jan Beulich
1 sibling, 1 reply; 5+ messages in thread
From: Jan Beulich @ 2015-01-19 15:36 UTC (permalink / raw)
To: xen-devel; +Cc: Andrew Cooper, Keir Fraser
[-- Attachment #1: Type: text/plain, Size: 1465 bytes --]
We don't really need the MFN in more than one place (after dropping
mfn_to_page() translations where we know the result already), so no
need to have a local variable for it.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/arch/x86/physdev.c
+++ b/xen/arch/x86/physdev.c
@@ -336,7 +336,6 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
case PHYSDEVOP_pirq_eoi_gmfn_v2:
case PHYSDEVOP_pirq_eoi_gmfn_v1: {
struct physdev_pirq_eoi_gmfn info;
- unsigned long mfn;
struct page_info *page;
ret = -EFAULT;
@@ -352,21 +351,20 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
put_page(page);
break;
}
- mfn = page_to_mfn(page);
if ( cmpxchg(&v->domain->arch.pirq_eoi_map_mfn,
- 0, mfn) != 0 )
+ 0, page_to_mfn(page)) != 0 )
{
- put_page_and_type(mfn_to_page(mfn));
+ put_page_and_type(page);
ret = -EBUSY;
break;
}
- v->domain->arch.pirq_eoi_map = map_domain_page_global(mfn);
+ v->domain->arch.pirq_eoi_map = __map_domain_page_global(page);
if ( v->domain->arch.pirq_eoi_map == NULL )
{
v->domain->arch.pirq_eoi_map_mfn = 0;
- put_page_and_type(mfn_to_page(mfn));
+ put_page_and_type(page);
ret = -ENOSPC;
break;
}
[-- Attachment #2: x86-pirq-eoi-gmfn-simplify.patch --]
[-- Type: text/plain, Size: 1521 bytes --]
x86: slightly simplify PHYSDEVOP_pirq_eoi_gmfn_v* handling
We don't really need the MFN in more than one place (after dropping
mfn_to_page() translations where we know the result already), so no
need to have a local variable for it.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/arch/x86/physdev.c
+++ b/xen/arch/x86/physdev.c
@@ -336,7 +336,6 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
case PHYSDEVOP_pirq_eoi_gmfn_v2:
case PHYSDEVOP_pirq_eoi_gmfn_v1: {
struct physdev_pirq_eoi_gmfn info;
- unsigned long mfn;
struct page_info *page;
ret = -EFAULT;
@@ -352,21 +351,20 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
put_page(page);
break;
}
- mfn = page_to_mfn(page);
if ( cmpxchg(&v->domain->arch.pirq_eoi_map_mfn,
- 0, mfn) != 0 )
+ 0, page_to_mfn(page)) != 0 )
{
- put_page_and_type(mfn_to_page(mfn));
+ put_page_and_type(page);
ret = -EBUSY;
break;
}
- v->domain->arch.pirq_eoi_map = map_domain_page_global(mfn);
+ v->domain->arch.pirq_eoi_map = __map_domain_page_global(page);
if ( v->domain->arch.pirq_eoi_map == NULL )
{
v->domain->arch.pirq_eoi_map_mfn = 0;
- put_page_and_type(mfn_to_page(mfn));
+ put_page_and_type(page);
ret = -ENOSPC;
break;
}
[-- Attachment #3: Type: text/plain, Size: 126 bytes --]
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 5+ messages in thread
* [PATCH 2/2] x86: latch current->domain in do_physdev_op()
2015-01-19 15:30 [PATCH 0/2] x86: physdev-op handling adjustments Jan Beulich
2015-01-19 15:36 ` [PATCH 1/2] x86: slightly simplify PHYSDEVOP_pirq_eoi_gmfn_v* handling Jan Beulich
@ 2015-01-19 15:36 ` Jan Beulich
2015-01-19 15:58 ` Andrew Cooper
1 sibling, 1 reply; 5+ messages in thread
From: Jan Beulich @ 2015-01-19 15:36 UTC (permalink / raw)
To: xen-devel; +Cc: Andrew Cooper, Keir Fraser
[-- Attachment #1: Type: text/plain, Size: 8394 bytes --]
... and drop global latching of current, as being needed more than once
only in PHYSDEVOP_set_iobitmap, and not at all in almost all other
cases.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/arch/x86/physdev.c
+++ b/xen/arch/x86/physdev.c
@@ -291,7 +291,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
{
int irq;
ret_t ret;
- struct vcpu *v = current;
+ struct domain *currd = current->domain;
switch ( cmd )
{
@@ -303,32 +303,31 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
if ( copy_from_guest(&eoi, arg, 1) != 0 )
break;
ret = -EINVAL;
- if ( eoi.irq >= v->domain->nr_pirqs )
+ if ( eoi.irq >= currd->nr_pirqs )
break;
- spin_lock(&v->domain->event_lock);
- pirq = pirq_info(v->domain, eoi.irq);
+ spin_lock(&currd->event_lock);
+ pirq = pirq_info(currd, eoi.irq);
if ( !pirq ) {
- spin_unlock(&v->domain->event_lock);
+ spin_unlock(&currd->event_lock);
break;
}
- if ( v->domain->arch.auto_unmask )
+ if ( currd->arch.auto_unmask )
evtchn_unmask(pirq->evtchn);
- if ( is_pv_domain(v->domain) ||
- domain_pirq_to_irq(v->domain, eoi.irq) > 0 )
+ if ( is_pv_domain(currd) || domain_pirq_to_irq(currd, eoi.irq) > 0 )
pirq_guest_eoi(pirq);
- if ( is_hvm_domain(v->domain) &&
- domain_pirq_to_emuirq(v->domain, eoi.irq) > 0 )
+ if ( is_hvm_domain(currd) &&
+ domain_pirq_to_emuirq(currd, eoi.irq) > 0 )
{
- struct hvm_irq *hvm_irq = &v->domain->arch.hvm_domain.irq;
- int gsi = domain_pirq_to_emuirq(v->domain, eoi.irq);
+ struct hvm_irq *hvm_irq = &currd->arch.hvm_domain.irq;
+ int gsi = domain_pirq_to_emuirq(currd, eoi.irq);
/* if this is a level irq and count > 0, send another
* notification */
if ( gsi >= NR_ISAIRQS /* ISA irqs are edge triggered */
&& hvm_irq->gsi_assert_count[gsi] )
- send_guest_pirq(v->domain, pirq);
+ send_guest_pirq(currd, pirq);
}
- spin_unlock(&v->domain->event_lock);
+ spin_unlock(&currd->event_lock);
ret = 0;
break;
}
@@ -352,7 +351,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
break;
}
- if ( cmpxchg(&v->domain->arch.pirq_eoi_map_mfn,
+ if ( cmpxchg(&currd->arch.pirq_eoi_map_mfn,
0, page_to_mfn(page)) != 0 )
{
put_page_and_type(page);
@@ -360,16 +359,16 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
break;
}
- v->domain->arch.pirq_eoi_map = __map_domain_page_global(page);
- if ( v->domain->arch.pirq_eoi_map == NULL )
+ currd->arch.pirq_eoi_map = __map_domain_page_global(page);
+ if ( currd->arch.pirq_eoi_map == NULL )
{
- v->domain->arch.pirq_eoi_map_mfn = 0;
+ currd->arch.pirq_eoi_map_mfn = 0;
put_page_and_type(page);
ret = -ENOSPC;
break;
}
if ( cmd == PHYSDEVOP_pirq_eoi_gmfn_v1 )
- v->domain->arch.auto_unmask = 1;
+ currd->arch.auto_unmask = 1;
ret = 0;
break;
@@ -377,7 +376,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
/* Legacy since 0x00030202. */
case PHYSDEVOP_IRQ_UNMASK_NOTIFY: {
- ret = pirq_guest_unmask(v->domain);
+ ret = pirq_guest_unmask(currd);
break;
}
@@ -388,12 +387,12 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
break;
irq = irq_status_query.irq;
ret = -EINVAL;
- if ( (irq < 0) || (irq >= v->domain->nr_pirqs) )
+ if ( (irq < 0) || (irq >= currd->nr_pirqs) )
break;
irq_status_query.flags = 0;
- if ( is_hvm_domain(v->domain) &&
- domain_pirq_to_irq(v->domain, irq) <= 0 &&
- domain_pirq_to_emuirq(v->domain, irq) == IRQ_UNBOUND )
+ if ( is_hvm_domain(currd) &&
+ domain_pirq_to_irq(currd, irq) <= 0 &&
+ domain_pirq_to_emuirq(currd, irq) == IRQ_UNBOUND )
{
ret = -EINVAL;
break;
@@ -408,7 +407,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
* then dom0 is probably modern anyway.
*/
irq_status_query.flags |= XENIRQSTAT_needs_eoi;
- if ( pirq_shared(v->domain, irq) )
+ if ( pirq_shared(currd, irq) )
irq_status_query.flags |= XENIRQSTAT_shared;
ret = __copy_to_guest(arg, &irq_status_query, 1) ? -EFAULT : 0;
break;
@@ -469,7 +468,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
ret = -EFAULT;
if ( copy_from_guest(&apic, arg, 1) != 0 )
break;
- ret = xsm_apic(XSM_PRIV, v->domain, cmd);
+ ret = xsm_apic(XSM_PRIV, currd, cmd);
if ( ret )
break;
ret = ioapic_guest_read(apic.apic_physbase, apic.reg, &apic.value);
@@ -483,7 +482,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
ret = -EFAULT;
if ( copy_from_guest(&apic, arg, 1) != 0 )
break;
- ret = xsm_apic(XSM_PRIV, v->domain, cmd);
+ ret = xsm_apic(XSM_PRIV, currd, cmd);
if ( ret )
break;
ret = ioapic_guest_write(apic.apic_physbase, apic.reg, apic.value);
@@ -499,7 +498,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
/* Use the APIC check since this dummy hypercall should still only
* be called by the domain with access to program the ioapic */
- ret = xsm_apic(XSM_PRIV, v->domain, cmd);
+ ret = xsm_apic(XSM_PRIV, currd, cmd);
if ( ret )
break;
@@ -529,15 +528,16 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
if ( set_iopl.iopl > 3 )
break;
ret = 0;
- v->arch.pv_vcpu.iopl = set_iopl.iopl;
+ current->arch.pv_vcpu.iopl = set_iopl.iopl;
break;
}
case PHYSDEVOP_set_iobitmap: {
+ struct vcpu *curr = current;
struct physdev_set_iobitmap set_iobitmap;
ret = -ENOSYS;
- if ( is_pvh_vcpu(current) )
+ if ( is_pvh_vcpu(curr) )
break;
ret = -EFAULT;
@@ -549,11 +549,12 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
break;
ret = 0;
#ifndef COMPAT
- v->arch.pv_vcpu.iobmp = set_iobitmap.bitmap;
+ curr->arch.pv_vcpu.iobmp = set_iobitmap.bitmap;
#else
- guest_from_compat_handle(v->arch.pv_vcpu.iobmp, set_iobitmap.bitmap);
+ guest_from_compat_handle(curr->arch.pv_vcpu.iobmp,
+ set_iobitmap.bitmap);
#endif
- v->arch.pv_vcpu.iobmp_limit = set_iobitmap.nr_ports;
+ curr->arch.pv_vcpu.iobmp_limit = set_iobitmap.nr_ports;
break;
}
@@ -713,18 +714,17 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
}
case PHYSDEVOP_get_free_pirq: {
struct physdev_get_free_pirq out;
- struct domain *d = v->domain;
ret = -EFAULT;
if ( copy_from_guest(&out, arg, 1) != 0 )
break;
- spin_lock(&d->event_lock);
+ spin_lock(&currd->event_lock);
- ret = get_free_pirq(d, out.type);
+ ret = get_free_pirq(currd, out.type);
if ( ret >= 0 )
{
- struct pirq *info = pirq_get_info(d, ret);
+ struct pirq *info = pirq_get_info(currd, ret);
if ( info )
info->arch.irq = PIRQ_ALLOCATED;
@@ -732,7 +732,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
ret = -ENOMEM;
}
- spin_unlock(&d->event_lock);
+ spin_unlock(&currd->event_lock);
if ( ret >= 0 )
{
@@ -746,7 +746,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
case PHYSDEVOP_dbgp_op: {
struct physdev_dbgp_op op;
- if ( !is_hardware_domain(v->domain) )
+ if ( !is_hardware_domain(currd) )
ret = -EPERM;
else if ( copy_from_guest(&op, arg, 1) )
ret = -EFAULT;
[-- Attachment #2: x86-physdevop-latch-domain.patch --]
[-- Type: text/plain, Size: 8439 bytes --]
x86: latch current->domain in do_physdev_op()
... and drop global latching of current, as being needed more than once
only in PHYSDEVOP_set_iobitmap, and not at all in almost all other
cases.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/arch/x86/physdev.c
+++ b/xen/arch/x86/physdev.c
@@ -291,7 +291,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
{
int irq;
ret_t ret;
- struct vcpu *v = current;
+ struct domain *currd = current->domain;
switch ( cmd )
{
@@ -303,32 +303,31 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
if ( copy_from_guest(&eoi, arg, 1) != 0 )
break;
ret = -EINVAL;
- if ( eoi.irq >= v->domain->nr_pirqs )
+ if ( eoi.irq >= currd->nr_pirqs )
break;
- spin_lock(&v->domain->event_lock);
- pirq = pirq_info(v->domain, eoi.irq);
+ spin_lock(&currd->event_lock);
+ pirq = pirq_info(currd, eoi.irq);
if ( !pirq ) {
- spin_unlock(&v->domain->event_lock);
+ spin_unlock(&currd->event_lock);
break;
}
- if ( v->domain->arch.auto_unmask )
+ if ( currd->arch.auto_unmask )
evtchn_unmask(pirq->evtchn);
- if ( is_pv_domain(v->domain) ||
- domain_pirq_to_irq(v->domain, eoi.irq) > 0 )
+ if ( is_pv_domain(currd) || domain_pirq_to_irq(currd, eoi.irq) > 0 )
pirq_guest_eoi(pirq);
- if ( is_hvm_domain(v->domain) &&
- domain_pirq_to_emuirq(v->domain, eoi.irq) > 0 )
+ if ( is_hvm_domain(currd) &&
+ domain_pirq_to_emuirq(currd, eoi.irq) > 0 )
{
- struct hvm_irq *hvm_irq = &v->domain->arch.hvm_domain.irq;
- int gsi = domain_pirq_to_emuirq(v->domain, eoi.irq);
+ struct hvm_irq *hvm_irq = &currd->arch.hvm_domain.irq;
+ int gsi = domain_pirq_to_emuirq(currd, eoi.irq);
/* if this is a level irq and count > 0, send another
* notification */
if ( gsi >= NR_ISAIRQS /* ISA irqs are edge triggered */
&& hvm_irq->gsi_assert_count[gsi] )
- send_guest_pirq(v->domain, pirq);
+ send_guest_pirq(currd, pirq);
}
- spin_unlock(&v->domain->event_lock);
+ spin_unlock(&currd->event_lock);
ret = 0;
break;
}
@@ -352,7 +351,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
break;
}
- if ( cmpxchg(&v->domain->arch.pirq_eoi_map_mfn,
+ if ( cmpxchg(&currd->arch.pirq_eoi_map_mfn,
0, page_to_mfn(page)) != 0 )
{
put_page_and_type(page);
@@ -360,16 +359,16 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
break;
}
- v->domain->arch.pirq_eoi_map = __map_domain_page_global(page);
- if ( v->domain->arch.pirq_eoi_map == NULL )
+ currd->arch.pirq_eoi_map = __map_domain_page_global(page);
+ if ( currd->arch.pirq_eoi_map == NULL )
{
- v->domain->arch.pirq_eoi_map_mfn = 0;
+ currd->arch.pirq_eoi_map_mfn = 0;
put_page_and_type(page);
ret = -ENOSPC;
break;
}
if ( cmd == PHYSDEVOP_pirq_eoi_gmfn_v1 )
- v->domain->arch.auto_unmask = 1;
+ currd->arch.auto_unmask = 1;
ret = 0;
break;
@@ -377,7 +376,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
/* Legacy since 0x00030202. */
case PHYSDEVOP_IRQ_UNMASK_NOTIFY: {
- ret = pirq_guest_unmask(v->domain);
+ ret = pirq_guest_unmask(currd);
break;
}
@@ -388,12 +387,12 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
break;
irq = irq_status_query.irq;
ret = -EINVAL;
- if ( (irq < 0) || (irq >= v->domain->nr_pirqs) )
+ if ( (irq < 0) || (irq >= currd->nr_pirqs) )
break;
irq_status_query.flags = 0;
- if ( is_hvm_domain(v->domain) &&
- domain_pirq_to_irq(v->domain, irq) <= 0 &&
- domain_pirq_to_emuirq(v->domain, irq) == IRQ_UNBOUND )
+ if ( is_hvm_domain(currd) &&
+ domain_pirq_to_irq(currd, irq) <= 0 &&
+ domain_pirq_to_emuirq(currd, irq) == IRQ_UNBOUND )
{
ret = -EINVAL;
break;
@@ -408,7 +407,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
* then dom0 is probably modern anyway.
*/
irq_status_query.flags |= XENIRQSTAT_needs_eoi;
- if ( pirq_shared(v->domain, irq) )
+ if ( pirq_shared(currd, irq) )
irq_status_query.flags |= XENIRQSTAT_shared;
ret = __copy_to_guest(arg, &irq_status_query, 1) ? -EFAULT : 0;
break;
@@ -469,7 +468,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
ret = -EFAULT;
if ( copy_from_guest(&apic, arg, 1) != 0 )
break;
- ret = xsm_apic(XSM_PRIV, v->domain, cmd);
+ ret = xsm_apic(XSM_PRIV, currd, cmd);
if ( ret )
break;
ret = ioapic_guest_read(apic.apic_physbase, apic.reg, &apic.value);
@@ -483,7 +482,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
ret = -EFAULT;
if ( copy_from_guest(&apic, arg, 1) != 0 )
break;
- ret = xsm_apic(XSM_PRIV, v->domain, cmd);
+ ret = xsm_apic(XSM_PRIV, currd, cmd);
if ( ret )
break;
ret = ioapic_guest_write(apic.apic_physbase, apic.reg, apic.value);
@@ -499,7 +498,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
/* Use the APIC check since this dummy hypercall should still only
* be called by the domain with access to program the ioapic */
- ret = xsm_apic(XSM_PRIV, v->domain, cmd);
+ ret = xsm_apic(XSM_PRIV, currd, cmd);
if ( ret )
break;
@@ -529,15 +528,16 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
if ( set_iopl.iopl > 3 )
break;
ret = 0;
- v->arch.pv_vcpu.iopl = set_iopl.iopl;
+ current->arch.pv_vcpu.iopl = set_iopl.iopl;
break;
}
case PHYSDEVOP_set_iobitmap: {
+ struct vcpu *curr = current;
struct physdev_set_iobitmap set_iobitmap;
ret = -ENOSYS;
- if ( is_pvh_vcpu(current) )
+ if ( is_pvh_vcpu(curr) )
break;
ret = -EFAULT;
@@ -549,11 +549,12 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
break;
ret = 0;
#ifndef COMPAT
- v->arch.pv_vcpu.iobmp = set_iobitmap.bitmap;
+ curr->arch.pv_vcpu.iobmp = set_iobitmap.bitmap;
#else
- guest_from_compat_handle(v->arch.pv_vcpu.iobmp, set_iobitmap.bitmap);
+ guest_from_compat_handle(curr->arch.pv_vcpu.iobmp,
+ set_iobitmap.bitmap);
#endif
- v->arch.pv_vcpu.iobmp_limit = set_iobitmap.nr_ports;
+ curr->arch.pv_vcpu.iobmp_limit = set_iobitmap.nr_ports;
break;
}
@@ -713,18 +714,17 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
}
case PHYSDEVOP_get_free_pirq: {
struct physdev_get_free_pirq out;
- struct domain *d = v->domain;
ret = -EFAULT;
if ( copy_from_guest(&out, arg, 1) != 0 )
break;
- spin_lock(&d->event_lock);
+ spin_lock(&currd->event_lock);
- ret = get_free_pirq(d, out.type);
+ ret = get_free_pirq(currd, out.type);
if ( ret >= 0 )
{
- struct pirq *info = pirq_get_info(d, ret);
+ struct pirq *info = pirq_get_info(currd, ret);
if ( info )
info->arch.irq = PIRQ_ALLOCATED;
@@ -732,7 +732,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
ret = -ENOMEM;
}
- spin_unlock(&d->event_lock);
+ spin_unlock(&currd->event_lock);
if ( ret >= 0 )
{
@@ -746,7 +746,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
case PHYSDEVOP_dbgp_op: {
struct physdev_dbgp_op op;
- if ( !is_hardware_domain(v->domain) )
+ if ( !is_hardware_domain(currd) )
ret = -EPERM;
else if ( copy_from_guest(&op, arg, 1) )
ret = -EFAULT;
[-- Attachment #3: Type: text/plain, Size: 126 bytes --]
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 5+ messages in thread
* Re: [PATCH 1/2] x86: slightly simplify PHYSDEVOP_pirq_eoi_gmfn_v* handling
2015-01-19 15:36 ` [PATCH 1/2] x86: slightly simplify PHYSDEVOP_pirq_eoi_gmfn_v* handling Jan Beulich
@ 2015-01-19 15:40 ` Andrew Cooper
0 siblings, 0 replies; 5+ messages in thread
From: Andrew Cooper @ 2015-01-19 15:40 UTC (permalink / raw)
To: Jan Beulich, xen-devel; +Cc: Keir Fraser
On 19/01/15 15:36, Jan Beulich wrote:
> We don't really need the MFN in more than one place (after dropping
> mfn_to_page() translations where we know the result already), so no
> need to have a local variable for it.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
>
> --- a/xen/arch/x86/physdev.c
> +++ b/xen/arch/x86/physdev.c
> @@ -336,7 +336,6 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
> case PHYSDEVOP_pirq_eoi_gmfn_v2:
> case PHYSDEVOP_pirq_eoi_gmfn_v1: {
> struct physdev_pirq_eoi_gmfn info;
> - unsigned long mfn;
> struct page_info *page;
>
> ret = -EFAULT;
> @@ -352,21 +351,20 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
> put_page(page);
> break;
> }
> - mfn = page_to_mfn(page);
>
> if ( cmpxchg(&v->domain->arch.pirq_eoi_map_mfn,
> - 0, mfn) != 0 )
> + 0, page_to_mfn(page)) != 0 )
> {
> - put_page_and_type(mfn_to_page(mfn));
> + put_page_and_type(page);
> ret = -EBUSY;
> break;
> }
>
> - v->domain->arch.pirq_eoi_map = map_domain_page_global(mfn);
> + v->domain->arch.pirq_eoi_map = __map_domain_page_global(page);
> if ( v->domain->arch.pirq_eoi_map == NULL )
> {
> v->domain->arch.pirq_eoi_map_mfn = 0;
> - put_page_and_type(mfn_to_page(mfn));
> + put_page_and_type(page);
> ret = -ENOSPC;
> break;
> }
>
>
>
^ permalink raw reply [flat|nested] 5+ messages in thread
* Re: [PATCH 2/2] x86: latch current->domain in do_physdev_op()
2015-01-19 15:36 ` [PATCH 2/2] x86: latch current->domain in do_physdev_op() Jan Beulich
@ 2015-01-19 15:58 ` Andrew Cooper
0 siblings, 0 replies; 5+ messages in thread
From: Andrew Cooper @ 2015-01-19 15:58 UTC (permalink / raw)
To: Jan Beulich, xen-devel; +Cc: Keir Fraser
On 19/01/15 15:36, Jan Beulich wrote:
> @@ -483,7 +482,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
> ret = -EFAULT;
> if ( copy_from_guest(&apic, arg, 1) != 0 )
> break;
> - ret = xsm_apic(XSM_PRIV, v->domain, cmd);
> + ret = xsm_apic(XSM_PRIV, currd, cmd);
> if ( ret )
> break;
> ret = ioapic_guest_write(apic.apic_physbase, apic.reg, apic.value);
> @@ -499,7 +498,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
>
> /* Use the APIC check since this dummy hypercall should still only
> * be called by the domain with access to program the ioapic */
> - ret = xsm_apic(XSM_PRIV, v->domain, cmd);
> + ret = xsm_apic(XSM_PRIV, currd, cmd);
> if ( ret )
> break;
>
> @@ -529,15 +528,16 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
There is a "is_pvh_vcpu(current)" between these two hunks which wants
latching in the same way as set_iobitmap, allowing the "current" below
to become "curr".
With that fixed, Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
> if ( set_iopl.iopl > 3 )
> break;
> ret = 0;
> - v->arch.pv_vcpu.iopl = set_iopl.iopl;
> + current->arch.pv_vcpu.iopl = set_iopl.iopl;
> break;
> }
>
> case PHYSDEVOP_set_iobitmap: {
> + struct vcpu *curr = current;
> struct physdev_set_iobitmap set_iobitmap;
>
> ret = -ENOSYS;
> - if ( is_pvh_vcpu(current) )
> + if ( is_pvh_vcpu(curr) )
> break;
>
> ret = -EFAULT;
^ permalink raw reply [flat|nested] 5+ messages in thread
end of thread, other threads:[~2015-01-19 15:59 UTC | newest]
Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2015-01-19 15:30 [PATCH 0/2] x86: physdev-op handling adjustments Jan Beulich
2015-01-19 15:36 ` [PATCH 1/2] x86: slightly simplify PHYSDEVOP_pirq_eoi_gmfn_v* handling Jan Beulich
2015-01-19 15:40 ` Andrew Cooper
2015-01-19 15:36 ` [PATCH 2/2] x86: latch current->domain in do_physdev_op() Jan Beulich
2015-01-19 15:58 ` Andrew Cooper
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.