* [PATCH] evtchn: simplify sending of notifications
@ 2015-01-12 8:57 Jan Beulich
2015-01-12 10:52 ` David Vrabel
2015-01-12 11:33 ` Andrew Cooper
0 siblings, 2 replies; 5+ messages in thread
From: Jan Beulich @ 2015-01-12 8:57 UTC (permalink / raw)
To: xen-devel; +Cc: Ian Campbell, Keir Fraser, Ian Jackson, Tim Deegan
The trivial wrapper evtchn_set_pending() is pretty pointless, as it
only serves to invoke another wrapper evtchn_port_set_pending(). In
turn, the latter is kind of inconsistent with its siblings in that it
takes a struct vcpu * rather than a struct domain * - adjusting this
allows for more efficient code in the majority of cases.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
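(Illustrative call-site comparison, not part of the patch itself:

    /* before: the caller looks up the vcpu, and the wrapper re-derives the evtchn from the port */
    evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);

    /* after: the caller passes what it already holds; the wrapper does the single d->vcpu[] lookup */
    evtchn_port_set_pending(d, chn->notify_vcpu_id, chn);
)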
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -95,8 +95,6 @@ static uint8_t get_xen_consumer(xen_even
/* Get the notification function for a given Xen-bound event channel. */
#define xen_notification_fn(e) (xen_consumers[(e)->xen_consumer-1])
-static void evtchn_set_pending(struct vcpu *v, int port);
-
static int virq_is_global(uint32_t virq)
{
int rc;
@@ -287,7 +285,7 @@ static long evtchn_bind_interdomain(evtc
* We may have lost notifications on the remote unbound port. Fix that up
* here by conservatively always setting a notification on the local port.
*/
- evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);
+ evtchn_port_set_pending(ld, lchn->notify_vcpu_id, lchn);
bind->local_port = lport;
@@ -599,11 +597,10 @@ static long evtchn_close(evtchn_close_t
return __evtchn_close(current->domain, close->port);
}
-int evtchn_send(struct domain *d, unsigned int lport)
+int evtchn_send(struct domain *ld, unsigned int lport)
{
struct evtchn *lchn, *rchn;
- struct domain *ld = d, *rd;
- struct vcpu *rvcpu;
+ struct domain *rd;
int rport, ret = 0;
spin_lock(&ld->event_lock);
@@ -633,14 +630,13 @@ int evtchn_send(struct domain *d, unsign
rd = lchn->u.interdomain.remote_dom;
rport = lchn->u.interdomain.remote_port;
rchn = evtchn_from_port(rd, rport);
- rvcpu = rd->vcpu[rchn->notify_vcpu_id];
if ( consumer_is_xen(rchn) )
- (*xen_notification_fn(rchn))(rvcpu, rport);
+ xen_notification_fn(rchn)(rd->vcpu[rchn->notify_vcpu_id], rport);
else
- evtchn_set_pending(rvcpu, rport);
+ evtchn_port_set_pending(rd, rchn->notify_vcpu_id, rchn);
break;
case ECS_IPI:
- evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);
+ evtchn_port_set_pending(ld, lchn->notify_vcpu_id, lchn);
break;
case ECS_UNBOUND:
/* silently drop the notification */
@@ -655,11 +651,6 @@ out:
return ret;
}
-static void evtchn_set_pending(struct vcpu *v, int port)
-{
- evtchn_port_set_pending(v, evtchn_from_port(v->domain, port));
-}
-
int guest_enabled_event(struct vcpu *v, uint32_t virq)
{
return ((v != NULL) && (v->virq_to_evtchn[virq] != 0));
@@ -669,6 +660,7 @@ void send_guest_vcpu_virq(struct vcpu *v
{
unsigned long flags;
int port;
+ struct domain *d;
ASSERT(!virq_is_global(virq));
@@ -678,7 +670,8 @@ void send_guest_vcpu_virq(struct vcpu *v
if ( unlikely(port == 0) )
goto out;
- evtchn_set_pending(v, port);
+ d = v->domain;
+ evtchn_port_set_pending(d, v->vcpu_id, evtchn_from_port(d, port));
out:
spin_unlock_irqrestore(&v->virq_lock, flags);
@@ -707,7 +700,7 @@ static void send_guest_global_virq(struc
goto out;
chn = evtchn_from_port(d, port);
- evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
+ evtchn_port_set_pending(d, chn->notify_vcpu_id, chn);
out:
spin_unlock_irqrestore(&v->virq_lock, flags);
@@ -731,7 +724,7 @@ void send_guest_pirq(struct domain *d, c
}
chn = evtchn_from_port(d, port);
- evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
+ evtchn_port_set_pending(d, chn->notify_vcpu_id, chn);
}
static struct domain *global_virq_handlers[NR_VIRQS] __read_mostly;
@@ -1202,7 +1195,6 @@ void notify_via_xen_event_channel(struct
{
struct evtchn *lchn, *rchn;
struct domain *rd;
- int rport;
spin_lock(&ld->event_lock);
@@ -1219,9 +1211,8 @@ void notify_via_xen_event_channel(struct
if ( likely(lchn->state == ECS_INTERDOMAIN) )
{
rd = lchn->u.interdomain.remote_dom;
- rport = lchn->u.interdomain.remote_port;
- rchn = evtchn_from_port(rd, rport);
- evtchn_set_pending(rd->vcpu[rchn->notify_vcpu_id], rport);
+ rchn = evtchn_from_port(rd, lchn->u.interdomain.remote_port);
+ evtchn_port_set_pending(rd, rchn->notify_vcpu_id, rchn);
}
spin_unlock(&ld->event_lock);
--- a/xen/include/xen/event.h
+++ b/xen/include/xen/event.h
@@ -152,10 +152,11 @@ static inline void evtchn_port_init(stru
d->evtchn_port_ops->init(d, evtchn);
}
-static inline void evtchn_port_set_pending(struct vcpu *v,
+static inline void evtchn_port_set_pending(struct domain *d,
+ unsigned int vcpu_id,
struct evtchn *evtchn)
{
- v->domain->evtchn_port_ops->set_pending(v, evtchn);
+ d->evtchn_port_ops->set_pending(d->vcpu[vcpu_id], evtchn);
}
static inline void evtchn_port_clear_pending(struct domain *d,
* Re: [PATCH] evtchn: simplify sending of notifications
2015-01-12 8:57 [PATCH] evtchn: simplify sending of notifications Jan Beulich
@ 2015-01-12 10:52 ` David Vrabel
2015-01-12 11:33 ` Andrew Cooper
1 sibling, 0 replies; 5+ messages in thread
From: David Vrabel @ 2015-01-12 10:52 UTC (permalink / raw)
To: Jan Beulich, xen-devel; +Cc: Ian Campbell, Ian Jackson, Keir Fraser, Tim Deegan
On 12/01/15 08:57, Jan Beulich wrote:
> The trivial wrapper evtchn_set_pending() is pretty pointless, as it
> only serves to invoke another wrapper evtchn_port_set_pending(). In
> turn, the latter is kind of inconsistent with its siblings in that it
> takes a struct vcpu * rather than a struct domain * - adjusting this
> allows for more efficient code in the majority of cases.
Reviewed-by: David Vrabel <david.vrabel@citrix.com>
David
* Re: [PATCH] evtchn: simplify sending of notifications
2015-01-12 8:57 [PATCH] evtchn: simplify sending of notifications Jan Beulich
2015-01-12 10:52 ` David Vrabel
@ 2015-01-12 11:33 ` Andrew Cooper
2015-01-12 11:42 ` Jan Beulich
1 sibling, 1 reply; 5+ messages in thread
From: Andrew Cooper @ 2015-01-12 11:33 UTC (permalink / raw)
To: Jan Beulich, xen-devel; +Cc: Ian Campbell, Ian Jackson, Keir Fraser, Tim Deegan
On 12/01/15 08:57, Jan Beulich wrote:
> --- a/xen/include/xen/event.h
> +++ b/xen/include/xen/event.h
> @@ -152,10 +152,11 @@ static inline void evtchn_port_init(stru
> d->evtchn_port_ops->init(d, evtchn);
> }
>
> -static inline void evtchn_port_set_pending(struct vcpu *v,
> +static inline void evtchn_port_set_pending(struct domain *d,
> + unsigned int vcpu_id,
> struct evtchn *evtchn)
I would rename this to the now-vacant evtchn_set_pending(), since it takes
an evtchn* rather than a port. (Its sole caller was evtchn_set_pending(), so
the patch won't grow.)
Furthermore, all callers except send_guest_vcpu_virq() currently use
evtchn->notify_vcpu_id to get a struct vcpu* to pass. I think you can
drop the vcpu_id parameter and use evtchn->notify_vcpu_id directly,
which reduces the likelihood of a bug where the evtchn is bound to one
vcpu but a caller gets the wrong id and raises the event channel on the
wrong vcpu.
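Something like this, as a sketch only (combining both suggestions, using the
existing notify_vcpu_id field):

    static inline void evtchn_set_pending(struct domain *d,
                                          struct evtchn *evtchn)
    {
        /* the channel itself records which vcpu it is bound to */
        d->evtchn_port_ops->set_pending(d->vcpu[evtchn->notify_vcpu_id], evtchn);
    }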
~Andrew
> {
> - v->domain->evtchn_port_ops->set_pending(v, evtchn);
> + d->evtchn_port_ops->set_pending(d->vcpu[vcpu_id], evtchn);
> }
>
> static inline void evtchn_port_clear_pending(struct domain *d,
* Re: [PATCH] evtchn: simplify sending of notifications
2015-01-12 11:33 ` Andrew Cooper
@ 2015-01-12 11:42 ` Jan Beulich
2015-01-12 16:01 ` Andrew Cooper
0 siblings, 1 reply; 5+ messages in thread
From: Jan Beulich @ 2015-01-12 11:42 UTC (permalink / raw)
To: Andrew Cooper
Cc: Ian Campbell, xen-devel, Keir Fraser, Ian Jackson, Tim Deegan
>>> On 12.01.15 at 12:33, <andrew.cooper3@citrix.com> wrote:
> On 12/01/15 08:57, Jan Beulich wrote:
>> --- a/xen/include/xen/event.h
>> +++ b/xen/include/xen/event.h
>> @@ -152,10 +152,11 @@ static inline void evtchn_port_init(stru
>> d->evtchn_port_ops->init(d, evtchn);
>> }
>>
>> -static inline void evtchn_port_set_pending(struct vcpu *v,
>> +static inline void evtchn_port_set_pending(struct domain *d,
>> + unsigned int vcpu_id,
>> struct evtchn *evtchn)
>
> I would rename this to the, now vacant, evtchn_set_pending(). It takes
> an evtchn* not a port. (Its sole caller was evtchn_set_pending(), so
> the patch won't grow)
No (and I had actually considered it) - that would get its name out of
sync with all its sibling wrappers.
> Furthermore, all callers except send_guest_vcpu_virq() currently use
> evtchn->notify_vcpu_id to get a struct vcpu* to pass. I think you can
> drop the vcpu_id parameter and use evtchn->notify_vcpu_id directly,
> which reduces the likelyhood of a bug where the evtchn is bound to one
> vcpu but a caller gets the wrong id and raises the event channel on the
> wrong vcpu.
Generally a nice idea, but it doesn't immediately/obviously fit with
the use in send_guest_vcpu_virq().
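(For reference, the send_guest_vcpu_virq() call in the patch passes v->vcpu_id
rather than reading the channel's notify_vcpu_id:

    d = v->domain;
    evtchn_port_set_pending(d, v->vcpu_id, evtchn_from_port(d, port));

so dropping the parameter would change which field that path keys off.)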
Jan
* Re: [PATCH] evtchn: simplify sending of notifications
2015-01-12 11:42 ` Jan Beulich
@ 2015-01-12 16:01 ` Andrew Cooper
0 siblings, 0 replies; 5+ messages in thread
From: Andrew Cooper @ 2015-01-12 16:01 UTC (permalink / raw)
To: Jan Beulich; +Cc: Ian Campbell, xen-devel, Keir Fraser, Ian Jackson, Tim Deegan
On 12/01/15 11:42, Jan Beulich wrote:
>>>> On 12.01.15 at 12:33, <andrew.cooper3@citrix.com> wrote:
>> On 12/01/15 08:57, Jan Beulich wrote:
>>> --- a/xen/include/xen/event.h
>>> +++ b/xen/include/xen/event.h
>>> @@ -152,10 +152,11 @@ static inline void evtchn_port_init(stru
>>> d->evtchn_port_ops->init(d, evtchn);
>>> }
>>>
>>> -static inline void evtchn_port_set_pending(struct vcpu *v,
>>> +static inline void evtchn_port_set_pending(struct domain *d,
>>> + unsigned int vcpu_id,
>>> struct evtchn *evtchn)
>> I would rename this to the, now vacant, evtchn_set_pending(). It takes
>> an evtchn* not a port. (Its sole caller was evtchn_set_pending(), so
>> the patch won't grow)
> No (and I had actually considered it) - that would get its name out of
> sync with all its sibling wrappers.
Ah yes - consistency is more important than correctness here.
>
>> Furthermore, all callers except send_guest_vcpu_virq() currently use
>> evtchn->notify_vcpu_id to get a struct vcpu* to pass. I think you can
>> drop the vcpu_id parameter and use evtchn->notify_vcpu_id directly,
>> which reduces the likelyhood of a bug where the evtchn is bound to one
>> vcpu but a caller gets the wrong id and raises the event channel on the
>> wrong vcpu.
> Generally a nice idea, but it doesn't immediately/obviously fit with
> the use in send_guest_vcpu_virq().
It is awkward that some of the virqs will get delivered on an arbitrary
vcpu, especially as the virq API requires the binding domain to choose a
destination vcpu. Either way, this is not something to be addressed in
a cleanup patch.
Therefore the original patch is Reviewed-by: Andrew Cooper
<andrew.cooper3@citrix.com>
~Andrew