From: Jan Beulich <jbeulich@suse.com>
To: "xen-devel@lists.xenproject.org" <xen-devel@lists.xenproject.org>
Cc: "Andrew Cooper" <andrew.cooper3@citrix.com>,
"George Dunlap" <George.Dunlap@eu.citrix.com>,
"Ian Jackson" <iwj@xenproject.org>,
"Julien Grall" <julien@xen.org>, "Wei Liu" <wl@xen.org>,
"Stefano Stabellini" <sstabellini@kernel.org>,
"Roger Pau Monné" <roger.pau@citrix.com>
Subject: [PATCH v6 2/3] evtchn: add helper for port_is_valid() + evtchn_from_port()
Date: Thu, 27 May 2021 13:28:37 +0200 [thread overview]
Message-ID: <76106d2d-6219-bbb1-ee06-601da6f40673@suse.com> (raw)
In-Reply-To: <01bbf3d4-ca6a-e837-91fe-b34aa014564c@suse.com>
The combination is pretty common, so adding a simple local helper seems
worthwhile. Make it const- and type-correct, in turn requiring the
two called functions to also be const-correct (and on this occasion also
make them type-correct).
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Julien Grall <jgrall@amazon.com>
---
v6: Re-base, also for re-ordering / shrinking of series.
v4: New.
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -147,6 +147,12 @@ static bool virq_is_global(unsigned int
return true;
}
+static struct evtchn *_evtchn_from_port(const struct domain *d,
+ evtchn_port_t port)
+{
+ return port_is_valid(d, port) ? evtchn_from_port(d, port) : NULL;
+}
+
static void free_evtchn_bucket(struct domain *d, struct evtchn *bucket)
{
if ( !bucket )
@@ -319,7 +325,6 @@ static long evtchn_alloc_unbound(evtchn_
return rc;
}
-
static void double_evtchn_lock(struct evtchn *lchn, struct evtchn *rchn)
{
ASSERT(lchn != rchn);
@@ -365,9 +370,9 @@ static long evtchn_bind_interdomain(evtc
ERROR_EXIT(lport);
lchn = evtchn_from_port(ld, lport);
- if ( !port_is_valid(rd, rport) )
+ rchn = _evtchn_from_port(rd, rport);
+ if ( !rchn )
ERROR_EXIT_DOM(-EINVAL, rd);
- rchn = evtchn_from_port(rd, rport);
if ( (rchn->state != ECS_UNBOUND) ||
(rchn->u.unbound.remote_domid != ld->domain_id) )
ERROR_EXIT_DOM(-EINVAL, rd);
@@ -602,15 +607,12 @@ static long evtchn_bind_pirq(evtchn_bind
int evtchn_close(struct domain *d1, int port1, bool guest)
{
struct domain *d2 = NULL;
- struct evtchn *chn1, *chn2;
- int port2;
+ struct evtchn *chn1 = _evtchn_from_port(d1, port1), *chn2;
long rc = 0;
- if ( !port_is_valid(d1, port1) )
+ if ( !chn1 )
return -EINVAL;
- chn1 = evtchn_from_port(d1, port1);
-
again:
spin_lock(&d1->event_lock);
@@ -698,10 +700,8 @@ int evtchn_close(struct domain *d1, int
goto out;
}
- port2 = chn1->u.interdomain.remote_port;
- BUG_ON(!port_is_valid(d2, port2));
-
- chn2 = evtchn_from_port(d2, port2);
+ chn2 = _evtchn_from_port(d2, chn1->u.interdomain.remote_port);
+ BUG_ON(!chn2);
BUG_ON(chn2->state != ECS_INTERDOMAIN);
BUG_ON(chn2->u.interdomain.remote_dom != d1);
@@ -739,15 +739,13 @@ int evtchn_close(struct domain *d1, int
int evtchn_send(struct domain *ld, unsigned int lport)
{
- struct evtchn *lchn, *rchn;
+ struct evtchn *lchn = _evtchn_from_port(ld, lport), *rchn;
struct domain *rd;
int rport, ret = 0;
- if ( !port_is_valid(ld, lport) )
+ if ( !lchn )
return -EINVAL;
- lchn = evtchn_from_port(ld, lport);
-
evtchn_read_lock(lchn);
/* Guest cannot send via a Xen-attached event channel. */
@@ -967,15 +965,15 @@ int evtchn_status(evtchn_status_t *statu
if ( d == NULL )
return -ESRCH;
- spin_lock(&d->event_lock);
-
- if ( !port_is_valid(d, port) )
+ chn = _evtchn_from_port(d, port);
+ if ( !chn )
{
- rc = -EINVAL;
- goto out;
+ rcu_unlock_domain(d);
+ return -EINVAL;
}
- chn = evtchn_from_port(d, port);
+ spin_lock(&d->event_lock);
+
if ( consumer_is_xen(chn) )
{
rc = -EACCES;
@@ -1038,11 +1036,10 @@ long evtchn_bind_vcpu(unsigned int port,
if ( (v = domain_vcpu(d, vcpu_id)) == NULL )
return -ENOENT;
- if ( !port_is_valid(d, port) )
+ chn = _evtchn_from_port(d, port);
+ if ( !chn )
return -EINVAL;
- chn = evtchn_from_port(d, port);
-
spin_lock(&d->event_lock);
/* Guest cannot re-bind a Xen-attached event channel. */
@@ -1088,13 +1085,11 @@ long evtchn_bind_vcpu(unsigned int port,
int evtchn_unmask(unsigned int port)
{
struct domain *d = current->domain;
- struct evtchn *evtchn;
+ struct evtchn *evtchn = _evtchn_from_port(d, port);
- if ( unlikely(!port_is_valid(d, port)) )
+ if ( unlikely(!evtchn) )
return -EINVAL;
- evtchn = evtchn_from_port(d, port);
-
evtchn_read_lock(evtchn);
evtchn_port_unmask(d, evtchn);
@@ -1177,14 +1172,12 @@ static long evtchn_set_priority(const st
{
struct domain *d = current->domain;
unsigned int port = set_priority->port;
- struct evtchn *chn;
+ struct evtchn *chn = _evtchn_from_port(d, port);
long ret;
- if ( !port_is_valid(d, port) )
+ if ( !chn )
return -EINVAL;
- chn = evtchn_from_port(d, port);
-
evtchn_read_lock(chn);
ret = evtchn_port_set_priority(d, chn, set_priority->priority);
@@ -1410,10 +1403,10 @@ void free_xen_event_channel(struct domai
void notify_via_xen_event_channel(struct domain *ld, int lport)
{
- struct evtchn *lchn, *rchn;
+ struct evtchn *lchn = _evtchn_from_port(ld, lport), *rchn;
struct domain *rd;
- if ( !port_is_valid(ld, lport) )
+ if ( !lchn )
{
/*
* Make sure ->is_dying is read /after/ ->valid_evtchns, pairing
@@ -1424,8 +1417,6 @@ void notify_via_xen_event_channel(struct
return;
}
- lchn = evtchn_from_port(ld, lport);
-
if ( !evtchn_read_trylock(lchn) )
return;
@@ -1580,12 +1571,14 @@ static void domain_dump_evtchn_info(stru
spin_lock(&d->event_lock);
- for ( port = 1; port_is_valid(d, port); ++port )
+ for ( port = 1; ; ++port )
{
- const struct evtchn *chn;
+ const struct evtchn *chn = _evtchn_from_port(d, port);
char *ssid;
- chn = evtchn_from_port(d, port);
+ if ( !chn )
+ break;
+
if ( chn->state == ECS_FREE )
continue;
--- a/xen/include/xen/event.h
+++ b/xen/include/xen/event.h
@@ -120,7 +120,7 @@ static inline void evtchn_read_unlock(st
read_unlock(&evtchn->lock);
}
-static inline bool_t port_is_valid(struct domain *d, unsigned int p)
+static inline bool port_is_valid(const struct domain *d, evtchn_port_t p)
{
if ( p >= read_atomic(&d->valid_evtchns) )
return false;
@@ -135,7 +135,8 @@ static inline bool_t port_is_valid(struc
return true;
}
-static inline struct evtchn *evtchn_from_port(struct domain *d, unsigned int p)
+static inline struct evtchn *evtchn_from_port(const struct domain *d,
+ evtchn_port_t p)
{
if ( p < EVTCHNS_PER_BUCKET )
return &d->evtchn[array_index_nospec(p, EVTCHNS_PER_BUCKET)];
next prev parent reply other threads:[~2021-05-27 11:28 UTC|newest]
Thread overview: 15+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-05-27 11:27 [PATCH v6 0/3] evtchn: (not so) recent XSAs follow-on Jan Beulich
2021-05-27 11:28 ` [PATCH v6 1/3] evtchn: slightly defer lock acquire where possible Jan Beulich
2021-05-27 13:46 ` Roger Pau Monné
2021-05-27 18:48 ` Julien Grall
2021-05-28 8:30 ` Roger Pau Monné
2021-05-28 10:23 ` Jan Beulich
2021-05-28 10:48 ` Julien Grall
2021-05-28 13:31 ` Roger Pau Monné
2021-05-28 13:41 ` Jan Beulich
2021-05-28 14:26 ` Julien Grall
2021-06-01 11:54 ` Jan Beulich
2021-06-07 18:15 ` Julien Grall
2021-05-27 11:28 ` Jan Beulich [this message]
2021-05-27 11:28 ` [PATCH v6 3/3] evtchn: type adjustments Jan Beulich
2021-05-27 13:52 ` Roger Pau Monné
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=76106d2d-6219-bbb1-ee06-601da6f40673@suse.com \
--to=jbeulich@suse.com \
--cc=George.Dunlap@eu.citrix.com \
--cc=andrew.cooper3@citrix.com \
--cc=iwj@xenproject.org \
--cc=julien@xen.org \
--cc=roger.pau@citrix.com \
--cc=sstabellini@kernel.org \
--cc=wl@xen.org \
--cc=xen-devel@lists.xenproject.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).