From: Roger Pau Monne <roger.pau@citrix.com>
To: <xen-devel@lists.xenproject.org>
Cc: Stefano Stabellini <sstabellini@kernel.org>, Wei Liu <wl@xen.org>,
Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>,
George Dunlap <George.Dunlap@eu.citrix.com>,
Andrew Cooper <andrew.cooper3@citrix.com>,
Ian Jackson <ian.jackson@eu.citrix.com>, Tim Deegan <tim@xen.org>,
Julien Grall <julien.grall@arm.com>,
Paul Durrant <paul.durrant@citrix.com>,
Jan Beulich <jbeulich@suse.com>,
Roger Pau Monne <roger.pau@citrix.com>
Subject: [Xen-devel] [PATCH v2 03/11] ioreq: switch selection and forwarding to use ioservid_t
Date: Tue, 3 Sep 2019 18:14:20 +0200
Message-ID: <20190903161428.7159-4-roger.pau@citrix.com>
In-Reply-To: <20190903161428.7159-1-roger.pau@citrix.com>
hvm_select_ioreq_server and hvm_send_ioreq were both using
hvm_ioreq_server pointers directly; switch to using ioservid_t in order
to select and forward ioreqs.
This is a preparatory change: future patches will use the ioreq server
id to differentiate between internal and external ioreq servers.
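
For reference, a minimal standalone sketch (not part of the patch) of the
calling convention this change moves callers to: hold an ioservid_t and
treat XEN_INVALID_IOSERVID as "no server". select_server() and
send_ioreq() below are hypothetical stand-ins for
hvm_select_ioreq_server() and hvm_send_ioreq(); only ioservid_t and
XEN_INVALID_IOSERVID come from the public dm_op.h interface.

#include <stdint.h>
#include <stdio.h>

typedef uint16_t ioservid_t;
#define XEN_INVALID_IOSERVID 0xffff

/* Hypothetical stand-in for hvm_select_ioreq_server(). */
static ioservid_t select_server(unsigned long addr)
{
    /* Pretend only accesses below 0x1000 have a backing server (id 0). */
    return addr < 0x1000 ? 0 : XEN_INVALID_IOSERVID;
}

/* Hypothetical stand-in for hvm_send_ioreq(). */
static int send_ioreq(ioservid_t id, unsigned long addr)
{
    printf("forwarding access to %#lx to server %u\n", addr, (unsigned int)id);
    return 0;
}

int main(void)
{
    unsigned long addr = 0x80;
    ioservid_t id = select_server(addr);

    /* If there is no suitable backing server, just ignore the access. */
    if ( id == XEN_INVALID_IOSERVID )
        return 0;

    return send_ioreq(id, addr);
}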
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
---
Changes since v1:
- New in this version.
---
xen/arch/x86/hvm/dm.c | 2 +-
xen/arch/x86/hvm/emulate.c | 14 +++++++-------
xen/arch/x86/hvm/ioreq.c | 24 ++++++++++++------------
xen/arch/x86/hvm/stdvga.c | 8 ++++----
xen/arch/x86/mm/p2m.c | 20 ++++++++++----------
xen/include/asm-x86/hvm/ioreq.h | 5 ++---
xen/include/asm-x86/p2m.h | 9 ++++-----
xen/include/public/hvm/dm_op.h | 1 +
8 files changed, 41 insertions(+), 42 deletions(-)
diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
index d6d0e8be89..c2fca9f729 100644
--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -263,7 +263,7 @@ static int set_mem_type(struct domain *d,
return -EOPNOTSUPP;
/* Do not change to HVMMEM_ioreq_server if no ioreq server mapped. */
- if ( !p2m_get_ioreq_server(d, &flags) )
+ if ( p2m_get_ioreq_server(d, &flags) == XEN_INVALID_IOSERVID )
return -EINVAL;
}
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index d75d3e6fd6..51d2fcba2d 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -254,7 +254,7 @@ static int hvmemul_do_io(
* However, there's no cheap approach to avoid above situations in xen,
* so the device model side needs to check the incoming ioreq event.
*/
- struct hvm_ioreq_server *s = NULL;
+ ioservid_t id = XEN_INVALID_IOSERVID;
p2m_type_t p2mt = p2m_invalid;
if ( is_mmio )
@@ -267,9 +267,9 @@ static int hvmemul_do_io(
{
unsigned int flags;
- s = p2m_get_ioreq_server(currd, &flags);
+ id = p2m_get_ioreq_server(currd, &flags);
- if ( s == NULL )
+ if ( id == XEN_INVALID_IOSERVID )
{
rc = X86EMUL_RETRY;
vio->io_req.state = STATE_IOREQ_NONE;
@@ -289,18 +289,18 @@ static int hvmemul_do_io(
}
}
- if ( !s )
- s = hvm_select_ioreq_server(currd, &p);
+ if ( id == XEN_INVALID_IOSERVID )
+ id = hvm_select_ioreq_server(currd, &p);
/* If there is no suitable backing DM, just ignore accesses */
- if ( !s )
+ if ( id == XEN_INVALID_IOSERVID )
{
rc = hvm_process_io_intercept(&null_handler, &p);
vio->io_req.state = STATE_IOREQ_NONE;
}
else
{
- rc = hvm_send_ioreq(s, &p, 0);
+ rc = hvm_send_ioreq(id, &p, 0);
if ( rc != X86EMUL_RETRY || currd->is_shutting_down )
vio->io_req.state = STATE_IOREQ_NONE;
else if ( !hvm_ioreq_needs_completion(&vio->io_req) )
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index 69652e1080..95492bc111 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -39,6 +39,7 @@ static void set_ioreq_server(struct domain *d, unsigned int id,
{
ASSERT(id < MAX_NR_IOREQ_SERVERS);
ASSERT(!s || !d->arch.hvm.ioreq_server.server[id]);
+ BUILD_BUG_ON(MAX_NR_IOREQ_SERVERS >= XEN_INVALID_IOSERVID);
d->arch.hvm.ioreq_server.server[id] = s;
}
@@ -868,7 +869,7 @@ int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
domain_pause(d);
- p2m_set_ioreq_server(d, 0, s);
+ p2m_set_ioreq_server(d, 0, id);
hvm_ioreq_server_disable(s);
@@ -1131,7 +1132,7 @@ int hvm_map_mem_type_to_ioreq_server(struct domain *d, ioservid_t id,
if ( s->emulator != current->domain )
goto out;
- rc = p2m_set_ioreq_server(d, flags, s);
+ rc = p2m_set_ioreq_server(d, flags, id);
out:
spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
@@ -1255,8 +1256,7 @@ void hvm_destroy_all_ioreq_servers(struct domain *d)
spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
}
-struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
- ioreq_t *p)
+ioservid_t hvm_select_ioreq_server(struct domain *d, ioreq_t *p)
{
struct hvm_ioreq_server *s;
uint32_t cf8;
@@ -1265,7 +1265,7 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
unsigned int id;
if ( p->type != IOREQ_TYPE_COPY && p->type != IOREQ_TYPE_PIO )
- return NULL;
+ return XEN_INVALID_IOSERVID;
cf8 = d->arch.hvm.pci_cf8;
@@ -1320,7 +1320,7 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
start = addr;
end = start + p->size - 1;
if ( rangeset_contains_range(r, start, end) )
- return s;
+ return id;
break;
@@ -1329,7 +1329,7 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
end = hvm_mmio_last_byte(p);
if ( rangeset_contains_range(r, start, end) )
- return s;
+ return id;
break;
@@ -1338,14 +1338,14 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
{
p->type = IOREQ_TYPE_PCI_CONFIG;
p->addr = addr;
- return s;
+ return id;
}
break;
}
}
- return NULL;
+ return XEN_INVALID_IOSERVID;
}
static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p)
@@ -1441,12 +1441,12 @@ static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p)
return X86EMUL_OKAY;
}
-int hvm_send_ioreq(struct hvm_ioreq_server *s, ioreq_t *proto_p,
- bool buffered)
+int hvm_send_ioreq(ioservid_t id, ioreq_t *proto_p, bool buffered)
{
struct vcpu *curr = current;
struct domain *d = curr->domain;
struct hvm_ioreq_vcpu *sv;
+ struct hvm_ioreq_server *s = get_ioreq_server(d, id);
ASSERT(s);
@@ -1512,7 +1512,7 @@ unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool buffered)
if ( !s->enabled )
continue;
- if ( hvm_send_ioreq(s, p, buffered) == X86EMUL_UNHANDLEABLE )
+ if ( hvm_send_ioreq(id, p, buffered) == X86EMUL_UNHANDLEABLE )
failed++;
}
diff --git a/xen/arch/x86/hvm/stdvga.c b/xen/arch/x86/hvm/stdvga.c
index bd398dbb1b..a689269712 100644
--- a/xen/arch/x86/hvm/stdvga.c
+++ b/xen/arch/x86/hvm/stdvga.c
@@ -466,7 +466,7 @@ static int stdvga_mem_write(const struct hvm_io_handler *handler,
.dir = IOREQ_WRITE,
.data = data,
};
- struct hvm_ioreq_server *srv;
+ ioservid_t id;
if ( !stdvga_cache_is_enabled(s) || !s->stdvga )
goto done;
@@ -507,11 +507,11 @@ static int stdvga_mem_write(const struct hvm_io_handler *handler,
}
done:
- srv = hvm_select_ioreq_server(current->domain, &p);
- if ( !srv )
+ id = hvm_select_ioreq_server(current->domain, &p);
+ if ( id == XEN_INVALID_IOSERVID )
return X86EMUL_UNHANDLEABLE;
- return hvm_send_ioreq(srv, &p, 1);
+ return hvm_send_ioreq(id, &p, 1);
}
static bool_t stdvga_mem_accept(const struct hvm_io_handler *handler,
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 8a5229ee21..43849cbbd9 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -102,6 +102,7 @@ static int p2m_initialise(struct domain *d, struct p2m_domain *p2m)
p2m_pt_init(p2m);
spin_lock_init(&p2m->ioreq.lock);
+ p2m->ioreq.server = XEN_INVALID_IOSERVID;
return ret;
}
@@ -361,7 +362,7 @@ void p2m_memory_type_changed(struct domain *d)
int p2m_set_ioreq_server(struct domain *d,
unsigned int flags,
- struct hvm_ioreq_server *s)
+ ioservid_t id)
{
struct p2m_domain *p2m = p2m_get_hostp2m(d);
int rc;
@@ -376,16 +377,16 @@ int p2m_set_ioreq_server(struct domain *d,
if ( flags == 0 )
{
rc = -EINVAL;
- if ( p2m->ioreq.server != s )
+ if ( p2m->ioreq.server != id )
goto out;
- p2m->ioreq.server = NULL;
+ p2m->ioreq.server = XEN_INVALID_IOSERVID;
p2m->ioreq.flags = 0;
}
else
{
rc = -EBUSY;
- if ( p2m->ioreq.server != NULL )
+ if ( p2m->ioreq.server != XEN_INVALID_IOSERVID )
goto out;
/*
@@ -397,7 +398,7 @@ int p2m_set_ioreq_server(struct domain *d,
if ( read_atomic(&p2m->ioreq.entry_count) )
goto out;
- p2m->ioreq.server = s;
+ p2m->ioreq.server = id;
p2m->ioreq.flags = flags;
}
@@ -409,19 +410,18 @@ int p2m_set_ioreq_server(struct domain *d,
return rc;
}
-struct hvm_ioreq_server *p2m_get_ioreq_server(struct domain *d,
- unsigned int *flags)
+ioservid_t p2m_get_ioreq_server(struct domain *d, unsigned int *flags)
{
struct p2m_domain *p2m = p2m_get_hostp2m(d);
- struct hvm_ioreq_server *s;
+ ioservid_t id;
spin_lock(&p2m->ioreq.lock);
- s = p2m->ioreq.server;
+ id = p2m->ioreq.server;
*flags = p2m->ioreq.flags;
spin_unlock(&p2m->ioreq.lock);
- return s;
+ return id;
}
void p2m_enable_hardware_log_dirty(struct domain *d)
diff --git a/xen/include/asm-x86/hvm/ioreq.h b/xen/include/asm-x86/hvm/ioreq.h
index e2588e912f..65491c48d2 100644
--- a/xen/include/asm-x86/hvm/ioreq.h
+++ b/xen/include/asm-x86/hvm/ioreq.h
@@ -47,9 +47,8 @@ int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v);
void hvm_all_ioreq_servers_remove_vcpu(struct domain *d, struct vcpu *v);
void hvm_destroy_all_ioreq_servers(struct domain *d);
-struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
- ioreq_t *p);
-int hvm_send_ioreq(struct hvm_ioreq_server *s, ioreq_t *proto_p,
+ioservid_t hvm_select_ioreq_server(struct domain *d, ioreq_t *p);
+int hvm_send_ioreq(ioservid_t id, ioreq_t *proto_p,
bool buffered);
unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool buffered);
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 94285db1b4..99a1dab311 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -354,7 +354,7 @@ struct p2m_domain {
* ioreq server who's responsible for the emulation of
* gfns with specific p2m type(for now, p2m_ioreq_server).
*/
- struct hvm_ioreq_server *server;
+ ioservid_t server;
/*
* flags specifies whether read, write or both operations
* are to be emulated by an ioreq server.
@@ -819,7 +819,7 @@ static inline p2m_type_t p2m_recalc_type_range(bool recalc, p2m_type_t t,
if ( !recalc || !p2m_is_changeable(t) )
return t;
- if ( t == p2m_ioreq_server && p2m->ioreq.server != NULL )
+ if ( t == p2m_ioreq_server && p2m->ioreq.server != XEN_INVALID_IOSERVID )
return t;
return p2m_is_logdirty_range(p2m, gfn_start, gfn_end) ? p2m_ram_logdirty
@@ -938,9 +938,8 @@ static inline unsigned int p2m_get_iommu_flags(p2m_type_t p2mt, mfn_t mfn)
}
int p2m_set_ioreq_server(struct domain *d, unsigned int flags,
- struct hvm_ioreq_server *s);
-struct hvm_ioreq_server *p2m_get_ioreq_server(struct domain *d,
- unsigned int *flags);
+ ioservid_t id);
+ioservid_t p2m_get_ioreq_server(struct domain *d, unsigned int *flags);
static inline int p2m_entry_modify(struct p2m_domain *p2m, p2m_type_t nt,
p2m_type_t ot, mfn_t nfn, mfn_t ofn,
diff --git a/xen/include/public/hvm/dm_op.h b/xen/include/public/hvm/dm_op.h
index d3b554d019..8725cc20d3 100644
--- a/xen/include/public/hvm/dm_op.h
+++ b/xen/include/public/hvm/dm_op.h
@@ -54,6 +54,7 @@
*/
typedef uint16_t ioservid_t;
+#define XEN_INVALID_IOSERVID 0xffff
/*
* XEN_DMOP_create_ioreq_server: Instantiate a new IOREQ Server for a
--
2.22.0