From: Paul Durrant <paul.durrant@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: Andrew Cooper <andrew.cooper3@citrix.com>,
Paul Durrant <paul.durrant@citrix.com>,
Jan Beulich <jbeulich@suse.com>
Subject: [PATCH v3 2/4] x86/hvm: take a reference on ioreq server emulating domain
Date: Tue, 20 Mar 2018 18:05:23 +0000 [thread overview]
Message-ID: <20180320180525.28605-3-paul.durrant@citrix.com> (raw)
In-Reply-To: <20180320180525.28605-1-paul.durrant@citrix.com>
When an ioreq server is created the code currently stores the id
of the emulating domain, but does not take a reference on that domain.
This patch modifies the code to hold a reference for the lifetime of the
ioreq server.
NOTE: ioreq servers are either destroyed explicitly or destroyed implicitly
in context of XEN_DOMCTL_destroydomain.
If the emulating domain is shut down prior to the target then
any domain reference held by an ioreq server will prevent it from
being destroyed. However, if an emulating domain is shut down prior
to its target then it is likely that the target's vcpus will block
fairly quickly waiting for emulation that will never occur, and when
the target domain is destroyed the reference on the zombie emulating
domain will be dropped allowing both to be cleaned up.
Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
---
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
v3:
- Minor code tweaks requested by Jan
- Expanded commit comment to explain how domain references will be
released
---
xen/arch/x86/hvm/ioreq.c | 31 +++++++++++++++++++------------
xen/include/asm-x86/hvm/domain.h | 4 +---
2 files changed, 20 insertions(+), 15 deletions(-)
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index 2b9e5562dd..154f6f1a32 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -218,7 +218,7 @@ static void hvm_unmap_ioreq_page(struct hvm_ioreq_server *s, bool buf)
static int hvm_map_ioreq_page(
struct hvm_ioreq_server *s, bool buf, unsigned long gfn)
{
- struct domain *d = s->domain;
+ struct domain *d = s->target;
struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
struct page_info *page;
void *va;
@@ -315,8 +315,8 @@ static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s,
spin_lock(&s->lock);
- rc = alloc_unbound_xen_event_channel(v->domain, v->vcpu_id, s->domid,
- NULL);
+ rc = alloc_unbound_xen_event_channel(v->domain, v->vcpu_id,
+ s->emulator->domain_id, NULL);
if ( rc < 0 )
goto fail2;
@@ -324,9 +324,10 @@ static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s,
if ( v->vcpu_id == 0 && s->bufioreq.va != NULL )
{
- struct domain *d = s->domain;
+ struct domain *d = s->target;
- rc = alloc_unbound_xen_event_channel(v->domain, 0, s->domid, NULL);
+ rc = alloc_unbound_xen_event_channel(v->domain, 0,
+ s->emulator->domain_id, NULL);
if ( rc < 0 )
goto fail3;
@@ -434,7 +435,7 @@ static int hvm_ioreq_server_setup_pages(struct hvm_ioreq_server *s,
bool is_default,
bool handle_bufioreq)
{
- struct domain *d = s->domain;
+ struct domain *d = s->target;
unsigned long ioreq_gfn = gfn_x(INVALID_GFN);
unsigned long bufioreq_gfn = gfn_x(INVALID_GFN);
int rc;
@@ -471,7 +472,7 @@ static int hvm_ioreq_server_setup_pages(struct hvm_ioreq_server *s,
static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s,
bool is_default)
{
- struct domain *d = s->domain;
+ struct domain *d = s->target;
bool handle_bufioreq = !!s->bufioreq.va;
if ( handle_bufioreq )
@@ -521,7 +522,7 @@ static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s,
if ( rc )
goto fail;
- s->range[i] = rangeset_new(s->domain, name,
+ s->range[i] = rangeset_new(s->target, name,
RANGESETF_prettyprint_hex);
xfree(name);
@@ -545,7 +546,7 @@ static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s,
static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s,
bool is_default)
{
- struct domain *d = s->domain;
+ struct domain *d = s->target;
struct hvm_ioreq_vcpu *sv;
bool handle_bufioreq = !!s->bufioreq.va;
@@ -576,7 +577,7 @@ static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s,
static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s,
bool is_default)
{
- struct domain *d = s->domain;
+ struct domain *d = s->target;
bool handle_bufioreq = !!s->bufioreq.va;
spin_lock(&s->lock);
@@ -602,12 +603,15 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
struct domain *d, bool is_default,
int bufioreq_handling, ioservid_t id)
{
+ struct domain *currd = current->domain;
struct vcpu *v;
int rc;
s->id = id;
- s->domain = d;
- s->domid = current->domain->domain_id;
+ s->target = d;
+
+ get_knownalive_domain(currd);
+ s->emulator = currd;
spin_lock_init(&s->lock);
INIT_LIST_HEAD(&s->ioreq_vcpu_list);
@@ -641,6 +645,7 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
fail_map:
hvm_ioreq_server_free_rangesets(s, is_default);
+ put_domain(s->emulator);
return rc;
}
@@ -651,6 +656,8 @@ static void hvm_ioreq_server_deinit(struct hvm_ioreq_server *s,
hvm_ioreq_server_remove_all_vcpus(s);
hvm_ioreq_server_unmap_pages(s, is_default);
hvm_ioreq_server_free_rangesets(s, is_default);
+
+ put_domain(s->emulator);
}
static ioservid_t next_ioservid(struct domain *d)
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index 7f128c05ff..6e03d024c8 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -53,13 +53,11 @@ struct hvm_ioreq_vcpu {
struct hvm_ioreq_server {
struct list_head list_entry;
- struct domain *domain;
+ struct domain *target, *emulator;
/* Lock to serialize toolstack modifications */
spinlock_t lock;
- /* Domain id of emulating domain */
- domid_t domid;
ioservid_t id;
struct hvm_ioreq_page ioreq;
struct list_head ioreq_vcpu_list;
--
2.11.0
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
next prev parent reply other threads:[~2018-03-20 18:07 UTC|newest]
Thread overview: 6+ messages / expand[flat|nested] mbox.gz Atom feed top
2018-03-20 18:05 [PATCH v3 0/4] stricter ioreq server permissions checks Paul Durrant
2018-03-20 18:05 ` [PATCH v3 1/4] x86/hvm: stop passing explicit domid to hvm_create_ioreq_server() Paul Durrant
2018-03-21 13:28 ` Andrew Cooper
2018-03-20 18:05 ` Paul Durrant [this message]
2018-03-20 18:05 ` [PATCH v3 3/4] x86/hvm: re-structure some of the ioreq server look-up loops Paul Durrant
2018-03-20 18:05 ` [PATCH v3 4/4] x86/hvm: add stricter permissions checks to ioreq server control plane Paul Durrant
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20180320180525.28605-3-paul.durrant@citrix.com \
--to=paul.durrant@citrix.com \
--cc=andrew.cooper3@citrix.com \
--cc=jbeulich@suse.com \
--cc=xen-devel@lists.xenproject.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.