From: Oleksandr <olekstysh@gmail.com>
To: Stefano Stabellini <sstabellini@kernel.org>
Cc: "'Kevin Tian'" <kevin.tian@intel.com>,
"Julien Grall" <julien@xen.org>,
"'Jun Nakajima'" <jun.nakajima@intel.com>,
"'Wei Liu'" <wl@xen.org>,
paul@xen.org, "'Andrew Cooper'" <andrew.cooper3@citrix.com>,
"'Ian Jackson'" <ian.jackson@eu.citrix.com>,
"'George Dunlap'" <george.dunlap@citrix.com>,
"'Tim Deegan'" <tim@xen.org>,
"'Oleksandr Tyshchenko'" <oleksandr_tyshchenko@epam.com>,
"'Julien Grall'" <julien.grall@arm.com>,
"Jan Beulich" <jbeulich@suse.com>,
xen-devel@lists.xenproject.org,
"'Roger Pau Monné'" <roger.pau@citrix.com>
Subject: Re: [RFC PATCH V1 01/12] hvm/ioreq: Make x86's IOREQ feature common
Date: Sat, 8 Aug 2020 01:19:03 +0300 [thread overview]
Message-ID: <06f78323-b8f5-fd11-486a-437267eccc29@gmail.com> (raw)
In-Reply-To: <alpine.DEB.2.21.2008071259580.16004@sstabellini-ThinkPad-T480s>
On 08.08.20 00:50, Stefano Stabellini wrote:
Hi Stefano
> On Fri, 7 Aug 2020, Oleksandr wrote:
>> On 06.08.20 03:37, Stefano Stabellini wrote:
>>
>> Hi Stefano
>>
>> Trying to simulate the IO_RETRY handling mechanism (according to the model below), I
>> continuously get IO_RETRY back from try_fwd_ioserv() ...
>>
>>> OK, thanks for the details. My interpretation seems to be correct.
>>>
>>> In which case, it looks like xen/arch/arm/io.c:try_fwd_ioserv should
>>> return IO_RETRY. Then, xen/arch/arm/traps.c:do_trap_stage2_abort_guest
>>> also needs to handle try_handle_mmio returning IO_RETRY the first
>>> time around, and IO_HANDLED after QEMU does its job.
>>>
>>> What should do_trap_stage2_abort_guest do on IO_RETRY? Simply return
>>> early and let the scheduler do its job? Something like:
>>>
>>> enum io_state state = try_handle_mmio(regs, hsr, gpa);
>>>
>>> switch ( state )
>>> {
>>> case IO_ABORT:
>>> goto inject_abt;
>>> case IO_HANDLED:
>>> advance_pc(regs, hsr);
>>> return;
>>> case IO_RETRY:
>>> /* finish later */
>>> return;
>>> case IO_UNHANDLED:
>>> /* IO unhandled, try another way to handle it. */
>>> break;
>>> default:
>>> ASSERT_UNREACHABLE();
>>> }
>>>
>>> Then, xen/arch/arm/ioreq.c:handle_mmio() gets called by
>>> handle_hvm_io_completion() after QEMU completes the emulation. Today,
>>> handle_mmio just sets the user register with the read value.
>>>
>>> But it would be better if it called again the original function
>>> do_trap_stage2_abort_guest to actually retry the original operation.
>>> This time do_trap_stage2_abort_guest calls try_handle_mmio() and gets
>>> IO_HANDLED instead of IO_RETRY,
>> I may be missing some important point, but I fail to see why try_handle_mmio
>> (try_fwd_ioserv) would return IO_HANDLED instead of IO_RETRY at this stage.
>> Or does the current try_fwd_ioserv() logic need rework?
> I think you should check the ioreq->state in try_fwd_ioserv(), if the
> result is ready, then ioreq->state should be STATE_IORESP_READY, and you
> can return IO_HANDLED.
Indeed! I had just come to the same conclusion when I saw your answer.
This is a dirty test patch:
---
xen/arch/arm/io.c | 12 ++++++++++++
xen/arch/arm/ioreq.c | 12 ++++++++++++
xen/arch/arm/traps.c | 6 ++++--
xen/include/asm-arm/hvm/ioreq.h | 2 ++
xen/include/asm-arm/traps.h | 3 +++
5 files changed, 33 insertions(+), 2 deletions(-)
diff --git a/xen/arch/arm/io.c b/xen/arch/arm/io.c
index 436f669..65a08f8 100644
--- a/xen/arch/arm/io.c
+++ b/xen/arch/arm/io.c
@@ -130,6 +130,10 @@ static enum io_state try_fwd_ioserv(struct
cpu_user_regs *regs,
{
case STATE_IOREQ_NONE:
break;
+
+ case STATE_IORESP_READY:
+ return IO_HANDLED;
+
default:
printk("d%u wrong state %u\n", v->domain->domain_id,
vio->io_req.state);
@@ -156,9 +160,11 @@ static enum io_state try_fwd_ioserv(struct
cpu_user_regs *regs,
else
vio->io_completion = HVMIO_mmio_completion;
+#if 0
/* XXX: Decide what to do */
if ( rc == IO_RETRY )
rc = IO_HANDLED;
+#endif
return rc;
}
@@ -185,6 +191,12 @@ enum io_state try_handle_mmio(struct cpu_user_regs
*regs,
#ifdef CONFIG_IOREQ_SERVER
rc = try_fwd_ioserv(regs, v, &info);
+ if ( rc == IO_HANDLED )
+ {
+ printk("HANDLED %s[%d]\n", __func__, __LINE__);
+ handle_mmio_finish();
+ } else
+ printk("RETRY %s[%d]\n", __func__, __LINE__);
#endif
return rc;
diff --git a/xen/arch/arm/ioreq.c b/xen/arch/arm/ioreq.c
index 8f60c41..c8ed454 100644
--- a/xen/arch/arm/ioreq.c
+++ b/xen/arch/arm/ioreq.c
@@ -33,8 +33,20 @@
#include <public/hvm/dm_op.h>
#include <public/hvm/ioreq.h>
+#include <asm/traps.h>
+
bool handle_mmio(void)
{
+ struct cpu_user_regs *regs = guest_cpu_user_regs();
+ const union hsr hsr = { .bits = regs->hsr };
+
+ do_trap_stage2_abort_guest(regs, hsr);
+
+ return true;
+}
+
+bool handle_mmio_finish(void)
+{
struct vcpu *v = current;
struct cpu_user_regs *regs = guest_cpu_user_regs();
const union hsr hsr = { .bits = regs->hsr };
diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index ea472d1..3493d77 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -1882,7 +1882,7 @@ static bool try_map_mmio(gfn_t gfn)
return !map_regions_p2mt(d, gfn, 1, mfn, p2m_mmio_direct_c);
}
-static void do_trap_stage2_abort_guest(struct cpu_user_regs *regs,
+void do_trap_stage2_abort_guest(struct cpu_user_regs *regs,
const union hsr hsr)
{
/*
@@ -1965,11 +1965,13 @@ static void do_trap_stage2_abort_guest(struct
cpu_user_regs *regs,
case IO_HANDLED:
advance_pc(regs, hsr);
return;
+ case IO_RETRY:
+ /* finish later */
+ return;
case IO_UNHANDLED:
/* IO unhandled, try another way to handle it. */
break;
default:
- /* XXX: Handle IO_RETRY */
ASSERT_UNREACHABLE();
}
}
diff --git a/xen/include/asm-arm/hvm/ioreq.h
b/xen/include/asm-arm/hvm/ioreq.h
index 392ce64..fb4684d 100644
--- a/xen/include/asm-arm/hvm/ioreq.h
+++ b/xen/include/asm-arm/hvm/ioreq.h
@@ -27,6 +27,8 @@
bool handle_mmio(void);
+bool handle_mmio_finish(void);
+
static inline bool handle_pio(uint16_t port, unsigned int size, int dir)
{
/* XXX */
diff --git a/xen/include/asm-arm/traps.h b/xen/include/asm-arm/traps.h
index 997c378..392fdb1 100644
--- a/xen/include/asm-arm/traps.h
+++ b/xen/include/asm-arm/traps.h
@@ -40,6 +40,9 @@ void advance_pc(struct cpu_user_regs *regs, const
union hsr hsr);
void inject_undef_exception(struct cpu_user_regs *regs, const union
hsr hsr);
+void do_trap_stage2_abort_guest(struct cpu_user_regs *regs,
+ const union hsr hsr);
+
/* read as zero and write ignore */
void handle_raz_wi(struct cpu_user_regs *regs, int regidx, bool read,
const union hsr hsr, int min_el);
--
2.7.4
>
> That is assuming that you are looking at the live version of the ioreq
> shared with QEMU instead of a private copy of it, which I am not sure.
> Looking at try_fwd_ioserv() it would seem that vio->io_req is just a
> copy? The live version is returned by get_ioreq() ?
>
> Even in handle_hvm_io_completion, instead of setting vio->io_req.state
> to STATE_IORESP_READY by hand, it would be better to look at the live
> version of the ioreq because QEMU will have already set ioreq->state
> to STATE_IORESP_READY (hw/i386/xen/xen-hvm.c:cpu_handle_ioreq).
I need to recheck that.
Thank you.
--
Regards,
Oleksandr Tyshchenko
next prev parent reply other threads:[~2020-08-07 22:19 UTC|newest]
Thread overview: 140+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-08-03 18:21 [RFC PATCH V1 00/12] IOREQ feature (+ virtio-mmio) on Arm Oleksandr Tyshchenko
2020-08-03 18:21 ` [RFC PATCH V1 01/12] hvm/ioreq: Make x86's IOREQ feature common Oleksandr Tyshchenko
2020-08-04 7:45 ` Paul Durrant
2020-08-04 11:10 ` Oleksandr
2020-08-04 11:23 ` Paul Durrant
2020-08-04 11:51 ` Oleksandr
2020-08-04 13:18 ` Paul Durrant
2020-08-04 13:52 ` Julien Grall
2020-08-04 15:41 ` Jan Beulich
2020-08-04 19:11 ` Stefano Stabellini
2020-08-05 7:01 ` Jan Beulich
2020-08-06 0:37 ` Stefano Stabellini
2020-08-06 6:59 ` Jan Beulich
2020-08-06 20:32 ` Stefano Stabellini
2020-08-07 13:19 ` Oleksandr
2020-08-07 16:45 ` Oleksandr
2020-08-07 21:50 ` Stefano Stabellini
2020-08-07 22:19 ` Oleksandr [this message]
2020-08-10 13:41 ` Oleksandr
2020-08-10 23:34 ` Stefano Stabellini
2020-08-11 9:19 ` Julien Grall
2020-08-11 10:10 ` Oleksandr
2020-08-11 22:47 ` Stefano Stabellini
2020-08-12 14:35 ` Oleksandr
2020-08-12 23:08 ` Stefano Stabellini
2020-08-13 20:16 ` Julien Grall
2020-08-07 23:45 ` Oleksandr
2020-08-10 23:34 ` Stefano Stabellini
2020-08-05 8:33 ` Julien Grall
2020-08-06 0:37 ` Stefano Stabellini
2020-08-06 9:45 ` Julien Grall
2020-08-06 23:48 ` Stefano Stabellini
2020-08-10 19:20 ` Julien Grall
2020-08-10 23:34 ` Stefano Stabellini
2020-08-11 11:28 ` Julien Grall
2020-08-11 22:48 ` Stefano Stabellini
2020-08-12 8:19 ` Julien Grall
2020-08-20 19:14 ` Oleksandr
2020-08-21 0:53 ` Stefano Stabellini
2020-08-21 18:54 ` Julien Grall
2020-08-05 13:30 ` Julien Grall
2020-08-06 11:37 ` Oleksandr
2020-08-10 16:29 ` Julien Grall
2020-08-10 17:28 ` Oleksandr
2020-08-05 16:15 ` Andrew Cooper
2020-08-06 8:20 ` Oleksandr
2020-08-15 17:30 ` Julien Grall
2020-08-16 19:37 ` Oleksandr
2020-08-03 18:21 ` [RFC PATCH V1 02/12] hvm/dm: Make x86's DM " Oleksandr Tyshchenko
2020-08-03 18:21 ` [RFC PATCH V1 03/12] xen/mm: Make x86's XENMEM_resource_ioreq_server handling common Oleksandr Tyshchenko
2020-08-03 18:21 ` [RFC PATCH V1 04/12] xen/arm: Introduce arch specific bits for IOREQ/DM features Oleksandr Tyshchenko
2020-08-04 7:49 ` Paul Durrant
2020-08-04 14:01 ` Julien Grall
2020-08-04 23:22 ` Stefano Stabellini
2020-08-15 17:56 ` Julien Grall
2020-08-17 14:36 ` Oleksandr
2020-08-04 23:22 ` Stefano Stabellini
2020-08-05 7:05 ` Jan Beulich
2020-08-05 16:41 ` Stefano Stabellini
2020-08-05 19:45 ` Oleksandr
2020-08-05 9:32 ` Julien Grall
2020-08-05 15:41 ` Oleksandr
2020-08-06 10:19 ` Julien Grall
2020-08-10 18:09 ` Oleksandr
2020-08-10 18:21 ` Oleksandr
2020-08-10 19:00 ` Julien Grall
2020-08-10 20:29 ` Oleksandr
2020-08-10 22:37 ` Julien Grall
2020-08-11 6:13 ` Oleksandr
2020-08-12 15:08 ` Oleksandr
2020-08-11 17:09 ` Oleksandr
2020-08-11 17:50 ` Julien Grall
2020-08-13 18:41 ` Oleksandr
2020-08-13 20:36 ` Julien Grall
2020-08-13 21:49 ` Oleksandr
2020-08-13 20:39 ` Oleksandr Tyshchenko
2020-08-13 22:14 ` Julien Grall
2020-08-14 12:08 ` Oleksandr
2020-08-05 14:12 ` Julien Grall
2020-08-05 14:45 ` Jan Beulich
2020-08-05 19:30 ` Oleksandr
2020-08-06 11:08 ` Julien Grall
2020-08-06 11:29 ` Jan Beulich
2020-08-20 18:30 ` Oleksandr
2020-08-21 6:16 ` Jan Beulich
2020-08-21 11:13 ` Oleksandr
2020-08-06 13:27 ` Oleksandr
2020-08-10 18:25 ` Julien Grall
2020-08-10 19:58 ` Oleksandr
2020-08-05 16:13 ` Jan Beulich
2020-08-05 19:47 ` Oleksandr
2020-08-03 18:21 ` [RFC PATCH V1 05/12] hvm/dm: Introduce xendevicemodel_set_irq_level DM op Oleksandr Tyshchenko
2020-08-04 23:22 ` Stefano Stabellini
2020-08-05 9:39 ` Julien Grall
2020-08-06 0:37 ` Stefano Stabellini
2020-08-06 11:32 ` Julien Grall
2020-08-06 23:49 ` Stefano Stabellini
2020-08-07 8:43 ` Jan Beulich
2020-08-07 21:50 ` Stefano Stabellini
2020-08-08 9:27 ` Julien Grall
2020-08-08 9:28 ` Julien Grall
2020-08-10 23:34 ` Stefano Stabellini
2020-08-11 13:04 ` Julien Grall
2020-08-11 22:48 ` Stefano Stabellini
2020-08-18 9:31 ` Julien Grall
2020-08-21 0:53 ` Stefano Stabellini
2020-08-17 15:23 ` Jan Beulich
2020-08-17 22:56 ` Stefano Stabellini
2020-08-18 8:03 ` Jan Beulich
2020-08-05 16:15 ` Jan Beulich
2020-08-05 22:12 ` Oleksandr
2020-08-03 18:21 ` [RFC PATCH V1 06/12] libxl: Introduce basic virtio-mmio support on Arm Oleksandr Tyshchenko
2020-08-03 18:21 ` [RFC PATCH V1 07/12] A collection of tweaks to be able to run emulator in driver domain Oleksandr Tyshchenko
2020-08-05 16:19 ` Jan Beulich
2020-08-05 16:40 ` Paul Durrant
2020-08-06 9:22 ` Oleksandr
2020-08-06 9:27 ` Jan Beulich
2020-08-14 16:30 ` Oleksandr
2020-08-16 15:36 ` Julien Grall
2020-08-17 15:07 ` Oleksandr
2020-08-03 18:21 ` [RFC PATCH V1 08/12] xen/arm: Invalidate qemu mapcache on XENMEM_decrease_reservation Oleksandr Tyshchenko
2020-08-05 16:21 ` Jan Beulich
2020-08-06 11:35 ` Julien Grall
2020-08-06 11:50 ` Jan Beulich
2020-08-06 14:28 ` Oleksandr
2020-08-06 16:33 ` Jan Beulich
2020-08-06 16:57 ` Oleksandr
2020-08-03 18:21 ` [RFC PATCH V1 09/12] libxl: Handle virtio-mmio irq in more correct way Oleksandr Tyshchenko
2020-08-04 23:22 ` Stefano Stabellini
2020-08-05 20:51 ` Oleksandr
2020-08-03 18:21 ` [RFC PATCH V1 10/12] libxl: Add support for virtio-disk configuration Oleksandr Tyshchenko
2020-08-04 23:23 ` Stefano Stabellini
2020-08-05 21:12 ` Oleksandr
2020-08-06 0:37 ` Stefano Stabellini
2020-08-03 18:21 ` [RFC PATCH V1 11/12] libxl: Insert "dma-coherent" property into virtio-mmio device node Oleksandr Tyshchenko
2020-08-04 23:23 ` Stefano Stabellini
2020-08-05 20:35 ` Oleksandr
2020-08-03 18:21 ` [RFC PATCH V1 12/12] libxl: Fix duplicate memory node in DT Oleksandr Tyshchenko
2020-08-15 17:24 ` [RFC PATCH V1 00/12] IOREQ feature (+ virtio-mmio) on Arm Julien Grall
2020-08-16 19:34 ` Oleksandr
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=06f78323-b8f5-fd11-486a-437267eccc29@gmail.com \
--to=olekstysh@gmail.com \
--cc=andrew.cooper3@citrix.com \
--cc=george.dunlap@citrix.com \
--cc=ian.jackson@eu.citrix.com \
--cc=jbeulich@suse.com \
--cc=julien.grall@arm.com \
--cc=julien@xen.org \
--cc=jun.nakajima@intel.com \
--cc=kevin.tian@intel.com \
--cc=oleksandr_tyshchenko@epam.com \
--cc=paul@xen.org \
--cc=roger.pau@citrix.com \
--cc=sstabellini@kernel.org \
--cc=tim@xen.org \
--cc=wl@xen.org \
--cc=xen-devel@lists.xenproject.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).