From: Stefano Stabellini <sstabellini@kernel.org>
To: Oleksandr Tyshchenko <olekstysh@gmail.com>
Cc: Stefano Stabellini <sstabellini@kernel.org>,
	Julien Grall <julien@xen.org>, Wei Liu <wl@xen.org>,
	Andrew Cooper <andrew.cooper3@citrix.com>,
	Ian Jackson <ian.jackson@eu.citrix.com>,
	George Dunlap <george.dunlap@citrix.com>,
	Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>,
	Julien Grall <julien.grall@arm.com>,
	Jan Beulich <jbeulich@suse.com>,
	xen-devel@lists.xenproject.org,
	Daniel De Graaf <dgdegra@tycho.nsa.gov>,
	Volodymyr Babchuk <Volodymyr_Babchuk@epam.com>
Subject: Re: [RFC PATCH V1 04/12] xen/arm: Introduce arch specific bits for IOREQ/DM features
Date: Tue, 4 Aug 2020 16:22:32 -0700 (PDT)
Message-ID: <alpine.DEB.2.21.2008041327110.5748@sstabellini-ThinkPad-T480s>
In-Reply-To: <1596478888-23030-5-git-send-email-olekstysh@gmail.com>

On Mon, 3 Aug 2020, Oleksandr Tyshchenko wrote:
> From: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
> 
> This patch makes it possible to forward Guest MMIO accesses
> to a device emulator on Arm and enables that support for
> Arm64.
> 
> Also update XSM code a bit to let DM op be used on Arm.
> New arch DM op will be introduced in the follow-up patch.
> 
> Please note, at the moment the build on Arm32 is broken
> (see cmpxchg usage in hvm_send_buffered_ioreq()) if someone

Speaking of buffered_ioreq, if I recall correctly, it was only used
for VGA-related things on x86. It looks like that is still the case.

If so, do we need it on ARM? Note that I don't think we can get rid of
it from the interface, as it is baked into ioreq, but it might be
possible to have a dummy implementation on ARM. Or maybe not: looking at
xen/common/hvm/ioreq.c, it seems it would be difficult to disentangle
the bufioreq handling from the rest of the code.
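
Just to illustrate what I mean by "dummy implementation" (a rough sketch
only; arch_hvm_send_buffered_ioreq is a made-up hook name, not something
the common code exposes today):

/*
 * If the common code grew an arch hook for buffered ioreqs, the Arm side
 * could simply refuse them, given only x86/VGA is known to use them.
 */
static inline int arch_hvm_send_buffered_ioreq(struct hvm_ioreq_server *s,
                                               buf_ioreq_t *bp)
{
    /* No known buffered-ioreq (VGA) users on Arm. */
    return IOREQ_IO_UNHANDLED;
}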


> wants to enable CONFIG_IOREQ_SERVER due to the lack of
> cmpxchg_64 support on Arm32.
> 
> Please note, this is a split/cleanup of Julien's PoC:
> "Add support for Guest IO forwarding to a device emulator"
> 
> Signed-off-by: Julien Grall <julien.grall@arm.com>
> Signed-off-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>

[...]


> @@ -2275,6 +2282,16 @@ static void check_for_vcpu_work(void)
>   */
>  void leave_hypervisor_to_guest(void)
>  {
> +#ifdef CONFIG_IOREQ_SERVER
> +    /*
> +     * XXX: Check the return. Shall we call that in
> +     * continue_running and context_switch instead?
> +     * The benefits would be to avoid calling
> +     * handle_hvm_io_completion on every return.
> +     */

Yeah, that could be a simple and good optimization.
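
FWIW, even without moving the call, a cheap guard here would already
avoid calling handle_hvm_io_completion on every return. Something along
these lines (an untested sketch; it assumes io_req.state tracks an
in-flight request the same way it does on x86):

#ifdef CONFIG_IOREQ_SERVER
    /* Only enter the completion path if this vCPU has an ioreq in flight. */
    if ( current->arch.hvm.hvm_io.io_req.state != STATE_IOREQ_NONE )
    {
        local_irq_enable();
        handle_hvm_io_completion(current);
    }
#endif
    local_irq_disable();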


> +    local_irq_enable();
> +    handle_hvm_io_completion(current);
> +#endif
>      local_irq_disable();
>  
>      check_for_vcpu_work();
> diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
> index 4e2f582..e060b0a 100644
> --- a/xen/include/asm-arm/domain.h
> +++ b/xen/include/asm-arm/domain.h
> @@ -11,12 +11,64 @@
>  #include <asm/vgic.h>
>  #include <asm/vpl011.h>
>  #include <public/hvm/params.h>
> +#include <public/hvm/dm_op.h>
> +#include <public/hvm/ioreq.h>
>  #include <xen/serial.h>
>  #include <xen/rbtree.h>
>  
> +struct hvm_ioreq_page {
> +    gfn_t gfn;
> +    struct page_info *page;
> +    void *va;
> +};
> +
> +struct hvm_ioreq_vcpu {
> +    struct list_head list_entry;
> +    struct vcpu      *vcpu;
> +    evtchn_port_t    ioreq_evtchn;
> +    bool             pending;
> +};
> +
> +#define NR_IO_RANGE_TYPES (XEN_DMOP_IO_RANGE_PCI + 1)
> +#define MAX_NR_IO_RANGES  256
> +
> +#define MAX_NR_IOREQ_SERVERS 8
> +#define DEFAULT_IOSERVID 0
> +
> +struct hvm_ioreq_server {
> +    struct domain          *target, *emulator;
> +
> +    /* Lock to serialize toolstack modifications */
> +    spinlock_t             lock;
> +
> +    struct hvm_ioreq_page  ioreq;
> +    struct list_head       ioreq_vcpu_list;
> +    struct hvm_ioreq_page  bufioreq;
> +
> +    /* Lock to serialize access to buffered ioreq ring */
> +    spinlock_t             bufioreq_lock;
> +    evtchn_port_t          bufioreq_evtchn;
> +    struct rangeset        *range[NR_IO_RANGE_TYPES];
> +    bool                   enabled;
> +    uint8_t                bufioreq_handling;
> +};
> +
>  struct hvm_domain
>  {
>      uint64_t              params[HVM_NR_PARAMS];
> +
> +    /* Guest page range used for non-default ioreq servers */
> +    struct {
> +        unsigned long base;
> +        unsigned long mask;
> +        unsigned long legacy_mask; /* indexed by HVM param number */
> +    } ioreq_gfn;
> +
> +    /* Lock protects all other values in the sub-struct and the default */
> +    struct {
> +        spinlock_t              lock;
> +        struct hvm_ioreq_server *server[MAX_NR_IOREQ_SERVERS];
> +    } ioreq_server;
>  };
>  
>  #ifdef CONFIG_ARM_64
> @@ -93,6 +145,29 @@ struct arch_domain
>  #endif
>  }  __cacheline_aligned;
>  
> +enum hvm_io_completion {
> +    HVMIO_no_completion,
> +    HVMIO_mmio_completion,
> +    HVMIO_pio_completion,
> +    HVMIO_realmode_completion

realmode is an x86-ism (as is pio); I wonder if we could get rid of it on ARM.
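
If we did drop them, the Arm variant could shrink to just the case we
can actually hit today (illustration only; it assumes nothing else ends
up depending on the pio/realmode values):

enum hvm_io_completion {
    HVMIO_no_completion,
    HVMIO_mmio_completion
};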


> +};
> +
> +struct hvm_vcpu_io {
> +    /* I/O request in flight to device model. */
> +    enum hvm_io_completion io_completion;
> +    ioreq_t                io_req;
> +
> +    /*
> +     * HVM emulation:
> +     *  Linear address @mmio_gla maps to MMIO physical frame @mmio_gpfn.
> +     *  The latter is known to be an MMIO frame (not RAM).
> +     *  This translation is only valid for accesses as per @mmio_access.
> +     */
> +    struct npfec        mmio_access;
> +    unsigned long       mmio_gla;
> +    unsigned long       mmio_gpfn;
> +};
> +
>  struct arch_vcpu
>  {
>      struct {
> @@ -206,6 +281,11 @@ struct arch_vcpu
>       */
>      bool need_flush_to_ram;
>  
> +    struct hvm_vcpu
> +    {
> +        struct hvm_vcpu_io hvm_io;
> +    } hvm;
> +
>  }  __cacheline_aligned;
>  
>  void vcpu_show_execution_state(struct vcpu *);
> diff --git a/xen/include/asm-arm/hvm/ioreq.h b/xen/include/asm-arm/hvm/ioreq.h
> new file mode 100644
> index 0000000..83a560c
> --- /dev/null
> +++ b/xen/include/asm-arm/hvm/ioreq.h
> @@ -0,0 +1,103 @@
> +/*
> + * hvm.h: Hardware virtual machine assist interface definitions.
> + *
> + * Copyright (c) 2016 Citrix Systems Inc.
> + * Copyright (c) 2019 Arm ltd.
> + *
> + * This program is free software; you can redistribute it and/or modify it
> + * under the terms and conditions of the GNU General Public License,
> + * version 2, as published by the Free Software Foundation.
> + *
> + * This program is distributed in the hope it will be useful, but WITHOUT
> + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
> + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
> + * more details.
> + *
> + * You should have received a copy of the GNU General Public License along with
> + * this program; If not, see <http://www.gnu.org/licenses/>.
> + */
> +
> +#ifndef __ASM_ARM_HVM_IOREQ_H__
> +#define __ASM_ARM_HVM_IOREQ_H__
> +
> +#include <public/hvm/ioreq.h>
> +#include <public/hvm/dm_op.h>
> +
> +#define has_vpci(d) (false)
> +
> +bool handle_mmio(void);
> +
> +static inline bool handle_pio(uint16_t port, unsigned int size, int dir)
> +{
> +    /* XXX */
> +    BUG();
> +    return true;
> +}
> +
> +static inline paddr_t hvm_mmio_first_byte(const ioreq_t *p)
> +{
> +    return p->addr;
> +}
> +
> +static inline paddr_t hvm_mmio_last_byte(const ioreq_t *p)
> +{
> +    unsigned long size = p->size;
> +
> +    return p->addr + size - 1;
> +}
> +
> +struct hvm_ioreq_server;
> +
> +static inline int p2m_set_ioreq_server(struct domain *d,
> +                                       unsigned int flags,
> +                                       struct hvm_ioreq_server *s)
> +{
> +    return -EOPNOTSUPP;
> +}
> +
> +static inline void msix_write_completion(struct vcpu *v)
> +{
> +}
> +
> +static inline void handle_realmode_completion(void)
> +{
> +    ASSERT_UNREACHABLE();
> +}
> +
> +static inline void paging_mark_pfn_dirty(struct domain *d, pfn_t pfn)
> +{
> +}
> +
> +static inline void hvm_get_ioreq_server_range_type(struct domain *d,
> +                                                   ioreq_t *p,
> +                                                   uint8_t *type,
> +                                                   uint64_t *addr)
> +{
> +    *type = (p->type == IOREQ_TYPE_PIO) ?
> +             XEN_DMOP_IO_RANGE_PORT : XEN_DMOP_IO_RANGE_MEMORY;
> +    *addr = p->addr;
> +}
> +
> +static inline void arch_hvm_ioreq_init(struct domain *d)
> +{
> +}
> +
> +static inline void arch_hvm_ioreq_destroy(struct domain *d)
> +{
> +}
> +
> +#define IOREQ_IO_HANDLED     IO_HANDLED
> +#define IOREQ_IO_UNHANDLED   IO_UNHANDLED
> +#define IOREQ_IO_RETRY       IO_RETRY
> +
> +#endif /* __ASM_ARM_HVM_IOREQ_H__ */
> +
> +/*
> + * Local variables:
> + * mode: C
> + * c-file-style: "BSD"
> + * c-basic-offset: 4
> + * tab-width: 4
> + * indent-tabs-mode: nil
> + * End:
> + */
> diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
> index 5fdb6e8..5823f11 100644
> --- a/xen/include/asm-arm/p2m.h
> +++ b/xen/include/asm-arm/p2m.h
> @@ -385,10 +385,11 @@ static inline int set_foreign_p2m_entry(struct domain *d, unsigned long gfn,
>                                          mfn_t mfn)
>  {
>      /*
> -     * NOTE: If this is implemented then proper reference counting of
> -     *       foreign entries will need to be implemented.
> +     * XXX: handle the reference properly. It looks like the page may not
> +     * always belong to d.

Just as a reference, and without taking away anything from the comment,
I think that QEMU is doing its own internal reference counting for these
mappings.
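
In case it helps, the direction that XXX comment points at could look
roughly like this (a sketch only: it just pins the backing page with
get_page() before mapping, keeps the p2m type from the patch, and leaves
out error handling and the matching put_page() when the entry is removed):

static inline int set_foreign_p2m_entry(struct domain *d, unsigned long gfn,
                                        mfn_t mfn)
{
    struct page_info *page = mfn_to_page(mfn);

    /* Pin the page so it cannot disappear while it is mapped into d. */
    if ( !get_page(page, page_get_owner(page)) )
        return -EINVAL;

    return guest_physmap_add_entry(d, _gfn(gfn), mfn, 0, p2m_ram_rw);
}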


>       */
> -    return -EOPNOTSUPP;
> +
> +    return guest_physmap_add_entry(d, _gfn(gfn), mfn, 0, p2m_ram_rw);
>  }
>  
>  /*



