From: Stefano Stabellini <sstabellini@kernel.org>
To: Stefano Stabellini <sstabellini@kernel.org>
Cc: Stefano Stabellini <stefanos@xilinx.com>,
	wei.liu2@citrix.com, blackskygg@gmail.com,
	ian.jackson@eu.citrix.com, xen-devel@lists.xen.org,
	julien.grall@arm.com
Subject: Re: [PATCH v9 7/7] xen/arm: export shared memory regions as reserved-memory on device tree
Date: Fri, 7 Dec 2018 13:06:27 -0800 (PST)	[thread overview]
Message-ID: <alpine.DEB.2.10.1812071252370.18779@sstabellini-ThinkPad-X260> (raw)
In-Reply-To: <1544048163-27499-7-git-send-email-sstabellini@kernel.org>

On Wed, 5 Dec 2018, Stefano Stabellini wrote:
> Shared memory regions need to be advertised to the guest. Fortunately, a
> device tree binding for special memory regions already exists:
> reserved-memory.
> 
> Add a reserved-memory node for each shared memory region, for both
> owners and borrowers.
> 
> Signed-off-by: Stefano Stabellini <stefanos@xilinx.com>
> ---
> Changes in v9:
> - rename master to owner and slave to borrower
> - always add offset because it is 0 for the owner
> - use xen,id for id
> - add xen,offset according to new bindings

FYI, the device tree binding just went upstream.
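
For readers cross-checking against the binding, here is a sketch of the
reserved-memory node the code below generates for a borrower domain. The
id string, addresses, and sizes are invented for illustration, and the
encoding assumes GUEST_ROOT_ADDRESS_CELLS and GUEST_ROOT_SIZE_CELLS are
both 2, as they are for Arm guests in Xen's public headers:

    reserved-memory {
        #address-cells = <0x2>;
        #size-cells = <0x2>;
        ranges;

        /* node address is begin + offset, per make_reserved_nodes() */
        xen-shmem@48100000 {
            compatible = "xen,shared-memory-v1";
            reg = <0x0 0x48100000 0x0 0x100000>;
            xen,id = "my-shmem-0";
            /* xen,offset is only emitted for borrowers */
            xen,offset = <0x0 0x100000>;
        };
    };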



> Changes in v8:
> - code style
> - id is added to device tree
> 
> Changes in v7:
> - change node name to xen-shmem
> - add compatible property
> - add id property
> ---
>  tools/libxl/libxl_arch.h |  2 +-
>  tools/libxl/libxl_arm.c  | 66 +++++++++++++++++++++++++++++++++++++++++++++---
>  tools/libxl/libxl_dom.c  |  2 +-
>  tools/libxl/libxl_x86.c  |  2 +-
>  4 files changed, 66 insertions(+), 6 deletions(-)
> 
> diff --git a/tools/libxl/libxl_arch.h b/tools/libxl/libxl_arch.h
> index 63c26cc..417e710 100644
> --- a/tools/libxl/libxl_arch.h
> +++ b/tools/libxl/libxl_arch.h
> @@ -36,7 +36,7 @@ int libxl__arch_domain_create(libxl__gc *gc, libxl_domain_config *d_config,
>  /* setup arch specific hardware description, i.e. DTB on ARM */
>  _hidden
>  int libxl__arch_domain_init_hw_description(libxl__gc *gc,
> -                                           libxl_domain_build_info *info,
> +                                           libxl_domain_config *d_config,
>                                             libxl__domain_build_state *state,
>                                             struct xc_dom_image *dom);
>  /* finalize arch specific hardware description. */
> diff --git a/tools/libxl/libxl_arm.c b/tools/libxl/libxl_arm.c
> index 054ad58..aa1e07f 100644
> --- a/tools/libxl/libxl_arm.c
> +++ b/tools/libxl/libxl_arm.c
> @@ -436,6 +436,63 @@ static int make_memory_nodes(libxl__gc *gc, void *fdt,
>      return 0;
>  }
>  
> +static int make_reserved_nodes(libxl__gc *gc, void *fdt,
> +                               libxl_domain_config *d_config)
> +{
> +    int res, i;
> +    const char *name;
> +
> +    if (d_config->num_sshms == 0)
> +        return 0;
> +
> +    res = fdt_begin_node(fdt, "reserved-memory");
> +    if (res) return res;
> +
> +    res = fdt_property_cell(fdt, "#address-cells", GUEST_ROOT_ADDRESS_CELLS);
> +    if (res) return res;
> +
> +    res = fdt_property_cell(fdt, "#size-cells", GUEST_ROOT_SIZE_CELLS);
> +    if (res) return res;
> +
> +    res = fdt_property(fdt, "ranges", NULL, 0);
> +    if (res) return res;
> +
> +    for (i = 0; i < d_config->num_sshms; i++) {
> +        uint64_t start = d_config->sshms[i].begin +
> +                         d_config->sshms[i].offset;
> +
> +        name = GCSPRINTF("xen-shmem@%"PRIx64, start);
> +
> +        res = fdt_begin_node(fdt, name);
> +        if (res) return res;
> +
> +        res = fdt_property_regs(gc, fdt, GUEST_ROOT_ADDRESS_CELLS,
> +                                GUEST_ROOT_SIZE_CELLS, 1, start,
> +                                d_config->sshms[i].size);
> +        if (res) return res;
> +
> +        res = fdt_property_compat(gc, fdt, 1, "xen,shared-memory-v1");
> +        if (res) return res;
> +
> +        res = fdt_property_string(fdt, "xen,id", d_config->sshms[i].id);
> +        if (res) return res;
> +
> +        if (d_config->sshms[i].role == LIBXL_SSHM_ROLE_BORROWER) {
> +            res = fdt_property_u64(fdt, "xen,offset",
> +                                   d_config->sshms[i].offset);
> +            if (res) return res;
> +        }
> +
> +        res = fdt_end_node(fdt);
> +        if (res) return res;
> +    }
> +
> +    res = fdt_end_node(fdt);
> +    if (res) return res;
> +
> +    return 0;
> +}
> +
>  static int make_gicv2_node(libxl__gc *gc, void *fdt,
>                             uint64_t gicd_base, uint64_t gicd_size,
>                             uint64_t gicc_base, uint64_t gicc_size)
> @@ -811,10 +868,11 @@ static int copy_partial_fdt(libxl__gc *gc, void *fdt, void *pfdt)
>  
>  #define FDT_MAX_SIZE (1<<20)
>  
> -static int libxl__prepare_dtb(libxl__gc *gc, libxl_domain_build_info *info,
> +static int libxl__prepare_dtb(libxl__gc *gc, libxl_domain_config *d_config,
>                                libxl__domain_build_state *state,
>                                struct xc_dom_image *dom)
>  {
> +    libxl_domain_build_info *info = &d_config->b_info;
>      void *fdt = NULL;
>      void *pfdt = NULL;
>      int rc, res;
> @@ -897,6 +955,7 @@ next_resize:
>          FDT( make_psci_node(gc, fdt) );
>  
>          FDT( make_memory_nodes(gc, fdt, dom) );
> +        FDT( make_reserved_nodes(gc, fdt, d_config) );
>  
>          switch (info->arch_arm.gic_version) {
>          case LIBXL_GIC_VERSION_V2:
> @@ -946,12 +1005,13 @@ out:
>  }
>  
>  int libxl__arch_domain_init_hw_description(libxl__gc *gc,
> -                                           libxl_domain_build_info *info,
> +                                           libxl_domain_config *d_config,
>                                             libxl__domain_build_state *state,
>                                             struct xc_dom_image *dom)
>  {
>      int rc;
>      uint64_t val;
> +    libxl_domain_build_info *info = &d_config->b_info;
>  
>      if (info->type != LIBXL_DOMAIN_TYPE_PVH) {
>          LOG(ERROR, "Unsupported Arm guest type %s",
> @@ -971,7 +1031,7 @@ int libxl__arch_domain_init_hw_description(libxl__gc *gc,
>      if (rc)
>          return rc;
>  
> -    rc = libxl__prepare_dtb(gc, info, state, dom);
> +    rc = libxl__prepare_dtb(gc, d_config, state, dom);
>      if (rc) goto out;
>  
>      if (!libxl_defbool_val(info->acpi)) {
> diff --git a/tools/libxl/libxl_dom.c b/tools/libxl/libxl_dom.c
> index 8a8a32c..2dc7696 100644
> --- a/tools/libxl/libxl_dom.c
> +++ b/tools/libxl/libxl_dom.c
> @@ -706,7 +706,7 @@ static int libxl__build_dom(libxl__gc *gc, uint32_t domid,
>          LOG(ERROR, "xc_dom_parse_image failed");
>          goto out;
>      }
> -    if ( (ret = libxl__arch_domain_init_hw_description(gc, info, state, dom)) != 0 ) {
> +    if ( (ret = libxl__arch_domain_init_hw_description(gc, d_config, state, dom)) != 0 ) {
>          LOGE(ERROR, "libxl__arch_domain_init_hw_description failed");
>          goto out;
>      }
> diff --git a/tools/libxl/libxl_x86.c b/tools/libxl/libxl_x86.c
> index dc8c703..b99ec25 100644
> --- a/tools/libxl/libxl_x86.c
> +++ b/tools/libxl/libxl_x86.c
> @@ -367,7 +367,7 @@ int libxl__arch_extra_memory(libxl__gc *gc,
>  }
>  
>  int libxl__arch_domain_init_hw_description(libxl__gc *gc,
> -                                           libxl_domain_build_info *info,
> +                                           libxl_domain_config *d_config,
>                                             libxl__domain_build_state *state,
>                                             struct xc_dom_image *dom)
>  {
> -- 
> 1.9.1
> 
