From: Paul Durrant <Paul.Durrant@citrix.com>
To: 'Chao Gao' <chao.gao@intel.com>,
	"xen-devel@lists.xen.org" <xen-devel@lists.xen.org>
Cc: Andrew Cooper <Andrew.Cooper3@citrix.com>,
	Jan Beulich <jbeulich@suse.com>
Subject: Re: [RFC Patch v4 1/8] ioreq: remove most 'buf' parameter from static functions
Date: Wed, 6 Dec 2017 14:44:52 +0000
Message-ID: <ff33a474affb41f3a6c295a1e039a66a@AMSPEX02CL03.citrite.net>
In-Reply-To: <1512546614-9937-2-git-send-email-chao.gao@intel.com>

> -----Original Message-----
> From: Chao Gao [mailto:chao.gao@intel.com]
> Sent: 06 December 2017 07:50
> To: xen-devel@lists.xen.org
> Cc: Chao Gao <chao.gao@intel.com>; Andrew Cooper
> <Andrew.Cooper3@citrix.com>; Jan Beulich <jbeulich@suse.com>; Paul
> Durrant <Paul.Durrant@citrix.com>
> Subject: [RFC Patch v4 1/8] ioreq: remove most 'buf' parameter from static
> functions
> 
> This is a preparation patch for supporting multiple IOREQ pages.
> No functional change.
> 
> Signed-off-by: Chao Gao <chao.gao@intel.com>
> ---
> v4:
>  -new
> ---
>  xen/arch/x86/hvm/ioreq.c | 48 +++++++++++++++++++++++-------------------------
>  1 file changed, 23 insertions(+), 25 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
> index d991ac9..a879f20 100644
> --- a/xen/arch/x86/hvm/ioreq.c
> +++ b/xen/arch/x86/hvm/ioreq.c
> @@ -237,10 +237,9 @@ static void hvm_free_ioreq_gfn(struct hvm_ioreq_server *s, gfn_t gfn)
>      set_bit(i, &d->arch.hvm_domain.ioreq_gfn.mask);
>  }
> 
> -static void hvm_unmap_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
> +static void hvm_unmap_ioreq_gfn(struct hvm_ioreq_server *s,
> +                                struct hvm_ioreq_page *iorp)
>  {
> -    struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
> -

I don't really like this approach. I'd prefer swapping the bool for an unsigned page index, following the convention adopted in hvm_get_ioreq_server_frame(), for which macros already exist: 0 equating to the bufioreq page, and 1+ to the pages holding the per-vcpu ioreq structs.
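
For illustration, a minimal sketch of what that might look like (the macro and helper names below are hypothetical stand-ins, not the ones actually defined alongside hvm_get_ioreq_server_frame()):

    /* Hypothetical index convention: 0 = bufioreq page, 1+ = ioreq page(s). */
    #define IOREQ_PAGE_BUFIOREQ  0
    #define IOREQ_PAGE_IOREQ(n)  (1 + (n))

    /* Hypothetical helper mapping a page index to the corresponding page. */
    static struct hvm_ioreq_page *hvm_ioreq_page(struct hvm_ioreq_server *s,
                                                 unsigned int idx)
    {
        return idx == IOREQ_PAGE_BUFIOREQ ? &s->bufioreq : &s->ioreq;
    }

    /* The static functions would then take an index rather than a bool: */
    static void hvm_unmap_ioreq_gfn(struct hvm_ioreq_server *s, unsigned int idx)
    {
        struct hvm_ioreq_page *iorp = hvm_ioreq_page(s, idx);

        if ( gfn_eq(iorp->gfn, INVALID_GFN) )
            return;
        /* ... unmap logic unchanged from the current code ... */
    }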

  Paul

>      if ( gfn_eq(iorp->gfn, INVALID_GFN) )
>          return;
> 
> @@ -289,15 +288,15 @@ static int hvm_map_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
>                                   &iorp->va);
> 
>      if ( rc )
> -        hvm_unmap_ioreq_gfn(s, buf);
> +        hvm_unmap_ioreq_gfn(s, iorp);
> 
>      return rc;
>  }
> 
> -static int hvm_alloc_ioreq_mfn(struct hvm_ioreq_server *s, bool buf)
> +static int hvm_alloc_ioreq_mfn(struct hvm_ioreq_server *s,
> +                               struct hvm_ioreq_page *iorp)
>  {
>      struct domain *currd = current->domain;
> -    struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
> 
>      if ( iorp->page )
>      {
> @@ -344,10 +343,9 @@ static int hvm_alloc_ioreq_mfn(struct hvm_ioreq_server *s, bool buf)
>      return 0;
>  }
> 
> -static void hvm_free_ioreq_mfn(struct hvm_ioreq_server *s, bool buf)
> +static void hvm_free_ioreq_mfn(struct hvm_ioreq_server *s,
> +                               struct hvm_ioreq_page *iorp)
>  {
> -    struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
> -
>      if ( !iorp->page )
>          return;
> 
> @@ -380,11 +378,11 @@ bool is_ioreq_server_page(struct domain *d, const struct page_info *page)
>      return found;
>  }
> 
> -static void hvm_remove_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
> +static void hvm_remove_ioreq_gfn(struct hvm_ioreq_server *s,
> +                                 struct hvm_ioreq_page *iorp)
> 
>  {
>      struct domain *d = s->domain;
> -    struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
> 
>      if ( IS_DEFAULT(s) || gfn_eq(iorp->gfn, INVALID_GFN) )
>          return;
> @@ -395,10 +393,10 @@ static void hvm_remove_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
>      clear_page(iorp->va);
>  }
> 
> -static int hvm_add_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
> +static int hvm_add_ioreq_gfn(struct hvm_ioreq_server *s,
> +                             struct hvm_ioreq_page *iorp)
>  {
>      struct domain *d = s->domain;
> -    struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
>      int rc;
> 
>      if ( IS_DEFAULT(s) || gfn_eq(iorp->gfn, INVALID_GFN) )
> @@ -550,36 +548,36 @@ static int hvm_ioreq_server_map_pages(struct hvm_ioreq_server *s)
>          rc = hvm_map_ioreq_gfn(s, true);
> 
>      if ( rc )
> -        hvm_unmap_ioreq_gfn(s, false);
> +        hvm_unmap_ioreq_gfn(s, &s->ioreq);
> 
>      return rc;
>  }
> 
>  static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s)
>  {
> -    hvm_unmap_ioreq_gfn(s, true);
> -    hvm_unmap_ioreq_gfn(s, false);
> +    hvm_unmap_ioreq_gfn(s, &s->ioreq);
> +    hvm_unmap_ioreq_gfn(s, &s->bufioreq);
>  }
> 
>  static int hvm_ioreq_server_alloc_pages(struct hvm_ioreq_server *s)
>  {
>      int rc;
> 
> -    rc = hvm_alloc_ioreq_mfn(s, false);
> +    rc = hvm_alloc_ioreq_mfn(s, &s->ioreq);
> 
>      if ( !rc && (s->bufioreq_handling != HVM_IOREQSRV_BUFIOREQ_OFF) )
> -        rc = hvm_alloc_ioreq_mfn(s, true);
> +        rc = hvm_alloc_ioreq_mfn(s, &s->bufioreq);
> 
>      if ( rc )
> -        hvm_free_ioreq_mfn(s, false);
> +        hvm_free_ioreq_mfn(s, &s->ioreq);
> 
>      return rc;
>  }
> 
>  static void hvm_ioreq_server_free_pages(struct hvm_ioreq_server *s)
>  {
> -    hvm_free_ioreq_mfn(s, true);
> -    hvm_free_ioreq_mfn(s, false);
> +    hvm_free_ioreq_mfn(s, &s->bufioreq);
> +    hvm_free_ioreq_mfn(s, &s->ioreq);
>  }
> 
>  static void hvm_ioreq_server_free_rangesets(struct hvm_ioreq_server *s)
> @@ -646,8 +644,8 @@ static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s)
>      if ( s->enabled )
>          goto done;
> 
> -    hvm_remove_ioreq_gfn(s, false);
> -    hvm_remove_ioreq_gfn(s, true);
> +    hvm_remove_ioreq_gfn(s, &s->ioreq);
> +    hvm_remove_ioreq_gfn(s, &s->bufioreq);
> 
>      s->enabled = true;
> 
> @@ -667,8 +665,8 @@ static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s)
>      if ( !s->enabled )
>          goto done;
> 
> -    hvm_add_ioreq_gfn(s, true);
> -    hvm_add_ioreq_gfn(s, false);
> +    hvm_add_ioreq_gfn(s, &s->bufioreq);
> +    hvm_add_ioreq_gfn(s, &s->ioreq);
> 
>      s->enabled = false;
> 
> --
> 1.8.3.1

