xen-devel.lists.xenproject.org archive mirror
* [Xen-devel] [PATCH v2] x86/HVM: p2m_ram_ro is incompatible with device pass-through
@ 2019-07-03 11:36 Jan Beulich
  2019-07-03 15:22 ` Paul Durrant
  2019-07-15  8:38 ` [Xen-devel] Ping: " Jan Beulich
  0 siblings, 2 replies; 7+ messages in thread
From: Jan Beulich @ 2019-07-03 11:36 UTC (permalink / raw)
  To: xen-devel
  Cc: George Dunlap, Andrew Cooper, Paul Durrant, Wei Liu,
	Roger Pau Monné

The write-discard property of the type can't be represented in IOMMU
page table entries. Make sure the respective checks / tracking can't
race, by utilizing the domain lock. The other sides of the sharing/
paging/log-dirty exclusion checks should subsequently perhaps also be
put under that lock then.

Take the opportunity and also convert neighboring bool_t to bool in
struct hvm_domain.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v2: Don't set p2m_ram_ro_used when failing the request.

--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -255,16 +255,33 @@ static int set_mem_type(struct domain *d
  
      mem_type = array_index_nospec(data->mem_type, ARRAY_SIZE(memtype));
  
-    if ( mem_type == HVMMEM_ioreq_server )
+    switch ( mem_type )
      {
          unsigned int flags;
  
+    case HVMMEM_ioreq_server:
          if ( !hap_enabled(d) )
              return -EOPNOTSUPP;
  
          /* Do not change to HVMMEM_ioreq_server if no ioreq server mapped. */
          if ( !p2m_get_ioreq_server(d, &flags) )
              return -EINVAL;
+
+        break;
+
+    case HVMMEM_ram_ro:
+        /* p2m_ram_ro can't be represented in IOMMU mappings. */
+        domain_lock(d);
+        if ( has_iommu_pt(d) )
+            rc = -EXDEV;
+        else
+            d->arch.hvm.p2m_ram_ro_used = true;
+        domain_unlock(d);
+
+        if ( rc )
+            return rc;
+
+        break;
      }
  
      while ( iter < data->nr )
--- a/xen/drivers/passthrough/pci.c
+++ b/xen/drivers/passthrough/pci.c
@@ -1448,17 +1448,36 @@ static int assign_device(struct domain *
      if ( !iommu_enabled || !hd->platform_ops )
          return 0;
  
-    /* Prevent device assign if mem paging or mem sharing have been
-     * enabled for this domain */
-    if ( unlikely(d->arch.hvm.mem_sharing_enabled ||
-                  vm_event_check_ring(d->vm_event_paging) ||
+    domain_lock(d);
+
+    /*
+     * Prevent device assignment if any of
+     * - mem paging
+     * - mem sharing
+     * - the p2m_ram_ro type
+     * - global log-dirty mode
+     * are in use by this domain.
+     */
+    if ( unlikely(vm_event_check_ring(d->vm_event_paging) ||
+#ifdef CONFIG_HVM
+                  (is_hvm_domain(d) &&
+                   (d->arch.hvm.mem_sharing_enabled ||
+                    d->arch.hvm.p2m_ram_ro_used)) ||
+#endif
                    p2m_get_hostp2m(d)->global_logdirty) )
+    {
+        domain_unlock(d);
          return -EXDEV;
+    }
  
      if ( !pcidevs_trylock() )
+    {
+        domain_unlock(d);
          return -ERESTART;
+    }
  
      rc = iommu_construct(d);
+    domain_unlock(d);
      if ( rc )
      {
          pcidevs_unlock();
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -156,10 +156,11 @@ struct hvm_domain {
  
      struct viridian_domain *viridian;
  
-    bool_t                 hap_enabled;
-    bool_t                 mem_sharing_enabled;
-    bool_t                 qemu_mapcache_invalidate;
-    bool_t                 is_s3_suspended;
+    bool                   hap_enabled;
+    bool                   mem_sharing_enabled;
+    bool                   p2m_ram_ro_used;
+    bool                   qemu_mapcache_invalidate;
+    bool                   is_s3_suspended;
  
      /*
       * TSC value that VCPUs use to calculate their tsc_offset value.
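
For illustration only, a hedged caller-side sketch of how the new restriction surfaces to a device model. It assumes the libxendevicemodel wrapper xendevicemodel_set_mem_type() and the usual errno convention; treat the names and the error handling shown as assumptions rather than part of this patch:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <xendevicemodel.h>

/* Sketch: mark one guest frame read-only (p2m_ram_ro). With this patch the
 * request is refused with EXDEV once the domain has IOMMU page tables. */
static int make_page_ro(domid_t domid, uint64_t gfn)
{
    xendevicemodel_handle *dmod = xendevicemodel_open(NULL, 0);
    int rc;

    if ( !dmod )
        return -1;

    rc = xendevicemodel_set_mem_type(dmod, domid, HVMMEM_ram_ro, gfn, 1);
    if ( rc < 0 && errno == EXDEV )
        fprintf(stderr, "gfn %#lx: p2m_ram_ro refused, domain has IOMMU page tables\n",
                (unsigned long)gfn);

    xendevicemodel_close(dmod);
    return rc;
}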

* Re: [Xen-devel] [PATCH v2] x86/HVM: p2m_ram_ro is incompatible with device pass-through
  2019-07-03 11:36 [Xen-devel] [PATCH v2] x86/HVM: p2m_ram_ro is incompatible with device pass-through Jan Beulich
@ 2019-07-03 15:22 ` Paul Durrant
  2019-07-04  9:18   ` Jan Beulich
  2019-07-15  8:38 ` [Xen-devel] Ping: " Jan Beulich
  1 sibling, 1 reply; 7+ messages in thread
From: Paul Durrant @ 2019-07-03 15:22 UTC (permalink / raw)
  To: 'Jan Beulich', xen-devel
  Cc: Andrew Cooper, George Dunlap, Wei Liu, Roger Pau Monne

> -----Original Message-----
> From: Jan Beulich <JBeulich@suse.com>
> Sent: 03 July 2019 12:36
> To: xen-devel@lists.xenproject.org
> Cc: George Dunlap <George.Dunlap@citrix.com>; Paul Durrant <Paul.Durrant@citrix.com>; Andrew Cooper
> <Andrew.Cooper3@citrix.com>; Wei Liu <wl@xen.org>; Roger Pau Monne <roger.pau@citrix.com>
> Subject: [PATCH v2] x86/HVM: p2m_ram_ro is incompatible with device pass-through
> 
> The write-discard property of the type can't be represented in IOMMU
> page table entries. Make sure the respective checks / tracking can't
> race, by utilizing the domain lock. The other sides of the sharing/
> paging/log-dirty exclusion checks should subsequently perhaps also be
> put under that lock then.
> 
> Take the opportunity and also convert neighboring bool_t to bool in
> struct hvm_domain.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> ---
> v2: Don't set p2m_ram_ro_used when failing the request.
> 
> --- a/xen/arch/x86/hvm/dm.c
> +++ b/xen/arch/x86/hvm/dm.c
> @@ -255,16 +255,33 @@ static int set_mem_type(struct domain *d
> 
>       mem_type = array_index_nospec(data->mem_type, ARRAY_SIZE(memtype));
> 
> -    if ( mem_type == HVMMEM_ioreq_server )
> +    switch ( mem_type )
>       {
>           unsigned int flags;
> 
> +    case HVMMEM_ioreq_server:
>           if ( !hap_enabled(d) )
>               return -EOPNOTSUPP;
> 
>           /* Do not change to HVMMEM_ioreq_server if no ioreq server mapped. */
>           if ( !p2m_get_ioreq_server(d, &flags) )
>               return -EINVAL;
> +
> +        break;
> +
> +    case HVMMEM_ram_ro:
> +        /* p2m_ram_ro can't be represented in IOMMU mappings. */
> +        domain_lock(d);
> +        if ( has_iommu_pt(d) )
> +            rc = -EXDEV;
> +        else
> +            d->arch.hvm.p2m_ram_ro_used = true;

Do we really want this to be a one-way trip? On the face of it, it would seem that keeping a count of p2m_ram_ro entries would be more desirable such that, once the last one is gone, devices can be assigned again?
If not, maybe it's time to go all the way and make IOMMU page table construction part of domain creation; then we simplify a lot of code and don't need any flag/refcount like this at all.
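
(Purely illustrative, a rough sketch of the counting alternative; the p2m_ram_ro_count field and the helper below are hypothetical, not existing Xen code:)

#include <xen/errno.h>
#include <xen/iommu.h>
#include <xen/sched.h>

/* Hypothetical: count live p2m_ram_ro entries under the domain lock, so
 * device assignment is only blocked while at least one such entry exists. */
static int p2m_ram_ro_adjust(struct domain *d, int delta)
{
    int rc = 0;

    domain_lock(d);

    if ( delta > 0 && has_iommu_pt(d) )
        rc = -EXDEV;                            /* same exclusion as the patch */
    else
        d->arch.hvm.p2m_ram_ro_count += delta;  /* hypothetical counter field */

    domain_unlock(d);

    return rc;
}

/* assign_device() would then test the counter instead of a sticky flag:
 *     if ( d->arch.hvm.p2m_ram_ro_count )
 *         rc = -EXDEV;
 * so assignment becomes possible again once the last p2m_ram_ro entry is
 * converted back to ordinary RAM. */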

  Paul

* Re: [Xen-devel] [PATCH v2] x86/HVM: p2m_ram_ro is incompatible with device pass-through
  2019-07-03 15:22 ` Paul Durrant
@ 2019-07-04  9:18   ` Jan Beulich
  2019-07-04  9:35     ` Paul Durrant
  0 siblings, 1 reply; 7+ messages in thread
From: Jan Beulich @ 2019-07-04  9:18 UTC (permalink / raw)
  To: Paul Durrant, xen-devel
  Cc: Andrew Cooper, George Dunlap, Wei Liu, Roger Pau Monne

On 03.07.2019 17:22, Paul Durrant wrote:
>> -----Original Message-----
>> From: Jan Beulich <JBeulich@suse.com>
>> Sent: 03 July 2019 12:36
>> To: xen-devel@lists.xenproject.org
>> Cc: George Dunlap <George.Dunlap@citrix.com>; Paul Durrant <Paul.Durrant@citrix.com>; Andrew Cooper
>> <Andrew.Cooper3@citrix.com>; Wei Liu <wl@xen.org>; Roger Pau Monne <roger.pau@citrix.com>
>> Subject: [PATCH v2] x86/HVM: p2m_ram_ro is incompatible with device pass-through
>>
>> The write-discard property of the type can't be represented in IOMMU
>> page table entries. Make sure the respective checks / tracking can't
>> race, by utilizing the domain lock. The other sides of the sharing/
>> paging/log-dirty exclusion checks should subsequently perhaps also be
>> put under that lock then.
>>
>> Take the opportunity and also convert neighboring bool_t to bool in
>> struct hvm_domain.
>>
>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>> ---
>> v2: Don't set p2m_ram_ro_used when failing the request.
>>
>> --- a/xen/arch/x86/hvm/dm.c
>> +++ b/xen/arch/x86/hvm/dm.c
>> @@ -255,16 +255,33 @@ static int set_mem_type(struct domain *d
>>
>>        mem_type = array_index_nospec(data->mem_type, ARRAY_SIZE(memtype));
>>
>> -    if ( mem_type == HVMMEM_ioreq_server )
>> +    switch ( mem_type )
>>        {
>>            unsigned int flags;
>>
>> +    case HVMMEM_ioreq_server:
>>            if ( !hap_enabled(d) )
>>                return -EOPNOTSUPP;
>>
>>            /* Do not change to HVMMEM_ioreq_server if no ioreq server mapped. */
>>            if ( !p2m_get_ioreq_server(d, &flags) )
>>                return -EINVAL;
>> +
>> +        break;
>> +
>> +    case HVMMEM_ram_ro:
>> +        /* p2m_ram_ro can't be represented in IOMMU mappings. */
>> +        domain_lock(d);
>> +        if ( has_iommu_pt(d) )
>> +            rc = -EXDEV;
>> +        else
>> +            d->arch.hvm.p2m_ram_ro_used = true;
> 
> Do we really want this to be a one-way trip? On the face of it, it
> would seem that keeping a count of p2m_ram_ro entries would be more
> desirable such that, once the last one is gone, devices can be
> assigned again?

Well, at this point I'm not really up to introducing accounting of
the number of uses of p2m_ram_ro. This could be a further step to
be done in the future, if necessary.

> If not maybe it's time to go all the way and make iommu page table
> construction part of domain create and then we simplify a lot of
> code and we don't need any flag/refcount like this at all.

I've said this before: I don't think it should be a requirement to
know at the time of the creation of a VM whether it'll eventually
have a pass-through device assigned. Furthermore you realize that
this suggestion of yours is contrary to what you've said further up:
This way you'd make the two things exclusive of one another without
any recourse.

Jan

* Re: [Xen-devel] [PATCH v2] x86/HVM: p2m_ram_ro is incompatible with device pass-through
  2019-07-04  9:18   ` Jan Beulich
@ 2019-07-04  9:35     ` Paul Durrant
  2019-07-04 10:09       ` Jan Beulich
  0 siblings, 1 reply; 7+ messages in thread
From: Paul Durrant @ 2019-07-04  9:35 UTC (permalink / raw)
  To: 'Jan Beulich', xen-devel
  Cc: Andrew Cooper, George Dunlap, Wei Liu, Roger Pau Monne

> -----Original Message-----
> From: Jan Beulich <JBeulich@suse.com>
> Sent: 04 July 2019 10:19
> To: Paul Durrant <Paul.Durrant@citrix.com>; xen-devel@lists.xenproject.org
> Cc: Andrew Cooper <Andrew.Cooper3@citrix.com>; George Dunlap <George.Dunlap@citrix.com>; Roger Pau
> Monne <roger.pau@citrix.com>; Wei Liu <wl@xen.org>
> Subject: Re: [PATCH v2] x86/HVM: p2m_ram_ro is incompatible with device pass-through
> 
> On 03.07.2019 17:22, Paul Durrant wrote:
> >> -----Original Message-----
> >> From: Jan Beulich <JBeulich@suse.com>
> >> Sent: 03 July 2019 12:36
> >> To: xen-devel@lists.xenproject.org
> >> Cc: George Dunlap <George.Dunlap@citrix.com>; Paul Durrant <Paul.Durrant@citrix.com>; Andrew Cooper
> >> <Andrew.Cooper3@citrix.com>; Wei Liu <wl@xen.org>; Roger Pau Monne <roger.pau@citrix.com>
> >> Subject: [PATCH v2] x86/HVM: p2m_ram_ro is incompatible with device pass-through
> >>
> >> The write-discard property of the type can't be represented in IOMMU
> >> page table entries. Make sure the respective checks / tracking can't
> >> race, by utilizing the domain lock. The other sides of the sharing/
> >> paging/log-dirty exclusion checks should subsequently perhaps also be
> >> put under that lock then.
> >>
> >> Take the opportunity and also convert neighboring bool_t to bool in
> >> struct hvm_domain.
> >>
> >> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> >> ---
> >> v2: Don't set p2m_ram_ro_used when failing the request.
> >>
> >> --- a/xen/arch/x86/hvm/dm.c
> >> +++ b/xen/arch/x86/hvm/dm.c
> >> @@ -255,16 +255,33 @@ static int set_mem_type(struct domain *d
> >>
> >>        mem_type = array_index_nospec(data->mem_type, ARRAY_SIZE(memtype));
> >>
> >> -    if ( mem_type == HVMMEM_ioreq_server )
> >> +    switch ( mem_type )
> >>        {
> >>            unsigned int flags;
> >>
> >> +    case HVMMEM_ioreq_server:
> >>            if ( !hap_enabled(d) )
> >>                return -EOPNOTSUPP;
> >>
> >>            /* Do not change to HVMMEM_ioreq_server if no ioreq server mapped. */
> >>            if ( !p2m_get_ioreq_server(d, &flags) )
> >>                return -EINVAL;
> >> +
> >> +        break;
> >> +
> >> +    case HVMMEM_ram_ro:
> >> +        /* p2m_ram_ro can't be represented in IOMMU mappings. */
> >> +        domain_lock(d);
> >> +        if ( has_iommu_pt(d) )
> >> +            rc = -EXDEV;
> >> +        else
> >> +            d->arch.hvm.p2m_ram_ro_used = true;
> >
> > Do we really want this to be a one-way trip? On the face of it, it
> > would seem that keeping a count of p2m_ram_ro entries would be more
> > desirable such that, once the last one is gone, devices can be
> > assigned again?
> 
> Well, at this point I'm not really up to introducing accounting of
> the number of uses of p2m_ram_ro. This could be a further step to
> be done in the future, if necessary.
> 
> > If not maybe it's time to go all the way and make iommu page table
> > construction part of domain create and then we simplify a lot of
> > code and we don't need any flag/refcount like this at all.
> 
> I've said this before: I don't think it should be a requirement to
> know at the time of the creation of a VM whether it'll eventually
> have a pass-through device assigned. Furthermore you realize that
> this suggestion of yours is contrary to what you've said further up:
> This way you'd make the two things exclusive of one another without
> any recourse.

Yes, I realize the suggestions are contradictory. My point is that adding IOMMU pages to a running domain is tricky and leads to issues like the one you are trying to solve with the ram_ro_used flag.
The whole subsystem is in need of an overhaul anyway so I guess this band-aid is ok for now.

  Paul

* Re: [Xen-devel] [PATCH v2] x86/HVM: p2m_ram_ro is incompatible with device pass-through
  2019-07-04  9:35     ` Paul Durrant
@ 2019-07-04 10:09       ` Jan Beulich
  2019-07-04 10:12         ` Paul Durrant
  0 siblings, 1 reply; 7+ messages in thread
From: Jan Beulich @ 2019-07-04 10:09 UTC (permalink / raw)
  To: Paul Durrant, xen-devel
  Cc: Andrew Cooper, George Dunlap, Wei Liu, Roger Pau Monne

On 04.07.2019 11:35, Paul Durrant wrote:
>> -----Original Message-----
>> From: Jan Beulich <JBeulich@suse.com>
>> Sent: 04 July 2019 10:19
>> To: Paul Durrant <Paul.Durrant@citrix.com>; xen-devel@lists.xenproject.org
>> Cc: Andrew Cooper <Andrew.Cooper3@citrix.com>; George Dunlap <George.Dunlap@citrix.com>; Roger Pau
>> Monne <roger.pau@citrix.com>; Wei Liu <wl@xen.org>
>> Subject: Re: [PATCH v2] x86/HVM: p2m_ram_ro is incompatible with device pass-through
>>
>> On 03.07.2019 17:22, Paul Durrant wrote:
>>>> -----Original Message-----
>>>> From: Jan Beulich <JBeulich@suse.com>
>>>> Sent: 03 July 2019 12:36
>>>> To: xen-devel@lists.xenproject.org
>>>> Cc: George Dunlap <George.Dunlap@citrix.com>; Paul Durrant <Paul.Durrant@citrix.com>; Andrew Cooper
>>>> <Andrew.Cooper3@citrix.com>; Wei Liu <wl@xen.org>; Roger Pau Monne <roger.pau@citrix.com>
>>>> Subject: [PATCH v2] x86/HVM: p2m_ram_ro is incompatible with device pass-through
>>>>
>>>> The write-discard property of the type can't be represented in IOMMU
>>>> page table entries. Make sure the respective checks / tracking can't
>>>> race, by utilizing the domain lock. The other sides of the sharing/
>>>> paging/log-dirty exclusion checks should subsequently perhaps also be
>>>> put under that lock then.
>>>>
>>>> Take the opportunity and also convert neighboring bool_t to bool in
>>>> struct hvm_domain.
>>>>
>>>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>>>> ---
>>>> v2: Don't set p2m_ram_ro_used when failing the request.
>>>>
>>>> --- a/xen/arch/x86/hvm/dm.c
>>>> +++ b/xen/arch/x86/hvm/dm.c
>>>> @@ -255,16 +255,33 @@ static int set_mem_type(struct domain *d
>>>>
>>>>         mem_type = array_index_nospec(data->mem_type, ARRAY_SIZE(memtype));
>>>>
>>>> -    if ( mem_type == HVMMEM_ioreq_server )
>>>> +    switch ( mem_type )
>>>>         {
>>>>             unsigned int flags;
>>>>
>>>> +    case HVMMEM_ioreq_server:
>>>>             if ( !hap_enabled(d) )
>>>>                 return -EOPNOTSUPP;
>>>>
>>>>             /* Do not change to HVMMEM_ioreq_server if no ioreq server mapped. */
>>>>             if ( !p2m_get_ioreq_server(d, &flags) )
>>>>                 return -EINVAL;
>>>> +
>>>> +        break;
>>>> +
>>>> +    case HVMMEM_ram_ro:
>>>> +        /* p2m_ram_ro can't be represented in IOMMU mappings. */
>>>> +        domain_lock(d);
>>>> +        if ( has_iommu_pt(d) )
>>>> +            rc = -EXDEV;
>>>> +        else
>>>> +            d->arch.hvm.p2m_ram_ro_used = true;
>>>
>>> Do we really want this to be a one-way trip? On the face of it, it
>>> would seem that keeping a count of p2m_ram_ro entries would be more
>>> desirable such that, once the last one is gone, devices can be
>>> assigned again?
>>
>> Well, at this point I'm not really up to introducing accounting of
>> the number of uses of p2m_ram_ro. This could be a further step to
>> be done in the future, if necessary.
>>
>>> If not maybe it's time to go all the way and make iommu page table
>>> construction part of domain create and then we simplify a lot of
>>> code and we don't need any flag/refcount like this at all.
>>
>> I've said this before: I don't think it should be a requirement to
>> know at the time of the creation of a VM whether it'll eventually
>> have a pass-through device assigned. Furthermore you realize that
>> this suggestion of yours is contrary to what you've said further up:
>> This way you'd make the two things exclusive of one another without
>> any recourse.
> 
> Yes, I realize the suggestions are contradictory. My point is that
> adding IOMMU pages to a running domain is tricky and leads to issues
> like the one you are trying to solve with the ram_ro_used flag.
> The whole subsystem is in need of an overhaul anyway so I guess this
> band-aid is ok for now.

Thanks. I wonder whether I may translate this into R-b or A-b?

Jan

* Re: [Xen-devel] [PATCH v2] x86/HVM: p2m_ram_ro is incompatible with device pass-through
  2019-07-04 10:09       ` Jan Beulich
@ 2019-07-04 10:12         ` Paul Durrant
  0 siblings, 0 replies; 7+ messages in thread
From: Paul Durrant @ 2019-07-04 10:12 UTC (permalink / raw)
  To: 'Jan Beulich', xen-devel
  Cc: Andrew Cooper, George Dunlap, Wei Liu, Roger Pau Monne

> -----Original Message-----
> From: Jan Beulich <JBeulich@suse.com>
> Sent: 04 July 2019 11:09
> To: Paul Durrant <Paul.Durrant@citrix.com>; xen-devel@lists.xenproject.org
> Cc: Andrew Cooper <Andrew.Cooper3@citrix.com>; George Dunlap <George.Dunlap@citrix.com>; Roger Pau
> Monne <roger.pau@citrix.com>; Wei Liu <wl@xen.org>
> Subject: Re: [Xen-devel] [PATCH v2] x86/HVM: p2m_ram_ro is incompatible with device pass-through
> 
> On 04.07.2019 11:35, Paul Durrant wrote:
> >> -----Original Message-----
> >> From: Jan Beulich <JBeulich@suse.com>
> >> Sent: 04 July 2019 10:19
> >> To: Paul Durrant <Paul.Durrant@citrix.com>; xen-devel@lists.xenproject.org
> >> Cc: Andrew Cooper <Andrew.Cooper3@citrix.com>; George Dunlap <George.Dunlap@citrix.com>; Roger Pau
> >> Monne <roger.pau@citrix.com>; Wei Liu <wl@xen.org>
> >> Subject: Re: [PATCH v2] x86/HVM: p2m_ram_ro is incompatible with device pass-through
> >>
> >> On 03.07.2019 17:22, Paul Durrant wrote:
> >>>> -----Original Message-----
> >>>> From: Jan Beulich <JBeulich@suse.com>
> >>>> Sent: 03 July 2019 12:36
> >>>> To: xen-devel@lists.xenproject.org
> >>>> Cc: George Dunlap <George.Dunlap@citrix.com>; Paul Durrant <Paul.Durrant@citrix.com>; Andrew
> Cooper
> >>>> <Andrew.Cooper3@citrix.com>; Wei Liu <wl@xen.org>; Roger Pau Monne <roger.pau@citrix.com>
> >>>> Subject: [PATCH v2] x86/HVM: p2m_ram_ro is incompatible with device pass-through
> >>>>
> >>>> The write-discard property of the type can't be represented in IOMMU
> >>>> page table entries. Make sure the respective checks / tracking can't
> >>>> race, by utilizing the domain lock. The other sides of the sharing/
> >>>> paging/log-dirty exclusion checks should subsequently perhaps also be
> >>>> put under that lock then.
> >>>>
> >>>> Take the opportunity and also convert neighboring bool_t to bool in
> >>>> struct hvm_domain.
> >>>>
> >>>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> >>>> ---
> >>>> v2: Don't set p2m_ram_ro_used when failing the request.
> >>>>
> >>>> --- a/xen/arch/x86/hvm/dm.c
> >>>> +++ b/xen/arch/x86/hvm/dm.c
> >>>> @@ -255,16 +255,33 @@ static int set_mem_type(struct domain *d
> >>>>
> >>>>         mem_type = array_index_nospec(data->mem_type, ARRAY_SIZE(memtype));
> >>>>
> >>>> -    if ( mem_type == HVMMEM_ioreq_server )
> >>>> +    switch ( mem_type )
> >>>>         {
> >>>>             unsigned int flags;
> >>>>
> >>>> +    case HVMMEM_ioreq_server:
> >>>>             if ( !hap_enabled(d) )
> >>>>                 return -EOPNOTSUPP;
> >>>>
> >>>>             /* Do not change to HVMMEM_ioreq_server if no ioreq server mapped. */
> >>>>             if ( !p2m_get_ioreq_server(d, &flags) )
> >>>>                 return -EINVAL;
> >>>> +
> >>>> +        break;
> >>>> +
> >>>> +    case HVMMEM_ram_ro:
> >>>> +        /* p2m_ram_ro can't be represented in IOMMU mappings. */
> >>>> +        domain_lock(d);
> >>>> +        if ( has_iommu_pt(d) )
> >>>> +            rc = -EXDEV;
> >>>> +        else
> >>>> +            d->arch.hvm.p2m_ram_ro_used = true;
> >>>
> >>> Do we really want this to be a one-way trip? On the face of it, it
> >>> would seem that keeping a count of p2m_ram_ro entries would be more
> >>> desirable such that, once the last one is gone, devices can be
> >>> assigned again?
> >>
> >> Well, at this point I'm not really up to introducing accounting of
> >> the number of uses of p2m_ram_ro. This could be a further step to
> >> be done in the future, if necessary.
> >>
> >>> If not maybe it's time to go all the way and make iommu page table
> >>> construction part of domain create and then we simplify a lot of
> >>> code and we don't need any flag/refcount like this at all.
> >>
> >> I've said this before: I don't think it should be a requirement to
> >> know at the time of the creation of a VM whether it'll eventually
> >> have a pass-through device assigned. Furthermore you realize that
> >> this suggestion of yours is contrary to what you've said further up:
> >> This way you'd make the two things exclusive of one another without
> >> any recourse.
> >
> > Yes, I realize the suggestions are contradictory. My point is that
> > adding IOMMU pages to a running domain is tricky and leads to issues
> > like the one you are trying to solve with the ram_ro_used flag.
> > The whole subsystem is in need of an overhaul anyway so I guess this
> > band-aid is ok for now.
> 
> Thanks. I wonder whether I may translate this into R-b or A-b?
> 

Yes, you can consider this an R-b.

  Paul

> Jan

* [Xen-devel] Ping: [PATCH v2] x86/HVM: p2m_ram_ro is incompatible with device pass-through
  2019-07-03 11:36 [Xen-devel] [PATCH v2] x86/HVM: p2m_ram_ro is incompatible with device pass-through Jan Beulich
  2019-07-03 15:22 ` Paul Durrant
@ 2019-07-15  8:38 ` Jan Beulich
  1 sibling, 0 replies; 7+ messages in thread
From: Jan Beulich @ 2019-07-15  8:38 UTC (permalink / raw)
  To: Andrew Cooper
  Cc: George Dunlap, xen-devel, Paul Durrant, Wei Liu, Roger Pau Monné

On 03.07.2019 13:36, Jan Beulich wrote:
> The write-discard property of the type can't be represented in IOMMU
> page table entries. Make sure the respective checks / tracking can't
> race, by utilizing the domain lock. The other sides of the sharing/
> paging/log-dirty exclusion checks should subsequently perhaps also be
> put under that lock then.
> 
> Take the opportunity and also convert neighboring bool_t to bool in
> struct hvm_domain.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Alongside Paul's R-b, could I get an ack or otherwise from you?

Thanks, Jan

> ---
> v2: Don't set p2m_ram_ro_used when failing the request.
> 
> --- a/xen/arch/x86/hvm/dm.c
> +++ b/xen/arch/x86/hvm/dm.c
> @@ -255,16 +255,33 @@ static int set_mem_type(struct domain *d
>    
>        mem_type = array_index_nospec(data->mem_type, ARRAY_SIZE(memtype));
>    
> -    if ( mem_type == HVMMEM_ioreq_server )
> +    switch ( mem_type )
>        {
>            unsigned int flags;
>    
> +    case HVMMEM_ioreq_server:
>            if ( !hap_enabled(d) )
>                return -EOPNOTSUPP;
>    
>            /* Do not change to HVMMEM_ioreq_server if no ioreq server mapped. */
>            if ( !p2m_get_ioreq_server(d, &flags) )
>                return -EINVAL;
> +
> +        break;
> +
> +    case HVMMEM_ram_ro:
> +        /* p2m_ram_ro can't be represented in IOMMU mappings. */
> +        domain_lock(d);
> +        if ( has_iommu_pt(d) )
> +            rc = -EXDEV;
> +        else
> +            d->arch.hvm.p2m_ram_ro_used = true;
> +        domain_unlock(d);
> +
> +        if ( rc )
> +            return rc;
> +
> +        break;
>        }
>    
>        while ( iter < data->nr )
> --- a/xen/drivers/passthrough/pci.c
> +++ b/xen/drivers/passthrough/pci.c
> @@ -1448,17 +1448,36 @@ static int assign_device(struct domain *
>        if ( !iommu_enabled || !hd->platform_ops )
>            return 0;
>    
> -    /* Prevent device assign if mem paging or mem sharing have been
> -     * enabled for this domain */
> -    if ( unlikely(d->arch.hvm.mem_sharing_enabled ||
> -                  vm_event_check_ring(d->vm_event_paging) ||
> +    domain_lock(d);
> +
> +    /*
> +     * Prevent device assignment if any of
> +     * - mem paging
> +     * - mem sharing
> +     * - the p2m_ram_ro type
> +     * - global log-dirty mode
> +     * are in use by this domain.
> +     */
> +    if ( unlikely(vm_event_check_ring(d->vm_event_paging) ||
> +#ifdef CONFIG_HVM
> +                  (is_hvm_domain(d) &&
> +                   (d->arch.hvm.mem_sharing_enabled ||
> +                    d->arch.hvm.p2m_ram_ro_used)) ||
> +#endif
>                      p2m_get_hostp2m(d)->global_logdirty) )
> +    {
> +        domain_unlock(d);
>            return -EXDEV;
> +    }
>    
>        if ( !pcidevs_trylock() )
> +    {
> +        domain_unlock(d);
>            return -ERESTART;
> +    }
>    
>        rc = iommu_construct(d);
> +    domain_unlock(d);
>        if ( rc )
>        {
>            pcidevs_unlock();
> --- a/xen/include/asm-x86/hvm/domain.h
> +++ b/xen/include/asm-x86/hvm/domain.h
> @@ -156,10 +156,11 @@ struct hvm_domain {
>    
>        struct viridian_domain *viridian;
>    
> -    bool_t                 hap_enabled;
> -    bool_t                 mem_sharing_enabled;
> -    bool_t                 qemu_mapcache_invalidate;
> -    bool_t                 is_s3_suspended;
> +    bool                   hap_enabled;
> +    bool                   mem_sharing_enabled;
> +    bool                   p2m_ram_ro_used;
> +    bool                   qemu_mapcache_invalidate;
> +    bool                   is_s3_suspended;
>    
>        /*
>         * TSC value that VCPUs use to calculate their tsc_offset value.
