qemu-devel.nongnu.org archive mirror
 help / color / mirror / Atom feed
* [PATCH] acpi: Fix access to PM1 control and status registers
@ 2020-07-01 11:05 Anthony PERARD
  2020-07-01 12:01 ` Michael S. Tsirkin
  0 siblings, 1 reply; 11+ messages in thread
From: Anthony PERARD @ 2020-07-01 11:05 UTC (permalink / raw)
  To: qemu-devel; +Cc: Anthony PERARD, Igor Mammedov, Michael S. Tsirkin

The ACPI spec states that "Accesses to PM1 control registers are
accessed through byte and word accesses." (In section 4.7.3.2.1 PM1
Control Registers of my old spec copy, rev 4.0a).

With commit 5d971f9e6725 ("memory: Revert "memory: accept mismatching
sizes in memory_region_access_valid""), it wasn't possible anymore to
access the pm1_cnt register by reading a single byte, and that is used
by at least a Xen firmware called "hvmloader".

Also, take care of the PM1 Status Registers which also have "Accesses
to the PM1 status registers are done through byte or word accesses"
(In section 4.7.3.1.1 PM1 Status Registers).

Signed-off-by: Anthony PERARD <anthony.perard@citrix.com>
---
 hw/acpi/core.c | 46 +++++++++++++++++++++++++++++++++++++---------
 1 file changed, 37 insertions(+), 9 deletions(-)

diff --git a/hw/acpi/core.c b/hw/acpi/core.c
index 45cbed49abdd..31974e2f91bf 100644
--- a/hw/acpi/core.c
+++ b/hw/acpi/core.c
@@ -394,9 +394,17 @@ uint16_t acpi_pm1_evt_get_sts(ACPIREGS *ar)
     return ar->pm1.evt.sts;
 }
 
-static void acpi_pm1_evt_write_sts(ACPIREGS *ar, uint16_t val)
+static void acpi_pm1_evt_write_sts(ACPIREGS *ar, hwaddr addr, uint16_t val,
+                                   unsigned width)
 {
     uint16_t pm1_sts = acpi_pm1_evt_get_sts(ar);
+    if (width == 1) {
+        if (addr == 0) {
+            val |= pm1_sts & 0xff00;
+        } else if (addr == 1) {
+            val = (val << BITS_PER_BYTE) | (pm1_sts & 0xff);
+        }
+    }
     if (pm1_sts & val & ACPI_BITMASK_TIMER_STATUS) {
         /* if TMRSTS is reset, then compute the new overflow time */
         acpi_pm_tmr_calc_overflow_time(ar);
@@ -404,8 +412,16 @@ static void acpi_pm1_evt_write_sts(ACPIREGS *ar, uint16_t val)
     ar->pm1.evt.sts &= ~val;
 }
 
-static void acpi_pm1_evt_write_en(ACPIREGS *ar, uint16_t val)
+static void acpi_pm1_evt_write_en(ACPIREGS *ar, hwaddr addr, uint16_t val,
+                                  unsigned width)
 {
+    if (width == 1) {
+        if (addr == 0) {
+            val |= ar->pm1.evt.en & 0xff00;
+        } else if (addr == 1) {
+            val = (val << BITS_PER_BYTE) | (ar->pm1.evt.en & 0xff);
+        }
+    }
     ar->pm1.evt.en = val;
     qemu_system_wakeup_enable(QEMU_WAKEUP_REASON_RTC,
                               val & ACPI_BITMASK_RT_CLOCK_ENABLE);
@@ -434,9 +450,11 @@ static uint64_t acpi_pm_evt_read(void *opaque, hwaddr addr, unsigned width)
     ACPIREGS *ar = opaque;
     switch (addr) {
     case 0:
-        return acpi_pm1_evt_get_sts(ar);
+    case 1:
+        return acpi_pm1_evt_get_sts(ar) >> (addr * BITS_PER_BYTE);
     case 2:
-        return ar->pm1.evt.en;
+    case 3:
+        return ar->pm1.evt.en >> ((addr - 2) * BITS_PER_BYTE);
     default:
         return 0;
     }
@@ -448,11 +466,13 @@ static void acpi_pm_evt_write(void *opaque, hwaddr addr, uint64_t val,
     ACPIREGS *ar = opaque;
     switch (addr) {
     case 0:
-        acpi_pm1_evt_write_sts(ar, val);
+    case 1:
+        acpi_pm1_evt_write_sts(ar, addr, val, width);
         ar->pm1.evt.update_sci(ar);
         break;
     case 2:
-        acpi_pm1_evt_write_en(ar, val);
+    case 3:
+        acpi_pm1_evt_write_en(ar, addr - 2, val, width);
         ar->pm1.evt.update_sci(ar);
         break;
     }
@@ -461,7 +481,7 @@ static void acpi_pm_evt_write(void *opaque, hwaddr addr, uint64_t val,
 static const MemoryRegionOps acpi_pm_evt_ops = {
     .read = acpi_pm_evt_read,
     .write = acpi_pm_evt_write,
-    .valid.min_access_size = 2,
+    .valid.min_access_size = 1,
     .valid.max_access_size = 2,
     .endianness = DEVICE_LITTLE_ENDIAN,
 };
@@ -590,19 +610,27 @@ void acpi_pm1_cnt_update(ACPIREGS *ar,
 static uint64_t acpi_pm_cnt_read(void *opaque, hwaddr addr, unsigned width)
 {
     ACPIREGS *ar = opaque;
-    return ar->pm1.cnt.cnt;
+    return ar->pm1.cnt.cnt >> (addr * BITS_PER_BYTE);
 }
 
 static void acpi_pm_cnt_write(void *opaque, hwaddr addr, uint64_t val,
                               unsigned width)
 {
+    ACPIREGS *ar = opaque;
+    if (width == 1) {
+        if (addr == 0) {
+            val |= ar->pm1.cnt.cnt & 0xff00;
+        } else if (addr == 1) {
+            val = (val << BITS_PER_BYTE) | (ar->pm1.cnt.cnt & 0xff);
+        }
+    }
     acpi_pm1_cnt_write(opaque, val);
 }
 
 static const MemoryRegionOps acpi_pm_cnt_ops = {
     .read = acpi_pm_cnt_read,
     .write = acpi_pm_cnt_write,
-    .valid.min_access_size = 2,
+    .valid.min_access_size = 1,
     .valid.max_access_size = 2,
     .endianness = DEVICE_LITTLE_ENDIAN,
 };
-- 
Anthony PERARD



^ permalink raw reply related	[flat|nested] 11+ messages in thread

* Re: [PATCH] acpi: Fix access to PM1 control and status registers
  2020-07-01 11:05 [PATCH] acpi: Fix access to PM1 control and status registers Anthony PERARD
@ 2020-07-01 12:01 ` Michael S. Tsirkin
  2020-07-01 12:48   ` Anthony PERARD
  0 siblings, 1 reply; 11+ messages in thread
From: Michael S. Tsirkin @ 2020-07-01 12:01 UTC (permalink / raw)
  To: Anthony PERARD; +Cc: Igor Mammedov, qemu-devel

On Wed, Jul 01, 2020 at 12:05:49PM +0100, Anthony PERARD wrote:
> The ACPI spec state that "Accesses to PM1 control registers are
> accessed through byte and word accesses." (In section 4.7.3.2.1 PM1
> Control Registers of my old spec copy rev 4.0a).
> 
> With commit 5d971f9e6725 ("memory: Revert "memory: accept mismatching
> sizes in memory_region_access_valid""), it wasn't possible anymore to
> access the pm1_cnt register by reading a single byte, and that is use
> by at least a Xen firmware called "hvmloader".
> 
> Also, take care of the PM1 Status Registers which also have "Accesses
> to the PM1 status registers are done through byte or word accesses"
> (In section 4.7.3.1.1 PM1 Status Registers).
> 
> Signed-off-by: Anthony PERARD <anthony.perard@citrix.com>


Can't we set impl.min_access_size to convert byte accesses
to word accesses?

> ---
>  hw/acpi/core.c | 46 +++++++++++++++++++++++++++++++++++++---------
>  1 file changed, 37 insertions(+), 9 deletions(-)
> 
> diff --git a/hw/acpi/core.c b/hw/acpi/core.c
> index 45cbed49abdd..31974e2f91bf 100644
> --- a/hw/acpi/core.c
> +++ b/hw/acpi/core.c
> @@ -394,9 +394,17 @@ uint16_t acpi_pm1_evt_get_sts(ACPIREGS *ar)
>      return ar->pm1.evt.sts;
>  }
>  
> -static void acpi_pm1_evt_write_sts(ACPIREGS *ar, uint16_t val)
> +static void acpi_pm1_evt_write_sts(ACPIREGS *ar, hwaddr addr, uint16_t val,
> +                                   unsigned width)
>  {
>      uint16_t pm1_sts = acpi_pm1_evt_get_sts(ar);
> +    if (width == 1) {
> +        if (addr == 0) {
> +            val |= pm1_sts & 0xff00;
> +        } else if (addr == 1) {
> +            val = (val << BITS_PER_BYTE) | (pm1_sts & 0xff);
> +        }
> +    }
>      if (pm1_sts & val & ACPI_BITMASK_TIMER_STATUS) {
>          /* if TMRSTS is reset, then compute the new overflow time */
>          acpi_pm_tmr_calc_overflow_time(ar);
> @@ -404,8 +412,16 @@ static void acpi_pm1_evt_write_sts(ACPIREGS *ar, uint16_t val)
>      ar->pm1.evt.sts &= ~val;
>  }
>  
> -static void acpi_pm1_evt_write_en(ACPIREGS *ar, uint16_t val)
> +static void acpi_pm1_evt_write_en(ACPIREGS *ar, hwaddr addr, uint16_t val,
> +                                  unsigned width)
>  {
> +    if (width == 1) {
> +        if (addr == 0) {
> +            val |= ar->pm1.evt.en & 0xff00;
> +        } else if (addr == 1) {
> +            val = (val << BITS_PER_BYTE) | (ar->pm1.evt.en & 0xff);
> +        }
> +    }
>      ar->pm1.evt.en = val;
>      qemu_system_wakeup_enable(QEMU_WAKEUP_REASON_RTC,
>                                val & ACPI_BITMASK_RT_CLOCK_ENABLE);
> @@ -434,9 +450,11 @@ static uint64_t acpi_pm_evt_read(void *opaque, hwaddr addr, unsigned width)
>      ACPIREGS *ar = opaque;
>      switch (addr) {
>      case 0:
> -        return acpi_pm1_evt_get_sts(ar);
> +    case 1:
> +        return acpi_pm1_evt_get_sts(ar) >> (addr * BITS_PER_BYTE);
>      case 2:
> -        return ar->pm1.evt.en;
> +    case 3:
> +        return ar->pm1.evt.en >> ((addr - 2) * BITS_PER_BYTE);
>      default:
>          return 0;
>      }
> @@ -448,11 +466,13 @@ static void acpi_pm_evt_write(void *opaque, hwaddr addr, uint64_t val,
>      ACPIREGS *ar = opaque;
>      switch (addr) {
>      case 0:
> -        acpi_pm1_evt_write_sts(ar, val);
> +    case 1:
> +        acpi_pm1_evt_write_sts(ar, addr, val, width);
>          ar->pm1.evt.update_sci(ar);
>          break;
>      case 2:
> -        acpi_pm1_evt_write_en(ar, val);
> +    case 3:
> +        acpi_pm1_evt_write_en(ar, addr - 2, val, width);
>          ar->pm1.evt.update_sci(ar);
>          break;
>      }
> @@ -461,7 +481,7 @@ static void acpi_pm_evt_write(void *opaque, hwaddr addr, uint64_t val,
>  static const MemoryRegionOps acpi_pm_evt_ops = {
>      .read = acpi_pm_evt_read,
>      .write = acpi_pm_evt_write,
> -    .valid.min_access_size = 2,
> +    .valid.min_access_size = 1,
>      .valid.max_access_size = 2,
>      .endianness = DEVICE_LITTLE_ENDIAN,
>  };
> @@ -590,19 +610,27 @@ void acpi_pm1_cnt_update(ACPIREGS *ar,
>  static uint64_t acpi_pm_cnt_read(void *opaque, hwaddr addr, unsigned width)
>  {
>      ACPIREGS *ar = opaque;
> -    return ar->pm1.cnt.cnt;
> +    return ar->pm1.cnt.cnt >> (addr * BITS_PER_BYTE);
>  }
>  
>  static void acpi_pm_cnt_write(void *opaque, hwaddr addr, uint64_t val,
>                                unsigned width)
>  {
> +    ACPIREGS *ar = opaque;
> +    if (width == 1) {
> +        if (addr == 0) {
> +            val |= ar->pm1.cnt.cnt & 0xff00;
> +        } else if (addr == 1) {
> +            val = (val << BITS_PER_BYTE) | (ar->pm1.cnt.cnt & 0xff);
> +        }
> +    }
>      acpi_pm1_cnt_write(opaque, val);
>  }
>  
>  static const MemoryRegionOps acpi_pm_cnt_ops = {
>      .read = acpi_pm_cnt_read,
>      .write = acpi_pm_cnt_write,
> -    .valid.min_access_size = 2,
> +    .valid.min_access_size = 1,
>      .valid.max_access_size = 2,
>      .endianness = DEVICE_LITTLE_ENDIAN,
>  };
> -- 
> Anthony PERARD



^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH] acpi: Fix access to PM1 control and status registers
  2020-07-01 12:01 ` Michael S. Tsirkin
@ 2020-07-01 12:48   ` Anthony PERARD
  2020-07-02 11:12     ` Michael S. Tsirkin
  2020-07-23 12:54     ` Michael Tokarev
  0 siblings, 2 replies; 11+ messages in thread
From: Anthony PERARD @ 2020-07-01 12:48 UTC (permalink / raw)
  To: Michael S. Tsirkin; +Cc: Igor Mammedov, qemu-devel

On Wed, Jul 01, 2020 at 08:01:55AM -0400, Michael S. Tsirkin wrote:
> On Wed, Jul 01, 2020 at 12:05:49PM +0100, Anthony PERARD wrote:
> > The ACPI spec state that "Accesses to PM1 control registers are
> > accessed through byte and word accesses." (In section 4.7.3.2.1 PM1
> > Control Registers of my old spec copy rev 4.0a).
> > 
> > With commit 5d971f9e6725 ("memory: Revert "memory: accept mismatching
> > sizes in memory_region_access_valid""), it wasn't possible anymore to
> > access the pm1_cnt register by reading a single byte, and that is use
> > by at least a Xen firmware called "hvmloader".
> > 
> > Also, take care of the PM1 Status Registers which also have "Accesses
> > to the PM1 status registers are done through byte or word accesses"
> > (In section 4.7.3.1.1 PM1 Status Registers).
> > 
> > Signed-off-by: Anthony PERARD <anthony.perard@citrix.com>
> 
> 
> Can't we set impl.min_access_size to convert byte accesses
> to word accesses?

I actually tried, but when reading `addr` or `addr+1` I had the same
value. So I guess `addr` wasn't taken into account.

I've checked again, with `.impl.min_access_size = 2`, the width that the
function acpi_pm_cnt_read() get is 2, but addr isn't changed so the
function is still supposed to shift the result (or the value to write)
based on addr, I guess.

-- 
Anthony PERARD


^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH] acpi: Fix access to PM1 control and status registers
  2020-07-01 12:48   ` Anthony PERARD
@ 2020-07-02 11:12     ` Michael S. Tsirkin
  2020-07-10  9:42       ` Anthony PERARD
  2020-07-16  9:05       ` Cédric Le Goater
  2020-07-23 12:54     ` Michael Tokarev
  1 sibling, 2 replies; 11+ messages in thread
From: Michael S. Tsirkin @ 2020-07-02 11:12 UTC (permalink / raw)
  To: Anthony PERARD
  Cc: Peter Maydell, Andrew Jeffery, Alistair Francis, qemu-devel,
	open list:ASPEED BMCs, Hervé Poussineau,
	Cédric Le Goater, pbonzini, Igor Mammedov, open list:PReP,
	Joel Stanley

On Wed, Jul 01, 2020 at 01:48:36PM +0100, Anthony PERARD wrote:
> On Wed, Jul 01, 2020 at 08:01:55AM -0400, Michael S. Tsirkin wrote:
> > On Wed, Jul 01, 2020 at 12:05:49PM +0100, Anthony PERARD wrote:
> > > The ACPI spec state that "Accesses to PM1 control registers are
> > > accessed through byte and word accesses." (In section 4.7.3.2.1 PM1
> > > Control Registers of my old spec copy rev 4.0a).
> > > 
> > > With commit 5d971f9e6725 ("memory: Revert "memory: accept mismatching
> > > sizes in memory_region_access_valid""), it wasn't possible anymore to
> > > access the pm1_cnt register by reading a single byte, and that is use
> > > by at least a Xen firmware called "hvmloader".
> > > 
> > > Also, take care of the PM1 Status Registers which also have "Accesses
> > > to the PM1 status registers are done through byte or word accesses"
> > > (In section 4.7.3.1.1 PM1 Status Registers).
> > > 
> > > Signed-off-by: Anthony PERARD <anthony.perard@citrix.com>
> > 
> > 
> > Can't we set impl.min_access_size to convert byte accesses
> > to word accesses?
> 
> I actually tried, but when reading `addr` or `addr+1` I had the same
> value. So I guess `addr` wasn't taken into account.
> 
> I've checked again, with `.impl.min_access_size = 2`, the width that the
> function acpi_pm_cnt_read() get is 2, but addr isn't changed so the
> function is still supposed to shift the result (or the value to write)
> based on addr, I guess.

True, the address is misaligned.  I think the memory core should just
align it - this is what devices seem to expect.
However, the result is shifted properly, so just align addr and be done
with it.


In fact I have a couple more questions. Paolo - maybe you can answer some of these?



    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

>>>>

So 8 byte accesses are split up unless one requests 8 bytes.
Undocumented right?  Why are we doing this?

>>>>


    /* FIXME: support unaligned access? */

>>>>

Shouldn't we document impl.unaligned is ignored right now?
Shouldn't we do something to make sure callbacks do not get
unaligned accesses they don't expect?


In fact, there are just 2 devices which set valid.unaligned but
not impl.unaligned:
    aspeed_smc_ops
    raven_io_ops


Is this intentional? Do these in fact expect memory core to
provide aligned addresses to the callbacks?
Given impl.unaligned is not implemented, can we drop it completely?
Cc a bunch of people who might know.

Can relevant maintainers please comment? Thanks a lot!

>>>>


    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = MAKE_64BIT_MASK(0, access_size * 8);

>>>>


So with a 1 byte access at address 1, with impl.min_access_size = 2, we get:
    access_size = 2
    access_mask = 0xffff
    addr = 1



<<<<


    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size,
                        (size - access_size - i) * 8, access_mask, attrs);

>>>

now shift is -8.

<<<<


        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size, i * 8,
                        access_mask, attrs);
        }
    }


<<<<

callback is invoked with addr 1 and size 2:

>>>>


    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_READ)) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    memory_region_shift_read_access(value, shift, mask, tmp);
    return MEMTX_OK;

<<<<

let's assume callback returned 0xabcd

this is where we are shifting the return value:

>>>>


static inline void memory_region_shift_read_access(uint64_t *value,
                                                   signed shift,
                                                   uint64_t mask,
                                                   uint64_t tmp)
{
    if (shift >= 0) {
        *value |= (tmp & mask) << shift;
    } else {
        *value |= (tmp & mask) >> -shift;
    }
}


So we do 0xabcd & 0xffff >> 8, and we get 0xab.

>>>

How about aligning address for now? Paolo?

-->

memory: align to min access size

If impl.min_access_size > valid.min_access_size access callbacks
can get a misaligned access as size is increased.
They don't expect that, let's fix it in the memory core.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

---


diff --git a/memory.c b/memory.c
index 9200b20130..ea489ce405 100644
--- a/memory.c
+++ b/memory.c
@@ -532,6 +532,7 @@ static MemTxResult access_with_adjusted_size(hwaddr addr,
     }
 
     /* FIXME: support unaligned access? */
+    addr &= ~(access_size_min - 1);
     access_size = MAX(MIN(size, access_size_max), access_size_min);
     access_mask = MAKE_64BIT_MASK(0, access_size * 8);
     if (memory_region_big_endian(mr)) {
> -- 
> Anthony PERARD



^ permalink raw reply related	[flat|nested] 11+ messages in thread

* Re: [PATCH] acpi: Fix access to PM1 control and status registers
  2020-07-02 11:12     ` Michael S. Tsirkin
@ 2020-07-10  9:42       ` Anthony PERARD
  2020-07-23 12:44         ` Michael S. Tsirkin
  2020-07-16  9:05       ` Cédric Le Goater
  1 sibling, 1 reply; 11+ messages in thread
From: Anthony PERARD @ 2020-07-10  9:42 UTC (permalink / raw)
  To: Michael S. Tsirkin
  Cc: Peter Maydell, Andrew Jeffery, Alistair Francis, qemu-devel,
	open list:ASPEED BMCs, Hervé Poussineau,
	Cédric Le Goater, pbonzini, Igor Mammedov, open list:PReP,
	Joel Stanley

On Thu, Jul 02, 2020 at 07:12:08AM -0400, Michael S. Tsirkin wrote:
> memory: align to min access size
> 
> If impl.min_access_size > valid.min_access_size access callbacks
> can get a misaligned access as size is increased.
> They don't expect that, let's fix it in the memory core.
> 
> Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
> 
> ---
> 
> 
> diff --git a/memory.c b/memory.c
> index 9200b20130..ea489ce405 100644
> --- a/memory.c
> +++ b/memory.c
> @@ -532,6 +532,7 @@ static MemTxResult access_with_adjusted_size(hwaddr addr,
>      }
>  
>      /* FIXME: support unaligned access? */
> +    addr &= ~(access_size_min - 1);
>      access_size = MAX(MIN(size, access_size_max), access_size_min);
>      access_mask = MAKE_64BIT_MASK(0, access_size * 8);
>      if (memory_region_big_endian(mr)) {

I've tried this (and .impl.min_access_size=2) but that wasn't enough.

In the guest, I did `inb(base_addr + 1)`, but I've got back the value as
if `inb(base_addr)` was run.

The device emulation read callbacks did get addr=0 width=2, so that's
fine, but the result returned to the guest wasn't shifted. Same thing
for write access, the write value isn't shifted, so a write to the
second byte would be written to the first.

Thanks,

-- 
Anthony PERARD


^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH] acpi: Fix access to PM1 control and status registers
  2020-07-02 11:12     ` Michael S. Tsirkin
  2020-07-10  9:42       ` Anthony PERARD
@ 2020-07-16  9:05       ` Cédric Le Goater
  2020-07-23 12:46         ` Michael S. Tsirkin
  1 sibling, 1 reply; 11+ messages in thread
From: Cédric Le Goater @ 2020-07-16  9:05 UTC (permalink / raw)
  To: Michael S. Tsirkin, Anthony PERARD
  Cc: Peter Maydell, Andrew Jeffery, Alistair Francis, qemu-devel,
	open list:ASPEED BMCs, Hervé Poussineau, Joel Stanley,
	pbonzini, Igor Mammedov, open list:PReP

On 7/2/20 1:12 PM, Michael S. Tsirkin wrote:
> On Wed, Jul 01, 2020 at 01:48:36PM +0100, Anthony PERARD wrote:
>> On Wed, Jul 01, 2020 at 08:01:55AM -0400, Michael S. Tsirkin wrote:
>>> On Wed, Jul 01, 2020 at 12:05:49PM +0100, Anthony PERARD wrote:
>>>> The ACPI spec state that "Accesses to PM1 control registers are
>>>> accessed through byte and word accesses." (In section 4.7.3.2.1 PM1
>>>> Control Registers of my old spec copy rev 4.0a).
>>>>
>>>> With commit 5d971f9e6725 ("memory: Revert "memory: accept mismatching
>>>> sizes in memory_region_access_valid""), it wasn't possible anymore to
>>>> access the pm1_cnt register by reading a single byte, and that is use
>>>> by at least a Xen firmware called "hvmloader".
>>>>
>>>> Also, take care of the PM1 Status Registers which also have "Accesses
>>>> to the PM1 status registers are done through byte or word accesses"
>>>> (In section 4.7.3.1.1 PM1 Status Registers).
>>>>
>>>> Signed-off-by: Anthony PERARD <anthony.perard@citrix.com>
>>>
>>>
>>> Can't we set impl.min_access_size to convert byte accesses
>>> to word accesses?
>>
>> I actually tried, but when reading `addr` or `addr+1` I had the same
>> value. So I guess `addr` wasn't taken into account.
>>
>> I've checked again, with `.impl.min_access_size = 2`, the width that the
>> function acpi_pm_cnt_read() get is 2, but addr isn't changed so the
>> function is still supposed to shift the result (or the value to write)
>> based on addr, I guess.
> 
> True address is misaligned.  I think memory core should just align it -
> this is what devices seem to expect.
> However result is shifted properly so just align addr and be done with
> it.
> 
> 
> In fact I have a couple more questions. Paolo - maybe you can answer some of these?
> 
> 
> 
>     if (!access_size_min) {
>         access_size_min = 1;
>     }
>     if (!access_size_max) {
>         access_size_max = 4;
>     }
> 
>>>>>
> 
> So 8 byte accesses are split up unless one requests 8 bytes.
> Undocumented right?  Why are we doing this?
> 
>>>>>
> 
> 
>     /* FIXME: support unaligned access? */
> 
>>>>>
> 
> Shouldn't we document impl.unaligned is ignored right now?
> Shouldn't we do something to make sure callbacks do not get
> unaligned accesses they don't expect?
> 
> 
> In fact, there are just 2 devices which set valid.unaligned but
> not impl.unaligned:
>     aspeed_smc_ops
>     raven_io_ops
> 
> 
> Is this intentional? 

I think it is a leftover from the initial implementation. The model works fine 
without valid.unaligned being set and with your patch.

C. 
 

> Do these in fact expect memory core to
> provide aligned addresses to the callbacks?
> Given impl.unaligned is not implemented, can we drop it completely?
> Cc a bunch of people who might know.
> 
> Can relevant maintainers please comment? Thanks a lot!
> 
>>>>>
> 
> 
>     access_size = MAX(MIN(size, access_size_max), access_size_min);
>     access_mask = MAKE_64BIT_MASK(0, access_size * 8);
> 
>>>>>
> 
> 
> So with a 1 byte access at address 1, with impl.min_access_size = 2, we get:
>     access_size = 2
>     access_mask = 0xffff
>     addr = 1
> 
> 
> 
> <<<<
> 
> 
>     if (memory_region_big_endian(mr)) {
>         for (i = 0; i < size; i += access_size) {
>             r |= access_fn(mr, addr + i, value, access_size,
>                         (size - access_size - i) * 8, access_mask, attrs);
> 
>>>>
> 
> now shift is -8.
> 
> <<<<
> 
> 
>         }
>     } else {
>         for (i = 0; i < size; i += access_size) {
>             r |= access_fn(mr, addr + i, value, access_size, i * 8,
>                         access_mask, attrs);
>         }
>     }
> 
> 
> <<<<
> 
> callback is invoked with addr 1 and size 2:
> 
>>>>>
> 
> 
>     uint64_t tmp;
> 
>     tmp = mr->ops->read(mr->opaque, addr, size);
>     if (mr->subpage) {
>         trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
>     } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_READ)) {
>         hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
>         trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
>     }
>     memory_region_shift_read_access(value, shift, mask, tmp);
>     return MEMTX_OK;
> 
> <<<<
> 
> let's assume callback returned 0xabcd
> 
> this is where we are shifting the return value:
> 
>>>>>
> 
> 
> static inline void memory_region_shift_read_access(uint64_t *value,
>                                                    signed shift,
>                                                    uint64_t mask,
>                                                    uint64_t tmp)
> {
>     if (shift >= 0) {
>         *value |= (tmp & mask) << shift;
>     } else {
>         *value |= (tmp & mask) >> -shift;
>     }
> }
> 
> 
> So we do 0xabcd & 0xffff >> 8, and we get 0xab.
> 
>>>>
> 
> How about aligning address for now? Paolo?
> 
> -->
> 
> memory: align to min access size
> 
> If impl.min_access_size > valid.min_access_size access callbacks
> can get a misaligned access as size is increased.
> They don't expect that, let's fix it in the memory core.
> 
> Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
> 
> ---
> 
> 
> diff --git a/memory.c b/memory.c
> index 9200b20130..ea489ce405 100644
> --- a/memory.c
> +++ b/memory.c
> @@ -532,6 +532,7 @@ static MemTxResult access_with_adjusted_size(hwaddr addr,
>      }
>  
>      /* FIXME: support unaligned access? */
> +    addr &= ~(access_size_min - 1);
>      access_size = MAX(MIN(size, access_size_max), access_size_min);
>      access_mask = MAKE_64BIT_MASK(0, access_size * 8);
>      if (memory_region_big_endian(mr)) {
>> -- 
>> Anthony PERARD
> 



^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH] acpi: Fix access to PM1 control and status registers
  2020-07-10  9:42       ` Anthony PERARD
@ 2020-07-23 12:44         ` Michael S. Tsirkin
  2020-07-23 13:08           ` Anthony PERARD
  0 siblings, 1 reply; 11+ messages in thread
From: Michael S. Tsirkin @ 2020-07-23 12:44 UTC (permalink / raw)
  To: Anthony PERARD
  Cc: Peter Maydell, Andrew Jeffery, Alistair Francis, qemu-devel,
	open list:ASPEED BMCs, Hervé Poussineau,
	Cédric Le Goater, pbonzini, Igor Mammedov, open list:PReP,
	Joel Stanley

On Fri, Jul 10, 2020 at 10:42:58AM +0100, Anthony PERARD wrote:
> On Thu, Jul 02, 2020 at 07:12:08AM -0400, Michael S. Tsirkin wrote:
> > memory: align to min access size
> > 
> > If impl.min_access_size > valid.min_access_size access callbacks
> > can get a misaligned access as size is increased.
> > They don't expect that, let's fix it in the memory core.
> > 
> > Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
> > 
> > ---
> > 
> > 
> > diff --git a/memory.c b/memory.c
> > index 9200b20130..ea489ce405 100644
> > --- a/memory.c
> > +++ b/memory.c
> > @@ -532,6 +532,7 @@ static MemTxResult access_with_adjusted_size(hwaddr addr,
> >      }
> >  
> >      /* FIXME: support unaligned access? */
> > +    addr &= ~(access_size_min - 1);
> >      access_size = MAX(MIN(size, access_size_max), access_size_min);
> >      access_mask = MAKE_64BIT_MASK(0, access_size * 8);
> >      if (memory_region_big_endian(mr)) {
> 
> I've tried this (and .impl.min_access_size=2) but that wasn't enough.
> 
> In the guest, I did `inb(base_addr + 1)`, but I've got back the value as
> if `inb(base_addr)` was run.
> 
> The device emulation read callbacks did get addr=0 width=2, so that's
> fine, but the result returned to the guest wasn't shifted. Same thing
> for write access, the write value isn't shifted, so a write to the
> second byte would be written to the first.
> 
> Thanks,

So is there still an issue with my latest pull req?
Or is everything fixed?


> -- 
> Anthony PERARD



^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH] acpi: Fix access to PM1 control and status registers
  2020-07-16  9:05       ` Cédric Le Goater
@ 2020-07-23 12:46         ` Michael S. Tsirkin
  0 siblings, 0 replies; 11+ messages in thread
From: Michael S. Tsirkin @ 2020-07-23 12:46 UTC (permalink / raw)
  To: Cédric Le Goater
  Cc: Peter Maydell, Andrew Jeffery, Alistair Francis, qemu-devel,
	open list:ASPEED BMCs, Hervé Poussineau, Joel Stanley,
	pbonzini, Anthony PERARD, Igor Mammedov, open list:PReP

On Thu, Jul 16, 2020 at 11:05:06AM +0200, Cédric Le Goater wrote:
> On 7/2/20 1:12 PM, Michael S. Tsirkin wrote:
> > On Wed, Jul 01, 2020 at 01:48:36PM +0100, Anthony PERARD wrote:
> >> On Wed, Jul 01, 2020 at 08:01:55AM -0400, Michael S. Tsirkin wrote:
> >>> On Wed, Jul 01, 2020 at 12:05:49PM +0100, Anthony PERARD wrote:
> >>>> The ACPI spec state that "Accesses to PM1 control registers are
> >>>> accessed through byte and word accesses." (In section 4.7.3.2.1 PM1
> >>>> Control Registers of my old spec copy rev 4.0a).
> >>>>
> >>>> With commit 5d971f9e6725 ("memory: Revert "memory: accept mismatching
> >>>> sizes in memory_region_access_valid""), it wasn't possible anymore to
> >>>> access the pm1_cnt register by reading a single byte, and that is use
> >>>> by at least a Xen firmware called "hvmloader".
> >>>>
> >>>> Also, take care of the PM1 Status Registers which also have "Accesses
> >>>> to the PM1 status registers are done through byte or word accesses"
> >>>> (In section 4.7.3.1.1 PM1 Status Registers).
> >>>>
> >>>> Signed-off-by: Anthony PERARD <anthony.perard@citrix.com>
> >>>
> >>>
> >>> Can't we set impl.min_access_size to convert byte accesses
> >>> to word accesses?
> >>
> >> I actually tried, but when reading `addr` or `addr+1` I had the same
> >> value. So I guess `addr` wasn't taken into account.
> >>
> >> I've checked again, with `.impl.min_access_size = 2`, the width that the
> >> function acpi_pm_cnt_read() get is 2, but addr isn't changed so the
> >> function is still supposed to shift the result (or the value to write)
> >> based on addr, I guess.
> > 
> > True address is misaligned.  I think memory core should just align it -
> > this is what devices seem to expect.
> > However result is shifted properly so just align addr and be done with
> > it.
> > 
> > 
> > In fact I have a couple more questions. Paolo - maybe you can answer some of these?
> > 
> > 
> > 
> >     if (!access_size_min) {
> >         access_size_min = 1;
> >     }
> >     if (!access_size_max) {
> >         access_size_max = 4;
> >     }
> > 
> >>>>>
> > 
> > So 8 byte accesses are split up unless one requests 8 bytes.
> > Undocumented right?  Why are we doing this?
> > 
> >>>>>
> > 
> > 
> >     /* FIXME: support unaligned access? */
> > 
> >>>>>
> > 
> > Shouldn't we document impl.unaligned is ignored right now?
> > Shouldn't we do something to make sure callbacks do not get
> > unaligned accesses they don't expect?
> > 
> > 
> > In fact, there are just 2 devices which set valid.unaligned but
> > not impl.unaligned:
> >     aspeed_smc_ops
> >     raven_io_ops
> > 
> > 
> > Is this intentional? 
> 
> I think it is a leftover from the initial implementation. The model works fine 
> without valid.unaligned being set and with your patch.
> 
> C. 

Oh good, we can drop this. What about raven? Hervé could you comment pls?


> 
> > Do these in fact expect memory core to
> > provide aligned addresses to the callbacks?
> > Given impl.unaligned is not implemented, can we drop it completely?
> > Cc a bunch of people who might know.
> > 
> > Can relevant maintainers please comment? Thanks a lot!
> > 
> >>>>>
> > 
> > 
> >     access_size = MAX(MIN(size, access_size_max), access_size_min);
> >     access_mask = MAKE_64BIT_MASK(0, access_size * 8);
> > 
> >>>>>
> > 
> > 
> > So with a 1 byte access at address 1, with impl.min_access_size = 2, we get:
> >     access_size = 2
> >     access_mask = 0xffff
> >     addr = 1
> > 
> > 
> > 
> > <<<<
> > 
> > 
> >     if (memory_region_big_endian(mr)) {
> >         for (i = 0; i < size; i += access_size) {
> >             r |= access_fn(mr, addr + i, value, access_size,
> >                         (size - access_size - i) * 8, access_mask, attrs);
> > 
> >>>>
> > 
> > now shift is -8.
> > 
> > <<<<
> > 
> > 
> >         }
> >     } else {
> >         for (i = 0; i < size; i += access_size) {
> >             r |= access_fn(mr, addr + i, value, access_size, i * 8,
> >                         access_mask, attrs);
> >         }
> >     }
> > 
> > 
> > <<<<
> > 
> > callback is invoked with addr 1 and size 2:
> > 
> >>>>>
> > 
> > 
> >     uint64_t tmp;
> > 
> >     tmp = mr->ops->read(mr->opaque, addr, size);
> >     if (mr->subpage) {
> >         trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
> >     } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_READ)) {
> >         hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
> >         trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
> >     }
> >     memory_region_shift_read_access(value, shift, mask, tmp);
> >     return MEMTX_OK;
> > 
> > <<<<
> > 
> > let's assume callback returned 0xabcd
> > 
> > this is where we are shifting the return value:
> > 
> >>>>>
> > 
> > 
> > static inline void memory_region_shift_read_access(uint64_t *value,
> >                                                    signed shift,
> >                                                    uint64_t mask,
> >                                                    uint64_t tmp)
> > {
> >     if (shift >= 0) {
> >         *value |= (tmp & mask) << shift;
> >     } else {
> >         *value |= (tmp & mask) >> -shift;
> >     }
> > }
> > 
> > 
> > So we do 0xabcd & 0xffff >> 8, and we get 0xab.
> > 
> >>>>
> > 
> > How about aligning address for now? Paolo?
> > 
> > -->
> > 
> > memory: align to min access size
> > 
> > If impl.min_access_size > valid.min_access_size access callbacks
> > can get a misaligned access as size is increased.
> > They don't expect that, let's fix it in the memory core.
> > 
> > Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
> > 
> > ---
> > 
> > 
> > diff --git a/memory.c b/memory.c
> > index 9200b20130..ea489ce405 100644
> > --- a/memory.c
> > +++ b/memory.c
> > @@ -532,6 +532,7 @@ static MemTxResult access_with_adjusted_size(hwaddr addr,
> >      }
> >  
> >      /* FIXME: support unaligned access? */
> > +    addr &= ~(access_size_min - 1);
> >      access_size = MAX(MIN(size, access_size_max), access_size_min);
> >      access_mask = MAKE_64BIT_MASK(0, access_size * 8);
> >      if (memory_region_big_endian(mr)) {
> >> -- 
> >> Anthony PERARD
> > 



^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH] acpi: Fix access to PM1 control and status registers
  2020-07-01 12:48   ` Anthony PERARD
  2020-07-02 11:12     ` Michael S. Tsirkin
@ 2020-07-23 12:54     ` Michael Tokarev
  2020-07-23 13:14       ` Anthony PERARD
  1 sibling, 1 reply; 11+ messages in thread
From: Michael Tokarev @ 2020-07-23 12:54 UTC (permalink / raw)
  To: Anthony PERARD, Michael S. Tsirkin; +Cc: Igor Mammedov, qemu-devel

01.07.2020 15:48, Anthony PERARD wrote:

> I actually tried, but when reading `addr` or `addr+1` I had the same
> value. So I guess `addr` wasn't taken into account.

AFAICS, these registers aren't actually supposed to be accessed at
addr+1. The ACPI and ISA specs state multiple times that `addr' should be
accessible as 8/16/32 bits, but they do not mention `addr+1' or `addr+2'.

So far all now-rejected accesses we've seen (not that many, but still) go
to `addr', not to any other variation of it.

/mjt


^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH] acpi: Fix access to PM1 control and status registers
  2020-07-23 12:44         ` Michael S. Tsirkin
@ 2020-07-23 13:08           ` Anthony PERARD
  0 siblings, 0 replies; 11+ messages in thread
From: Anthony PERARD @ 2020-07-23 13:08 UTC (permalink / raw)
  To: Michael S. Tsirkin
  Cc: Peter Maydell, Andrew Jeffery, Alistair Francis, qemu-devel,
	open list:ASPEED BMCs, Hervé Poussineau,
	Cédric Le Goater, pbonzini, Igor Mammedov, open list:PReP,
	Joel Stanley

On Thu, Jul 23, 2020 at 08:44:27AM -0400, Michael S. Tsirkin wrote:
> On Fri, Jul 10, 2020 at 10:42:58AM +0100, Anthony PERARD wrote:
> > On Thu, Jul 02, 2020 at 07:12:08AM -0400, Michael S. Tsirkin wrote:
> > > memory: align to min access size
> > > 
> > > If impl.min_access_size > valid.min_access_size access callbacks
> > > can get a misaligned access as size is increased.
> > > They don't expect that, let's fix it in the memory core.
> > > 
> > > Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
> > > 
> > > ---
> > > 
> > > 
> > > diff --git a/memory.c b/memory.c
> > > index 9200b20130..ea489ce405 100644
> > > --- a/memory.c
> > > +++ b/memory.c
> > > @@ -532,6 +532,7 @@ static MemTxResult access_with_adjusted_size(hwaddr addr,
> > >      }
> > >  
> > >      /* FIXME: support unaligned access? */
> > > +    addr &= ~(access_size_min - 1);
> > >      access_size = MAX(MIN(size, access_size_max), access_size_min);
> > >      access_mask = MAKE_64BIT_MASK(0, access_size * 8);
> > >      if (memory_region_big_endian(mr)) {
> > 
> > I've tried this (and .impl.min_access_size=2) but that wasn't enough.
> > 
> > In the guest, I did `inb(base_addr + 1)`, but I've got back the value as
> > if `inb(base_addr)` was run.
> > 
> > The device emulation read callbacks did get addr=0 width=2, so that's
> > fine, but the result returned to the guest wasn't shifted. Same thing
> > for write access, the write value isn't shifted, so a write to the
> > second byte would be written to the first.
> > 
> > Thanks,
> 
> So is there still an issue with my latest pull req?
> Or is everything fixed?

I can boot a guest with that pull req; it fixes the issue introduced by
the CVE fix.

Thanks!

-- 
Anthony PERARD


^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH] acpi: Fix access to PM1 control and status registers
  2020-07-23 12:54     ` Michael Tokarev
@ 2020-07-23 13:14       ` Anthony PERARD
  0 siblings, 0 replies; 11+ messages in thread
From: Anthony PERARD @ 2020-07-23 13:14 UTC (permalink / raw)
  To: Michael Tokarev; +Cc: Igor Mammedov, qemu-devel, Michael S. Tsirkin

On Thu, Jul 23, 2020 at 03:54:18PM +0300, Michael Tokarev wrote:
> 01.07.2020 15:48, Anthony PERARD wrote:
> 
> > I actually tried, but when reading `addr` or `addr+1` I had the same
> > value. So I guess `addr` wasn't taken into account.
> 
> AFAICS, these registers aren't actually supposed to be accessed like this
> as addr+1. ACPI and ISA spec states multiple times that `addr' should be
> accessible as 8/16/32 bits, but it does not mention `addr+1' or `addr+2'.

I guess that's why there's never been a "fix" for this before. Thanks
for the explanation.

> So far all now-rejected accesses we've seen (not that many but still) goes
> to `addr', not to any other variation of it.
> 
> /mjt

-- 
Anthony PERARD


^ permalink raw reply	[flat|nested] 11+ messages in thread

end of thread, other threads:[~2020-07-23 13:15 UTC | newest]

Thread overview: 11+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-07-01 11:05 [PATCH] acpi: Fix access to PM1 control and status registers Anthony PERARD
2020-07-01 12:01 ` Michael S. Tsirkin
2020-07-01 12:48   ` Anthony PERARD
2020-07-02 11:12     ` Michael S. Tsirkin
2020-07-10  9:42       ` Anthony PERARD
2020-07-23 12:44         ` Michael S. Tsirkin
2020-07-23 13:08           ` Anthony PERARD
2020-07-16  9:05       ` Cédric Le Goater
2020-07-23 12:46         ` Michael S. Tsirkin
2020-07-23 12:54     ` Michael Tokarev
2020-07-23 13:14       ` Anthony PERARD

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).