qemu-devel.nongnu.org archive mirror
 help / color / mirror / Atom feed
* [Qemu-devel] [PATCH 0/2] q35: mch: allow to lock down 128K RAM at default SMBASE address
@ 2019-09-17 13:07 Igor Mammedov
  2019-09-17 13:07 ` [Qemu-devel] [PATCH 1/2] q35: implement 128K SMRAM " Igor Mammedov
                   ` (3 more replies)
  0 siblings, 4 replies; 17+ messages in thread
From: Igor Mammedov @ 2019-09-17 13:07 UTC (permalink / raw)
  To: qemu-devel
  Cc: yingwen.chen, devel, phillip.goerl, alex.williamson, jiewen.yao,
	jun.nakajima, michael.d.kinney, pbonzini, boris.ostrovsky, rfc,
	lersek, joao.m.martins


Try #2 using PCI config space of MCH to negotiate/lock SMRAM
at 0x30000.

CC: yingwen.chen@intel.com
CC: devel@edk2.groups.io
CC: phillip.goerl@oracle.com
CC: alex.williamson@redhat.com
CC: jiewen.yao@intel.com
CC: jun.nakajima@intel.com
CC: michael.d.kinney@intel.com
CC: pbonzini@redhat.com
CC: boris.ostrovsky@oracle.com
CC: rfc@edk2.groups.io
CC: joao.m.martins@oracle.com
CC: lersek@redhat.com


Igor Mammedov (2):
  q35: implement 128K SMRAM at default SMBASE address
  tests: q35: MCH: add default SMBASE SMRAM lock test

 include/hw/pci-host/q35.h |  10 ++++
 hw/i386/pc.c              |   4 +-
 hw/pci-host/q35.c         |  80 ++++++++++++++++++++++++++---
 tests/q35-test.c          | 105 ++++++++++++++++++++++++++++++++++++++
 4 files changed, 191 insertions(+), 8 deletions(-)

-- 
2.18.1



^ permalink raw reply	[flat|nested] 17+ messages in thread

* [Qemu-devel] [PATCH 1/2] q35: implement 128K SMRAM at default SMBASE address
  2019-09-17 13:07 [Qemu-devel] [PATCH 0/2] q35: mch: allow to lock down 128K RAM at default SMBASE address Igor Mammedov
@ 2019-09-17 13:07 ` Igor Mammedov
  2019-09-19 17:02   ` Laszlo Ersek
  2019-09-17 13:07 ` [Qemu-devel] [PATCH 2/2] tests: q35: MCH: add default SMBASE SMRAM lock test Igor Mammedov
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 17+ messages in thread
From: Igor Mammedov @ 2019-09-17 13:07 UTC (permalink / raw)
  To: qemu-devel
  Cc: yingwen.chen, devel, phillip.goerl, alex.williamson, jiewen.yao,
	jun.nakajima, michael.d.kinney, pbonzini, boris.ostrovsky, rfc,
	lersek, joao.m.martins

Use commit (2f295167e0 q35/mch: implement extended TSEG sizes) for
inspiration and (ab)use reserved register in config space at 0x9c
offset [*] to extend q35 pci-host with ability to use 128K at
0x30000 as SMRAM and hide it (like TSEG) from non-SMM context.

Usage:
  1: write 0xff in the register
  2: if the feature is supported, follow up read from the register
     should return 0x01. At this point RAM at 0x30000 is still
     available for SMI handler configuration from non-SMM context
  3: writing 0x02 in the register, locks SMBASE area, making its contents
     available only from SMM context. In non-SMM context, reads return
     0xff and writes are ignored. Further writes into the register are
     ignored until the system reset.

*) https://www.mail-archive.com/qemu-devel@nongnu.org/msg455991.html

Signed-off-by: Igor Mammedov <imammedo@redhat.com>
---
 include/hw/pci-host/q35.h | 10 +++++
 hw/i386/pc.c              |  4 +-
 hw/pci-host/q35.c         | 80 +++++++++++++++++++++++++++++++++++----
 3 files changed, 86 insertions(+), 8 deletions(-)

diff --git a/include/hw/pci-host/q35.h b/include/hw/pci-host/q35.h
index b3bcf2e632..976fbae599 100644
--- a/include/hw/pci-host/q35.h
+++ b/include/hw/pci-host/q35.h
@@ -32,6 +32,7 @@
 #include "hw/acpi/ich9.h"
 #include "hw/pci-host/pam.h"
 #include "hw/i386/intel_iommu.h"
+#include "qemu/units.h"
 
 #define TYPE_Q35_HOST_DEVICE "q35-pcihost"
 #define Q35_HOST_DEVICE(obj) \
@@ -54,6 +55,8 @@ typedef struct MCHPCIState {
     MemoryRegion smram_region, open_high_smram;
     MemoryRegion smram, low_smram, high_smram;
     MemoryRegion tseg_blackhole, tseg_window;
+    MemoryRegion smbase_blackhole, smbase_window;
+    bool has_smram_at_smbase;
     Range pci_hole;
     uint64_t below_4g_mem_size;
     uint64_t above_4g_mem_size;
@@ -97,6 +100,13 @@ typedef struct Q35PCIHost {
 #define MCH_HOST_BRIDGE_EXT_TSEG_MBYTES_QUERY  0xffff
 #define MCH_HOST_BRIDGE_EXT_TSEG_MBYTES_MAX    0xfff
 
+#define MCH_HOST_BRIDGE_SMBASE_SIZE            (128 * KiB)
+#define MCH_HOST_BRIDGE_SMBASE_ADDR            0x30000
+#define MCH_HOST_BRIDGE_F_SMBASE               0x9c
+#define MCH_HOST_BRIDGE_F_SMBASE_QUERY         0xff
+#define MCH_HOST_BRIDGE_F_SMBASE_IN_RAM        0x01
+#define MCH_HOST_BRIDGE_F_SMBASE_LCK           0x02
+
 #define MCH_HOST_BRIDGE_PCIEXBAR               0x60    /* 64bit register */
 #define MCH_HOST_BRIDGE_PCIEXBAR_SIZE          8       /* 64bit register */
 #define MCH_HOST_BRIDGE_PCIEXBAR_DEFAULT       0xb0000000
diff --git a/hw/i386/pc.c b/hw/i386/pc.c
index bad866fe44..288d28358a 100644
--- a/hw/i386/pc.c
+++ b/hw/i386/pc.c
@@ -119,7 +119,9 @@ struct hpet_fw_config hpet_cfg = {.count = UINT8_MAX};
 /* Physical Address of PVH entry point read from kernel ELF NOTE */
 static size_t pvh_start_addr;
 
-GlobalProperty pc_compat_4_1[] = {};
+GlobalProperty pc_compat_4_1[] = {
+    { "mch", "smbase-smram", "off" },
+};
 const size_t pc_compat_4_1_len = G_N_ELEMENTS(pc_compat_4_1);
 
 GlobalProperty pc_compat_4_0[] = {};
diff --git a/hw/pci-host/q35.c b/hw/pci-host/q35.c
index 158d270b9f..c1bd9f78ae 100644
--- a/hw/pci-host/q35.c
+++ b/hw/pci-host/q35.c
@@ -275,20 +275,20 @@ static const TypeInfo q35_host_info = {
  * MCH D0:F0
  */
 
-static uint64_t tseg_blackhole_read(void *ptr, hwaddr reg, unsigned size)
+static uint64_t blackhole_read(void *ptr, hwaddr reg, unsigned size)
 {
     return 0xffffffff;
 }
 
-static void tseg_blackhole_write(void *opaque, hwaddr addr, uint64_t val,
-                                 unsigned width)
+static void blackhole_write(void *opaque, hwaddr addr, uint64_t val,
+                            unsigned width)
 {
     /* nothing */
 }
 
-static const MemoryRegionOps tseg_blackhole_ops = {
-    .read = tseg_blackhole_read,
-    .write = tseg_blackhole_write,
+static const MemoryRegionOps blackhole_ops = {
+    .read = blackhole_read,
+    .write = blackhole_write,
     .endianness = DEVICE_NATIVE_ENDIAN,
     .valid.min_access_size = 1,
     .valid.max_access_size = 4,
@@ -430,6 +430,46 @@ static void mch_update_ext_tseg_mbytes(MCHPCIState *mch)
     }
 }
 
+static void mch_update_smbase_smram(MCHPCIState *mch)
+{
+    PCIDevice *pd = PCI_DEVICE(mch);
+    uint8_t *reg = pd->config + MCH_HOST_BRIDGE_F_SMBASE;
+    bool lck;
+
+    if (!mch->has_smram_at_smbase) {
+        return;
+    }
+
+    if (*reg == MCH_HOST_BRIDGE_F_SMBASE_QUERY) {
+        pd->wmask[MCH_HOST_BRIDGE_F_SMBASE] =
+            MCH_HOST_BRIDGE_F_SMBASE_LCK;
+        *reg = MCH_HOST_BRIDGE_F_SMBASE_IN_RAM;
+        return;
+    }
+
+    /*
+     * default/reset state, discard written value
+     * which will disable SMRAM blackhole at SMBASE
+     */
+    if (pd->wmask[MCH_HOST_BRIDGE_F_SMBASE] == 0xff) {
+        *reg = 0x00;
+    }
+
+    memory_region_transaction_begin();
+    if (*reg & MCH_HOST_BRIDGE_F_SMBASE_LCK) {
+        /* disable all writes */
+        pd->wmask[MCH_HOST_BRIDGE_F_SMBASE] &=
+            ~MCH_HOST_BRIDGE_F_SMBASE_LCK;
+        *reg = MCH_HOST_BRIDGE_F_SMBASE_LCK;
+        lck = true;
+    } else {
+        lck = false;
+    }
+    memory_region_set_enabled(&mch->smbase_blackhole, lck);
+    memory_region_set_enabled(&mch->smbase_window, lck);
+    memory_region_transaction_commit();
+}
+
 static void mch_write_config(PCIDevice *d,
                               uint32_t address, uint32_t val, int len)
 {
@@ -456,6 +496,10 @@ static void mch_write_config(PCIDevice *d,
                        MCH_HOST_BRIDGE_EXT_TSEG_MBYTES_SIZE)) {
         mch_update_ext_tseg_mbytes(mch);
     }
+
+    if (ranges_overlap(address, len, MCH_HOST_BRIDGE_F_SMBASE, 1)) {
+        mch_update_smbase_smram(mch);
+    }
 }
 
 static void mch_update(MCHPCIState *mch)
@@ -464,6 +508,7 @@ static void mch_update(MCHPCIState *mch)
     mch_update_pam(mch);
     mch_update_smram(mch);
     mch_update_ext_tseg_mbytes(mch);
+    mch_update_smbase_smram(mch);
 
     /*
      * pci hole goes from end-of-low-ram to io-apic.
@@ -514,6 +559,9 @@ static void mch_reset(DeviceState *qdev)
                      MCH_HOST_BRIDGE_EXT_TSEG_MBYTES_QUERY);
     }
 
+    d->config[MCH_HOST_BRIDGE_F_SMBASE] = 0;
+    d->wmask[MCH_HOST_BRIDGE_F_SMBASE] = 0xff;
+
     mch_update(mch);
 }
 
@@ -563,7 +611,7 @@ static void mch_realize(PCIDevice *d, Error **errp)
     memory_region_add_subregion(&mch->smram, 0xfeda0000, &mch->high_smram);
 
     memory_region_init_io(&mch->tseg_blackhole, OBJECT(mch),
-                          &tseg_blackhole_ops, NULL,
+                          &blackhole_ops, NULL,
                           "tseg-blackhole", 0);
     memory_region_set_enabled(&mch->tseg_blackhole, false);
     memory_region_add_subregion_overlap(mch->system_memory,
@@ -575,6 +623,23 @@ static void mch_realize(PCIDevice *d, Error **errp)
     memory_region_set_enabled(&mch->tseg_window, false);
     memory_region_add_subregion(&mch->smram, mch->below_4g_mem_size,
                                 &mch->tseg_window);
+
+    memory_region_init_io(&mch->smbase_blackhole, OBJECT(mch), &blackhole_ops,
+                          NULL, "smbase-blackhole",
+                          MCH_HOST_BRIDGE_SMBASE_SIZE);
+    memory_region_set_enabled(&mch->smbase_blackhole, false);
+    memory_region_add_subregion_overlap(mch->system_memory,
+                                        MCH_HOST_BRIDGE_SMBASE_ADDR,
+                                        &mch->smbase_blackhole, 1);
+
+    memory_region_init_alias(&mch->smbase_window, OBJECT(mch),
+                             "smbase-window", mch->ram_memory,
+                             MCH_HOST_BRIDGE_SMBASE_ADDR,
+                             MCH_HOST_BRIDGE_SMBASE_SIZE);
+    memory_region_set_enabled(&mch->smbase_window, false);
+    memory_region_add_subregion(&mch->smram, MCH_HOST_BRIDGE_SMBASE_ADDR,
+                                &mch->smbase_window);
+
     object_property_add_const_link(qdev_get_machine(), "smram",
                                    OBJECT(&mch->smram), &error_abort);
 
@@ -601,6 +666,7 @@ uint64_t mch_mcfg_base(void)
 static Property mch_props[] = {
     DEFINE_PROP_UINT16("extended-tseg-mbytes", MCHPCIState, ext_tseg_mbytes,
                        16),
+    DEFINE_PROP_BOOL("smbase-smram", MCHPCIState, has_smram_at_smbase, true),
     DEFINE_PROP_END_OF_LIST(),
 };
 
-- 
2.18.1



^ permalink raw reply related	[flat|nested] 17+ messages in thread

* [Qemu-devel] [PATCH 2/2] tests: q35: MCH: add default SMBASE SMRAM lock test
  2019-09-17 13:07 [Qemu-devel] [PATCH 0/2] q35: mch: allow to lock down 128K RAM at default SMBASE address Igor Mammedov
  2019-09-17 13:07 ` [Qemu-devel] [PATCH 1/2] q35: implement 128K SMRAM " Igor Mammedov
@ 2019-09-17 13:07 ` Igor Mammedov
  2019-09-17 15:23 ` [Qemu-devel] [edk2-devel] [PATCH 0/2] q35: mch: allow to lock down 128K RAM at default SMBASE address no-reply
  2019-09-17 15:24 ` no-reply
  3 siblings, 0 replies; 17+ messages in thread
From: Igor Mammedov @ 2019-09-17 13:07 UTC (permalink / raw)
  To: qemu-devel
  Cc: yingwen.chen, devel, phillip.goerl, alex.williamson, jiewen.yao,
	jun.nakajima, michael.d.kinney, pbonzini, boris.ostrovsky, rfc,
	lersek, joao.m.martins

test lockable SMRAM at default SMBASE feature introduced by
commit "q35: implement 128K SMRAM at default SMBASE address"

Signed-off-by: Igor Mammedov <imammedo@redhat.com>
---
 tests/q35-test.c | 105 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 105 insertions(+)

diff --git a/tests/q35-test.c b/tests/q35-test.c
index a68183d513..dd02660303 100644
--- a/tests/q35-test.c
+++ b/tests/q35-test.c
@@ -186,6 +186,109 @@ static void test_tseg_size(const void *data)
     qtest_quit(qts);
 }
 
+#define SMBASE 0x30000
+#define SMRAM_TEST_PATTERN 0x32
+#define SMRAM_TEST_RESET_PATTERN 0x23
+
+static void test_smram_smbase_lock(void)
+{
+    QPCIBus *pcibus;
+    QPCIDevice *pcidev;
+    QDict *response;
+    QTestState *qts;
+    int i;
+
+    qts = qtest_init("-M q35");
+
+    pcibus = qpci_new_pc(qts, NULL);
+    g_assert(pcibus != NULL);
+
+    pcidev = qpci_device_find(pcibus, 0);
+    g_assert(pcidev != NULL);
+
+    /* check that SMRAM is not enabled by default */
+    g_assert(qpci_config_readb(pcidev, MCH_HOST_BRIDGE_F_SMBASE) == 0);
+    qtest_writeb(qts, SMBASE, SMRAM_TEST_PATTERN);
+    g_assert_cmpint(qtest_readb(qts, SMBASE), ==, SMRAM_TEST_PATTERN);
+
+    /* check that writing junk to 0x9c before negotiating is ignored */
+    for (i = 0; i < 0xff; i++) {
+        qpci_config_writeb(pcidev, MCH_HOST_BRIDGE_F_SMBASE, i);
+        g_assert(qpci_config_readb(pcidev, MCH_HOST_BRIDGE_F_SMBASE) == 0);
+    }
+
+    /* enable SMRAM at SMBASE */
+    qpci_config_writeb(pcidev, MCH_HOST_BRIDGE_F_SMBASE, 0xff);
+    g_assert(qpci_config_readb(pcidev, MCH_HOST_BRIDGE_F_SMBASE) == 0x01);
+    /* lock SMRAM at SMBASE */
+    qpci_config_writeb(pcidev, MCH_HOST_BRIDGE_F_SMBASE, 0x02);
+    g_assert(qpci_config_readb(pcidev, MCH_HOST_BRIDGE_F_SMBASE) == 0x02);
+
+    /* check that SMRAM at SMBASE is locked and can't be unlocked */
+    g_assert_cmpint(qtest_readb(qts, SMBASE), ==, 0xff);
+    for (i = 0; i <= 0xff; i++) {
+        /* make sure register is immutable */
+        qpci_config_writeb(pcidev, MCH_HOST_BRIDGE_F_SMBASE, i);
+        g_assert(qpci_config_readb(pcidev, MCH_HOST_BRIDGE_F_SMBASE) == 0x02);
+
+        /* RAM access should go into black hole */
+        qtest_writeb(qts, SMBASE, SMRAM_TEST_PATTERN);
+        g_assert_cmpint(qtest_readb(qts, SMBASE), ==, 0xff);
+    }
+
+    /* reset */
+    response = qtest_qmp(qts, "{'execute': 'system_reset', 'arguments': {} }");
+    g_assert(response);
+    g_assert(!qdict_haskey(response, "error"));
+    qobject_unref(response);
+
+    /* check RAM at SMBASE is available after reset */
+    g_assert_cmpint(qtest_readb(qts, SMBASE), ==, SMRAM_TEST_PATTERN);
+    g_assert(qpci_config_readb(pcidev, MCH_HOST_BRIDGE_F_SMBASE) == 0);
+    qtest_writeb(qts, SMBASE, SMRAM_TEST_RESET_PATTERN);
+    g_assert_cmpint(qtest_readb(qts, SMBASE), ==, SMRAM_TEST_RESET_PATTERN);
+
+    g_free(pcidev);
+    qpci_free_pc(pcibus);
+
+    qtest_quit(qts);
+}
+
+static void test_without_smram_base(void)
+{
+    QPCIBus *pcibus;
+    QPCIDevice *pcidev;
+    QTestState *qts;
+    int i;
+
+    qts = qtest_init("-M pc-q35-4.1");
+
+    pcibus = qpci_new_pc(qts, NULL);
+    g_assert(pcibus != NULL);
+
+    pcidev = qpci_device_find(pcibus, 0);
+    g_assert(pcidev != NULL);
+
+    /* check that RAM is accessible */
+    qtest_writeb(qts, SMBASE, SMRAM_TEST_PATTERN);
+    g_assert_cmpint(qtest_readb(qts, SMBASE), ==, SMRAM_TEST_PATTERN);
+
+    /* check that writing to 0x9c succeeds */
+    for (i = 0; i <= 0xff; i++) {
+        qpci_config_writeb(pcidev, MCH_HOST_BRIDGE_F_SMBASE, i);
+        g_assert(qpci_config_readb(pcidev, MCH_HOST_BRIDGE_F_SMBASE) == i);
+    }
+
+    /* check that RAM is still accessible */
+    qtest_writeb(qts, SMBASE, SMRAM_TEST_PATTERN + 1);
+    g_assert_cmpint(qtest_readb(qts, SMBASE), ==, (SMRAM_TEST_PATTERN + 1));
+
+    g_free(pcidev);
+    qpci_free_pc(pcibus);
+
+    qtest_quit(qts);
+}
+
 int main(int argc, char **argv)
 {
     g_test_init(&argc, &argv, NULL);
@@ -197,5 +300,7 @@ int main(int argc, char **argv)
     qtest_add_data_func("/q35/tseg-size/8mb", &tseg_8mb, test_tseg_size);
     qtest_add_data_func("/q35/tseg-size/ext/16mb", &tseg_ext_16mb,
                         test_tseg_size);
+    qtest_add_func("/q35/smram/smbase_lock", test_smram_smbase_lock);
+    qtest_add_func("/q35/smram/legacy_smbase", test_without_smram_base);
     return g_test_run();
 }
-- 
2.18.1



^ permalink raw reply related	[flat|nested] 17+ messages in thread

* Re: [Qemu-devel] [edk2-devel] [PATCH 0/2] q35: mch: allow to lock down 128K RAM at default SMBASE address
  2019-09-17 13:07 [Qemu-devel] [PATCH 0/2] q35: mch: allow to lock down 128K RAM at default SMBASE address Igor Mammedov
  2019-09-17 13:07 ` [Qemu-devel] [PATCH 1/2] q35: implement 128K SMRAM " Igor Mammedov
  2019-09-17 13:07 ` [Qemu-devel] [PATCH 2/2] tests: q35: MCH: add default SMBASE SMRAM lock test Igor Mammedov
@ 2019-09-17 15:23 ` no-reply
  2019-09-17 15:24 ` no-reply
  3 siblings, 0 replies; 17+ messages in thread
From: no-reply @ 2019-09-17 15:23 UTC (permalink / raw)
  To: imammedo
  Cc: yingwen.chen, devel, phillip.goerl, qemu-devel, alex.williamson,
	jiewen.yao, jun.nakajima, michael.d.kinney, pbonzini,
	boris.ostrovsky, rfc, lersek, joao.m.martins

Patchew URL: https://patchew.org/QEMU/20190917130708.10281-1-imammedo@redhat.com/



Hi,

This series failed the asan build test. Please find the testing commands and
their output below. If you have Docker installed, you can probably reproduce it
locally.

=== TEST SCRIPT BEGIN ===
#!/bin/bash
make docker-image-fedora V=1 NETWORK=1
time make docker-test-debug@fedora TARGET_LIST=x86_64-softmmu J=14 NETWORK=1
=== TEST SCRIPT END ===

./tests/docker/docker.py --engine auto build qemu:fedora tests/docker/dockerfiles/fedora.docker   --add-current-user  
Image is up to date.
  LD      docker-test-debug@fedora.mo
cc: fatal error: no input files
compilation terminated.
make: *** [docker-test-debug@fedora.mo] Error 4



The full log is available at
http://patchew.org/logs/20190917130708.10281-1-imammedo@redhat.com/testing.asan/?type=message.
---
Email generated automatically by Patchew [https://patchew.org/].
Please send your feedback to patchew-devel@redhat.com

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [Qemu-devel] [edk2-devel] [PATCH 0/2] q35: mch: allow to lock down 128K RAM at default SMBASE address
  2019-09-17 13:07 [Qemu-devel] [PATCH 0/2] q35: mch: allow to lock down 128K RAM at default SMBASE address Igor Mammedov
                   ` (2 preceding siblings ...)
  2019-09-17 15:23 ` [Qemu-devel] [edk2-devel] [PATCH 0/2] q35: mch: allow to lock down 128K RAM at default SMBASE address no-reply
@ 2019-09-17 15:24 ` no-reply
  3 siblings, 0 replies; 17+ messages in thread
From: no-reply @ 2019-09-17 15:24 UTC (permalink / raw)
  To: imammedo
  Cc: yingwen.chen, devel, phillip.goerl, qemu-devel, alex.williamson,
	jiewen.yao, jun.nakajima, michael.d.kinney, pbonzini,
	boris.ostrovsky, rfc, lersek, joao.m.martins

Patchew URL: https://patchew.org/QEMU/20190917130708.10281-1-imammedo@redhat.com/



Hi,

This series failed the docker-mingw@fedora build test. Please find the testing commands and
their output below. If you have Docker installed, you can probably reproduce it
locally.

=== TEST SCRIPT BEGIN ===
#! /bin/bash
make docker-image-fedora V=1 NETWORK=1
time make docker-test-mingw@fedora J=14 NETWORK=1
=== TEST SCRIPT END ===

./tests/docker/docker.py --engine auto build qemu:fedora tests/docker/dockerfiles/fedora.docker   --add-current-user  
Image is up to date.
  LD      docker-test-mingw@fedora.mo
cc: fatal error: no input files
compilation terminated.
make: *** [docker-test-mingw@fedora.mo] Error 4



The full log is available at
http://patchew.org/logs/20190917130708.10281-1-imammedo@redhat.com/testing.docker-mingw@fedora/?type=message.
---
Email generated automatically by Patchew [https://patchew.org/].
Please send your feedback to patchew-devel@redhat.com

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [Qemu-devel] [PATCH 1/2] q35: implement 128K SMRAM at default SMBASE address
  2019-09-17 13:07 ` [Qemu-devel] [PATCH 1/2] q35: implement 128K SMRAM " Igor Mammedov
@ 2019-09-19 17:02   ` Laszlo Ersek
  2019-09-20  8:28     ` [edk2-devel] " Igor Mammedov
  0 siblings, 1 reply; 17+ messages in thread
From: Laszlo Ersek @ 2019-09-19 17:02 UTC (permalink / raw)
  To: Igor Mammedov, qemu-devel
  Cc: yingwen.chen, Brijesh Singh, devel, phillip.goerl,
	alex.williamson, jiewen.yao, jun.nakajima, michael.d.kinney,
	pbonzini, boris.ostrovsky, rfc, joao.m.martins

Hi Igor,

(+Brijesh)

long-ish pondering ahead, with a question at the end.

On 09/17/19 15:07, Igor Mammedov wrote:
> Use commit (2f295167e0 q35/mch: implement extended TSEG sizes) for
> inspiration and (ab)use reserved register in config space at 0x9c
> offset [*] to extend q35 pci-host with ability to use 128K at
> 0x30000 as SMRAM and hide it (like TSEG) from non-SMM context.
>
> Usage:
>   1: write 0xff in the register
>   2: if the feature is supported, follow up read from the register
>      should return 0x01. At this point RAM at 0x30000 is still
>      available for SMI handler configuration from non-SMM context
>   3: writing 0x02 in the register, locks SMBASE area, making its contents
>      available only from SMM context. In non-SMM context, reads return
>      0xff and writes are ignored. Further writes into the register are
>      ignored until the system reset.

I haven't written any OVMF code for this yet, but I've spent a few hours
thinking about it. Progress! :)

So, this looks really promising.

I'm commenting now for summarizing my thoughts before I write the --
initially minimal -- counterpart patches in OVMF.

There is one complication that deserves this separate email, and that's
SEV's effect on the SMM save state map. It is the topic of the following
edk2 commit range:

  1  61a044c6c15f OvmfPkg/MemEncryptSevLib: find pages of initial SMRAM
                  save state map
  2  86defc2c2575 OvmfPkg/PlatformPei: SEV: allocate pages of initial
                  SMRAM save state map
  3  5ef3b66fec13 OvmfPkg/SmmCpuFeaturesLib: SEV: encrypt+free pages of
                  init. save state map
  4  5e2e5647b9fb OvmfPkg/AmdSevDxe: decrypt the pages of the initial
                  SMRAM save state map

For performing the initial SMBASE relocation, QEMU+KVM have to read the
save state map from guest RAM. For that, under SEV, the guest RAM has to
be decrypted before, and re-encrypted after. Meanwhile, the guest RAM
(while decrypted) should not leak other (unrelated) information to the
hypervisor.

Therefore the above edk2 commits implement the following procedure:

(a.1) in OvmfPkg/PlatformPei, the page in which the save state map
      exists, is allocated away from other firmware components

(a.2) in AmdSevDxe, which runs early in the DXE phase (via APRIORI DXE
      file), we decrypt the page in question

(a.3) PiSmmCpuDxeSmm, after it performs the initial SMBASE relocation,
      calls a hook in SmmCpuFeaturesLib -- and in that hook, we
      re-encrypt the page, and also release (free) it for other uses
      (firmware and OS both)

Clearly, the above conflicts (or at least intersects) with reserving
128KB (32 pages) at 0x3_0000, i.e. at [0x3_0000..0x4_FFFF], forever,
from the OS. (That area is going to be locked to SMM, and so the OS
should see from the UEFI memmap that it should not touch it.) The
conflict exists because the save state map is in the last kilobyte of
page#15 (from said pages #0..#31) -- the save state map is at
[0x3_FC00..0x3_FFFF].

So here's my plan:

(b.1) In PlatformPei, probe the QEMU feature by writing 0xFF to register
      0x9C, and reading back 0x01. If the feature is available, then:

(b.1.1) set a new dynamic PCD to TRUE

(b.1.2) allocate away (as reserved memory) the range
        [0x3_0000..0x4_FFFF]

(b.2) In PlatformPei, conditionalize the current, SEV-specific,
      allocation of the save state map, on the PCD being FALSE.
      (Modifying (a.1).) This will prevent a conflicting
      double-allocation (double coverage by memory allocation HOBs)

(b.3) In AmdSevDxe, don't touch the decryption of the save state map
      (a.2)

(b.4) in the SmmCpuFeaturesLib hook, preserve the re-encryption of the
      save state map (part of (a.3))

(b.5) in the SmmCpuFeaturesLib hook, conditionalize the freeing of the
      save state map, on the PCD being FALSE. (Modifying (a.3).) This
      will prevent a hole from being punched in the allocation that
      covers [0x3_0000..0x4_FFFF], and the entire allocation will
      survive into OS runtime.

The above steps ensure that, while the decrypt/encrypt steps prevail,
the save state map allocations will be ingested by the larger, and
longer-term,  [0x3_0000..0x4_FFFF] allocation.

Furthermore:

(b.6) Extend SmmAccessDxe to write 0x02 to register 0x9C, in an
      EFI_DXE_MM_READY_TO_LOCK_PROTOCOL notification callback, if the
      PCD is true. This will cause the [0x3_0000..0x4_FFFF] area to be
      locked down at the same time with the rest of SMRAM (i.e., TSEG).

(b.7) Extend SmmAccessDxe to save S3 boot script opcodes for writing
      0x02 to register 0x9C, if the PCD is true. This makes sure that
      the above lockdown will occur also during S3 resume, before the OS
      is reached. (The S3 boot script is executed *after* SMBASE
      relocation and TSEG locking, during S3 resume.)

Considering (b.6) and (b.7), the "lockdown on normal boot" (from (b.6))
could actually be merged into (b.5) -- we don't necessarily have to
delay the normal boot lockdown of [0x3_0000..0x4_FFFF] until platform
BDS signals EFI_DXE_MM_READY_TO_LOCK_PROTOCOL; we could do it right
after initial SMBASE relocation.

However: I'd really like to keep (b.6) and (b.7) together, in the same
driver, and (b.7) is really inappropriate for PiSmmCpuDxeSmm (into which
SmmCpuFeaturesLib is linked). For writing S3 boot script opcodes in
(b.7), we need another protocol notify (on EFI_S3_SAVE_STATE_PROTOCOL),
and sneaking that kind of callback into PiSmmCpuDxeSmm sounds really
bad. Hence I plan to add both (b.6) and (b.7) to SmmAccessDxe.

I'll report back with my findings; just wanted to give a heads-up (to
myself as well :))

Finally: can you please remind me why we lock down 128KB (32 pages) at
0x3_0000, and not just half of that? What do we need the range at
[0x4_0000..0x4_FFFF] for?

Thanks!
Laszlo


>
> *) https://www.mail-archive.com/qemu-devel@nongnu.org/msg455991.html
>
> Signed-off-by: Igor Mammedov <imammedo@redhat.com>
> ---
>  include/hw/pci-host/q35.h | 10 +++++
>  hw/i386/pc.c              |  4 +-
>  hw/pci-host/q35.c         | 80 +++++++++++++++++++++++++++++++++++----
>  3 files changed, 86 insertions(+), 8 deletions(-)
>
> diff --git a/include/hw/pci-host/q35.h b/include/hw/pci-host/q35.h
> index b3bcf2e632..976fbae599 100644
> --- a/include/hw/pci-host/q35.h
> +++ b/include/hw/pci-host/q35.h
> @@ -32,6 +32,7 @@
>  #include "hw/acpi/ich9.h"
>  #include "hw/pci-host/pam.h"
>  #include "hw/i386/intel_iommu.h"
> +#include "qemu/units.h"
>
>  #define TYPE_Q35_HOST_DEVICE "q35-pcihost"
>  #define Q35_HOST_DEVICE(obj) \
> @@ -54,6 +55,8 @@ typedef struct MCHPCIState {
>      MemoryRegion smram_region, open_high_smram;
>      MemoryRegion smram, low_smram, high_smram;
>      MemoryRegion tseg_blackhole, tseg_window;
> +    MemoryRegion smbase_blackhole, smbase_window;
> +    bool has_smram_at_smbase;
>      Range pci_hole;
>      uint64_t below_4g_mem_size;
>      uint64_t above_4g_mem_size;
> @@ -97,6 +100,13 @@ typedef struct Q35PCIHost {
>  #define MCH_HOST_BRIDGE_EXT_TSEG_MBYTES_QUERY  0xffff
>  #define MCH_HOST_BRIDGE_EXT_TSEG_MBYTES_MAX    0xfff
>
> +#define MCH_HOST_BRIDGE_SMBASE_SIZE            (128 * KiB)
> +#define MCH_HOST_BRIDGE_SMBASE_ADDR            0x30000
> +#define MCH_HOST_BRIDGE_F_SMBASE               0x9c
> +#define MCH_HOST_BRIDGE_F_SMBASE_QUERY         0xff
> +#define MCH_HOST_BRIDGE_F_SMBASE_IN_RAM        0x01
> +#define MCH_HOST_BRIDGE_F_SMBASE_LCK           0x02
> +
>  #define MCH_HOST_BRIDGE_PCIEXBAR               0x60    /* 64bit register */
>  #define MCH_HOST_BRIDGE_PCIEXBAR_SIZE          8       /* 64bit register */
>  #define MCH_HOST_BRIDGE_PCIEXBAR_DEFAULT       0xb0000000
> diff --git a/hw/i386/pc.c b/hw/i386/pc.c
> index bad866fe44..288d28358a 100644
> --- a/hw/i386/pc.c
> +++ b/hw/i386/pc.c
> @@ -119,7 +119,9 @@ struct hpet_fw_config hpet_cfg = {.count = UINT8_MAX};
>  /* Physical Address of PVH entry point read from kernel ELF NOTE */
>  static size_t pvh_start_addr;
>
> -GlobalProperty pc_compat_4_1[] = {};
> +GlobalProperty pc_compat_4_1[] = {
> +    { "mch", "smbase-smram", "off" },
> +};
>  const size_t pc_compat_4_1_len = G_N_ELEMENTS(pc_compat_4_1);
>
>  GlobalProperty pc_compat_4_0[] = {};
> diff --git a/hw/pci-host/q35.c b/hw/pci-host/q35.c
> index 158d270b9f..c1bd9f78ae 100644
> --- a/hw/pci-host/q35.c
> +++ b/hw/pci-host/q35.c
> @@ -275,20 +275,20 @@ static const TypeInfo q35_host_info = {
>   * MCH D0:F0
>   */
>
> -static uint64_t tseg_blackhole_read(void *ptr, hwaddr reg, unsigned size)
> +static uint64_t blackhole_read(void *ptr, hwaddr reg, unsigned size)
>  {
>      return 0xffffffff;
>  }
>
> -static void tseg_blackhole_write(void *opaque, hwaddr addr, uint64_t val,
> -                                 unsigned width)
> +static void blackhole_write(void *opaque, hwaddr addr, uint64_t val,
> +                            unsigned width)
>  {
>      /* nothing */
>  }
>
> -static const MemoryRegionOps tseg_blackhole_ops = {
> -    .read = tseg_blackhole_read,
> -    .write = tseg_blackhole_write,
> +static const MemoryRegionOps blackhole_ops = {
> +    .read = blackhole_read,
> +    .write = blackhole_write,
>      .endianness = DEVICE_NATIVE_ENDIAN,
>      .valid.min_access_size = 1,
>      .valid.max_access_size = 4,
> @@ -430,6 +430,46 @@ static void mch_update_ext_tseg_mbytes(MCHPCIState *mch)
>      }
>  }
>
> +static void mch_update_smbase_smram(MCHPCIState *mch)
> +{
> +    PCIDevice *pd = PCI_DEVICE(mch);
> +    uint8_t *reg = pd->config + MCH_HOST_BRIDGE_F_SMBASE;
> +    bool lck;
> +
> +    if (!mch->has_smram_at_smbase) {
> +        return;
> +    }
> +
> +    if (*reg == MCH_HOST_BRIDGE_F_SMBASE_QUERY) {
> +        pd->wmask[MCH_HOST_BRIDGE_F_SMBASE] =
> +            MCH_HOST_BRIDGE_F_SMBASE_LCK;
> +        *reg = MCH_HOST_BRIDGE_F_SMBASE_IN_RAM;
> +        return;
> +    }
> +
> +    /*
> +     * default/reset state, discard written value
> +     * which will disable SMRAM blackhole at SMBASE
> +     */
> +    if (pd->wmask[MCH_HOST_BRIDGE_F_SMBASE] == 0xff) {
> +        *reg = 0x00;
> +    }
> +
> +    memory_region_transaction_begin();
> +    if (*reg & MCH_HOST_BRIDGE_F_SMBASE_LCK) {
> +        /* disable all writes */
> +        pd->wmask[MCH_HOST_BRIDGE_F_SMBASE] &=
> +            ~MCH_HOST_BRIDGE_F_SMBASE_LCK;
> +        *reg = MCH_HOST_BRIDGE_F_SMBASE_LCK;
> +        lck = true;
> +    } else {
> +        lck = false;
> +    }
> +    memory_region_set_enabled(&mch->smbase_blackhole, lck);
> +    memory_region_set_enabled(&mch->smbase_window, lck);
> +    memory_region_transaction_commit();
> +}
> +
>  static void mch_write_config(PCIDevice *d,
>                                uint32_t address, uint32_t val, int len)
>  {
> @@ -456,6 +496,10 @@ static void mch_write_config(PCIDevice *d,
>                         MCH_HOST_BRIDGE_EXT_TSEG_MBYTES_SIZE)) {
>          mch_update_ext_tseg_mbytes(mch);
>      }
> +
> +    if (ranges_overlap(address, len, MCH_HOST_BRIDGE_F_SMBASE, 1)) {
> +        mch_update_smbase_smram(mch);
> +    }
>  }
>
>  static void mch_update(MCHPCIState *mch)
> @@ -464,6 +508,7 @@ static void mch_update(MCHPCIState *mch)
>      mch_update_pam(mch);
>      mch_update_smram(mch);
>      mch_update_ext_tseg_mbytes(mch);
> +    mch_update_smbase_smram(mch);
>
>      /*
>       * pci hole goes from end-of-low-ram to io-apic.
> @@ -514,6 +559,9 @@ static void mch_reset(DeviceState *qdev)
>                       MCH_HOST_BRIDGE_EXT_TSEG_MBYTES_QUERY);
>      }
>
> +    d->config[MCH_HOST_BRIDGE_F_SMBASE] = 0;
> +    d->wmask[MCH_HOST_BRIDGE_F_SMBASE] = 0xff;
> +
>      mch_update(mch);
>  }
>
> @@ -563,7 +611,7 @@ static void mch_realize(PCIDevice *d, Error **errp)
>      memory_region_add_subregion(&mch->smram, 0xfeda0000, &mch->high_smram);
>
>      memory_region_init_io(&mch->tseg_blackhole, OBJECT(mch),
> -                          &tseg_blackhole_ops, NULL,
> +                          &blackhole_ops, NULL,
>                            "tseg-blackhole", 0);
>      memory_region_set_enabled(&mch->tseg_blackhole, false);
>      memory_region_add_subregion_overlap(mch->system_memory,
> @@ -575,6 +623,23 @@ static void mch_realize(PCIDevice *d, Error **errp)
>      memory_region_set_enabled(&mch->tseg_window, false);
>      memory_region_add_subregion(&mch->smram, mch->below_4g_mem_size,
>                                  &mch->tseg_window);
> +
> +    memory_region_init_io(&mch->smbase_blackhole, OBJECT(mch), &blackhole_ops,
> +                          NULL, "smbase-blackhole",
> +                          MCH_HOST_BRIDGE_SMBASE_SIZE);
> +    memory_region_set_enabled(&mch->smbase_blackhole, false);
> +    memory_region_add_subregion_overlap(mch->system_memory,
> +                                        MCH_HOST_BRIDGE_SMBASE_ADDR,
> +                                        &mch->smbase_blackhole, 1);
> +
> +    memory_region_init_alias(&mch->smbase_window, OBJECT(mch),
> +                             "smbase-window", mch->ram_memory,
> +                             MCH_HOST_BRIDGE_SMBASE_ADDR,
> +                             MCH_HOST_BRIDGE_SMBASE_SIZE);
> +    memory_region_set_enabled(&mch->smbase_window, false);
> +    memory_region_add_subregion(&mch->smram, MCH_HOST_BRIDGE_SMBASE_ADDR,
> +                                &mch->smbase_window);
> +
>      object_property_add_const_link(qdev_get_machine(), "smram",
>                                     OBJECT(&mch->smram), &error_abort);
>
> @@ -601,6 +666,7 @@ uint64_t mch_mcfg_base(void)
>  static Property mch_props[] = {
>      DEFINE_PROP_UINT16("extended-tseg-mbytes", MCHPCIState, ext_tseg_mbytes,
>                         16),
> +    DEFINE_PROP_BOOL("smbase-smram", MCHPCIState, has_smram_at_smbase, true),
>      DEFINE_PROP_END_OF_LIST(),
>  };
>
>



^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [edk2-devel] [Qemu-devel] [PATCH 1/2] q35: implement 128K SMRAM at default SMBASE address
  2019-09-19 17:02   ` Laszlo Ersek
@ 2019-09-20  8:28     ` Igor Mammedov
  2019-09-20  9:28       ` Laszlo Ersek
  0 siblings, 1 reply; 17+ messages in thread
From: Igor Mammedov @ 2019-09-20  8:28 UTC (permalink / raw)
  To: Laszlo Ersek
  Cc: yingwen.chen, Brijesh Singh, devel, phillip.goerl, qemu-devel,
	alex.williamson, jiewen.yao, jun.nakajima, michael.d.kinney,
	pbonzini, boris.ostrovsky, rfc, joao.m.martins

On Thu, 19 Sep 2019 19:02:07 +0200
"Laszlo Ersek" <lersek@redhat.com> wrote:

> Hi Igor,
> 
> (+Brijesh)
> 
> long-ish pondering ahead, with a question at the end.
[...]

> Finally: can you please remind me why we lock down 128KB (32 pages) at
> 0x3_0000, and not just half of that? What do we need the range at
> [0x4_0000..0x4_FFFF] for?


If I recall correctly, CPU consumes 64K of save/restore area.
The rest 64K are temporary RAM for using in SMI relocation handler,
if it's possible to get away without it then we can drop it and
lock only 64K required for CPU state. It won't help with SEV
conflict though as it's in the first 64K.

On QEMU side,  we can drop black-hole approach and allocate
dedicated SMRAM region, which explicitly gets mapped into
RAM address space and after SMI handler initialization, gets
unmapped (locked). So that SMRAM would be accessible only
from SMM context. That way RAM at 0x30000 could be used as
normal when SMRAM is unmapped.


^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [edk2-devel] [Qemu-devel] [PATCH 1/2] q35: implement 128K SMRAM at default SMBASE address
  2019-09-20  8:28     ` [edk2-devel] " Igor Mammedov
@ 2019-09-20  9:28       ` Laszlo Ersek
  2019-09-23 18:35         ` Laszlo Ersek
  2019-09-24 11:47         ` Paolo Bonzini
  0 siblings, 2 replies; 17+ messages in thread
From: Laszlo Ersek @ 2019-09-20  9:28 UTC (permalink / raw)
  To: Igor Mammedov
  Cc: yingwen.chen, Brijesh Singh, devel, phillip.goerl, qemu-devel,
	alex.williamson, jiewen.yao, jun.nakajima, michael.d.kinney,
	pbonzini, boris.ostrovsky, rfc, joao.m.martins

On 09/20/19 10:28, Igor Mammedov wrote:
> On Thu, 19 Sep 2019 19:02:07 +0200
> "Laszlo Ersek" <lersek@redhat.com> wrote:
> 
>> Hi Igor,
>>
>> (+Brijesh)
>>
>> long-ish pondering ahead, with a question at the end.
> [...]
> 
>> Finally: can you please remind me why we lock down 128KB (32 pages) at
>> 0x3_0000, and not just half of that? What do we need the range at
>> [0x4_0000..0x4_FFFF] for?
> 
> 
> If I recall correctly, CPU consumes 64K of save/restore area.
> The rest 64K are temporary RAM for using in SMI relocation handler,
> if it's possible to get away without it then we can drop it and
> lock only 64K required for CPU state. It won't help with SEV
> conflict though as it's in the first 64K.

OK. Let's go with 128KB for now. Shrinking the area is always easier
than growing it.

> On QEMU side,  we can drop black-hole approach and allocate
> dedicated SMRAM region, which explicitly gets mapped into
> RAM address space and after SMI handler initialization, gets
> unmapped (locked). So that SMRAM would be accessible only
> from SMM context. That way RAM at 0x30000 could be used as
> normal when SMRAM is unmapped.

I prefer the black-hole approach, introduced in your current patch
series, if it can work. Way less opportunity for confusion.

I've started work on the counterpart OVMF patches; I'll report back.

Thanks
Laszlo


^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [edk2-devel] [Qemu-devel] [PATCH 1/2] q35: implement 128K SMRAM at default SMBASE address
  2019-09-20  9:28       ` Laszlo Ersek
@ 2019-09-23 18:35         ` Laszlo Ersek
  2019-09-24 11:19           ` Igor Mammedov
  2019-09-24 11:47         ` Paolo Bonzini
  1 sibling, 1 reply; 17+ messages in thread
From: Laszlo Ersek @ 2019-09-23 18:35 UTC (permalink / raw)
  To: Igor Mammedov
  Cc: yingwen.chen, Brijesh Singh, devel, phillip.goerl, qemu-devel,
	alex.williamson, jiewen.yao, jun.nakajima, michael.d.kinney,
	pbonzini, boris.ostrovsky, rfc, joao.m.martins

On 09/20/19 11:28, Laszlo Ersek wrote:
> On 09/20/19 10:28, Igor Mammedov wrote:
>> On Thu, 19 Sep 2019 19:02:07 +0200
>> "Laszlo Ersek" <lersek@redhat.com> wrote:
>>
>>> Hi Igor,
>>>
>>> (+Brijesh)
>>>
>>> long-ish pondering ahead, with a question at the end.
>> [...]
>>
>>> Finally: can you please remind me why we lock down 128KB (32 pages) at
>>> 0x3_0000, and not just half of that? What do we need the range at
>>> [0x4_0000..0x4_FFFF] for?
>>
>>
>> If I recall correctly, CPU consumes 64K of save/restore area.
>> The rest 64K are temporary RAM for using in SMI relocation handler,
>> if it's possible to get away without it then we can drop it and
>> lock only 64K required for CPU state. It won't help with SEV
>> conflict though as it's in the first 64K.
> 
> OK. Let's go with 128KB for now. Shrinking the area is always easier
> than growing it.
> 
>> On QEMU side,  we can drop black-hole approach and allocate
>> dedicated SMRAM region, which explicitly gets mapped into
>> RAM address space and after SMI handler initialization, gets
>> unmapped (locked). So that SMRAM would be accessible only
>> from SMM context. That way RAM at 0x30000 could be used as
>> normal when SMRAM is unmapped.
> 
> I prefer the black-hole approach, introduced in your current patch
> series, if it can work. Way less opportunity for confusion.
> 
> I've started work on the counterpart OVMF patches; I'll report back.

I've got good results. For this (1/2) QEMU patch:

Tested-by: Laszlo Ersek <lersek@redhat.com>

I tested the following scenarios. In every case, I verified the OVMF
log, and also the "info mtree" monitor command's result (i.e. whether
"smbase-blackhole" / "smbase-window" were disabled or enabled). Mostly,
I diffed these text files between the test scenarios (looking for
desired / undesired differences). In the Linux guests, I checked /
compared the dmesg too (wrt. the UEFI memmap).

- unpatched OVMF (regression test), Fedora guest, normal boot and S3

- patched OVMF, but feature disabled with "-global mch.smbase-smram=off"
(another regression test), Fedora guest, normal boot and S3

- patched OVMF, feature enabled, Fedora and various Windows guests
(win7, win8, win10 families, client/server), normal boot and S3

- a subset of the above guests, with S3 disabled (-global
  ICH9-LPC.disable_s3=1), and obviously S3 resume not tested

SEV: used 5.2-ish Linux guest, with S3 disabled (no support under SEV
for that now):

- unpatched OVMF (regression test), normal boot

- patched OVMF but feature disabled on the QEMU cmdline (another
regression test), normal boot

- patched OVMF, feature enabled, normal boot.

I plan to post the OVMF patches tomorrow, for discussion.

(It's likely too early to push these QEMU / edk2 patches right now -- we
don't know yet if this path will take us to the destination. For now, it
certainly looks great.)

Thanks
Laszlo


^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [edk2-devel] [Qemu-devel] [PATCH 1/2] q35: implement 128K SMRAM at default SMBASE address
  2019-09-23 18:35         ` Laszlo Ersek
@ 2019-09-24 11:19           ` Igor Mammedov
  2019-09-30 11:51             ` Laszlo Ersek
  0 siblings, 1 reply; 17+ messages in thread
From: Igor Mammedov @ 2019-09-24 11:19 UTC (permalink / raw)
  To: Laszlo Ersek
  Cc: yingwen.chen, Brijesh Singh, devel, phillip.goerl, qemu-devel,
	alex.williamson, jiewen.yao, jun.nakajima, michael.d.kinney,
	pbonzini, boris.ostrovsky, rfc, joao.m.martins

On Mon, 23 Sep 2019 20:35:02 +0200
"Laszlo Ersek" <lersek@redhat.com> wrote:

> On 09/20/19 11:28, Laszlo Ersek wrote:
> > On 09/20/19 10:28, Igor Mammedov wrote:  
> >> On Thu, 19 Sep 2019 19:02:07 +0200
> >> "Laszlo Ersek" <lersek@redhat.com> wrote:
> >>  
> >>> Hi Igor,
> >>>
> >>> (+Brijesh)
> >>>
> >>> long-ish pondering ahead, with a question at the end.  
> >> [...]
> >>  
> >>> Finally: can you please remind me why we lock down 128KB (32 pages) at
> >>> 0x3_0000, and not just half of that? What do we need the range at
> >>> [0x4_0000..0x4_FFFF] for?  
> >>
> >>
> >> If I recall correctly, CPU consumes 64K of save/restore area.
> >> The rest 64K are temporary RAM for using in SMI relocation handler,
> >> if it's possible to get away without it then we can drop it and
> >> lock only 64K required for CPU state. It won't help with SEV
> >> conflict though as it's in the first 64K.  
> > 
> > OK. Let's go with 128KB for now. Shrinking the area is always easier
> > than growing it.
> >   
> >> On QEMU side,  we can drop black-hole approach and allocate
> >> dedicated SMRAM region, which explicitly gets mapped into
> >> RAM address space and after SMI handler initialization, gets
> >> unmapped (locked). So that SMRAM would be accessible only
> >> from SMM context. That way RAM at 0x30000 could be used as
> >> normal when SMRAM is unmapped.  
> > 
> > I prefer the black-hole approach, introduced in your current patch
> > series, if it can work. Way less opportunity for confusion.
> > 
> > I've started work on the counterpart OVMF patches; I'll report back.  
> 
> I've got good results. For this (1/2) QEMU patch:
> 
> Tested-by: Laszlo Ersek <lersek@redhat.com>
> 
> I tested the following scenarios. In every case, I verified the OVMF
> log, and also the "info mtree" monitor command's result (i.e. whether
> "smbase-blackhole" / "smbase-window" were disabled or enabled). Mostly,
> I diffed these text files between the test scenarios (looking for
> desired / undesired differences). In the Linux guests, I checked /
> compared the dmesg too (wrt. the UEFI memmap).
> 
> - unpatched OVMF (regression test), Fedora guest, normal boot and S3
> 
> - patched OVMF, but feature disabled with "-global mch.smbase-smram=off"
> (another regression test), Fedora guest, normal boot and S3
> 
> - patched OVMF, feature enabled, Fedora and various Windows guests
> (win7, win8, win10 families, client/server), normal boot and S3
> 
> - a subset of the above guests, with S3 disabled (-global
>   ICH9-LPC.disable_s3=1), and obviously S3 resume not tested
> 
> SEV: used 5.2-ish Linux guest, with S3 disabled (no support under SEV
> for that now):
> 
> - unpatched OVMF (regression test), normal boot
> 
> - patched OVMF but feature disabled on the QEMU cmdline (another
> regression test), normal boot
> 
> - patched OVMF, feature enabled, normal boot.
> 
> I plan to post the OVMF patches tomorrow, for discussion.
> 
> (It's likely too early to push these QEMU / edk2 patches right now -- we
> don't know yet if this path will take us to the destination. For now, it
> certainly looks great.)

Laszlo, thanks for trying it out.
It's nice to hear that approach is somewhat usable.
Hopefully we won't have to invent 'paused' cpu mode.

Pls CC me on your patches
(not that I qualify for reviewing,
but may be I could learn a thing or two from it)

> Thanks
> Laszlo
> 
> -=-=-=-=-=-=-=-=-=-=-=-
> Groups.io Links: You receive all messages sent to this group.
> 
> View/Reply Online (#47864): https://edk2.groups.io/g/devel/message/47864
> Mute This Topic: https://groups.io/mt/34201782/1958639
> Group Owner: devel+owner@edk2.groups.io
> Unsubscribe: https://edk2.groups.io/g/devel/unsub  [imammedo@redhat.com]
> -=-=-=-=-=-=-=-=-=-=-=-
> 



^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [edk2-devel] [Qemu-devel] [PATCH 1/2] q35: implement 128K SMRAM at default SMBASE address
  2019-09-20  9:28       ` Laszlo Ersek
  2019-09-23 18:35         ` Laszlo Ersek
@ 2019-09-24 11:47         ` Paolo Bonzini
  1 sibling, 0 replies; 17+ messages in thread
From: Paolo Bonzini @ 2019-09-24 11:47 UTC (permalink / raw)
  To: Laszlo Ersek, Igor Mammedov
  Cc: yingwen.chen, Brijesh Singh, devel, phillip.goerl, qemu-devel,
	alex.williamson, jiewen.yao, jun.nakajima, michael.d.kinney,
	boris.ostrovsky, rfc, joao.m.martins

On 20/09/19 11:28, Laszlo Ersek wrote:
>> On QEMU side,  we can drop black-hole approach and allocate
>> dedicated SMRAM region, which explicitly gets mapped into
>> RAM address space and after SMI hanlder initialization, gets
>> unmapped (locked). So that SMRAM would be accessible only
>> from SMM context. That way RAM at 0x30000 could be used as
>> normal when SMRAM is unmapped.
>
> I prefer the black-hole approach, introduced in your current patch
> series, if it can work. Way less opportunity for confusion.

Another possibility would be to alias the 0xA0000..0xBFFFF SMRAM to
0x30000..0x4FFFF (only when in SMM).

I'm not super enthusiastic about adding this kind of QEMU-only feature.
 The alternative would be to implement VT-d range locking through the
intel-iommu device's PCI configuration space (which includes _adding_
the configuration space, i.e. making the IOMMU a PCI device in the first
place, and the support to the firmware for configuring the VT-d BAR at
0xfed90000).  This would be the right way to do it, but it would entail
a lot of work throughout the stack. :(  So I guess some variant of this
would be okay, as long as it's peppered with "this is not how real
hardware does it" comments in both QEMU and EDK2.

Thanks,

Paolo

> I've started work on the counterpart OVMF patches; I'll report back.



^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [edk2-devel] [Qemu-devel] [PATCH 1/2] q35: implement 128K SMRAM at default SMBASE address
  2019-09-24 11:19           ` Igor Mammedov
@ 2019-09-30 11:51             ` Laszlo Ersek
  2019-09-30 12:36               ` Igor Mammedov
  0 siblings, 1 reply; 17+ messages in thread
From: Laszlo Ersek @ 2019-09-30 11:51 UTC (permalink / raw)
  To: Igor Mammedov
  Cc: yingwen.chen, Brijesh Singh, devel, phillip.goerl, qemu-devel,
	alex.williamson, jiewen.yao, jun.nakajima, michael.d.kinney,
	pbonzini, boris.ostrovsky, rfc, joao.m.martins

Hi Igor,

On 09/24/19 13:19, Igor Mammedov wrote:
> On Mon, 23 Sep 2019 20:35:02 +0200
> "Laszlo Ersek" <lersek@redhat.com> wrote:

>> I've got good results. For this (1/2) QEMU patch:
>>
>> Tested-by: Laszlo Ersek <lersek@redhat.com>
>>
>> I tested the following scenarios. In every case, I verified the OVMF
>> log, and also the "info mtree" monitor command's result (i.e. whether
>> "smbase-blackhole" / "smbase-window" were disabled or enabled).
>> Mostly, I diffed these text files between the test scenarios (looking
>> for desired / undesired differences). In the Linux guests, I checked
>> / compared the dmesg too (wrt. the UEFI memmap).
>>
>> - unpatched OVMF (regression test), Fedora guest, normal boot and S3
>>
>> - patched OVMF, but feature disabled with "-global
>>   mch.smbase-smram=off" (another regression test), Fedora guest,
>>   normal boot and S3
>>
>> - patched OVMF, feature enabled, Fedora and various Windows guests
>>   (win7, win8, win10 families, client/server), normal boot and S3
>>
>> - a subset of the above guests, with S3 disabled (-global
>>   ICH9-LPC.disable_s3=1), and obviously S3 resume not tested
>>
>> SEV: used 5.2-ish Linux guest, with S3 disabled (no support under SEV
>> for that now):
>>
>> - unpatched OVMF (regression test), normal boot
>>
>> - patched OVMF but feature disabled on the QEMU cmdline (another
>>   regression test), normal boot
>>
>> - patched OVMF, feature enabled, normal boot.
>>
>> I plan to post the OVMF patches tomorrow, for discussion.
>>
>> (It's likely too early to push these QEMU / edk2 patches right now --
>> we don't know yet if this path will take us to the destination. For
>> now, it certainly looks great.)
>
> Laszlo, thanks for trying it out.
> It's nice to hear that approach is somewhat usable.
> Hopefully we won't have to invent 'paused' cpu mode.
>
> Pls CC me on your patches
> (not that I qualify for reviewing,
> but may be I could learn a thing or two from it)

Considering the plan at [1], the two patch sets [2] [3] should cover
step (01); at least as proof of concept.

[1] [edk2-devel] CPU hotplug using SMM with QEMU+OVMF
    http://mid.mail-archive.com/20190830164802.1b17ff26@redhat.com

[2] The current thread:
    [Qemu-devel] [PATCH 0/2] q35: mch: allow to lock down 128K RAM at default SMBASE address
    http://mid.mail-archive.com/20190917130708.10281-1-imammedo@redhat.com

[3] [edk2-devel] [PATCH wave 1 00/10] support QEMU's "SMRAM at default SMBASE" feature
    http://mid.mail-archive.com/20190924113505.27272-1-lersek@redhat.com

(I'll have to figure out what SMI handler to put in place there, but I'd
like to experiment with that once we can cause a new CPU to start
executing code there, in SMM.)

So what's next?

To me it looks like we need to figure out how QEMU can make the OS call
into SMM (in the GPE cpu hotplug handler), passing in parameters and
such. This would be step (03).

Do you agree?

If so, I'll ask Jiewen about such OS->SMM calls separately, because I
seem to remember that there used to be an "SMM communication table" of
sorts, for flexible OS->SMM calls. However, it appears to be deprecated
lately.

Hmmm.... Yes, UEFI 2.8 has "Appendix O - UEFI ACPI Data Table", and it
writes (after defining the table format):

    The first use of this UEFI ACPI table format is the SMM
    Communication ACPI Table. This table describes a special software
    SMI that can be used to initiate inter-mode communication in the OS
    present environment by non-firmware agents with SMM code.

    Note: The use of the SMM Communication ACPI table is deprecated in
          UEFI spec. 2.7. This is due to the lack of a use case for
          inter-mode communication by non-firmware agents with SMM code
          and support for initiating this form of communication in
          common OSes.

The changelog at the front of the UEFI spec also references the
Mantis#1691 spec ticket, "Remove/Deprecate SMM Communication ACPI Table"
(addressed in UEFI 2.6B).

(I think that must have been a security ticket, because, while I
generally have access to Mantis tickets,
<https://mantis.uefi.org/mantis/view.php?id=1631> gives me "Access
Denied" :/ )

Thanks,
Laszlo


^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [edk2-devel] [Qemu-devel] [PATCH 1/2] q35: implement 128K SMRAM at default SMBASE address
  2019-09-30 11:51             ` Laszlo Ersek
@ 2019-09-30 12:36               ` Igor Mammedov
  2019-09-30 14:22                 ` Yao, Jiewen
  0 siblings, 1 reply; 17+ messages in thread
From: Igor Mammedov @ 2019-09-30 12:36 UTC (permalink / raw)
  To: Laszlo Ersek
  Cc: yingwen.chen, Brijesh Singh, devel, phillip.goerl, qemu-devel,
	alex.williamson, jiewen.yao, jun.nakajima, michael.d.kinney,
	pbonzini, boris.ostrovsky, rfc, joao.m.martins

On Mon, 30 Sep 2019 13:51:46 +0200
"Laszlo Ersek" <lersek@redhat.com> wrote:

> Hi Igor,
> 
> On 09/24/19 13:19, Igor Mammedov wrote:
> > On Mon, 23 Sep 2019 20:35:02 +0200
> > "Laszlo Ersek" <lersek@redhat.com> wrote:  
> 
> >> I've got good results. For this (1/2) QEMU patch:
> >>
> >> Tested-by: Laszlo Ersek <lersek@redhat.com>
> >>
> >> I tested the following scenarios. In every case, I verified the OVMF
> >> log, and also the "info mtree" monitor command's result (i.e. whether
> >> "smbase-blackhole" / "smbase-window" were disabled or enabled).
> >> Mostly, I diffed these text files between the test scenarios (looking
> >> for desired / undesired differences). In the Linux guests, I checked
> >> / compared the dmesg too (wrt. the UEFI memmap).
> >>
> >> - unpatched OVMF (regression test), Fedora guest, normal boot and S3
> >>
> >> - patched OVMF, but feature disabled with "-global
> >>   mch.smbase-smram=off" (another regression test), Fedora guest,
> >>   normal boot and S3
> >>
> >> - patched OVMF, feature enabled, Fedora and various Windows guests
> >>   (win7, win8, win10 families, client/server), normal boot and S3
> >>
> >> - a subset of the above guests, with S3 disabled (-global
> >>   ICH9-LPC.disable_s3=1), and obviously S3 resume not tested
> >>
> >> SEV: used 5.2-ish Linux guest, with S3 disabled (no support under SEV
> >> for that now):
> >>
> >> - unpatched OVMF (regression test), normal boot
> >>
> >> - patched OVMF but feature disabled on the QEMU cmdline (another
> >>   regression test), normal boot
> >>
> >> - patched OVMF, feature enabled, normal boot.
> >>
> >> I plan to post the OVMF patches tomorrow, for discussion.
> >>
> >> (It's likely too early to push these QEMU / edk2 patches right now --
> >> we don't know yet if this path will take us to the destination. For
> >> now, it certainly looks great.)  
> >
> > Laszlo, thanks for trying it out.
> > It's nice to hear that approach is somewhat usable.
> > Hopefully we won't have to invent 'paused' cpu mode.
> >
> > Pls CC me on your patches
> > (not that I qualify for reviewing,
> > but may be I could learn a thing or two from it)  
> 
> Considering the plan at [1], the two patch sets [2] [3] should cover
> step (01); at least as proof of concept.
> 
> [1] [edk2-devel] CPU hotplug using SMM with QEMU+OVMF
>     http://mid.mail-archive.com/20190830164802.1b17ff26@redhat.com
> 
> [2] The current thread:
>     [Qemu-devel] [PATCH 0/2] q35: mch: allow to lock down 128K RAM at default SMBASE address
>     http://mid.mail-archive.com/20190917130708.10281-1-imammedo@redhat.com
> 
> [3] [edk2-devel] [PATCH wave 1 00/10] support QEMU's "SMRAM at default SMBASE" feature
>     http://mid.mail-archive.com/20190924113505.27272-1-lersek@redhat.com
> 
> (I'll have to figure out what SMI handler to put in place there, but I'd
> like to experiment with that once we can cause a new CPU to start
> executing code there, in SMM.)
> 
> So what's next?
> 
> To me it looks like we need to figure out how QEMU can make the OS call
> into SMM (in the GPE cpu hotplug handler), passing in parameters and
> such. This would be step (03).
> 
> Do you agree?
> 
> If so, I'll ask Jiewen about such OS->SMM calls separately, because I
> seem to remember that there used to be an "SMM communication table" of
> sorts, for flexible OS->SMM calls. However, it appears to be deprecated
> lately.
we can try to resurrect and put over it some kind of protocol
to describe which CPUs to where hotplugged.

or we could put a parameter into SMI status register (IO port 0xb3)
and then trigger SMI from GPE handler to tell SMI handler that cpu
hotplug happened and then use QEMU's cpu hotplug interface
to enumerate hotplugged CPUs for SMI handler.

The later is probably simpler as we won't need to reinvent the wheel
(just reuse the interface that's already in use by GPE handler).

> Hmmm.... Yes, UEFI 2.8 has "Appendix O - UEFI ACPI Data Table", and it
> writes (after defining the table format):
> 
>     The first use of this UEFI ACPI table format is the SMM
>     Communication ACPI Table. This table describes a special software
>     SMI that can be used to initiate inter-mode communication in the OS
>     present environment by non-firmware agents with SMM code.
> 
>     Note: The use of the SMM Communication ACPI table is deprecated in
>           UEFI spec. 2.7. This is due to the lack of a use case for
>           inter-mode communication by non-firmware agents with SMM code
>           and support for initiating this form of communication in
>           common OSes.
> 
> The changelog at the front of the UEFI spec also references the
> Mantis#1691 spec ticket, "Remove/Deprecate SMM Communication ACPI Table"
> (addressed in UEFI 2.6B).
> 
> (I think that must have been a security ticket, because, while I
> generally have access to Mantis tickets,
> <https://mantis.uefi.org/mantis/view.php?id=1631> gives me "Access
> Denied" :/ )
> 
> Thanks,
> Laszlo
> 
> -=-=-=-=-=-=-=-=-=-=-=-
> Groups.io Links: You receive all messages sent to this group.
> 
> View/Reply Online (#48282): https://edk2.groups.io/g/devel/message/48282
> Mute This Topic: https://groups.io/mt/34201782/1958639
> Group Owner: devel+owner@edk2.groups.io
> Unsubscribe: https://edk2.groups.io/g/devel/unsub  [imammedo@redhat.com]
> -=-=-=-=-=-=-=-=-=-=-=-
> 



^ permalink raw reply	[flat|nested] 17+ messages in thread

* RE: [edk2-devel] [Qemu-devel] [PATCH 1/2] q35: implement 128K SMRAM at default SMBASE address
  2019-09-30 12:36               ` Igor Mammedov
@ 2019-09-30 14:22                 ` Yao, Jiewen
  2019-10-01 18:03                   ` Laszlo Ersek
  0 siblings, 1 reply; 17+ messages in thread
From: Yao, Jiewen @ 2019-09-30 14:22 UTC (permalink / raw)
  To: devel, imammedo, Laszlo Ersek
  Cc: Chen, Yingwen, Brijesh Singh, phillip.goerl, qemu-devel,
	alex.williamson, Nakajima, Jun, Kinney, Michael D, pbonzini,
	boris.ostrovsky, rfc, joao.m.martins

below

> -----Original Message-----
> From: devel@edk2.groups.io <devel@edk2.groups.io> On Behalf Of Igor
> Mammedov
> Sent: Monday, September 30, 2019 8:37 PM
> To: Laszlo Ersek <lersek@redhat.com>
> Cc: devel@edk2.groups.io; qemu-devel@nongnu.org; Chen, Yingwen
> <yingwen.chen@intel.com>; phillip.goerl@oracle.com;
> alex.williamson@redhat.com; Yao, Jiewen <jiewen.yao@intel.com>; Nakajima,
> Jun <jun.nakajima@intel.com>; Kinney, Michael D
> <michael.d.kinney@intel.com>; pbonzini@redhat.com;
> boris.ostrovsky@oracle.com; rfc@edk2.groups.io; joao.m.martins@oracle.com;
> Brijesh Singh <brijesh.singh@amd.com>
> Subject: Re: [edk2-devel] [Qemu-devel] [PATCH 1/2] q35: implement 128K
> SMRAM at default SMBASE address
> 
> On Mon, 30 Sep 2019 13:51:46 +0200
> "Laszlo Ersek" <lersek@redhat.com> wrote:
> 
> > Hi Igor,
> >
> > On 09/24/19 13:19, Igor Mammedov wrote:
> > > On Mon, 23 Sep 2019 20:35:02 +0200
> > > "Laszlo Ersek" <lersek@redhat.com> wrote:
> >
> > >> I've got good results. For this (1/2) QEMU patch:
> > >>
> > >> Tested-by: Laszlo Ersek <lersek@redhat.com>
> > >>
> > >> I tested the following scenarios. In every case, I verified the OVMF
> > >> log, and also the "info mtree" monitor command's result (i.e. whether
> > >> "smbase-blackhole" / "smbase-window" were disabled or enabled).
> > >> Mostly, I diffed these text files between the test scenarios (looking
> > >> for desired / undesired differences). In the Linux guests, I checked
> > >> / compared the dmesg too (wrt. the UEFI memmap).
> > >>
> > >> - unpatched OVMF (regression test), Fedora guest, normal boot and S3
> > >>
> > >> - patched OVMF, but feature disabled with "-global
> > >>   mch.smbase-smram=off" (another regression test), Fedora guest,
> > >>   normal boot and S3
> > >>
> > >> - patched OVMF, feature enabled, Fedora and various Windows guests
> > >>   (win7, win8, win10 families, client/server), normal boot and S3
> > >>
> > >> - a subset of the above guests, with S3 disabled (-global
> > >>   ICH9-LPC.disable_s3=1), and obviously S3 resume not tested
> > >>
> > >> SEV: used 5.2-ish Linux guest, with S3 disabled (no support under SEV
> > >> for that now):
> > >>
> > >> - unpatched OVMF (regression test), normal boot
> > >>
> > >> - patched OVMF but feature disabled on the QEMU cmdline (another
> > >>   regression test), normal boot
> > >>
> > >> - patched OVMF, feature enabled, normal boot.
> > >>
> > >> I plan to post the OVMF patches tomorrow, for discussion.
> > >>
> > >> (It's likely too early to push these QEMU / edk2 patches right now --
> > >> we don't know yet if this path will take us to the destination. For
> > >> now, it certainly looks great.)
> > >
> > > Laszlo, thanks for trying it out.
> > > It's nice to hear that approach is somewhat usable.
> > > Hopefully we won't have to invent 'paused' cpu mode.
> > >
> > > Pls CC me on your patches
> > > (not that I qualify for reviewing,
> > > but may be I could learn a thing or two from it)
> >
> > Considering the plan at [1], the two patch sets [2] [3] should cover
> > step (01); at least as proof of concept.
> >
> > [1] [edk2-devel] CPU hotplug using SMM with QEMU+OVMF
> >     http://mid.mail-archive.com/20190830164802.1b17ff26@redhat.com
> >
> > [2] The current thread:
> >     [Qemu-devel] [PATCH 0/2] q35: mch: allow to lock down 128K RAM at
> default SMBASE address
> >     http://mid.mail-archive.com/20190917130708.10281-1-
> imammedo@redhat.com
> >
> > [3] [edk2-devel] [PATCH wave 1 00/10] support QEMU's "SMRAM at default
> SMBASE" feature
> >     http://mid.mail-archive.com/20190924113505.27272-1-lersek@redhat.com
> >
> > (I'll have to figure out what SMI handler to put in place there, but I'd
> > like to experiment with that once we can cause a new CPU to start
> > executing code there, in SMM.)
> >
> > So what's next?
> >
> > To me it looks like we need to figure out how QEMU can make the OS call
> > into SMM (in the GPE cpu hotplug handler), passing in parameters and
> > such. This would be step (03).
> >
> > Do you agree?
> >
> > If so, I'll ask Jiewen about such OS->SMM calls separately, because I
> > seem to remember that there used to be an "SMM communication table" of
> > sorts, for flexible OS->SMM calls. However, it appears to be deprecated
> > lately.
> we can try to resurrect and put over it some kind of protocol
> to describe which CPUs to where hotplugged.
> 
> or we could put a parameter into SMI status register (IO port 0xb3)
> and then trigger SMI from GPE handler to tell SMI handler that cpu
> hotplug happened and then use QEMU's cpu hotplug interface
> to enumerate hotplugged CPUs for SMI handler.
> 
> The later is probably simpler as we won't need to reinvent the wheel
> (just reuse the interface that's already in use by GPE handler).

[Jiewen] The PI specification Volume 4 - SMM defines EFI_MM_COMMUNICATION_PROTOCOL.Communicate() - It can be used to communicate between OS and SMM handler. But it requires the runtime protocol call. I am not sure how OS loader passes this information to OS kernel.

As such, I think using ACPI SCI/GPE -> software SMI handler is an easier way to achieve this. I also recommend this way.
For parameter passing, we can use 1) Port B2 (1 byte), 2) Port B3 (1 byte), 3) chipset scratch register (4 bytes or 8 bytes, based upon scratch register size), 4) ACPI NVS OPREGION, if the data structure is complicated.



> > Hmmm.... Yes, UEFI 2.8 has "Appendix O - UEFI ACPI Data Table", and it
> > writes (after defining the table format):
> >
> >     The first use of this UEFI ACPI table format is the SMM
> >     Communication ACPI Table. This table describes a special software
> >     SMI that can be used to initiate inter-mode communication in the OS
> >     present environment by non-firmware agents with SMM code.
> >
> >     Note: The use of the SMM Communication ACPI table is deprecated in
> >           UEFI spec. 2.7. This is due to the lack of a use case for
> >           inter-mode communication by non-firmware agents with SMM code
> >           and support for initiating this form of communication in
> >           common OSes.
> >
> > The changelog at the front of the UEFI spec also references the
> > Mantis#1691 spec ticket, "Remove/Deprecate SMM Communication ACPI
> Table"
> > (addressed in UEFI 2.6B).
> >
> > (I think that must have been a security ticket, because, while I
> > generally have access to Mantis tickets,
> > <https://mantis.uefi.org/mantis/view.php?id=1691> gives me "Access
> > Denied" :/ )
> >
> > Thanks,
> > Laszlo
> >
> >
> >
> 
> 
> -=-=-=-=-=-=-=-=-=-=-=-
> Groups.io Links: You receive all messages sent to this group.
> 
> View/Reply Online (#48283): https://edk2.groups.io/g/devel/message/48283
> Mute This Topic: https://groups.io/mt/34201782/1772286
> Group Owner: devel+owner@edk2.groups.io
> Unsubscribe: https://edk2.groups.io/g/devel/unsub  [jiewen.yao@intel.com]
> -=-=-=-=-=-=-=-=-=-=-=-



^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [edk2-devel] [Qemu-devel] [PATCH 1/2] q35: implement 128K SMRAM at default SMBASE address
  2019-09-30 14:22                 ` Yao, Jiewen
@ 2019-10-01 18:03                   ` Laszlo Ersek
  2019-10-04 11:31                     ` Igor Mammedov
  0 siblings, 1 reply; 17+ messages in thread
From: Laszlo Ersek @ 2019-10-01 18:03 UTC (permalink / raw)
  To: Yao, Jiewen, devel, imammedo
  Cc: Chen, Yingwen, Brijesh Singh, phillip.goerl, qemu-devel,
	alex.williamson, Nakajima, Jun, Kinney, Michael D, pbonzini,
	boris.ostrovsky, rfc, joao.m.martins

On 09/30/19 16:22, Yao, Jiewen wrote:
> 
>> -----Original Message-----
>> From: devel@edk2.groups.io <devel@edk2.groups.io> On Behalf Of Igor
>> Mammedov
>> Sent: Monday, September 30, 2019 8:37 PM
>> To: Laszlo Ersek <lersek@redhat.com>

>>> To me it looks like we need to figure out how QEMU can make the OS call
>>> into SMM (in the GPE cpu hotplug handler), passing in parameters and
>>> such. This would be step (03).
>>>
>>> Do you agree?
>>>
>>> If so, I'll ask Jiewen about such OS->SMM calls separately, because I
> >>> seem to remember that there used to be an "SMM communication table" of
>>> sorts, for flexible OS->SMM calls. However, it appears to be deprecated
>>> lately.
>> we can try to resurrect and put over it some kind of protocol
>> to describe which CPUs to where hotplugged.
>>
>> or we could put a parameter into SMI status register (IO port 0xb3)
>> and the trigger SMI from GPE handler to tell SMI handler that cpu
>> hotplug happened and then use QEMU's cpu hotplug interface
>> to enumerate hotplugged CPUs for SMI handler.
>>
> >> The latter is probably simpler as we won't need to reinvent the wheel
>> (just reuse the interface that's already in use by GPE handler).

Based on "docs/specs/acpi_cpu_hotplug.txt", this seems to boil down to a
bunch of IO port accesses at base 0x0cd8.

Is that correct?

> [Jiewen] The PI specification Volume 4 - SMM defines EFI_MM_COMMUNICATION_PROTOCOL.Communicate() - It can be used to communicate between OS and SMM handler. But it requires the runtime protocol call. I am not sure how OS loader passes this information to OS kernel.
> 
> As such, I think using ACPI SCI/GPE -> software SMI handler is an easier way to achieve this. I also recommend this way.
> For parameter passing, we can use 1) Port B2 (1 byte), 2) Port B3 (1 byte), 3) chipset scratch register (4 bytes or 8 bytes, based upon scratch register size), 4) ACPI NVS OPREGION, if the data structure is complicated.

I'm confused about the details. In two categories:
(1) what values to use,
(2) how those values are passed.

Assume a CPU is hotplugged, QEMU injects an SCI, and the ACPI GPE handler
in the OS -- which also originates from QEMU -- writes a particular byte
to the Data port (0xB3), and then to the Control port (0xB2),
broadcasting an SMI.

(1) What values to use.

Note that values ICH9_APM_ACPI_ENABLE (2) and ICH9_APM_ACPI_DISABLE (3)
are already reserved in QEMU for IO port 0xB2, for different purposes.
So we can't use those in the GPE handler.

Furthermore, OVMF's EFI_SMM_CONTROL2_PROTOCOL.Trigger() implementation
(in "OvmfPkg/SmmControl2Dxe/SmmControl2Dxe.c") writes 0 to both ports,
as long as the caller does not specify them.

  IoWrite8 (ICH9_APM_STS, DataPort    == NULL ? 0 : *DataPort);
  IoWrite8 (ICH9_APM_CNT, CommandPort == NULL ? 0 : *CommandPort);

And, there is only one Trigger() call site in edk2: namely in
"MdeModulePkg/Core/PiSmmCore/PiSmmIpl.c", in the
SmmCommunicationCommunicate() function.

(That function implements EFI_SMM_COMMUNICATION_PROTOCOL.Communicate().)
This call site passes NULL for both DataPort and CommandPort.

Yet further, EFI_SMM_COMMUNICATION_PROTOCOL.Communicate() is used for
example by the UEFI variable driver, for talking between the
unprivileged (runtime DXE) and privileged (SMM) half.

As a result, all of the software SMIs currently in use in OVMF, related
to actual firmware services, write 0 to both ports.

I guess we can choose new values, as long as we avoid 2 and 3 for the
control port (0xB2), because those are reserved in QEMU -- see
ich9_apm_ctrl_changed() in "hw/isa/lpc_ich9.c".


(2) How the parameters are passed.


(2a) For the new CPU, the SMI remains pending, until it gets an
INIT-SIPI-SIPI from one of the previously plugged CPUs (most likely, the
BSP). At that point, the new CPU will execute the "initial SMI handler
for hotplugged CPUs", at the default SMBASE.

That's a routine we'll have to write in assembly, from zero. In this
routine, we can read back IO ports 0xB2 and 0xB3. And QEMU will be happy
to provide the values last written (see apm_ioport_readb() in
"hw/isa/apm.c"). So we can receive the values in this routine. Alright.


(2b) On all other CPUs, the SMM foundation already accepts the SMI.

The point where it makes sense to start looking is SmmEntryPoint()
[MdeModulePkg/Core/PiSmmCore/PiSmmCore.c].

(2b1) This function first checks whether the SMI is synchronous. The
logic for determining that is based on
"gSmmCorePrivate->CommunicationBuffer" being non-NULL. This field is set
to non-NULL in SmmCommunicationCommunicate() -- see above, in (1).

In other words, the SMI is deemed synchronous if it was initiated with
EFI_SMM_COMMUNICATION_PROTOCOL.Communicate(). In that case, the
HandlerType GUID is extracted from the communication buffer, and passed
to SmiManage(). In turn, SmiManage() locates the SMI handler registered
with the same handler GUID, and delegates the SMI handling to that
specific handler.

This is how the UEFI variable driver is split in two halves:

- in "MdeModulePkg/Universal/Variable/RuntimeDxe/VariableSmm.c", we have
a call to gMmst->MmiHandlerRegister(), with HandlerType =
"gEfiSmmVariableProtocolGuid"

- in
"MdeModulePkg/Universal/Variable/RuntimeDxe/VariableSmmRuntimeDxe.c", we
format communication buffers with the header GUID set to the same
"gEfiSmmVariableProtocolGuid".

Of course, this is what does *not* apply to our use case, as the SMI is
raised by the OS (via an ACPI method), and *not* by a firmware agent
that calls EFI_SMM_COMMUNICATION_PROTOCOL.Communicate().

Therefore, we need to look further in SmmEntryPoint()
[MdeModulePkg/Core/PiSmmCore/PiSmmCore.c].

(2b2) What's left there is only the following:

  //
  // Process Asynchronous SMI sources
  //
  SmiManage (NULL, NULL, NULL, NULL);


So...

- Are we supposed to write a new DXE_SMM_DRIVER for OvmfPkg, and call
gMmst->MmiHandlerRegister() in it, with HandlerType=NULL? (I.e.,
register a "root SMI handler"?)

- And then in the handler, should we read IO ports 0xB2 / 0xB3?

- Also, is that handler where we'd somehow sync up with the hot-plugged
VCPU, and finally call EFI_SMM_CPU_SERVICE_PROTOCOL.SmmAddProcessor()?

- Does it matter what (pre-existent) CPU executes the handler? (IOW,
does it matter what the value of gMmst->CurrentlyExecutingCpu is?)

Thanks,
Laszlo


^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [edk2-devel] [Qemu-devel] [PATCH 1/2] q35: implement 128K SMRAM at default SMBASE address
  2019-10-01 18:03                   ` Laszlo Ersek
@ 2019-10-04 11:31                     ` Igor Mammedov
  2019-10-07  9:44                       ` Laszlo Ersek
  0 siblings, 1 reply; 17+ messages in thread
From: Igor Mammedov @ 2019-10-04 11:31 UTC (permalink / raw)
  To: Laszlo Ersek
  Cc: Chen, Yingwen, Brijesh Singh, devel, phillip.goerl, qemu-devel,
	alex.williamson, Yao, Jiewen, Nakajima, Jun, Kinney, Michael D,
	pbonzini, boris.ostrovsky, rfc, joao.m.martins

On Tue, 1 Oct 2019 20:03:20 +0200
"Laszlo Ersek" <lersek@redhat.com> wrote:

> On 09/30/19 16:22, Yao, Jiewen wrote:
> >   
> >> -----Original Message-----
> >> From: devel@edk2.groups.io <devel@edk2.groups.io> On Behalf Of Igor
> >> Mammedov
> >> Sent: Monday, September 30, 2019 8:37 PM
> >> To: Laszlo Ersek <lersek@redhat.com>  
> 
> >>> To me it looks like we need to figure out how QEMU can make the OS call
> >>> into SMM (in the GPE cpu hotplug handler), passing in parameters and
> >>> such. This would be step (03).
> >>>
> >>> Do you agree?
> >>>
> >>> If so, I'll ask Jiewen about such OS->SMM calls separately, because I
> >>> seem to remember that there used to be an "SMM communication table" of
> >>> sorts, for flexible OS->SMM calls. However, it appears to be deprecated
> >>> lately.  
> >> we can try to resurrect and put over it some kind of protocol
> >> to describe which CPUs to where hotplugged.
> >>
> >> or we could put a parameter into SMI status register (IO port 0xb3)
> >> and the trigger SMI from GPE handler to tell SMI handler that cpu
> >> hotplug happened and then use QEMU's cpu hotplug interface
> >> to enumerate hotplugged CPUs for SMI handler.
> >>
> >> The latter is probably simpler as we won't need to reinvent the wheel
> >> (just reuse the interface that's already in use by GPE handler).  
> 
> Based on "docs/specs/acpi_cpu_hotplug.txt", this seems to boil down to a
> bunch of IO port accesses at base 0x0cd8.
> 
> Is that correct?

yep, you can use it to iterate over hotplugged CPUs.
hw side (QEMU) uses cpu_hotplug_ops as IO write/read handlers
and firmware side (ACPI) scanning for hotplugged CPUs is implemented
in CPU_SCAN_METHOD.

What we can do on QEMU side is to write agreed upon value to command port (0xB2)
from CPU_SCAN_METHOD after taking ctrl_lock but before starting scan loop.
That way firmware will first bring up (from fw pov) all hotplugged CPUs
and then return control to OS to do the same from OS pov.


> 
> > [Jiewen] The PI specification Volume 4 - SMM defines EFI_MM_COMMUNICATION_PROTOCOL.Communicate() - It can be used to communicate between OS and SMM handler. But it requires the runtime protocol call. I am not sure how OS loader passes this information to OS kernel.
> > 
> > As such, I think using ACPI SCI/GPE -> software SMI handler is an easier way to achieve this. I also recommend this way.
> > For parameter passing, we can use 1) Port B2 (1 byte), 2) Port B3 (1 byte), 3) chipset scratch register (4 bytes or 8 bytes, based upon scratch register size), 4) ACPI NVS OPREGION, if the data structure is complicated.  
> 
> I'm confused about the details. In two categories:
> (1) what values to use,
> (2) how those values are passed.
> 
> Assume a CPU is hotpluged, QEMU injects an SCI, and the ACPI GPE handler
> in the OS -- which also originates from QEMU -- writes a particular byte
> to the Data port (0xB3), and then to the Control port (0xB2),
> broadcasting an SMI.
> 
> (1) What values to use.
> 
> Note that values ICH9_APM_ACPI_ENABLE (2) and ICH9_APM_ACPI_DISABLE (3)
> are already reserved in QEMU for IO port 0xB2, for different purposes.
> So we can't use those in the GPE handler.

SeaBIOS writes 0x00 into command port, but it seems that's taken by
EFI_SMM_COMMUNICATION_PROTOCOL. So we can use the next unused value
(let's say 0x4). We probably don't have to use the status port or
EFI_SMM_COMMUNICATION_PROTOCOL, since the value written into 0xB2
is sufficient to distinguish hotplug event.

> Furthermore, OVMF's EFI_SMM_CONTROL2_PROTOCOL.Trigger() implementation
> (in "OvmfPkg/SmmControl2Dxe/SmmControl2Dxe.c") writes 0 to both ports,
> as long as the caller does not specify them.
> 
>   IoWrite8 (ICH9_APM_STS, DataPort    == NULL ? 0 : *DataPort);
>   IoWrite8 (ICH9_APM_CNT, CommandPort == NULL ? 0 : *CommandPort);
> 
> And, there is only one Trigger() call site in edk2: namely in
> "MdeModulePkg/Core/PiSmmCore/PiSmmIpl.c", in the
> SmmCommunicationCommunicate() function.
> 
> (That function implements EFI_SMM_COMMUNICATION_PROTOCOL.Communicate().)
> This call site passes NULL for both DataPort and CommandPort.
> 
> Yet further, EFI_SMM_COMMUNICATION_PROTOCOL.Communicate() is used for
> example by the UEFI variable driver, for talking between the
> unprivileged (runtime DXE) and privileged (SMM) half.
> 
> As a result, all of the software SMIs currently in use in OVMF, related
> to actual firmware services, write 0 to both ports.
> 
> I guess we can choose new values, as long as we avoid 2 and 3 for the
> control port (0xB2), because those are reserved in QEMU -- see
> ich9_apm_ctrl_changed() in "hw/isa/lpc_ich9.c".
> 
> 
> (2) How the parameters are passed.
> 
> 
> (2a) For the new CPU, the SMI remains pending, until it gets an
> INIT-SIPI-SIPI from one of the previously plugged CPUs (most likely, the
> BSP). At that point, the new CPU will execute the "initial SMI handler
> for hotplugged CPUs", at the default SMBASE.
> 
> That's a routine we'll have to write in assembly, from zero. In this
> routine, we can read back IO ports 0xB2 and 0xB3. And QEMU will be happy
> to provide the values last written (see apm_ioport_readb() in
> "hw/isa/apm.c"). So we can receive the values in this routine. Alright.

Potentially we can avoid writing a custom SMI handler,
what do you think about following workflow:

on system boot after initial CPUs relocation, firmware set NOP SMI handler
at default SMBASE.
Then as reaction to GPE triggered SMI (on cpu hotplug), after SMI rendezvous,
a host cpu reads IO port 0xB2 and does hotplugged CPUs enumeration.

  a) assuming we allow hotplug only in case of negotiated SMI broadcast
     host CPU shoots down all in-flight INIT/SIPI/SIPI for hotplugged CPUs
     to avoid race within relocation handler.

 After that host CPU in loop

  b) it prepares/initializes necessary CPU structures for a hotplugged
     CPU if necessary and replaces NOP SMI handler with the relocation
     SMI handler that is used during system boot.
     
  c) a host CPU sends NOP INIT/SIPI/SIPI to the hotplugged CPU

  d) the woken up hotplugged CPU, jumps to default SMBASE and
     executes hotplug relocation handler.

  e) after the hotplugged CPU  is relocated and if there are more
     hotplugged CPUs, a host CPU repeats b-d steps for the next
     hotplugged CPU.

  f) after all CPUs are relocated, restore NOP SMI handler at default
     SMBASE.

> (2b) On all other CPUs, the SMM foundation already accepts the SMI.
> 
> The point where it makes sense to start looking is SmmEntryPoint()
> [MdeModulePkg/Core/PiSmmCore/PiSmmCore.c].
> 
> (2b1) This function first checks whether the SMI is synchronous. The
> logic for determining that is based on
> "gSmmCorePrivate->CommunicationBuffer" being non-NULL. This field is set
> to non-NULL in SmmCommunicationCommunicate() -- see above, in (1).
> 
> In other words, the SMI is deemed synchronous if it was initiated with
> EFI_SMM_COMMUNICATION_PROTOCOL.Communicate(). In that case, the
> HandlerType GUID is extracted from the communication buffer, and passed
> to SmiManage(). In turn, SmiManage() locates the SMI handler registered
> with the same handler GUID, and delegates the SMI handling to that
> specific handler.
> 
> This is how the UEFI variable driver is split in two halves:
> 
> - in "MdeModulePkg/Universal/Variable/RuntimeDxe/VariableSmm.c", we have
> a call to gMmst->MmiHandlerRegister(), with HandlerType =
> "gEfiSmmVariableProtocolGuid"
> 
> - in
> "MdeModulePkg/Universal/Variable/RuntimeDxe/VariableSmmRuntimeDxe.c", we
> format communication buffers with the header GUID set to the same
> "gEfiSmmVariableProtocolGuid".
> 
> Of course, this is what does *not* apply to our use case, as the SMI is
> raised by the OS (via an ACPI method), and *not* by a firmware agent
> that calls EFI_SMM_COMMUNICATION_PROTOCOL.Communicate().
> 
> Therefore, we need to look further in SmmEntryPoint()
> [MdeModulePkg/Core/PiSmmCore/PiSmmCore.c].
> 
> (2b2) What's left there is only the following:
> 
>   //
>   // Process Asynchronous SMI sources
>   //
>   SmiManage (NULL, NULL, NULL, NULL);
> 
> 
> So...
> 
> - Are we supposed to write a new DXE_SMM_DRIVER for OvmfPkg, and call
> gMmst->MmiHandlerRegister() in it, with HandlerType=NULL? (I.e.,
> register a "root SMI handler"?)
> 
> - And then in the handler, should we read IO ports 0xB2 / 0xB3?
> 
> - Also, is that handler where we'd somehow sync up with the hot-plugged
> VCPU, and finally call EFI_SMM_CPU_SERVICE_PROTOCOL.SmmAddProcessor()?
> 
> - Does it matter what (pre-existent) CPU executes the handler? (IOW,
> does it matter what the value of gMmst->CurrentlyExecutingCpu is?)
> 
> Thanks,
> Laszlo
> 
> -=-=-=-=-=-=-=-=-=-=-=-
> Groups.io Links: You receive all messages sent to this group.
> 
> View/Reply Online (#48353): https://edk2.groups.io/g/devel/message/48353
> Mute This Topic: https://groups.io/mt/34201782/1958639
> Group Owner: devel+owner@edk2.groups.io
> Unsubscribe: https://edk2.groups.io/g/devel/unsub  [imammedo@redhat.com]
> -=-=-=-=-=-=-=-=-=-=-=-
> 



^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [edk2-devel] [Qemu-devel] [PATCH 1/2] q35: implement 128K SMRAM at default SMBASE address
  2019-10-04 11:31                     ` Igor Mammedov
@ 2019-10-07  9:44                       ` Laszlo Ersek
  0 siblings, 0 replies; 17+ messages in thread
From: Laszlo Ersek @ 2019-10-07  9:44 UTC (permalink / raw)
  To: Igor Mammedov
  Cc: Chen, Yingwen, Brijesh Singh, devel, phillip.goerl, qemu-devel,
	alex.williamson, Yao, Jiewen, Nakajima, Jun, Kinney, Michael D,
	pbonzini, boris.ostrovsky, rfc, joao.m.martins

On 10/04/19 13:31, Igor Mammedov wrote:
> On Tue, 1 Oct 2019 20:03:20 +0200
> "Laszlo Ersek" <lersek@redhat.com> wrote:

>> (1) What values to use.

> SeaBIOS writes 0x00 into command port, but it seems that's taken by
> EFI_SMM_COMMUNICATION_PROTOCOL. So we can use the next unused value
> (lets say 0x4). We probably don't have to use status port or 
> EFI_SMM_COMMUNICATION_PROTOCOL, since the value of written into 0xB2
> is sufficient to distinguish hotplug event.

Thanks. Can you please write a QEMU patch for the ACPI generator such
that hotplugging a VCPU writes value 4 to IO port 0xB2?

That will allow me to experiment with OVMF.

(I can experiment with some other parts in edk2 even before that.)

>> (2) How the parameters are passed.
>>
>>
>> (2a) For the new CPU, the SMI remains pending, until it gets an
>> INIT-SIPI-SIPI from one of the previously plugged CPUs (most likely, the
>> BSP). At that point, the new CPU will execute the "initial SMI handler
>> for hotplugged CPUs", at the default SMBASE.
>>
>> That's a routine we'll have to write in assembly, from zero. In this
>> routine, we can read back IO ports 0xB2 and 0xB3. And QEMU will be happy
>> to provide the values last written (see apm_ioport_readb() in
>> "hw/isa/apm.c"). So we can receive the values in this routine. Alright.
> 
> Potentially we can avoid writing a custom SMI handler,
> what do you think about following workflow:
> 
> on system boot after initial CPUs relocation, firmware set NOP SMI handler
> at default SMBASE.
> Then as reaction to GPE triggered SMI (on cpu hotplug), after SMI rendezvous,
> a host cpu reads IO port 0xB2 and does hotplugged CPUs enumeration.
> 
>   a) assuming we allow hotplug only in case of negotiated SMI broadcast
>      host CPU shoots down all in-flight INIT/SIPI/SIPI for hotplugged CPUs
>      to avoid race within relocation handler.

How is that "shootdown" possible?

>  After that host CPU in loop
> 
>   b) it prepares/initializes necessary CPU structures for a hotplugged
>      CPU if necessary and replaces NOP SMI handler with the relocation
>      SMI handler that is used during system boot.
>      
>   c) a host CPU sends NOP INIT/SIPI/SIPI to the hotplugged CPU
> 
>   d) the woken up hotplugged CPU, jumps to default SMBASE and
>      executes hotplug relocation handler.
> 
>   e) after the hotplugged CPU  is relocated and if there are more
>      hotplugged CPUs, a host CPU repeats b-d steps for the next
>      hotplugged CPU.
> 
>   f) after all CPUs are relocated, restore NOP SMI handler at default
>      SMBASE.
> 

Thanks
Laszlo


^ permalink raw reply	[flat|nested] 17+ messages in thread

end of thread, other threads:[~2019-10-07  9:45 UTC | newest]

Thread overview: 17+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-09-17 13:07 [Qemu-devel] [PATCH 0/2] q35: mch: allow to lock down 128K RAM at default SMBASE address Igor Mammedov
2019-09-17 13:07 ` [Qemu-devel] [PATCH 1/2] q35: implement 128K SMRAM " Igor Mammedov
2019-09-19 17:02   ` Laszlo Ersek
2019-09-20  8:28     ` [edk2-devel] " Igor Mammedov
2019-09-20  9:28       ` Laszlo Ersek
2019-09-23 18:35         ` Laszlo Ersek
2019-09-24 11:19           ` Igor Mammedov
2019-09-30 11:51             ` Laszlo Ersek
2019-09-30 12:36               ` Igor Mammedov
2019-09-30 14:22                 ` Yao, Jiewen
2019-10-01 18:03                   ` Laszlo Ersek
2019-10-04 11:31                     ` Igor Mammedov
2019-10-07  9:44                       ` Laszlo Ersek
2019-09-24 11:47         ` Paolo Bonzini
2019-09-17 13:07 ` [Qemu-devel] [PATCH 2/2] tests: q35: MCH: add default SMBASE SMRAM lock test Igor Mammedov
2019-09-17 15:23 ` [Qemu-devel] [edk2-devel] [PATCH 0/2] q35: mch: allow to lock down 128K RAM at default SMBASE address no-reply
2019-09-17 15:24 ` no-reply

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).