* [PATCH 0/2] tpm_emulator: Signal swtpm to again lock storage
From: Stefan Berger @ 2022-09-12 17:47 UTC
  To: qemu-devel, marcandre.lureau; +Cc: Stefan Berger

Swtpm has been extended to release the lock on the storage its state is
written to once the last of its state blobs has been migrated out. Signal
swtpm to lock the storage again upon migration fall-back. The explicit
signal lets swtpm reacquire the lock earlier than it otherwise would;
without it, swtpm has to wait for the next TPM command from the VM.

Releasing the lock on the storage is necessary for setups where the storage
holding the TPM state is shared between hosts.
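
As an illustration of what that explicit signal looks like on swtpm's
control channel, here is a minimal, self-contained sketch. The framing
(a big-endian command code followed by the request payload, with the
response starting with a big-endian ptm_res) follows tpm_emulator_ctrlcmd();
the socket path, helper name and error handling are illustrative only,
and the authoritative code is in patch 2.

    /* Sketch: ask swtpm to re-take the storage lock (CMD_LOCK_STORAGE). */
    #include <arpa/inet.h>      /* htonl(), ntohl() */
    #include <stdint.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <sys/un.h>
    #include <unistd.h>

    #define CMD_LOCK_STORAGE 0x13   /* from tpm_ioctl.h (patch 1) */

    static int relock_storage(const char *ctrl_path, uint32_t retries)
    {
        struct sockaddr_un addr = { .sun_family = AF_UNIX };
        /* command code + ptm_lockstorage.u.req.retries, both big-endian */
        uint32_t req[2] = { htonl(CMD_LOCK_STORAGE), htonl(retries) };
        uint32_t result;
        int fd;

        strncpy(addr.sun_path, ctrl_path, sizeof(addr.sun_path) - 1);
        fd = socket(AF_UNIX, SOCK_STREAM, 0);
        if (fd < 0) {
            return -1;
        }
        if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
            write(fd, req, sizeof(req)) != sizeof(req) ||
            read(fd, &result, sizeof(result)) != sizeof(result)) {
            close(fd);
            return -1;
        }
        close(fd);
        /* response is ptm_lockstorage.u.resp.tpm_result; 0 means success */
        return ntohl(result) == 0 ? 0 : -1;
    }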

Regards,
   Stefan

v3:
 - extended timeout to 3 seconds

Stefan Berger (2):
  tpm_emulator: Use latest tpm_ioctl.h from swtpm project
  tpm_emulator: Have swtpm relock storage upon migration fall-back

 backends/tpm/tpm_emulator.c | 60 ++++++++++++++++++++++-
 backends/tpm/tpm_ioctl.h    | 96 +++++++++++++++++++++++++++++--------
 backends/tpm/trace-events   |  2 +
 3 files changed, 137 insertions(+), 21 deletions(-)

-- 
2.37.2




* [PATCH 1/2] tpm_emulator: Use latest tpm_ioctl.h from swtpm project
From: Stefan Berger @ 2022-09-12 17:47 UTC
  To: qemu-devel, marcandre.lureau; +Cc: Stefan Berger

Use the latest tpm_ioctl.h from the upstream swtpm project.

Signed-off-by: Stefan Berger <stefanb@linux.ibm.com>
Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
---
 backends/tpm/tpm_ioctl.h | 96 +++++++++++++++++++++++++++++++---------
 1 file changed, 76 insertions(+), 20 deletions(-)

diff --git a/backends/tpm/tpm_ioctl.h b/backends/tpm/tpm_ioctl.h
index d67bf0283b..e506ef5160 100644
--- a/backends/tpm/tpm_ioctl.h
+++ b/backends/tpm/tpm_ioctl.h
@@ -5,10 +5,15 @@
  *
  * This file is licensed under the terms of the 3-clause BSD license
  */
+#ifndef _TPM_IOCTL_H_
+#define _TPM_IOCTL_H_
 
-#ifndef TPM_IOCTL_H
-#define TPM_IOCTL_H
+#if defined(__CYGWIN__)
+# define __USE_LINUX_IOCTL_DEFS
+#endif
 
+#include <stdint.h>
+#include <sys/types.h>
 #ifndef _WIN32
 #include <sys/uio.h>
 #include <sys/ioctl.h>
@@ -196,6 +201,48 @@ struct ptm_setbuffersize {
     } u;
 };
 
+#define PTM_GETINFO_SIZE (3 * 1024)
+/*
+ * PTM_GET_INFO: Get info about the TPM implementation (from libtpms)
+ *
+ * This request allows to indirectly call TPMLIB_GetInfo(flags) and
+ * retrieve information from libtpms.
+ * Only one transaction is currently necessary for returning results
+ * to a client. Therefore, totlength and length will be the same if
+ * offset is 0.
+ */
+struct ptm_getinfo {
+    union {
+        struct {
+            uint64_t flags;
+            uint32_t offset;      /* offset from where to read */
+            uint32_t pad;         /* 32 bit arch */
+        } req; /* request */
+        struct {
+            ptm_res tpm_result;
+            uint32_t totlength;
+            uint32_t length;
+            char buffer[PTM_GETINFO_SIZE];
+        } resp; /* response */
+    } u;
+};
+
+#define SWTPM_INFO_TPMSPECIFICATION ((uint64_t)1 << 0)
+#define SWTPM_INFO_TPMATTRIBUTES    ((uint64_t)1 << 1)
+
+/*
+ * PTM_LOCK_STORAGE: Lock the storage and retry n times
+ */
+struct ptm_lockstorage {
+    union {
+        struct {
+            uint32_t retries; /* number of retries */
+        } req; /* request */
+        struct {
+            ptm_res tpm_result;
+        } resp; /* reponse */
+    } u;
+};
 
 typedef uint64_t ptm_cap;
 typedef struct ptm_est ptm_est;
@@ -207,6 +254,8 @@ typedef struct ptm_getstate ptm_getstate;
 typedef struct ptm_setstate ptm_setstate;
 typedef struct ptm_getconfig ptm_getconfig;
 typedef struct ptm_setbuffersize ptm_setbuffersize;
+typedef struct ptm_getinfo ptm_getinfo;
+typedef struct ptm_lockstorage ptm_lockstorage;
 
 /* capability flags returned by PTM_GET_CAPABILITY */
 #define PTM_CAP_INIT               (1)
@@ -223,6 +272,9 @@ typedef struct ptm_setbuffersize ptm_setbuffersize;
 #define PTM_CAP_GET_CONFIG         (1 << 11)
 #define PTM_CAP_SET_DATAFD         (1 << 12)
 #define PTM_CAP_SET_BUFFERSIZE     (1 << 13)
+#define PTM_CAP_GET_INFO           (1 << 14)
+#define PTM_CAP_SEND_COMMAND_HEADER (1 << 15)
+#define PTM_CAP_LOCK_STORAGE       (1 << 16)
 
 #ifndef _WIN32
 enum {
@@ -243,6 +295,8 @@ enum {
     PTM_GET_CONFIG         = _IOR('P', 14, ptm_getconfig),
     PTM_SET_DATAFD         = _IOR('P', 15, ptm_res),
     PTM_SET_BUFFERSIZE     = _IOWR('P', 16, ptm_setbuffersize),
+    PTM_GET_INFO           = _IOWR('P', 17, ptm_getinfo),
+    PTM_LOCK_STORAGE       = _IOWR('P', 18, ptm_lockstorage),
 };
 #endif
 
@@ -257,23 +311,25 @@ enum {
  * and ptm_set_state:u.req.data) are 0xffffffff.
  */
 enum {
-    CMD_GET_CAPABILITY = 1,
-    CMD_INIT,
-    CMD_SHUTDOWN,
-    CMD_GET_TPMESTABLISHED,
-    CMD_SET_LOCALITY,
-    CMD_HASH_START,
-    CMD_HASH_DATA,
-    CMD_HASH_END,
-    CMD_CANCEL_TPM_CMD,
-    CMD_STORE_VOLATILE,
-    CMD_RESET_TPMESTABLISHED,
-    CMD_GET_STATEBLOB,
-    CMD_SET_STATEBLOB,
-    CMD_STOP,
-    CMD_GET_CONFIG,
-    CMD_SET_DATAFD,
-    CMD_SET_BUFFERSIZE,
+    CMD_GET_CAPABILITY = 1,   /* 0x01 */
+    CMD_INIT,                 /* 0x02 */
+    CMD_SHUTDOWN,             /* 0x03 */
+    CMD_GET_TPMESTABLISHED,   /* 0x04 */
+    CMD_SET_LOCALITY,         /* 0x05 */
+    CMD_HASH_START,           /* 0x06 */
+    CMD_HASH_DATA,            /* 0x07 */
+    CMD_HASH_END,             /* 0x08 */
+    CMD_CANCEL_TPM_CMD,       /* 0x09 */
+    CMD_STORE_VOLATILE,       /* 0x0a */
+    CMD_RESET_TPMESTABLISHED, /* 0x0b */
+    CMD_GET_STATEBLOB,        /* 0x0c */
+    CMD_SET_STATEBLOB,        /* 0x0d */
+    CMD_STOP,                 /* 0x0e */
+    CMD_GET_CONFIG,           /* 0x0f */
+    CMD_SET_DATAFD,           /* 0x10 */
+    CMD_SET_BUFFERSIZE,       /* 0x11 */
+    CMD_GET_INFO,             /* 0x12 */
+    CMD_LOCK_STORAGE,         /* 0x13 */
 };
 
-#endif /* TPM_IOCTL_H */
+#endif /* _TPM_IOCTL_H_ */
-- 
2.37.2
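
A usage note on the new ptm_getinfo request added above: the sketch
below shows how a client is expected to fill the request and read the
response (libtpms returns the information as a text string). The helper
names are illustrative; field byte order follows the convention of the
other control requests. QEMU itself would use cpu_to_be64()/be32_to_cpu();
the glibc helpers are used here only to keep the sketch self-contained.

    #include <endian.h>     /* htobe64(), be32toh() (glibc) */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include "tpm_ioctl.h"

    /* Fill a CMD_GET_INFO request asking libtpms for the TPM
     * specification information (flags value from tpm_ioctl.h). */
    static void prepare_getinfo(ptm_getinfo *gi)
    {
        memset(gi, 0, sizeof(*gi));
        gi->u.req.flags  = htobe64(SWTPM_INFO_TPMSPECIFICATION);
        gi->u.req.offset = 0;            /* read from the start */
    }

    /* The response carries totlength/length plus the data in buffer;
     * a single transaction suffices when offset is 0. */
    static void print_getinfo_result(const ptm_getinfo *gi)
    {
        uint32_t res = be32toh(gi->u.resp.tpm_result);
        uint32_t len = be32toh(gi->u.resp.length);

        if (res == 0) {
            printf("%.*s\n", (int)len, gi->u.resp.buffer);
        }
    }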




* [PATCH 2/2] tpm_emulator: Have swtpm relock storage upon migration fall-back
From: Stefan Berger @ 2022-09-12 17:47 UTC
  To: qemu-devel, marcandre.lureau; +Cc: Stefan Berger

Swtpm may release the lock on its storage once the last of its state
blobs has been migrated out. If the VM migration then fails, QEMU needs
to notify swtpm that it should take the lock again; without that
notification swtpm can only relock once it has received the first TPM
command from the VM.

Only send the lock command if swtpm supports it. An swtpm that does not
support the locking command will not have released the lock in the first
place (and does not support shared-storage setups), because the ability
to release the lock when the state blobs are migrated out and the lock
command itself were added to swtpm together.

If QEMU sends the lock command while the storage is already locked, no
error is reported.

If swtpm does not receive the lock command (e.g. from an older version
of QEMU), it locks the storage once the first TPM command is received,
so sending the lock command is an optimization.
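
For reference, "supports it" means the PTM_CAP_LOCK_STORAGE bit is set
in the capability mask retrieved via CMD_GET_CAPABILITY; the backend
checks this with its TPM_EMULATOR_IMPLEMENTS_ALL_CAPS() macro. A
standalone sketch of that check (the function form and names below are
illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t ptm_cap;               /* as in tpm_ioctl.h */
    #define PTM_CAP_LOCK_STORAGE (1 << 16)  /* as in tpm_ioctl.h */

    /* All required capability bits must be advertised by swtpm. */
    static bool implements_all_caps(ptm_cap caps, ptm_cap required)
    {
        return (caps & required) == required;
    }

    static int maybe_lock_storage(ptm_cap cached_caps)
    {
        if (!implements_all_caps(cached_caps, PTM_CAP_LOCK_STORAGE)) {
            return 0;   /* older swtpm: it never released the lock */
        }
        /* ... send CMD_LOCK_STORAGE as in tpm_emulator_lock_storage() ... */
        return 0;
    }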

Signed-off-by: Stefan Berger <stefanb@linux.ibm.com>
Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
---
 backends/tpm/tpm_emulator.c | 60 ++++++++++++++++++++++++++++++++++++-
 backends/tpm/trace-events   |  2 ++
 2 files changed, 61 insertions(+), 1 deletion(-)

diff --git a/backends/tpm/tpm_emulator.c b/backends/tpm/tpm_emulator.c
index 87d061e9bb..bb883fe7d2 100644
--- a/backends/tpm/tpm_emulator.c
+++ b/backends/tpm/tpm_emulator.c
@@ -34,6 +34,7 @@
 #include "io/channel-socket.h"
 #include "sysemu/tpm_backend.h"
 #include "sysemu/tpm_util.h"
+#include "sysemu/runstate.h"
 #include "tpm_int.h"
 #include "tpm_ioctl.h"
 #include "migration/blocker.h"
@@ -81,6 +82,9 @@ struct TPMEmulator {
     unsigned int established_flag_cached:1;
 
     TPMBlobBuffers state_blobs;
+
+    bool relock_storage;
+    VMChangeStateEntry *vmstate;
 };
 
 struct tpm_error {
@@ -302,6 +306,35 @@ static int tpm_emulator_stop_tpm(TPMBackend *tb)
     return 0;
 }
 
+static int tpm_emulator_lock_storage(TPMEmulator *tpm_emu)
+{
+    ptm_lockstorage pls;
+
+    if (!TPM_EMULATOR_IMPLEMENTS_ALL_CAPS(tpm_emu, PTM_CAP_LOCK_STORAGE)) {
+        trace_tpm_emulator_lock_storage_cmd_not_supt();
+        return 0;
+    }
+
+    /* give failing side 300 * 10ms time to release lock */
+    pls.u.req.retries = cpu_to_be32(300);
+    if (tpm_emulator_ctrlcmd(tpm_emu, CMD_LOCK_STORAGE, &pls,
+                             sizeof(pls.u.req), sizeof(pls.u.resp)) < 0) {
+        error_report("tpm-emulator: Could not lock storage within 3 seconds: "
+                     "%s", strerror(errno));
+        return -1;
+    }
+
+    pls.u.resp.tpm_result = be32_to_cpu(pls.u.resp.tpm_result);
+    if (pls.u.resp.tpm_result != 0) {
+        error_report("tpm-emulator: TPM result for CMD_LOCK_STORAGE: 0x%x %s",
+                     pls.u.resp.tpm_result,
+                     tpm_emulator_strerror(pls.u.resp.tpm_result));
+        return -1;
+    }
+
+    return 0;
+}
+
 static int tpm_emulator_set_buffer_size(TPMBackend *tb,
                                         size_t wanted_size,
                                         size_t *actual_size)
@@ -843,13 +876,34 @@ static int tpm_emulator_pre_save(void *opaque)
 {
     TPMBackend *tb = opaque;
     TPMEmulator *tpm_emu = TPM_EMULATOR(tb);
+    int ret;
 
     trace_tpm_emulator_pre_save();
 
     tpm_backend_finish_sync(tb);
 
     /* get the state blobs from the TPM */
-    return tpm_emulator_get_state_blobs(tpm_emu);
+    ret = tpm_emulator_get_state_blobs(tpm_emu);
+
+    tpm_emu->relock_storage = ret == 0;
+
+    return ret;
+}
+
+static void tpm_emulator_vm_state_change(void *opaque, bool running,
+                                         RunState state)
+{
+    TPMBackend *tb = opaque;
+    TPMEmulator *tpm_emu = TPM_EMULATOR(tb);
+
+    trace_tpm_emulator_vm_state_change(running, state);
+
+    if (!running || state != RUN_STATE_RUNNING || !tpm_emu->relock_storage) {
+        return;
+    }
+
+    /* lock storage after migration fall-back */
+    tpm_emulator_lock_storage(tpm_emu);
 }
 
 /*
@@ -911,6 +965,9 @@ static void tpm_emulator_inst_init(Object *obj)
     tpm_emu->options = g_new0(TPMEmulatorOptions, 1);
     tpm_emu->cur_locty_number = ~0;
     qemu_mutex_init(&tpm_emu->mutex);
+    tpm_emu->vmstate =
+        qemu_add_vm_change_state_handler(tpm_emulator_vm_state_change,
+                                         tpm_emu);
 
     vmstate_register(NULL, VMSTATE_INSTANCE_ID_ANY,
                      &vmstate_tpm_emulator, obj);
@@ -960,6 +1017,7 @@ static void tpm_emulator_inst_finalize(Object *obj)
     tpm_sized_buffer_reset(&state_blobs->savestate);
 
     qemu_mutex_destroy(&tpm_emu->mutex);
+    qemu_del_vm_change_state_handler(tpm_emu->vmstate);
 
     vmstate_unregister(NULL, &vmstate_tpm_emulator, obj);
 }
diff --git a/backends/tpm/trace-events b/backends/tpm/trace-events
index 3298766dd7..1ecef42a07 100644
--- a/backends/tpm/trace-events
+++ b/backends/tpm/trace-events
@@ -20,6 +20,8 @@ tpm_emulator_set_buffer_size(uint32_t buffersize, uint32_t minsize, uint32_t max
 tpm_emulator_startup_tpm_resume(bool is_resume, size_t buffersize) "is_resume: %d, buffer size: %zu"
 tpm_emulator_get_tpm_established_flag(uint8_t flag) "got established flag: %d"
 tpm_emulator_cancel_cmd_not_supt(void) "Backend does not support CANCEL_TPM_CMD"
+tpm_emulator_lock_storage_cmd_not_supt(void) "Backend does not support LOCK_STORAGE"
+tpm_emulator_vm_state_change(int running, int state) "state change to running %d state %d"
 tpm_emulator_handle_device_opts_tpm12(void) "TPM Version 1.2"
 tpm_emulator_handle_device_opts_tpm2(void) "TPM Version 2"
 tpm_emulator_handle_device_opts_unspec(void) "TPM Version Unspecified"
-- 
2.37.2




