From: Claudio Imbrenda <imbrenda@linux.ibm.com>
To: pbonzini@redhat.com
Cc: kvm@vger.kernel.org, borntraeger@de.ibm.com, frankja@linux.ibm.com
Subject: [kvm-unit-tests GIT PULL 06/13] s390x: lib: Extend UV library with PV guest management
Date: Mon, 17 Jan 2022 17:59:42 +0100
Message-ID: <20220117165949.75964-7-imbrenda@linux.ibm.com>
In-Reply-To: <20220117165949.75964-1-imbrenda@linux.ibm.com>

From: Janosch Frank <frankja@linux.ibm.com>

Let's extend the UV library with guest 1 code so we will be able to
manage protected VMs in the future.

Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
---
 lib/s390x/asm/uv.h |  14 +++++
 lib/s390x/sie.h    |   3 ++
 lib/s390x/uv.h     |   7 +++
 lib/s390x/uv.c     | 128 +++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 152 insertions(+)
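
For reference (not part of the patch itself), the new interface is
meant to be used roughly in this order. This is a minimal,
illustrative sketch: it assumes a struct vm already set up by the
existing sie library helpers and uses the sie() runner from
lib/s390x/sie.h; the run_pv_guest() wrapper itself is hypothetical:

	#include <sie.h>
	#include <uv.h>

	static void run_pv_guest(struct vm *vm, uint64_t img_addr,
				 uint64_t img_len, uint64_t tweak)
	{
		uv_init();		/* donate storage; may only be called once */
		uv_create_guest(vm);	/* create secure config and vCPU, fix up sblk */
		uv_unpack(vm, img_addr, img_len, tweak); /* decrypt the guest image */
		uv_verify_load(vm);	/* verify the image and allow the CPU to run */
		sie(vm);		/* run the now-protected guest */
		uv_destroy_guest(vm);	/* tear down vCPU and configuration */
	}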

diff --git a/lib/s390x/asm/uv.h b/lib/s390x/asm/uv.h
index 6e331211..97c90e81 100644
--- a/lib/s390x/asm/uv.h
+++ b/lib/s390x/asm/uv.h
@@ -355,4 +355,18 @@ static inline int uv_set_se_hdr(uint64_t handle, void *hdr, size_t len)
 	return uv_call(0, (uint64_t)&uvcb);
 }
 
+static inline int uv_unp_page(uint64_t handle, uint64_t gaddr, uint64_t tweak1, uint64_t tweak2)
+{
+	struct uv_cb_unp uvcb = {
+		.header.cmd = UVC_CMD_UNPACK_IMG,
+		.header.len = sizeof(uvcb),
+		.guest_handle = handle,
+		.gaddr = gaddr,
+		.tweak[0] = tweak1,
+		.tweak[1] = tweak2,
+	};
+
+	return uv_call(0, (uint64_t)&uvcb);
+}
+
 #endif
diff --git a/lib/s390x/sie.h b/lib/s390x/sie.h
index 1a12faa7..6d209793 100644
--- a/lib/s390x/sie.h
+++ b/lib/s390x/sie.h
@@ -203,6 +203,9 @@ union {
 struct vm_uv {
 	uint64_t vm_handle;
 	uint64_t vcpu_handle;
+	void *conf_base_stor;
+	void *conf_var_stor;
+	void *cpu_stor;
 };
 
 struct vm_save_regs {
diff --git a/lib/s390x/uv.h b/lib/s390x/uv.h
index 2b23407a..6ffe537a 100644
--- a/lib/s390x/uv.h
+++ b/lib/s390x/uv.h
@@ -2,9 +2,16 @@
 #ifndef _S390X_UV_H_
 #define _S390X_UV_H_
 
+#include <sie.h>
+
 bool uv_os_is_guest(void);
 bool uv_os_is_host(void);
 bool uv_query_test_call(unsigned int nr);
+void uv_init(void);
 int uv_setup(void);
+void uv_create_guest(struct vm *vm);
+void uv_destroy_guest(struct vm *vm);
+int uv_unpack(struct vm *vm, uint64_t addr, uint64_t len, uint64_t tweak);
+void uv_verify_load(struct vm *vm);
 
 #endif /* UV_H */
diff --git a/lib/s390x/uv.c b/lib/s390x/uv.c
index c5c69c47..6fe11dff 100644
--- a/lib/s390x/uv.c
+++ b/lib/s390x/uv.c
@@ -17,11 +17,14 @@
 #include <asm/facility.h>
 #include <asm/uv.h>
 #include <uv.h>
+#include <sie.h>
 
 static struct uv_cb_qui uvcb_qui = {
 	.header.cmd = UVC_CMD_QUI,
 	.header.len = sizeof(uvcb_qui),
 };
+static uint64_t uv_init_mem;
+
 
 bool uv_os_is_guest(void)
 {
@@ -54,3 +57,128 @@ int uv_setup(void)
 	assert(uvcb_qui.header.rc == 1 || uvcb_qui.header.rc == 0x100);
 	return 1;
 }
+
+void uv_init(void)
+{
+	struct uv_cb_init uvcb_init = {
+		.header.len = sizeof(uvcb_init),
+		.header.cmd = UVC_CMD_INIT_UV,
+	};
+	static bool initialized;
+	int cc;
+
+	/* Let's not do this twice */
+	assert(!initialized);
+	/* Query is done on initialization but let's check anyway */
+	assert(uvcb_qui.header.rc == 1 || uvcb_qui.header.rc == 0x100);
+
+	/* Donated storage needs to be above 2GB and aligned to 1MB */
+	uv_init_mem = (uint64_t)memalign_pages_flags(HPAGE_SIZE, uvcb_qui.uv_base_stor_len, AREA_NORMAL);
+	uvcb_init.stor_origin = uv_init_mem;
+	uvcb_init.stor_len = uvcb_qui.uv_base_stor_len;
+
+	cc = uv_call(0, (uint64_t)&uvcb_init);
+	assert(cc == 0);
+	initialized = true;
+}
+
+void uv_create_guest(struct vm *vm)
+{
+	struct uv_cb_cgc uvcb_cgc = {
+		.header.cmd = UVC_CMD_CREATE_SEC_CONF,
+		.header.len = sizeof(uvcb_cgc),
+	};
+	struct uv_cb_csc uvcb_csc = {
+		.header.len = sizeof(uvcb_csc),
+		.header.cmd = UVC_CMD_CREATE_SEC_CPU,
+		.state_origin = (uint64_t)vm->sblk,
+		.num = 0,
+	};
+	unsigned long vsize;
+	int cc;
+
+	uvcb_cgc.guest_stor_origin = vm->sblk->mso;
+	uvcb_cgc.guest_stor_len = vm->sblk->msl;
+
+	/* Config allocation */
+	vsize = uvcb_qui.conf_base_virt_stor_len +
+		((uvcb_cgc.guest_stor_len / HPAGE_SIZE) * uvcb_qui.conf_virt_var_stor_len);
+
+	vm->uv.conf_base_stor = memalign_pages_flags(PAGE_SIZE * 4, uvcb_qui.conf_base_phys_stor_len, 0);
+	/*
+	 * This allocation needs to be below the maximum guest storage
+	 * address, so let's simply put it into physical memory
+	 */
+	vm->uv.conf_var_stor = memalign_pages_flags(PAGE_SIZE, vsize, 0);
+	uvcb_cgc.conf_base_stor_origin = (uint64_t)vm->uv.conf_base_stor;
+	uvcb_cgc.conf_var_stor_origin = (uint64_t)vm->uv.conf_var_stor;
+
+	/* CPU allocation */
+	vm->uv.cpu_stor = memalign_pages_flags(PAGE_SIZE, uvcb_qui.cpu_stor_len, 0);
+	uvcb_csc.stor_origin = (uint64_t)vm->uv.cpu_stor;
+
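+	/* CR1 holds the primary ASCE, which is used as the guest ASCE */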
+	uvcb_cgc.guest_asce = (uint64_t)stctg(1);
+	uvcb_cgc.guest_sca = (uint64_t)vm->sca;
+
+	cc = uv_call(0, (uint64_t)&uvcb_cgc);
+	assert(!cc);
+
+	vm->uv.vm_handle = uvcb_cgc.guest_handle;
+	uvcb_csc.guest_handle = uvcb_cgc.guest_handle;
+	cc = uv_call(0, (uint64_t)&uvcb_csc);
+	vm->uv.vcpu_handle = uvcb_csc.cpu_handle;
+	assert(!cc);
+
+	/*
+	 * Convert guest to format 4:
+	 *
+	 *  - Set format 4
+	 *  - Write UV handles into sblk
+	 *  - Allocate and set SIDA
+	 */
+	vm->sblk->sdf = 2;
+	vm->sblk->sidad = (uint64_t)alloc_page();
+	vm->sblk->pv_handle_cpu = uvcb_csc.cpu_handle;
+	vm->sblk->pv_handle_config = uvcb_cgc.guest_handle;
+}
+
+void uv_destroy_guest(struct vm *vm)
+{
+	int cc;
+	u16 rc, rrc;
+
+	cc = uv_cmd_nodata(vm->sblk->pv_handle_cpu,
+			   UVC_CMD_DESTROY_SEC_CPU, &rc, &rrc);
+	assert(cc == 0);
+	free_page((void *)vm->sblk->sidad);
+	free_pages(vm->uv.cpu_stor);
+
+	cc = uv_cmd_nodata(vm->sblk->pv_handle_config,
+			   UVC_CMD_DESTROY_SEC_CONF, &rc, &rrc);
+	assert(cc == 0);
+	free_pages(vm->uv.conf_base_stor);
+	free_pages(vm->uv.conf_var_stor);
+}
+
+int uv_unpack(struct vm *vm, uint64_t addr, uint64_t len, uint64_t tweak)
+{
+	int i, cc = 0;
+
+	for (i = 0; i < len / PAGE_SIZE; i++) {
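+		/* The second tweak component is the page's offset in the image */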
+		cc = uv_unp_page(vm->uv.vm_handle, addr, tweak, i * PAGE_SIZE);
+		assert(!cc);
+		addr += PAGE_SIZE;
+	}
+	return cc;
+}
+
+void uv_verify_load(struct vm *vm)
+{
+	uint16_t rc, rrc;
+	int cc;
+
+	cc = uv_cmd_nodata(vm->uv.vm_handle, UVC_CMD_VERIFY_IMG, &rc, &rrc);
+	assert(!cc);
+	cc = uv_set_cpu_state(vm->uv.vcpu_handle, PV_CPU_STATE_OPR_LOAD);
+	assert(!cc);
+}
-- 
2.31.1

