From: Janosch Frank <frankja@linux.ibm.com>
To: Claudio Imbrenda <imbrenda@linux.ibm.com>
Cc: kvm@vger.kernel.org, linux-s390@vger.kernel.org,
	thuth@redhat.com, david@redhat.com
Subject: Re: [kvm-unit-tests PATCH 6/6] s390x: Add UV host test
Date: Mon, 26 Apr 2021 16:31:28 +0200	[thread overview]
Message-ID: <a4facf70-67f7-0522-e149-a0ce35677680@linux.ibm.com> (raw)
In-Reply-To: <20210420174757.49d3ed3a@ibm-vm>

On 4/20/21 5:47 PM, Claudio Imbrenda wrote:
> On Tue, 16 Mar 2021 09:16:54 +0000
> Janosch Frank <frankja@linux.ibm.com> wrote:
> 
>> Let's also test the UV host interfaces.
>>
>> Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
> 
> Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>

Thanks

> 
> the test is ok as is, but I do have a lot of ideas on how to
> improve/extend it further. Once this is merged (and once I have some
> time) I will send a few patches. 

I'm always happy to take new code for this project.
At some point we might need to split it into separate files though :)

> 
>> ---
>>  s390x/Makefile  |   1 +
>>  s390x/uv-host.c | 513 ++++++++++++++++++++++++++++++++++++++++++++++++
>>  2 files changed, 514 insertions(+)
>>  create mode 100644 s390x/uv-host.c
>>
>> diff --git a/s390x/Makefile b/s390x/Makefile
>> index bbf177fa..8de926ab 100644
>> --- a/s390x/Makefile
>> +++ b/s390x/Makefile
>> @@ -21,6 +21,7 @@ tests += $(TEST_DIR)/css.elf
>>  tests += $(TEST_DIR)/uv-guest.elf
>>  tests += $(TEST_DIR)/sie.elf
>>  tests += $(TEST_DIR)/mvpg.elf
>> +tests += $(TEST_DIR)/uv-host.elf
>>  
>>  tests_binary = $(patsubst %.elf,%.bin,$(tests))
>>  ifneq ($(HOST_KEY_DOCUMENT),)
>> diff --git a/s390x/uv-host.c b/s390x/uv-host.c
>> new file mode 100644
>> index 00000000..746abd8e
>> --- /dev/null
>> +++ b/s390x/uv-host.c
>> @@ -0,0 +1,513 @@
>> +/* SPDX-License-Identifier: GPL-2.0-only */
>> +/*
>> + * Host Ultravisor Call tests
>> + *
>> + * Copyright (c) 2021 IBM Corp
>> + *
>> + * Authors:
>> + *  Janosch Frank <frankja@linux.ibm.com>
>> + */
>> +
>> +#include <libcflat.h>
>> +#include <alloc.h>
>> +#include <vmalloc.h>
>> +#include <sclp.h>
>> +#include <smp.h>
>> +#include <asm/page.h>
>> +#include <asm/sigp.h>
>> +#include <asm/pgtable.h>
>> +#include <asm/asm-offsets.h>
>> +#include <asm/interrupt.h>
>> +#include <asm/facility.h>
>> +#include <asm/uv.h>
>> +#include <asm-generic/barrier.h>
>> +
>> +static struct uv_cb_qui uvcb_qui;
>> +static struct uv_cb_init uvcb_init;
>> +static struct uv_cb_cgc uvcb_cgc;
>> +static struct uv_cb_csc uvcb_csc;
>> +
>> +extern int diag308_load_reset(u64 code);
>> +
>> +struct cmd_list{
>> +	const char *name;
>> +	uint16_t cmd;
>> +	uint16_t len;
>> +};
>> +
>> +static void cpu_loop(void)
>> +{
>> +	for (;;) {}
>> +}
>> +
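>> +/*
>> + * UV commands and their command block sizes, used by test_priv() below
>> + * to exercise each of them from problem state.
>> + */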
>> +static struct cmd_list cmds[] = {
>> +	{ "init", UVC_CMD_INIT_UV, sizeof(struct uv_cb_init) },
>> +	{ "create conf", UVC_CMD_CREATE_SEC_CONF, sizeof(struct uv_cb_cgc) },
>> +	{ "destroy conf", UVC_CMD_DESTROY_SEC_CONF, sizeof(struct uv_cb_nodata) },
>> +	{ "create cpu", UVC_CMD_CREATE_SEC_CPU, sizeof(struct uv_cb_csc) },
>> +	{ "destroy cpu", UVC_CMD_DESTROY_SEC_CPU, sizeof(struct uv_cb_nodata) },
>> +	{ "conv to", UVC_CMD_CONV_TO_SEC_STOR, sizeof(struct uv_cb_cts) },
>> +	{ "conv from", UVC_CMD_CONV_FROM_SEC_STOR, sizeof(struct uv_cb_cfs) },
>> +	{ "set sec conf", UVC_CMD_SET_SEC_CONF_PARAMS, sizeof(struct uv_cb_ssc) },
>> +	{ "unpack", UVC_CMD_UNPACK_IMG, sizeof(struct uv_cb_unp) },
>> +	{ "verify", UVC_CMD_VERIFY_IMG, sizeof(struct uv_cb_nodata) },
>> +	{ "cpu reset", UVC_CMD_CPU_RESET, sizeof(struct uv_cb_nodata) },
>> +	{ "cpu initial reset", UVC_CMD_CPU_RESET_INITIAL, sizeof(struct uv_cb_nodata) },
>> +	{ "conf clear reset", UVC_CMD_PERF_CONF_CLEAR_RESET, sizeof(struct uv_cb_nodata) },
>> +	{ "cpu clear reset", UVC_CMD_CPU_RESET_CLEAR, sizeof(struct uv_cb_nodata) },
>> +	{ "cpu set state", UVC_CMD_CPU_SET_STATE, sizeof(struct uv_cb_cpu_set_state) },
>> +	{ "pin shared", UVC_CMD_PIN_PAGE_SHARED, sizeof(struct uv_cb_cfs) },
>> +	{ "unpin shared", UVC_CMD_UNPIN_PAGE_SHARED, sizeof(struct uv_cb_cts) },
>> +	{ NULL, 0, 0 },
>> +};
>> +
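>> +/*
>> + * Every UV command has to be rejected with a privileged-operation
>> + * program exception when issued from problem state.
>> + */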
>> +static void test_priv(void)
>> +{
>> +	struct uv_cb_header uvcb = {};
>> +	uint16_t pgm;
>> +	int i;
>> +
>> +	report_prefix_push("privileged");
>> +	for (i = 0; cmds[i].name; i++) {
>> +		expect_pgm_int();
>> +		uvcb.cmd = cmds[i].cmd;
>> +		uvcb.len = cmds[i].len;
>> +		enter_pstate();
>> +		uv_call(0, (uint64_t)&uvcb);
>> +		pgm = clear_pgm_int();
>> +		report(pgm == PGM_INT_CODE_PRIVILEGED_OPERATION, "%s", cmds[i].name);
>> +	}
>> +	report_prefix_pop();
>> +}
>> +
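>> +/*
>> + * Destroy the secure configuration created by test_config_create():
>> + * first with a too small header length and a wrong handle, then for real.
>> + */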
>> +static void test_config_destroy(void)
>> +{
>> +	int rc;
>> +	struct uv_cb_nodata uvcb = {
>> +		.header.cmd = UVC_CMD_DESTROY_SEC_CONF,
>> +		.header.len = sizeof(uvcb),
>> +		.handle = uvcb_cgc.guest_handle,
>> +	};
>> +
>> +	report_prefix_push("dsc");
>> +	uvcb.header.len -= 8;
>> +	rc = uv_call(0, (uint64_t)&uvcb);
>> +	report(rc == 1 && uvcb.header.rc == UVC_RC_INV_LEN,
>> +	       "hdr invalid length");
>> +	uvcb.header.len += 8;
>> +
>> +	uvcb.handle += 1;
>> +	rc = uv_call(0, (uint64_t)&uvcb);
>> +	report(rc == 1 && uvcb.header.rc == UVC_RC_INV_GHANDLE, "invalid handle");
>> +	uvcb.handle -= 1;
>> +
>> +	rc = uv_call(0, (uint64_t)&uvcb);
>> +	report(rc == 0 && uvcb.header.rc == UVC_RC_EXECUTED, "success");
>> +	report_prefix_pop();
>> +}
>> +
>> +static void test_cpu_destroy(void)
>> +{
>> +	int rc;
>> +	struct uv_cb_nodata uvcb = {
>> +		.header.len = sizeof(uvcb),
>> +		.header.cmd = UVC_CMD_DESTROY_SEC_CPU,
>> +		.handle = uvcb_csc.cpu_handle,
>> +	};
>> +
>> +	report_prefix_push("dcpu");
>> +
>> +	uvcb.header.len -= 8;
>> +	rc = uv_call(0, (uint64_t)&uvcb);
>> +	report(rc == 1 && uvcb.header.rc == UVC_RC_INV_LEN,
>> +	       "hdr invalid length");
>> +	uvcb.header.len += 8;
>> +
>> +	uvcb.handle += 1;
>> +	rc = uv_call(0, (uint64_t)&uvcb);
>> +	report(rc == 1 && uvcb.header.rc == UVC_RC_INV_CHANDLE, "invalid handle");
>> +	uvcb.handle -= 1;
>> +
>> +	rc = uv_call(0, (uint64_t)&uvcb);
>> +	report(rc == 0 && uvcb.header.rc == UVC_RC_EXECUTED, "success");
>> +
>> +	report_prefix_pop();
>> +}
>> +
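>> +/*
>> + * Create a secure cpu for the configuration set up in test_config_create().
>> + * The negative tests cover an invalid header length, a wrong guest handle,
>> + * a cpu number above the queried maximum and inaccessible or lowcore
>> + * storage before the successful creation and the "already defined" case.
>> + */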
>> +static void test_cpu_create(void)
>> +{
>> +	int rc;
>> +	unsigned long tmp;
>> +
>> +	report_prefix_push("csc");
>> +	uvcb_csc.header.len = sizeof(uvcb_csc);
>> +	uvcb_csc.header.cmd = UVC_CMD_CREATE_SEC_CPU;
>> +	uvcb_csc.guest_handle = uvcb_cgc.guest_handle;
>> +	uvcb_csc.stor_origin = (unsigned long)memalign(PAGE_SIZE, uvcb_qui.cpu_stor_len);
>> +	uvcb_csc.state_origin = (unsigned long)memalign(PAGE_SIZE, PAGE_SIZE);
>> +
>> +	uvcb_csc.header.len -= 8;
>> +	rc = uv_call(0, (uint64_t)&uvcb_csc);
>> +	report(uvcb_csc.header.rc == UVC_RC_INV_LEN && rc == 1 &&
>> +	       !uvcb_csc.cpu_handle, "hdr invalid length");
>> +	uvcb_csc.header.len += 8;
>> +
>> +	uvcb_csc.guest_handle += 1;
>> +	rc = uv_call(0, (uint64_t)&uvcb_csc);
>> +	report(uvcb_csc.header.rc == UVC_RC_INV_GHANDLE && rc == 1,
>> +	       "invalid guest handle");
>> +	uvcb_csc.guest_handle -= 1;
>> +
>> +	uvcb_csc.num = uvcb_qui.max_guest_cpus + 1;
>> +	rc = uv_call(0, (uint64_t)&uvcb_csc);
>> +	report(uvcb_csc.header.rc == 0x103 && rc == 1,
>> +	       "invalid cpu #");
>> +	uvcb_csc.num = 0;
>> +
>> +	tmp = uvcb_csc.stor_origin;
>> +	uvcb_csc.stor_origin = get_max_ram_size() + PAGE_SIZE;
>> +	rc = uv_call(0, (uint64_t)&uvcb_csc);
>> +	report(uvcb_csc.header.rc == 0x105 && rc == 1,
>> +	       "cpu stor inaccessible");
>> +	uvcb_csc.stor_origin = tmp;
>> +
>> +	tmp = uvcb_csc.stor_origin;
>> +	uvcb_csc.stor_origin = 0;
>> +	rc = uv_call(0, (uint64_t)&uvcb_csc);
>> +	report(uvcb_csc.header.rc == 0x106 && rc == 1,
>> +	       "cpu stor in lowcore");
>> +	uvcb_csc.stor_origin = tmp;
>> +
>> +	tmp = uvcb_csc.state_origin;
>> +	uvcb_csc.state_origin = get_max_ram_size() + PAGE_SIZE;
>> +	rc = uv_call(0, (uint64_t)&uvcb_csc);
>> +	report(uvcb_csc.header.rc == 0x107 && rc == 1,
>> +	       "SIE SD inaccessible");
>> +	uvcb_csc.state_origin = tmp;
>> +
>> +	rc = uv_call(0, (uint64_t)&uvcb_csc);
>> +	report(rc == 0 && uvcb_csc.header.rc == UVC_RC_EXECUTED &&
>> +	       uvcb_csc.cpu_handle, "success");
>> +
>> +	tmp = uvcb_csc.stor_origin;
>> +	uvcb_csc.stor_origin = (unsigned long)memalign(PAGE_SIZE, uvcb_qui.cpu_stor_len);
>> +	rc = uv_call(0, (uint64_t)&uvcb_csc);
>> +	report(rc == 1 && uvcb_csc.header.rc == 0x104, "already defined");
>> +	uvcb_csc.stor_origin = tmp;
>> +	report_prefix_pop();
>> +}
>> +
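>> +/*
>> + * Create a secure configuration and probe the error conditions the UV
>> + * reports for it: bad lengths, storage origins, ASCE bits and SCA values,
>> + * as well as re-use of the control block and of the ASCE.
>> + */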
>> +static void test_config_create(void)
>> +{
>> +	int rc;
>> +	unsigned long vsize, tmp;
>> +	static struct uv_cb_cgc uvcb;
>> +
>> +	uvcb_cgc.header.cmd = UVC_CMD_CREATE_SEC_CONF;
>> +	uvcb_cgc.header.len = sizeof(uvcb_cgc);
>> +	report_prefix_push("cgc");
>> +
>> +	uvcb_cgc.guest_stor_origin = 0;
>> +	uvcb_cgc.guest_stor_len = 42 * (1UL << 20);
>> +	vsize = uvcb_qui.conf_base_virt_stor_len +
>> +		((uvcb_cgc.guest_stor_len / (1UL << 20)) * uvcb_qui.conf_virt_var_stor_len);
>> +
>> +	uvcb_cgc.conf_base_stor_origin = (uint64_t)memalign(PAGE_SIZE * 4, uvcb_qui.conf_base_phys_stor_len);
>> +	uvcb_cgc.conf_var_stor_origin = (uint64_t)memalign(PAGE_SIZE, vsize);
>> +	uvcb_cgc.guest_asce = (uint64_t)memalign(PAGE_SIZE, 4 * PAGE_SIZE) | ASCE_DT_SEGMENT | REGION_TABLE_LENGTH | ASCE_P;
>> +	uvcb_cgc.guest_sca = (uint64_t)memalign(PAGE_SIZE * 4, PAGE_SIZE * 4);
>> +
>> +	uvcb_cgc.header.len -= 8;
>> +	rc = uv_call(0, (uint64_t)&uvcb_cgc);
>> +	report(uvcb_cgc.header.rc == UVC_RC_INV_LEN && rc == 1 &&
>> +	       !uvcb_cgc.guest_handle, "hdr invalid length");
>> +	uvcb_cgc.header.len += 8;
>> +
>> +	uvcb_cgc.guest_stor_origin = uvcb_qui.max_guest_stor_addr + (1UL << 20) * 2 + 1;
>> +	rc = uv_call(0, (uint64_t)&uvcb_cgc);
>> +	report(uvcb_cgc.header.rc == 0x101 && rc == 1,
>> +	       "MSO > max guest addr");
>> +	uvcb_cgc.guest_stor_origin = 0;
>> +
>> +	uvcb_cgc.guest_stor_origin = uvcb_qui.max_guest_stor_addr - (1UL << 20);
>> +	rc = uv_call(0, (uint64_t)&uvcb_cgc);
>> +	report(uvcb_cgc.header.rc == 0x102 && rc == 1,
>> +	       "MSO + MSL > max guest addr");
>> +	uvcb_cgc.guest_stor_origin = 0;
>> +
>> +	uvcb_cgc.guest_asce &= ~ASCE_P;
>> +	rc = uv_call(0, (uint64_t)&uvcb_cgc);
>> +	report(uvcb_cgc.header.rc == 0x105 && rc == 1,
>> +	       "ASCE private bit missing");
>> +	uvcb_cgc.guest_asce |= ASCE_P;
>> +
>> +	uvcb_cgc.guest_asce |= 0x20;
>> +	rc = uv_call(0, (uint64_t)&uvcb_cgc);
>> +	report(uvcb_cgc.header.rc == 0x105 && rc == 1,
>> +	       "ASCE bit 58 set");
>> +	uvcb_cgc.guest_asce &= ~0x20;
>> +
>> +	tmp = uvcb_cgc.conf_base_stor_origin;
>> +	uvcb_cgc.conf_base_stor_origin = get_max_ram_size() + 8;
>> +	rc = uv_call(0, (uint64_t)&uvcb_cgc);
>> +	report(uvcb_cgc.header.rc == 0x108 && rc == 1,
>> +	       "base storage origin > available memory");
>> +	uvcb_cgc.conf_base_stor_origin = tmp;
>> +
>> +	tmp = uvcb_cgc.conf_base_stor_origin;
>> +	uvcb_cgc.conf_base_stor_origin = 0x1000;
>> +	rc = uv_call(0, (uint64_t)&uvcb_cgc);
>> +	report(uvcb_cgc.header.rc == 0x109 && rc == 1,
>> +	       "base storage origin contains lowcore");
>> +	uvcb_cgc.conf_base_stor_origin = tmp;
>> +
>> +	if (smp_query_num_cpus() == 1) {
>> +		sigp_retry(1, SIGP_SET_PREFIX,
>> +			   uvcb_cgc.conf_var_stor_origin + PAGE_SIZE, NULL);
>> +		rc = uv_call(0, (uint64_t)&uvcb_cgc);
>> +		report(uvcb_cgc.header.rc == 0x10e && rc == 1 &&
>> +		       !uvcb_cgc.guest_handle, "variable storage area contains lowcore");
>> +		sigp_retry(1, SIGP_SET_PREFIX, 0x0, NULL);
>> +	}
>> +
>> +	tmp = uvcb_cgc.guest_sca;
>> +	uvcb_cgc.guest_sca = 0;
>> +	rc = uv_call(0, (uint64_t)&uvcb_cgc);
>> +	report(uvcb_cgc.header.rc == 0x10c && rc == 1,
>> +	       "sca == 0");
>> +	uvcb_cgc.guest_sca = tmp;
>> +
>> +	tmp = uvcb_cgc.guest_sca;
>> +	uvcb_cgc.guest_sca = get_max_ram_size() + PAGE_SIZE * 4;
>> +	rc = uv_call(0, (uint64_t)&uvcb_cgc);
>> +	report(uvcb_cgc.header.rc == 0x10d && rc == 1,
>> +	       "sca inaccessible");
>> +	uvcb_cgc.guest_sca = tmp;
>> +
>> +	rc = uv_call(0, (uint64_t)&uvcb_cgc);
>> +	report(rc == 0 && uvcb_cgc.header.rc == UVC_RC_EXECUTED, "successful");
>> +
>> +	uvcb_cgc.header.rc = 0;
>> +	uvcb_cgc.header.rrc = 0;
>> +	tmp = uvcb_cgc.guest_handle;
>> +	uvcb_cgc.guest_handle = 0;
>> +	rc = uv_call(0, (uint64_t)&uvcb_cgc);
>> +	report(uvcb_cgc.header.rc >= 0x100 && rc == 1, "reuse uvcb");
>> +	uvcb_cgc.guest_handle = tmp;
>> +
>> +	/* Copy over most data from uvcb_cgc, so we have the ASCE that was used. */
>> +	memcpy(&uvcb, &uvcb_cgc, sizeof(uvcb));
>> +
>> +	/* Reset the header and handle */
>> +	uvcb.header.rc = 0;
>> +	uvcb.header.rrc = 0;
>> +	uvcb.guest_handle = 0;
>> +
>> +	/* Use new storage areas. */
>> +	uvcb.conf_base_stor_origin = (uint64_t)memalign(PAGE_SIZE * 4, uvcb_qui.conf_base_phys_stor_len);
>> +	uvcb.conf_var_stor_origin = (uint64_t)memalign(PAGE_SIZE, vsize);
>> +
>> +	rc = uv_call(0, (uint64_t)&uvcb);
>> +	report(uvcb.header.rc >= 0x104 && rc == 1 && !uvcb.guest_handle,
>> +	       "reuse ASCE");
>> +	free((void *)uvcb.conf_base_stor_origin);
>> +	free((void *)uvcb.conf_var_stor_origin);
>> +
>> +
>> +	/* Missing: 106, 10a, a0b */
>> +	report_prefix_pop();
>> +}
>> +
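>> +/*
>> + * Initialize the UV with donated storage. The storage has to lie above
>> + * 2GB and only one cpu may be running, so both conditions are probed
>> + * along with the length/origin error codes before the successful
>> + * initialization and the double-init check.
>> + */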
>> +static void test_init(void)
>> +{
>> +	int rc;
>> +	uint64_t mem;
>> +	struct psw psw;
>> +
>> +	/* Donated storage needs to be over 2GB */
>> +	mem = (uint64_t)memalign(1UL << 31, uvcb_qui.uv_base_stor_len);
>> +
>> +	uvcb_init.header.len = sizeof(uvcb_init);
>> +	uvcb_init.header.cmd = UVC_CMD_INIT_UV;
>> +	uvcb_init.stor_origin = mem;
>> +	uvcb_init.stor_len = uvcb_qui.uv_base_stor_len;
>> +
>> +	report_prefix_push("init");
>> +	uvcb_init.header.len -= 8;
>> +	rc = uv_call(0, (uint64_t)&uvcb_init);
>> +	report(rc == 1 && uvcb_init.header.rc == UVC_RC_INV_LEN,
>> +	       "hdr invalid length");
>> +	uvcb_init.header.len += 8;
>> +
>> +	uvcb_init.stor_len -= 8;
>> +	rc = uv_call(0, (uint64_t)&uvcb_init);
>> +	report(rc == 1 && uvcb_init.header.rc == 0x103,
>> +	       "storage invalid length");
>> +	uvcb_init.stor_len += 8;
>> +
>> +	uvcb_init.stor_origin =  get_max_ram_size() + 8;
>> +	rc = uv_call(0, (uint64_t)&uvcb_init);
>> +	report(rc == 1 && uvcb_init.header.rc == 0x104,
>> +	       "storage origin invalid");
>> +	uvcb_init.stor_origin = mem;
>> +
>> +	uvcb_init.stor_origin = get_max_ram_size() - 8;
>> +	rc = uv_call(0, (uint64_t)&uvcb_init);
>> +	report(rc == 1 && uvcb_init.header.rc == 0x105,
>> +	       "storage + length invalid");
>> +	uvcb_init.stor_origin = mem;
>> +
>> +	uvcb_init.stor_origin = 1UL << 30;
>> +	rc = uv_call(0, (uint64_t)&uvcb_init);
>> +	report(rc == 1 && uvcb_init.header.rc == 0x108,
>> +	       "storage below 2GB");
>> +	uvcb_init.stor_origin = mem;
>> +
>> +	psw.mask = extract_psw_mask();
>> +	psw.addr = (unsigned long)cpu_loop;
>> +	smp_cpu_setup(1, psw);
>> +	rc = uv_call(0, (uint64_t)&uvcb_init);
>> +	report(rc == 1 && uvcb_init.header.rc == 0x102,
>> +	       "too many running cpus");
>> +	smp_cpu_stop(1);
>> +
>> +	rc = uv_call(0, (uint64_t)&uvcb_init);
>> +	report(rc == 0 && uvcb_init.header.rc == UVC_RC_EXECUTED, "successful");
>> +
>> +	mem = (uint64_t)memalign(1UL << 31, uvcb_qui.uv_base_stor_len);
>> +	rc = uv_call(0, (uint64_t)&uvcb_init);
>> +	report(rc == 1 && uvcb_init.header.rc == 0x101, "double init");
>> +	free((void *)mem);
>> +
>> +	report_prefix_pop();
>> +}
>> +
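>> +/*
>> + * Query the UV for its limits and for the list of installed UV calls,
>> + * then verify that the calls needed by the host tests are indicated.
>> + */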
>> +static void test_query(void)
>> +{
>> +	uvcb_qui.header.cmd = UVC_CMD_QUI;
>> +	uvcb_qui.header.len = sizeof(uvcb_qui);
>> +	uint64_t *call_list = &uvcb_qui.inst_calls_list[0];
>> +
>> +	report_prefix_push("query");
>> +	uvcb_qui.header.len = 0xa0;
>> +	uv_call(0, (uint64_t)&uvcb_qui);
>> +	report(uvcb_qui.header.rc == UVC_RC_INV_LEN, "length");
>> +
>> +	uvcb_qui.header.len = 0xa8;
>> +	uv_call(0, (uint64_t)&uvcb_qui);
>> +	report(uvcb_qui.header.rc == 0x100, "insf length");
>> +
>> +	uvcb_qui.header.len = sizeof(uvcb_qui);
>> +	uv_call(0, (uint64_t)&uvcb_qui);
>> +	report(uvcb_qui.header.rc == UVC_RC_EXECUTED, "successful query");
>> +
>> +	report(test_bit_inv(BIT_UVC_CMD_QUI, call_list) &&
>> +	       test_bit_inv(BIT_UVC_CMD_INIT_UV, call_list),
>> +	       "query and init indicated");
>> +
>> +	report(test_bit_inv(BIT_UVC_CMD_CREATE_SEC_CONF, call_list) &&
>> +	       test_bit_inv(BIT_UVC_CMD_DESTROY_SEC_CONF, call_list),
>> +	       "create/destroy vm indicated");
>> +
>> +	report(test_bit_inv(BIT_UVC_CMD_CREATE_SEC_CPU, call_list) &&
>> +	       test_bit_inv(BIT_UVC_CMD_DESTROY_SEC_CPU, call_list),
>> +	       "create/destroy cpu indicated");
>> +
>> +	report(test_bit_inv(BIT_UVC_CMD_CONV_TO_SEC_STOR, call_list) &&
>> +	       test_bit_inv(BIT_UVC_CMD_CONV_FROM_SEC_STOR, call_list),
>> +	       "convert to/from secure storage indicated");
>> +
>> +	report(test_bit_inv(BIT_UVC_CMD_SET_SEC_PARMS, call_list) &&
>> +	       test_bit_inv(BIT_UVC_CMD_UNPACK_IMG, call_list) &&
>> +	       test_bit_inv(BIT_UVC_CMD_CPU_SET_STATE, call_list) &&
>> +	       test_bit_inv(BIT_UVC_CMD_VERIFY_IMG, call_list),
>> +	       "set sec parm, setcpu state, unpack and verify indicated");
>> +
>> +	report(test_bit_inv(BIT_UVC_CMD_CPU_RESET, call_list) &&
>> +	       test_bit_inv(BIT_UVC_CMD_CPU_RESET_INITIAL, call_list) &&
>> +	       test_bit_inv(BIT_UVC_CMD_CPU_PERFORM_CLEAR_RESET, call_list),
>> +	       "resets indicated");
>> +
>> +	report(test_bit_inv(BIT_UVC_CMD_PREPARE_CLEAR_RESET, call_list) &&
>> +	       test_bit_inv(BIT_UVC_CMD_UNSHARE_ALL, call_list),
>> +	       "prepare reset and unshare all indicated");
>> +
>> +	report(test_bit_inv(BIT_UVC_CMD_PIN_PAGE_SHARED, call_list) &&
>> +	       test_bit_inv(BIT_UVC_CMD_UNPIN_PAGE_SHARED, call_list),
>> +	       "(un)pin shared page indicated");
>> +
>> +	report(!test_bit_inv(BIT_UVC_CMD_SET_SHARED_ACCESS, call_list) &&
>> +	       !test_bit_inv(BIT_UVC_CMD_REMOVE_SHARED_ACCESS, call_list),
>> +	       "(un)share not indicated");
>> +
>> +	report_prefix_pop();
>> +}
>> +
>> +static struct cmd_list invalid_cmds[] = {
>> +	{ "bogus", 0x4242, sizeof(struct uv_cb_header) },
>> +	{ "share", UVC_CMD_SET_SHARED_ACCESS, sizeof(struct uv_cb_share) },
>> +	{ "unshare", UVC_CMD_REMOVE_SHARED_ACCESS, sizeof(struct uv_cb_share) },
>> +	{ NULL, 0, 0 },
>> +};
>> +
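>> +/*
>> + * Commands that are not installed on the host side (share/unshare are
>> + * guest calls, see test_query()) as well as a bogus command code have
>> + * to be rejected with "invalid command".
>> + */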
>> +static void test_invalid(void)
>> +{
>> +	struct uv_cb_header hdr = {};
>> +	int i, cc;
>> +
>> +	report_prefix_push("invalid");
>> +	for (i = 0; invalid_cmds[i].name; i++) {
>> +		hdr.cmd = invalid_cmds[i].cmd;
>> +		hdr.len = invalid_cmds[i].len;
>> +		cc = uv_call(0, (uint64_t)&hdr);
>> +		report(cc == 1 && hdr.rc == UVC_RC_INV_CMD, "%s",
>> +		       invalid_cmds[i].name);
>> +	}
>> +	report_prefix_pop();
>> +}
>> +
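>> +/*
>> + * The storage donated for UV initialization is expected to be cleared
>> + * after a diag308 subcode 1 reset.
>> + */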
>> +static void test_clear(void)
>> +{
>> +	uint64_t *tmp = (void *)uvcb_init.stor_origin;
>> +
>> +	diag308_load_reset(1);
>> +	sclp_console_setup();
>> +	report(!*tmp, "memory cleared after reset 1");
>> +}
>> +
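>> +/*
>> + * Create page tables for the whole of memory, copy the resulting ASCE
>> + * from control register 1 into control register 13 and switch the PSW
>> + * into home-space mode.
>> + */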
>> +static void setup_vmem(void)
>> +{
>> +	uint64_t asce, mask;
>> +
>> +	setup_mmu(get_max_ram_size());
>> +	asce = stctg(1);
>> +	lctlg(13, asce);
>> +	mask = extract_psw_mask() | 0x0000C00000000000UL;
>> +	load_psw_mask(mask);
>> +}
>> +
>> +int main(void)
>> +{
>> +	bool has_uvc = test_facility(158);
>> +
>> +	report_prefix_push("uvc");
>> +	if (!has_uvc) {
>> +		report_skip("Ultravisor call facility is not available");
>> +		goto done;
>> +	}
>> +
>> +	test_priv();
>> +	test_invalid();
>> +	test_query();
>> +	test_init();
>> +
>> +	setup_vmem();
>> +	test_config_create();
>> +	test_cpu_create();
>> +	test_cpu_destroy();
>> +	test_config_destroy();
>> +	test_clear();
>> +
>> +done:
>> +	return report_summary();
>> +}
> 

