* [kvm-unit-tests PATCH 0/4] x86: enable malloc and friends
@ 2016-10-31 18:51 Andrew Jones
  2016-10-31 18:51 ` [kvm-unit-tests PATCH 1/4] lib/alloc: improve robustness Andrew Jones
                   ` (4 more replies)
  0 siblings, 5 replies; 12+ messages in thread
From: Andrew Jones @ 2016-10-31 18:51 UTC (permalink / raw)
  To: kvm; +Cc: pbonzini, lvivier, thuth

And do some other alloc-related things too...

Andrew Jones (4):
  lib/alloc: improve robustness
  x86: lib/alloc: move heap management to common code
  lib/alloc: return a zeroed page from alloc_page
  lib/x86/vm: enable malloc and friends

 lib/alloc.c         | 50 ++++++++++++++++++++++++++++++++++++-------
 lib/alloc.h         | 10 +++++++++
 lib/x86/vm.c        | 61 ++++++++++++++++++++++++-----------------------------
 lib/x86/vm.h        |  2 +-
 x86/Makefile.common |  1 +
 x86/vmx.c           |  6 ------
 x86/vmx_tests.c     |  9 --------
 7 files changed, 81 insertions(+), 58 deletions(-)

-- 
2.7.4



* [kvm-unit-tests PATCH 1/4] lib/alloc: improve robustness
  2016-10-31 18:51 [kvm-unit-tests PATCH 0/4] x86: enable malloc and friends Andrew Jones
@ 2016-10-31 18:51 ` Andrew Jones
  2016-11-02  6:08   ` Thomas Huth
  2016-10-31 18:51 ` [kvm-unit-tests PATCH 2/4] x86: lib/alloc: move heap management to common code Andrew Jones
                   ` (3 subsequent siblings)
  4 siblings, 1 reply; 12+ messages in thread
From: Andrew Jones @ 2016-10-31 18:51 UTC (permalink / raw)
  To: kvm; +Cc: pbonzini, lvivier, thuth

Add some asserts to make sure phys_alloc_init is called only once
and before any other alloc calls.

Signed-off-by: Andrew Jones <drjones@redhat.com>
---
 lib/alloc.c | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/lib/alloc.c b/lib/alloc.c
index e1d7b8a380ff..a83c8c5db850 100644
--- a/lib/alloc.c
+++ b/lib/alloc.c
@@ -42,12 +42,13 @@ void phys_alloc_show(void)
 
 void phys_alloc_init(phys_addr_t base_addr, phys_addr_t size)
 {
-	spin_lock(&lock);
+	/* we should only be called once by one processor */
+	assert(!top);
+	assert(size);
+
 	base = base_addr;
 	top = base + size;
 	align_min = DEFAULT_MINIMUM_ALIGNMENT;
-	nr_regions = 0;
-	spin_unlock(&lock);
 }
 
 void phys_alloc_set_minimum_alignment(phys_addr_t align)
@@ -63,14 +64,14 @@ static phys_addr_t phys_alloc_aligned_safe(phys_addr_t size,
 {
 	static bool warned = false;
 	phys_addr_t addr, size_orig = size;
-	u64 top_safe;
+	u64 top_safe = top;
 
-	spin_lock(&lock);
+	if (safe && sizeof(long) == 4)
+		top_safe = MIN(top, 1ULL << 32);
 
-	top_safe = top;
+	assert(base < top_safe);
 
-	if (safe && sizeof(long) == 4)
-		top_safe = MIN(top_safe, 1ULL << 32);
+	spin_lock(&lock);
 
 	align = MAX(align, align_min);
 
-- 
2.7.4



* [kvm-unit-tests PATCH 2/4] x86: lib/alloc: move heap management to common code
  2016-10-31 18:51 [kvm-unit-tests PATCH 0/4] x86: enable malloc and friends Andrew Jones
  2016-10-31 18:51 ` [kvm-unit-tests PATCH 1/4] lib/alloc: improve robustness Andrew Jones
@ 2016-10-31 18:51 ` Andrew Jones
  2016-11-02  6:14   ` Thomas Huth
  2016-10-31 18:51 ` [kvm-unit-tests PATCH 3/4] lib/alloc: return a zeroed page from alloc_page Andrew Jones
                   ` (2 subsequent siblings)
  4 siblings, 1 reply; 12+ messages in thread
From: Andrew Jones @ 2016-10-31 18:51 UTC (permalink / raw)
  To: kvm; +Cc: pbonzini, lvivier, thuth

This allows other arches to use {alloc,free}_page.

Signed-off-by: Andrew Jones <drjones@redhat.com>
---
 lib/alloc.c         | 32 ++++++++++++++++++++++++++++++++
 lib/alloc.h         | 10 ++++++++++
 lib/x86/vm.c        | 33 ++-------------------------------
 x86/Makefile.common |  1 +
 4 files changed, 45 insertions(+), 31 deletions(-)

diff --git a/lib/alloc.c b/lib/alloc.c
index a83c8c5db850..7687c703f40b 100644
--- a/lib/alloc.c
+++ b/lib/alloc.c
@@ -5,6 +5,7 @@
  */
 #include "alloc.h"
 #include "asm/spinlock.h"
+#include "asm/page.h"
 #include "asm/io.h"
 
 #define MIN(a, b)		((a) < (b) ? (a) : (b))
@@ -137,6 +138,37 @@ phys_addr_t phys_zalloc(phys_addr_t size)
 	return phys_zalloc_aligned(size, align_min);
 }
 
+static void *free_head;
+void heap_init(void *start, size_t size)
+{
+	void *p = (void *)ALIGN((unsigned long)start, PAGE_SIZE);
+
+	while (size >= PAGE_SIZE) {
+		*(void **)p = free_head;
+		free_head = p;
+		p += PAGE_SIZE;
+		size -= PAGE_SIZE;
+	}
+}
+
+void *alloc_page(void)
+{
+	void *p;
+
+	if (!free_head)
+		return NULL;
+
+	p = free_head;
+	free_head = *(void **)free_head;
+	return p;
+}
+
+void free_page(void *page)
+{
+	*(void **)page = free_head;
+	free_head = page;
+}
+
 static void *early_malloc(size_t size)
 {
 	phys_addr_t addr = phys_alloc_aligned_safe(size, align_min, true);
diff --git a/lib/alloc.h b/lib/alloc.h
index c12bd15f7afc..08c047f4bcb7 100644
--- a/lib/alloc.h
+++ b/lib/alloc.h
@@ -115,4 +115,14 @@ extern phys_addr_t phys_zalloc(phys_addr_t size);
  */
 extern void phys_alloc_show(void);
 
+/*
+ * Heap page allocator
+ *
+ * After initializing with heap_init, {alloc,free}_page can be used
+ * to easily manage pages.
+ */
+extern void heap_init(void *start, size_t size);
+extern void *alloc_page(void);
+extern void free_page(void *page);
+
 #endif /* _ALLOC_H_ */
diff --git a/lib/x86/vm.c b/lib/x86/vm.c
index f7e778b3779c..bba8ccee7dd0 100644
--- a/lib/x86/vm.c
+++ b/lib/x86/vm.c
@@ -1,39 +1,10 @@
 #include "fwcfg.h"
 #include "vm.h"
 #include "libcflat.h"
+#include "alloc.h"
 
-static void *free = 0;
 static void *vfree_top = 0;
 
-static void free_memory(void *mem, unsigned long size)
-{
-    while (size >= PAGE_SIZE) {
-	*(void **)mem = free;
-	free = mem;
-	mem += PAGE_SIZE;
-	size -= PAGE_SIZE;
-    }
-}
-
-void *alloc_page()
-{
-    void *p;
-
-    if (!free)
-	return 0;
-
-    p = free;
-    free = *(void **)free;
-
-    return p;
-}
-
-void free_page(void *page)
-{
-    *(void **)page = free;
-    free = page;
-}
-
 extern char edata;
 static unsigned long end_of_memory;
 
@@ -153,7 +124,7 @@ void setup_vm()
 {
     assert(!end_of_memory);
     end_of_memory = fwcfg_get_u64(FW_CFG_RAM_SIZE);
-    free_memory(&edata, end_of_memory - (unsigned long)&edata);
+    heap_init(&edata, end_of_memory - (unsigned long)&edata);
     setup_mmu(end_of_memory);
 }
 
diff --git a/x86/Makefile.common b/x86/Makefile.common
index 356d879a986b..88038e41af60 100644
--- a/x86/Makefile.common
+++ b/x86/Makefile.common
@@ -3,6 +3,7 @@
 all: test_cases
 
 cflatobjs += lib/pci.o
+cflatobjs += lib/alloc.o
 cflatobjs += lib/x86/io.o
 cflatobjs += lib/x86/smp.o
 cflatobjs += lib/x86/vm.o
-- 
2.7.4



* [kvm-unit-tests PATCH 3/4] lib/alloc: return a zeroed page from alloc_page
  2016-10-31 18:51 [kvm-unit-tests PATCH 0/4] x86: enable malloc and friends Andrew Jones
  2016-10-31 18:51 ` [kvm-unit-tests PATCH 1/4] lib/alloc: improve robustness Andrew Jones
  2016-10-31 18:51 ` [kvm-unit-tests PATCH 2/4] x86: lib/alloc: move heap management to common code Andrew Jones
@ 2016-10-31 18:51 ` Andrew Jones
  2016-11-01 14:47   ` Paolo Bonzini
  2016-10-31 18:51 ` [kvm-unit-tests PATCH 4/4] lib/x86/vm: enable malloc and friends Andrew Jones
  2016-11-01  8:58 ` [kvm-unit-tests PATCH 0/4] x86: " Andrew Jones
  4 siblings, 1 reply; 12+ messages in thread
From: Andrew Jones @ 2016-10-31 18:51 UTC (permalink / raw)
  To: kvm; +Cc: pbonzini, lvivier, thuth

This allows us to remove a bunch of memsets.

Signed-off-by: Andrew Jones <drjones@redhat.com>
---
 lib/alloc.c     | 1 +
 lib/x86/vm.c    | 2 --
 x86/vmx.c       | 6 ------
 x86/vmx_tests.c | 9 ---------
 4 files changed, 1 insertion(+), 17 deletions(-)

diff --git a/lib/alloc.c b/lib/alloc.c
index 7687c703f40b..15c96e4ee8a3 100644
--- a/lib/alloc.c
+++ b/lib/alloc.c
@@ -160,6 +160,7 @@ void *alloc_page(void)
 
 	p = free_head;
 	free_head = *(void **)free_head;
+	memset(p, 0, PAGE_SIZE);
 	return p;
 }
 
diff --git a/lib/x86/vm.c b/lib/x86/vm.c
index bba8ccee7dd0..0882e0a478ca 100644
--- a/lib/x86/vm.c
+++ b/lib/x86/vm.c
@@ -91,8 +91,6 @@ static void setup_mmu(unsigned long len)
 {
     unsigned long *cr3 = alloc_page();
 
-    memset(cr3, 0, PAGE_SIZE);
-
 #ifdef __x86_64__
     if (len < (1ul << 32))
         len = (1ul << 32);  /* map mmio 1:1 */
diff --git a/x86/vmx.c b/x86/vmx.c
index 411ed3211d4d..65c8583a18db 100644
--- a/x86/vmx.c
+++ b/x86/vmx.c
@@ -278,7 +278,6 @@ static void split_large_ept_entry(unsigned long *ptep, int level)
 
 	new_pt = alloc_page();
 	assert(new_pt);
-	memset(new_pt, 0, PAGE_SIZE);
 
 	prototype = pte & ~EPT_ADDR_MASK;
 	if (level == 2)
@@ -618,7 +617,6 @@ static void init_vmcs_guest(void)
 static int init_vmcs(struct vmcs **vmcs)
 {
 	*vmcs = alloc_page();
-	memset(*vmcs, 0, PAGE_SIZE);
 	(*vmcs)->revision_id = basic.revision;
 	/* vmclear first to init vmcs */
 	if (vmcs_clear(*vmcs)) {
@@ -657,7 +655,6 @@ static void init_vmx(void)
 	ulong fix_cr4_set, fix_cr4_clr;
 
 	vmxon_region = alloc_page();
-	memset(vmxon_region, 0, PAGE_SIZE);
 
 	fix_cr0_set =  rdmsr(MSR_IA32_VMX_CR0_FIXED0);
 	fix_cr0_clr =  rdmsr(MSR_IA32_VMX_CR0_FIXED1);
@@ -687,9 +684,7 @@ static void init_vmx(void)
 	*vmxon_region = basic.revision;
 
 	guest_stack = alloc_page();
-	memset(guest_stack, 0, PAGE_SIZE);
 	guest_syscall_stack = alloc_page();
-	memset(guest_syscall_stack, 0, PAGE_SIZE);
 }
 
 static void do_vmxon_off(void *data)
@@ -812,7 +807,6 @@ static void test_vmptrst(void)
 	struct vmcs *vmcs1, *vmcs2;
 
 	vmcs1 = alloc_page();
-	memset(vmcs1, 0, PAGE_SIZE);
 	init_vmcs(&vmcs1);
 	ret = vmcs_save(&vmcs2);
 	report("test vmptrst", (!ret) && (vmcs1 == vmcs2));
diff --git a/x86/vmx_tests.c b/x86/vmx_tests.c
index 58736d789bd5..681204238ed9 100644
--- a/x86/vmx_tests.c
+++ b/x86/vmx_tests.c
@@ -224,7 +224,6 @@ void msr_bmp_init()
 	u32 ctrl_cpu0;
 
 	msr_bitmap = alloc_page();
-	memset(msr_bitmap, 0x0, PAGE_SIZE);
 	ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
 	ctrl_cpu0 |= CPU_MSR_BITMAP;
 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
@@ -567,8 +566,6 @@ static int iobmp_init()
 
 	io_bitmap_a = alloc_page();
 	io_bitmap_b = alloc_page();
-	memset(io_bitmap_a, 0x0, PAGE_SIZE);
-	memset(io_bitmap_b, 0x0, PAGE_SIZE);
 	ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
 	ctrl_cpu0 |= CPU_IO_BITMAP;
 	ctrl_cpu0 &= (~CPU_IO);
@@ -938,7 +935,6 @@ static int setup_ept()
 	}
 	eptp |= (3 << EPTP_PG_WALK_LEN_SHIFT);
 	pml4 = alloc_page();
-	memset(pml4, 0, PAGE_SIZE);
 	eptp |= virt_to_phys(pml4);
 	vmcs_write(EPTP, eptp);
 	support_2m = !!(ept_vpid.val & EPT_CAP_2M_PAGE);
@@ -974,8 +970,6 @@ static int ept_init()
 		return VMX_TEST_EXIT;
 	data_page1 = alloc_page();
 	data_page2 = alloc_page();
-	memset(data_page1, 0x0, PAGE_SIZE);
-	memset(data_page2, 0x0, PAGE_SIZE);
 	*((u32 *)data_page1) = MAGIC_VAL_1;
 	*((u32 *)data_page2) = MAGIC_VAL_2;
 	install_ept(pml4, (unsigned long)data_page1, (unsigned long)data_page2,
@@ -1541,9 +1535,6 @@ static int msr_switch_init(struct vmcs *vmcs)
 	exit_msr_store = alloc_page();
 	exit_msr_load = alloc_page();
 	entry_msr_load = alloc_page();
-	memset(exit_msr_store, 0, PAGE_SIZE);
-	memset(exit_msr_load, 0, PAGE_SIZE);
-	memset(entry_msr_load, 0, PAGE_SIZE);
 	entry_msr_load[0].index = MSR_KERNEL_GS_BASE;
 	entry_msr_load[0].value = MSR_MAGIC;
 
-- 
2.7.4



* [kvm-unit-tests PATCH 4/4] lib/x86/vm: enable malloc and friends
  2016-10-31 18:51 [kvm-unit-tests PATCH 0/4] x86: enable malloc and friends Andrew Jones
                   ` (2 preceding siblings ...)
  2016-10-31 18:51 ` [kvm-unit-tests PATCH 3/4] lib/alloc: return a zeroed page from alloc_page Andrew Jones
@ 2016-10-31 18:51 ` Andrew Jones
  2016-11-01 14:48   ` Paolo Bonzini
  2016-11-01  8:58 ` [kvm-unit-tests PATCH 0/4] x86: " Andrew Jones
  4 siblings, 1 reply; 12+ messages in thread
From: Andrew Jones @ 2016-10-31 18:51 UTC (permalink / raw)
  To: kvm; +Cc: pbonzini, lvivier, thuth

We've had malloc, calloc, free, and memalign available for quite a
while, and arm and powerpc make use of them. x86 hasn't yet, but
let's give it the option. arm and powerpc use the early_alloc_ops,
built on phys_alloc, but x86 already has virtual memory management,
so we can build on that instead.

Signed-off-by: Andrew Jones <drjones@redhat.com>
---
 lib/x86/vm.c | 26 +++++++++++++++++++++++++-
 lib/x86/vm.h |  2 +-
 2 files changed, 26 insertions(+), 2 deletions(-)

diff --git a/lib/x86/vm.c b/lib/x86/vm.c
index 0882e0a478ca..1e1b74bfb645 100644
--- a/lib/x86/vm.c
+++ b/lib/x86/vm.c
@@ -118,15 +118,39 @@ static void setup_mmu(unsigned long len)
     printf("cr4 = %lx\n", read_cr4());
 }
 
+static void *vcalloc(size_t nmemb, size_t size)
+{
+    return vmalloc(nmemb * size);
+}
+
+static void *vmemalign(size_t alignment, size_t size)
+{
+    size_t size_aligned;
+    void *addr;
+
+    assert(alignment && !(alignment & (alignment - 1)));
+    size_aligned = ALIGN(size, alignment);
+    addr = vmalloc(size_aligned);
+    return addr + (size_aligned - size);
+}
+
+static struct alloc_ops vm_alloc_ops = {
+    .malloc = vmalloc,
+    .calloc = vcalloc,
+    .free = vfree,
+    .memalign = vmemalign,
+};
+
 void setup_vm()
 {
     assert(!end_of_memory);
     end_of_memory = fwcfg_get_u64(FW_CFG_RAM_SIZE);
     heap_init(&edata, end_of_memory - (unsigned long)&edata);
     setup_mmu(end_of_memory);
+    alloc_ops = &vm_alloc_ops;
 }
 
-void *vmalloc(unsigned long size)
+void *vmalloc(size_t size)
 {
     void *mem, *p;
     unsigned pages;
diff --git a/lib/x86/vm.h b/lib/x86/vm.h
index 6a4384f5a48d..3c9a71d03cf9 100644
--- a/lib/x86/vm.h
+++ b/lib/x86/vm.h
@@ -7,7 +7,7 @@
 
 void setup_vm();
 
-void *vmalloc(unsigned long size);
+void *vmalloc(size_t size);
 void vfree(void *mem);
 void *vmap(unsigned long long phys, unsigned long size);
 void *alloc_vpage(void);
-- 
2.7.4



* Re: [kvm-unit-tests PATCH 0/4] x86: enable malloc and friends
  2016-10-31 18:51 [kvm-unit-tests PATCH 0/4] x86: enable malloc and friends Andrew Jones
                   ` (3 preceding siblings ...)
  2016-10-31 18:51 ` [kvm-unit-tests PATCH 4/4] lib/x86/vm: enable malloc and friends Andrew Jones
@ 2016-11-01  8:58 ` Andrew Jones
  4 siblings, 0 replies; 12+ messages in thread
From: Andrew Jones @ 2016-11-01  8:58 UTC (permalink / raw)
  To: kvm; +Cc: pbonzini, lvivier, thuth

On Mon, Oct 31, 2016 at 07:51:42PM +0100, Andrew Jones wrote:
> And do some other alloc related things too...

I thought of some further needed improvements and will send a
v2 shortly.

> 
> Andrew Jones (4):
>   lib/alloc: improve robustness
>   x86: lib/alloc: move heap management to common code
>   lib/alloc: return a zeroed page from alloc_page
>   lib/x86/vm: enable malloc and friends
> 
>  lib/alloc.c         | 50 ++++++++++++++++++++++++++++++++++++-------
>  lib/alloc.h         | 10 +++++++++
>  lib/x86/vm.c        | 61 ++++++++++++++++++++++++-----------------------------
>  lib/x86/vm.h        |  2 +-
>  x86/Makefile.common |  1 +
>  x86/vmx.c           |  6 ------
>  x86/vmx_tests.c     |  9 --------
>  7 files changed, 81 insertions(+), 58 deletions(-)
> 
> -- 
> 2.7.4
> 

* Re: [kvm-unit-tests PATCH 3/4] lib/alloc: return a zeroed page from alloc_page
  2016-10-31 18:51 ` [kvm-unit-tests PATCH 3/4] lib/alloc: return a zeroed page from alloc_page Andrew Jones
@ 2016-11-01 14:47   ` Paolo Bonzini
  0 siblings, 0 replies; 12+ messages in thread
From: Paolo Bonzini @ 2016-11-01 14:47 UTC (permalink / raw)
  To: Andrew Jones, kvm; +Cc: lvivier, thuth



On 31/10/2016 19:51, Andrew Jones wrote:
> This allows us to remove a bunch of memsets.

Please add a new function alloc_zeroed_page instead.

Paolo
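
A minimal sketch of the suggested helper (hypothetical name, not part of this
series), built on the alloc_page() and PAGE_SIZE interfaces introduced in
patch 2 and assuming libcflat's memset() is available:

	void *alloc_zeroed_page(void)
	{
		void *p = alloc_page();

		/* alloc_page() returns NULL when the heap is exhausted */
		if (p)
			memset(p, 0, PAGE_SIZE);
		return p;
	}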


* Re: [kvm-unit-tests PATCH 4/4] lib/x86/vm: enable malloc and friends
  2016-10-31 18:51 ` [kvm-unit-tests PATCH 4/4] lib/x86/vm: enable malloc and friends Andrew Jones
@ 2016-11-01 14:48   ` Paolo Bonzini
  2016-11-01 15:49     ` Andrew Jones
  0 siblings, 1 reply; 12+ messages in thread
From: Paolo Bonzini @ 2016-11-01 14:48 UTC (permalink / raw)
  To: Andrew Jones, kvm; +Cc: lvivier, thuth



On 31/10/2016 19:51, Andrew Jones wrote:
> We've had malloc, calloc, free, and memalign available for quite a
> while, and arm and powerpc make use of them. x86 hasn't yet, but
> let's give it the option. arm and powerpc use the early_alloc_ops,
> built on phys_alloc, but x86 already has virtual memory management,
> so we can build on that instead.

What are your plans for using them?  vmalloc imposes a pretty large
alignment and is quite different from early_alloc_ops, so I'm not sure
it's a good idea to conflate the two into the same high-level API.

Thanks,

Paolo


* Re: [kvm-unit-tests PATCH 4/4] lib/x86/vm: enable malloc and friends
  2016-11-01 14:48   ` Paolo Bonzini
@ 2016-11-01 15:49     ` Andrew Jones
  0 siblings, 0 replies; 12+ messages in thread
From: Andrew Jones @ 2016-11-01 15:49 UTC (permalink / raw)
  To: Paolo Bonzini; +Cc: kvm, lvivier, thuth

On Tue, Nov 01, 2016 at 03:48:54PM +0100, Paolo Bonzini wrote:
> 
> 
> On 31/10/2016 19:51, Andrew Jones wrote:
> > We've had malloc, calloc, free, and memalign available for quite a
> > while, and arm and powerpc make use of them. x86 hasn't yet, but
> > let's give it the option. arm and powerpc use the early_alloc_ops,
> > built on phys_alloc, but x86 already has virtual memory management,
> > so we can build on that instead.
> 
> What are your plans for using them?  vmalloc imposes a pretty large
> alignment and is quite different from early_alloc_ops, so I'm not sure
> it's a good idea to conflate the two into the same high-level API.

I was mostly thinking that common lib/* code might want to start
using malloc, etc. Unless x86 implements them, that won't be
possible. I don't have a need yet though, so I'm fine with leaving
it until a different allocation scheme can be used as the base.
That said, we could also use the vmalloc allocation scheme now,
just to satisfy malloc, etc., and then improve the underlying
scheme later.

Anyway, I see I had a bug in my memalign implementation, so it needs
a bit more work either way.
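
For reference, one possible minimal fix (a sketch only, not the actual v2):
since vmalloc() hands back page-aligned memory, any power-of-two alignment up
to PAGE_SIZE is already satisfied, so vmemalign could simply reject larger
alignments for now:

    static void *vmemalign(size_t alignment, size_t size)
    {
        /* sketch: vmalloc() returns page-aligned memory, so any
         * power-of-two alignment <= PAGE_SIZE is already met;
         * larger alignments would need real support */
        assert(alignment && !(alignment & (alignment - 1)));
        assert(alignment <= PAGE_SIZE);
        return vmalloc(size);
    }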

Thanks,
drew


* Re: [kvm-unit-tests PATCH 1/4] lib/alloc: improve robustness
  2016-10-31 18:51 ` [kvm-unit-tests PATCH 1/4] lib/alloc: improve robustness Andrew Jones
@ 2016-11-02  6:08   ` Thomas Huth
  2016-11-02 10:24     ` Andrew Jones
  0 siblings, 1 reply; 12+ messages in thread
From: Thomas Huth @ 2016-11-02  6:08 UTC (permalink / raw)
  To: Andrew Jones, kvm; +Cc: pbonzini, lvivier


On 31.10.2016 19:51, Andrew Jones wrote:
> Add some asserts to make sure phys_alloc_init is called only once
> and before any other alloc calls.
> 
> Signed-off-by: Andrew Jones <drjones@redhat.com>
> ---
>  lib/alloc.c | 17 +++++++++--------
>  1 file changed, 9 insertions(+), 8 deletions(-)
> 
> diff --git a/lib/alloc.c b/lib/alloc.c
> index e1d7b8a380ff..a83c8c5db850 100644
> --- a/lib/alloc.c
> +++ b/lib/alloc.c
> @@ -42,12 +42,13 @@ void phys_alloc_show(void)
>  
>  void phys_alloc_init(phys_addr_t base_addr, phys_addr_t size)
>  {
> -	spin_lock(&lock);
> +	/* we should only be called once by one processor */
> +	assert(!top);
> +	assert(size);

If you remove the spinlock, isn't there a potential race between the
assert(!top) and the "top = ..." below? E.g. if two CPUs enter this
function at the same time, both could check and pass assert(!top)
independently, and then do the "top = ..." in parallel?

>  	base = base_addr;
>  	top = base + size;
>  	align_min = DEFAULT_MINIMUM_ALIGNMENT;
> -	nr_regions = 0;
> -	spin_unlock(&lock);
>  }
>  
>  void phys_alloc_set_minimum_alignment(phys_addr_t align)
> @@ -63,14 +64,14 @@ static phys_addr_t phys_alloc_aligned_safe(phys_addr_t size,
>  {
>  	static bool warned = false;
>  	phys_addr_t addr, size_orig = size;
> -	u64 top_safe;
> +	u64 top_safe = top;
>  
> -	spin_lock(&lock);
> +	if (safe && sizeof(long) == 4)
> +		top_safe = MIN(top, 1ULL << 32);
>  
> -	top_safe = top;
> +	assert(base < top_safe);
>  
> -	if (safe && sizeof(long) == 4)
> -		top_safe = MIN(top_safe, 1ULL << 32);
> +	spin_lock(&lock);
>  
>  	align = MAX(align, align_min);

This hunk rather looks like a code re-arrangement to me which is not
directly related to the patch description - maybe you should put this
into a separate patch?

 Thomas




* Re: [kvm-unit-tests PATCH 2/4] x86: lib/alloc: move heap management to common code
  2016-10-31 18:51 ` [kvm-unit-tests PATCH 2/4] x86: lib/alloc: move heap management to common code Andrew Jones
@ 2016-11-02  6:14   ` Thomas Huth
  0 siblings, 0 replies; 12+ messages in thread
From: Thomas Huth @ 2016-11-02  6:14 UTC (permalink / raw)
  To: Andrew Jones, kvm; +Cc: pbonzini, lvivier

On 31.10.2016 19:51, Andrew Jones wrote:
> This allows other arches to use {alloc,free}_page.
> 
> Signed-off-by: Andrew Jones <drjones@redhat.com>
> ---
>  lib/alloc.c         | 32 ++++++++++++++++++++++++++++++++
>  lib/alloc.h         | 10 ++++++++++
>  lib/x86/vm.c        | 33 ++-------------------------------
>  x86/Makefile.common |  1 +
>  4 files changed, 45 insertions(+), 31 deletions(-)

Reviewed-by: Thomas Huth <thuth@redhat.com>




* Re: [kvm-unit-tests PATCH 1/4] lib/alloc: improve robustness
  2016-11-02  6:08   ` Thomas Huth
@ 2016-11-02 10:24     ` Andrew Jones
  0 siblings, 0 replies; 12+ messages in thread
From: Andrew Jones @ 2016-11-02 10:24 UTC (permalink / raw)
  To: Thomas Huth; +Cc: kvm, pbonzini, lvivier

On Wed, Nov 02, 2016 at 07:08:51AM +0100, Thomas Huth wrote:
> On 31.10.2016 19:51, Andrew Jones wrote:
> > Add some asserts to make sure phys_alloc_init is called only once
> > and before any other alloc calls.
> > 
> > Signed-off-by: Andrew Jones <drjones@redhat.com>
> > ---
> >  lib/alloc.c | 17 +++++++++--------
> >  1 file changed, 9 insertions(+), 8 deletions(-)
> > 
> > diff --git a/lib/alloc.c b/lib/alloc.c
> > index e1d7b8a380ff..a83c8c5db850 100644
> > --- a/lib/alloc.c
> > +++ b/lib/alloc.c
> > @@ -42,12 +42,13 @@ void phys_alloc_show(void)
> >  
> >  void phys_alloc_init(phys_addr_t base_addr, phys_addr_t size)
> >  {
> > -	spin_lock(&lock);
> > +	/* we should only be called once by one processor */
> > +	assert(!top);
> > +	assert(size);
> 
> If you remove the spinlock, isn't there a potential race between the
> assert(!top) and the "top = ..." below? E.g. if two CPUs enter this
> function at the same time, both could check and passs assert(!top)
> independently, and then do the "top = ..." in parallel?

Good point. Adding the asserts forces callers to do the right thing,
so we might as well keep the spinlock to fully enforce it, rather
than assume callers will mostly behave. For v2 I'll also update the
documentation in alloc.h, stating that this should only be called
once, from the primary cpu, before the secondaries are started, so
callers know what the right thing is.
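
Something along these lines, i.e. keeping the lock around the checks as well
(an illustrative sketch only, not necessarily the actual v2):

	void phys_alloc_init(phys_addr_t base_addr, phys_addr_t size)
	{
		spin_lock(&lock);
		/* only to be called once, before any other alloc calls;
		 * a second concurrent caller serializes on the lock and
		 * then trips assert(!top) */
		assert(!top);
		assert(size);
		base = base_addr;
		top = base + size;
		align_min = DEFAULT_MINIMUM_ALIGNMENT;
		spin_unlock(&lock);
	}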

> 
> >  	base = base_addr;
> >  	top = base + size;
> >  	align_min = DEFAULT_MINIMUM_ALIGNMENT;
> > -	nr_regions = 0;
> > -	spin_unlock(&lock);
> >  }
> >  
> >  void phys_alloc_set_minimum_alignment(phys_addr_t align)
> > @@ -63,14 +64,14 @@ static phys_addr_t phys_alloc_aligned_safe(phys_addr_t size,
> >  {
> >  	static bool warned = false;
> >  	phys_addr_t addr, size_orig = size;
> > -	u64 top_safe;
> > +	u64 top_safe = top;
> >  
> > -	spin_lock(&lock);
> > +	if (safe && sizeof(long) == 4)
> > +		top_safe = MIN(top, 1ULL << 32);
> >  
> > -	top_safe = top;
> > +	assert(base < top_safe);
> >  
> > -	if (safe && sizeof(long) == 4)
> > -		top_safe = MIN(top_safe, 1ULL << 32);
> > +	spin_lock(&lock);
> >  
> >  	align = MAX(align, align_min);
> 
> This hunk rather looks like a code re-arrangement to me which is not
> directly related to the patch description - maybe you should put this
> into a separate patch?

Much of it was re-arranging, but only as a side effect of getting the
'assert(base < top_safe)' added in the right place. That assert
ensures the subtraction below is safe and also rules out base == top_safe,
which would most likely mean base == top_safe == 0, i.e. not yet
initialized. Hmm, it should probably be assert(top_safe && base < top_safe),
though, to also account for the case where no free memory is left.
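
That would make the prologue of phys_alloc_aligned_safe look roughly like
this (a sketch of the idea, not the final v2):

	u64 top_safe = top;

	if (safe && sizeof(long) == 4)
		top_safe = MIN(top, 1ULL << 32);

	/* catches both "not initialized" and "no free memory left" */
	assert(top_safe && base < top_safe);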

Thanks,
drew


