On Tue, Jul 12, 2016 at 06:23:01PM +0200, Peter Lieven wrote:
> the allocated stack will be adjusted to the minimum supported stack size
> by the OS and rounded up to be a multiple of the system pagesize.
> Additionally an architecture dependent guard page is added to the stack
> to catch stack overflows.
>
> Signed-off-by: Peter Lieven
> ---
>  include/sysemu/os-posix.h | 23 +++++++++++++++++++++++
>  util/oslib-posix.c        | 46 ++++++++++++++++++++++++++++++++++++++++++++++
>  2 files changed, 69 insertions(+)
>
> diff --git a/include/sysemu/os-posix.h b/include/sysemu/os-posix.h
> index 9c7dfdf..7630665 100644
> --- a/include/sysemu/os-posix.h
> +++ b/include/sysemu/os-posix.h
> @@ -60,4 +60,27 @@ int qemu_utimens(const char *path, const qemu_timespec *times);
>
>  bool is_daemonized(void);
>
> +/**
> + * qemu_alloc_stack:
> + * @sz: size of required stack in bytes
> + *
> + * Allocate memory that can be used as a stack, for instance for
> + * coroutines. If the memory cannot be allocated, this function
> + * will abort (like g_malloc()).
> + *
> + * The allocated stack must be freed with qemu_free_stack().
> + *
> + * Returns: pointer to (the lowest address of) the stack memory.
> + */
> +void *qemu_alloc_stack(size_t sz);
> +
> +/**
> + * qemu_free_stack:
> + * @stack: stack to free
> + * @sz: size of stack in bytes
> + *
> + * Free a stack allocated via qemu_alloc_stack().
> + */
> +void qemu_free_stack(void *stack, size_t sz);
> +
>  #endif
> diff --git a/util/oslib-posix.c b/util/oslib-posix.c
> index e2e1d4d..2303ca6 100644
> --- a/util/oslib-posix.c
> +++ b/util/oslib-posix.c
> @@ -497,3 +497,49 @@ pid_t qemu_fork(Error **errp)
>      }
>      return pid;
>  }
> +
> +static size_t adjust_stack_size(size_t sz)
> +{
> +#ifdef _SC_THREAD_STACK_MIN
> +    /* avoid stacks smaller than _SC_THREAD_STACK_MIN */
> +    sz = MAX(MAX(sysconf(_SC_THREAD_STACK_MIN), 0), sz);
> +#endif
> +    /* adjust stack size to a multiple of the page size */
> +    sz = ROUND_UP(sz, getpagesize());
> +    return sz;
> +}
> +
> +void *qemu_alloc_stack(size_t sz)
> +{
> +    void *ptr, *guardpage;
> +    size_t pagesz = getpagesize();
> +    sz = adjust_stack_size(sz);
> +
> +    ptr = mmap(NULL, sz, PROT_READ | PROT_WRITE,

It's cleaner to account for the guard page separately and give the caller
the sz bytes they expected:

  sz + pagesz

> +               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
> +    if (ptr == MAP_FAILED) {
> +        abort();
> +    }
> +
> +#if defined(HOST_IA64)
> +    /* separate register stack */
> +    guardpage = ptr + (((sz - pagesz) / 2) & ~pagesz);
> +#elif defined(HOST_HPPA)
> +    /* stack grows up */
> +    guardpage = ptr + sz - pagesz;
> +#else
> +    /* stack grows down */
> +    guardpage = ptr;
> +#endif
> +    if (mprotect(guardpage, pagesz, PROT_NONE) != 0) {
> +        abort();
> +    }
> +
> +    return ptr;
> +}
> +
> +void qemu_free_stack(void *stack, size_t sz)
> +{
> +    sz = adjust_stack_size(sz);
> +    munmap(stack, sz);
> +}
> --
> 1.9.1
>
>
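
To make that concrete, here is a rough sketch of how the allocation could
look with the guard page accounted for separately (illustration only, not
part of the patch: it covers just the common stack-grows-down case and
leaves out the _SC_THREAD_STACK_MIN clamp and the IA64/HPPA special cases):

  #include <stdlib.h>
  #include <sys/mman.h>
  #include <unistd.h>

  void *qemu_alloc_stack(size_t sz)
  {
      size_t pagesz = getpagesize();
      void *ptr;

      /* round the requested size up to a whole number of pages */
      sz = (sz + pagesz - 1) & ~(pagesz - 1);

      /* reserve one extra page for the guard page, on top of sz */
      ptr = mmap(NULL, sz + pagesz, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (ptr == MAP_FAILED) {
          abort();
      }

      /* the lowest page becomes the guard; the usable stack sits above it */
      if (mprotect(ptr, pagesz, PROT_NONE) != 0) {
          abort();
      }

      /* caller still gets the full sz usable bytes */
      return (char *)ptr + pagesz;
  }

  void qemu_free_stack(void *stack, size_t sz)
  {
      size_t pagesz = getpagesize();

      sz = (sz + pagesz - 1) & ~(pagesz - 1);
      /* unmap the guard page together with the usable region */
      munmap((char *)stack - pagesz, sz + pagesz);
  }

That keeps the size accounting symmetric between alloc and free, and the
caller never has to know that an extra page was mapped.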