From: David Hildenbrand <david@redhat.com>
To: qemu-devel@nongnu.org
Cc: Pankaj Gupta <pankaj.gupta.linux@gmail.com>,
Eduardo Habkost <ehabkost@redhat.com>,
"Michael S. Tsirkin" <mst@redhat.com>,
David Hildenbrand <david@redhat.com>,
"Dr . David Alan Gilbert" <dgilbert@redhat.com>,
Igor Mammedov <imammedo@redhat.com>,
Paolo Bonzini <pbonzini@redhat.com>,
Marek Kedzierski <mkedzier@redhat.com>
Subject: [PATCH v1 2/3] util/oslib-posix: Introduce and use MemsetContext for touch_all_pages()
Date: Wed, 14 Jul 2021 13:23:05 +0200
Message-ID: <20210714112306.67793-3-david@redhat.com>
In-Reply-To: <20210714112306.67793-1-david@redhat.com>

Let's minimize the number of global variables to prepare for
os_mem_prealloc() being called concurrently, and to make the code a bit
easier to read.

The only consumer that really needs a global variable is the SIGBUS
handler, which will require protection via a mutex in the future either
way, as we cannot mess with the SIGBUS handler concurrently.
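
To illustrate the idea, here is a minimal, self-contained sketch of the
pattern (not the QEMU code itself): the per-invocation state lives on the
caller's stack, the worker threads reach it through a back-pointer, and only
the signal handler would still need a global pointer to the currently active
context. Names such as run_workers() and worker_fn() are invented for this
example, and the condition-variable handshake and error handling of the real
code are omitted.

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct worker;

typedef struct context {
    bool all_threads_created;   /* handshake omitted in this sketch */
    bool any_thread_failed;
    struct worker *workers;
    int num_workers;
} context;

typedef struct worker {
    pthread_t thread;
    context *ctx;               /* back-pointer to the per-invocation state */
} worker;

/* The only global left: what the signal handler would look at. */
static context *active_context;

static void *worker_fn(void *arg)
{
    worker *w = arg;

    /* ... do this thread's share of the work; on failure: ... */
    if (0 /* work failed */) {
        w->ctx->any_thread_failed = true;
    }
    return NULL;
}

bool run_workers(int num_workers)
{
    context ctx = { .num_workers = num_workers };
    int i;

    ctx.workers = calloc(num_workers, sizeof(*ctx.workers));
    for (i = 0; i < num_workers; i++) {
        ctx.workers[i].ctx = &ctx;
        pthread_create(&ctx.workers[i].thread, NULL, worker_fn,
                       &ctx.workers[i]);
    }

    active_context = &ctx;      /* exposed to the signal handler only */
    for (i = 0; i < num_workers; i++) {
        pthread_join(ctx.workers[i].thread, NULL);
    }
    active_context = NULL;

    free(ctx.workers);
    return ctx.any_thread_failed;
}

Because the context is stack-allocated and freshly initialized per call, two
invocations no longer trample each other's bookkeeping; only the single
handler pointer still needs coordination.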
Signed-off-by: David Hildenbrand <david@redhat.com>
---
util/oslib-posix.c | 81 ++++++++++++++++++++++++++++------------------
1 file changed, 50 insertions(+), 31 deletions(-)
diff --git a/util/oslib-posix.c b/util/oslib-posix.c
index 679796ac1f..60d1da2d6c 100644
--- a/util/oslib-posix.c
+++ b/util/oslib-posix.c
@@ -73,22 +73,30 @@
#define MAX_MEM_PREALLOC_THREAD_COUNT 16
+struct MemsetThread;
+
+typedef struct MemsetContext {
+ bool all_threads_created;
+ bool any_thread_failed;
+ struct MemsetThread *threads;
+ int num_threads;
+} MemsetContext;
+
struct MemsetThread {
char *addr;
size_t numpages;
size_t hpagesize;
QemuThread pgthread;
sigjmp_buf env;
+ MemsetContext *context;
};
typedef struct MemsetThread MemsetThread;
-static MemsetThread *memset_thread;
-static int memset_num_threads;
-static bool memset_thread_failed;
+/* used by sigbus_handler() */
+static MemsetContext *sigbus_memset_context;
static QemuMutex page_mutex;
static QemuCond page_cond;
-static bool threads_created_flag;
int qemu_get_thread_id(void)
{
@@ -439,10 +447,13 @@ const char *qemu_get_exec_dir(void)
static void sigbus_handler(int signal)
{
int i;
- if (memset_thread) {
- for (i = 0; i < memset_num_threads; i++) {
- if (qemu_thread_is_self(&memset_thread[i].pgthread)) {
- siglongjmp(memset_thread[i].env, 1);
+
+ if (sigbus_memset_context) {
+ for (i = 0; i < sigbus_memset_context->num_threads; i++) {
+ MemsetThread *thread = &sigbus_memset_context->threads[i];
+
+ if (qemu_thread_is_self(&thread->pgthread)) {
+ siglongjmp(thread->env, 1);
}
}
}
@@ -459,7 +470,7 @@ static void *do_touch_pages(void *arg)
* clearing until all threads have been created.
*/
qemu_mutex_lock(&page_mutex);
- while(!threads_created_flag){
+ while (!memset_args->context->all_threads_created) {
qemu_cond_wait(&page_cond, &page_mutex);
}
qemu_mutex_unlock(&page_mutex);
@@ -470,7 +481,7 @@ static void *do_touch_pages(void *arg)
pthread_sigmask(SIG_UNBLOCK, &set, &oldset);
if (sigsetjmp(memset_args->env, 1)) {
- memset_thread_failed = true;
+ memset_args->context->any_thread_failed = true;
} else {
char *addr = memset_args->addr;
size_t numpages = memset_args->numpages;
@@ -502,14 +513,14 @@ static void *do_madv_populate_write_pages(void *arg)
/* See do_touch_pages(). */
qemu_mutex_lock(&page_mutex);
- while (!threads_created_flag) {
+ while (!memset_args->context->all_threads_created) {
qemu_cond_wait(&page_cond, &page_mutex);
}
qemu_mutex_unlock(&page_mutex);
ret = qemu_madvise(addr, size, QEMU_MADV_POPULATE_WRITE);
if (ret) {
- memset_thread_failed = true;
+ memset_args->context->any_thread_failed = true;
}
return NULL;
}
@@ -530,6 +541,9 @@ static bool touch_all_pages(char *area, size_t hpagesize, size_t numpages,
int smp_cpus, bool use_madv_populate_write)
{
static gsize initialized = 0;
+ MemsetContext context = {
+ .num_threads = get_memset_num_threads(smp_cpus),
+ };
size_t numpages_per_thread, leftover;
void *(*touch_fn)(void *);
char *addr = area;
@@ -547,34 +561,39 @@ static bool touch_all_pages(char *area, size_t hpagesize, size_t numpages,
touch_fn = do_touch_pages;
}
- memset_thread_failed = false;
- threads_created_flag = false;
- memset_num_threads = get_memset_num_threads(smp_cpus);
- memset_thread = g_new0(MemsetThread, memset_num_threads);
- numpages_per_thread = numpages / memset_num_threads;
- leftover = numpages % memset_num_threads;
- for (i = 0; i < memset_num_threads; i++) {
- memset_thread[i].addr = addr;
- memset_thread[i].numpages = numpages_per_thread + (i < leftover);
- memset_thread[i].hpagesize = hpagesize;
- qemu_thread_create(&memset_thread[i].pgthread, "touch_pages",
- touch_fn, &memset_thread[i],
+ context.threads = g_new0(MemsetThread, context.num_threads);
+ numpages_per_thread = numpages / context.num_threads;
+ leftover = numpages % context.num_threads;
+ for (i = 0; i < context.num_threads; i++) {
+ context.threads[i].addr = addr;
+ context.threads[i].numpages = numpages_per_thread + (i < leftover);
+ context.threads[i].hpagesize = hpagesize;
+ context.threads[i].context = &context;
+ qemu_thread_create(&context.threads[i].pgthread, "touch_pages",
+ touch_fn, &context.threads[i],
QEMU_THREAD_JOINABLE);
- addr += memset_thread[i].numpages * hpagesize;
+ addr += context.threads[i].numpages * hpagesize;
+ }
+
+ if (!use_madv_populate_write) {
+ sigbus_memset_context = &context;
}
qemu_mutex_lock(&page_mutex);
- threads_created_flag = true;
+ context.all_threads_created = true;
qemu_cond_broadcast(&page_cond);
qemu_mutex_unlock(&page_mutex);
- for (i = 0; i < memset_num_threads; i++) {
- qemu_thread_join(&memset_thread[i].pgthread);
+ for (i = 0; i < context.num_threads; i++) {
+ qemu_thread_join(&context.threads[i].pgthread);
+ }
+
+ if (!use_madv_populate_write) {
+ sigbus_memset_context = NULL;
}
- g_free(memset_thread);
- memset_thread = NULL;
+ g_free(context.threads);
- return memset_thread_failed;
+ return context.any_thread_failed;
}
static bool madv_populate_write_possible(char *area, size_t pagesize)
--
2.31.1
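
For readers unfamiliar with the recovery pattern used by do_touch_pages()
and sigbus_handler() above, here is a rough sketch, again only illustrative
and with invented names such as bus_worker_fn(): each worker saves a
sigjmp_buf, and the process-wide SIGBUS handler identifies the faulting
thread and siglongjmp()s back into it, so the worker's sigsetjmp() returns
non-zero and the failure is recorded instead of the process dying. The real
code records the thread id at creation time and serializes handler
installation; both are simplified here.

#include <pthread.h>
#include <setjmp.h>
#include <signal.h>
#include <stdbool.h>
#include <string.h>

#define MAX_WORKERS 16

static struct bus_worker {
    pthread_t tid;
    sigjmp_buf env;
    bool failed;
} bus_workers[MAX_WORKERS];
static int num_bus_workers;

/* The process-wide handler: find the faulting thread, jump back into it. */
static void bus_handler(int sig)
{
    (void)sig;
    for (int i = 0; i < num_bus_workers; i++) {
        if (pthread_equal(pthread_self(), bus_workers[i].tid)) {
            siglongjmp(bus_workers[i].env, 1);
        }
    }
}

void *bus_worker_fn(void *arg)
{
    struct bus_worker *w = arg;

    w->tid = pthread_self();    /* the real code records this at creation */
    if (sigsetjmp(w->env, 1)) {
        w->failed = true;       /* we arrived here via siglongjmp() */
    } else {
        /* ... touch pages; a SIGBUS raised here ends up in bus_handler() ... */
    }
    return NULL;
}

void install_bus_handler(void)
{
    struct sigaction act;

    memset(&act, 0, sizeof(act));
    act.sa_handler = bus_handler;
    sigemptyset(&act.sa_mask);
    sigaction(SIGBUS, &act, NULL);
}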