All of lore.kernel.org
 help / color / mirror / Atom feed
From: Paolo Bonzini <pbonzini@redhat.com>
To: Umesh Deshpande <udeshpan@redhat.com>
Cc: kvm@vger.kernel.org, quintela@redhat.com, mtosatti@redhat.com
Subject: Re: [RFC PATCH v3 3/4] lock to protect memslots
Date: Thu, 11 Aug 2011 18:20:01 +0200	[thread overview]
Message-ID: <4E440131.6020200@redhat.com> (raw)
In-Reply-To: <deaf1c683fd32c28a72853710b9564dedc7d0060.1313076455.git.udeshpan@redhat.com>

> diff --git a/buffered_file.c b/buffered_file.c
> index b64ada7..5735e18 100644
> --- a/buffered_file.c
> +++ b/buffered_file.c
> @@ -243,7 +243,9 @@ static void *migrate_vm(void *opaque)
>
>       while (!s->closed) {
>           if (s->freeze_output) {
> +            qemu_mutex_unlock_iothread();
>               s->wait_for_unfreeze(s);
> +            qemu_mutex_lock_iothread();
>               s->freeze_output = 0;
>               continue;
>           }
> @@ -255,7 +257,9 @@ static void *migrate_vm(void *opaque)
>           current_time = qemu_get_clock_ms(rt_clock);
>           if (!s->closed && (expire_time > current_time)) {
>               tv.tv_usec = 1000 * (expire_time - current_time);
> +            qemu_mutex_unlock_iothread();
>               select(0, NULL, NULL, NULL, &tv);
> +            qemu_mutex_lock_iothread();
>               continue;
>           }
>

I believe these should already be in patch 1, or at any rate in a separate patch.

> diff --git a/cpus.c b/cpus.c
> index de70e02..6090c44 100644
> --- a/cpus.c
> +++ b/cpus.c
> @@ -666,6 +666,7 @@ int qemu_init_main_loop(void)
>       qemu_cond_init(&qemu_work_cond);
>       qemu_mutex_init(&qemu_fair_mutex);
>       qemu_mutex_init(&qemu_global_mutex);
> +    qemu_mutex_init(&ram_list.mutex);
>       qemu_mutex_lock(&qemu_global_mutex);
>
>       qemu_thread_get_self(&io_thread);

These are only executed if CONFIG_IOTHREAD; you can probably move it 
somewhere in exec.c.

> @@ -919,6 +920,17 @@ void qemu_mutex_unlock_iothread(void)
>       qemu_mutex_unlock(&qemu_global_mutex);
>   }
>
> +void qemu_mutex_lock_ramlist(void)
> +{
> +    qemu_mutex_lock(&ram_list.mutex);
> +}
> +
> +void qemu_mutex_unlock_ramlist(void)
> +{
> +    qemu_mutex_unlock(&ram_list.mutex);
> +}
> +
> +

And these too.

>   static int all_vcpus_paused(void)
>   {
>       CPUState *penv = first_cpu;
> diff --git a/exec.c b/exec.c
> index 0e2ce57..7bfb36f 100644
> --- a/exec.c
> +++ b/exec.c
> @@ -2972,6 +2972,8 @@ ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
>       }
>       new_block->length = size;
>
> +    qemu_mutex_lock_ramlist();
> +
>       QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
>
>       ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
> @@ -2979,6 +2981,8 @@ ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
>       memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
>              0xff, size >> TARGET_PAGE_BITS);
>
> +    qemu_mutex_unlock_ramlist();
> +
>       if (kvm_enabled())
>           kvm_setup_guest_memory(new_block->host, size);
>
> @@ -2996,7 +3000,9 @@ void qemu_ram_free_from_ptr(ram_addr_t addr)
>
>       QLIST_FOREACH(block, &ram_list.blocks, next) {
>           if (addr == block->offset) {
> +            qemu_mutex_lock_ramlist();
>               QLIST_REMOVE(block, next);
> +            qemu_mutex_unlock_ramlist();
>               qemu_free(block);
>               return;
>           }
> @@ -3009,7 +3015,9 @@ void qemu_ram_free(ram_addr_t addr)
>
>       QLIST_FOREACH(block, &ram_list.blocks, next) {
>           if (addr == block->offset) {
> +            qemu_mutex_lock_ramlist();
>               QLIST_REMOVE(block, next);
> +            qemu_mutex_unlock_ramlist();
>               if (block->flags & RAM_PREALLOC_MASK) {
>                   ;
>               } else if (mem_path) {
> @@ -3117,8 +3125,10 @@ void *qemu_get_ram_ptr(ram_addr_t addr)
>           if (addr - block->offset < block->length) {
>               /* Move this entry to the start of the list.  */
>               if (block != QLIST_FIRST(&ram_list.blocks)) {
> +                qemu_mutex_lock_ramlist();
>                   QLIST_REMOVE(block, next);
>                   QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
> +                qemu_mutex_unlock_ramlist();

Theoretically qemu_get_ram_ptr should be protected.  The problem is not 
just accessing the ramlist, it is accessing the data underneath it 
before anyone frees it.  Luckily we can set aside that problem for now, 
because qemu_ram_free_from_ptr is only used by device assignment and 
device assignment makes VMs unmigratable.

If we support generic RAM hotplug/hotunplug, we would probably need 
something RCU-like to protect accesses, since protecting all uses of 
qemu_get_ram_ptr is not practical.

>               }
>               if (xen_mapcache_enabled()) {
>                   /* We need to check if the requested address is in the RAM
> diff --git a/qemu-common.h b/qemu-common.h
> index abd7a75..b802883 100644
> --- a/qemu-common.h
> +++ b/qemu-common.h
> @@ -212,6 +212,8 @@ char *qemu_strndup(const char *str, size_t size);
>
>   void qemu_mutex_lock_iothread(void);
>   void qemu_mutex_unlock_iothread(void);
> +void qemu_mutex_lock_ramlist(void);
> +void qemu_mutex_unlock_ramlist(void);
>
>   int qemu_open(const char *name, int flags, ...);
>   ssize_t qemu_write_full(int fd, const void *buf, size_t count)

Paolo

  reply	other threads:[~2011-08-11 16:20 UTC|newest]

Thread overview: 19+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2011-08-11 15:32 [RFC PATCH v3 0/4] Separate thread for VM migration Umesh Deshpande
2011-08-11 15:32 ` [RFC PATCH v3 1/4] separate " Umesh Deshpande
2011-08-11 16:18   ` Paolo Bonzini
2011-08-11 17:36     ` Umesh Deshpande
2011-08-12  6:40       ` Paolo Bonzini
2011-08-11 15:32 ` [RFC PATCH v3 2/4] Making iothread block for migrate_cancel Umesh Deshpande
2011-08-11 15:32 ` [RFC PATCH v3 3/4] lock to protect memslots Umesh Deshpande
2011-08-11 16:20   ` Paolo Bonzini [this message]
2011-08-12  6:45     ` Paolo Bonzini
2011-08-15  6:45       ` Umesh Deshpande
2011-08-15 14:10         ` Paolo Bonzini
2011-08-15  7:26       ` Marcelo Tosatti
2011-08-15 14:14         ` Paolo Bonzini
2011-08-15 20:27           ` Umesh Deshpande
2011-08-16  6:15             ` Paolo Bonzini
2011-08-16  7:56               ` Paolo Bonzini
2011-08-11 15:32 ` [RFC PATCH v3 4/4] Separate migration bitmap Umesh Deshpande
2011-08-11 16:23 ` [RFC PATCH v3 0/4] Separate thread for VM migration Paolo Bonzini
2011-08-11 18:25 ` Anthony Liguori

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=4E440131.6020200@redhat.com \
    --to=pbonzini@redhat.com \
    --cc=kvm@vger.kernel.org \
    --cc=mtosatti@redhat.com \
    --cc=quintela@redhat.com \
    --cc=udeshpan@redhat.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.