From mboxrd@z Thu Jan  1 00:00:00 1970
Received: from eggs.gnu.org ([140.186.70.92]:58385)
	by lists.gnu.org with esmtp (Exim 4.71) (envelope-from )
	id 1QN76K-0000uM-E7 for qemu-devel@nongnu.org; Thu, 19 May 2011 13:33:33 -0400
Received: from Debian-exim by eggs.gnu.org with spam-scanned (Exim 4.71)
	(envelope-from ) id 1QN76I-0001rk-Qk
	for qemu-devel@nongnu.org; Thu, 19 May 2011 13:33:32 -0400
Received: from smtp.citrix.com ([66.165.176.89]:57797)
	by eggs.gnu.org with esmtp (Exim 4.71) (envelope-from )
	id 1QN76I-0001re-Hx for qemu-devel@nongnu.org; Thu, 19 May 2011 13:33:30 -0400
From:
Date: Thu, 19 May 2011 18:35:42 +0100
Message-ID: <1305826546-19059-1-git-send-email-stefano.stabellini@eu.citrix.com>
In-Reply-To:
References:
MIME-Version: 1.0
Content-Type: text/plain
Subject: [Qemu-devel] [PATCH v2 1/5] xen: fix qemu_map_cache with size != MCACHE_BUCKET_SIZE
List-Id:
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
To: qemu-devel@nongnu.org
Cc: xen-devel@lists.xensource.com, agraf@suse.de, Stefano Stabellini

From: Stefano Stabellini

Fix the implementation of qemu_map_cache: correctly support size
arguments different from 0 or MCACHE_BUCKET_SIZE.
The new implementation supports locked mapcache entries with a size
that is a multiple of MCACHE_BUCKET_SIZE.
qemu_invalidate_entry can correctly find and unmap these "large"
mapcache entries, provided that the virtual address passed to
qemu_invalidate_entry is the same one returned by qemu_map_cache when
the locked mapcache entry was created.

Signed-off-by: Stefano Stabellini
---
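Editor's note (not part of the patch): the sketch below illustrates the
size-rounding rule that the new qemu_map_cache code applies before looking
up a bucket: the request grows by one bucket when it straddles a bucket
boundary, is then rounded up to a whole number of MCACHE_BUCKET_SIZE
buckets, and a size of 0 still maps a single bucket. The rounded_size()
helper is hypothetical, and the 1 MiB bucket (MCACHE_BUCKET_SHIFT == 20)
plus the example addresses are assumptions made purely for illustration.

/* Hypothetical, standalone illustration of the rounding added to
 * qemu_map_cache(); not part of the patch. */
#include <stdio.h>
#include <stdint.h>

#define MCACHE_BUCKET_SHIFT 20                          /* assumed value */
#define MCACHE_BUCKET_SIZE  (1UL << MCACHE_BUCKET_SHIFT)

/* Mirror of the rounding hunk: how many bytes of bucket space a request
 * for 'size' bytes starting at 'phys_addr' ends up occupying. */
static uint64_t rounded_size(uint64_t phys_addr, uint64_t size)
{
    uint64_t address_offset = phys_addr & (MCACHE_BUCKET_SIZE - 1);
    uint64_t __size = size;

    if ((address_offset + (__size % MCACHE_BUCKET_SIZE)) > MCACHE_BUCKET_SIZE)
        __size += MCACHE_BUCKET_SIZE;   /* request crosses into the next bucket */
    if (__size % MCACHE_BUCKET_SIZE)
        __size += MCACHE_BUCKET_SIZE - (__size % MCACHE_BUCKET_SIZE);
    if (!__size)
        __size = MCACHE_BUCKET_SIZE;    /* size == 0 still means one bucket */

    return __size;
}

int main(void)
{
    /* size 0 at a bucket boundary -> 1 bucket */
    printf("%llu bucket(s)\n",
           (unsigned long long)(rounded_size(0x100000, 0) / MCACHE_BUCKET_SIZE));
    /* 4 KiB at a small offset, fits in the first bucket -> 1 bucket */
    printf("%llu bucket(s)\n",
           (unsigned long long)(rounded_size(0x100800, 0x1000) / MCACHE_BUCKET_SIZE));
    /* 8 KiB straddling a bucket boundary -> 2 buckets */
    printf("%llu bucket(s)\n",
           (unsigned long long)(rounded_size(0x1ff000, 0x2000) / MCACHE_BUCKET_SIZE));
    return 0;
}

The rounded size is also what the patch stores in entry->size and
reventry->size, which is how qemu_invalidate_entry and
qemu_ram_addr_from_mapcache later match the locked "large" entry again.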
 xen-mapcache.c |   77 +++++++++++++++++++++++++++++++++++++++++++++++--------
 1 files changed, 65 insertions(+), 12 deletions(-)

diff --git a/xen-mapcache.c b/xen-mapcache.c
index 349cc62..90fbd49 100644
--- a/xen-mapcache.c
+++ b/xen-mapcache.c
@@ -43,14 +43,16 @@ typedef struct MapCacheEntry {
     target_phys_addr_t paddr_index;
     uint8_t *vaddr_base;
-    DECLARE_BITMAP(valid_mapping, MCACHE_BUCKET_SIZE >> XC_PAGE_SHIFT);
+    unsigned long *valid_mapping;
     uint8_t lock;
+    target_phys_addr_t size;
     struct MapCacheEntry *next;
 } MapCacheEntry;
 
 typedef struct MapCacheRev {
     uint8_t *vaddr_req;
     target_phys_addr_t paddr_index;
+    target_phys_addr_t size;
     QTAILQ_ENTRY(MapCacheRev) next;
 } MapCacheRev;
 
@@ -68,6 +70,15 @@ typedef struct MapCache {
 
 static MapCache *mapcache;
 
+static inline int test_bits(int nr, int size, const unsigned long *addr)
+{
+    unsigned long res = find_next_zero_bit(addr, size + nr, nr);
+    if (res >= nr + size)
+        return 1;
+    else
+        return 0;
+}
+
 void qemu_map_cache_init(void)
 {
     unsigned long size;
@@ -115,11 +126,15 @@ static void qemu_remap_bucket(MapCacheEntry *entry,
     err = qemu_mallocz(nb_pfn * sizeof (int));
 
     if (entry->vaddr_base != NULL) {
-        if (munmap(entry->vaddr_base, size) != 0) {
+        if (munmap(entry->vaddr_base, entry->size) != 0) {
             perror("unmap fails");
             exit(-1);
         }
     }
+    if (entry->valid_mapping != NULL) {
+        qemu_free(entry->valid_mapping);
+        entry->valid_mapping = NULL;
+    }
 
     for (i = 0; i < nb_pfn; i++) {
         pfns[i] = (address_index << (MCACHE_BUCKET_SHIFT-XC_PAGE_SHIFT)) + i;
@@ -134,6 +149,9 @@ static void qemu_remap_bucket(MapCacheEntry *entry,
 
     entry->vaddr_base = vaddr_base;
     entry->paddr_index = address_index;
+    entry->size = size;
+    entry->valid_mapping = (unsigned long *) qemu_mallocz(sizeof(unsigned long) *
+            BITS_TO_LONGS(size >> XC_PAGE_SHIFT));
 
     bitmap_zero(entry->valid_mapping, nb_pfn);
     for (i = 0; i < nb_pfn; i++) {
@@ -151,32 +169,47 @@ uint8_t *qemu_map_cache(target_phys_addr_t phys_addr, target_phys_addr_t size, uint8_t lock)
     MapCacheEntry *entry, *pentry = NULL;
     target_phys_addr_t address_index  = phys_addr >> MCACHE_BUCKET_SHIFT;
     target_phys_addr_t address_offset = phys_addr & (MCACHE_BUCKET_SIZE - 1);
+    target_phys_addr_t __size = size;
 
     trace_qemu_map_cache(phys_addr);
 
-    if (address_index == mapcache->last_address_index && !lock) {
+    if (address_index == mapcache->last_address_index && !lock && !__size) {
         trace_qemu_map_cache_return(mapcache->last_address_vaddr + address_offset);
         return mapcache->last_address_vaddr + address_offset;
     }
 
+    /* size is always a multiple of MCACHE_BUCKET_SIZE */
+    if ((address_offset + (__size % MCACHE_BUCKET_SIZE)) > MCACHE_BUCKET_SIZE)
+        __size += MCACHE_BUCKET_SIZE;
+    if (__size % MCACHE_BUCKET_SIZE)
+        __size += MCACHE_BUCKET_SIZE - (__size % MCACHE_BUCKET_SIZE);
+    if (!__size)
+        __size = MCACHE_BUCKET_SIZE;
+
     entry = &mapcache->entry[address_index % mapcache->nr_buckets];
 
-    while (entry && entry->lock && entry->paddr_index != address_index && entry->vaddr_base) {
+    while (entry && entry->lock && entry->vaddr_base &&
+            (entry->paddr_index != address_index || entry->size != __size ||
+             !test_bits(address_offset >> XC_PAGE_SHIFT, size >> XC_PAGE_SHIFT,
+                 entry->valid_mapping))) {
         pentry = entry;
         entry = entry->next;
     }
     if (!entry) {
         entry = qemu_mallocz(sizeof (MapCacheEntry));
         pentry->next = entry;
-        qemu_remap_bucket(entry, size ? : MCACHE_BUCKET_SIZE, address_index);
+        qemu_remap_bucket(entry, __size, address_index);
     } else if (!entry->lock) {
         if (!entry->vaddr_base || entry->paddr_index != address_index ||
-                !test_bit(address_offset >> XC_PAGE_SHIFT, entry->valid_mapping)) {
-            qemu_remap_bucket(entry, size ? : MCACHE_BUCKET_SIZE, address_index);
+                entry->size != __size ||
+                !test_bits(address_offset >> XC_PAGE_SHIFT, size >> XC_PAGE_SHIFT,
+                    entry->valid_mapping)) {
+            qemu_remap_bucket(entry, __size, address_index);
         }
     }
 
-    if (!test_bit(address_offset >> XC_PAGE_SHIFT, entry->valid_mapping)) {
+    if(!test_bits(address_offset >> XC_PAGE_SHIFT, size >> XC_PAGE_SHIFT,
+                entry->valid_mapping)) {
         mapcache->last_address_index = -1;
         trace_qemu_map_cache_return(NULL);
         return NULL;
@@ -189,6 +222,7 @@ uint8_t *qemu_map_cache(target_phys_addr_t phys_addr, target_phys_addr_t size, uint8_t lock)
         entry->lock++;
         reventry->vaddr_req = mapcache->last_address_vaddr + address_offset;
         reventry->paddr_index = mapcache->last_address_index;
+        reventry->size = entry->size;
         QTAILQ_INSERT_HEAD(&mapcache->locked_entries, reventry, next);
     }
 
@@ -231,13 +265,16 @@ void qemu_map_cache_unlock(void *buffer)
 
 ram_addr_t qemu_ram_addr_from_mapcache(void *ptr)
 {
+    MapCacheEntry *entry = NULL, *pentry = NULL;
     MapCacheRev *reventry;
     target_phys_addr_t paddr_index;
+    target_phys_addr_t size;
     int found = 0;
 
     QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
         if (reventry->vaddr_req == ptr) {
             paddr_index = reventry->paddr_index;
+            size = reventry->size;
             found = 1;
             break;
         }
@@ -252,7 +289,17 @@ ram_addr_t qemu_ram_addr_from_mapcache(void *ptr)
         return 0;
     }
 
-    return paddr_index << MCACHE_BUCKET_SHIFT;
+    entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
+    while (entry && (entry->paddr_index != paddr_index || entry->size != size)) {
+        pentry = entry;
+        entry = entry->next;
+    }
+    if (!entry) {
+        DPRINTF("Trying to find address %p that is not in the mapcache!\n", ptr);
+        return 0;
+    }
+    return (reventry->paddr_index << MCACHE_BUCKET_SHIFT) +
+        ((unsigned long) ptr - (unsigned long) entry->vaddr_base);
 }
 
 void qemu_invalidate_entry(uint8_t *buffer)
@@ -260,6 +307,7 @@ void qemu_invalidate_entry(uint8_t *buffer)
     MapCacheEntry *entry = NULL, *pentry = NULL;
     MapCacheRev *reventry;
     target_phys_addr_t paddr_index;
+    target_phys_addr_t size;
     int found = 0;
 
     if (mapcache->last_address_vaddr == buffer) {
@@ -269,6 +317,7 @@ void qemu_invalidate_entry(uint8_t *buffer)
     QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
         if (reventry->vaddr_req == buffer) {
             paddr_index = reventry->paddr_index;
+            size = reventry->size;
             found = 1;
             break;
         }
@@ -284,7 +333,7 @@ void qemu_invalidate_entry(uint8_t *buffer)
     qemu_free(reventry);
 
     entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
-    while (entry && entry->paddr_index != paddr_index) {
+    while (entry && (entry->paddr_index != paddr_index || entry->size != size)) {
         pentry = entry;
         entry = entry->next;
     }
@@ -298,10 +347,11 @@ void qemu_invalidate_entry(uint8_t *buffer)
     }
 
     pentry->next = entry->next;
-    if (munmap(entry->vaddr_base, MCACHE_BUCKET_SIZE) != 0) {
+    if (munmap(entry->vaddr_base, entry->size) != 0) {
         perror("unmap fails");
         exit(-1);
     }
+    qemu_free(entry->valid_mapping);
     qemu_free(entry);
 }
 
@@ -328,13 +378,16 @@ void qemu_invalidate_map_cache(void)
             continue;
         }
 
-        if (munmap(entry->vaddr_base, MCACHE_BUCKET_SIZE) != 0) {
+        if (munmap(entry->vaddr_base, entry->size) != 0) {
             perror("unmap fails");
             exit(-1);
         }
 
         entry->paddr_index = 0;
         entry->vaddr_base = NULL;
+        entry->size = 0;
+        qemu_free(entry->valid_mapping);
+        entry->valid_mapping = NULL;
     }
 
     mapcache->last_address_index = -1;
-- 
1.7.2.3