* [Qemu-devel] [PATCH RFC] xen/mapcache: store dma information in revmapcache entries for debugging
@ 2017-05-03 0:08 ` Stefano Stabellini
0 siblings, 0 replies; 6+ messages in thread
From: Stefano Stabellini @ 2017-05-03 0:08 UTC (permalink / raw)
To: anthony.perard
Cc: sstabellini, qemu-devel, xen-devel, pbonzini, crosthwaite.peter,
rth, hrgstephen, x1917x
The Xen mapcache is able to create long term mappings, they are called
"locked" mappings. The third parameter of the xen_map_cache call
specifies if a mapping is a "locked" mapping.
From the QEMU point of view there are two kinds of long term mappings:
[a] device memory mappings, such as option roms and video memory
[b] dma mappings, created by dma_memory_map & friends
After certain operations, ballooning a VM in particular, Xen asks QEMU
kindly to destroy all mappings. However, certainly [a] mappings are
present and cannot be removed. That's not a problem as they are not
affected by ballooning. The *real* problem is that if there are any
mappings of type [b], any outstanding dma operations could fail. This is
a known shortcoming. In other words, when Xen asks QEMU to destroy all
mappings, it is an error if any [b] mappings exist.
However today we have no way of distinguishing [a] from [b]. Because of
that, we cannot even print a decent warning.
This patch introduces a new "dma" bool field to MapCacheRev entries, to
remember if a given mapping is for dma or is a long term device memory
mapping. When xen_invalidate_map_cache is called, we print a warning if
any [b] mappings exist. We ignore [a] mappings.
Mappings created by qemu_map_ram_ptr are assumed to be [a], while
mappings created by address_space_map->qemu_ram_ptr_length are assumed
to be [b].
The goal of the patch is to make debugging and system understanding
easier.
Signed-off-by: Stefano Stabellini <sstabellini@kernel.org>
diff --git a/exec.c b/exec.c
index eac6085..85769e1 100644
--- a/exec.c
+++ b/exec.c
@@ -2084,10 +2084,10 @@ void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
* In that case just map until the end of the page.
*/
if (block->offset == 0) {
- return xen_map_cache(addr, 0, 0);
+ return xen_map_cache(addr, 0, 0, false);
}
- block->host = xen_map_cache(block->offset, block->max_length, 1);
+ block->host = xen_map_cache(block->offset, block->max_length, 1, false);
}
return ramblock_ptr(block, addr);
}
@@ -2117,10 +2117,10 @@ static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
* In that case just map the requested area.
*/
if (block->offset == 0) {
- return xen_map_cache(addr, *size, 1);
+ return xen_map_cache(addr, *size, 1, true);
}
- block->host = xen_map_cache(block->offset, block->max_length, 1);
+ block->host = xen_map_cache(block->offset, block->max_length, 1, true);
}
return ramblock_ptr(block, addr);
diff --git a/hw/i386/xen/xen-mapcache.c b/hw/i386/xen/xen-mapcache.c
index 31debdf..fa4282a 100644
--- a/hw/i386/xen/xen-mapcache.c
+++ b/hw/i386/xen/xen-mapcache.c
@@ -62,6 +62,7 @@ typedef struct MapCacheRev {
hwaddr paddr_index;
hwaddr size;
QTAILQ_ENTRY(MapCacheRev) next;
+ bool dma;
} MapCacheRev;
typedef struct MapCache {
@@ -202,7 +203,7 @@ static void xen_remap_bucket(MapCacheEntry *entry,
}
static uint8_t *xen_map_cache_unlocked(hwaddr phys_addr, hwaddr size,
- uint8_t lock)
+ uint8_t lock, bool dma)
{
MapCacheEntry *entry, *pentry = NULL;
hwaddr address_index;
@@ -289,6 +290,7 @@ tryagain:
if (lock) {
MapCacheRev *reventry = g_malloc0(sizeof(MapCacheRev));
entry->lock++;
+ reventry->dma = dma;
reventry->vaddr_req = mapcache->last_entry->vaddr_base + address_offset;
reventry->paddr_index = mapcache->last_entry->paddr_index;
reventry->size = entry->size;
@@ -300,12 +302,12 @@ tryagain:
}
uint8_t *xen_map_cache(hwaddr phys_addr, hwaddr size,
- uint8_t lock)
+ uint8_t lock, bool dma)
{
uint8_t *p;
mapcache_lock();
- p = xen_map_cache_unlocked(phys_addr, size, lock);
+ p = xen_map_cache_unlocked(phys_addr, size, lock, dma);
mapcache_unlock();
return p;
}
@@ -426,8 +428,10 @@ void xen_invalidate_map_cache(void)
mapcache_lock();
QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
- DPRINTF("There should be no locked mappings at this time, "
- "but "TARGET_FMT_plx" -> %p is present\n",
+ if (!reventry->dma)
+ continue;
+ fprintf(stderr, "Locked DMA mapping while invalidating mapcache!"
+ " "TARGET_FMT_plx" -> %p is present\n",
reventry->paddr_index, reventry->vaddr_req);
}
diff --git a/include/sysemu/xen-mapcache.h b/include/sysemu/xen-mapcache.h
index b8c93b9..01daaad 100644
--- a/include/sysemu/xen-mapcache.h
+++ b/include/sysemu/xen-mapcache.h
@@ -17,7 +17,7 @@ typedef hwaddr (*phys_offset_to_gaddr_t)(hwaddr start_addr,
void xen_map_cache_init(phys_offset_to_gaddr_t f,
void *opaque);
uint8_t *xen_map_cache(hwaddr phys_addr, hwaddr size,
- uint8_t lock);
+ uint8_t lock, bool dma);
ram_addr_t xen_ram_addr_from_mapcache(void *ptr);
void xen_invalidate_map_cache_entry(uint8_t *buffer);
void xen_invalidate_map_cache(void);
@@ -31,7 +31,8 @@ static inline void xen_map_cache_init(phys_offset_to_gaddr_t f,
static inline uint8_t *xen_map_cache(hwaddr phys_addr,
hwaddr size,
- uint8_t lock)
+ uint8_t lock,
+ bool dma)
{
abort();
}
^ permalink raw reply related [flat|nested] 6+ messages in thread
* [PATCH RFC] xen/mapcache: store dma information in revmapcache entries for debugging
@ 2017-05-03 0:08 ` Stefano Stabellini
0 siblings, 0 replies; 6+ messages in thread
From: Stefano Stabellini @ 2017-05-03 0:08 UTC (permalink / raw)
To: anthony.perard
Cc: xen-devel, crosthwaite.peter, qemu-devel, sstabellini, x1917x,
pbonzini, hrgstephen, rth
The Xen mapcache is able to create long term mappings, they are called
"locked" mappings. The third parameter of the xen_map_cache call
specifies if a mapping is a "locked" mapping.
From the QEMU point of view there are two kinds of long term mappings:
[a] device memory mappings, such as option roms and video memory
[b] dma mappings, created by dma_memory_map & friends
After certain operations, ballooning a VM in particular, Xen asks QEMU
kindly to destroy all mappings. However, certainly [a] mappings are
present and cannot be removed. That's not a problem as they are not
affected by ballooning. The *real* problem is that if there are any
mappings of type [b], any outstanding dma operations could fail. This is
a known shortcoming. In other words, when Xen asks QEMU to destroy all
mappings, it is an error if any [b] mappings exist.
However today we have no way of distinguishing [a] from [b]. Because of
that, we cannot even print a decent warning.
This patch introduces a new "dma" bool field to MapCacheRev entries, to
remember if a given mapping is for dma or is a long term device memory
mapping. When xen_invalidate_map_cache is called, we print a warning if
any [b] mappings exist. We ignore [a] mappings.
Mappings created by qemu_map_ram_ptr are assumed to be [a], while
mappings created by address_space_map->qemu_ram_ptr_length are assumed
to be [b].
The goal of the patch is to make debugging and system understanding
easier.
Signed-off-by: Stefano Stabellini <sstabellini@kernel.org>
diff --git a/exec.c b/exec.c
index eac6085..85769e1 100644
--- a/exec.c
+++ b/exec.c
@@ -2084,10 +2084,10 @@ void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
* In that case just map until the end of the page.
*/
if (block->offset == 0) {
- return xen_map_cache(addr, 0, 0);
+ return xen_map_cache(addr, 0, 0, false);
}
- block->host = xen_map_cache(block->offset, block->max_length, 1);
+ block->host = xen_map_cache(block->offset, block->max_length, 1, false);
}
return ramblock_ptr(block, addr);
}
@@ -2117,10 +2117,10 @@ static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
* In that case just map the requested area.
*/
if (block->offset == 0) {
- return xen_map_cache(addr, *size, 1);
+ return xen_map_cache(addr, *size, 1, true);
}
- block->host = xen_map_cache(block->offset, block->max_length, 1);
+ block->host = xen_map_cache(block->offset, block->max_length, 1, true);
}
return ramblock_ptr(block, addr);
diff --git a/hw/i386/xen/xen-mapcache.c b/hw/i386/xen/xen-mapcache.c
index 31debdf..fa4282a 100644
--- a/hw/i386/xen/xen-mapcache.c
+++ b/hw/i386/xen/xen-mapcache.c
@@ -62,6 +62,7 @@ typedef struct MapCacheRev {
hwaddr paddr_index;
hwaddr size;
QTAILQ_ENTRY(MapCacheRev) next;
+ bool dma;
} MapCacheRev;
typedef struct MapCache {
@@ -202,7 +203,7 @@ static void xen_remap_bucket(MapCacheEntry *entry,
}
static uint8_t *xen_map_cache_unlocked(hwaddr phys_addr, hwaddr size,
- uint8_t lock)
+ uint8_t lock, bool dma)
{
MapCacheEntry *entry, *pentry = NULL;
hwaddr address_index;
@@ -289,6 +290,7 @@ tryagain:
if (lock) {
MapCacheRev *reventry = g_malloc0(sizeof(MapCacheRev));
entry->lock++;
+ reventry->dma = dma;
reventry->vaddr_req = mapcache->last_entry->vaddr_base + address_offset;
reventry->paddr_index = mapcache->last_entry->paddr_index;
reventry->size = entry->size;
@@ -300,12 +302,12 @@ tryagain:
}
uint8_t *xen_map_cache(hwaddr phys_addr, hwaddr size,
- uint8_t lock)
+ uint8_t lock, bool dma)
{
uint8_t *p;
mapcache_lock();
- p = xen_map_cache_unlocked(phys_addr, size, lock);
+ p = xen_map_cache_unlocked(phys_addr, size, lock, dma);
mapcache_unlock();
return p;
}
@@ -426,8 +428,10 @@ void xen_invalidate_map_cache(void)
mapcache_lock();
QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
- DPRINTF("There should be no locked mappings at this time, "
- "but "TARGET_FMT_plx" -> %p is present\n",
+ if (!reventry->dma)
+ continue;
+ fprintf(stderr, "Locked DMA mapping while invalidating mapcache!"
+ " "TARGET_FMT_plx" -> %p is present\n",
reventry->paddr_index, reventry->vaddr_req);
}
diff --git a/include/sysemu/xen-mapcache.h b/include/sysemu/xen-mapcache.h
index b8c93b9..01daaad 100644
--- a/include/sysemu/xen-mapcache.h
+++ b/include/sysemu/xen-mapcache.h
@@ -17,7 +17,7 @@ typedef hwaddr (*phys_offset_to_gaddr_t)(hwaddr start_addr,
void xen_map_cache_init(phys_offset_to_gaddr_t f,
void *opaque);
uint8_t *xen_map_cache(hwaddr phys_addr, hwaddr size,
- uint8_t lock);
+ uint8_t lock, bool dma);
ram_addr_t xen_ram_addr_from_mapcache(void *ptr);
void xen_invalidate_map_cache_entry(uint8_t *buffer);
void xen_invalidate_map_cache(void);
@@ -31,7 +31,8 @@ static inline void xen_map_cache_init(phys_offset_to_gaddr_t f,
static inline uint8_t *xen_map_cache(hwaddr phys_addr,
hwaddr size,
- uint8_t lock)
+ uint8_t lock,
+ bool dma)
{
abort();
}
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply related [flat|nested] 6+ messages in thread
* Re: [Qemu-devel] [PATCH RFC] xen/mapcache: store dma information in revmapcache entries for debugging
2017-05-03 0:08 ` Stefano Stabellini
@ 2017-05-03 0:40 ` no-reply
-1 siblings, 0 replies; 6+ messages in thread
From: no-reply @ 2017-05-03 0:40 UTC (permalink / raw)
To: sstabellini
Cc: famz, anthony.perard, xen-devel, crosthwaite.peter, qemu-devel,
x1917x, pbonzini, hrgstephen, rth
Hi,
This series seems to have some coding style problems. See output below for
more information:
Subject: [Qemu-devel] [PATCH RFC] xen/mapcache: store dma information in revmapcache entries for debugging
Message-id: alpine.DEB.2.10.1705021646310.8859@sstabellini-ThinkPad-X260
Type: series
=== TEST SCRIPT BEGIN ===
#!/bin/bash
BASE=base
n=1
total=$(git log --oneline $BASE.. | wc -l)
failed=0
# Useful git options
git config --local diff.renamelimit 0
git config --local diff.renames True
commits="$(git log --format=%H --reverse $BASE..)"
for c in $commits; do
echo "Checking PATCH $n/$total: $(git log -n 1 --format=%s $c)..."
if ! git show $c --format=email | ./scripts/checkpatch.pl --mailback -; then
failed=1
echo
fi
n=$((n+1))
done
exit $failed
=== TEST SCRIPT END ===
Updating 3c8cf5a9c21ff8782164d1def7f44bd888713384
Switched to a new branch 'test'
868ff55 xen/mapcache: store dma information in revmapcache entries for debugging
=== OUTPUT BEGIN ===
Checking PATCH 1/1: xen/mapcache: store dma information in revmapcache entries for debugging...
ERROR: braces {} are necessary for all arms of this statement
#123: FILE: hw/i386/xen/xen-mapcache.c:431:
+ if (!reventry->dma)
[...]
total: 1 errors, 0 warnings, 89 lines checked
Your patch has style problems, please review. If any of these errors
are false positives report them to the maintainer, see
CHECKPATCH in MAINTAINERS.
=== OUTPUT END ===
Test command exited with code: 1
---
Email generated automatically by Patchew [http://patchew.org/].
Please send your feedback to patchew-devel@freelists.org
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [Qemu-devel] [PATCH RFC] xen/mapcache: store dma information in revmapcache entries for debugging
@ 2017-05-03 0:40 ` no-reply
0 siblings, 0 replies; 6+ messages in thread
From: no-reply @ 2017-05-03 0:40 UTC (permalink / raw)
Cc: sstabellini, famz, crosthwaite.peter, qemu-devel, xen-devel,
x1917x, anthony.perard, pbonzini, rth, hrgstephen
Hi,
This series seems to have some coding style problems. See output below for
more information:
Subject: [Qemu-devel] [PATCH RFC] xen/mapcache: store dma information in revmapcache entries for debugging
Message-id: alpine.DEB.2.10.1705021646310.8859@sstabellini-ThinkPad-X260
Type: series
=== TEST SCRIPT BEGIN ===
#!/bin/bash
BASE=base
n=1
total=$(git log --oneline $BASE.. | wc -l)
failed=0
# Useful git options
git config --local diff.renamelimit 0
git config --local diff.renames True
commits="$(git log --format=%H --reverse $BASE..)"
for c in $commits; do
echo "Checking PATCH $n/$total: $(git log -n 1 --format=%s $c)..."
if ! git show $c --format=email | ./scripts/checkpatch.pl --mailback -; then
failed=1
echo
fi
n=$((n+1))
done
exit $failed
=== TEST SCRIPT END ===
Updating 3c8cf5a9c21ff8782164d1def7f44bd888713384
Switched to a new branch 'test'
868ff55 xen/mapcache: store dma information in revmapcache entries for debugging
=== OUTPUT BEGIN ===
Checking PATCH 1/1: xen/mapcache: store dma information in revmapcache entries for debugging...
ERROR: braces {} are necessary for all arms of this statement
#123: FILE: hw/i386/xen/xen-mapcache.c:431:
+ if (!reventry->dma)
[...]
total: 1 errors, 0 warnings, 89 lines checked
Your patch has style problems, please review. If any of these errors
are false positives report them to the maintainer, see
CHECKPATCH in MAINTAINERS.
=== OUTPUT END ===
Test command exited with code: 1
---
Email generated automatically by Patchew [http://patchew.org/].
Please send your feedback to patchew-devel@freelists.org
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [Qemu-devel] [PATCH RFC] xen/mapcache: store dma information in revmapcache entries for debugging
2017-05-03 0:08 ` Stefano Stabellini
@ 2017-05-03 8:55 ` Paolo Bonzini
-1 siblings, 0 replies; 6+ messages in thread
From: Paolo Bonzini @ 2017-05-03 8:55 UTC (permalink / raw)
To: Stefano Stabellini, anthony.perard
Cc: qemu-devel, xen-devel, crosthwaite.peter, rth, hrgstephen, x1917x
On 03/05/2017 02:08, Stefano Stabellini wrote:
> The Xen mapcache is able to create long term mappings, they are called
> "locked" mappings. The third parameter of the xen_map_cache call
> specifies if a mapping is a "locked" mapping.
>
>
> From the QEMU point of view there are two kinds of long term mappings:
>
> [a] device memory mappings, such as option roms and video memory
> [b] dma mappings, created by dma_memory_map & friends
>
> After certain operations, ballooning a VM in particular, Xen asks QEMU
> kindly to destroy all mappings. However, certainly [a] mappings are
> present and cannot be removed. That's not a problem as they are not
> affected by ballooning. The *real* problem is that if there are any
> mappings of type [b], any outstanding dma operations could fail. This is
> a known shortcoming. In other words, when Xen asks QEMU to destroy all
> mappings, it is an error if any [b] mappings exist.
>
> However today we have no way of distinguishing [a] from [b]. Because of
> that, we cannot even print a decent warning.
>
> This patch introduces a new "dma" bool field to MapCacheRev entries, to
> remember if a given mapping is for dma or is a long term device memory
> mapping. When xen_invalidate_map_cache is called, we print a warning if
> any [b] mappings exist. We ignore [a] mappings.
>
> Mappings created by qemu_map_ram_ptr are assumed to be [a], while
> mappings created by address_space_map->qemu_ram_ptr_length are assumed
> to be [b].
>
> The goal of the patch is to make debugging and system understanding
> easier.
Sure, why not.
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Paolo
> Signed-off-by: Stefano Stabellini <sstabellini@kernel.org>
>
> diff --git a/exec.c b/exec.c
> index eac6085..85769e1 100644
> --- a/exec.c
> +++ b/exec.c
> @@ -2084,10 +2084,10 @@ void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
> * In that case just map until the end of the page.
> */
> if (block->offset == 0) {
> - return xen_map_cache(addr, 0, 0);
> + return xen_map_cache(addr, 0, 0, false);
> }
>
> - block->host = xen_map_cache(block->offset, block->max_length, 1);
> + block->host = xen_map_cache(block->offset, block->max_length, 1, false);
> }
> return ramblock_ptr(block, addr);
> }
> @@ -2117,10 +2117,10 @@ static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
> * In that case just map the requested area.
> */
> if (block->offset == 0) {
> - return xen_map_cache(addr, *size, 1);
> + return xen_map_cache(addr, *size, 1, true);
> }
>
> - block->host = xen_map_cache(block->offset, block->max_length, 1);
> + block->host = xen_map_cache(block->offset, block->max_length, 1, true);
> }
>
> return ramblock_ptr(block, addr);
> diff --git a/hw/i386/xen/xen-mapcache.c b/hw/i386/xen/xen-mapcache.c
> index 31debdf..fa4282a 100644
> --- a/hw/i386/xen/xen-mapcache.c
> +++ b/hw/i386/xen/xen-mapcache.c
> @@ -62,6 +62,7 @@ typedef struct MapCacheRev {
> hwaddr paddr_index;
> hwaddr size;
> QTAILQ_ENTRY(MapCacheRev) next;
> + bool dma;
> } MapCacheRev;
>
> typedef struct MapCache {
> @@ -202,7 +203,7 @@ static void xen_remap_bucket(MapCacheEntry *entry,
> }
>
> static uint8_t *xen_map_cache_unlocked(hwaddr phys_addr, hwaddr size,
> - uint8_t lock)
> + uint8_t lock, bool dma)
> {
> MapCacheEntry *entry, *pentry = NULL;
> hwaddr address_index;
> @@ -289,6 +290,7 @@ tryagain:
> if (lock) {
> MapCacheRev *reventry = g_malloc0(sizeof(MapCacheRev));
> entry->lock++;
> + reventry->dma = dma;
> reventry->vaddr_req = mapcache->last_entry->vaddr_base + address_offset;
> reventry->paddr_index = mapcache->last_entry->paddr_index;
> reventry->size = entry->size;
> @@ -300,12 +302,12 @@ tryagain:
> }
>
> uint8_t *xen_map_cache(hwaddr phys_addr, hwaddr size,
> - uint8_t lock)
> + uint8_t lock, bool dma)
> {
> uint8_t *p;
>
> mapcache_lock();
> - p = xen_map_cache_unlocked(phys_addr, size, lock);
> + p = xen_map_cache_unlocked(phys_addr, size, lock, dma);
> mapcache_unlock();
> return p;
> }
> @@ -426,8 +428,10 @@ void xen_invalidate_map_cache(void)
> mapcache_lock();
>
> QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
> - DPRINTF("There should be no locked mappings at this time, "
> - "but "TARGET_FMT_plx" -> %p is present\n",
> + if (!reventry->dma)
> + continue;
> + fprintf(stderr, "Locked DMA mapping while invalidating mapcache!"
> + " "TARGET_FMT_plx" -> %p is present\n",
> reventry->paddr_index, reventry->vaddr_req);
> }
>
> diff --git a/include/sysemu/xen-mapcache.h b/include/sysemu/xen-mapcache.h
> index b8c93b9..01daaad 100644
> --- a/include/sysemu/xen-mapcache.h
> +++ b/include/sysemu/xen-mapcache.h
> @@ -17,7 +17,7 @@ typedef hwaddr (*phys_offset_to_gaddr_t)(hwaddr start_addr,
> void xen_map_cache_init(phys_offset_to_gaddr_t f,
> void *opaque);
> uint8_t *xen_map_cache(hwaddr phys_addr, hwaddr size,
> - uint8_t lock);
> + uint8_t lock, bool dma);
> ram_addr_t xen_ram_addr_from_mapcache(void *ptr);
> void xen_invalidate_map_cache_entry(uint8_t *buffer);
> void xen_invalidate_map_cache(void);
> @@ -31,7 +31,8 @@ static inline void xen_map_cache_init(phys_offset_to_gaddr_t f,
>
> static inline uint8_t *xen_map_cache(hwaddr phys_addr,
> hwaddr size,
> - uint8_t lock)
> + uint8_t lock,
> + bool dma)
> {
> abort();
> }
>
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH RFC] xen/mapcache: store dma information in revmapcache entries for debugging
@ 2017-05-03 8:55 ` Paolo Bonzini
0 siblings, 0 replies; 6+ messages in thread
From: Paolo Bonzini @ 2017-05-03 8:55 UTC (permalink / raw)
To: Stefano Stabellini, anthony.perard
Cc: xen-devel, crosthwaite.peter, qemu-devel, x1917x, hrgstephen, rth
On 03/05/2017 02:08, Stefano Stabellini wrote:
> The Xen mapcache is able to create long term mappings, they are called
> "locked" mappings. The third parameter of the xen_map_cache call
> specifies if a mapping is a "locked" mapping.
>
>
> From the QEMU point of view there are two kinds of long term mappings:
>
> [a] device memory mappings, such as option roms and video memory
> [b] dma mappings, created by dma_memory_map & friends
>
> After certain operations, ballooning a VM in particular, Xen asks QEMU
> kindly to destroy all mappings. However, certainly [a] mappings are
> present and cannot be removed. That's not a problem as they are not
> affected by ballooning. The *real* problem is that if there are any
> mappings of type [b], any outstanding dma operations could fail. This is
> a known shortcoming. In other words, when Xen asks QEMU to destroy all
> mappings, it is an error if any [b] mappings exist.
>
> However today we have no way of distinguishing [a] from [b]. Because of
> that, we cannot even print a decent warning.
>
> This patch introduces a new "dma" bool field to MapCacheRev entries, to
> remember if a given mapping is for dma or is a long term device memory
> mapping. When xen_invalidate_map_cache is called, we print a warning if
> any [b] mappings exist. We ignore [a] mappings.
>
> Mappings created by qemu_map_ram_ptr are assumed to be [a], while
> mappings created by address_space_map->qemu_ram_ptr_length are assumed
> to be [b].
>
> The goal of the patch is to make debugging and system understanding
> easier.
Sure, why not.
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Paolo
> Signed-off-by: Stefano Stabellini <sstabellini@kernel.org>
>
> diff --git a/exec.c b/exec.c
> index eac6085..85769e1 100644
> --- a/exec.c
> +++ b/exec.c
> @@ -2084,10 +2084,10 @@ void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
> * In that case just map until the end of the page.
> */
> if (block->offset == 0) {
> - return xen_map_cache(addr, 0, 0);
> + return xen_map_cache(addr, 0, 0, false);
> }
>
> - block->host = xen_map_cache(block->offset, block->max_length, 1);
> + block->host = xen_map_cache(block->offset, block->max_length, 1, false);
> }
> return ramblock_ptr(block, addr);
> }
> @@ -2117,10 +2117,10 @@ static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
> * In that case just map the requested area.
> */
> if (block->offset == 0) {
> - return xen_map_cache(addr, *size, 1);
> + return xen_map_cache(addr, *size, 1, true);
> }
>
> - block->host = xen_map_cache(block->offset, block->max_length, 1);
> + block->host = xen_map_cache(block->offset, block->max_length, 1, true);
> }
>
> return ramblock_ptr(block, addr);
> diff --git a/hw/i386/xen/xen-mapcache.c b/hw/i386/xen/xen-mapcache.c
> index 31debdf..fa4282a 100644
> --- a/hw/i386/xen/xen-mapcache.c
> +++ b/hw/i386/xen/xen-mapcache.c
> @@ -62,6 +62,7 @@ typedef struct MapCacheRev {
> hwaddr paddr_index;
> hwaddr size;
> QTAILQ_ENTRY(MapCacheRev) next;
> + bool dma;
> } MapCacheRev;
>
> typedef struct MapCache {
> @@ -202,7 +203,7 @@ static void xen_remap_bucket(MapCacheEntry *entry,
> }
>
> static uint8_t *xen_map_cache_unlocked(hwaddr phys_addr, hwaddr size,
> - uint8_t lock)
> + uint8_t lock, bool dma)
> {
> MapCacheEntry *entry, *pentry = NULL;
> hwaddr address_index;
> @@ -289,6 +290,7 @@ tryagain:
> if (lock) {
> MapCacheRev *reventry = g_malloc0(sizeof(MapCacheRev));
> entry->lock++;
> + reventry->dma = dma;
> reventry->vaddr_req = mapcache->last_entry->vaddr_base + address_offset;
> reventry->paddr_index = mapcache->last_entry->paddr_index;
> reventry->size = entry->size;
> @@ -300,12 +302,12 @@ tryagain:
> }
>
> uint8_t *xen_map_cache(hwaddr phys_addr, hwaddr size,
> - uint8_t lock)
> + uint8_t lock, bool dma)
> {
> uint8_t *p;
>
> mapcache_lock();
> - p = xen_map_cache_unlocked(phys_addr, size, lock);
> + p = xen_map_cache_unlocked(phys_addr, size, lock, dma);
> mapcache_unlock();
> return p;
> }
> @@ -426,8 +428,10 @@ void xen_invalidate_map_cache(void)
> mapcache_lock();
>
> QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
> - DPRINTF("There should be no locked mappings at this time, "
> - "but "TARGET_FMT_plx" -> %p is present\n",
> + if (!reventry->dma)
> + continue;
> + fprintf(stderr, "Locked DMA mapping while invalidating mapcache!"
> + " "TARGET_FMT_plx" -> %p is present\n",
> reventry->paddr_index, reventry->vaddr_req);
> }
>
> diff --git a/include/sysemu/xen-mapcache.h b/include/sysemu/xen-mapcache.h
> index b8c93b9..01daaad 100644
> --- a/include/sysemu/xen-mapcache.h
> +++ b/include/sysemu/xen-mapcache.h
> @@ -17,7 +17,7 @@ typedef hwaddr (*phys_offset_to_gaddr_t)(hwaddr start_addr,
> void xen_map_cache_init(phys_offset_to_gaddr_t f,
> void *opaque);
> uint8_t *xen_map_cache(hwaddr phys_addr, hwaddr size,
> - uint8_t lock);
> + uint8_t lock, bool dma);
> ram_addr_t xen_ram_addr_from_mapcache(void *ptr);
> void xen_invalidate_map_cache_entry(uint8_t *buffer);
> void xen_invalidate_map_cache(void);
> @@ -31,7 +31,8 @@ static inline void xen_map_cache_init(phys_offset_to_gaddr_t f,
>
> static inline uint8_t *xen_map_cache(hwaddr phys_addr,
> hwaddr size,
> - uint8_t lock)
> + uint8_t lock,
> + bool dma)
> {
> abort();
> }
>
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 6+ messages in thread
end of thread, other threads:[~2017-05-03 8:55 UTC | newest]
Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-05-03 0:08 [Qemu-devel] [PATCH RFC] xen/mapcache: store dma information in revmapcache entries for debugging Stefano Stabellini
2017-05-03 0:08 ` Stefano Stabellini
2017-05-03 0:40 ` [Qemu-devel] " no-reply
2017-05-03 0:40 ` no-reply
2017-05-03 8:55 ` Paolo Bonzini
2017-05-03 8:55 ` Paolo Bonzini
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.