* [PATCH v3 1/7] cache: get rid of search loop in cache_add()
From: Petr Tesarik @ 2015-03-06  8:23 UTC
  To: Atsushi Kumagai, Michael Holzheu; +Cc: kexec mailing list, Jan Willeke

The intention was that the cache code be re-entrant, so every cache entry
should go through these states:

  1. free
  2. pending read
  3. used

The cache_add() function moves an entry from state 2 to state 3, but
since the caller did not know the cache entry pointer, it had to search
the pending list for a pending read of the given physical address. This
search becomes unnecessary if cache_alloc() returns the pointer directly.
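
For illustration, the calling convention after this patch looks like this
from the caller's side (a minimal sketch against the new cache.h;
read_one_page() is a hypothetical stand-in for readpage_elf() and friends):

#include "cache.h"

extern int read_one_page(unsigned long long paddr, void *bufptr);

int fill_page(unsigned long long paddr)
{
	/* state 1 -> 2: take an entry and put it on the pending list */
	struct cache_entry *entry = cache_alloc(paddr);
	if (!entry)
		return 0;

	if (!read_one_page(paddr, entry->bufptr))
		return 0;	/* entry stays pending; patch 2/7 fixes this */

	/* state 2 -> 3: no list search, we already hold the pointer */
	cache_add(entry);
	return 1;
}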

Signed-off-by: Petr Tesarik <ptesarik@suse.cz>
---
 cache.c        | 26 ++++++--------------------
 cache.h        | 10 ++++++++--
 makedumpfile.c |  8 +++++---
 3 files changed, 19 insertions(+), 25 deletions(-)

diff --git a/cache.c b/cache.c
index 0dd957c..700ba0c 100644
--- a/cache.c
+++ b/cache.c
@@ -20,12 +20,6 @@
 #include "cache.h"
 #include "print_info.h"
 
-struct cache_entry {
-	unsigned long long paddr;
-	void *bufptr;
-	struct cache_entry *next, *prev;
-};
-
 struct cache {
 	struct cache_entry *head, *tail;
 };
@@ -98,38 +92,30 @@ cache_search(unsigned long long paddr)
 	return NULL;		/* cache miss */
 }
 
-void *
+struct cache_entry *
 cache_alloc(unsigned long long paddr)
 {
 	struct cache_entry *entry = NULL;
 
 	if (avail) {
 		entry = &pool[--avail];
-		entry->paddr = paddr;
 		add_entry(&pending, entry);
 	} else if (pending.tail) {
 		entry = pending.tail;
-		entry->paddr = paddr;
 	} else if (used.tail) {
 		entry = used.tail;
 		remove_entry(&used, entry);
-		entry->paddr = paddr;
 		add_entry(&pending, entry);
 	} else
 		return NULL;
 
-	return entry->bufptr;
+	entry->paddr = paddr;
+	return entry;
 }
 
 void
-cache_add(unsigned long long paddr)
+cache_add(struct cache_entry *entry)
 {
-	struct cache_entry *entry;
-	for (entry = pending.head; entry; entry = entry->next) {
-		if (entry->paddr == paddr) {
-			remove_entry(&pending, entry);
-			add_entry(&used, entry);
-			break;
-		}
-	}
+	remove_entry(&pending, entry);
+	add_entry(&used, entry);
 }
diff --git a/cache.h b/cache.h
index 4730e12..dab8eb9 100644
--- a/cache.h
+++ b/cache.h
@@ -19,9 +19,15 @@
 #ifndef _CACHE_H
 #define _CACHE_H
 
+struct cache_entry {
+	unsigned long long paddr;
+	void *bufptr;
+	struct cache_entry *next, *prev;
+};
+
 int cache_init(void);
 void *cache_search(unsigned long long paddr);
-void *cache_alloc(unsigned long long paddr);
-void cache_add(unsigned long long paddr);
+struct cache_entry *cache_alloc(unsigned long long paddr);
+void cache_add(struct cache_entry *entry);
 
 #endif	/* _CACHE_H */
diff --git a/makedumpfile.c b/makedumpfile.c
index 74bc9db..828adeb 100644
--- a/makedumpfile.c
+++ b/makedumpfile.c
@@ -591,6 +591,7 @@ readmem(int type_addr, unsigned long long addr, void *bufptr, size_t size)
 	unsigned long long paddr, maddr = NOT_PADDR;
 	unsigned long long pgaddr;
 	void *pgbuf;
+	struct cache_entry *cached;
 
 next_page:
 	switch (type_addr) {
@@ -644,9 +645,10 @@ next_page:
 	pgaddr = PAGEBASE(paddr);
 	pgbuf = cache_search(pgaddr);
 	if (!pgbuf) {
-		pgbuf = cache_alloc(pgaddr);
-		if (!pgbuf)
+		cached = cache_alloc(pgaddr);
+		if (!cached)
 			goto error;
+		pgbuf = cached->bufptr;
 
 		if (info->flag_refiltering) {
 			if (!readpage_kdump_compressed(pgaddr, pgbuf))
@@ -658,7 +660,7 @@ next_page:
 			if (!readpage_elf(pgaddr, pgbuf))
 				goto error;
 		}
-		cache_add(pgaddr);
+		cache_add(cached);
 	}
 
 	memcpy(bufptr, pgbuf + PAGEOFFSET(paddr), read_size);
-- 
1.8.4.5



* [PATCH v3 2/7] cache: allow to return a page to the pool
From: Petr Tesarik @ 2015-03-06  8:52 UTC
  To: Atsushi Kumagai, Michael Holzheu; +Cc: kexec mailing list, Jan Willeke

After a failed read, the page should no longer be pending, but rather
available for future allocation.
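
The pool also changes from an array of entries to an array of entry
pointers, because freed entries can now come back in any order; the pool
behaves as a simple stack. A minimal sketch of that idea (pool_pop() and
pool_push() are hypothetical names for what cache_alloc() and cache_free()
do with the pool):

#include <stddef.h>

#define CACHE_SIZE 8	/* as in cache.c */

struct cache_entry;	/* full definition in cache.h */

static struct cache_entry *pool[CACHE_SIZE];	/* stack of free entries */
static int avail = CACHE_SIZE;	/* cache_init() fills pool[0..CACHE_SIZE-1] */

static struct cache_entry *pool_pop(void)
{
	return avail ? pool[--avail] : NULL;
}

static void pool_push(struct cache_entry *entry)
{
	pool[avail++] = entry;
}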

Signed-off-by: Petr Tesarik <ptesarik@suse.cz>
---
 cache.c        | 15 ++++++++++++---
 cache.h        |  1 +
 makedumpfile.c |  8 +++++---
 3 files changed, 18 insertions(+), 6 deletions(-)

diff --git a/cache.c b/cache.c
index 700ba0c..ad9f0f1 100644
--- a/cache.c
+++ b/cache.c
@@ -26,7 +26,8 @@ struct cache {
 
 /* 8 pages covers 4-level paging plus 4 data pages */
 #define CACHE_SIZE	8
-static struct cache_entry pool[CACHE_SIZE];
+static struct cache_entry entries[CACHE_SIZE];
+static struct cache_entry *pool[CACHE_SIZE];
 static int avail = CACHE_SIZE;
 
 static struct cache used, pending;
@@ -44,7 +45,8 @@ cache_init(void)
 			       strerror(errno));
 			return FALSE;
 		}
-		pool[i].bufptr = bufptr;
+		entries[i].bufptr = bufptr;
+		pool[i] = &entries[i];
 	}
 
 	return TRUE;
@@ -98,7 +100,7 @@ cache_alloc(unsigned long long paddr)
 	struct cache_entry *entry = NULL;
 
 	if (avail) {
-		entry = &pool[--avail];
+		entry = pool[--avail];
 		add_entry(&pending, entry);
 	} else if (pending.tail) {
 		entry = pending.tail;
@@ -119,3 +121,10 @@ cache_add(struct cache_entry *entry)
 	remove_entry(&pending, entry);
 	add_entry(&used, entry);
 }
+
+void
+cache_free(struct cache_entry *entry)
+{
+	remove_entry(&pending, entry);
+	pool[avail++] = entry;
+}
diff --git a/cache.h b/cache.h
index dab8eb9..0e65f97 100644
--- a/cache.h
+++ b/cache.h
@@ -29,5 +29,6 @@ int cache_init(void);
 void *cache_search(unsigned long long paddr);
 struct cache_entry *cache_alloc(unsigned long long paddr);
 void cache_add(struct cache_entry *entry);
+void cache_free(struct cache_entry *entry);
 
 #endif	/* _CACHE_H */
diff --git a/makedumpfile.c b/makedumpfile.c
index 828adeb..c62d035 100644
--- a/makedumpfile.c
+++ b/makedumpfile.c
@@ -652,13 +652,13 @@ next_page:
 
 		if (info->flag_refiltering) {
 			if (!readpage_kdump_compressed(pgaddr, pgbuf))
-				goto error;
+				goto error_cached;
 		} else if (info->flag_sadump) {
 			if (!readpage_sadump(pgaddr, pgbuf))
-				goto error;
+				goto error_cached;
 		} else {
 			if (!readpage_elf(pgaddr, pgbuf))
-				goto error;
+				goto error_cached;
 		}
 		cache_add(cached);
 	}
@@ -674,6 +674,8 @@ next_page:
 
 	return size_orig;
 
+error_cached:
+	cache_free(cached);
 error:
 	ERRMSG("type_addr: %d, addr:%llx, size:%zd\n", type_addr, addr, size_orig);
 	return FALSE;
-- 
1.8.4.5



* [PATCH v3 3/7] cache: do not allocate from the pending list
From: Petr Tesarik @ 2015-03-06  8:59 UTC
  To: Atsushi Kumagai, Michael Holzheu; +Cc: kexec mailing list, Jan Willeke

Since pending entries are still being read, they must not be reused. This
change allows recursive use of the cache (reading pages from within
readpage itself). Although makedumpfile does not use this feature right
now, it was the original intention of the pending list.
The cache_alloc() function may return NULL if and only if the recursion
depth exceeds CACHE_SIZE.
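
A hypothetical sketch of the recursion this makes safe (makedumpfile does
not do this yet): resolving an address may itself read page-table pages
through the same cache, and every nesting level pins exactly one pending
entry, so CACHE_SIZE == 8 covers a 4-level page-table walk plus data
pages, matching the comment in cache.c:

#include "cache.h"

extern unsigned long long vtop(unsigned long long vaddr);	/* may recurse */
extern int read_one_page(unsigned long long paddr, void *bufptr);

int read_virtual_page(unsigned long long vaddr, void **data)
{
	/* vtop() may call read_virtual_page() again for page tables;
	 * each nested call holds one more pending entry. */
	struct cache_entry *entry = cache_alloc(vtop(vaddr));
	if (!entry)		/* recursion deeper than CACHE_SIZE */
		return 0;
	if (!read_one_page(entry->paddr, entry->bufptr)) {
		cache_free(entry);
		return 0;
	}
	cache_add(entry);
	*data = entry->bufptr;
	return 1;
}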

Signed-off-by: Petr Tesarik <ptesarik@suse.cz>
---
 cache.c | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/cache.c b/cache.c
index ad9f0f1..344b4f6 100644
--- a/cache.c
+++ b/cache.c
@@ -101,16 +101,13 @@ cache_alloc(unsigned long long paddr)
 
 	if (avail) {
 		entry = pool[--avail];
-		add_entry(&pending, entry);
-	} else if (pending.tail) {
-		entry = pending.tail;
 	} else if (used.tail) {
 		entry = used.tail;
 		remove_entry(&used, entry);
-		add_entry(&pending, entry);
 	} else
 		return NULL;
 
+	add_entry(&pending, entry);
 	entry->paddr = paddr;
 	return entry;
 }
-- 
1.8.4.5



* [PATCH v3 4/7] cache: add hit/miss statistics to the final report
From: Petr Tesarik @ 2015-03-06  9:26 UTC
  To: Atsushi Kumagai, Michael Holzheu; +Cc: kexec mailing list, Jan Willeke

Add the most basic cache statistics (pages hit and missed). Note that
the hit rate is not printed if the cache was never used, to avoid
division by zero.
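
With the format strings below, the tail of the report would look roughly
like this (illustrative numbers, not a measurement):

  Cache hit: 123456, miss: 789, hit rate: 99.4%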

Signed-off-by: Petr Tesarik <ptesarik@suse.cz>
---
 makedumpfile.c | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/makedumpfile.c b/makedumpfile.c
index c62d035..d778139 100644
--- a/makedumpfile.c
+++ b/makedumpfile.c
@@ -39,6 +39,10 @@ struct SplitBlock		*splitblock = NULL;
 
 char filename_stdout[] = FILENAME_STDOUT;
 
+/* Cache statistics */
+static unsigned long long	cache_hit;
+static unsigned long long	cache_miss;
+
 static void first_cycle(mdf_pfn_t start, mdf_pfn_t max, struct cycle *cycle)
 {
 	cycle->start_pfn = round(start, info->pfn_cyclic);
@@ -645,6 +649,7 @@ next_page:
 	pgaddr = PAGEBASE(paddr);
 	pgbuf = cache_search(pgaddr);
 	if (!pgbuf) {
+		++cache_miss;
 		cached = cache_alloc(pgaddr);
 		if (!cached)
 			goto error;
@@ -661,7 +666,8 @@ next_page:
 				goto error_cached;
 		}
 		cache_add(cached);
-	}
+	} else
+		++cache_hit;
 
 	memcpy(bufptr, pgbuf + PAGEOFFSET(paddr), read_size);
 
@@ -8294,6 +8300,11 @@ print_report(void)
 	REPORT_MSG("--------------------------------------------------\n");
 	REPORT_MSG("Total pages     : 0x%016llx\n", info->max_mapnr);
 	REPORT_MSG("\n");
+	REPORT_MSG("Cache hit: %lld, miss: %lld", cache_hit, cache_miss);
+	if (cache_hit + cache_miss)
+		REPORT_MSG(", hit rate: %.1f%%",
+		    100.0 * cache_hit / (cache_hit + cache_miss));
+	REPORT_MSG("\n\n");
 }
 
 static void
-- 
1.8.4.5



* [PATCH v3 5/7] cache: allocate buffers in one big chunk
From: Petr Tesarik @ 2015-03-06 13:07 UTC
  To: Atsushi Kumagai, Michael Holzheu; +Cc: kexec mailing list, Jan Willeke

Allocating all buffers in one big chunk allows callers to change an
entry's buffer pointer, because cache_alloc() can reinitialize it to the
default value before returning the cache entry.
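
The point of the single chunk is that an entry's default buffer address
becomes a pure function of its index, so a clobbered bufptr can always be
recomputed. A sketch of the invariant (default_bufptr() is a hypothetical
helper; entries, cachebuf and info->page_size are the names used in
cache.c, and the cast is only needed because cachebuf is a void pointer):

#include "cache.h"

void *default_bufptr(struct cache_entry *entry, struct cache_entry *entries,
		     void *cachebuf, long page_size)
{
	long idx = entry - entries;	/* index recovered by pointer arithmetic */
	return (char *)cachebuf + idx * page_size;
}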

Signed-off-by: Petr Tesarik <ptesarik@suse.cz>
---
 cache.c | 26 +++++++++++++++-----------
 1 file changed, 15 insertions(+), 11 deletions(-)

diff --git a/cache.c b/cache.c
index 344b4f6..ccd67ca 100644
--- a/cache.c
+++ b/cache.c
@@ -32,23 +32,23 @@ static int avail = CACHE_SIZE;
 
 static struct cache used, pending;
 
+static void *cachebuf;
+
 int
 cache_init(void)
 {
-	void *bufptr;
 	int i;
 
-	for (i = 0; i < CACHE_SIZE; ++i) {
-		bufptr = malloc(info->page_size);
-		if (bufptr == NULL) {
-			ERRMSG("Can't allocate memory for cache. %s\n",
-			       strerror(errno));
-			return FALSE;
-		}
-		entries[i].bufptr = bufptr;
-		pool[i] = &entries[i];
+	cachebuf = malloc(info->page_size * CACHE_SIZE);
+	if (cachebuf == NULL) {
+		ERRMSG("Can't allocate memory for cache. %s\n",
+		       strerror(errno));
+		return FALSE;
 	}
 
+	for (i = 0; i < CACHE_SIZE; ++i)
+		pool[i] = &entries[i];
+
 	return TRUE;
 }
 
@@ -98,6 +98,7 @@ struct cache_entry *
 cache_alloc(unsigned long long paddr)
 {
 	struct cache_entry *entry = NULL;
+	int idx;
 
 	if (avail) {
 		entry = pool[--avail];
@@ -107,8 +108,11 @@ cache_alloc(unsigned long long paddr)
 	} else
 		return NULL;
 
-	add_entry(&pending, entry);
+	idx = entry - entries;
 	entry->paddr = paddr;
+	entry->bufptr = cachebuf + idx * info->page_size;
+	add_entry(&pending, entry);
+
 	return entry;
 }
 
-- 
1.8.4.5



* [PATCH v3 6/7] cache: allow arbitrary size of cache entries
From: Petr Tesarik @ 2015-03-06 13:10 UTC
  To: Atsushi Kumagai, Michael Holzheu; +Cc: kexec mailing list, Jan Willeke

Until this commit, the assumed length of every cache entry was exactly
one page. To make other sizes possible, the length is now stored in
struct cache_entry. Note that cache_search() may return a pointer into
the middle of a cache buffer.
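
One detail worth spelling out: the new hit test covers both bounds with
just two unsigned comparisons. A standalone sketch of the same logic
(covers() is a hypothetical name; the patch inlines this in cache_search(),
using size_t for the offset, which is equivalent on 64-bit platforms):

/* Nonzero iff [paddr, paddr + length) lies within [base, base + buflen).
 * If paddr < base, off wraps around to a huge unsigned value and the
 * first comparison fails; writing the second test as
 * "length <= buflen - off" avoids any overflow in "off + length". */
static int covers(unsigned long long base, unsigned long buflen,
		  unsigned long long paddr, unsigned long length)
{
	unsigned long long off = paddr - base;
	return off < buflen && length <= buflen - off;
}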

Signed-off-by: Petr Tesarik <ptesarik@suse.cz>
---
 cache.c        | 12 ++++++++----
 cache.h        |  3 ++-
 makedumpfile.c |  2 +-
 3 files changed, 11 insertions(+), 6 deletions(-)

diff --git a/cache.c b/cache.c
index ccd67ca..938eda6 100644
--- a/cache.c
+++ b/cache.c
@@ -79,17 +79,20 @@ remove_entry(struct cache *cache, struct cache_entry *entry)
 }
 
 void *
-cache_search(unsigned long long paddr)
+cache_search(unsigned long long paddr, unsigned long length)
 {
 	struct cache_entry *entry;
-	for (entry = used.head; entry; entry = entry->next)
-		if (entry->paddr == paddr) {
+	for (entry = used.head; entry; entry = entry->next) {
+		size_t off = paddr - entry->paddr;
+		if (off < entry->buflen &&
+		    length <= entry->buflen - off) {
 			if (entry != used.head) {
 				remove_entry(&used, entry);
 				add_entry(&used, entry);
 			}
-			return entry->bufptr;
+			return entry->bufptr + off;
 		}
+	}
 
 	return NULL;		/* cache miss */
 }
@@ -111,6 +114,7 @@ cache_alloc(unsigned long long paddr)
 	idx = entry - entries;
 	entry->paddr = paddr;
 	entry->bufptr = cachebuf + idx * info->page_size;
+	entry->buflen = info->page_size;
 	add_entry(&pending, entry);
 
 	return entry;
diff --git a/cache.h b/cache.h
index 0e65f97..792ba6c 100644
--- a/cache.h
+++ b/cache.h
@@ -22,11 +22,12 @@
 struct cache_entry {
 	unsigned long long paddr;
 	void *bufptr;
+	unsigned long buflen;
 	struct cache_entry *next, *prev;
 };
 
 int cache_init(void);
-void *cache_search(unsigned long long paddr);
+void *cache_search(unsigned long long paddr, unsigned long length);
 struct cache_entry *cache_alloc(unsigned long long paddr);
 void cache_add(struct cache_entry *entry);
 void cache_free(struct cache_entry *entry);
diff --git a/makedumpfile.c b/makedumpfile.c
index d778139..f1aad08 100644
--- a/makedumpfile.c
+++ b/makedumpfile.c
@@ -647,7 +647,7 @@ next_page:
 	read_size = MIN(info->page_size - PAGEOFFSET(paddr), size);
 
 	pgaddr = PAGEBASE(paddr);
-	pgbuf = cache_search(pgaddr);
+	pgbuf = cache_search(pgaddr, read_size);
 	if (!pgbuf) {
 		++cache_miss;
 		cached = cache_alloc(pgaddr);
-- 
1.8.4.5



* [PATCH v3 7/7] cache: store mapped regions directly in the cache
From: Petr Tesarik @ 2015-03-06 13:23 UTC
  To: Atsushi Kumagai, Michael Holzheu; +Cc: kexec mailing list, Jan Willeke

Avoid copying data between the mmapped region and the cache. To do that,
readmem() tries to map the page before reading it. The mmap path and
the read path are separated: mappage_elf() uses the mmap syscall, and
readpage_elf() uses the read syscall.

If mmap is successful, readmem() stores the mmap address in the cache
entry. Of course, the mapping must not be removed until the cache entry
is evicted, but the cache code has no knowledge of mmap. To solve this
in a flexible way, a discard callback is added to struct cache_entry and
invoked by the cache eviction code.
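
Condensed to its core, the mechanism works as below (a sketch; evict_one()
is a hypothetical name for the recycling branch inside cache_alloc()):

#include <sys/mman.h>
#include "cache.h"

/* Owner side: an mmapped entry carries its own cleanup. */
static void unmap_cache(struct cache_entry *entry)
{
	munmap(entry->bufptr, entry->buflen);
}

/* Cache side: eviction runs the callback without knowing about mmap. */
static void evict_one(struct cache_entry *entry)
{
	if (entry->discard)		/* NULL for plain read buffers */
		entry->discard(entry);
}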

Signed-off-by: Petr Tesarik <ptesarik@suse.cz>
---
 cache.c        |   3 ++
 cache.h        |   2 ++
 makedumpfile.c | 105 ++++++++++++++++++++++++++++++++-------------------------
 3 files changed, 64 insertions(+), 46 deletions(-)

diff --git a/cache.c b/cache.c
index 938eda6..963eb76 100644
--- a/cache.c
+++ b/cache.c
@@ -108,6 +108,8 @@ cache_alloc(unsigned long long paddr)
 	} else if (used.tail) {
 		entry = used.tail;
 		remove_entry(&used, entry);
+		if (entry->discard)
+			entry->discard(entry);
 	} else
 		return NULL;
 
@@ -115,6 +117,7 @@ cache_alloc(unsigned long long paddr)
 	entry->paddr = paddr;
 	entry->bufptr = cachebuf + idx * info->page_size;
 	entry->buflen = info->page_size;
+	entry->discard = NULL;
 	add_entry(&pending, entry);
 
 	return entry;
diff --git a/cache.h b/cache.h
index 792ba6c..c55cec4 100644
--- a/cache.h
+++ b/cache.h
@@ -24,6 +24,8 @@ struct cache_entry {
 	void *bufptr;
 	unsigned long buflen;
 	struct cache_entry *next, *prev;
+
+	void (*discard)(struct cache_entry *);
 };
 
 int cache_init(void);
diff --git a/makedumpfile.c b/makedumpfile.c
index f1aad08..32f5459 100644
--- a/makedumpfile.c
+++ b/makedumpfile.c
@@ -294,6 +294,12 @@ read_page_desc(unsigned long long paddr, page_desc_t *pd)
 	return TRUE;
 }
 
+static void
+unmap_cache(struct cache_entry *entry)
+{
+	munmap(entry->bufptr, entry->buflen);
+}
+
 static int
 update_mmap_range(off_t offset, int initial) {
 	off_t start_offset, end_offset;
@@ -301,9 +307,6 @@ update_mmap_range(off_t offset, int initial) {
 	off_t max_offset = get_max_file_offset();
 	off_t pt_load_end = offset_to_pt_load_end(offset);
 
-	munmap(info->mmap_buf,
-	       info->mmap_end_offset - info->mmap_start_offset);
-
 	/*
 	 * offset for mmap() must be page aligned.
 	 */
@@ -357,29 +360,45 @@ initialize_mmap(void) {
 	return TRUE;
 }
 
-static int
-read_with_mmap(off_t offset, void *bufptr, unsigned long size) {
-	size_t read_size;
+static char *
+mappage_elf(unsigned long long paddr)
+{
+	off_t offset, offset2;
 
-next_region:
+	if (info->flag_usemmap != MMAP_ENABLE)
+		return NULL;
 
-	if (!is_mapped_with_mmap(offset))
-		if (!update_mmap_range(offset, 0))
-			return FALSE;
+	offset = paddr_to_offset(paddr);
+	if (!offset || page_is_fractional(offset))
+		return NULL;
+
+	offset2 = paddr_to_offset(paddr + info->page_size);
+	if (!offset2)
+		return NULL;
 
-	read_size = MIN(info->mmap_end_offset - offset, size);
+	if (offset2 - offset != info->page_size)
+		return NULL;
 
-	memcpy(bufptr, info->mmap_buf +
-	       (offset - info->mmap_start_offset), read_size);
+	if (!is_mapped_with_mmap(offset) &&
+	    !update_mmap_range(offset, 0)) {
+		ERRMSG("Can't read the dump memory(%s) with mmap().\n",
+		       info->name_memory);
 
-	offset += read_size;
-	bufptr += read_size;
-	size -= read_size;
+		ERRMSG("This kernel might have some problems about mmap().\n");
+		ERRMSG("read() will be used instead of mmap() from now.\n");
 
-	if (size > 0)
-		goto next_region;
+		/*
+		 * Fall back to read().
+		 */
+		info->flag_usemmap = MMAP_DISABLE;
+		return NULL;
+	}
 
-	return TRUE;
+	if (offset < info->mmap_start_offset ||
+	    offset + info->page_size > info->mmap_end_offset)
+		return NULL;
+
+	return info->mmap_buf + (offset - info->mmap_start_offset);
 }
 
 static int
@@ -387,33 +406,16 @@ read_from_vmcore(off_t offset, void *bufptr, unsigned long size)
 {
 	const off_t failed = (off_t)-1;
 
-	if (info->flag_usemmap == MMAP_ENABLE &&
-	    page_is_fractional(offset) == FALSE) {
-		if (!read_with_mmap(offset, bufptr, size)) {
-			ERRMSG("Can't read the dump memory(%s) with mmap().\n",
-			       info->name_memory);
-
-			ERRMSG("This kernel might have some problems about mmap().\n");
-			ERRMSG("read() will be used instead of mmap() from now.\n");
-
-			/*
-			 * Fall back to read().
-			 */
-			info->flag_usemmap = MMAP_DISABLE;
-			read_from_vmcore(offset, bufptr, size);
-		}
-	} else {
-		if (lseek(info->fd_memory, offset, SEEK_SET) == failed) {
-			ERRMSG("Can't seek the dump memory(%s). (offset: %llx) %s\n",
-			       info->name_memory, (unsigned long long)offset, strerror(errno));
-			return FALSE;
-		}
+	if (lseek(info->fd_memory, offset, SEEK_SET) == failed) {
+		ERRMSG("Can't seek the dump memory(%s). (offset: %llx) %s\n",
+		       info->name_memory, (unsigned long long)offset, strerror(errno));
+		return FALSE;
+	}
 
-		if (read(info->fd_memory, bufptr, size) != size) {
-			ERRMSG("Can't read the dump memory(%s). %s\n",
-			       info->name_memory, strerror(errno));
-			return FALSE;
-		}
+	if (read(info->fd_memory, bufptr, size) != size) {
+		ERRMSG("Can't read the dump memory(%s). %s\n",
+		       info->name_memory, strerror(errno));
+		return FALSE;
 	}
 
 	return TRUE;
@@ -662,7 +664,18 @@ next_page:
 			if (!readpage_sadump(pgaddr, pgbuf))
 				goto error_cached;
 		} else {
-			if (!readpage_elf(pgaddr, pgbuf))
+			char *mapbuf = mappage_elf(pgaddr);
+			size_t mapoff;
+
+			if (mapbuf) {
+				pgbuf = mapbuf;
+				mapoff = mapbuf - info->mmap_buf;
+				cached->paddr = pgaddr - mapoff;
+				cached->bufptr = info->mmap_buf;
+				cached->buflen = info->mmap_end_offset -
+					info->mmap_start_offset;
+				cached->discard = unmap_cache;
+			} else if (!readpage_elf(pgaddr, pgbuf))
 				goto error_cached;
 		}
 		cache_add(cached);
-- 
1.8.4.5



* [PATCH v2 0/7] Handle mmaped regions in cache
From: Petr Tesarik @ 2015-03-16  8:26 UTC
  To: Atsushi Kumagai, Michael Holzheu; +Cc: kexec mailing list, Jan Willeke

Because all pages must go into the cache, data is unnecessarily copied
from mmapped regions into the cache. Avoid this copying by storing the
mmapped regions directly in the cache.

First, the cache code needs a cleanup and a clarification of its concept,
especially the meaning of the pending list (allocated cache entries
whose content is not yet valid).

Second, the cache must be able to handle differently sized objects
so that it can store individual pages as well as mmapped regions.

Last, the cache eviction code must be extended to allow either
reusing the read buffer or unmapping the region.
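
For reference, struct cache_entry as it stands at the end of the series
(assembled from patches 1/7, 6/7 and 7/7):

struct cache_entry {
	unsigned long long paddr;
	void *bufptr;
	unsigned long buflen;
	struct cache_entry *next, *prev;

	void (*discard)(struct cache_entry *);
};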

Changelog:
  v3: do not mmap fractional pages
  v2: add mmapped regions to page cache

Petr Tesarik (7):
  cache: get rid of search loop in cache_add()
  cache: allow to return a page to the pool
  cache: do not allocate from the pending list
  cache: add hit/miss statistics to the final report
  cache: allocate buffers in one big chunk
  cache: allow arbitrary size of cache entries
  cache: store mapped regions directly in the cache

 cache.c        |  81 +++++++++++++++++----------------
 cache.h        |  16 +++++--
 makedumpfile.c | 138 ++++++++++++++++++++++++++++++++++-----------------------
 3 files changed, 138 insertions(+), 97 deletions(-)

-- 
1.8.4.5



* RE: [PATCH v2 0/7] Handle mmaped regions in cache
From: Atsushi Kumagai @ 2015-03-18  4:17 UTC
  To: ptesarik; +Cc: holzheu, kexec, willeke

Hello Petr,

>Because all pages must go into the cache, data is unnecessarily copied
>from mmapped regions into the cache. Avoid this copying by storing the
>mmapped regions directly in the cache.
>
>First, the cache code needs a cleanup and a clarification of its concept,
>especially the meaning of the pending list (allocated cache entries
>whose content is not yet valid).
>
>Second, the cache must be able to handle differently sized objects
>so that it can store individual pages as well as mmapped regions.
>
>Last, the cache eviction code must be extended to allow either
>reusing the read buffer or unmapping the region.
>
>Changelog:
>  v3: do not mmap fractional pages
>  v2: add mmapped regions to page cache

Thanks for your re-posting.
I'm testing with the v3 patches; they work fine so far,
so v1.5.8 can be released next week.


Thanks
Atsushi Kumagai

