* [PATCH] [GNTTAB] expandable grant table
[not found] <eab087540611141441s4fbe89b8k48e062c51730932a@mail.gmail.com>
@ 2007-01-05 13:43 ` Keir Fraser
2007-01-06 6:48 ` Isaku Yamahata
0 siblings, 1 reply; 9+ messages in thread
From: Keir Fraser @ 2007-01-05 13:43 UTC (permalink / raw)
To: xen-devel
[-- Attachment #1: Type: text/plain, Size: 209 bytes --]
This one is waiting for final review before being checked in. Some people
want the functionality this enables (>3 VIFs per guest) so I'm posting the
patch in advance of applying it to xen-unstable.
-- Keir
[-- Attachment #2: expandable_grant_vs_12410.patch --]
[-- Type: application/octet-stream, Size: 44767 bytes --]
diff -r f026d4091322 linux-2.6-xen-sparse/drivers/xen/core/gnttab.c
--- a/linux-2.6-xen-sparse/drivers/xen/core/gnttab.c Tue Nov 14 18:52:58 2006 +0000
+++ b/linux-2.6-xen-sparse/drivers/xen/core/gnttab.c Tue Nov 14 15:47:51 2006 +0000
@@ -3,7 +3,7 @@
*
* Granting foreign access to our memory reservation.
*
- * Copyright (c) 2005, Christopher Clark
+ * Copyright (c) 2005-2006, Christopher Clark
* Copyright (c) 2004-2005, K A Fraser
*
* This program is free software; you can redistribute it and/or
@@ -35,7 +35,6 @@
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
-#include <linux/vmalloc.h>
#include <xen/interface/xen.h>
#include <xen/gnttab.h>
#include <asm/pgtable.h>
@@ -50,37 +49,63 @@
/* External tools reserve first few grant table entries. */
#define NR_RESERVED_ENTRIES 8
-
-#define NR_GRANT_ENTRIES \
- (NR_GRANT_FRAMES * PAGE_SIZE / sizeof(struct grant_entry))
-#define GNTTAB_LIST_END (NR_GRANT_ENTRIES + 1)
-
-static grant_ref_t gnttab_list[NR_GRANT_ENTRIES];
+#define GNTTAB_LIST_END 0xffffffff
+#define GREFS_PER_GRANT_FRAME (PAGE_SIZE / sizeof(grant_entry_t))
+
+static grant_ref_t **gnttab_list;
+static unsigned int nr_grant_frames;
+static unsigned int boot_max_nr_grant_frames;
static int gnttab_free_count;
static grant_ref_t gnttab_free_head;
static DEFINE_SPINLOCK(gnttab_list_lock);
static struct grant_entry *shared;
+#ifndef CONFIG_XEN
+static unsigned long resume_frames;
+#endif
static struct gnttab_free_callback *gnttab_free_callback_list;
+static int gnttab_expand(unsigned int req_entries);
+
+static inline unsigned int
+gnttab_list_table(grant_ref_t ref)
+{
+ return (ref / (PAGE_SIZE / sizeof(grant_ref_t *)));
+}
+
+static inline unsigned int
+gnttab_list_index(grant_ref_t ref)
+{
+ return (ref % (PAGE_SIZE / sizeof(grant_ref_t *)));
+}
+
static int get_free_entries(int count)
{
unsigned long flags;
- int ref;
+ int ref, rc;
grant_ref_t head;
+
spin_lock_irqsave(&gnttab_list_lock, flags);
- if (gnttab_free_count < count) {
+
+ if ((gnttab_free_count < count) &&
+ ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
spin_unlock_irqrestore(&gnttab_list_lock, flags);
- return -1;
- }
+ return rc;
+ }
+
ref = head = gnttab_free_head;
gnttab_free_count -= count;
while (count-- > 1)
- head = gnttab_list[head];
- gnttab_free_head = gnttab_list[head];
- gnttab_list[head] = GNTTAB_LIST_END;
+ head = gnttab_list[gnttab_list_table(head)]
+ [gnttab_list_index(head)];
+ gnttab_free_head = gnttab_list[gnttab_list_table(head)]
+ [gnttab_list_index(head)];
+ gnttab_list[gnttab_list_table(head)]
+ [gnttab_list_index(head)] = GNTTAB_LIST_END;
+
spin_unlock_irqrestore(&gnttab_list_lock, flags);
+
return ref;
}
@@ -116,7 +141,8 @@ static void put_free_entry(grant_ref_t r
{
unsigned long flags;
spin_lock_irqsave(&gnttab_list_lock, flags);
- gnttab_list[ref] = gnttab_free_head;
+ gnttab_list[gnttab_list_table(ref)]
+ [gnttab_list_index(ref)] = gnttab_free_head;
gnttab_free_head = ref;
gnttab_free_count++;
check_free_callbacks();
@@ -132,7 +158,7 @@ int gnttab_grant_foreign_access(domid_t
{
int ref;
- if (unlikely((ref = get_free_entry()) == -1))
+ if (unlikely((ref = get_free_entry()) < 0))
return -ENOSPC;
shared[ref].frame = frame;
@@ -202,7 +228,7 @@ int gnttab_grant_foreign_transfer(domid_
{
int ref;
- if (unlikely((ref = get_free_entry()) == -1))
+ if (unlikely((ref = get_free_entry()) < 0))
return -ENOSPC;
gnttab_grant_foreign_transfer_ref(ref, domid, pfn);
@@ -273,11 +299,14 @@ void gnttab_free_grant_references(grant_
return;
spin_lock_irqsave(&gnttab_list_lock, flags);
ref = head;
- while (gnttab_list[ref] != GNTTAB_LIST_END) {
- ref = gnttab_list[ref];
+ while (gnttab_list[gnttab_list_table(ref)]
+ [gnttab_list_index(ref)] != GNTTAB_LIST_END) {
+ ref = gnttab_list[gnttab_list_table(ref)]
+ [gnttab_list_index(ref)];
count++;
}
- gnttab_list[ref] = gnttab_free_head;
+ gnttab_list[gnttab_list_table(ref)]
+ [gnttab_list_index(ref)] = gnttab_free_head;
gnttab_free_head = head;
gnttab_free_count += count;
check_free_callbacks();
@@ -289,7 +318,7 @@ int gnttab_alloc_grant_references(u16 co
{
int h = get_free_entries(count);
- if (h == -1)
+ if (h < 0)
return -ENOSPC;
*head = h;
@@ -309,7 +338,8 @@ int gnttab_claim_grant_reference(grant_r
grant_ref_t g = *private_head;
if (unlikely(g == GNTTAB_LIST_END))
return -ENOSPC;
- *private_head = gnttab_list[g];
+ *private_head = gnttab_list[gnttab_list_table(g)]
+ [gnttab_list_index(g)];
return g;
}
EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);
@@ -317,7 +347,8 @@ void gnttab_release_grant_reference(gran
void gnttab_release_grant_reference(grant_ref_t *private_head,
grant_ref_t release)
{
- gnttab_list[release] = *private_head;
+ gnttab_list[gnttab_list_table(release)]
+ [gnttab_list_index(release)] = *private_head;
*private_head = release;
}
EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);
@@ -356,6 +387,65 @@ void gnttab_cancel_free_callback(struct
}
EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);
+static int grow_gnttab_list(unsigned int more_frames)
+{
+ unsigned int new_nr_grant_frames, extra_entries, i;
+
+ new_nr_grant_frames = nr_grant_frames + more_frames;
+ extra_entries = more_frames * GREFS_PER_GRANT_FRAME;
+
+ for (i = nr_grant_frames; i < new_nr_grant_frames; i++)
+ {
+ gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
+ if (!gnttab_list[i])
+ goto grow_nomem;
+ }
+
+
+ for (i = GREFS_PER_GRANT_FRAME * nr_grant_frames;
+ i < GREFS_PER_GRANT_FRAME * new_nr_grant_frames - 1; i++)
+ gnttab_list[gnttab_list_table(i)][gnttab_list_index(i)] = i + 1;
+
+ gnttab_list[new_nr_grant_frames - 1][GREFS_PER_GRANT_FRAME - 1] =
+ gnttab_free_head;
+ gnttab_free_head = GREFS_PER_GRANT_FRAME * nr_grant_frames;
+ gnttab_free_count += extra_entries;
+
+ nr_grant_frames = new_nr_grant_frames;
+
+ check_free_callbacks();
+
+ return 0;
+
+grow_nomem:
+ for ( ; i >= nr_grant_frames; i--)
+ free_page((unsigned long) gnttab_list[i]);
+ return -ENOMEM;
+}
+
+static unsigned int __max_nr_grant_frames(void)
+{
+ struct gnttab_query_size query;
+ int rc;
+
+ query.dom = DOMID_SELF;
+
+ rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
+ if ((rc < 0) || (query.status != GNTST_okay))
+ return 4; /* Legacy max supported number of frames */
+
+ return query.max_nr_frames;
+}
+
+static inline unsigned int max_nr_grant_frames(void)
+{
+ unsigned int xen_max = __max_nr_grant_frames();
+
+ if (xen_max > boot_max_nr_grant_frames)
+ return boot_max_nr_grant_frames;
+ return xen_max;
+}
+
#ifdef CONFIG_XEN
#ifndef __ia64__
@@ -378,49 +468,68 @@ static int unmap_pte_fn(pte_t *pte, stru
}
#endif
-int gnttab_resume(void)
+static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
struct gnttab_setup_table setup;
- unsigned long frames[NR_GRANT_FRAMES];
+ unsigned long *frames;
+ unsigned int nr_gframes = end_idx + 1;
int rc;
#ifndef __ia64__
- void *pframes = frames;
+ void *pframes;
struct vm_struct *area;
#endif
+ frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC);
+ if (!frames)
+ return -ENOMEM;
+
setup.dom = DOMID_SELF;
- setup.nr_frames = NR_GRANT_FRAMES;
+ setup.nr_frames = nr_gframes;
set_xen_guest_handle(setup.frame_list, frames);
rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
- if (rc == -ENOSYS)
+ if (rc == -ENOSYS) {
+ kfree(frames);
return -ENOSYS;
+ }
BUG_ON(rc || setup.status);
#ifndef __ia64__
if (shared == NULL) {
- area = get_vm_area(PAGE_SIZE * NR_GRANT_FRAMES, VM_IOREMAP);
+ area = get_vm_area(PAGE_SIZE * max_nr_grant_frames(),
+ VM_IOREMAP);
BUG_ON(area == NULL);
shared = area->addr;
}
+ /* The duplicate pointer pframes exists because apply_to_page_range
+ * modifies the pointer it uses. This complicates the kfree.
+ */
+ pframes = frames;
rc = apply_to_page_range(&init_mm, (unsigned long)shared,
- PAGE_SIZE * NR_GRANT_FRAMES,
+ PAGE_SIZE * nr_gframes,
map_pte_fn, &pframes);
BUG_ON(rc);
#else
shared = __va(frames[0] << PAGE_SHIFT);
- printk("grant table at %p\n", shared);
-#endif
-
- return 0;
+#endif
+ kfree(frames);
+
+ return 0;
+}
+
+int gnttab_resume(void)
+{
+ if (max_nr_grant_frames() < nr_grant_frames)
+ return -ENOSYS;
+ return gnttab_map(0, nr_grant_frames - 1);
}
int gnttab_suspend(void)
{
#ifndef __ia64__
apply_to_page_range(&init_mm, (unsigned long)shared,
- PAGE_SIZE * NR_GRANT_FRAMES,
+ PAGE_SIZE * nr_grant_frames,
unmap_pte_fn, NULL);
#endif
return 0;
@@ -430,53 +539,111 @@ int gnttab_suspend(void)
#include <platform-pci.h>
-int gnttab_resume(void)
-{
- unsigned long frames;
+static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
+{
struct xen_add_to_physmap xatp;
unsigned int i;
- frames = alloc_xen_mmio(PAGE_SIZE * NR_GRANT_FRAMES);
-
- for (i = 0; i < NR_GRANT_FRAMES; i++) {
+ /* Loop backwards, so that the first hypercall has the largest index,
+ * ensuring that the table will only grow once.
+ */
+ i = end_idx;
+ do {
xatp.domid = DOMID_SELF;
xatp.idx = i;
xatp.space = XENMAPSPACE_grant_table;
- xatp.gpfn = (frames >> PAGE_SHIFT) + i;
+ xatp.gpfn = (resume_frames >> PAGE_SHIFT) + i;
if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
BUG();
- }
+ } while (i-- > start_idx);
-
- shared = ioremap(frames, PAGE_SIZE * NR_GRANT_FRAMES);
+
+ return 0;
+}
+
+int gnttab_resume(void)
+{
+ struct xen_add_to_physmap xatp;
+ unsigned int i, max_nr_gframes, nr_gframes;
+
+ nr_gframes = nr_grant_frames;
+ max_nr_gframes = max_nr_grant_frames();
+ if (max_nr_gframes < nr_gframes)
+ return -ENOSYS;
+
+ resume_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes);
+
+ gnttab_map(0, nr_gframes - 1);
+
+ shared = ioremap(resume_frames, PAGE_SIZE * max_nr_gframes);
if (shared == NULL) {
printk("error to ioremap gnttab share frames\n");
return -1;
}
-
return 0;
}
int gnttab_suspend(void)
{
iounmap(shared);
+ resume_frames = 0;
return 0;
}
#endif /* !CONFIG_XEN */
+static int gnttab_expand(unsigned int req_entries)
+{
+ int rc;
+ unsigned int cur, extra;
+
+ cur = nr_grant_frames;
+ extra = ((req_entries + (GREFS_PER_GRANT_FRAME-1)) /
+ GREFS_PER_GRANT_FRAME);
+ if (cur + extra > max_nr_grant_frames())
+ return -ENOSPC;
+
+ if ((rc = gnttab_map(cur, cur + extra - 1)) == 0)
+ rc = grow_gnttab_list(extra);
+
+ return rc;
+}
+
int __init gnttab_init(void)
{
int i;
+ unsigned int max_nr_glist_frames;
if (!is_running_on_xen())
return -ENODEV;
+ nr_grant_frames = 1;
+ boot_max_nr_grant_frames = __max_nr_grant_frames();
+
+ /* Determine the maximum number of frames required for the
+ * grant reference free list on the current hypervisor.
+ */
+ max_nr_glist_frames = boot_max_nr_grant_frames * GREFS_PER_GRANT_FRAME /
+ (PAGE_SIZE / sizeof(grant_ref_t));
+
+ gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
+ GFP_KERNEL);
+ if (gnttab_list == NULL)
+ return -ENOMEM;
+
+ gnttab_list[0] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
+ if (gnttab_list[0] == NULL)
+ {
+ kfree(gnttab_list);
+ return -ENOMEM;
+ }
+
- if (gnttab_resume() < 0)
- return -ENODEV;
+ if (gnttab_resume() < 0) {
+ free_page((unsigned long)gnttab_list[0]);
+ kfree(gnttab_list);
+ return -ENODEV;
+ }
- for (i = NR_RESERVED_ENTRIES; i < NR_GRANT_ENTRIES; i++)
- gnttab_list[i] = i + 1;
- gnttab_free_count = NR_GRANT_ENTRIES - NR_RESERVED_ENTRIES;
+ for (i = NR_RESERVED_ENTRIES; i < GREFS_PER_GRANT_FRAME - 1; i++)
+ gnttab_list[gnttab_list_table(i)][gnttab_list_index(i)] = i + 1;
+
+ gnttab_list[gnttab_list_table(GREFS_PER_GRANT_FRAME - 1)]
+ [gnttab_list_index(GREFS_PER_GRANT_FRAME - 1)]
+ = GNTTAB_LIST_END;
+ gnttab_free_count = GREFS_PER_GRANT_FRAME - NR_RESERVED_ENTRIES;
gnttab_free_head = NR_RESERVED_ENTRIES;
printk("Grant table initialized\n");
diff -r f026d4091322 linux-2.6-xen-sparse/include/xen/gnttab.h
--- a/linux-2.6-xen-sparse/include/xen/gnttab.h Tue Nov 14 18:52:58 2006 +0000
+++ b/linux-2.6-xen-sparse/include/xen/gnttab.h Tue Nov 14 20:37:19 2006 +0000
@@ -42,13 +42,6 @@
#include <asm/maddr.h> /* maddr_t */
#include <xen/interface/grant_table.h>
#include <xen/features.h>
-
-/* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */
-#ifdef __ia64__
-#define NR_GRANT_FRAMES 1
-#else
-#define NR_GRANT_FRAMES 4
-#endif
struct gnttab_free_callback {
struct gnttab_free_callback *next;
@@ -109,12 +102,6 @@ void gnttab_grant_foreign_transfer_ref(g
void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid,
unsigned long pfn);
-#ifdef __ia64__
-#define gnttab_map_vaddr(map) __va(map.dev_bus_addr)
-#else
-#define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr))
-#endif
-
int gnttab_suspend(void);
int gnttab_resume(void);
diff -r f026d4091322 xen/arch/ia64/xen/mm.c
--- a/xen/arch/ia64/xen/mm.c Tue Nov 14 18:52:58 2006 +0000
+++ b/xen/arch/ia64/xen/mm.c Fri Nov 03 18:18:56 2006 +0000
@@ -1983,8 +1983,10 @@ arch_memory_op(int op, XEN_GUEST_HANDLE(
mfn = virt_to_mfn(d->shared_info);
break;
case XENMAPSPACE_grant_table:
- if (xatp.idx < NR_GRANT_FRAMES)
+ spin_lock(d->grant_table->lock);
+ if ( xatp.idx < nr_grant_frames(d->grant_table) )
mfn = virt_to_mfn(d->grant_table->shared) + xatp.idx;
+ spin_unlock(d->grant_table->lock);
break;
default:
break;
diff -r f026d4091322 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Tue Nov 14 18:52:58 2006 +0000
+++ b/xen/arch/x86/mm.c Tue Nov 14 20:37:19 2006 +0000
@@ -2932,8 +2932,16 @@ long arch_memory_op(int op, XEN_GUEST_HA
mfn = virt_to_mfn(d->shared_info);
break;
case XENMAPSPACE_grant_table:
- if ( xatp.idx < NR_GRANT_FRAMES )
- mfn = virt_to_mfn(d->grant_table->shared) + xatp.idx;
+ spin_lock(&d->grant_table->lock);
+
+ if ( (xatp.idx >= nr_grant_frames(d->grant_table)) &&
+ (xatp.idx < max_nr_grant_frames) )
+ gnttab_grow_table(d, xatp.idx + 1);
+
+ if ( xatp.idx < nr_grant_frames(d->grant_table) )
+ mfn = virt_to_mfn(d->grant_table->shared[xatp.idx]);
+
+ spin_unlock(&d->grant_table->lock);
break;
default:
break;
diff -r f026d4091322 xen/common/grant_table.c
--- a/xen/common/grant_table.c Tue Nov 14 18:52:58 2006 +0000
+++ b/xen/common/grant_table.c Tue Nov 14 20:38:15 2006 +0000
@@ -4,7 +4,7 @@
* Mechanism for granting foreign access to page frames, and receiving
* page-ownership transfers.
*
- * Copyright (c) 2005 Christopher Clark
+ * Copyright (c) 2005-2006 Christopher Clark
* Copyright (c) 2004 K A Fraser
* Copyright (c) 2005 Andrew Warfield
* Modifications by Geoffrey Lefebvre are (c) Intel Research Cambridge
@@ -33,6 +33,15 @@
#include <xen/domain_page.h>
#include <acm/acm_hooks.h>
+unsigned int max_nr_grant_frames = DEFAULT_MAX_NR_GRANT_FRAMES;
+integer_param("gnttab_max_nr_frames", max_nr_grant_frames);
+
+/* The maximum number of grant mappings is defined as a multiplier of the
+ * maximum number of grant table entries. This defines the multiplier used.
+ * Pretty arbitrary. [POLICY]
+ */
+#define MAX_MAPTRACK_TO_GRANTS_RATIO 8
+
/*
* The first two members of a grant entry are updated as a combined pair.
* The following union allows that to happen in an endian-neutral fashion.
@@ -52,14 +61,84 @@ union grant_combo {
goto _lbl; \
} while ( 0 )
+static inline unsigned int
+nr_maptrack_frames(struct grant_table *t)
+{
+ return t->maptrack_limit / (PAGE_SIZE / sizeof(struct grant_mapping));
+}
+
+static unsigned inline int max_nr_maptrack_frames(void)
+{
+ return (max_nr_grant_frames * MAX_MAPTRACK_TO_GRANTS_RATIO);
+}
+
+static inline unsigned int
+maptrack_table(grant_handle_t handle)
+{
+ return (handle / (PAGE_SIZE / sizeof(struct grant_mapping)));
+}
+
+static inline unsigned int
+maptrack_index(grant_handle_t handle)
+{
+ return (handle % (PAGE_SIZE / sizeof(struct grant_mapping)));
+}
+
+static inline unsigned int
+num_act_frames_from_sha_frames(const unsigned int num)
+{
+ /* How many frames are needed for the active grant table,
+ * given the size of the shared grant table?
+ *
+ * act_per_page = PAGE_SIZE / sizeof(active_grant_entry_t);
+ * sha_per_page = PAGE_SIZE / sizeof(grant_entry_t);
+ * num_sha_entries = num * sha_per_page;
+ * num_act_frames = (num_sha_entries + (act_per_page-1)) / act_per_page;
+ */
+ return ((num * (PAGE_SIZE / sizeof(grant_entry_t))) +
+ ((PAGE_SIZE / sizeof(struct active_grant_entry))-1))
+ / (PAGE_SIZE / sizeof(struct active_grant_entry));
+}
+
+static inline unsigned int
+nr_active_grant_frames(struct grant_table *gt)
+{
+ return num_act_frames_from_sha_frames(nr_grant_frames(gt));
+}
+
+static inline unsigned int
+shared_table(grant_ref_t ref)
+{
+ return (ref / (PAGE_SIZE / sizeof(grant_entry_t)));
+}
+
+static inline unsigned int
+shared_index(grant_ref_t ref)
+{
+ /* ASSERT(sizeof(grant_entry_t) == 8); */
+ return (ref & ((PAGE_SIZE / sizeof(grant_entry_t))-1));
+}
+
+static inline unsigned int
+active_table(grant_ref_t ref)
+{
+ return (ref / (PAGE_SIZE / sizeof(struct active_grant_entry)));
+}
+
+static inline unsigned int
+active_index(grant_ref_t ref)
+{
+ return (ref % (PAGE_SIZE / sizeof(struct active_grant_entry)));
+}
+
static inline int
-get_maptrack_handle(
+__get_maptrack_handle(
struct grant_table *t)
{
unsigned int h;
if ( unlikely((h = t->maptrack_head) == (t->maptrack_limit - 1)) )
return -1;
- t->maptrack_head = t->maptrack[h].ref;
+ t->maptrack_head = t->maptrack[maptrack_table(h)][maptrack_index(h)].ref;
t->map_count++;
return h;
}
@@ -68,9 +147,63 @@ put_maptrack_handle(
put_maptrack_handle(
struct grant_table *t, int handle)
{
- t->maptrack[handle].ref = t->maptrack_head;
+ t->maptrack[maptrack_table(handle)]
+ [maptrack_index(handle)].ref = t->maptrack_head;
t->maptrack_head = handle;
t->map_count--;
+}
+
+static inline int
+get_maptrack_handle(
+ struct grant_table *lgt)
+{
+ int i;
+ grant_handle_t handle;
+ struct grant_mapping *new_mt;
+ unsigned int new_mt_limit, nr_frames;
+
+ if ( unlikely((handle = __get_maptrack_handle(lgt)) == -1) )
+ {
+ spin_lock(&lgt->lock);
+
+ if ( unlikely((handle = __get_maptrack_handle(lgt)) == -1) )
+ {
+ nr_frames = nr_maptrack_frames(lgt);
+ if ( nr_frames >= max_nr_maptrack_frames() )
+ {
+ spin_unlock(&lgt->lock);
+ return -1;
+ }
+
+ new_mt = alloc_xenheap_page();
+ if ( new_mt == NULL )
+ {
+ spin_unlock(&lgt->lock);
+ return -1;
+ }
+
+ memset(new_mt, 0, PAGE_SIZE);
+
+ new_mt_limit = lgt->maptrack_limit +
+ (PAGE_SIZE / sizeof(struct grant_mapping));
+
+ for ( i = lgt->maptrack_limit; i < new_mt_limit; i++ )
+ {
+ new_mt[maptrack_index(i)].ref = i+1;
+ new_mt[maptrack_index(i)].flags = 0;
+ }
+
+ lgt->maptrack[nr_frames] = new_mt;
+ lgt->maptrack_limit = new_mt_limit;
+
+ gdprintk(XENLOG_INFO,
+ "Increased maptrack size to %u frames.\n", nr_frames + 1);
+ handle = __get_maptrack_handle(lgt);
+ }
+
+ spin_unlock(&lgt->lock);
+ }
+ return handle;
}
/*
@@ -90,6 +223,7 @@ __gnttab_map_grant_ref(
unsigned long frame = 0;
int rc = GNTST_okay;
struct active_grant_entry *act;
+ struct grant_mapping *mt;
grant_entry_t *sha;
union grant_combo scombo, prev_scombo, new_scombo;
@@ -106,11 +240,9 @@ __gnttab_map_grant_ref(
led = current;
ld = led->domain;
- if ( unlikely(op->ref >= NR_GRANT_ENTRIES) ||
- unlikely((op->flags & (GNTMAP_device_map|GNTMAP_host_map)) == 0) )
- {
- gdprintk(XENLOG_INFO, "Bad ref (%d) or flags (%x).\n",
- op->ref, op->flags);
+ if ( unlikely((op->flags & (GNTMAP_device_map|GNTMAP_host_map)) == 0) )
+ {
+ gdprintk(XENLOG_INFO, "Bad flags in grant map op (%x).\n", op->flags);
op->status = GNTST_bad_gntref;
return;
}
@@ -130,51 +262,24 @@ __gnttab_map_grant_ref(
return;
}
- /* Get a maptrack handle. */
if ( unlikely((handle = get_maptrack_handle(ld->grant_table)) == -1) )
{
- int i;
- struct grant_mapping *new_mt;
- struct grant_table *lgt = ld->grant_table;
-
- if ( (lgt->maptrack_limit << 1) > MAPTRACK_MAX_ENTRIES )
- {
- put_domain(rd);
- gdprintk(XENLOG_INFO, "Maptrack table is at maximum size.\n");
- op->status = GNTST_no_device_space;
- return;
- }
-
- /* Grow the maptrack table. */
- new_mt = alloc_xenheap_pages(lgt->maptrack_order + 1);
- if ( new_mt == NULL )
- {
- put_domain(rd);
- gdprintk(XENLOG_INFO, "No more map handles available.\n");
- op->status = GNTST_no_device_space;
- return;
- }
-
- memcpy(new_mt, lgt->maptrack, PAGE_SIZE << lgt->maptrack_order);
- for ( i = lgt->maptrack_limit; i < (lgt->maptrack_limit << 1); i++ )
- {
- new_mt[i].ref = i+1;
- new_mt[i].flags = 0;
- }
-
- free_xenheap_pages(lgt->maptrack, lgt->maptrack_order);
- lgt->maptrack = new_mt;
- lgt->maptrack_order += 1;
- lgt->maptrack_limit <<= 1;
-
- gdprintk(XENLOG_INFO, "Doubled maptrack size\n");
- handle = get_maptrack_handle(ld->grant_table);
- }
-
- act = &rd->grant_table->active[op->ref];
- sha = &rd->grant_table->shared[op->ref];
+ put_domain(rd);
+ gdprintk(XENLOG_INFO, "Failed to obtain maptrack handle.\n");
+ op->status = GNTST_no_device_space;
+ return;
+ }
spin_lock(&rd->grant_table->lock);
+
+ act = &rd->grant_table->active[active_table(op->ref)]
+ [active_index(op->ref)];
+ sha = &rd->grant_table->shared[shared_table(op->ref)]
+ [shared_index(op->ref)];
+
+ /* Bounds check on the grant ref */
+ if ( unlikely(op->ref >= nr_grant_entries(rd->grant_table)))
+ PIN_FAIL(unlock_out, GNTST_bad_gntref, "Bad ref (%d).\n", op->ref);
/* If already pinned, check the active domid and avoid refcnt overflow. */
if ( act->pin &&
@@ -245,9 +350,10 @@ __gnttab_map_grant_ref(
act->pin += (op->flags & GNTMAP_readonly) ?
GNTPIN_hstr_inc : GNTPIN_hstw_inc;
+ frame = act->frame;
+
spin_unlock(&rd->grant_table->lock);
- frame = act->frame;
if ( unlikely(!mfn_valid(frame)) ||
unlikely(!((op->flags & GNTMAP_readonly) ?
get_page(mfn_to_page(frame), rd) :
@@ -281,9 +387,11 @@ __gnttab_map_grant_ref(
TRACE_1D(TRC_MEM_PAGE_GRANT_MAP, op->dom);
- ld->grant_table->maptrack[handle].domid = op->dom;
- ld->grant_table->maptrack[handle].ref = op->ref;
- ld->grant_table->maptrack[handle].flags = op->flags;
+ mt = &ld->grant_table->maptrack[maptrack_table(handle)]
+ [maptrack_index(handle)];
+ mt->domid = op->dom;
+ mt->ref = op->ref;
+ mt->flags = op->flags;
op->dev_bus_addr = (u64)frame << PAGE_SHIFT;
op->handle = handle;
@@ -294,6 +402,11 @@ __gnttab_map_grant_ref(
undo_out:
spin_lock(&rd->grant_table->lock);
+
+ act = &rd->grant_table->active[active_table(op->ref)]
+ [active_index(op->ref)];
+ sha = &rd->grant_table->shared[shared_table(op->ref)]
+ [shared_index(op->ref)];
if ( op->flags & GNTMAP_device_map )
act->pin -= (op->flags & GNTMAP_readonly) ?
@@ -353,12 +466,19 @@ __gnttab_unmap_grant_ref(
frame = (unsigned long)(op->dev_bus_addr >> PAGE_SHIFT);
- map = &ld->grant_table->maptrack[op->handle];
-
- if ( unlikely(op->handle >= ld->grant_table->maptrack_limit) ||
- unlikely(!map->flags) )
+ if ( unlikely(op->handle >= ld->grant_table->maptrack_limit) )
{
gdprintk(XENLOG_INFO, "Bad handle (%d).\n", op->handle);
+ op->status = GNTST_bad_handle;
+ return;
+ }
+
+ map = &ld->grant_table->maptrack[maptrack_table(op->handle)]
+ [maptrack_index(op->handle)];
+
+ if ( unlikely(!map->flags) )
+ {
+ gdprintk(XENLOG_INFO, "Zero flags for handle (%d).\n", op->handle);
op->status = GNTST_bad_handle;
return;
}
@@ -378,10 +498,12 @@ __gnttab_unmap_grant_ref(
TRACE_1D(TRC_MEM_PAGE_GRANT_UNMAP, dom);
- act = &rd->grant_table->active[ref];
- sha = &rd->grant_table->shared[ref];
-
spin_lock(&rd->grant_table->lock);
+
+ act = &rd->grant_table->active[active_table(ref)]
+ [active_index(ref)];
+ sha = &rd->grant_table->shared[shared_table(ref)]
+ [shared_index(ref)];
if ( frame == 0 )
{
@@ -476,6 +598,62 @@ fault:
return -EFAULT;
}
+int
+gnttab_grow_table(struct domain *d, unsigned int req_nr_frames)
+{
+ /* d's grant table lock must be held by the caller */
+
+ struct grant_table *gt = d->grant_table;
+ unsigned int i;
+
+ ASSERT(req_nr_frames <= max_nr_grant_frames);
+
+ gdprintk(XENLOG_INFO,
+ "Expanding dom (%d) grant table from (%d) to (%d) frames.\n",
+ d->domain_id, nr_grant_frames(gt), req_nr_frames);
+
+ /* Active */
+ for ( i = nr_active_grant_frames(gt);
+ i < num_act_frames_from_sha_frames(req_nr_frames); i++ )
+ {
+ if ( (gt->active[i] = alloc_xenheap_page()) == NULL )
+ goto active_alloc_failed;
+ memset(gt->active[i], 0, PAGE_SIZE);
+ }
+
+ /* Shared */
+ for ( i = nr_grant_frames(gt); i < req_nr_frames; i++ )
+ {
+ if ( (gt->shared[i] = alloc_xenheap_page()) == NULL )
+ goto shared_alloc_failed;
+ memset(gt->shared[i], 0, PAGE_SIZE);
+ }
+
+ /* Share the new shared frames with the recipient domain */
+ for ( i = nr_grant_frames(gt); i < req_nr_frames; i++ )
+ gnttab_create_shared_page(d, gt, i);
+
+ gt->nr_grant_frames = req_nr_frames;
+
+ return 1;
+
+shared_alloc_failed:
+ for ( i = nr_grant_frames(gt); i < req_nr_frames; i++ )
+ {
+ free_xenheap_page(gt->shared[i]);
+ gt->shared[i] = NULL;
+ }
+active_alloc_failed:
+ for ( i = nr_active_grant_frames(gt);
+ i < num_act_frames_from_sha_frames(req_nr_frames); i++ )
+ {
+ free_xenheap_page(gt->active[i]);
+ gt->active[i] = NULL;
+ }
+ gdprintk(XENLOG_INFO, "Allocation failure when expanding grant table.\n");
+ return 0;
+}
+
static long
gnttab_setup_table(
XEN_GUEST_HANDLE(gnttab_setup_table_t) uop, unsigned int count)
@@ -495,11 +673,11 @@ gnttab_setup_table(
return -EFAULT;
}
- if ( unlikely(op.nr_frames > NR_GRANT_FRAMES) )
+ if ( unlikely(op.nr_frames > max_nr_grant_frames) )
{
gdprintk(XENLOG_INFO, "Xen only supports up to %d grant-table frames"
" per domain.\n",
- NR_GRANT_FRAMES);
+ max_nr_grant_frames);
op.status = GNTST_general_error;
goto out;
}
@@ -523,6 +701,21 @@ gnttab_setup_table(
}
ASSERT(d->grant_table != NULL);
+
+ spin_lock(&d->grant_table->lock);
+
+ if ( (op.nr_frames > nr_grant_frames(d->grant_table)) &&
+ !gnttab_grow_table(d, op.nr_frames) )
+ {
+ gdprintk(XENLOG_INFO,
+ "Expand grant table to (%d) failed. Current: (%d) Max: (%d).\n",
+ op.nr_frames,
+ nr_grant_frames(d->grant_table),
+ max_nr_grant_frames);
+ op.status = GNTST_general_error;
+ goto setup_unlock_out;
+ }
+
op.status = GNTST_okay;
for ( i = 0; i < op.nr_frames; i++ )
{
@@ -530,9 +723,66 @@ gnttab_setup_table(
(void)copy_to_guest_offset(op.frame_list, i, &gmfn, 1);
}
+ setup_unlock_out:
+ spin_unlock(&d->grant_table->lock);
+
put_domain(d);
out:
+ if ( unlikely(copy_to_guest(uop, &op, 1)) )
+ return -EFAULT;
+
+ return 0;
+}
+
+static long
+gnttab_query_size(
+ XEN_GUEST_HANDLE(gnttab_query_size_t) uop, unsigned int count)
+{
+ struct gnttab_query_size op;
+ struct domain *d;
+ domid_t dom;
+
+ if ( count != 1 )
+ return -EINVAL;
+
+ if ( unlikely(copy_from_guest(&op, uop, 1) != 0) )
+ {
+ gdprintk(XENLOG_INFO, "Fault while reading gnttab_query_size_t.\n");
+ return -EFAULT;
+ }
+
+ dom = op.dom;
+ if ( dom == DOMID_SELF )
+ {
+ dom = current->domain->domain_id;
+ }
+ else if ( unlikely(!IS_PRIV(current->domain)) )
+ {
+ op.status = GNTST_permission_denied;
+ goto query_out;
+ }
+
+ if ( unlikely((d = find_domain_by_id(dom)) == NULL) )
+ {
+ gdprintk(XENLOG_INFO, "Bad domid %d.\n", dom);
+ op.status = GNTST_bad_domain;
+ goto query_out;
+ }
+
+ ASSERT(d->grant_table != NULL);
+
+ spin_lock(&d->grant_table->lock);
+
+ op.nr_frames = nr_grant_frames(d->grant_table);
+ op.max_nr_frames = max_nr_grant_frames;
+ op.status = GNTST_okay;
+
+ spin_unlock(&d->grant_table->lock);
+
+ put_domain(d);
+
+ query_out:
if ( unlikely(copy_to_guest(uop, &op, 1)) )
return -EFAULT;
@@ -552,17 +802,23 @@ gnttab_prepare_for_transfer(
union grant_combo scombo, prev_scombo, new_scombo;
int retries = 0;
- if ( unlikely((rgt = rd->grant_table) == NULL) ||
- unlikely(ref >= NR_GRANT_ENTRIES) )
- {
- gdprintk(XENLOG_INFO, "Dom %d has no g.t., or ref is bad (%d).\n",
- rd->domain_id, ref);
+ if ( unlikely((rgt = rd->grant_table) == NULL) )
+ {
+ gdprintk(XENLOG_INFO, "Dom %d has no grant table.\n", rd->domain_id);
return 0;
}
spin_lock(&rgt->lock);
- sha = &rgt->shared[ref];
+ if ( unlikely(ref >= nr_grant_entries(rd->grant_table)) )
+ {
+ gdprintk(XENLOG_INFO,
+ "Bad grant reference (%d) for transfer to domain(%d).\n",
+ ref, rd->domain_id);
+ goto fail;
+ }
+
+ sha = &rgt->shared[shared_table(ref)][shared_index(ref)];
scombo.word = *(u32 *)&sha->flags;
@@ -698,11 +954,16 @@ gnttab_transfer(
TRACE_1D(TRC_MEM_PAGE_GRANT_TRANSFER, e->domain_id);
/* Tell the guest about its new page frame. */
- sha = &e->grant_table->shared[gop.ref];
+ spin_lock(&e->grant_table->lock);
+
+ sha = &e->grant_table->shared[shared_table(gop.ref)]
+ [shared_index(gop.ref)];
guest_physmap_add_page(e, sha->frame, mfn);
sha->frame = mfn;
wmb();
sha->flags |= GTF_transfer_completed;
+
+ spin_unlock(&e->grant_table->lock);
put_domain(e);
@@ -726,17 +987,26 @@ __release_grant_for_copy(
__release_grant_for_copy(
struct domain *rd, unsigned long gref, int readonly)
{
- grant_entry_t *const sha = &rd->grant_table->shared[gref];
- struct active_grant_entry *const act = &rd->grant_table->active[gref];
+ grant_entry_t *sha;
+ struct active_grant_entry *act;
+ unsigned long r_frame;
spin_lock(&rd->grant_table->lock);
+ act = &rd->grant_table->active[active_table(gref)]
+ [active_index(gref)];
+ sha = &rd->grant_table->shared[shared_table(gref)]
+ [shared_index(gref)];
+ r_frame = act->frame;
+
if ( readonly )
{
act->pin -= GNTPIN_hstr_inc;
}
else
{
+ gnttab_mark_dirty(rd, r_frame);
+
act->pin -= GNTPIN_hstw_inc;
if ( !(act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) )
gnttab_clear_flag(_GTF_writing, &sha->flags);
@@ -763,14 +1033,16 @@ __acquire_grant_for_copy(
int retries = 0;
union grant_combo scombo, prev_scombo, new_scombo;
- if ( unlikely(gref >= NR_GRANT_ENTRIES) )
- PIN_FAIL(error_out, GNTST_bad_gntref,
+ spin_lock(&rd->grant_table->lock);
+
+ if ( unlikely(gref >= nr_grant_entries(rd->grant_table)) )
+ PIN_FAIL(unlock_out, GNTST_bad_gntref,
"Bad grant reference %ld\n", gref);
-
- act = &rd->grant_table->active[gref];
- sha = &rd->grant_table->shared[gref];
-
- spin_lock(&rd->grant_table->lock);
+
+ act = &rd->grant_table->active[active_table(gref)]
+ [active_index(gref)];
+ sha = &rd->grant_table->shared[shared_table(gref)]
+ [shared_index(gref)];
/* If already pinned, check the active domid and avoid refcnt overflow. */
if ( act->pin &&
@@ -833,7 +1105,6 @@ __acquire_grant_for_copy(
unlock_out:
spin_unlock(&rd->grant_table->lock);
- error_out:
return rc;
}
@@ -1027,6 +1298,12 @@ do_grant_table_op(
rc = gnttab_copy(copy, count);
break;
}
+ case GNTTABOP_query_size:
+ {
+ rc = gnttab_query_size(
+ guest_handle_cast(uop, gnttab_query_size_t), count);
+ break;
+ }
default:
rc = -ENOSYS;
break;
@@ -1038,6 +1315,13 @@ do_grant_table_op(
return rc;
}
+static unsigned int max_nr_active_grant_frames(void)
+{
+ return (((max_nr_grant_frames * (PAGE_SIZE / sizeof(grant_entry_t))) +
+ ((PAGE_SIZE / sizeof(struct active_grant_entry))-1))
+ / (PAGE_SIZE / sizeof(struct active_grant_entry)));
+}
+
int
grant_table_create(
struct domain *d)
@@ -1045,50 +1329,75 @@ grant_table_create(
struct grant_table *t;
int i;
- BUG_ON(MAPTRACK_MAX_ENTRIES < NR_GRANT_ENTRIES);
+ /* If this sizeof assertion fails, fix the function: shared_index */
+ ASSERT(sizeof(grant_entry_t) == 8);
+
if ( (t = xmalloc(struct grant_table)) == NULL )
- goto no_mem;
+ goto no_mem_0;
/* Simple stuff. */
memset(t, 0, sizeof(*t));
spin_lock_init(&t->lock);
+ t->nr_grant_frames = INITIAL_NR_GRANT_FRAMES;
/* Active grant table. */
- t->active = xmalloc_array(struct active_grant_entry, NR_GRANT_ENTRIES);
- if ( t->active == NULL )
- goto no_mem;
- memset(t->active, 0, sizeof(struct active_grant_entry) * NR_GRANT_ENTRIES);
+ if ( (t->active = xmalloc_array(struct active_grant_entry *,
+ max_nr_active_grant_frames())) == NULL )
+ goto no_mem_1;
+ memset(t->active, 0, max_nr_active_grant_frames() * sizeof(t->active[0]));
+ for ( i = 0;
+ i < num_act_frames_from_sha_frames(INITIAL_NR_GRANT_FRAMES); i++ )
+ {
+ if ( (t->active[i] = alloc_xenheap_page()) == NULL )
+ goto no_mem_2;
+ memset(t->active[i], 0, PAGE_SIZE);
+ }
/* Tracking of mapped foreign frames table */
- if ( (t->maptrack = alloc_xenheap_page()) == NULL )
- goto no_mem;
- t->maptrack_order = 0;
+ if ( (t->maptrack = xmalloc_array(struct grant_mapping *,
+ max_nr_maptrack_frames())) == NULL )
+ goto no_mem_2;
+ memset(t->maptrack, 0, max_nr_maptrack_frames() * sizeof(t->maptrack[0]));
+ if ( (t->maptrack[0] = alloc_xenheap_page()) == NULL )
+ goto no_mem_3;
t->maptrack_limit = PAGE_SIZE / sizeof(struct grant_mapping);
- memset(t->maptrack, 0, PAGE_SIZE);
for ( i = 0; i < t->maptrack_limit; i++ )
- t->maptrack[i].ref = i+1;
+ t->maptrack[0][i].ref = i+1;
/* Shared grant table. */
- t->shared = alloc_xenheap_pages(ORDER_GRANT_FRAMES);
- if ( t->shared == NULL )
- goto no_mem;
- memset(t->shared, 0, NR_GRANT_FRAMES * PAGE_SIZE);
-
- for ( i = 0; i < NR_GRANT_FRAMES; i++ )
+ if ( (t->shared = xmalloc_array(struct grant_entry *,
+ max_nr_grant_frames)) == NULL )
+ goto no_mem_3;
+ memset(t->shared, 0, max_nr_grant_frames * sizeof(t->shared[0]));
+ for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
+ {
+ if ( (t->shared[i] = alloc_xenheap_page()) == NULL )
+ goto no_mem_4;
+ memset(t->shared[i], 0, PAGE_SIZE);
+ }
+
+ for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
gnttab_create_shared_page(d, t, i);
/* Okay, install the structure. */
- wmb(); /* avoid races with lock-free access to d->grant_table */
d->grant_table = t;
return 0;
- no_mem:
- if ( t != NULL )
- {
- xfree(t->active);
- free_xenheap_page(t->maptrack);
- xfree(t);
- }
+ no_mem_4:
+ for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
+ free_xenheap_page(t->shared[i]);
+ xfree(t->shared);
+ no_mem_3:
+ free_xenheap_page(t->maptrack[0]);
+ xfree(t->maptrack);
+ no_mem_2:
+ for ( i = 0;
+ i < num_act_frames_from_sha_frames(INITIAL_NR_GRANT_FRAMES); i++ )
+ free_xenheap_page(t->active[i]);
+ xfree(t->active);
+ no_mem_1:
+ xfree(t);
+ no_mem_0:
return -ENOMEM;
}
@@ -1108,7 +1417,8 @@ gnttab_release_mappings(
for ( handle = 0; handle < gt->maptrack_limit; handle++ )
{
- map = &gt->maptrack[handle];
+ map = &gt->maptrack[maptrack_table(handle)]
+ [maptrack_index(handle)];
if ( !(map->flags & (GNTMAP_device_map|GNTMAP_host_map)) )
continue;
@@ -1123,8 +1433,8 @@ gnttab_release_mappings(
spin_lock(&rd->grant_table->lock);
- act = &rd->grant_table->active[ref];
- sha = &rd->grant_table->shared[ref];
+ act = &rd->grant_table->active[active_table(ref)][active_index(ref)];
+ sha = &rd->grant_table->shared[shared_table(ref)][shared_index(ref)];
if ( map->flags & GNTMAP_readonly )
{
@@ -1181,15 +1491,24 @@ grant_table_destroy(
struct domain *d)
{
struct grant_table *t = d->grant_table;
+ int i;
if ( t == NULL )
return;
- free_xenheap_pages(t->shared, ORDER_GRANT_FRAMES);
- free_xenheap_pages(t->maptrack, t->maptrack_order);
+ for ( i = 0; i < nr_grant_frames(t); i++ )
+ free_xenheap_page(t->shared[i]);
+ xfree(t->shared);
+
+ for ( i = 0; i < nr_maptrack_frames(t); i++ )
+ free_xenheap_page(t->maptrack[i]);
+ xfree(t->maptrack);
+
+ for ( i = 0; i < nr_active_grant_frames(t); i++ )
+ free_xenheap_page(t->active[i]);
xfree(t->active);
+
xfree(t);
-
d->grant_table = NULL;
}
diff -r f026d4091322 xen/include/asm-ia64/grant_table.h
--- a/xen/include/asm-ia64/grant_table.h Tue Nov 14 18:52:58 2006 +0000
+++ b/xen/include/asm-ia64/grant_table.h Fri Nov 03 18:18:56 2006 +0000
@@ -5,7 +5,7 @@
#ifndef __ASM_GRANT_TABLE_H__
#define __ASM_GRANT_TABLE_H__
-#define ORDER_GRANT_FRAMES 0
+#define INITIAL_NR_GRANT_FRAMES 1
// for grant map/unmap
int create_grant_host_mapping(unsigned long gpaddr, unsigned long mfn, unsigned int flags);
diff -r f026d4091322 xen/include/asm-powerpc/grant_table.h
--- a/xen/include/asm-powerpc/grant_table.h Tue Nov 14 18:52:58 2006 +0000
+++ b/xen/include/asm-powerpc/grant_table.h Fri Nov 03 18:18:56 2006 +0000
@@ -23,7 +23,7 @@
#include <asm/mm.h>
-#define ORDER_GRANT_FRAMES 2
+#define INITIAL_NR_GRANT_FRAMES 4
/*
* Caller must own caller's BIGLOCK, is responsible for flushing the TLB, and
diff -r f026d4091322 xen/include/asm-x86/grant_table.h
--- a/xen/include/asm-x86/grant_table.h Tue Nov 14 18:52:58 2006 +0000
+++ b/xen/include/asm-x86/grant_table.h Tue Nov 14 20:38:15 2006 +0000
@@ -7,7 +7,7 @@
#ifndef __ASM_GRANT_TABLE_H__
#define __ASM_GRANT_TABLE_H__
-#define ORDER_GRANT_FRAMES 2
+#define INITIAL_NR_GRANT_FRAMES 1
/*
* Caller must own caller's BIGLOCK, is responsible for flushing the TLB, and
@@ -21,12 +21,12 @@ int destroy_grant_host_mapping(
#define gnttab_create_shared_page(d, t, i) \
do { \
share_xen_page_with_guest( \
- virt_to_page((char *)(t)->shared + ((i) * PAGE_SIZE)), \
+ virt_to_page((char *)(t)->shared[i]), \
(d), XENSHARE_writable); \
} while ( 0 )
#define gnttab_shared_mfn(d, t, i) \
- ((virt_to_maddr((t)->shared) >> PAGE_SHIFT) + (i))
+ ((virt_to_maddr((t)->shared[i]) >> PAGE_SHIFT))
#define gnttab_shared_gmfn(d, t, i) \
(mfn_to_gmfn(d, gnttab_shared_mfn(d, t, i)))
diff -r f026d4091322 xen/include/public/grant_table.h
--- a/xen/include/public/grant_table.h Tue Nov 14 18:52:58 2006 +0000
+++ b/xen/include/public/grant_table.h Tue Nov 14 20:38:15 2006 +0000
@@ -308,6 +308,25 @@ typedef struct gnttab_copy {
int16_t status;
} gnttab_copy_t;
DEFINE_XEN_GUEST_HANDLE(gnttab_copy_t);
+
+/*
+ * GNTTABOP_query_size: Query the current and maximum sizes of the shared
+ * grant table.
+ * NOTES:
+ * 1. <dom> may be specified as DOMID_SELF.
+ * 2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
+ */
+#define GNTTABOP_query_size 6
+struct gnttab_query_size {
+ /* IN parameters. */
+ domid_t dom;
+ /* OUT parameters. */
+ uint32_t nr_frames;
+ uint32_t max_nr_frames;
+ int16_t status; /* GNTST_* */
+};
+typedef struct gnttab_query_size gnttab_query_size_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_query_size_t);
/*
diff -r f026d4091322 xen/include/xen/grant_table.h
--- a/xen/include/xen/grant_table.h Tue Nov 14 18:52:58 2006 +0000
+++ b/xen/include/xen/grant_table.h Fri Nov 03 18:18:56 2006 +0000
@@ -52,9 +52,14 @@ struct active_grant_entry {
#define GNTPIN_devr_inc (1 << GNTPIN_devr_shift)
#define GNTPIN_devr_mask (0xFFU << GNTPIN_devr_shift)
-#define NR_GRANT_FRAMES (1U << ORDER_GRANT_FRAMES)
-#define NR_GRANT_ENTRIES \
- ((NR_GRANT_FRAMES << PAGE_SHIFT) / sizeof(grant_entry_t))
+/* Initial size of a grant table. */
+#define INITIAL_NR_GRANT_ENTRIES ((INITIAL_NR_GRANT_FRAMES << PAGE_SHIFT) / \
+ sizeof(grant_entry_t))
+
+/* Default maximum size of a grant table. [POLICY] */
+#define DEFAULT_MAX_NR_GRANT_FRAMES 32
+/* The maximum size of a grant table. */
+extern unsigned int max_nr_grant_frames;
/*
* Tracks a mapping of another domain's grant reference. Each domain has a
@@ -71,14 +76,15 @@ struct grant_mapping {
/* Per-domain grant information. */
struct grant_table {
+ /* Table size. Number of frames shared with guest */
+ unsigned int nr_grant_frames;
/* Shared grant table (see include/public/grant_table.h). */
- struct grant_entry *shared;
+ struct grant_entry **shared;
/* Active grant table. */
- struct active_grant_entry *active;
+ struct active_grant_entry **active;
/* Mapping tracking table. */
- struct grant_mapping *maptrack;
+ struct grant_mapping **maptrack;
unsigned int maptrack_head;
- unsigned int maptrack_order;
unsigned int maptrack_limit;
unsigned int map_count;
/* Lock protecting updates to active and shared grant tables. */
@@ -96,4 +102,22 @@ gnttab_release_mappings(
gnttab_release_mappings(
struct domain *d);
+/* Increase the size of a domain's grant table.
+ * Caller must hold d's grant table lock.
+ */
+int
+gnttab_grow_table(struct domain *d, unsigned int req_nr_frames);
+
+/* Number of grant table frames. Caller must hold d's grant table lock. */
+static inline unsigned int nr_grant_frames(struct grant_table *gt)
+{
+ return gt->nr_grant_frames;
+}
+
+/* Number of grant table entries. Caller must hold d's grant table lock. */
+static inline unsigned int nr_grant_entries(struct grant_table *gt)
+{
+ return (nr_grant_frames(gt) << PAGE_SHIFT) / sizeof(grant_entry_t);
+}
+
#endif /* __XEN_GRANT_TABLE_H__ */
[-- Attachment #3: Type: text/plain, Size: 138 bytes --]
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel
^ permalink raw reply [flat|nested] 9+ messages in thread