* [PATCH] libxc bitmap utils and vcpu-affinity
@ 2010-03-22  3:33 Dulloor
  2010-03-22  7:30 ` Keir Fraser
  0 siblings, 1 reply; 12+ messages in thread
From: Dulloor @ 2010-03-22  3:33 UTC (permalink / raw)
  To: xen-devel

[-- Attachment #1: Type: text/plain, Size: 575 bytes --]

This patch adds:

* A byte-based cpumask type (xenctl_cpumask) for setting vcpu affinity
as well as numa-node affinity, etc. in libxc.

* Common bitmap utils in libxc, which can be used for xenctl_cpumask
(and, with small changes, for xenctl_cpumap if desired), so that
common operations on cpumasks are easy.

As opposed to xenctl_cpumap, xenctl_cpumask is a static structure
(just 4 bytes larger for 128 cpus), but it keeps the interface/code
cleaner. The domctl interface version keeps the size of xenctl_cpumask
consistent between xen and xen-tools.
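
For illustration, pinning a vcpu to pcpus 0-3 then becomes roughly the
following (a sketch only; xc_handle, domid and vcpu are assumed to be
in scope, and error handling is elided):

    struct xenctl_cpumask mask = XC_CPUMASK_NONE;
    int cpu;

    for (cpu = 0; cpu < 4; cpu++)
        xc_cpumask_set_cpu(cpu, mask);  /* the macro takes the mask by name */

    if (xc_vcpu_setaffinity(xc_handle, domid, vcpu, &mask) != 0)
        PERROR("Could not set vcpu affinity");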

-dulloor

[-- Attachment #2: xc-vcpu-affinity.patch --]
[-- Type: text/x-patch, Size: 31760 bytes --]

diff -r 04cb0829d138 tools/libxc/Makefile
--- a/tools/libxc/Makefile	Wed Mar 17 14:10:43 2010 +0000
+++ b/tools/libxc/Makefile	Sun Mar 21 23:22:26 2010 -0400
@@ -25,6 +25,7 @@
 CTRL_SRCS-y       += xc_mem_event.c
 CTRL_SRCS-y       += xc_mem_paging.c
 CTRL_SRCS-y       += xc_memshr.c
+CTRL_SRCS-y       += xc_bitmap.c
 CTRL_SRCS-$(CONFIG_X86) += xc_pagetab.c
 CTRL_SRCS-$(CONFIG_Linux) += xc_linux.c
 CTRL_SRCS-$(CONFIG_SunOS) += xc_solaris.c
diff -r 04cb0829d138 tools/libxc/xc_bitmap.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/libxc/xc_bitmap.c	Sun Mar 21 23:22:26 2010 -0400
@@ -0,0 +1,250 @@
+#include "xc_bitmap.h"
+#include <stdio.h>
+
+/*
+ * xc_bitmap_find_next_bit is adapted from the definition of the generic
+ * find_next_bit in Linux, with the following copyright.
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * Adapted for byte-based bitmap by Dulloor (dulloor@gatech.edu)
+ */
+
+/**
+ * __xc_ffs - find the first set bit in a byte.
+ * @byte: The byte to search
+ *
+ * The result is undefined if no bit is set, so callers should check against 0 first.
+ */
+static inline int __xc_ffs(uint8_t byte)
+{
+	int num = 0;
+
+	if ((byte & 0xff) == 0) {
+		num += 8;
+		byte >>= 8;
+	}
+	if ((byte & 0xf) == 0) {
+		num += 4;
+		byte >>= 4;
+	}
+	if ((byte & 0x3) == 0) {
+		num += 2;
+		byte >>= 2;
+	}
+	if ((byte & 0x1) == 0)
+		num += 1;
+	return num;
+}
+
+int
+xc_bitmap_find_next_bit( const uint8_t *addr, uint32_t size, uint32_t offset)
+{
+    const uint8_t *p;
+    uint32_t result;
+    uint8_t tmp;
+
+    if (offset >= size)
+        return size;
+
+    p = addr + XC_BITMAP_BYTE(offset);
+    result = offset & ~(XC_BITS_PER_BYTE-1);
+
+    size -= result;
+    offset %= XC_BITS_PER_BYTE;
+    if (offset) {
+        tmp = *(p++);
+        tmp &= (0xff << offset);
+        if (size < XC_BITS_PER_BYTE)
+            goto found_first;
+        if (tmp)
+            goto found_middle;
+        size -= XC_BITS_PER_BYTE;
+        result += XC_BITS_PER_BYTE;
+    }
+    while (size & ~(XC_BITS_PER_BYTE-1)) {
+        if ((tmp = *(p++)))
+            goto found_middle;
+        result += XC_BITS_PER_BYTE;
+        size -= XC_BITS_PER_BYTE;
+    }
+    if (!size)
+        return result;
+    tmp = *p;
+
+found_first:
+    tmp &= (0xff >> (XC_BITS_PER_BYTE - size));
+    if (!tmp)
+        return result+size;
+found_middle:
+    return result + __xc_ffs(tmp);
+}
+
+void __xc_bitmap_and(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    int k;
+    int nr = XC_BITS_TO_BYTES(nbits);
+
+    for (k=0; k<nr; k++)
+        dp[k] = s1p[k] & s2p[k];
+}
+
+void __xc_bitmap_or(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    int k;
+    int nr = XC_BITS_TO_BYTES(nbits);
+
+    for (k=0; k<nr; k++)
+        dp[k] = s1p[k] | s2p[k];
+}
+
+void __xc_bitmap_xor(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    int k;
+    int nr = XC_BITS_TO_BYTES(nbits);
+
+    for (k=0; k<nr; k++)
+        dp[k] = s1p[k] ^ s2p[k];
+}
+
+void __xc_bitmap_andnot(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    int k;
+    int nr = XC_BITS_TO_BYTES(nbits);
+
+    for (k=0; k<nr; k++)
+        dp[k] = s1p[k] & ~s2p[k];
+}
+
+void __xc_bitmap_complement(uint8_t *dp, uint8_t *sp, int nbits)
+{
+    int k, lim = nbits/XC_BITS_PER_BYTE;
+    for (k=0; k<lim; k++)
+        dp[k] = ~sp[k];
+
+    if (nbits % XC_BITS_PER_BYTE)
+        dp[k] = ~sp[k] & XC_BITMAP_LAST_BYTE_MASK(nbits);
+}
+
+int __xc_bitmap_equal(uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    int k, lim = nbits/XC_BITS_PER_BYTE;
+    for (k=0; k<lim; k++)
+        if (s1p[k] != s2p[k])
+            return 0;
+
+    if (nbits % XC_BITS_PER_BYTE)
+        if ((s1p[k] ^ s2p[k]) & XC_BITMAP_LAST_BYTE_MASK(nbits))
+            return 0;
+
+    return 1;
+}
+
+int __xc_bitmap_intersects(uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    int k, lim = nbits/XC_BITS_PER_BYTE;
+    for (k=0; k<lim; k++)
+        if (s1p[k] & s2p[k])
+            return 1;
+
+    if (nbits % XC_BITS_PER_BYTE)
+        if ((s1p[k] & s2p[k]) & XC_BITMAP_LAST_BYTE_MASK(nbits))
+            return 1;
+
+    return 0;
+}
+
+int __xc_bitmap_subset(uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    int k, lim = nbits/XC_BITS_PER_BYTE;
+    for (k=0; k<lim; k++)
+        if (s1p[k] & ~s2p[k])
+            return 0;
+
+    if (nbits % XC_BITS_PER_BYTE)
+        if ((s1p[k] & ~s2p[k]) & XC_BITMAP_LAST_BYTE_MASK(nbits))
+            return 0;
+
+    return 1;
+}
+
+int __xc_bitmap_empty(uint8_t *sp, int nbits)
+{
+    int k, lim = nbits/XC_BITS_PER_BYTE;
+    for (k=0; k<lim; k++)
+        if (sp[k])
+            return 0;
+
+    if (nbits % XC_BITS_PER_BYTE)
+        if (sp[k] & XC_BITMAP_LAST_BYTE_MASK(nbits))
+            return 0;
+
+    return 1;
+}
+
+int __xc_bitmap_full(uint8_t *sp, int nbits)
+{
+    int k, lim = nbits/XC_BITS_PER_BYTE;
+    for (k=0; k<lim; k++)
+        if (~sp[k] & XC_BITMAP_BYTE_MASK)
+            return 0;
+
+    if (nbits % XC_BITS_PER_BYTE)
+        if (~sp[k] & XC_BITMAP_LAST_BYTE_MASK(nbits))
+            return 0;
+
+    return 1;
+}
+
+static inline uint8_t hweight8(uint8_t w)
+{
+    uint8_t res = (w & 0x55) + ((w >> 1) & 0x55);
+    res = (res & 0x33) + ((res >> 2) & 0x33);
+    return (res & 0x0F) + ((res >> 4) & 0x0F);
+}
+
+int __xc_bitmap_weight(const uint8_t *sp, int nbits)
+{
+    int k, w = 0, lim = nbits/XC_BITS_PER_BYTE;
+
+    for (k=0; k <lim; k++)
+        w += hweight8(sp[k]);
+
+    if (nbits % XC_BITS_PER_BYTE)
+        w += hweight8(sp[k] & XC_BITMAP_LAST_BYTE_MASK(nbits));
+
+    return w;
+}
+
+/* xenctl_cpumask print functions */
+#define CHUNKSZ	8
+#define roundup_power2(val,modulus)	(((val) + (modulus) - 1) & ~((modulus) - 1))
+
+int xc_bitmap_snprintf(char *buf, unsigned int buflen,
+	const uint8_t *maskp, int nmaskbits)
+{
+	int i, word, bit, len = 0;
+	unsigned long val;
+	const char *sep = "";
+	int chunksz;
+	uint8_t chunkmask;
+
+	chunksz = nmaskbits & (CHUNKSZ - 1);
+	if (chunksz == 0)
+		chunksz = CHUNKSZ;
+
+	i = roundup_power2(nmaskbits, CHUNKSZ) - CHUNKSZ;
+	for (; i >= 0; i -= CHUNKSZ) {
+		chunkmask = ((1ULL << chunksz) - 1);
+		word = i / XC_BITS_PER_BYTE;
+		bit = i % XC_BITS_PER_BYTE;
+		val = (maskp[word] >> bit) & chunkmask;
+		len += snprintf(buf+len, buflen-len, "%s%0*lx", sep,
+			(chunksz+3)/4, val);
+		chunksz = CHUNKSZ;
+		sep = ",";
+	}
+	return len;
+}
+
+
diff -r 04cb0829d138 tools/libxc/xc_bitmap.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/libxc/xc_bitmap.h	Sun Mar 21 23:22:26 2010 -0400
@@ -0,0 +1,193 @@
+#ifndef __XENCTL_BITMAP_H
+#define __XENCTL_BITMAP_H
+
+#include <stdint.h>
+#include <string.h>
+
+#define XC_BITS_PER_BYTE 8
+#define XC_BITS_TO_BYTES(bits) \
+    (((bits)+XC_BITS_PER_BYTE-1)/XC_BITS_PER_BYTE)
+#define XC_BITMAP_BIT(nr)   (1 << (nr))
+#define XC_BITMAP_BIT_MASK(nr)  (1 << ((nr) % XC_BITS_PER_BYTE))
+#define XC_BITMAP_BYTE(nr)  ((nr) / XC_BITS_PER_BYTE)
+
+#define XC_BITMAP_BYTE_MASK (0xff)
+#define XC_BITMAP_LAST_BYTE_MASK(nbits)					\
+	(((nbits) % XC_BITS_PER_BYTE) ?		                \
+		((1<<((nbits) % XC_BITS_PER_BYTE))-1) :         \
+                            XC_BITMAP_BYTE_MASK)
+
+#define xc_bitmap_find_first_bit(addr, size)        \
+            xc_bitmap_find_next_bit(addr, size, 0)
+extern int
+xc_bitmap_find_next_bit(const uint8_t *addr, uint32_t size, uint32_t offset);
+
+#define xc_bitmap_find_first_zero_bit(addr, size) \
+            xc_bitmap_find_next_zero_bit(addr, size, 0)
+extern int xc_bitmap_find_next_zero_bit(
+        const uint8_t *addr, uint32_t size, uint32_t offset);
+
+extern void __xc_bitmap_and(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits);
+extern void __xc_bitmap_or(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits);
+extern void __xc_bitmap_xor(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits);
+extern void
+__xc_bitmap_andnot(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits);
+extern void __xc_bitmap_complement(uint8_t *dp, uint8_t *sp, int nbits);
+extern int __xc_bitmap_equal(uint8_t *s1p, uint8_t *s2p, int nbits);
+extern int __xc_bitmap_intersects(uint8_t *s1p, uint8_t *s2p, int nbits);
+extern int __xc_bitmap_subset(uint8_t *s1p, uint8_t *s2p, int nbits);
+extern int __xc_bitmap_empty(uint8_t *sp, int nbits);
+extern int __xc_bitmap_full(uint8_t *sp, int nbits);
+extern int __xc_bitmap_weight(const uint8_t *sp, int nbits);
+
+extern int xc_bitmap_snprintf(char *buf, unsigned int buflen,
+	const uint8_t *maskp, int nmaskbits);
+
+
+static inline void xc_bitmap_set_bit(int nr, volatile uint8_t *addr)
+{
+    uint8_t mask = XC_BITMAP_BIT_MASK(nr);
+    uint8_t *p = ((uint8_t *)addr) + XC_BITMAP_BYTE(nr);
+    *p |= mask;
+}
+
+static inline void xc_bitmap_clear_bit(int nr, volatile uint8_t *addr)
+{
+    uint8_t mask = XC_BITMAP_BIT_MASK(nr);
+    uint8_t *p = ((uint8_t *)addr) + XC_BITMAP_BYTE(nr);
+    *p &= ~mask;
+}
+
+static inline int xc_bitmap_test_bit(int nr, volatile uint8_t *addr)
+{
+    uint8_t mask = XC_BITMAP_BIT_MASK(nr);
+    uint8_t *p = ((uint8_t *)addr) + XC_BITMAP_BYTE(nr);
+    return *p & mask;
+}
+
+static inline void xc_bitmap_fill(uint8_t *dp, int nbits)
+{
+    size_t nbytes = XC_BITS_TO_BYTES(nbits);
+    if (nbytes > 1)
+        memset(dp, 0xff, nbytes-1); 
+    dp[nbytes-1] = XC_BITMAP_LAST_BYTE_MASK(nbits);
+}
+
+static inline void xc_bitmap_zero(uint8_t *dp, int nbits)
+{
+    size_t nbytes = XC_BITS_TO_BYTES(nbits);
+    if (nbytes > 1)
+        memset(dp, 0x00, nbytes-1); 
+    dp[nbytes-1] = 0;
+}
+
+
+static inline void
+xc_bitmap_and(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    if (nbits <= XC_BITS_PER_BYTE)
+        *dp = *s1p & *s2p;
+    else
+        __xc_bitmap_and(dp, s1p, s2p, nbits);
+}
+
+static inline void
+xc_bitmap_or(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    if (nbits <= XC_BITS_PER_BYTE)
+        *dp = *s1p | *s2p;
+    else
+        __xc_bitmap_or(dp, s1p, s2p, nbits);
+}
+
+static inline void
+xc_bitmap_xor(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    if (nbits <= XC_BITS_PER_BYTE)
+        *dp = *s1p ^ *s2p;
+    else
+        __xc_bitmap_xor(dp, s1p, s2p, nbits);
+}
+
+static inline void
+xc_bitmap_andnot(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    if (nbits <= XC_BITS_PER_BYTE)
+        *dp = *s1p & ~(*s2p);
+    else
+        __xc_bitmap_andnot(dp, s1p, s2p, nbits);
+}
+
+static inline void
+xc_bitmap_complement(uint8_t *dp, uint8_t *sp, int nbits)
+{
+    if (nbits <= XC_BITS_PER_BYTE)
+        *dp = ~(*sp) & XC_BITMAP_LAST_BYTE_MASK(nbits);
+    else
+        __xc_bitmap_complement(dp, sp, nbits);
+}
+
+static inline int
+xc_bitmap_equal(uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    if (nbits <= XC_BITS_PER_BYTE)
+        return !((*s1p ^ *s2p) & XC_BITMAP_LAST_BYTE_MASK(nbits));
+
+    return __xc_bitmap_equal(s1p, s2p, nbits);
+}
+
+static inline int
+xc_bitmap_intersects(uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    if (nbits <= XC_BITS_PER_BYTE)
+        return ((*s1p & *s2p) & XC_BITMAP_LAST_BYTE_MASK(nbits));
+
+    return __xc_bitmap_intersects(s1p, s2p, nbits);
+}
+
+static inline int
+xc_bitmap_subset(uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    if (nbits <= XC_BITS_PER_BYTE)
+        return !((*s1p & ~(*s2p)) & XC_BITMAP_LAST_BYTE_MASK(nbits));
+
+    return __xc_bitmap_subset(s1p, s2p, nbits);
+}
+
+static inline int
+xc_bitmap_empty(uint8_t *sp, int nbits)
+{
+	if (nbits <= XC_BITS_PER_BYTE)
+		return ! (*sp & XC_BITMAP_LAST_BYTE_MASK(nbits));
+	else
+		return __xc_bitmap_empty(sp, nbits);
+}
+
+static inline int
+xc_bitmap_full(uint8_t *sp, int nbits)
+{
+	if (nbits <= XC_BITS_PER_BYTE)
+		return ! (~(*sp) & XC_BITMAP_LAST_BYTE_MASK(nbits));
+	else
+		return __xc_bitmap_full(sp, nbits);
+}
+
+static inline uint32_t
+xc_bitmap_weight(const uint8_t *sp, int nbits)
+{
+	return __xc_bitmap_weight(sp, nbits);
+}
+
+
+static inline void
+xc_bitmap_copy(uint8_t *dp, const uint8_t *sp, int nbits)
+{
+    if (nbits <= XC_BITS_PER_BYTE)
+        *dp = *sp;
+    else
+        memcpy(dp, sp, XC_BITS_TO_BYTES(nbits));
+}
+
+#endif
diff -r 04cb0829d138 tools/libxc/xc_cpumask.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/libxc/xc_cpumask.h	Sun Mar 21 23:22:26 2010 -0400
@@ -0,0 +1,211 @@
+#ifndef __XENCTL_CPUMASK_H
+#define __XENCTL_CPUMASK_H
+
+#include <xen/domctl.h>
+#include "xc_bitmap.h"
+
+/* Number of cpus set in the bitmap */
+#define xc_cpumask_num_cpus(mask)	xc_cpumask_weight(mask)
+
+/**
+ * xc_cpumask_first - get the first cpu in a xenctl_cpumask
+ * @srcp: the xenctl_cpumask pointer
+ *
+ * Returns >= xc_cpumask_len(srcp) if no cpus set.
+ */
+static inline unsigned int
+xc_cpumask_first(struct xenctl_cpumask *srcp)
+{
+	return xc_bitmap_find_first_bit(xc_cpumask_bits(srcp),
+                                                xc_cpumask_len(srcp));
+}
+
+/**
+ * xc_cpumask_next - get the next cpu in a xenctl_cpumask
+ * @n: the cpu prior to the place to search (ie. return will be > @n)
+ * @srcp: the xenctl_cpumask pointer
+ *
+ * Returns >= xc_cpumask_len(srcp) if no further cpus set.
+ */
+static inline uint32_t
+xc_cpumask_next(int n, struct xenctl_cpumask *srcp)
+{
+	return xc_bitmap_find_next_bit(xc_cpumask_bits(srcp),
+                                        xc_cpumask_len(srcp), n+1);
+}
+
+#if 0
+static inline uint32_t
+xc_cpumask_next_zero(int n, struct xenctl_cpumask *srcp)
+{
+	return xc_bitmap_find_next_zero_bit(xc_cpumask_bits(srcp),
+                                        xc_cpumask_len(srcp), n+1);
+}
+#endif
+
+/**
+ * xc_for_each_cpu - iterate over every cpu in a mask
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask: the xenctl_cpumask pointer
+ *
+ * After the loop, cpu is >= xc_cpumask_len(mask)
+ */
+#define xc_for_each_cpu(cpu, mask)				\
+            __xc_for_each_cpu(cpu, &(mask))
+
+#define __xc_for_each_cpu(cpu, mask)            \
+	for ((cpu) = -1;				            \
+		(cpu) = xc_cpumask_next((cpu), (mask)),	\
+		(cpu) < xc_cpumask_len(mask);)
+
+
+#define xc_cpumask_equal(src1, src2) __xc_cpumask_equal(&(src1), &(src2))
+static inline int
+__xc_cpumask_equal(struct xenctl_cpumask *s1p, struct xenctl_cpumask *s2p)
+{
+	return xc_bitmap_equal(xc_cpumask_bits(s1p), xc_cpumask_bits(s2p), 
+                                                xc_cpumask_len(s1p));
+}
+
+#define xc_cpumask_set_cpu(cpu, dst) __xc_cpumask_set_cpu(cpu, &(dst))
+static inline void __xc_cpumask_set_cpu(int cpu, struct xenctl_cpumask *dstp)
+{
+	xc_bitmap_set_bit(cpu, xc_cpumask_bits(dstp));
+}
+
+#define xc_cpumask_clear_cpu(cpu, dst) __xc_cpumask_clear_cpu(cpu, &(dst))
+static inline void __xc_cpumask_clear_cpu(int cpu, struct xenctl_cpumask *dstp)
+{
+	xc_bitmap_clear_bit(cpu, xc_cpumask_bits(dstp));
+}
+
+#define xc_cpumask_test_cpu(cpu, dst) __xc_cpumask_test_cpu(cpu, &(dst))
+static inline int __xc_cpumask_test_cpu(int cpu, struct xenctl_cpumask *dstp)
+{
+    return xc_bitmap_test_bit(cpu, xc_cpumask_bits(dstp));
+}
+
+
+#define xc_cpumask_setall(dst) __xc_cpumask_setall(&(dst))
+static inline void __xc_cpumask_setall(struct xenctl_cpumask *dstp)
+{
+	xc_bitmap_fill(xc_cpumask_bits(dstp), xc_cpumask_len(dstp));
+}
+
+#define xc_cpumask_clearall(dst) __xc_cpumask_clearall(&(dst))
+static inline void __xc_cpumask_clearall(struct xenctl_cpumask *dstp)
+{
+	xc_bitmap_zero(xc_cpumask_bits(dstp), xc_cpumask_len(dstp));
+}
+
+#define xc_cpumask_and(dst, src1, src2) \
+                        __xc_cpumask_and(&(dst), &(src1), &(src2))
+static inline void __xc_cpumask_and(struct xenctl_cpumask *dstp,
+        struct xenctl_cpumask *src1p, struct xenctl_cpumask *src2p)
+{
+	xc_bitmap_and(xc_cpumask_bits(dstp), xc_cpumask_bits(src1p),
+                                xc_cpumask_bits(src2p), xc_cpumask_len(dstp));
+}
+
+#define xc_cpumask_or(dst, src1, src2) \
+                        __xc_cpumask_or(&(dst), &(src1), &(src2))
+static inline void __xc_cpumask_or(struct xenctl_cpumask *dstp,
+        struct xenctl_cpumask *src1p, struct xenctl_cpumask *src2p)
+{
+	xc_bitmap_or(xc_cpumask_bits(dstp), xc_cpumask_bits(src1p),
+				                xc_cpumask_bits(src2p), xc_cpumask_len(dstp));
+}
+
+#define xc_cpumask_xor(dst, src1, src2) \
+                        __xc_cpumask_xor(&(dst), &(src1), &(src2))
+static inline void __xc_cpumask_xor(struct xenctl_cpumask *dstp,
+        struct xenctl_cpumask *src1p, struct xenctl_cpumask *src2p)
+{
+	xc_bitmap_xor(xc_cpumask_bits(dstp), xc_cpumask_bits(src1p),
+                        xc_cpumask_bits(src2p), xc_cpumask_len(dstp));
+}
+
+#define xc_cpumask_andnot(dst, src1, src2) \
+                        __xc_cpumask_andnot(&(dst), &(src1), &(src2))
+static inline void __xc_cpumask_andnot(struct xenctl_cpumask *dstp,
+    struct xenctl_cpumask *src1p, struct xenctl_cpumask *src2p)
+{
+	xc_bitmap_andnot(xc_cpumask_bits(dstp), xc_cpumask_bits(src1p),
+                        xc_cpumask_bits(src2p), xc_cpumask_len(dstp));
+}
+
+#define xc_cpumask_complement(dst, src) \
+                        __xc_cpumask_complement(&(dst), &(src))
+static inline void __xc_cpumask_complement(struct xenctl_cpumask *dstp,
+				                        struct xenctl_cpumask *srcp)
+{
+	xc_bitmap_complement(xc_cpumask_bits(dstp), xc_cpumask_bits(srcp),
+                                                xc_cpumask_len(dstp));
+}
+
+#define xc_cpumask_intersects(src1, src2) \
+                        __xc_cpumask_intersects(&(src1), &(src2))
+static inline int __xc_cpumask_intersects(struct xenctl_cpumask *src1p,
+				                            struct xenctl_cpumask *src2p)
+{
+	return xc_bitmap_intersects(xc_cpumask_bits(src1p), xc_cpumask_bits(src2p),
+                                                        xc_cpumask_len(src1p));
+}
+
+#define xc_cpumask_subset(src1, src2) \
+                        __xc_cpumask_subset(&(src1), &(src2))
+static inline int __xc_cpumask_subset(struct xenctl_cpumask *src1p,
+				                        struct xenctl_cpumask *src2p)
+{
+	return xc_bitmap_subset(xc_cpumask_bits(src1p), xc_cpumask_bits(src2p),
+                                                        xc_cpumask_len(src1p));
+}
+
+#define xc_cpumask_empty(src) __xc_cpumask_empty(&(src))
+static inline int __xc_cpumask_empty(struct xenctl_cpumask *srcp)
+{
+	return xc_bitmap_empty(xc_cpumask_bits(srcp), xc_cpumask_len(srcp));
+}
+
+#define xc_cpumask_full(src) __xc_cpumask_full(&(src))
+static inline int __xc_cpumask_full(struct xenctl_cpumask *srcp)
+{
+	return xc_bitmap_full(xc_cpumask_bits(srcp), xc_cpumask_len(srcp));
+}
+
+#define xc_cpumask_weight(src) __xc_cpumask_weight(&(src))
+static inline uint32_t __xc_cpumask_weight(struct xenctl_cpumask *srcp)
+{
+	return xc_bitmap_weight(xc_cpumask_bits(srcp), xc_cpumask_len(srcp));
+}
+
+#define xc_cpumask_copy(dst, src) __xc_cpumask_copy(&(dst), &(src))
+static inline void __xc_cpumask_copy(struct xenctl_cpumask *dstp,
+				                    struct xenctl_cpumask *srcp)
+{
+	xc_bitmap_copy(xc_cpumask_bits(dstp), xc_cpumask_bits(srcp),
+                                                xc_cpumask_len(dstp));
+}
+
+#define XC_CPU_MASK_LAST_BYTE XC_BITMAP_LAST_BYTE_MASK(XENCTL_NR_CPUS)
+
+#define XC_CPUMASK_ALL							                    \
+/*(xenctl_cpumask)*/ { {							                \
+	[0 ... XC_BITS_TO_BYTES(XENCTL_NR_CPUS)-2] = 0xff,		        \
+	[XC_BITS_TO_BYTES(XENCTL_NR_CPUS)-1] = XC_CPU_MASK_LAST_BYTE    \
+} }
+
+#define XC_CPUMASK_NONE							            \
+/*(xenctl_cpumask)*/ { {							        \
+	[0 ... XC_BITS_TO_BYTES(XENCTL_NR_CPUS)-1] =  0		    \
+} }
+
+#define xc_cpumask_snprintf(buf, len, src) \
+			__xc_cpumask_snprintf((buf), (len), &(src), XENCTL_NR_CPUS)
+static inline int __xc_cpumask_snprintf(char *buf, int len,
+					        const struct xenctl_cpumask *srcp, int nbits)
+{
+	return xc_bitmap_snprintf(buf, len, srcp->bits, nbits);
+}
+
+#endif /* __XENCTL_CPUMASK_H */
diff -r 04cb0829d138 tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c	Wed Mar 17 14:10:43 2010 +0000
+++ b/tools/libxc/xc_domain.c	Sun Mar 21 23:22:26 2010 -0400
@@ -8,6 +8,7 @@
 
 #include "xc_private.h"
 #include "xg_save_restore.h"
+#include "xc_cpumask.h"
 #include <xen/memory.h>
 #include <xen/hvm/hvm_op.h>
 
@@ -98,39 +99,18 @@
 int xc_vcpu_setaffinity(int xc_handle,
                         uint32_t domid,
                         int vcpu,
-                        uint64_t *cpumap, int cpusize)
+                        struct xenctl_cpumask *cpumask)
 {
     DECLARE_DOMCTL;
     int ret = -1;
-    uint8_t *local = malloc(cpusize); 
 
-    if(local == NULL)
-    {
-        PERROR("Could not alloc memory for Xen hypercall");
-        goto out;
-    }
     domctl.cmd = XEN_DOMCTL_setvcpuaffinity;
     domctl.domain = (domid_t)domid;
     domctl.u.vcpuaffinity.vcpu    = vcpu;
-
-    bitmap_64_to_byte(local, cpumap, cpusize * 8);
-
-    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
-
-    domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8;
+    domctl.u.vcpuaffinity.cpumask = *cpumask;
     
-    if ( lock_pages(local, cpusize) != 0 )
-    {
-        PERROR("Could not lock memory for Xen hypercall");
-        goto out;
-    }
-
     ret = do_domctl(xc_handle, &domctl);
 
-    unlock_pages(local, cpusize);
-
- out:
-    free(local);
     return ret;
 }
 
@@ -138,39 +118,19 @@
 int xc_vcpu_getaffinity(int xc_handle,
                         uint32_t domid,
                         int vcpu,
-                        uint64_t *cpumap,
-                        int cpusize)
+                        struct xenctl_cpumask *cpumask)
 {
     DECLARE_DOMCTL;
     int ret = -1;
-    uint8_t * local = malloc(cpusize);
-
-    if(local == NULL)
-    {
-        PERROR("Could not alloc memory for Xen hypercall");
-        goto out;
-    }
 
     domctl.cmd = XEN_DOMCTL_getvcpuaffinity;
     domctl.domain = (domid_t)domid;
     domctl.u.vcpuaffinity.vcpu = vcpu;
-
-
-    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
-    domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8;
-    
-    if ( lock_pages(local, sizeof(local)) != 0 )
-    {
-        PERROR("Could not lock memory for Xen hypercall");
-        goto out;
-    }
+    xc_cpumask_clearall(domctl.u.vcpuaffinity.cpumask);
 
     ret = do_domctl(xc_handle, &domctl);
 
-    unlock_pages(local, sizeof (local));
-    bitmap_byte_to_64(cpumap, local, cpusize * 8);
-out:
-    free(local);
+    *cpumask = domctl.u.vcpuaffinity.cpumask;
     return ret;
 }
 
diff -r 04cb0829d138 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h	Wed Mar 17 14:10:43 2010 +0000
+++ b/tools/libxc/xenctrl.h	Sun Mar 21 23:22:26 2010 -0400
@@ -309,13 +309,11 @@
 int xc_vcpu_setaffinity(int xc_handle,
                         uint32_t domid,
                         int vcpu,
-                        uint64_t *cpumap,
-                        int cpusize);
+                        struct xenctl_cpumask *cpumask);
 int xc_vcpu_getaffinity(int xc_handle,
                         uint32_t domid,
                         int vcpu,
-                        uint64_t *cpumap,
-                        int cpusize);
+                        struct xenctl_cpumask *cpumask);
 
 /**
  * This function will return information about one or more domains. It is
diff -r 04cb0829d138 tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c	Wed Mar 17 14:10:43 2010 +0000
+++ b/tools/python/xen/lowlevel/xc/xc.c	Sun Mar 21 23:22:26 2010 -0400
@@ -23,6 +23,7 @@
 #include "xc_dom.h"
 #include <xen/hvm/hvm_info_table.h>
 #include <xen/hvm/params.h>
+#include "xc_cpumask.h"
 
 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
 
@@ -215,12 +216,8 @@
 {
     uint32_t dom;
     int vcpu = 0, i;
-    uint64_t  *cpumap;
     PyObject *cpulist = NULL;
-    int nr_cpus, size;
-    xc_physinfo_t info; 
-    xc_cpu_to_node_t map[1];
-    uint64_t cpumap_size = sizeof(cpumap); 
+    struct xenctl_cpumask cpumask = XC_CPUMASK_NONE;
 
     static char *kwd_list[] = { "domid", "vcpu", "cpumap", NULL };
     
@@ -229,40 +226,19 @@
                                       &dom, &vcpu, &cpulist) )
         return NULL;
 
-    set_xen_guest_handle(info.cpu_to_node, map);
-    info.max_cpu_id = 1;
-    if ( xc_physinfo(self->xc_handle, &info) != 0 )
-        return pyxc_error_to_exception();
-  
-    nr_cpus = info.nr_cpus;
-
-    size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8);
-    cpumap = malloc(cpumap_size * size);
-    if(cpumap == NULL)
-        return pyxc_error_to_exception();
-    
-
     if ( (cpulist != NULL) && PyList_Check(cpulist) )
     {
-        for ( i = 0; i < size; i++)
-        {
-            cpumap[i] = 0ULL;
-        }
         for ( i = 0; i < PyList_Size(cpulist); i++ ) 
         {
             long cpu = PyInt_AsLong(PyList_GetItem(cpulist, i));
-            *(cpumap + cpu / (cpumap_size * 8)) |= (uint64_t)1 << (cpu % (cpumap_size * 8));
+            xc_cpumask_set_cpu(cpu, cpumask);
         }
     }
   
-    if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap, size * cpumap_size) != 0 )
-    {
-        free(cpumap);
+    if (xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, &cpumask))
         return pyxc_error_to_exception();
-    }
 
     Py_INCREF(zero);
-    free(cpumap); 
     return zero;
 }
 
@@ -377,15 +353,10 @@
                                    PyObject *kwds)
 {
     PyObject *info_dict, *cpulist;
-
     uint32_t dom, vcpu = 0;
     xc_vcpuinfo_t info;
     int rc, i;
-    uint64_t *cpumap;
-    int nr_cpus, size;
-    xc_physinfo_t pinfo = { 0 };
-    xc_cpu_to_node_t map[1];
-    uint64_t cpumap_size = sizeof(cpumap);
+    struct xenctl_cpumask cpumask = XC_CPUMASK_NONE;
 
     static char *kwd_list[] = { "domid", "vcpu", NULL };
     
@@ -393,25 +364,13 @@
                                       &dom, &vcpu) )
         return NULL;
 
-    set_xen_guest_handle(pinfo.cpu_to_node, map);
-    pinfo.max_cpu_id = 1;
-    if ( xc_physinfo(self->xc_handle, &pinfo) != 0 ) 
-        return pyxc_error_to_exception();
-    nr_cpus = pinfo.nr_cpus;
     rc = xc_vcpu_getinfo(self->xc_handle, dom, vcpu, &info);
     if ( rc < 0 )
         return pyxc_error_to_exception();
-    size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8); 
 
-    if((cpumap = malloc(cpumap_size * size)) == NULL)
-        return pyxc_error_to_exception(); 
-
-    rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, cpumap, cpumap_size * size);
+    rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, &cpumask);
     if ( rc < 0 )
-    {
-        free(cpumap);
         return pyxc_error_to_exception();
-    }
 
     info_dict = Py_BuildValue("{s:i,s:i,s:i,s:L,s:i}",
                               "online",   info.online,
@@ -421,18 +380,14 @@
                               "cpu",      info.cpu);
 
     cpulist = PyList_New(0);
-    for ( i = 0; i < size * cpumap_size * 8; i++ )
+    xc_for_each_cpu(i, cpumask)
     {
-        if (*(cpumap + i / (cpumap_size * 8)) & 1 ) {
-            PyObject *pyint = PyInt_FromLong(i);
-            PyList_Append(cpulist, pyint);
-            Py_DECREF(pyint);
-        }
-        *(cpumap + i / (cpumap_size * 8)) >>= 1;
+        PyObject *pyint = PyInt_FromLong(i);
+        PyList_Append(cpulist, pyint);
+        Py_DECREF(pyint);
     }
     PyDict_SetItemString(info_dict, "cpumap", cpulist);
     Py_DECREF(cpulist);
-    free(cpumap);
     return info_dict;
 }
 
diff -r 04cb0829d138 xen/common/domctl.c
--- a/xen/common/domctl.c	Wed Mar 17 14:10:43 2010 +0000
+++ b/xen/common/domctl.c	Sun Mar 21 23:22:26 2010 -0400
@@ -76,6 +76,36 @@
     bitmap_byte_to_long(cpus_addr(*cpumask), bytemap, NR_CPUS);
 }
 
+void cpumask_to_xenctl_cpumask(
+    struct xenctl_cpumask *xenctl_cpumask, cpumask_t *cpumask)
+{
+    unsigned int nr_cpus;
+    uint8_t *bytemap;
+
+    /* caller must zero out the cpumask */
+    bytemap = xc_cpumask_bits(xenctl_cpumask);
+    nr_cpus  =
+        min_t(unsigned int, xc_cpumask_len(xenctl_cpumask), NR_CPUS);
+
+    bitmap_long_to_byte(bytemap, cpus_addr(*cpumask), nr_cpus);
+}
+
+void xenctl_cpumask_to_cpumask(
+    cpumask_t *cpumask, struct xenctl_cpumask *xenctl_cpumask)
+{
+    unsigned int nr_cpus;
+    uint8_t *bytemap;
+
+    bytemap = xc_cpumask_bits(xenctl_cpumask);
+
+    nr_cpus  =
+        min_t(unsigned int, xc_cpumask_len(xenctl_cpumask), NR_CPUS);
+
+    bitmap_byte_to_long(cpus_addr(*cpumask), bytemap, nr_cpus);
+
+    cpus_and(*cpumask, *cpumask, cpu_online_map);
+}
+
 static inline int is_free_domid(domid_t dom)
 {
     struct domain *d;
@@ -574,15 +604,18 @@
 
         if ( op->cmd == XEN_DOMCTL_setvcpuaffinity )
         {
-            xenctl_cpumap_to_cpumask(
-                &new_affinity, &op->u.vcpuaffinity.cpumap);
+            xenctl_cpumask_to_cpumask(
+                &new_affinity, &op->u.vcpuaffinity.cpumask);
             ret = vcpu_set_affinity(v, &new_affinity);
         }
         else
         {
-            cpumask_to_xenctl_cpumap(
-                &op->u.vcpuaffinity.cpumap, &v->cpu_affinity);
-            ret = 0;
+            cpumask_to_xenctl_cpumask(
+                &op->u.vcpuaffinity.cpumask, &v->cpu_affinity);
+            if ( copy_to_guest(u_domctl, op, 1) )
+                ret = -EFAULT;
+            else
+                ret = 0;
         }
 
     vcpuaffinity_out:
diff -r 04cb0829d138 xen/include/public/domctl.h
--- a/xen/include/public/domctl.h	Wed Mar 17 14:10:43 2010 +0000
+++ b/xen/include/public/domctl.h	Sun Mar 21 23:22:26 2010 -0400
@@ -35,11 +35,32 @@
 #include "xen.h"
 #include "grant_table.h"
 
-#define XEN_DOMCTL_INTERFACE_VERSION 0x00000006
+#define XEN_DOMCTL_INTERFACE_VERSION 0x00000007
+
+#define XENCTL_NR_CPUS          128
+#define XENCTL_BITS_PER_BYTE    8
+
+#define XENCTL_BITS_TO_BYTES(bits) \
+    (((bits)+XENCTL_BITS_PER_BYTE-1)/XENCTL_BITS_PER_BYTE)
+#define XENCTL_DECLARE_BITMAP(name,bits) \
+    uint8_t name[XENCTL_BITS_TO_BYTES(bits)]
+
+/* xenctl_cpumask :
+ * This is a static structure as opposed to xenctl_cpumap.
+ * We don't pass any nr_cpus parameter, but XENCTL_NR_CPUS is kept consistent
+ * between xen and xen-tools through interface versions.
+ * However, XENCTL_NR_CPUS and NR_CPUS should be equal.
+ */
+
+#define xc_cpumask_bits(maskp) ((maskp)->bits)
+#define xc_cpumask_len(maskp) (XENCTL_NR_CPUS)
+struct xenctl_cpumask {
+    XENCTL_DECLARE_BITMAP(bits, XENCTL_NR_CPUS);
+};
 
 struct xenctl_cpumap {
+    uint32_t nr_cpus;
     XEN_GUEST_HANDLE_64(uint8) bitmap;
-    uint32_t nr_cpus;
 };
 
 /*
@@ -285,7 +306,7 @@
 /* XEN_DOMCTL_getvcpuaffinity */
 struct xen_domctl_vcpuaffinity {
     uint32_t  vcpu;              /* IN */
-    struct xenctl_cpumap cpumap; /* IN/OUT */
+    struct xenctl_cpumask cpumask; /* IN/OUT */
 };
 typedef struct xen_domctl_vcpuaffinity xen_domctl_vcpuaffinity_t;
 DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpuaffinity_t);
diff -r 04cb0829d138 xen/include/xen/cpumask.h
--- a/xen/include/xen/cpumask.h	Wed Mar 17 14:10:43 2010 +0000
+++ b/xen/include/xen/cpumask.h	Sun Mar 21 23:22:26 2010 -0400
@@ -425,8 +425,15 @@
 /* Copy to/from cpumap provided by control tools. */
 struct xenctl_cpumap;
 void cpumask_to_xenctl_cpumap(
-    struct xenctl_cpumap *enctl_cpumap, cpumask_t *cpumask);
+    struct xenctl_cpumap *xenctl_cpumap, cpumask_t *cpumask);
 void xenctl_cpumap_to_cpumask(
     cpumask_t *cpumask, struct xenctl_cpumap *enctl_cpumap);
 
+/* Copy to/from cpumask used by control tools. */
+struct xenctl_cpumask;
+void cpumask_to_xenctl_cpumask(
+    struct xenctl_cpumask *xenctl_cpumask, cpumask_t *cpumask);
+void xenctl_cpumask_to_cpumask(
+    cpumask_t *cpumask, struct xenctl_cpumask *xenctl_cpumask);
+
 #endif /* __XEN_CPUMASK_H */

[-- Attachment #3: Type: text/plain, Size: 138 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel


* Re: [PATCH] libxc bitmap utils and vcpu-affinity
  2010-03-22  3:33 [PATCH] libxc bitmap utils and vcpu-affinity Dulloor
@ 2010-03-22  7:30 ` Keir Fraser
  2010-03-22 17:44   ` Dulloor
  0 siblings, 1 reply; 12+ messages in thread
From: Keir Fraser @ 2010-03-22  7:30 UTC (permalink / raw)
  To: Dulloor, xen-devel

On 22/03/2010 03:33, "Dulloor" <dulloor@gmail.com> wrote:

> This patch adds:
> 
> * A byte-based cpumask type (xenctl_cpumask) for setting vcpu affinity
> as well as numa-node affinity, etc. in libxc.
> 
> * Common bitmap utils in libxc, which can be used for xenctl_cpumask
> (and, with small changes, for xenctl_cpumap if desired), so that
> common operations on cpumasks are easy.
> 
> As opposed to xenctl_cpumap, xenctl_cpumask is a static structure
> (just 4 bytes larger for 128 cpus), but it keeps the interface/code
> cleaner. The domctl interface version keeps the size of xenctl_cpumask
> consistent between xen and xen-tools.

I'm missing the motivation for this. It sounds less good than what we have
already.

 -- Keir


* Re: [PATCH] libxc bitmap utils and vcpu-affinity
  2010-03-22  7:30 ` Keir Fraser
@ 2010-03-22 17:44   ` Dulloor
  2010-03-23 10:10     ` Jan Beulich
  0 siblings, 1 reply; 12+ messages in thread
From: Dulloor @ 2010-03-22 17:44 UTC (permalink / raw)
  To: Keir Fraser; +Cc: xen-devel

> I'm missing the motivation for this. It sounds less good than what we have
> already.
xenctl_cpumap is dynamic and could work for any size, but there are
many reasons for adding xenctl_cpumask.

Motivation for the new data type and bitmap utils in libxc:
- xenctl_cpumask is a simple and useful data type in xen-tools. We
deal with cpumasks in several places, and the need increases with numa
(for instance, node selection with guest numa).
- The bitmap utils in the patch work with both the xenctl_cpumap and
the xenctl_cpumask structure, so that part is independent of the new
structure. I can submit that part alone if we decide to stay with
xenctl_cpumap.

Motivation for using xenctl_cpumask in Xen interfaces:
- xenctl_cpumap is just 4 bytes smaller than static xenctl_cpumask for
128 cpus (128 would be good for quite some time). However, the new
data type cleans up the code considerably, as you can see from its use
in vcpu_(set|get)affinity - no need to read physinfo, lock pages
separately, etc. (see the sketch below).
- Keeping the size of xenctl_cpumask consistent between xen and
xen-tools is simple. And conversion between xenctl_cpumask and
cpumask inside xen is simple too (not much different from before).
- There are other interfaces where xenctl_cpumask could be used. For
pv guest numa, I pass cpumasks in the start-info and the structure
needs to be static. The size of the structure is kept consistent
through a numa-interface version.
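
To make the code cleanup concrete, reading an affinity back and walking
it with the new helpers reduces to roughly the following (a sketch only;
xc_handle, domid and vcpu are assumed to come from the caller):

    struct xenctl_cpumask mask = XC_CPUMASK_NONE;
    int cpu;

    if (xc_vcpu_getaffinity(xc_handle, domid, vcpu, &mask) == 0)
        xc_for_each_cpu(cpu, mask)      /* visits only the set bits */
            printf("vcpu may run on pcpu %d\n", cpu);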

-dulloor

On Mon, Mar 22, 2010 at 3:30 AM, Keir Fraser <keir.fraser@eu.citrix.com> wrote:
> On 22/03/2010 03:33, "Dulloor" <dulloor@gmail.com> wrote:
>
>> This patch adds:
>>
>> * A byte-based cpumask type (xenctl_cpumask) for setting vcpu affinity
>> as well as numa-node affinity, etc. in libxc.
>>
>> * Common bitmap utils in libxc, which can be used for xenctl_cpumask
>> (and, with small changes, for xenctl_cpumap if desired), so that
>> common operations on cpumasks are easy.
>>
>> As opposed to xenctl_cpumap, xenctl_cpumask is a static structure
>> (just 4 bytes larger for 128 cpus), but it keeps the interface/code
>> cleaner. The domctl interface version keeps the size of xenctl_cpumask
>> consistent between xen and xen-tools.
>
> I'm missing the motivation for this. It sounds less good than what we have
> already.
>
>  -- Keir
>
>
>


* Re: [PATCH] libxc bitmap utils and vcpu-affinity
  2010-03-22 17:44   ` Dulloor
@ 2010-03-23 10:10     ` Jan Beulich
  2010-03-23 11:05       ` Keir Fraser
  0 siblings, 1 reply; 12+ messages in thread
From: Jan Beulich @ 2010-03-23 10:10 UTC (permalink / raw)
  To: Dulloor; +Cc: xen-devel

>>> Dulloor <dulloor@gmail.com> 22.03.10 18:44 >>>
>Motivation for using xenctl_cpumask in Xen interfaces:
>- xenctl_cpumap is just 4 bytes smaller than static xenctl_cpumask for
>128 cpus (128 would be good for quite some time). However, the new

I don't buy this (we're already building for 256 CPUs, looking forward
to further bump this in the not too distant future), and I'm generally
opposed to introducing hard coded limits in a public interface.

Jan


* Re: [PATCH] libxc bitmap utils and vcpu-affinity
  2010-03-23 10:10     ` Jan Beulich
@ 2010-03-23 11:05       ` Keir Fraser
  2010-03-23 16:40         ` Dulloor
  0 siblings, 1 reply; 12+ messages in thread
From: Keir Fraser @ 2010-03-23 11:05 UTC (permalink / raw)
  To: Jan Beulich, Dulloor; +Cc: xen-devel

On 23/03/2010 10:10, "Jan Beulich" <JBeulich@novell.com> wrote:

>>>> Dulloor <dulloor@gmail.com> 22.03.10 18:44 >>>
>> Motivation for using xenctl_cpumask in Xen interfaces:
>> - xenctl_cpumap is just 4 bytes smaller than static xenctl_cpumask for
>> 128 cpus (128 would be good for quite some time). However, the new
> 
> I don't buy this (we're already building for 256 CPUs, looking forward
> to further bump this in the not too distant future), and I'm generally
> opposed to introducing hard coded limits in a public interface.

We should use xenctl_cpumap everywhere for specifying physical CPU bitmaps,
even in guest NUMA interfaces if appropriate. I don't really care if it is
a bit harder to use than a static bitmap.

 -- Keir


* Re: [PATCH] libxc bitmap utils and vcpu-affinity
  2010-03-23 11:05       ` Keir Fraser
@ 2010-03-23 16:40         ` Dulloor
  2010-03-23 16:41           ` Dulloor
  0 siblings, 1 reply; 12+ messages in thread
From: Dulloor @ 2010-03-23 16:40 UTC (permalink / raw)
  To: Keir Fraser; +Cc: xen-devel, Jan Beulich

[-- Attachment #1: Type: text/plain, Size: 1091 bytes --]

Fine, I agree with you both. Attached is a patch that adds the bitmap
utils to libxc and uses them with xenctl_cpumap in vcpu_(get|set)affinity.
For the guest-numa interface, I will see if I can use xenctl_cpumap as well.
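
Since xenctl_cpumap carries a guest handle rather than inline storage,
the caller now supplies its own bitmap buffer. Setting up a map then
looks roughly like this (a sketch under the assumption that nr_cpus has
been obtained via xc_physinfo, with error handling elided):

    uint8_t bits[XC_BITS_TO_BYTES(nr_cpus)];
    struct xenctl_cpumap map;

    map.nr_cpus = nr_cpus;
    set_xen_guest_handle(map.bitmap, bits);
    xc_cpumap_clearall(map);        /* zero the caller-supplied buffer */
    xc_cpumap_set_cpu(cpu, map);    /* then set the desired pcpus */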

-dulloor

On Tue, Mar 23, 2010 at 7:05 AM, Keir Fraser <keir.fraser@eu.citrix.com> wrote:
> On 23/03/2010 10:10, "Jan Beulich" <JBeulich@novell.com> wrote:
>
>>>>> Dulloor <dulloor@gmail.com> 22.03.10 18:44 >>>
>>> Motivation for using xenctl_cpumask in Xen interfaces:
>>> - xenctl_cpumap is just 4 bytes smaller than static xenctl_cpumask for
>>> 128 cpus (128 would be good for quite some time). However, the new
>>
>> I don't buy this (we're already building for 256 CPUs, looking forward
>> to further bump this in the not too distant future), and I'm generally
>> opposed to introducing hard coded limits in a public interface.
>
> We should use xenctl_cpumap everywhere for specifying physical CPU bitmaps,
> even in guest NUMA interfaces if appropriate. I don't really care if it is
> a bit harder to use than a static bitmap.
>
>  -- Keir
>
>
>

[-- Attachment #2: cpumap-utils.patch --]
[-- Type: text/x-patch, Size: 29195 bytes --]

diff -r 04cb0829d138 tools/libxc/Makefile
--- a/tools/libxc/Makefile	Wed Mar 17 14:10:43 2010 +0000
+++ b/tools/libxc/Makefile	Tue Mar 23 12:29:26 2010 -0400
@@ -25,6 +25,7 @@
 CTRL_SRCS-y       += xc_mem_event.c
 CTRL_SRCS-y       += xc_mem_paging.c
 CTRL_SRCS-y       += xc_memshr.c
+CTRL_SRCS-y       += xc_bitmap.c
 CTRL_SRCS-$(CONFIG_X86) += xc_pagetab.c
 CTRL_SRCS-$(CONFIG_Linux) += xc_linux.c
 CTRL_SRCS-$(CONFIG_SunOS) += xc_solaris.c
diff -r 04cb0829d138 tools/libxc/xc_bitmap.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/libxc/xc_bitmap.c	Tue Mar 23 12:29:26 2010 -0400
@@ -0,0 +1,250 @@
+#include "xc_bitmap.h"
+#include <stdio.h>
+
+/*
+ * xc_bitmap_find_next_bit is adapted from the definition of the generic
+ * find_next_bit in Linux, with the following copyright.
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * Adapted for byte-based bitmap by Dulloor (dulloor@gatech.edu)
+ */
+
+/**
+ * __xc_ffs - find the first set bit in a byte.
+ * @byte: The byte to search
+ *
+ * The result is undefined if no bit is set, so callers should check against 0 first.
+ */
+static inline int __xc_ffs(uint8_t byte)
+{
+	int num = 0;
+
+	if ((byte & 0xff) == 0) {
+		num += 8;
+		byte >>= 8;
+	}
+	if ((byte & 0xf) == 0) {
+		num += 4;
+		byte >>= 4;
+	}
+	if ((byte & 0x3) == 0) {
+		num += 2;
+		byte >>= 2;
+	}
+	if ((byte & 0x1) == 0)
+		num += 1;
+	return num;
+}
+
+int
+xc_bitmap_find_next_bit( const uint8_t *addr, uint32_t size, uint32_t offset)
+{
+    const uint8_t *p;
+    uint32_t result;
+    uint8_t tmp;
+
+    if (offset >= size)
+        return size;
+
+    p = addr + XC_BITMAP_BYTE(offset);
+    result = offset & ~(XC_BITS_PER_BYTE-1);
+
+    size -= result;
+    offset %= XC_BITS_PER_BYTE;
+    if (offset) {
+        tmp = *(p++);
+        tmp &= (0xff << offset);
+        if (size < XC_BITS_PER_BYTE)
+            goto found_first;
+        if (tmp)
+            goto found_middle;
+        size -= XC_BITS_PER_BYTE;
+        result += XC_BITS_PER_BYTE;
+    }
+    while (size & ~(XC_BITS_PER_BYTE-1)) {
+        if ((tmp = *(p++)))
+            goto found_middle;
+        result += XC_BITS_PER_BYTE;
+        size -= XC_BITS_PER_BYTE;
+    }
+    if (!size)
+        return result;
+    tmp = *p;
+
+found_first:
+    tmp &= (0xff >> (XC_BITS_PER_BYTE - size));
+    if (!tmp)
+        return result+size;
+found_middle:
+    return result + __xc_ffs(tmp);
+}
+
+void __xc_bitmap_and(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    int k;
+    int nr = XC_BITS_TO_BYTES(nbits);
+
+    for (k=0; k<nr; k++)
+        dp[k] = s1p[k] & s2p[k];
+}
+
+void __xc_bitmap_or(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    int k;
+    int nr = XC_BITS_TO_BYTES(nbits);
+
+    for (k=0; k<nr; k++)
+        dp[k] = s1p[k] | s2p[k];
+}
+
+void __xc_bitmap_xor(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    int k;
+    int nr = XC_BITS_TO_BYTES(nbits);
+
+    for (k=0; k<nr; k++)
+        dp[k] = s1p[k] ^ s2p[k];
+}
+
+void __xc_bitmap_andnot(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    int k;
+    int nr = XC_BITS_TO_BYTES(nbits);
+
+    for (k=0; k<nr; k++)
+        dp[k] = s1p[k] & ~s2p[k];
+}
+
+void __xc_bitmap_complement(uint8_t *dp, uint8_t *sp, int nbits)
+{
+    int k, lim = nbits/XC_BITS_PER_BYTE;
+    for (k=0; k<lim; k++)
+        dp[k] = ~sp[k];
+
+    if (nbits % XC_BITS_PER_BYTE)
+        dp[k] = ~sp[k] & XC_BITMAP_LAST_BYTE_MASK(nbits);
+}
+
+int __xc_bitmap_equal(uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    int k, lim = nbits/XC_BITS_PER_BYTE;
+    for (k=0; k<lim; k++)
+        if (s1p[k] != s2p[k])
+            return 0;
+
+    if (nbits % XC_BITS_PER_BYTE)
+        if ((s1p[k] ^ s2p[k]) & XC_BITMAP_LAST_BYTE_MASK(nbits))
+            return 0;
+
+    return 1;
+}
+
+int __xc_bitmap_intersects(uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    int k, lim = nbits/XC_BITS_PER_BYTE;
+    for (k=0; k<lim; k++)
+        if (s1p[k] & s2p[k])
+            return 1;
+
+    if (nbits % XC_BITS_PER_BYTE)
+        if ((s1p[k] & s2p[k]) & XC_BITMAP_LAST_BYTE_MASK(nbits))
+            return 1;
+
+    return 0;
+}
+
+int __xc_bitmap_subset(uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    int k, lim = nbits/XC_BITS_PER_BYTE;
+    for (k=0; k<lim; k++)
+        if (s1p[k] & ~s2p[k])
+            return 0;
+
+    if (nbits % XC_BITS_PER_BYTE)
+        if ((s1p[k] & ~s2p[k]) & XC_BITMAP_LAST_BYTE_MASK(nbits))
+            return 0;
+
+    return 1;
+}
+
+int __xc_bitmap_empty(uint8_t *sp, int nbits)
+{
+    int k, lim = nbits/XC_BITS_PER_BYTE;
+    for (k=0; k<lim; k++)
+        if (sp[k])
+            return 0;
+
+    if (nbits % XC_BITS_PER_BYTE)
+        if (sp[k] & XC_BITMAP_LAST_BYTE_MASK(nbits))
+            return 0;
+
+    return 1;
+}
+
+int __xc_bitmap_full(uint8_t *sp, int nbits)
+{
+    int k, lim = nbits/XC_BITS_PER_BYTE;
+    for (k=0; k<lim; k++)
+        if (~sp[k] & XC_BITMAP_BYTE_MASK)
+            return 0;
+
+    if (nbits % XC_BITS_PER_BYTE)
+        if (~sp[k] & XC_BITMAP_LAST_BYTE_MASK(nbits))
+            return 0;
+
+    return 1;
+}
+
+static inline uint8_t hweight8(uint8_t w)
+{
+    uint8_t res = (w & 0x55) + ((w >> 1) & 0x55);
+    res = (res & 0x33) + ((res >> 2) & 0x33);
+    return (res & 0x0F) + ((res >> 4) & 0x0F);
+}
+
+int __xc_bitmap_weight(const uint8_t *sp, int nbits)
+{
+    int k, w = 0, lim = nbits/XC_BITS_PER_BYTE;
+
+    for (k=0; k <lim; k++)
+        w += hweight8(sp[k]);
+
+    if (nbits % XC_BITS_PER_BYTE)
+        w += hweight8(sp[k] & XC_BITMAP_LAST_BYTE_MASK(nbits));
+
+    return w;
+}
+
+/* xenctl_cpumask print functions */
+#define CHUNKSZ	8
+#define roundup_power2(val,modulus)	(((val) + (modulus) - 1) & ~((modulus) - 1))
+
+int xc_bitmap_snprintf(char *buf, unsigned int buflen,
+	const uint8_t *maskp, int nmaskbits)
+{
+	int i, word, bit, len = 0;
+	unsigned long val;
+	const char *sep = "";
+	int chunksz;
+	uint8_t chunkmask;
+
+	chunksz = nmaskbits & (CHUNKSZ - 1);
+	if (chunksz == 0)
+		chunksz = CHUNKSZ;
+
+	i = roundup_power2(nmaskbits, CHUNKSZ) - CHUNKSZ;
+	for (; i >= 0; i -= CHUNKSZ) {
+		chunkmask = ((1ULL << chunksz) - 1);
+		word = i / XC_BITS_PER_BYTE;
+		bit = i % XC_BITS_PER_BYTE;
+		val = (maskp[word] >> bit) & chunkmask;
+		len += snprintf(buf+len, buflen-len, "%s%0*lx", sep,
+			(chunksz+3)/4, val);
+		chunksz = CHUNKSZ;
+		sep = ",";
+	}
+	return len;
+}
+
+
diff -r 04cb0829d138 tools/libxc/xc_bitmap.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/libxc/xc_bitmap.h	Tue Mar 23 12:29:26 2010 -0400
@@ -0,0 +1,193 @@
+#ifndef __XENCTL_BITMAP_H
+#define __XENCTL_BITMAP_H
+
+#include <stdint.h>
+#include <string.h>
+
+#define XC_BITS_PER_BYTE 8
+#define XC_BITS_TO_BYTES(bits) \
+    (((bits)+XC_BITS_PER_BYTE-1)/XC_BITS_PER_BYTE)
+#define XC_BITMAP_BIT(nr)   (1 << (nr))
+#define XC_BITMAP_BIT_MASK(nr)  (1 << ((nr) % XC_BITS_PER_BYTE))
+#define XC_BITMAP_BYTE(nr)  ((nr) / XC_BITS_PER_BYTE)
+
+#define XC_BITMAP_BYTE_MASK (0xff)
+#define XC_BITMAP_LAST_BYTE_MASK(nbits)					\
+	(((nbits) % XC_BITS_PER_BYTE) ?		                \
+		((1<<((nbits) % XC_BITS_PER_BYTE))-1) :         \
+                            XC_BITMAP_BYTE_MASK)
+
+#define xc_bitmap_find_first_bit(addr, size)        \
+            xc_bitmap_find_next_bit(addr, size, 0)
+extern int
+xc_bitmap_find_next_bit(const uint8_t *addr, uint32_t size, uint32_t offset);
+
+#define xc_bitmap_find_first_zero_bit(addr, size) \
+            xc_bitmap_find_next_zero_bit(addr, size, 0)
+extern int xc_bitmap_find_next_zero_bit(
+        const uint8_t *addr, uint32_t size, uint32_t offset);
+
+extern void __xc_bitmap_and(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits);
+extern void __xc_bitmap_or(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits);
+extern void __xc_bitmap_xor(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits);
+extern void
+__xc_bitmap_andnot(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits);
+extern void __xc_bitmap_complement(uint8_t *dp, uint8_t *sp, int nbits);
+extern int __xc_bitmap_equal(uint8_t *s1p, uint8_t *s2p, int nbits);
+extern int __xc_bitmap_intersects(uint8_t *s1p, uint8_t *s2p, int nbits);
+extern int __xc_bitmap_subset(uint8_t *s1p, uint8_t *s2p, int nbits);
+extern int __xc_bitmap_empty(uint8_t *sp, int nbits);
+extern int __xc_bitmap_full(uint8_t *sp, int nbits);
+extern int __xc_bitmap_weight(const uint8_t *sp, int nbits);
+
+extern int xc_bitmap_snprintf(char *buf, unsigned int buflen,
+	const uint8_t *maskp, int nmaskbits);
+
+
+static inline void xc_bitmap_set_bit(int nr, volatile uint8_t *addr)
+{
+    uint8_t mask = XC_BITMAP_BIT_MASK(nr);
+    uint8_t *p = ((uint8_t *)addr) + XC_BITMAP_BYTE(nr);
+    *p |= mask;
+}
+
+static inline void xc_bitmap_clear_bit(int nr, volatile uint8_t *addr)
+{
+    uint8_t mask = XC_BITMAP_BIT_MASK(nr);
+    uint8_t *p = ((uint8_t *)addr) + XC_BITMAP_BYTE(nr);
+    *p &= ~mask;
+}
+
+static inline int xc_bitmap_test_bit(int nr, volatile uint8_t *addr)
+{
+    uint8_t mask = XC_BITMAP_BIT_MASK(nr);
+    uint8_t *p = ((uint8_t *)addr) + XC_BITMAP_BYTE(nr);
+    return *p & mask;
+}
+
+static inline void xc_bitmap_fill(uint8_t *dp, int nbits)
+{
+    size_t nbytes = XC_BITS_TO_BYTES(nbits);
+    if (nbytes > 1)
+        memset(dp, 0xff, nbytes-1); 
+    dp[nbytes-1] = XC_BITMAP_LAST_BYTE_MASK(nbits);
+}
+
+static inline void xc_bitmap_zero(uint8_t *dp, int nbits)
+{
+    size_t nbytes = XC_BITS_TO_BYTES(nbits);
+    if (nbytes > 1)
+        memset(dp, 0x00, nbytes-1); 
+    dp[nbytes-1] = 0;
+}
+
+
+static inline void
+xc_bitmap_and(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    if (nbits <= XC_BITS_PER_BYTE)
+        *dp = *s1p & *s2p;
+    else
+        __xc_bitmap_and(dp, s1p, s2p, nbits);
+}
+
+static inline void
+xc_bitmap_or(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    if (nbits <= XC_BITS_PER_BYTE)
+        *dp = *s1p | *s2p;
+    else
+        __xc_bitmap_or(dp, s1p, s2p, nbits);
+}
+
+static inline void
+xc_bitmap_xor(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    if (nbits <= XC_BITS_PER_BYTE)
+        *dp = *s1p ^ *s2p;
+    else
+        __xc_bitmap_xor(dp, s1p, s2p, nbits);
+}
+
+static inline void
+xc_bitmap_andnot(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    if (nbits <= XC_BITS_PER_BYTE)
+        *dp = *s1p & ~(*s2p);
+    else
+        __xc_bitmap_andnot(dp, s1p, s2p, nbits);
+}
+
+static inline void
+xc_bitmap_complement(uint8_t *dp, uint8_t *sp, int nbits)
+{
+    if (nbits <= XC_BITS_PER_BYTE)
+        *dp = ~(*sp) & XC_BITMAP_LAST_BYTE_MASK(nbits);
+    else
+        __xc_bitmap_complement(dp, sp, nbits);
+}
+
+static inline int
+xc_bitmap_equal(uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    if (nbits <= XC_BITS_PER_BYTE)
+        return !((*s1p ^ *s2p) & XC_BITMAP_LAST_BYTE_MASK(nbits));
+
+    return __xc_bitmap_equal(s1p, s2p, nbits);
+}
+
+static inline int
+xc_bitmap_intersects(uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    if (nbits <= XC_BITS_PER_BYTE)
+        return ((*s1p & *s2p) & XC_BITMAP_LAST_BYTE_MASK(nbits));
+
+    return __xc_bitmap_intersects(s1p, s2p, nbits);
+}
+
+static inline int
+xc_bitmap_subset(uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    if (nbits <= XC_BITS_PER_BYTE)
+        return !((*s1p & ~(*s2p)) & XC_BITMAP_LAST_BYTE_MASK(nbits));
+
+    return __xc_bitmap_subset(s1p, s2p, nbits);
+}
+
+static inline int
+xc_bitmap_empty(uint8_t *sp, int nbits)
+{
+	if (nbits <= XC_BITS_PER_BYTE)
+		return ! (*sp & XC_BITMAP_LAST_BYTE_MASK(nbits));
+	else
+		return __xc_bitmap_empty(sp, nbits);
+}
+
+static inline int
+xc_bitmap_full(uint8_t *sp, int nbits)
+{
+	if (nbits <= XC_BITS_PER_BYTE)
+		return ! (~(*sp) & XC_BITMAP_LAST_BYTE_MASK(nbits));
+	else
+		return __xc_bitmap_full(sp, nbits);
+}
+
+static inline uint32_t
+xc_bitmap_weight(const uint8_t *sp, int nbits)
+{
+	return __xc_bitmap_weight(sp, nbits);
+}
+
+
+static inline void
+xc_bitmap_copy(uint8_t *dp, const uint8_t *sp, int nbits)
+{
+    if (nbits <= XC_BITS_PER_BYTE)
+        *dp = *sp;
+    else
+        memcpy(dp, sp, XC_BITS_TO_BYTES(nbits));
+}
+
+#endif
diff -r 04cb0829d138 tools/libxc/xc_cpumap.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/libxc/xc_cpumap.h	Tue Mar 23 12:29:26 2010 -0400
@@ -0,0 +1,274 @@
+#ifndef __XENCTL_CPUMAP_H
+#define __XENCTL_CPUMAP_H
+
+#include "xc_private.h"
+#include "xc_bitmap.h"
+
+#define xc_cpumap_bits(maskp)                                           \
+                    ({  uint8_t *bitmap;                                \
+                        get_xen_guest_handle(bitmap, (maskp)->bitmap);  \
+                        bitmap; })
+#define xc_cpumap_len(maskp) ((maskp)->nr_cpus)
+
+/* Number of cpus set in the bitmap */
+#define xc_cpumap_num_cpus(mask)	xc_cpumap_weight(mask)
+
+/**
+ * xc_cpumap_first - get the first cpu in a xenctl_cpumap
+ * @srcp: the xenctl_cpumap pointer
+ *
+ * Returns >= xc_cpumap_len(srcp) if no cpus set.
+ */
+static inline unsigned int
+xc_cpumap_first(struct xenctl_cpumap *srcp)
+{
+	return xc_bitmap_find_first_bit(xc_cpumap_bits(srcp),
+                                                xc_cpumap_len(srcp));
+}
+
+/**
+ * xc_cpumap_next - get the next cpu in a xenctl_cpumap
+ * @n: the cpu prior to the place to search (ie. return will be > @n)
+ * @srcp: the xenctl_cpumap pointer
+ *
+ * Returns >= xc_cpumap_len(srcp) if no further cpus set.
+ */
+static inline uint32_t
+xc_cpumap_next(int n, struct xenctl_cpumap *srcp)
+{
+	return xc_bitmap_find_next_bit(xc_cpumap_bits(srcp),
+                                        xc_cpumap_len(srcp), n+1);
+}
+
+#if 0
+static inline uint32_t
+xc_cpumap_next_zero(int n, struct xenctl_cpumap *srcp)
+{
+	return xc_bitmap_find_next_zero_bit(xc_cpumap_bits(srcp),
+                                        xc_cpumap_len(srcp), n+1);
+}
+#endif
+
+/**
+ * xc_for_each_cpu - iterate over every cpu in a mask
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask: the xenctl_cpumap pointer
+ *
+ * After the loop, cpu is >= xc_cpumap_len(mask)
+ */
+#define xc_for_each_cpu(cpu, mask)				\
+            __xc_for_each_cpu(cpu, &(mask))
+
+#define __xc_for_each_cpu(cpu, mask)            \
+	for ((cpu) = -1;				            \
+		(cpu) = xc_cpumap_next((cpu), (mask)),	\
+		(cpu) < xc_cpumap_len(mask);)
+
+
+#define xc_cpumap_equal(src1, src2) __xc_cpumap_equal(&(src1), &(src2))
+static inline int
+__xc_cpumap_equal(struct xenctl_cpumap *s1p, struct xenctl_cpumap *s2p)
+{
+	return xc_bitmap_equal(xc_cpumap_bits(s1p), xc_cpumap_bits(s2p), 
+                                                xc_cpumap_len(s1p));
+}
+
+#define xc_cpumap_set_cpu(cpu, dst) __xc_cpumap_set_cpu(cpu, &(dst))
+static inline void __xc_cpumap_set_cpu(int cpu, struct xenctl_cpumap *dstp)
+{
+	xc_bitmap_set_bit(cpu, xc_cpumap_bits(dstp));
+}
+
+#define xc_cpumap_clear_cpu(cpu, dst) __xc_cpumap_clear_cpu(cpu, &(dst))
+static inline void __xc_cpumap_clear_cpu(int cpu, struct xenctl_cpumap *dstp)
+{
+	xc_bitmap_clear_bit(cpu, xc_cpumap_bits(dstp));
+}
+
+#define xc_cpumap_test_cpu(cpu, dst) __xc_cpumap_test_cpu(cpu, &(dst))
+static inline int __xc_cpumap_test_cpu(int cpu, struct xenctl_cpumap *dstp)
+{
+    return xc_bitmap_test_bit(cpu, xc_cpumap_bits(dstp));
+}
+
+
+#define xc_cpumap_setall(dst) __xc_cpumap_setall(&(dst))
+static inline void __xc_cpumap_setall(struct xenctl_cpumap *dstp)
+{
+	xc_bitmap_fill(xc_cpumap_bits(dstp), xc_cpumap_len(dstp));
+}
+
+#define xc_cpumap_clearall(dst) __xc_cpumap_clearall(&(dst))
+static inline void __xc_cpumap_clearall(struct xenctl_cpumap *dstp)
+{
+	xc_bitmap_zero(xc_cpumap_bits(dstp), xc_cpumap_len(dstp));
+}
+
+#define xc_cpumap_and(dst, src1, src2) \
+                        __xc_cpumap_and(&(dst), &(src1), &(src2))
+static inline void __xc_cpumap_and(struct xenctl_cpumap *dstp,
+        struct xenctl_cpumap *src1p, struct xenctl_cpumap *src2p)
+{
+	xc_bitmap_and(xc_cpumap_bits(dstp), xc_cpumap_bits(src1p),
+                                xc_cpumap_bits(src2p), xc_cpumap_len(dstp));
+}
+
+#define xc_cpumap_or(dst, src1, src2) \
+                        __xc_cpumap_or(&(dst), &(src1), &(src2))
+static inline void __xc_cpumap_or(struct xenctl_cpumap *dstp,
+        struct xenctl_cpumap *src1p, struct xenctl_cpumap *src2p)
+{
+	xc_bitmap_or(xc_cpumap_bits(dstp), xc_cpumap_bits(src1p),
+				                xc_cpumap_bits(src2p), xc_cpumap_len(dstp));
+}
+
+#define xc_cpumap_xor(dst, src1, src2) \
+                        __xc_cpumap_xor(&(dst), &(src1), &(src2))
+static inline void __xc_cpumap_xor(struct xenctl_cpumap *dstp,
+        struct xenctl_cpumap *src1p, struct xenctl_cpumap *src2p)
+{
+	xc_bitmap_xor(xc_cpumap_bits(dstp), xc_cpumap_bits(src1p),
+                        xc_cpumap_bits(src2p), xc_cpumap_len(dstp));
+}
+
+#define xc_cpumap_andnot(dst, src1, src2) \
+                        __xc_cpumap_andnot(&(dst), &(src1), &(src2))
+static inline void __xc_cpumap_andnot(struct xenctl_cpumap *dstp,
+    struct xenctl_cpumap *src1p, struct xenctl_cpumap *src2p)
+{
+	xc_bitmap_andnot(xc_cpumap_bits(dstp), xc_cpumap_bits(src1p),
+                        xc_cpumap_bits(src2p), xc_cpumap_len(dstp));
+}
+
+#define xc_cpumap_complement(dst, src) \
+                        __xc_cpumap_complement(&(dst), &(src))
+static inline void __xc_cpumap_complement(struct xenctl_cpumap *dstp,
+				                        struct xenctl_cpumap *srcp)
+{
+	xc_bitmap_complement(xc_cpumap_bits(dstp), xc_cpumap_bits(srcp),
+                                                xc_cpumap_len(dstp));
+}
+
+#define xc_cpumap_intersects(src1, src2) \
+                        __xc_cpumap_intersects(&(src1), &(src2))
+static inline int __xc_cpumap_intersects(struct xenctl_cpumap *src1p,
+				                            struct xenctl_cpumap *src2p)
+{
+	return xc_bitmap_intersects(xc_cpumap_bits(src1p), xc_cpumap_bits(src2p),
+                                                        xc_cpumap_len(src1p));
+}
+
+#define xc_cpumap_subset(src1, src2) \
+                        __xc_cpumap_subset(&(src1), &(src2))
+static inline int __xc_cpumap_subset(struct xenctl_cpumap *src1p,
+				                        struct xenctl_cpumap *src2p)
+{
+	return xc_bitmap_subset(xc_cpumap_bits(src1p), xc_cpumap_bits(src2p),
+                                                        xc_cpumap_len(src1p));
+}
+
+#define xc_cpumap_empty(src) __xc_cpumap_empty(&(src))
+static inline int __xc_cpumap_empty(struct xenctl_cpumap *srcp)
+{
+	return xc_bitmap_empty(xc_cpumap_bits(srcp), xc_cpumap_len(srcp));
+}
+
+#define xc_cpumap_full(src) __xc_cpumap_full(&(src))
+static inline int __xc_cpumap_full(struct xenctl_cpumap *srcp)
+{
+	return xc_bitmap_full(xc_cpumap_bits(srcp), xc_cpumap_len(srcp));
+}
+
+#define xc_cpumap_weight(src) __xc_cpumap_weight(&(src))
+static inline uint32_t __xc_cpumap_weight(struct xenctl_cpumap *srcp)
+{
+	return xc_bitmap_weight(xc_cpumap_bits(srcp), xc_cpumap_len(srcp));
+}
+
+#define xc_cpumap_copy(dst, src) __xc_cpumap_copy(&(dst), &(src))
+static inline void __xc_cpumap_copy(struct xenctl_cpumap *dstp,
+				                    struct xenctl_cpumap *srcp)
+{
+	xc_bitmap_copy(xc_cpumap_bits(dstp), xc_cpumap_bits(srcp),
+                                                xc_cpumap_len(dstp));
+}
+
+#if 0
+#define XC_CPUMASK_LAST_BYTE XC_BITMAP_LAST_BYTE_MASK(XENCTL_NR_CPUS)
+
+#define XC_CPUMASK_ALL							                    \
+/*(xenctl_cpumap)*/ { {							                \
+	[0 ... XC_BITS_TO_BYTES(XENCTL_NR_CPUS)-2] = 0xff,		        \
+	[XC_BITS_TO_BYTES(XENCTL_NR_CPUS)-1] = XC_CPUMASK_LAST_BYTE    \
+} }
+
+#define XC_CPUMASK_NONE							            \
+/*(xenctl_cpumap)*/ { {							        \
+	[0 ... XC_BITS_TO_BYTES(XENCTL_NR_CPUS)-1] =  0		    \
+} }
+#endif
+
+#define xc_cpumap_snprintf(buf, len, src) \
+			__xc_cpumap_snprintf((buf), (len), &(src), XENCTL_NR_CPUS)
+static inline int __xc_cpumap_snprintf(char *buf, int len,
+					        const struct xenctl_cpumap *srcp, int nbits)
+{
+	return xc_bitmap_snprintf(buf, len, xc_cpumap_bits(srcp), nbits);
+}
+
+/***********************************************************************/
+
+static inline int
+xc_cpumap_allocz_bitmap(int xc_handle, struct xenctl_cpumap *map)
+{
+    int nr_cpus;
+    uint8_t *bitmap;
+    xc_physinfo_t pinfo = { 0 };
+
+    if (xc_physinfo(xc_handle, &pinfo))
+        goto failed;
+  
+    nr_cpus = pinfo.nr_cpus;
+    if (!(bitmap = malloc(XC_BITS_TO_BYTES(nr_cpus))))
+        goto failed;
+
+    xc_bitmap_zero(bitmap, nr_cpus);
+    map->nr_cpus = pinfo.nr_cpus;
+    set_xen_guest_handle(map->bitmap, bitmap);
+    return 0;
+failed:
+    return -1;
+}
+
+static inline void
+xc_cpumap_free_bitmap(struct xenctl_cpumap *map)
+{
+    uint8_t *bitmap;
+    get_xen_guest_handle(bitmap, map->bitmap);
+    free(bitmap);
+}
+
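+/*
+ * The bitmap is handed to Xen by reference in the domctl, so it must be
+ * locked in memory across the hypercall and unlocked once it returns.
+ */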
+static inline int
+xc_cpumap_lock_pages(struct xenctl_cpumap *map)
+{
+    uint8_t *bitmap;
+    uint32_t nr_bytes = XC_BITS_TO_BYTES(map->nr_cpus);
+
+    get_xen_guest_handle(bitmap, map->bitmap);
+   
+    if (lock_pages(bitmap, nr_bytes))
+        return -1;
+    return 0;
+}
+
+static inline void
+xc_cpumap_unlock_pages(struct xenctl_cpumap *map)
+{
+    uint8_t *bitmap;
+    uint32_t nr_bytes = XC_BITS_TO_BYTES(map->nr_cpus);
+
+    get_xen_guest_handle(bitmap, map->bitmap);
+    unlock_pages(bitmap, nr_bytes);
+}
+
+#endif /* __XENCTL_CPUMAP_H */
diff -r 04cb0829d138 tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c	Wed Mar 17 14:10:43 2010 +0000
+++ b/tools/libxc/xc_domain.c	Tue Mar 23 12:29:26 2010 -0400
@@ -8,6 +8,7 @@
 
 #include "xc_private.h"
 #include "xg_save_restore.h"
+#include "xc_cpumap.h"
 #include <xen/memory.h>
 #include <xen/hvm/hvm_op.h>
 
@@ -98,28 +99,17 @@
 int xc_vcpu_setaffinity(int xc_handle,
                         uint32_t domid,
                         int vcpu,
-                        uint64_t *cpumap, int cpusize)
+                        struct xenctl_cpumap *cpumap)
 {
     DECLARE_DOMCTL;
     int ret = -1;
-    uint8_t *local = malloc(cpusize); 
 
-    if(local == NULL)
-    {
-        PERROR("Could not alloc memory for Xen hypercall");
-        goto out;
-    }
     domctl.cmd = XEN_DOMCTL_setvcpuaffinity;
     domctl.domain = (domid_t)domid;
-    domctl.u.vcpuaffinity.vcpu    = vcpu;
+    domctl.u.vcpuaffinity.vcpu = vcpu;
+    domctl.u.vcpuaffinity.cpumap = *cpumap;
 
-    bitmap_64_to_byte(local, cpumap, cpusize * 8);
-
-    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
-
-    domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8;
-    
-    if ( lock_pages(local, cpusize) != 0 )
+    if (xc_cpumap_lock_pages(cpumap))
     {
         PERROR("Could not lock memory for Xen hypercall");
         goto out;
@@ -127,10 +117,9 @@
 
     ret = do_domctl(xc_handle, &domctl);
 
-    unlock_pages(local, cpusize);
+    xc_cpumap_unlock_pages(cpumap);
 
  out:
-    free(local);
     return ret;
 }
 
@@ -138,28 +127,18 @@
 int xc_vcpu_getaffinity(int xc_handle,
                         uint32_t domid,
                         int vcpu,
-                        uint64_t *cpumap,
-                        int cpusize)
+                        struct xenctl_cpumap *cpumap)
 {
     DECLARE_DOMCTL;
     int ret = -1;
-    uint8_t * local = malloc(cpusize);
-
-    if(local == NULL)
-    {
-        PERROR("Could not alloc memory for Xen hypercall");
-        goto out;
-    }
 
     domctl.cmd = XEN_DOMCTL_getvcpuaffinity;
     domctl.domain = (domid_t)domid;
     domctl.u.vcpuaffinity.vcpu = vcpu;
+    domctl.u.vcpuaffinity.cpumap = *cpumap;
 
 
-    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
-    domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8;
-    
-    if ( lock_pages(local, sizeof(local)) != 0 )
+    if (xc_cpumap_lock_pages(cpumap))
     {
         PERROR("Could not lock memory for Xen hypercall");
         goto out;
@@ -167,10 +146,8 @@
 
     ret = do_domctl(xc_handle, &domctl);
 
-    unlock_pages(local, sizeof (local));
-    bitmap_byte_to_64(cpumap, local, cpusize * 8);
+    xc_cpumap_unlock_pages(cpumap);
 out:
-    free(local);
     return ret;
 }
 
diff -r 04cb0829d138 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h	Wed Mar 17 14:10:43 2010 +0000
+++ b/tools/libxc/xenctrl.h	Tue Mar 23 12:29:26 2010 -0400
@@ -309,13 +309,11 @@
 int xc_vcpu_setaffinity(int xc_handle,
                         uint32_t domid,
                         int vcpu,
-                        uint64_t *cpumap,
-                        int cpusize);
+                        struct xenctl_cpumap *cpumap);
 int xc_vcpu_getaffinity(int xc_handle,
                         uint32_t domid,
                         int vcpu,
-                        uint64_t *cpumap,
-                        int cpusize);
+                        struct xenctl_cpumap *cpumap);
 
 /**
  * This function will return information about one or more domains. It is
diff -r 04cb0829d138 tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c	Wed Mar 17 14:10:43 2010 +0000
+++ b/tools/python/xen/lowlevel/xc/xc.c	Tue Mar 23 12:29:26 2010 -0400
@@ -23,6 +23,7 @@
 #include "xc_dom.h"
 #include <xen/hvm/hvm_info_table.h>
 #include <xen/hvm/params.h>
+#include "xc_cpumap.h"
 
 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
 
@@ -215,12 +216,8 @@
 {
     uint32_t dom;
     int vcpu = 0, i;
-    uint64_t  *cpumap;
+    struct xenctl_cpumap cpumap;
     PyObject *cpulist = NULL;
-    int nr_cpus, size;
-    xc_physinfo_t info; 
-    xc_cpu_to_node_t map[1];
-    uint64_t cpumap_size = sizeof(cpumap); 
 
     static char *kwd_list[] = { "domid", "vcpu", "cpumap", NULL };
     
@@ -229,40 +226,26 @@
                                       &dom, &vcpu, &cpulist) )
         return NULL;
 
-    set_xen_guest_handle(info.cpu_to_node, map);
-    info.max_cpu_id = 1;
-    if ( xc_physinfo(self->xc_handle, &info) != 0 )
+    if (xc_cpumap_allocz_bitmap(self->xc_handle, &cpumap))
         return pyxc_error_to_exception();
-  
-    nr_cpus = info.nr_cpus;
-
-    size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8);
-    cpumap = malloc(cpumap_size * size);
-    if(cpumap == NULL)
-        return pyxc_error_to_exception();
-    
 
     if ( (cpulist != NULL) && PyList_Check(cpulist) )
     {
-        for ( i = 0; i < size; i++)
-        {
-            cpumap[i] = 0ULL;
-        }
         for ( i = 0; i < PyList_Size(cpulist); i++ ) 
         {
             long cpu = PyInt_AsLong(PyList_GetItem(cpulist, i));
-            *(cpumap + cpu / (cpumap_size * 8)) |= (uint64_t)1 << (cpu % (cpumap_size * 8));
+            xc_cpumap_set_cpu(cpu, cpumap);
         }
     }
   
-    if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap, size * cpumap_size) != 0 )
+    if (xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, &cpumap))
     {
-        free(cpumap);
+        xc_cpumap_free_bitmap(&cpumap);
         return pyxc_error_to_exception();
     }
 
     Py_INCREF(zero);
-    free(cpumap); 
+    xc_cpumap_free_bitmap(&cpumap);
     return zero;
 }
 
@@ -381,11 +364,7 @@
     uint32_t dom, vcpu = 0;
     xc_vcpuinfo_t info;
     int rc, i;
-    uint64_t *cpumap;
-    int nr_cpus, size;
-    xc_physinfo_t pinfo = { 0 };
-    xc_cpu_to_node_t map[1];
-    uint64_t cpumap_size = sizeof(cpumap);
+    struct xenctl_cpumap cpumap;
 
     static char *kwd_list[] = { "domid", "vcpu", NULL };
     
@@ -393,23 +372,14 @@
                                       &dom, &vcpu) )
         return NULL;
 
-    set_xen_guest_handle(pinfo.cpu_to_node, map);
-    pinfo.max_cpu_id = 1;
-    if ( xc_physinfo(self->xc_handle, &pinfo) != 0 ) 
+    if ( xc_cpumap_allocz_bitmap(self->xc_handle, &cpumap) ) 
         return pyxc_error_to_exception();
-    nr_cpus = pinfo.nr_cpus;
-    rc = xc_vcpu_getinfo(self->xc_handle, dom, vcpu, &info);
-    if ( rc < 0 )
+    if ((rc = xc_vcpu_getinfo(self->xc_handle, dom, vcpu, &info)) < 0)
+    {
+        xc_cpumap_free_bitmap(&cpumap);
         return pyxc_error_to_exception();
+    }
-    size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8); 
 
-    if((cpumap = malloc(cpumap_size * size)) == NULL)
-        return pyxc_error_to_exception(); 
-
-    rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, cpumap, cpumap_size * size);
-    if ( rc < 0 )
+    if ((rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, &cpumap)) < 0)
     {
-        free(cpumap);
+        xc_cpumap_free_bitmap(&cpumap);
         return pyxc_error_to_exception();
     }
 
@@ -421,18 +391,15 @@
                               "cpu",      info.cpu);
 
     cpulist = PyList_New(0);
-    for ( i = 0; i < size * cpumap_size * 8; i++ )
+    xc_for_each_cpu(i, cpumap)
     {
-        if (*(cpumap + i / (cpumap_size * 8)) & 1 ) {
-            PyObject *pyint = PyInt_FromLong(i);
-            PyList_Append(cpulist, pyint);
-            Py_DECREF(pyint);
-        }
-        *(cpumap + i / (cpumap_size * 8)) >>= 1;
+        PyObject *pyint = PyInt_FromLong(i);
+        PyList_Append(cpulist, pyint);
+        Py_DECREF(pyint);
     }
     PyDict_SetItemString(info_dict, "cpumap", cpulist);
     Py_DECREF(cpulist);
-    free(cpumap);
+    xc_cpumap_free_bitmap(&cpumap);
     return info_dict;
 }
 

[-- Attachment #3: Type: text/plain, Size: 138 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH] libxc bitmap utils and vcpu-affinity
  2010-03-23 16:40         ` Dulloor
@ 2010-03-23 16:41           ` Dulloor
  2010-03-23 16:55             ` Dulloor
  0 siblings, 1 reply; 12+ messages in thread
From: Dulloor @ 2010-03-23 16:41 UTC (permalink / raw)
  To: Keir Fraser; +Cc: xen-devel, Jan Beulich

I meant utils for **xenctl_cpumap**

On Tue, Mar 23, 2010 at 12:40 PM, Dulloor <dulloor@gmail.com> wrote:
> Fine, I agree with you both. Attached is a patch adding utils for
> xenctl_bitmap (to libxc) and using the same in vcpu_(get|set)affinity.
> For the guest-numa interface, I will see if I can use xenctl_cpumap.
>
> -dulloor
>
> On Tue, Mar 23, 2010 at 7:05 AM, Keir Fraser <keir.fraser@eu.citrix.com> wrote:
>> On 23/03/2010 10:10, "Jan Beulich" <JBeulich@novell.com> wrote:
>>
>>>>>> Dulloor <dulloor@gmail.com> 22.03.10 18:44 >>>
>>>> Motivation for using xenctl_cpumask in Xen interfaces :
>>>> - xenctl_cpumap is just 4 bytes smaller than static xenctl_cpumask for
>>>> 128 cpus (128 would be good for quite some time). However, the new
>>>
>>> I don't buy this (we're already building for 256 CPUs, looking forward
>>> to further bump this in the not too distant future), and I'm generally
>>> opposed to introducing hard coded limits in a public interface.
>>
>> We should use xenctl_cpumask everywhere for specifying physical CPU bitmaps,
>> even into guest NUMA interfaces if appropriate. I don't really care if it is
>> a bit harder to use than a static bitmap.
>>
>>  -- Keir
>>
>>
>>
>

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH] libxc bitmap utils and vcpu-affinity
  2010-03-23 16:41           ` Dulloor
@ 2010-03-23 16:55             ` Dulloor
  2010-03-30 14:42               ` Fwd: " Dulloor
  0 siblings, 1 reply; 12+ messages in thread
From: Dulloor @ 2010-03-23 16:55 UTC (permalink / raw)
  To: Keir Fraser; +Cc: xen-devel, Jan Beulich

[-- Attachment #1: Type: text/plain, Size: 1464 bytes --]

Please use this patch, in which the length of the bitmap is
(physinfo.max_cpu_id+1) rather than (physinfo.nr_cpus).
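
A minimal sketch of the intended calling sequence (hypothetical caller,
for illustration only; error handling elided; assumes the patch below is
applied):

    #include "xenctrl.h"
    #include "xc_cpumap.h"

    /* Pin vcpu 0 of domain 'domid' to physical cpus 0 and 2. */
    static int pin_vcpu0(int xc_handle, uint32_t domid)
    {
        struct xenctl_cpumap cpumap;

        if (xc_cpumap_allocz_bitmap(xc_handle, &cpumap))
            return -1;
        xc_cpumap_set_cpu(0, cpumap);
        xc_cpumap_set_cpu(2, cpumap);
        if (xc_vcpu_setaffinity(xc_handle, domid, 0, &cpumap)) {
            xc_cpumap_free_bitmap(&cpumap);
            return -1;
        }
        xc_cpumap_free_bitmap(&cpumap);
        return 0;
    }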

-dulloor

On Tue, Mar 23, 2010 at 12:41 PM, Dulloor <dulloor@gmail.com> wrote:
> I meant utils for **xenctl_cpumap**
>
> On Tue, Mar 23, 2010 at 12:40 PM, Dulloor <dulloor@gmail.com> wrote:
>> Fine, I agree with you both. Attached is a patch adding utils for
>> xenctl_bitmap (to libxc) and using the same in vcpu_(get|set)affinity.
>> For the guest-numa interface, I will see if I can use xenctl_cpumap.
>>
>> -dulloor
>>
>> On Tue, Mar 23, 2010 at 7:05 AM, Keir Fraser <keir.fraser@eu.citrix.com> wrote:
>>> On 23/03/2010 10:10, "Jan Beulich" <JBeulich@novell.com> wrote:
>>>
>>>>>>> Dulloor <dulloor@gmail.com> 22.03.10 18:44 >>>
>>>>> Motivation for using xenctl_cpumask in Xen interfaces :
>>>>> - xenctl_cpumap is just 4 bytes smaller than static xenctl_cpumask for
>>>>> 128 cpus (128 would be good for quite some time). However, the new
>>>>
>>>> I don't buy this (we're already building for 256 CPUs, looking forward
>>>> to further bump this in the not too distant future), and I'm generally
>>>> opposed to introducing hard coded limits in a public interface.
>>>
>>> We should use xenctl_cpumask everywhere for specifying physical CPU bitmaps,
>>> even into guest NUMA interfaces if appropriate. I don't really care if it is
>>> a bit harder to use than a static bitmap.
>>>
>>>  -- Keir
>>>
>>>
>>>
>>
>

[-- Attachment #2: cpumap-utils.patch --]
[-- Type: text/x-patch, Size: 29200 bytes --]

diff -r 04cb0829d138 tools/libxc/Makefile
--- a/tools/libxc/Makefile	Wed Mar 17 14:10:43 2010 +0000
+++ b/tools/libxc/Makefile	Tue Mar 23 12:50:32 2010 -0400
@@ -25,6 +25,7 @@
 CTRL_SRCS-y       += xc_mem_event.c
 CTRL_SRCS-y       += xc_mem_paging.c
 CTRL_SRCS-y       += xc_memshr.c
+CTRL_SRCS-y       += xc_bitmap.c
 CTRL_SRCS-$(CONFIG_X86) += xc_pagetab.c
 CTRL_SRCS-$(CONFIG_Linux) += xc_linux.c
 CTRL_SRCS-$(CONFIG_SunOS) += xc_solaris.c
diff -r 04cb0829d138 tools/libxc/xc_bitmap.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/libxc/xc_bitmap.c	Tue Mar 23 12:50:32 2010 -0400
@@ -0,0 +1,250 @@
+#include "xc_bitmap.h"
+#include <stdio.h>
+
+/*
+ * xc_bitmap_find_next_bit is adapted from the definition of generic
+ * find_next_bit in Linux, with the following copyright.
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * Adapted for byte-based bitmap by Dulloor (dulloor@gatech.edu)
+ */
+
+/**
+ * __ffs - find first bit in byte.
+ * @byte: The byte to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static inline int __xc_ffs(uint8_t byte)
+{
+	int num = 0;
+
+	if ((byte & 0xff) == 0) {
+		num += 8;
+		byte >>= 8;
+	}
+	if ((byte & 0xf) == 0) {
+		num += 4;
+		byte >>= 4;
+	}
+	if ((byte & 0x3) == 0) {
+		num += 2;
+		byte >>= 2;
+	}
+	if ((byte & 0x1) == 0)
+		num += 1;
+	return num;
+}
+
+int
+xc_bitmap_find_next_bit( const uint8_t *addr, uint32_t size, uint32_t offset)
+{
+    const uint8_t *p;
+    uint32_t result;
+    uint8_t tmp;
+
+    if (offset >= size)
+        return size;
+
+    p = addr + XC_BITMAP_BYTE(offset);
+    result = offset & ~(XC_BITS_PER_BYTE-1);
+
+    size -= result;
+    offset %= XC_BITS_PER_BYTE;
+    if (offset) {
+        tmp = *(p++);
+        tmp &= (0xff << offset);
+        if (size < XC_BITS_PER_BYTE)
+            goto found_first;
+        if (tmp)
+            goto found_middle;
+        size -= XC_BITS_PER_BYTE;
+        result += XC_BITS_PER_BYTE;
+    }
+    while (size & ~(XC_BITS_PER_BYTE-1)) {
+        if ((tmp = *(p++)))
+            goto found_middle;
+        result += XC_BITS_PER_BYTE;
+        size -= XC_BITS_PER_BYTE;
+    }
+    if (!size)
+        return result;
+    tmp = *p;
+
+found_first:
+    tmp &= (0xff >> (XC_BITS_PER_BYTE - size));
+    if (!tmp)
+        return result+size;
+found_middle:
+    return result + __xc_ffs(tmp);
+}
+
+void __xc_bitmap_and(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    int k;
+    int nr = XC_BITS_TO_BYTES(nbits);
+
+    for (k=0; k<nr; k++)
+        dp[k] = s1p[k] & s2p[k];
+}
+
+void __xc_bitmap_or(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    int k;
+    int nr = XC_BITS_TO_BYTES(nbits);
+
+    for (k=0; k<nr; k++)
+        dp[k] = s1p[k] | s2p[k];
+}
+
+void __xc_bitmap_xor(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    int k;
+    int nr = XC_BITS_TO_BYTES(nbits);
+
+    for (k=0; k<nr; k++)
+        dp[k] = s1p[k] ^ s2p[k];
+}
+
+void __xc_bitmap_andnot(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    int k;
+    int nr = XC_BITS_TO_BYTES(nbits);
+
+    for (k=0; k<nr; k++)
+        dp[k] = s1p[k] & ~s2p[k];
+}
+
+void __xc_bitmap_complement(uint8_t *dp, uint8_t *sp, int nbits)
+{
+    int k, lim = nbits/XC_BITS_PER_BYTE;
+    for (k=0; k<lim; k++)
+        dp[k] = ~sp[k];
+
+    if (nbits % XC_BITS_PER_BYTE)
+        dp[k] = ~sp[k] & XC_BITMAP_LAST_BYTE_MASK(nbits);
+}
+
+int __xc_bitmap_equal(uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    int k, lim = nbits/XC_BITS_PER_BYTE;
+    for (k=0; k<lim; k++)
+        if (s1p[k] != s2p[k])
+            return 0;
+
+    if (nbits % XC_BITS_PER_BYTE)
+        if ((s1p[k] ^ s2p[k]) & XC_BITMAP_LAST_BYTE_MASK(nbits))
+            return 0;
+
+    return 1;
+}
+
+int __xc_bitmap_intersects(uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    int k, lim = nbits/XC_BITS_PER_BYTE;
+    for (k=0; k<lim; k++)
+        if (s1p[k] & s2p[k])
+            return 1;
+
+    if (nbits % XC_BITS_PER_BYTE)
+        if ((s1p[k] & s2p[k]) & XC_BITMAP_LAST_BYTE_MASK(nbits))
+            return 1;
+
+    return 0;
+}
+
+int __xc_bitmap_subset(uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    int k, lim = nbits/XC_BITS_PER_BYTE;
+    for (k=0; k<lim; k++)
+        if (s1p[k] & ~s2p[k])
+            return 0;
+
+    if (nbits % XC_BITS_PER_BYTE)
+        if ((s1p[k] & ~s2p[k]) & XC_BITMAP_LAST_BYTE_MASK(nbits))
+            return 0;
+
+    return 1;
+}
+
+int __xc_bitmap_empty(uint8_t *sp, int nbits)
+{
+    int k, lim = nbits/XC_BITS_PER_BYTE;
+    for (k=0; k<lim; k++)
+        if (sp[k])
+            return 0;
+
+    if (nbits % XC_BITS_PER_BYTE)
+        if (sp[k] & XC_BITMAP_LAST_BYTE_MASK(nbits))
+            return 0;
+
+    return 1;
+}
+
+int __xc_bitmap_full(uint8_t *sp, int nbits)
+{
+    int k, lim = nbits/XC_BITS_PER_BYTE;
+    for (k=0; k<lim; k++)
+        if (~sp[k] & XC_BITMAP_BYTE_MASK)
+            return 0;
+
+    if (nbits % XC_BITS_PER_BYTE)
+        if (~sp[k] & XC_BITMAP_LAST_BYTE_MASK(nbits))
+            return 0;
+
+    return 1;
+}
+
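+/* hweight8: population count (number of set bits) of a byte. */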
+static inline uint8_t hweight8(uint8_t w)
+{
+    uint8_t res = (w & 0x55) + ((w >> 1) & 0x55);
+    res = (res & 0x33) + ((res >> 2) & 0x33);
+    return (res & 0x0F) + ((res >> 4) & 0x0F);
+}
+
+int __xc_bitmap_weight(const uint8_t *sp, int nbits)
+{
+    int k, w = 0, lim = nbits/XC_BITS_PER_BYTE;
+
+    for (k=0; k <lim; k++)
+        w += hweight8(sp[k]);
+
+    if (nbits % XC_BITS_PER_BYTE)
+        w += hweight8(sp[k] & XC_BITMAP_LAST_BYTE_MASK(nbits));
+
+    return w;
+}
+
+/* xenctl_cpumask print functions */
+#define CHUNKSZ	8
+#define roundup_power2(val,modulus)	(((val) + (modulus) - 1) & ~((modulus) - 1))
+
+int xc_bitmap_snprintf(char *buf, unsigned int buflen,
+	const uint8_t *maskp, int nmaskbits)
+{
+	int i, word, bit, len = 0;
+	unsigned long val;
+	const char *sep = "";
+	int chunksz;
+	uint8_t chunkmask;
+
+	chunksz = nmaskbits & (CHUNKSZ - 1);
+	if (chunksz == 0)
+		chunksz = CHUNKSZ;
+
+	i = roundup_power2(nmaskbits, CHUNKSZ) - CHUNKSZ;
+	for (; i >= 0; i -= CHUNKSZ) {
+		chunkmask = ((1ULL << chunksz) - 1);
+		word = i / XC_BITS_PER_BYTE;
+		bit = i % XC_BITS_PER_BYTE;
+		val = (maskp[word] >> bit) & chunkmask;
+		len += snprintf(buf+len, buflen-len, "%s%0*lx", sep,
+			(chunksz+3)/4, val);
+		chunksz = CHUNKSZ;
+		sep = ",";
+	}
+	return len;
+}
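+
+/*
+ * Example (illustrative): for nmaskbits = 16 and mask bytes
+ * { 0x0f, 0x00 } (i.e. cpus 0-3 set), the buffer receives "00,0f":
+ * most-significant 8-bit chunk first, chunks separated by commas.
+ */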
+
+
diff -r 04cb0829d138 tools/libxc/xc_bitmap.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/libxc/xc_bitmap.h	Tue Mar 23 12:50:32 2010 -0400
@@ -0,0 +1,193 @@
+#ifndef __XENCTL_BITMAP_H
+#define __XENCTL_BITMAP_H
+
+#include <stdint.h>
+#include <string.h>
+
+#define XC_BITS_PER_BYTE 8
+#define XC_BITS_TO_BYTES(bits) \
+    (((bits)+XC_BITS_PER_BYTE-1)/XC_BITS_PER_BYTE)
+#define XC_BITMAP_BIT(nr)   (1 << (nr))
+#define XC_BITMAP_BIT_MASK(nr)  (1 << ((nr) % XC_BITS_PER_BYTE))
+#define XC_BITMAP_BYTE(nr)  ((nr) / XC_BITS_PER_BYTE)
+
+#define XC_BITMAP_BYTE_MASK (0xff)
+#define XC_BITMAP_LAST_BYTE_MASK(nbits)					\
+	(((nbits) % XC_BITS_PER_BYTE) ?		                \
+		((1<<((nbits) % XC_BITS_PER_BYTE))-1) :         \
+                            XC_BITMAP_BYTE_MASK)
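+/* e.g. XC_BITMAP_LAST_BYTE_MASK(10) == 0x03, XC_BITMAP_LAST_BYTE_MASK(16) == 0xff */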
+
+#define xc_bitmap_find_first_bit(addr, size)        \
+            xc_bitmap_find_next_bit(addr, size, 0)
+extern int
+xc_bitmap_find_next_bit(const uint8_t *addr, uint32_t size, uint32_t offset);
+
+#define xc_bitmap_find_first_zero_bit(addr, size) \
+            xc_bitmap_find_next_zero_bit(addr, size, 0)
+extern int xc_bitmap_find_next_zero_bit(
+        const uint8_t *addr, uint32_t size, uint32_t offset);
+
+extern void __xc_bitmap_and(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits);
+extern void __xc_bitmap_or(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits);
+extern void __xc_bitmap_xor(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits);
+extern void
+__xc_bitmap_andnot(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits);
+extern void __xc_bitmap_complement(uint8_t *dp, uint8_t *sp, int nbits);
+extern int __xc_bitmap_equal(uint8_t *s1p, uint8_t *s2p, int nbits);
+extern int __xc_bitmap_intersects(uint8_t *s1p, uint8_t *s2p, int nbits);
+extern int __xc_bitmap_subset(uint8_t *s1p, uint8_t *s2p, int nbits);
+extern int __xc_bitmap_empty(uint8_t *sp, int nbits);
+extern int __xc_bitmap_full(uint8_t *sp, int nbits);
+extern int __xc_bitmap_weight(const uint8_t *sp, int nbits);
+
+extern int xc_bitmap_snprintf(char *buf, unsigned int buflen,
+	const uint8_t *maskp, int nmaskbits);
+
+
+static inline void xc_bitmap_set_bit(int nr, volatile uint8_t *addr)
+{
+    uint8_t mask = XC_BITMAP_BIT_MASK(nr);
+    uint8_t *p = ((uint8_t *)addr) + XC_BITMAP_BYTE(nr);
+    *p |= mask;
+}
+
+static inline void xc_bitmap_clear_bit(int nr, volatile uint8_t *addr)
+{
+    uint8_t mask = XC_BITMAP_BIT_MASK(nr);
+    uint8_t *p = ((uint8_t *)addr) + XC_BITMAP_BYTE(nr);
+    *p &= ~mask;
+}
+
+static inline int xc_bitmap_test_bit(int nr, volatile uint8_t *addr)
+{
+    uint8_t mask = XC_BITMAP_BIT_MASK(nr);
+    uint8_t *p = ((uint8_t *)addr) + XC_BITMAP_BYTE(nr);
+    return *p & mask;
+}
+
+static inline void xc_bitmap_fill(uint8_t *dp, int nbits)
+{
+    size_t nbytes = XC_BITS_TO_BYTES(nbits);
+    if (nbytes > 1)
+        memset(dp, 0xff, nbytes-1); 
+    dp[nbytes-1] = XC_BITMAP_LAST_BYTE_MASK(nbits);
+}
+
+static inline void xc_bitmap_zero(uint8_t *dp, int nbits)
+{
+    size_t nbytes = XC_BITS_TO_BYTES(nbits);
+    if (nbytes > 1)
+        memset(dp, 0x00, nbytes-1); 
+    dp[nbytes-1] = 0;
+}
+
+
+static inline void
+xc_bitmap_and(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    if (nbits <= XC_BITS_PER_BYTE)
+        *dp = *s1p & *s2p;
+    else
+        __xc_bitmap_and(dp, s1p, s2p, nbits);
+}
+
+static inline void
+xc_bitmap_or(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    if (nbits <= XC_BITS_PER_BYTE)
+        *dp = *s1p | *s2p;
+    else
+        __xc_bitmap_or(dp, s1p, s2p, nbits);
+}
+
+static inline void
+xc_bitmap_xor(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    if (nbits <= XC_BITS_PER_BYTE)
+        *dp = *s1p ^ *s2p;
+    else
+        __xc_bitmap_xor(dp, s1p, s2p, nbits);
+}
+
+static inline void
+xc_bitmap_andnot(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    if (nbits <= XC_BITS_PER_BYTE)
+        *dp = *s1p & ~(*s2p);
+    else
+        __xc_bitmap_andnot(dp, s1p, s2p, nbits);
+}
+
+static inline void
+xc_bitmap_complement(uint8_t *dp, uint8_t *sp, int nbits)
+{
+    if (nbits <= XC_BITS_PER_BYTE)
+        *dp = ~(*sp) & XC_BITMAP_LAST_BYTE_MASK(nbits);
+    else
+        __xc_bitmap_complement(dp, sp, nbits);
+}
+
+static inline int
+xc_bitmap_equal(uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    if (nbits <= XC_BITS_PER_BYTE)
+        return !((*s1p ^ *s2p) & XC_BITMAP_LAST_BYTE_MASK(nbits));
+
+    return __xc_bitmap_equal(s1p, s2p, nbits);
+}
+
+static inline int
+xc_bitmap_intersects(uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    if (nbits <= XC_BITS_PER_BYTE)
+        return ((*s1p & *s2p) & XC_BITMAP_LAST_BYTE_MASK(nbits));
+
+    return __xc_bitmap_intersects(s1p, s2p, nbits);
+}
+
+static inline int
+xc_bitmap_subset(uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    if (nbits <= XC_BITS_PER_BYTE)
+        return !((*s1p & ~(*s2p)) & XC_BITMAP_LAST_BYTE_MASK(nbits));
+
+    return __xc_bitmap_subset(s1p, s2p, nbits);
+}
+
+static inline int
+xc_bitmap_empty(uint8_t *sp, int nbits)
+{
+    if (nbits <= XC_BITS_PER_BYTE)
+        return !(*sp & XC_BITMAP_LAST_BYTE_MASK(nbits));
+    else
+        return __xc_bitmap_empty(sp, nbits);
+}
+
+static inline int
+xc_bitmap_full(uint8_t *sp, int nbits)
+{
+    if (nbits <= XC_BITS_PER_BYTE)
+        return !(~(*sp) & XC_BITMAP_LAST_BYTE_MASK(nbits));
+    else
+        return __xc_bitmap_full(sp, nbits);
+}
+
+static inline uint32_t
+xc_bitmap_weight(const uint8_t *sp, int nbits)
+{
+    return __xc_bitmap_weight(sp, nbits);
+}
+
+
+static inline void
+xc_bitmap_copy(uint8_t *dp, const uint8_t *sp, int nbits)
+{
+    if (nbits <= XC_BITS_PER_BYTE)
+        *dp = *sp;
+    else
+        memcpy(dp, sp, XC_BITS_TO_BYTES(nbits));
+}
+
+#endif
diff -r 04cb0829d138 tools/libxc/xc_cpumap.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/libxc/xc_cpumap.h	Tue Mar 23 12:50:32 2010 -0400
@@ -0,0 +1,274 @@
+#ifndef __XENCTL_CPUMAP_H
+#define __XENCTL_CPUMAP_H
+
+#include "xc_private.h"
+#include "xc_bitmap.h"
+
+#define xc_cpumap_bits(maskp)                                           \
+                    ({  uint8_t *bitmap;                                \
+                        get_xen_guest_handle(bitmap, (maskp)->bitmap);  \
+                        bitmap; })
+#define xc_cpumap_len(maskp) ((maskp)->nr_cpus)
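+
+/*
+ * xc_cpumap_bits() pulls the raw byte array out of the map's guest
+ * handle so the xc_bitmap_* helpers can operate on it directly;
+ * xc_cpumap_len() is the map's length in bits.
+ */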
+
+/* Number of cpus set in the bitmap */
+#define xc_cpumap_num_cpus(mask)	xc_cpumap_weight(mask)
+
+/**
+ * xc_cpumap_first - get the first cpu in a xenctl_cpumap
+ * @srcp: the xenctl_cpumap pointer
+ *
+ * Returns >= xc_cpumap_len(srcp) if no cpus set.
+ */
+static inline unsigned int
+xc_cpumap_first(struct xenctl_cpumap *srcp)
+{
+	return xc_bitmap_find_first_bit(xc_cpumap_bits(srcp),
+                                                xc_cpumap_len(srcp));
+}
+
+/**
+ * xc_cpumap_next - get the next cpu in a xenctl_cpumap
+ * @n: the cpu prior to the place to search (ie. return will be > @n)
+ * @srcp: the xenctl_cpumap pointer
+ *
+ * Returns >= xc_cpumap_len(srcp) if no further cpus set.
+ */
+static inline uint32_t
+xc_cpumap_next(int n, struct xenctl_cpumap *srcp)
+{
+	return xc_bitmap_find_next_bit(xc_cpumap_bits(srcp),
+                                        xc_cpumap_len(srcp), n+1);
+}
+
+#if 0
+static inline uint32_t
+xc_cpumap_next_zero(int n, struct xenctl_cpumap *srcp)
+{
+	return xc_bitmap_find_next_zero_bit(xc_cpumap_bits(srcp),
+                                        xc_cpumap_len(srcp), n+1);
+}
+#endif
+
+/**
+ * xc_for_each_cpu - iterate over every cpu in a mask
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask: the xenctl_cpumap pointer
+ *
+ * After the loop, cpu is >= xc_cpumap_len(mask)
+ */
+#define xc_for_each_cpu(cpu, mask)				\
+            __xc_for_each_cpu(cpu, &(mask))
+
+#define __xc_for_each_cpu(cpu, mask)            \
+	for ((cpu) = -1;				            \
+		(cpu) = xc_cpumap_next((cpu), (mask)),	\
+		(cpu) < xc_cpumap_len(mask);)
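+/*
+ * Example (illustrative, hypothetical caller):
+ *     int cpu;
+ *     xc_for_each_cpu(cpu, map)
+ *         printf("cpu %d is set\n", cpu);
+ */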
+
+
+#define xc_cpumap_equal(src1, src2) __xc_cpumap_equal(&(src1), &(src2))
+static inline int
+__xc_cpumap_equal(struct xenctl_cpumap *s1p, struct xenctl_cpumap *s2p)
+{
+	return xc_bitmap_equal(xc_cpumap_bits(s1p), xc_cpumap_bits(s2p), 
+                                                xc_cpumap_len(s1p));
+}
+
+#define xc_cpumap_set_cpu(cpu, dst) __xc_cpumap_set_cpu(cpu, &(dst))
+static inline void __xc_cpumap_set_cpu(int cpu, struct xenctl_cpumap *dstp)
+{
+	xc_bitmap_set_bit(cpu, xc_cpumap_bits(dstp));
+}
+
+#define xc_cpumap_clear_cpu(cpu, dst) __xc_cpumap_clear_cpu(cpu, &(dst))
+static inline void __xc_cpumap_clear_cpu(int cpu, struct xenctl_cpumap *dstp)
+{
+	xc_bitmap_clear_bit(cpu, xc_cpumap_bits(dstp));
+}
+
+#define xc_cpumap_test_cpu(cpu, dst) __xc_cpumap_test_cpu(cpu, &(dst))
+static inline int __xc_cpumap_test_cpu(int cpu, struct xenctl_cpumap *dstp)
+{
+    return xc_bitmap_test_bit(cpu, xc_cpumap_bits(dstp));
+}
+
+
+#define xc_cpumap_setall(dst) __xc_cpumap_setall(&(dst))
+static inline void __xc_cpumap_setall(struct xenctl_cpumap *dstp)
+{
+	xc_bitmap_fill(xc_cpumap_bits(dstp), xc_cpumap_len(dstp));
+}
+
+#define xc_cpumap_clearall(dst) __xc_cpumap_clearall(&(dst))
+static inline void __xc_cpumap_clearall(struct xenctl_cpumap *dstp)
+{
+	xc_bitmap_zero(xc_cpumap_bits(dstp), xc_cpumap_len(dstp));
+}
+
+#define xc_cpumap_and(dst, src1, src2) \
+                        __xc_cpumap_and(&(dst), &(src1), &(src2))
+static inline void __xc_cpumap_and(struct xenctl_cpumap *dstp,
+        struct xenctl_cpumap *src1p, struct xenctl_cpumap *src2p)
+{
+	xc_bitmap_and(xc_cpumap_bits(dstp), xc_cpumap_bits(src1p),
+                                xc_cpumap_bits(src2p), xc_cpumap_len(dstp));
+}
+
+#define xc_cpumap_or(dst, src1, src2) \
+                        __xc_cpumap_or(&(dst), &(src1), &(src2))
+static inline void __xc_cpumap_or(struct xenctl_cpumap *dstp,
+        struct xenctl_cpumap *src1p, struct xenctl_cpumap *src2p)
+{
+	xc_bitmap_or(xc_cpumap_bits(dstp), xc_cpumap_bits(src1p),
+				                xc_cpumap_bits(src2p), xc_cpumap_len(dstp));
+}
+
+#define xc_cpumap_xor(dst, src1, src2) \
+                        __xc_cpumap_xor(&(dst), &(src1), &(src2))
+static inline void __xc_cpumap_xor(struct xenctl_cpumap *dstp,
+        struct xenctl_cpumap *src1p, struct xenctl_cpumap *src2p)
+{
+	xc_bitmap_xor(xc_cpumap_bits(dstp), xc_cpumap_bits(src1p),
+                        xc_cpumap_bits(src2p), xc_cpumap_len(dstp));
+}
+
+#define xc_cpumap_andnot(dst, src1, src2) \
+                        __xc_cpumap_andnot(&(dst), &(src1), &(src2))
+static inline void __xc_cpumap_andnot(struct xenctl_cpumap *dstp,
+    struct xenctl_cpumap *src1p, struct xenctl_cpumap *src2p)
+{
+	xc_bitmap_andnot(xc_cpumap_bits(dstp), xc_cpumap_bits(src1p),
+                        xc_cpumap_bits(src2p), xc_cpumap_len(dstp));
+}
+
+#define xc_cpumap_complement(dst, src) \
+                        __xc_cpumap_complement(&(dst), &(src))
+static inline void __xc_cpumap_complement(struct xenctl_cpumap *dstp,
+				                        struct xenctl_cpumap *srcp)
+{
+	xc_bitmap_complement(xc_cpumap_bits(dstp), xc_cpumap_bits(srcp),
+                                                xc_cpumap_len(dstp));
+}
+
+#define xc_cpumap_intersects(src1, src2) \
+                        __xc_cpumap_intersects(&(src1), &(src2))
+static inline int __xc_cpumap_intersects(struct xenctl_cpumap *src1p,
+				                            struct xenctl_cpumap *src2p)
+{
+	return xc_bitmap_intersects(xc_cpumap_bits(src1p), xc_cpumap_bits(src2p),
+                                                        xc_cpumap_len(src1p));
+}
+
+#define xc_cpumap_subset(src1, src2) \
+                        __xc_cpumap_subset(&(src1), &(src2))
+static inline int __xc_cpumap_subset(struct xenctl_cpumap *src1p,
+				                        struct xenctl_cpumap *src2p)
+{
+	return xc_bitmap_subset(xc_cpumap_bits(src1p), xc_cpumap_bits(src2p),
+                                                        xc_cpumap_len(src1p));
+}
+
+#define xc_cpumap_empty(src) __xc_cpumap_empty(&(src))
+static inline int __xc_cpumap_empty(struct xenctl_cpumap *srcp)
+{
+	return xc_bitmap_empty(xc_cpumap_bits(srcp), xc_cpumap_len(srcp));
+}
+
+#define xc_cpumap_full(src) __xc_cpumap_full(&(src))
+static inline int __xc_cpumap_full(struct xenctl_cpumap *srcp)
+{
+	return xc_bitmap_full(xc_cpumap_bits(srcp), xc_cpumap_len(srcp));
+}
+
+#define xc_cpumap_weight(src) __xc_cpumap_weight(&(src))
+static inline uint32_t __xc_cpumap_weight(struct xenctl_cpumap *srcp)
+{
+	return xc_bitmap_weight(xc_cpumap_bits(srcp), xc_cpumap_len(srcp));
+}
+
+#define xc_cpumap_copy(dst, src) __xc_cpumap_copy(&(dst), &(src))
+static inline void __xc_cpumap_copy(struct xenctl_cpumap *dstp,
+				                    struct xenctl_cpumap *srcp)
+{
+	xc_bitmap_copy(xc_cpumap_bits(dstp), xc_cpumap_bits(srcp),
+                                                xc_cpumap_len(dstp));
+}
+
+#if 0
+#define XC_CPUMASK_LAST_BYTE XC_BITMAP_LAST_BYTE_MASK(XENCTL_NR_CPUS)
+
+#define XC_CPUMASK_ALL							                    \
+/*(xenctl_cpumap)*/ { {							                \
+	[0 ... XC_BITS_TO_BYTES(XENCTL_NR_CPUS)-2] = 0xff,		        \
+	[XC_BITS_TO_BYTES(XENCTL_NR_CPUS)-1] = XC_CPUMASK_LAST_BYTE    \
+} }
+
+#define XC_CPUMASK_NONE							            \
+/*(xenctl_cpumap)*/ { {							        \
+	[0 ... XC_BITS_TO_BYTES(XENCTL_NR_CPUS)-1] =  0		    \
+} }
+#endif
+
+#define xc_cpumap_snprintf(buf, len, src) \
+			__xc_cpumap_snprintf((buf), (len), &(src), XENCTL_NR_CPUS)
+static inline int __xc_cpumap_snprintf(char *buf, int len,
+					        const struct xenctl_cpumap *srcp, int nbits)
+{
+	return xc_bitmap_snprintf(buf, len, xc_cpumap_bits(srcp), nbits);
+}
+
+/***********************************************************************/
+
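+/*
+ * Allocate and zero a cpumap sized for this host ((max_cpu_id+1) bits).
+ * On success the caller owns the bitmap and must release it with
+ * xc_cpumap_free_bitmap().
+ */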
+static inline int
+xc_cpumap_allocz_bitmap(int xc_handle, struct xenctl_cpumap *map)
+{
+    int nr_cpus;
+    uint8_t *bitmap;
+    xc_physinfo_t pinfo = { 0 };
+
+    if (xc_physinfo(xc_handle, &pinfo))
+        goto failed;
+  
+    nr_cpus = pinfo.max_cpu_id+1;
+    if (!(bitmap = malloc(XC_BITS_TO_BYTES(nr_cpus))))
+        goto failed;
+
+    xc_bitmap_zero(bitmap, nr_cpus);
+    map->nr_cpus = nr_cpus;
+    set_xen_guest_handle(map->bitmap, bitmap);
+    return 0;
+failed:
+    return -1;
+}
+
+static inline void
+xc_cpumap_free_bitmap(struct xenctl_cpumap *map)
+{
+    uint8_t *bitmap;
+    get_xen_guest_handle(bitmap, map->bitmap);
+    free(bitmap);
+}
+
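+/*
+ * The bitmap is handed to Xen by reference in the domctl, so it must be
+ * locked in memory across the hypercall and unlocked once it returns.
+ */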
+static inline int
+xc_cpumap_lock_pages(struct xenctl_cpumap *map)
+{
+    uint8_t *bitmap;
+    uint32_t nr_bytes = XC_BITS_TO_BYTES(map->nr_cpus);
+
+    get_xen_guest_handle(bitmap, map->bitmap);
+   
+    if (lock_pages(bitmap, nr_bytes))
+        return -1;
+    return 0;
+}
+
+static inline void
+xc_cpumap_unlock_pages(struct xenctl_cpumap *map)
+{
+    uint8_t *bitmap;
+    uint32_t nr_bytes = XC_BITS_TO_BYTES(map->nr_cpus);
+
+    get_xen_guest_handle(bitmap, map->bitmap);
+    unlock_pages(bitmap, nr_bytes);
+}
+
+#endif /* __XENCTL_CPUMAP_H */
diff -r 04cb0829d138 tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c	Wed Mar 17 14:10:43 2010 +0000
+++ b/tools/libxc/xc_domain.c	Tue Mar 23 12:50:32 2010 -0400
@@ -8,6 +8,7 @@
 
 #include "xc_private.h"
 #include "xg_save_restore.h"
+#include "xc_cpumap.h"
 #include <xen/memory.h>
 #include <xen/hvm/hvm_op.h>
 
@@ -98,28 +99,17 @@
 int xc_vcpu_setaffinity(int xc_handle,
                         uint32_t domid,
                         int vcpu,
-                        uint64_t *cpumap, int cpusize)
+                        struct xenctl_cpumap *cpumap)
 {
     DECLARE_DOMCTL;
     int ret = -1;
-    uint8_t *local = malloc(cpusize); 
 
-    if(local == NULL)
-    {
-        PERROR("Could not alloc memory for Xen hypercall");
-        goto out;
-    }
     domctl.cmd = XEN_DOMCTL_setvcpuaffinity;
     domctl.domain = (domid_t)domid;
-    domctl.u.vcpuaffinity.vcpu    = vcpu;
+    domctl.u.vcpuaffinity.vcpu = vcpu;
+    domctl.u.vcpuaffinity.cpumap = *cpumap;
 
-    bitmap_64_to_byte(local, cpumap, cpusize * 8);
-
-    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
-
-    domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8;
-    
-    if ( lock_pages(local, cpusize) != 0 )
+    if (xc_cpumap_lock_pages(cpumap))
     {
         PERROR("Could not lock memory for Xen hypercall");
         goto out;
@@ -127,10 +117,9 @@
 
     ret = do_domctl(xc_handle, &domctl);
 
-    unlock_pages(local, cpusize);
+    xc_cpumap_unlock_pages(cpumap);
 
  out:
-    free(local);
     return ret;
 }
 
@@ -138,28 +127,18 @@
 int xc_vcpu_getaffinity(int xc_handle,
                         uint32_t domid,
                         int vcpu,
-                        uint64_t *cpumap,
-                        int cpusize)
+                        struct xenctl_cpumap *cpumap)
 {
     DECLARE_DOMCTL;
     int ret = -1;
-    uint8_t * local = malloc(cpusize);
-
-    if(local == NULL)
-    {
-        PERROR("Could not alloc memory for Xen hypercall");
-        goto out;
-    }
 
     domctl.cmd = XEN_DOMCTL_getvcpuaffinity;
     domctl.domain = (domid_t)domid;
     domctl.u.vcpuaffinity.vcpu = vcpu;
+    domctl.u.vcpuaffinity.cpumap = *cpumap;
 
 
-    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
-    domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8;
-    
-    if ( lock_pages(local, sizeof(local)) != 0 )
+    if (xc_cpumap_lock_pages(cpumap))
     {
         PERROR("Could not lock memory for Xen hypercall");
         goto out;
@@ -167,10 +146,8 @@
 
     ret = do_domctl(xc_handle, &domctl);
 
-    unlock_pages(local, sizeof (local));
-    bitmap_byte_to_64(cpumap, local, cpusize * 8);
+    xc_cpumap_unlock_pages(cpumap);
 out:
-    free(local);
     return ret;
 }
 
diff -r 04cb0829d138 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h	Wed Mar 17 14:10:43 2010 +0000
+++ b/tools/libxc/xenctrl.h	Tue Mar 23 12:50:32 2010 -0400
@@ -309,13 +309,11 @@
 int xc_vcpu_setaffinity(int xc_handle,
                         uint32_t domid,
                         int vcpu,
-                        uint64_t *cpumap,
-                        int cpusize);
+                        struct xenctl_cpumap *cpumap);
 int xc_vcpu_getaffinity(int xc_handle,
                         uint32_t domid,
                         int vcpu,
-                        uint64_t *cpumap,
-                        int cpusize);
+                        struct xenctl_cpumap *cpumap);
 
 /**
  * This function will return information about one or more domains. It is
diff -r 04cb0829d138 tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c	Wed Mar 17 14:10:43 2010 +0000
+++ b/tools/python/xen/lowlevel/xc/xc.c	Tue Mar 23 12:50:32 2010 -0400
@@ -23,6 +23,7 @@
 #include "xc_dom.h"
 #include <xen/hvm/hvm_info_table.h>
 #include <xen/hvm/params.h>
+#include "xc_cpumap.h"
 
 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
 
@@ -215,12 +216,8 @@
 {
     uint32_t dom;
     int vcpu = 0, i;
-    uint64_t  *cpumap;
+    struct xenctl_cpumap cpumap;
     PyObject *cpulist = NULL;
-    int nr_cpus, size;
-    xc_physinfo_t info; 
-    xc_cpu_to_node_t map[1];
-    uint64_t cpumap_size = sizeof(cpumap); 
 
     static char *kwd_list[] = { "domid", "vcpu", "cpumap", NULL };
     
@@ -229,40 +226,26 @@
                                       &dom, &vcpu, &cpulist) )
         return NULL;
 
-    set_xen_guest_handle(info.cpu_to_node, map);
-    info.max_cpu_id = 1;
-    if ( xc_physinfo(self->xc_handle, &info) != 0 )
+    if (xc_cpumap_allocz_bitmap(self->xc_handle, &cpumap))
         return pyxc_error_to_exception();
-  
-    nr_cpus = info.nr_cpus;
-
-    size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8);
-    cpumap = malloc(cpumap_size * size);
-    if(cpumap == NULL)
-        return pyxc_error_to_exception();
-    
 
     if ( (cpulist != NULL) && PyList_Check(cpulist) )
     {
-        for ( i = 0; i < size; i++)
-        {
-            cpumap[i] = 0ULL;
-        }
         for ( i = 0; i < PyList_Size(cpulist); i++ ) 
         {
             long cpu = PyInt_AsLong(PyList_GetItem(cpulist, i));
-            *(cpumap + cpu / (cpumap_size * 8)) |= (uint64_t)1 << (cpu % (cpumap_size * 8));
+            xc_cpumap_set_cpu(cpu, cpumap);
         }
     }
   
-    if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap, size * cpumap_size) != 0 )
+    if (xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, &cpumap))
     {
-        free(cpumap);
+        xc_cpumap_free_bitmap(&cpumap);
         return pyxc_error_to_exception();
     }
 
     Py_INCREF(zero);
-    free(cpumap); 
+    xc_cpumap_free_bitmap(&cpumap);
     return zero;
 }
 
@@ -381,11 +364,7 @@
     uint32_t dom, vcpu = 0;
     xc_vcpuinfo_t info;
     int rc, i;
-    uint64_t *cpumap;
-    int nr_cpus, size;
-    xc_physinfo_t pinfo = { 0 };
-    xc_cpu_to_node_t map[1];
-    uint64_t cpumap_size = sizeof(cpumap);
+    struct xenctl_cpumap cpumap;
 
     static char *kwd_list[] = { "domid", "vcpu", NULL };
     
@@ -393,23 +372,14 @@
                                       &dom, &vcpu) )
         return NULL;
 
-    set_xen_guest_handle(pinfo.cpu_to_node, map);
-    pinfo.max_cpu_id = 1;
-    if ( xc_physinfo(self->xc_handle, &pinfo) != 0 ) 
+    if ( xc_cpumap_allocz_bitmap(self->xc_handle, &cpumap) ) 
         return pyxc_error_to_exception();
-    nr_cpus = pinfo.nr_cpus;
-    rc = xc_vcpu_getinfo(self->xc_handle, dom, vcpu, &info);
-    if ( rc < 0 )
+    if ((rc = xc_vcpu_getinfo(self->xc_handle, dom, vcpu, &info)) < 0)
+    {
+        xc_cpumap_free_bitmap(&cpumap);
         return pyxc_error_to_exception();
+    }
-    size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8); 
 
-    if((cpumap = malloc(cpumap_size * size)) == NULL)
-        return pyxc_error_to_exception(); 
-
-    rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, cpumap, cpumap_size * size);
-    if ( rc < 0 )
+    if ((rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, &cpumap)) < 0)
     {
-        free(cpumap);
+        xc_cpumap_free_bitmap(&cpumap);
         return pyxc_error_to_exception();
     }
 
@@ -421,18 +391,15 @@
                               "cpu",      info.cpu);
 
     cpulist = PyList_New(0);
-    for ( i = 0; i < size * cpumap_size * 8; i++ )
+    xc_for_each_cpu(i, cpumap)
     {
-        if (*(cpumap + i / (cpumap_size * 8)) & 1 ) {
-            PyObject *pyint = PyInt_FromLong(i);
-            PyList_Append(cpulist, pyint);
-            Py_DECREF(pyint);
-        }
-        *(cpumap + i / (cpumap_size * 8)) >>= 1;
+        PyObject *pyint = PyInt_FromLong(i);
+        PyList_Append(cpulist, pyint);
+        Py_DECREF(pyint);
     }
     PyDict_SetItemString(info_dict, "cpumap", cpulist);
     Py_DECREF(cpulist);
-    free(cpumap);
+    xc_cpumap_free_bitmap(&cpumap);
     return info_dict;
 }
 

[-- Attachment #3: Type: text/plain, Size: 138 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Fwd: [PATCH] libxc bitmap utils and vcpu-affinity
  2010-03-23 16:55             ` Dulloor
@ 2010-03-30 14:42               ` Dulloor
  2010-03-30 15:16                 ` Keir Fraser
  0 siblings, 1 reply; 12+ messages in thread
From: Dulloor @ 2010-03-30 14:42 UTC (permalink / raw)
  To: xen-devel; +Cc: Keir Fraser, Jan Beulich

[-- Attachment #1: Type: text/plain, Size: 1840 bytes --]

Resubmitting the patch.

-dulloor

---------- Forwarded message ----------
From: Dulloor <dulloor@gmail.com>
Date: Tue, Mar 23, 2010 at 12:55 PM
Subject: Re: [Xen-devel][PATCH] libxc bitmap utils and vcpu-affinity
To: Keir Fraser <keir.fraser@eu.citrix.com>
Cc: Jan Beulich <JBeulich@novell.com>, "xen-devel@lists.xensource.com"
<xen-devel@lists.xensource.com>


Please use this patch, in which the length of the bitmap is
(physinfo.max_cpu_id+1) rather than (physinfo.nr_cpus).

-dulloor

On Tue, Mar 23, 2010 at 12:41 PM, Dulloor <dulloor@gmail.com> wrote:
> I meant utils for **xenctl_cpumap**
>
> On Tue, Mar 23, 2010 at 12:40 PM, Dulloor <dulloor@gmail.com> wrote:
>> Fine, I agree with you both. Attached is a patch adding utils for
>> xenctl_bitmap (to libxc) and using the same in vcpu_(get|set)affinity.
>> For the guest-numa interface, I will see if I can use xenctl_cpumap.
>>
>> -dulloor
>>
>> On Tue, Mar 23, 2010 at 7:05 AM, Keir Fraser <keir.fraser@eu.citrix.com> wrote:
>>> On 23/03/2010 10:10, "Jan Beulich" <JBeulich@novell.com> wrote:
>>>
>>>>>>> Dulloor <dulloor@gmail.com> 22.03.10 18:44 >>>
>>>>> Motivation for using xenctl_cpumask in Xen interfaces :
>>>>> - xenctl_cpumap is just 4 bytes smaller than static xenctl_cpumask for
>>>>> 128 cpus (128 would be good for quite some time). However, the new
>>>>
>>>> I don't buy this (we're already building for 256 CPUs, looking forward
>>>> to further bump this in the not too distant future), and I'm generally
>>>> opposed to introducing hard coded limits in a public interface.
>>>
>>> We should use xenctl_cpumask everywhere for specifying physical CPU bitmaps,
>>> even into guest NUMA interfaces if appropriate. I don't really care if it is
>>> a bit harder to use than a static bitmap.
>>>
>>>  -- Keir
>>>
>>>
>>>
>>
>

[-- Attachment #2: cpumap-utils.patch --]
[-- Type: text/x-patch, Size: 29200 bytes --]

diff -r 04cb0829d138 tools/libxc/Makefile
--- a/tools/libxc/Makefile	Wed Mar 17 14:10:43 2010 +0000
+++ b/tools/libxc/Makefile	Tue Mar 23 12:50:32 2010 -0400
@@ -25,6 +25,7 @@
 CTRL_SRCS-y       += xc_mem_event.c
 CTRL_SRCS-y       += xc_mem_paging.c
 CTRL_SRCS-y       += xc_memshr.c
+CTRL_SRCS-y       += xc_bitmap.c
 CTRL_SRCS-$(CONFIG_X86) += xc_pagetab.c
 CTRL_SRCS-$(CONFIG_Linux) += xc_linux.c
 CTRL_SRCS-$(CONFIG_SunOS) += xc_solaris.c
diff -r 04cb0829d138 tools/libxc/xc_bitmap.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/libxc/xc_bitmap.c	Tue Mar 23 12:50:32 2010 -0400
@@ -0,0 +1,250 @@
+#include "xc_bitmap.h"
+#include <stdio.h>
+
+/*
+ * xc_bitmap_find_next_bit is adapted from the definition of generic
+ * find_next_bit in Linux, with the following copyright.
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * Adapted for byte-based bitmap by Dulloor (dulloor@gatech.edu)
+ */
+
+/**
+ * __ffs - find first bit in byte.
+ * @byte: The byte to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static inline int __xc_ffs(uint8_t byte)
+{
+	int num = 0;
+
+	if ((byte & 0xff) == 0) {
+		num += 8;
+		byte >>= 8;
+	}
+	if ((byte & 0xf) == 0) {
+		num += 4;
+		byte >>= 4;
+	}
+	if ((byte & 0x3) == 0) {
+		num += 2;
+		byte >>= 2;
+	}
+	if ((byte & 0x1) == 0)
+		num += 1;
+	return num;
+}
+
+int
+xc_bitmap_find_next_bit( const uint8_t *addr, uint32_t size, uint32_t offset)
+{
+    const uint8_t *p;
+    uint32_t result;
+    uint8_t tmp;
+
+    if (offset >= size)
+        return size;
+
+    p = addr + XC_BITMAP_BYTE(offset);
+    result = offset & ~(XC_BITS_PER_BYTE-1);
+
+    size -= result;
+    offset %= XC_BITS_PER_BYTE;
+    if (offset) {
+        tmp = *(p++);
+        tmp &= (0xff << offset);
+        if (size < XC_BITS_PER_BYTE)
+            goto found_first;
+        if (tmp)
+            goto found_middle;
+        size -= XC_BITS_PER_BYTE;
+        result += XC_BITS_PER_BYTE;
+    }
+    while (size & ~(XC_BITS_PER_BYTE-1)) {
+        if ((tmp = *(p++)))
+            goto found_middle;
+        result += XC_BITS_PER_BYTE;
+        size -= XC_BITS_PER_BYTE;
+    }
+    if (!size)
+        return result;
+    tmp = *p;
+
+found_first:
+    tmp &= (0xff >> (XC_BITS_PER_BYTE - size));
+    if (!tmp)
+        return result+size;
+found_middle:
+    return result + __xc_ffs(tmp);
+}
+
+void __xc_bitmap_and(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    int k;
+    int nr = XC_BITS_TO_BYTES(nbits);
+
+    for (k=0; k<nr; k++)
+        dp[k] = s1p[k] & s2p[k];
+}
+
+void __xc_bitmap_or(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    int k;
+    int nr = XC_BITS_TO_BYTES(nbits);
+
+    for (k=0; k<nr; k++)
+        dp[k] = s1p[k] | s2p[k];
+}
+
+void __xc_bitmap_xor(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    int k;
+    int nr = XC_BITS_TO_BYTES(nbits);
+
+    for (k=0; k<nr; k++)
+        dp[k] = s1p[k] ^ s2p[k];
+}
+
+void __xc_bitmap_andnot(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    int k;
+    int nr = XC_BITS_TO_BYTES(nbits);
+
+    for (k=0; k<nr; k++)
+        dp[k] = s1p[k] & ~s2p[k];
+}
+
+void __xc_bitmap_complement(uint8_t *dp, uint8_t *sp, int nbits)
+{
+    int k, lim = nbits/XC_BITS_PER_BYTE;
+    for (k=0; k<lim; k++)
+        dp[k] = ~sp[k];
+
+    if (nbits % XC_BITS_PER_BYTE)
+        dp[k] = ~sp[k] & XC_BITMAP_LAST_BYTE_MASK(nbits);
+}
+
+int __xc_bitmap_equal(uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    int k, lim = nbits/XC_BITS_PER_BYTE;
+    for (k=0; k<lim; k++)
+        if (s1p[k] != s2p[k])
+            return 0;
+
+    if (nbits % XC_BITS_PER_BYTE)
+        if ((s1p[k] ^ s2p[k]) & XC_BITMAP_LAST_BYTE_MASK(nbits))
+            return 0;
+
+    return 1;
+}
+
+int __xc_bitmap_intersects(uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    int k, lim = nbits/XC_BITS_PER_BYTE;
+    for (k=0; k<lim; k++)
+        if (s1p[k] & s2p[k])
+            return 1;
+
+    if (nbits % XC_BITS_PER_BYTE)
+        if ((s1p[k] & s2p[k]) & XC_BITMAP_LAST_BYTE_MASK(nbits))
+            return 1;
+
+    return 0;
+}
+
+int __xc_bitmap_subset(uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    int k, lim = nbits/XC_BITS_PER_BYTE;
+    for (k=0; k<lim; k++)
+        if (s1p[k] & ~s2p[k])
+            return 0;
+
+    if (nbits % XC_BITS_PER_BYTE)
+        if ((s1p[k] & ~s2p[k]) & XC_BITMAP_LAST_BYTE_MASK(nbits))
+            return 0;
+
+    return 1;
+}
+
+int __xc_bitmap_empty(uint8_t *sp, int nbits)
+{
+    int k, lim = nbits/XC_BITS_PER_BYTE;
+    for (k=0; k<lim; k++)
+        if (sp[k])
+            return 0;
+
+    if (nbits % XC_BITS_PER_BYTE)
+        if (sp[k] & XC_BITMAP_LAST_BYTE_MASK(nbits))
+            return 0;
+
+    return 1;
+}
+
+int __xc_bitmap_full(uint8_t *sp, int nbits)
+{
+    int k, lim = nbits/XC_BITS_PER_BYTE;
+    for (k=0; k<lim; k++)
+        if (~sp[k] & XC_BITMAP_BYTE_MASK)
+            return 0;
+
+    if (nbits % XC_BITS_PER_BYTE)
+        if (~sp[k] & XC_BITMAP_LAST_BYTE_MASK(nbits))
+            return 0;
+
+    return 1;
+}
+
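+/* hweight8: population count (number of set bits) of a byte. */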
+static inline uint8_t hweight8(uint8_t w)
+{
+    uint8_t res = (w & 0x55) + ((w >> 1) & 0x55);
+    res = (res & 0x33) + ((res >> 2) & 0x33);
+    return (res & 0x0F) + ((res >> 4) & 0x0F);
+}
+
+int __xc_bitmap_weight(const uint8_t *sp, int nbits)
+{
+    int k, w = 0, lim = nbits/XC_BITS_PER_BYTE;
+
+    for (k=0; k <lim; k++)
+        w += hweight8(sp[k]);
+
+    if (nbits % XC_BITS_PER_BYTE)
+        w += hweight8(sp[k] & XC_BITMAP_LAST_BYTE_MASK(nbits));
+
+    return w;
+}
+
+/* xenctl_cpumask print functions */
+#define CHUNKSZ	8
+#define roundup_power2(val,modulus)	(((val) + (modulus) - 1) & ~((modulus) - 1))
+
+int xc_bitmap_snprintf(char *buf, unsigned int buflen,
+	const uint8_t *maskp, int nmaskbits)
+{
+	int i, word, bit, len = 0;
+	unsigned long val;
+	const char *sep = "";
+	int chunksz;
+	uint8_t chunkmask;
+
+	chunksz = nmaskbits & (CHUNKSZ - 1);
+	if (chunksz == 0)
+		chunksz = CHUNKSZ;
+
+	i = roundup_power2(nmaskbits, CHUNKSZ) - CHUNKSZ;
+	for (; i >= 0; i -= CHUNKSZ) {
+		chunkmask = ((1ULL << chunksz) - 1);
+		word = i / XC_BITS_PER_BYTE;
+		bit = i % XC_BITS_PER_BYTE;
+		val = (maskp[word] >> bit) & chunkmask;
+		len += snprintf(buf+len, buflen-len, "%s%0*lx", sep,
+			(chunksz+3)/4, val);
+		chunksz = CHUNKSZ;
+		sep = ",";
+	}
+	return len;
+}
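+
+/*
+ * Example (illustrative): for nmaskbits = 16 and mask bytes
+ * { 0x0f, 0x00 } (i.e. cpus 0-3 set), the buffer receives "00,0f":
+ * most-significant 8-bit chunk first, chunks separated by commas.
+ */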
+
+
diff -r 04cb0829d138 tools/libxc/xc_bitmap.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/libxc/xc_bitmap.h	Tue Mar 23 12:50:32 2010 -0400
@@ -0,0 +1,193 @@
+#ifndef __XENCTL_BITMAP_H
+#define __XENCTL_BITMAP_H
+
+#include <stdint.h>
+#include <string.h>
+
+#define XC_BITS_PER_BYTE 8
+#define XC_BITS_TO_BYTES(bits) \
+    (((bits)+XC_BITS_PER_BYTE-1)/XC_BITS_PER_BYTE)
+#define XC_BITMAP_BIT(nr)   (1 << (nr))
+#define XC_BITMAP_BIT_MASK(nr)  (1 << ((nr) % XC_BITS_PER_BYTE))
+#define XC_BITMAP_BYTE(nr)  ((nr) / XC_BITS_PER_BYTE)
+
+#define XC_BITMAP_BYTE_MASK (0xff)
+#define XC_BITMAP_LAST_BYTE_MASK(nbits)					\
+	(((nbits) % XC_BITS_PER_BYTE) ?		                \
+		((1<<((nbits) % XC_BITS_PER_BYTE))-1) :         \
+                            XC_BITMAP_BYTE_MASK)
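+/* e.g. XC_BITMAP_LAST_BYTE_MASK(10) == 0x03, XC_BITMAP_LAST_BYTE_MASK(16) == 0xff */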
+
+#define xc_bitmap_find_first_bit(addr, size)        \
+            xc_bitmap_find_next_bit(addr, size, 0)
+extern int
+xc_bitmap_find_next_bit(const uint8_t *addr, uint32_t size, uint32_t offset);
+
+#define xc_bitmap_find_first_zero_bit(addr, size) \
+            xc_bitmap_find_next_zero_bit(addr, size, 0)
+extern int xc_bitmap_find_next_zero_bit(
+        const uint8_t *addr, uint32_t size, uint32_t offset);
+
+extern void __xc_bitmap_and(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits);
+extern void __xc_bitmap_or(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits);
+extern void __xc_bitmap_xor(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits);
+extern void
+__xc_bitmap_andnot(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits);
+extern void __xc_bitmap_complement(uint8_t *dp, uint8_t *sp, int nbits);
+extern int __xc_bitmap_equal(uint8_t *s1p, uint8_t *s2p, int nbits);
+extern int __xc_bitmap_intersects(uint8_t *s1p, uint8_t *s2p, int nbits);
+extern int __xc_bitmap_subset(uint8_t *s1p, uint8_t *s2p, int nbits);
+extern int __xc_bitmap_empty(uint8_t *sp, int nbits);
+extern int __xc_bitmap_full(uint8_t *sp, int nbits);
+extern int __xc_bitmap_weight(const uint8_t *sp, int nbits);
+
+extern int xc_bitmap_snprintf(char *buf, unsigned int buflen,
+	const uint8_t *maskp, int nmaskbits);
+
+
+static inline void xc_bitmap_set_bit(int nr, volatile uint8_t *addr)
+{
+    uint8_t mask = XC_BITMAP_BIT_MASK(nr);
+    uint8_t *p = ((uint8_t *)addr) + XC_BITMAP_BYTE(nr);
+    *p |= mask;
+}
+
+static inline void xc_bitmap_clear_bit(int nr, volatile uint8_t *addr)
+{
+    uint8_t mask = XC_BITMAP_BIT_MASK(nr);
+    uint8_t *p = ((uint8_t *)addr) + XC_BITMAP_BYTE(nr);
+    *p &= ~mask;
+}
+
+static inline int xc_bitmap_test_bit(int nr, volatile uint8_t *addr)
+{
+    uint8_t mask = XC_BITMAP_BIT_MASK(nr);
+    uint8_t *p = ((uint8_t *)addr) + XC_BITMAP_BYTE(nr);
+    return *p & mask;
+}
+
+static inline void xc_bitmap_fill(uint8_t *dp, int nbits)
+{
+    size_t nbytes = XC_BITS_TO_BYTES(nbits);
+    if (nbytes > 1)
+        memset(dp, 0xff, nbytes-1); 
+    dp[nbytes-1] = XC_BITMAP_LAST_BYTE_MASK(nbits);
+}
+
+static inline void xc_bitmap_zero(uint8_t *dp, int nbits)
+{
+    size_t nbytes = XC_BITS_TO_BYTES(nbits);
+    if (nbytes > 1)
+        memset(dp, 0x00, nbytes-1); 
+    dp[nbytes-1] = 0;
+}
+
+
+static inline void
+xc_bitmap_and(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    if (nbits <= XC_BITS_PER_BYTE)
+        *dp = *s1p & *s2p;
+    else
+        __xc_bitmap_and(dp, s1p, s2p, nbits);
+}
+
+static inline void
+xc_bitmap_or(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    if (nbits <= XC_BITS_PER_BYTE)
+        *dp = *s1p | *s2p;
+    else
+        __xc_bitmap_or(dp, s1p, s2p, nbits);
+}
+
+static inline void
+xc_bitmap_xor(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    if (nbits <= XC_BITS_PER_BYTE)
+        *dp = *s1p ^ *s2p;
+    else
+        __xc_bitmap_xor(dp, s1p, s2p, nbits);
+}
+
+static inline void
+xc_bitmap_andnot(uint8_t *dp, uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    if (nbits <= XC_BITS_PER_BYTE)
+        *dp = *s1p & ~(*s2p);
+    else
+        __xc_bitmap_andnot(dp, s1p, s2p, nbits);
+}
+
+static inline void
+xc_bitmap_complement(uint8_t *dp, uint8_t *sp, int nbits)
+{
+    if (nbits <= XC_BITS_PER_BYTE)
+        *dp = ~(*sp) & XC_BITMAP_LAST_BYTE_MASK(nbits);
+    else
+        __xc_bitmap_complement(dp, sp, nbits);
+}
+
+static inline int
+xc_bitmap_equal(uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    if (nbits <= XC_BITS_PER_BYTE)
+        return !((*s1p ^ *s2p) & XC_BITMAP_LAST_BYTE_MASK(nbits));
+
+    return __xc_bitmap_equal(s1p, s2p, nbits);
+}
+
+static inline int
+xc_bitmap_intersects(uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    if (nbits <= XC_BITS_PER_BYTE)
+        return ((*s1p & *s2p) & XC_BITMAP_LAST_BYTE_MASK(nbits));
+
+    return __xc_bitmap_intersects(s1p, s2p, nbits);
+}
+
+static inline int
+xc_bitmap_subset(uint8_t *s1p, uint8_t *s2p, int nbits)
+{
+    if (nbits <= XC_BITS_PER_BYTE)
+        return !((*s1p & ~(*s2p)) & XC_BITMAP_LAST_BYTE_MASK(nbits));
+
+    return __xc_bitmap_subset(s1p, s2p, nbits);
+}
+
+static inline int
+xc_bitmap_empty(uint8_t *sp, int nbits)
+{
+    if (nbits <= XC_BITS_PER_BYTE)
+        return !(*sp & XC_BITMAP_LAST_BYTE_MASK(nbits));
+
+    return __xc_bitmap_empty(sp, nbits);
+}
+
+static inline int
+xc_bitmap_full(uint8_t *sp, int nbits)
+{
+    if (nbits <= XC_BITS_PER_BYTE)
+        return !(~(*sp) & XC_BITMAP_LAST_BYTE_MASK(nbits));
+
+    return __xc_bitmap_full(sp, nbits);
+}
+
+static inline uint32_t
+xc_bitmap_weight(const uint8_t *sp, int nbits)
+{
+    return __xc_bitmap_weight(sp, nbits);
+}
+
+
+static inline void
+xc_bitmap_copy(uint8_t *dp, const uint8_t *sp, int nbits)
+{
+    if (nbits <= XC_BITS_PER_BYTE)
+        *dp = *sp;
+    else
+        memcpy(dp, sp, XC_BITS_TO_BYTES(nbits));
+}
+
+#endif
diff -r 04cb0829d138 tools/libxc/xc_cpumap.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/libxc/xc_cpumap.h	Tue Mar 23 12:50:32 2010 -0400
@@ -0,0 +1,285 @@
+#ifndef __XENCTL_CPUMAP_H
+#define __XENCTL_CPUMAP_H
+
+#include "xc_private.h"
+#include "xc_bitmap.h"
+
+#define xc_cpumap_bits(maskp)                                           \
+                    ({  uint8_t *bitmap;                                \
+                        get_xen_guest_handle(bitmap, (maskp)->bitmap);  \
+                        bitmap; })
+#define xc_cpumap_len(maskp) ((maskp)->nr_cpus)
+
+/* Number of cpus set in the bitmap */
+#define xc_cpumap_num_cpus(mask)	xc_cpumap_weight(mask)
+
+/**
+ * xc_cpumap_first - get the first cpu in a xenctl_cpumap
+ * @srcp: the xenctl_cpumap pointer
+ *
+ * Returns >= xc_cpumap_len(srcp) if no cpus set.
+ */
+static inline unsigned int
+xc_cpumap_first(struct xenctl_cpumap *srcp)
+{
+	return xc_bitmap_find_first_bit(xc_cpumap_bits(srcp),
+                                                xc_cpumap_len(srcp));
+}
+
+/**
+ * xc_cpumap_next - get the next cpu in a xenctl_cpumap
+ * @n: the cpu prior to the place to search (i.e. the return will be > @n)
+ * @srcp: the xenctl_cpumap pointer
+ *
+ * Returns >= xc_cpumap_len(srcp) if no further cpus set.
+ */
+static inline uint32_t
+xc_cpumap_next(int n, struct xenctl_cpumap *srcp)
+{
+	return xc_bitmap_find_next_bit(xc_cpumap_bits(srcp),
+                                        xc_cpumap_len(srcp), n+1);
+}
+
+#if 0
+static inline uint32_t
+xc_cpumap_next_zero(int n, struct xenctl_cpumap *srcp)
+{
+	return xc_bitmap_find_next_zero_bit(xc_cpumap_bits(srcp),
+                                        xc_cpumap_len(srcp), n+1);
+}
+#endif
+
+/**
+ * xc_for_each_cpu - iterate over every cpu in a mask
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask: the xenctl_cpumap pointer
+ *
+ * After the loop, cpu is >= xc_cpumap_len(mask)
+ */
+#define xc_for_each_cpu(cpu, mask)				\
+            __xc_for_each_cpu(cpu, &(mask))
+
+#define __xc_for_each_cpu(cpu, mask)            \
+	for ((cpu) = -1;				            \
+		(cpu) = xc_cpumap_next((cpu), (mask)),	\
+		(cpu) < xc_cpumap_len(mask);)
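+/*
+ * Usage sketch:
+ *     int cpu;
+ *     xc_for_each_cpu(cpu, map)
+ *         printf("pcpu %d is set in map\n", cpu);
+ */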
+
+
+#define xc_cpumap_equal(src1, src2) __xc_cpumap_equal(&(src1), &(src2))
+static inline int
+__xc_cpumap_equal(struct xenctl_cpumap *s1p, struct xenctl_cpumap *s2p)
+{
+	return xc_bitmap_equal(xc_cpumap_bits(s1p), xc_cpumap_bits(s2p), 
+                                                xc_cpumap_len(s1p));
+}
+
+#define xc_cpumap_set_cpu(cpu, dst) __xc_cpumap_set_cpu(cpu, &(dst))
+static inline void __xc_cpumap_set_cpu(int cpu, struct xenctl_cpumap *dstp)
+{
+	xc_bitmap_set_bit(cpu, xc_cpumap_bits(dstp));
+}
+
+#define xc_cpumap_clear_cpu(cpu, dst) __xc_cpumap_clear_cpu(cpu, &(dst))
+static inline void __xc_cpumap_clear_cpu(int cpu, struct xenctl_cpumap *dstp)
+{
+	xc_bitmap_clear_bit(cpu, xc_cpumap_bits(dstp));
+}
+
+#define xc_cpumap_test_cpu(cpu, dst) __xc_cpumap_test_cpu(cpu, &(dst))
+static inline int __xc_cpumap_test_cpu(int cpu, struct xenctl_cpumap *dstp)
+{
+    return xc_bitmap_test_bit(cpu, xc_cpumap_bits(dstp));
+}
+
+
+#define xc_cpumap_setall(dst) __xc_cpumap_setall(&(dst))
+static inline void __xc_cpumap_setall(struct xenctl_cpumap *dstp)
+{
+	xc_bitmap_fill(xc_cpumap_bits(dstp), xc_cpumap_len(dstp));
+}
+
+#define xc_cpumap_clearall(dst) __xc_cpumap_clearall(&(dst))
+static inline void __xc_cpumap_clearall(struct xenctl_cpumap *dstp)
+{
+	xc_bitmap_zero(xc_cpumap_bits(dstp), xc_cpumap_len(dstp));
+}
+
+#define xc_cpumap_and(dst, src1, src2) \
+                        __xc_cpumap_and(&(dst), &(src1), &(src2))
+static inline void __xc_cpumap_and(struct xenctl_cpumap *dstp,
+        struct xenctl_cpumap *src1p, struct xenctl_cpumap *src2p)
+{
+	xc_bitmap_and(xc_cpumap_bits(dstp), xc_cpumap_bits(src1p),
+                                xc_cpumap_bits(src2p), xc_cpumap_len(dstp));
+}
+
+#define xc_cpumap_or(dst, src1, src2) \
+                        __xc_cpumap_or(&(dst), &(src1), &(src2))
+static inline void __xc_cpumap_or(struct xenctl_cpumap *dstp,
+        struct xenctl_cpumap *src1p, struct xenctl_cpumap *src2p)
+{
+	xc_bitmap_or(xc_cpumap_bits(dstp), xc_cpumap_bits(src1p),
+				                xc_cpumap_bits(src2p), xc_cpumap_len(dstp));
+}
+
+#define xc_cpumap_xor(dst, src1, src2) \
+                        __xc_cpumap_xor(&(dst), &(src1), &(src2))
+static inline void __xc_cpumap_xor(struct xenctl_cpumap *dstp,
+        struct xenctl_cpumap *src1p, struct xenctl_cpumap *src2p)
+{
+	xc_bitmap_xor(xc_cpumap_bits(dstp), xc_cpumap_bits(src1p),
+                        xc_cpumap_bits(src2p), xc_cpumap_len(dstp));
+}
+
+#define xc_cpumap_andnot(dst, src1, src2) \
+                        __xc_cpumap_andnot(&(dst), &(src1), &(src2))
+static inline void __xc_cpumap_andnot(struct xenctl_cpumap *dstp,
+    struct xenctl_cpumap *src1p, struct xenctl_cpumap *src2p)
+{
+	xc_bitmap_andnot(xc_cpumap_bits(dstp), xc_cpumap_bits(src1p),
+                        xc_cpumap_bits(src2p), xc_cpumap_len(dstp));
+}
+
+#define xc_cpumap_complement(dst, src) \
+                        __xc_cpumap_complement(&(dst), &(src))
+static inline void __xc_cpumap_complement(struct xenctl_cpumap *dstp,
+				                        struct xenctl_cpumap *srcp)
+{
+	xc_bitmap_complement(xc_cpumap_bits(dstp), xc_cpumap_bits(srcp),
+                                                xc_cpumap_len(dstp));
+}
+
+#define xc_cpumap_intersects(src1, src2) \
+                        __xc_cpumap_intersects(&(src1), &(src2))
+static inline int __xc_cpumap_intersects(struct xenctl_cpumap *src1p,
+				                            struct xenctl_cpumap *src2p)
+{
+	return xc_bitmap_intersects(xc_cpumap_bits(src1p), xc_cpumap_bits(src2p),
+                                                        xc_cpumap_len(src1p));
+}
+
+#define xc_cpumap_subset(src1, src2) \
+                        __xc_cpumap_subset(&(src1), &(src2))
+static inline int __xc_cpumap_subset(struct xenctl_cpumap *src1p,
+				                        struct xenctl_cpumap *src2p)
+{
+	return xc_bitmap_subset(xc_cpumap_bits(src1p), xc_cpumap_bits(src2p),
+                                                        xc_cpumap_len(src1p));
+}
+
+#define xc_cpumap_empty(src) __xc_cpumap_empty(&(src))
+static inline int __xc_cpumap_empty(struct xenctl_cpumap *srcp)
+{
+	return xc_bitmap_empty(xc_cpumap_bits(srcp), xc_cpumap_len(srcp));
+}
+
+#define xc_cpumap_full(src) __xc_cpumap_full(&(src))
+static inline int __xc_cpumap_full(struct xenctl_cpumap *srcp)
+{
+	return xc_bitmap_full(xc_cpumap_bits(srcp), xc_cpumap_len(srcp));
+}
+
+#define xc_cpumap_weight(src) __xc_cpumap_weight(&(src))
+static inline uint32_t __xc_cpumap_weight(struct xenctl_cpumap *srcp)
+{
+	return xc_bitmap_weight(xc_cpumap_bits(srcp), xc_cpumap_len(srcp));
+}
+
+#define xc_cpumap_copy(dst, src) __xc_cpumap_copy(&(dst), &(src))
+static inline void __xc_cpumap_copy(struct xenctl_cpumap *dstp,
+				                    struct xenctl_cpumap *srcp)
+{
+	xc_bitmap_copy(xc_cpumap_bits(dstp), xc_cpumap_bits(srcp),
+                                                xc_cpumap_len(dstp));
+}
+
+#if 0
+#define XC_CPUMASK_LAST_BYTE XC_BITMAP_LAST_BYTE_MASK(XENCTL_NR_CPUS)
+
+#define XC_CPUMASK_ALL							                    \
+/*(xenctl_cpumap)*/ { {							                \
+	[0 ... XC_BITS_TO_BYTES(XENCTL_NR_CPUS)-2] = 0xff,		        \
+	[XC_BITS_TO_BYTES(XENCTL_NR_CPUS)-1] = XC_CPUMASK_LAST_BYTE    \
+} }
+
+#define XC_CPUMASK_NONE							            \
+/*(xenctl_cpumap)*/ { {							        \
+	[0 ... XC_BITS_TO_BYTES(XENCTL_NR_CPUS)-1] =  0		    \
+} }
+#endif
+
+#define xc_cpumap_snprintf(buf, len, src) \
+			__xc_cpumap_snprintf((buf), (len), &(src), (src).nr_cpus)
+static inline int __xc_cpumap_snprintf(char *buf, int len,
+					        const struct xenctl_cpumap *srcp, int nbits)
+{
+	return xc_bitmap_snprintf(buf, len, xc_cpumap_bits(srcp), nbits);
+}
+
+/***********************************************************************/
+
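+/*
+ * Allocate and zero a cpumap sized for this host (max_cpu_id + 1 bits).
+ * On success the caller owns the bitmap and must release it with
+ * xc_cpumap_free_bitmap().
+ */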
+static inline int
+xc_cpumap_allocz_bitmap(int xc_handle, struct xenctl_cpumap *map)
+{
+    int nr_cpus;
+    uint8_t *bitmap;
+    xc_physinfo_t pinfo = { 0 };
+
+    if (xc_physinfo(xc_handle, &pinfo))
+        goto failed;
+
+    nr_cpus = pinfo.max_cpu_id + 1;
+    if (!(bitmap = malloc(XC_BITS_TO_BYTES(nr_cpus))))
+        goto failed;
+
+    xc_bitmap_zero(bitmap, nr_cpus);
+    map->nr_cpus = nr_cpus;
+    set_xen_guest_handle(map->bitmap, bitmap);
+    return 0;
+failed:
+    return -1;
+}
+
+static inline void
+xc_cpumap_free_bitmap(struct xenctl_cpumap *map)
+{
+    uint8_t *bitmap;
+    get_xen_guest_handle(bitmap, map->bitmap);
+    free(bitmap);
+}
+
+static inline int
+xc_cpumap_lock_pages(struct xenctl_cpumap *map)
+{
+    uint8_t *bitmap;
+    uint32_t nr_bytes = XC_BITS_TO_BYTES(map->nr_cpus);
+
+    get_xen_guest_handle(bitmap, map->bitmap);
+   
+    if (lock_pages(bitmap, nr_bytes))
+        return -1;
+    return 0;
+}
+
+static inline void
+xc_cpumap_unlock_pages(struct xenctl_cpumap *map)
+{
+    uint8_t *bitmap;
+    uint32_t nr_bytes = XC_BITS_TO_BYTES(map->nr_cpus);
+
+    get_xen_guest_handle(bitmap, map->bitmap);
+    unlock_pages(bitmap, nr_bytes);
+}
+
+#endif /* __XENCTL_CPUMAP_H */
diff -r 04cb0829d138 tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c	Wed Mar 17 14:10:43 2010 +0000
+++ b/tools/libxc/xc_domain.c	Tue Mar 23 12:50:32 2010 -0400
@@ -8,6 +8,7 @@
 
 #include "xc_private.h"
 #include "xg_save_restore.h"
+#include "xc_cpumap.h"
 #include <xen/memory.h>
 #include <xen/hvm/hvm_op.h>
 
@@ -98,28 +99,17 @@
 int xc_vcpu_setaffinity(int xc_handle,
                         uint32_t domid,
                         int vcpu,
-                        uint64_t *cpumap, int cpusize)
+                        struct xenctl_cpumap *cpumap)
 {
     DECLARE_DOMCTL;
     int ret = -1;
-    uint8_t *local = malloc(cpusize); 
 
-    if(local == NULL)
-    {
-        PERROR("Could not alloc memory for Xen hypercall");
-        goto out;
-    }
     domctl.cmd = XEN_DOMCTL_setvcpuaffinity;
     domctl.domain = (domid_t)domid;
-    domctl.u.vcpuaffinity.vcpu    = vcpu;
+    domctl.u.vcpuaffinity.vcpu = vcpu;
+    domctl.u.vcpuaffinity.cpumap = *cpumap;
 
-    bitmap_64_to_byte(local, cpumap, cpusize * 8);
-
-    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
-
-    domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8;
-    
-    if ( lock_pages(local, cpusize) != 0 )
+    if (xc_cpumap_lock_pages(cpumap))
     {
         PERROR("Could not lock memory for Xen hypercall");
         goto out;
@@ -127,10 +117,9 @@
 
     ret = do_domctl(xc_handle, &domctl);
 
-    unlock_pages(local, cpusize);
+    xc_cpumap_unlock_pages(cpumap);
 
  out:
-    free(local);
     return ret;
 }
 
@@ -138,28 +127,18 @@
 int xc_vcpu_getaffinity(int xc_handle,
                         uint32_t domid,
                         int vcpu,
-                        uint64_t *cpumap,
-                        int cpusize)
+                        struct xenctl_cpumap *cpumap)
 {
     DECLARE_DOMCTL;
     int ret = -1;
-    uint8_t * local = malloc(cpusize);
-
-    if(local == NULL)
-    {
-        PERROR("Could not alloc memory for Xen hypercall");
-        goto out;
-    }
 
     domctl.cmd = XEN_DOMCTL_getvcpuaffinity;
     domctl.domain = (domid_t)domid;
     domctl.u.vcpuaffinity.vcpu = vcpu;
+    domctl.u.vcpuaffinity.cpumap = *cpumap;
 
 
-    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
-    domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8;
-    
-    if ( lock_pages(local, sizeof(local)) != 0 )
+    if (xc_cpumap_lock_pages(cpumap))
     {
         PERROR("Could not lock memory for Xen hypercall");
         goto out;
@@ -167,10 +146,8 @@
 
     ret = do_domctl(xc_handle, &domctl);
 
-    unlock_pages(local, sizeof (local));
-    bitmap_byte_to_64(cpumap, local, cpusize * 8);
+    xc_cpumap_unlock_pages(cpumap);
 out:
-    free(local);
     return ret;
 }
 
diff -r 04cb0829d138 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h	Wed Mar 17 14:10:43 2010 +0000
+++ b/tools/libxc/xenctrl.h	Tue Mar 23 12:50:32 2010 -0400
@@ -309,13 +309,11 @@
 int xc_vcpu_setaffinity(int xc_handle,
                         uint32_t domid,
                         int vcpu,
-                        uint64_t *cpumap,
-                        int cpusize);
+                        struct xenctl_cpumap *cpumap);
 int xc_vcpu_getaffinity(int xc_handle,
                         uint32_t domid,
                         int vcpu,
-                        uint64_t *cpumap,
-                        int cpusize);
+                        struct xenctl_cpumap *cpumap);
 
 /**
  * This function will return information about one or more domains. It is
diff -r 04cb0829d138 tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c	Wed Mar 17 14:10:43 2010 +0000
+++ b/tools/python/xen/lowlevel/xc/xc.c	Tue Mar 23 12:50:32 2010 -0400
@@ -23,6 +23,7 @@
 #include "xc_dom.h"
 #include <xen/hvm/hvm_info_table.h>
 #include <xen/hvm/params.h>
+#include "xc_cpumap.h"
 
 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
 
@@ -215,12 +216,8 @@
 {
     uint32_t dom;
     int vcpu = 0, i;
-    uint64_t  *cpumap;
+    struct xenctl_cpumap cpumap;
     PyObject *cpulist = NULL;
-    int nr_cpus, size;
-    xc_physinfo_t info; 
-    xc_cpu_to_node_t map[1];
-    uint64_t cpumap_size = sizeof(cpumap); 
 
     static char *kwd_list[] = { "domid", "vcpu", "cpumap", NULL };
     
@@ -229,40 +226,26 @@
                                       &dom, &vcpu, &cpulist) )
         return NULL;
 
-    set_xen_guest_handle(info.cpu_to_node, map);
-    info.max_cpu_id = 1;
-    if ( xc_physinfo(self->xc_handle, &info) != 0 )
+    if (xc_cpumap_allocz_bitmap(self->xc_handle, &cpumap))
         return pyxc_error_to_exception();
-  
-    nr_cpus = info.nr_cpus;
-
-    size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8);
-    cpumap = malloc(cpumap_size * size);
-    if(cpumap == NULL)
-        return pyxc_error_to_exception();
-    
 
     if ( (cpulist != NULL) && PyList_Check(cpulist) )
     {
-        for ( i = 0; i < size; i++)
-        {
-            cpumap[i] = 0ULL;
-        }
         for ( i = 0; i < PyList_Size(cpulist); i++ ) 
         {
             long cpu = PyInt_AsLong(PyList_GetItem(cpulist, i));
-            *(cpumap + cpu / (cpumap_size * 8)) |= (uint64_t)1 << (cpu % (cpumap_size * 8));
+            xc_cpumap_set_cpu(cpu, cpumap);
         }
     }
   
-    if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap, size * cpumap_size) != 0 )
+    if (xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, &cpumap))
     {
-        free(cpumap);
+        xc_cpumap_free_bitmap(&cpumap);
         return pyxc_error_to_exception();
     }
 
     Py_INCREF(zero);
-    free(cpumap); 
+    xc_cpumap_free_bitmap(&cpumap);
     return zero;
 }
 
@@ -381,11 +364,7 @@
     uint32_t dom, vcpu = 0;
     xc_vcpuinfo_t info;
     int rc, i;
-    uint64_t *cpumap;
-    int nr_cpus, size;
-    xc_physinfo_t pinfo = { 0 };
-    xc_cpu_to_node_t map[1];
-    uint64_t cpumap_size = sizeof(cpumap);
+    struct xenctl_cpumap cpumap;
 
     static char *kwd_list[] = { "domid", "vcpu", NULL };
     
@@ -393,23 +372,14 @@
                                       &dom, &vcpu) )
         return NULL;
 
-    set_xen_guest_handle(pinfo.cpu_to_node, map);
-    pinfo.max_cpu_id = 1;
-    if ( xc_physinfo(self->xc_handle, &pinfo) != 0 ) 
+    if ( xc_cpumap_allocz_bitmap(self->xc_handle, &cpumap) ) 
         return pyxc_error_to_exception();
-    nr_cpus = pinfo.nr_cpus;
-    rc = xc_vcpu_getinfo(self->xc_handle, dom, vcpu, &info);
-    if ( rc < 0 )
+    if ((rc = xc_vcpu_getinfo(self->xc_handle, dom, vcpu, &info)) < 0)
         return pyxc_error_to_exception();
-    size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8); 
 
-    if((cpumap = malloc(cpumap_size * size)) == NULL)
-        return pyxc_error_to_exception(); 
-
-    rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, cpumap, cpumap_size * size);
-    if ( rc < 0 )
+    if ((rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, &cpumap)) < 0)
     {
-        free(cpumap);
+        xc_cpumap_free_bitmap(&cpumap);
         return pyxc_error_to_exception();
     }
 
@@ -421,18 +391,15 @@
                               "cpu",      info.cpu);
 
     cpulist = PyList_New(0);
-    for ( i = 0; i < size * cpumap_size * 8; i++ )
+    xc_for_each_cpu(i, cpumap)
     {
-        if (*(cpumap + i / (cpumap_size * 8)) & 1 ) {
-            PyObject *pyint = PyInt_FromLong(i);
-            PyList_Append(cpulist, pyint);
-            Py_DECREF(pyint);
-        }
-        *(cpumap + i / (cpumap_size * 8)) >>= 1;
+        PyObject *pyint = PyInt_FromLong(i);
+        PyList_Append(cpulist, pyint);
+        Py_DECREF(pyint);
     }
     PyDict_SetItemString(info_dict, "cpumap", cpulist);
     Py_DECREF(cpulist);
-    free(cpumap);
+    xc_cpumap_free_bitmap(&cpumap);
     return info_dict;
 }
 

[-- Attachment #3: Type: text/plain, Size: 138 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel


* Re: [PATCH] libxc bitmap utils and vcpu-affinity
  2010-03-30 14:42               ` Fwd: " Dulloor
@ 2010-03-30 15:16                 ` Keir Fraser
  2010-03-30 16:05                   ` Dulloor
  0 siblings, 1 reply; 12+ messages in thread
From: Keir Fraser @ 2010-03-30 15:16 UTC (permalink / raw)
  To: Dulloor, xen-devel; +Cc: Jan Beulich

No changeset comment. No signed-off-by line. It actually bloats the
libraries by a net 660 LOC (747 added, 87 deleted, according to diffstat).
And below I append the very first function I read: it doesn't inspire
confidence, as the implementation is over-complicated/long and
unnecessarily handles 16-bit values. Why should I show your patch some love?

 -- Keir

+static inline int __xc_ffs(uint8_t byte)
+{
+       int num = 0;
+
+       if ((byte & 0xff) == 0) {
+               num += 8;
+               byte >>= 8;
+       }
+       if ((byte & 0xf) == 0) {
+               num += 4;
+               byte >>= 4;
+       }
+       if ((byte & 0x3) == 0) {
+               num += 2;
+               byte >>= 2;
+       }
+       if ((byte & 0x1) == 0)
+               num += 1;
+       return num;
+}

On 30/03/2010 15:42, "Dulloor" <dulloor@gmail.com> wrote:

> Resubmitting the patch.
> 
> -dulloor
> 
> ---------- Forwarded message ----------
> From: Dulloor <dulloor@gmail.com>
> Date: Tue, Mar 23, 2010 at 12:55 PM
> Subject: Re: [Xen-devel][PATCH] libxc bitmap utils and vcpu-affinity
> To: Keir Fraser <keir.fraser@eu.citrix.com>
> Cc: Jan Beulich <JBeulich@novell.com>, "xen-devel@lists.xensource.com"
> <xen-devel@lists.xensource.com>
> 
> 
> Please use this patch, in which length of bitmap is
> (physinfo.max_cpu_id+1), rather than (physinfo.nr_cpus).
> 
> -dulloor
> 
> On Tue, Mar 23, 2010 at 12:41 PM, Dulloor <dulloor@gmail.com> wrote:
>> I meant utils for **xenctl_cpumap**
>> 
>> On Tue, Mar 23, 2010 at 12:40 PM, Dulloor <dulloor@gmail.com> wrote:
>>> Fine, I agree with you both. Attached is a patch adding utils for
>>> xenctl_bitmap (to libxc) and using the same in vcpu_(get|set)affinity.
>>> For the guest-numa interface, I will see if I can use xenctl_cpumap.
>>> 
>>> -dulloor
>>> 
>>> On Tue, Mar 23, 2010 at 7:05 AM, Keir Fraser <keir.fraser@eu.citrix.com>
>>> wrote:
>>>> On 23/03/2010 10:10, "Jan Beulich" <JBeulich@novell.com> wrote:
>>>> 
>>>>>>>> Dulloor <dulloor@gmail.com> 22.03.10 18:44 >>>
>>>>>> Motivation for using xenctl_cpumask in Xen interfaces :
>>>>>> - xenctl_cpumap is just 4 bytes smaller than static xenctl_cpumask for
>>>>>> 128 cpus (128 would be good for quite some time). However, the new
>>>>> 
>>>>> I don't buy this (we're already building for 256 CPUs, looking forward
>>>>> to further bump this in the not too distant future), and I'm generally
>>>>> opposed to introducing hard coded limits in a public interface.
>>>> 
>>>> We should use xenctl_cpumask everywhere for specifying physical CPU
>>>> bitmaps,
>>>> even into guest NUMA interfaces if appropriate. I don't really care if it
>>>> is
>>>> a bit harder to use than a static bitmap.
>>>> 
>>>>  -- Keir
>>>> 
>>>> 
>>>> 
>>> 
>> 


* Re: [PATCH] libxc bitmap utils and vcpu-affinity
  2010-03-30 15:16                 ` Keir Fraser
@ 2010-03-30 16:05                   ` Dulloor
  2010-03-30 16:27                     ` Keir Fraser
  0 siblings, 1 reply; 12+ messages in thread
From: Dulloor @ 2010-03-30 16:05 UTC (permalink / raw)
  To: Keir Fraser; +Cc: xen-devel, Jan Beulich

> No changeset comment. No signed-off-by line.
Sorry, I forgot. Will do once we finalize on the patch.

> It actually bloats the libraries by a net 660 LOC
> (747 added, 87 deleted, according to diffstat).
In this patch, we use the library only for vcpu get/set affinity.
There are clearly other opportunities (now and in the future) to use
most of the functions it provides, which will offset this. It also
gives us a cleaner, standard way of using the cpumap structure in
libxc, as the sketch below shows.
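
For example, with this patch a caller that pins vcpu 0 of a domain to
pcpu 2 reduces to roughly the following (a sketch, assuming an open
xc_handle and a target domid; error handling elided):

    struct xenctl_cpumap cpumap;
    int rc;

    if (xc_cpumap_allocz_bitmap(xc_handle, &cpumap))
        return -1;
    xc_cpumap_set_cpu(2, cpumap);
    rc = xc_vcpu_setaffinity(xc_handle, domid, 0, &cpumap);
    xc_cpumap_free_bitmap(&cpumap);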

> And below I append the very first function I read: it doesn't inspire
> confidence, as the implementation is over-complicated/long and
> unnecessarily handles 16-bit values.
I guess you mean 8-bit values. The library works with byte-based
bitmap structures, instead of uint32_t- or uint64_t-based ones, so
that no conversion is needed when using the bitmap library with
xenctl_cpumap. Please let me know if you would rather keep the bitmap
utilities separate from xenctl_cpumap and provide conversion
functions. That would be a small change.

The function that you quote is adapted from the Linux kernel
implementation (as noted in its comment) and is a simple, generic
function. I have tested the library thoroughly.
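
That said, if the dead first step is the concern, trimming it for a
strict 8-bit argument is trivial; a sketch:

    static inline int __xc_ffs(uint8_t byte)
    {
        int num = 0;

        if ((byte & 0x0f) == 0) {
            num += 4;
            byte >>= 4;
        }
        if ((byte & 0x03) == 0) {
            num += 2;
            byte >>= 2;
        }
        if ((byte & 0x01) == 0)
            num += 1;
        return num;
    }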

> Why should I show your patch some love?
We can work towards that ;)

-dulloor


* Re: [PATCH] libxc bitmap utils and vcpu-affinity
  2010-03-30 16:05                   ` Dulloor
@ 2010-03-30 16:27                     ` Keir Fraser
  0 siblings, 0 replies; 12+ messages in thread
From: Keir Fraser @ 2010-03-30 16:27 UTC (permalink / raw)
  To: Dulloor; +Cc: xen-devel, Jan Beulich

On 30/03/2010 17:05, "Dulloor" <dulloor@gmail.com> wrote:

>> It actually bloats the libraries by a net 650 LOC
>> (747 added, 87 deleted according to diffstat).
> In the patch, we have used the library only for vcpu get/set affinity.
> There are clearly other opportunities (right now and in future) to use
> most of the functions provided by the library, which will offset this.
> Also, this provides a cleaner/standard way of using the cpumap
> structure in libxc.

Clearly it's not a simplicity win right now, as it adds a lot of code on
net. I'd rather see this as part of a patch series that actually uses it
more substantially. And even then I'd bet that half of this patch could be
removed as unused. If NUMA changes end up manipulating cpumaps in, say, a
dozen places, then I could see this approach being useful, rather than the
pointless abstraction it currently appears to be.

 -- Keir


end of thread

Thread overview: 12+ messages
2010-03-22  3:33 [PATCH] libxc bitmap utils and vcpu-affinity Dulloor
2010-03-22  7:30 ` Keir Fraser
2010-03-22 17:44   ` Dulloor
2010-03-23 10:10     ` Jan Beulich
2010-03-23 11:05       ` Keir Fraser
2010-03-23 16:40         ` Dulloor
2010-03-23 16:41           ` Dulloor
2010-03-23 16:55             ` Dulloor
2010-03-30 14:42               ` Fwd: " Dulloor
2010-03-30 15:16                 ` Keir Fraser
2010-03-30 16:05                   ` Dulloor
2010-03-30 16:27                     ` Keir Fraser
