* [PATCH 1/4] xen/tmem: Switch to using bool
@ 2017-06-28 11:16 Andrew Cooper
2017-06-28 11:16 ` [PATCH 2/4] xen/flask: " Andrew Cooper
` (4 more replies)
0 siblings, 5 replies; 12+ messages in thread
From: Andrew Cooper @ 2017-06-28 11:16 UTC (permalink / raw)
To: Xen-devel; +Cc: Andrew Cooper
* Drop redundant initialisers
* Style corrections while changing client_over_quota()
* Drop all write-only bools from do_tmem_op()
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
---
xen/common/tmem.c | 39 ++++++++++++++++-----------------------
xen/common/tmem_control.c | 16 ++++++++--------
xen/common/tmem_xen.c | 12 ++++++------
xen/include/xen/tmem_xen.h | 24 ++++++++++++------------
4 files changed, 42 insertions(+), 49 deletions(-)
diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index 306dda6..c955cf7 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -71,7 +71,7 @@ struct tmem_page_descriptor {
pagesize_t size; /* 0 == PAGE_SIZE (pfp), -1 == data invalid,
else compressed data (cdata). */
uint32_t index;
- bool_t eviction_attempted; /* CHANGE TO lifetimes? (settable). */
+ bool eviction_attempted; /* CHANGE TO lifetimes? (settable). */
union {
struct page_info *pfp; /* Page frame pointer. */
char *cdata; /* Compressed data. */
@@ -884,39 +884,41 @@ static void client_flush(struct client *client)
client_free(client);
}
-static bool_t client_over_quota(struct client *client)
+static bool client_over_quota(const struct client *client)
{
int total = _atomic_read(tmem_global.client_weight_total);
ASSERT(client != NULL);
if ( (total == 0) || (client->info.weight == 0) ||
(client->eph_count == 0) )
- return 0;
- return ( ((tmem_global.eph_count*100L) / client->eph_count ) >
- ((total*100L) / client->info.weight) );
+ return false;
+
+ return (((tmem_global.eph_count * 100L) / client->eph_count) >
+ ((total * 100L) / client->info.weight));
}
/************ MEMORY REVOCATION ROUTINES *******************************/
-static bool_t tmem_try_to_evict_pgp(struct tmem_page_descriptor *pgp, bool_t *hold_pool_rwlock)
+static bool tmem_try_to_evict_pgp(struct tmem_page_descriptor *pgp,
+ bool *hold_pool_rwlock)
{
struct tmem_object_root *obj = pgp->us.obj;
struct tmem_pool *pool = obj->pool;
if ( pool->is_dying )
- return 0;
+ return false;
if ( spin_trylock(&obj->obj_spinlock) )
{
if ( obj->pgp_count > 1 )
- return 1;
+ return true;
if ( write_trylock(&pool->pool_rwlock) )
{
*hold_pool_rwlock = 1;
- return 1;
+ return true;
}
spin_unlock(&obj->obj_spinlock);
}
- return 0;
+ return false;
}
int tmem_evict(void)
@@ -926,7 +928,7 @@ int tmem_evict(void)
struct tmem_object_root *obj;
struct tmem_pool *pool;
int ret = 0;
- bool_t hold_pool_rwlock = 0;
+ bool hold_pool_rwlock = false;
tmem_stats.evict_attempts++;
spin_lock(&eph_lists_spinlock);
@@ -995,7 +997,7 @@ int tmem_evict(void)
* is a minimum amount of memory (1MB) available BEFORE any data structure
* locks are held.
*/
-static inline bool_t tmem_ensure_avail_pages(void)
+static inline bool tmem_ensure_avail_pages(void)
{
int failed_evict = 10;
unsigned long free_mem;
@@ -1004,12 +1006,12 @@ static inline bool_t tmem_ensure_avail_pages(void)
free_mem = (tmem_page_list_pages + total_free_pages())
>> (20 - PAGE_SHIFT);
if ( free_mem )
- return 1;
+ return true;
if ( !tmem_evict() )
failed_evict--;
} while ( failed_evict > 0 );
- return 0;
+ return false;
}
/************ TMEM CORE OPERATIONS ************************************/
@@ -1879,9 +1881,6 @@ long do_tmem_op(tmem_cli_op_t uops)
struct tmem_pool *pool = NULL;
struct xen_tmem_oid *oidp;
int rc = 0;
- bool_t succ_get = 0, succ_put = 0;
- bool_t non_succ_get = 0, non_succ_put = 0;
- bool_t flush = 0, flush_obj = 0;
if ( !tmem_initialized )
return -ENODEV;
@@ -1965,22 +1964,16 @@ long do_tmem_op(tmem_cli_op_t uops)
tmem_cli_buf_null);
else
rc = -ENOMEM;
- if (rc == 1) succ_put = 1;
- else non_succ_put = 1;
break;
case TMEM_GET_PAGE:
rc = do_tmem_get(pool, oidp, op.u.gen.index, op.u.gen.cmfn,
tmem_cli_buf_null);
- if (rc == 1) succ_get = 1;
- else non_succ_get = 1;
break;
case TMEM_FLUSH_PAGE:
- flush = 1;
rc = do_tmem_flush_page(pool, oidp, op.u.gen.index);
break;
case TMEM_FLUSH_OBJECT:
rc = do_tmem_flush_object(pool, oidp);
- flush_obj = 1;
break;
default:
tmem_client_warn("tmem: op %d not implemented\n", op.cmd);
diff --git a/xen/common/tmem_control.c b/xen/common/tmem_control.c
index 2d980e3..30bf6fb 100644
--- a/xen/common/tmem_control.c
+++ b/xen/common/tmem_control.c
@@ -19,8 +19,8 @@
static int tmemc_freeze_pools(domid_t cli_id, int arg)
{
struct client *client;
- bool_t freeze = (arg == XEN_SYSCTL_TMEM_OP_FREEZE) ? 1 : 0;
- bool_t destroy = (arg == XEN_SYSCTL_TMEM_OP_DESTROY) ? 1 : 0;
+ bool freeze = arg == XEN_SYSCTL_TMEM_OP_FREEZE;
+ bool destroy = arg == XEN_SYSCTL_TMEM_OP_DESTROY;
char *s;
s = destroy ? "destroyed" : ( freeze ? "frozen" : "thawed" );
@@ -96,12 +96,12 @@ static int tmemc_flush_mem(domid_t cli_id, uint32_t kb)
#define BSIZE 1024
static int tmemc_list_client(struct client *c, tmem_cli_va_param_t buf,
- int off, uint32_t len, bool_t use_long)
+ int off, uint32_t len, bool use_long)
{
char info[BSIZE];
int i, n = 0, sum = 0;
struct tmem_pool *p;
- bool_t s;
+ bool s;
n = scnprintf(info,BSIZE,"C=CI:%d,ww:%d,co:%d,fr:%d,"
"Tc:%"PRIu64",Ge:%ld,Pp:%ld,Gp:%ld%c",
@@ -149,7 +149,7 @@ static int tmemc_list_client(struct client *c, tmem_cli_va_param_t buf,
}
static int tmemc_list_shared(tmem_cli_va_param_t buf, int off, uint32_t len,
- bool_t use_long)
+ bool use_long)
{
char info[BSIZE];
int i, n = 0, sum = 0;
@@ -188,7 +188,7 @@ static int tmemc_list_shared(tmem_cli_va_param_t buf, int off, uint32_t len,
}
static int tmemc_list_global_perf(tmem_cli_va_param_t buf, int off,
- uint32_t len, bool_t use_long)
+ uint32_t len, bool use_long)
{
char info[BSIZE];
int n = 0, sum = 0;
@@ -204,7 +204,7 @@ static int tmemc_list_global_perf(tmem_cli_va_param_t buf, int off,
}
static int tmemc_list_global(tmem_cli_va_param_t buf, int off, uint32_t len,
- bool_t use_long)
+ bool use_long)
{
char info[BSIZE];
int n = 0, sum = off;
@@ -238,7 +238,7 @@ static int tmemc_list_global(tmem_cli_va_param_t buf, int off, uint32_t len,
}
static int tmemc_list(domid_t cli_id, tmem_cli_va_param_t buf, uint32_t len,
- bool_t use_long)
+ bool use_long)
{
struct client *client;
int off = 0;
diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c
index 725ae93..20f74b2 100644
--- a/xen/common/tmem_xen.c
+++ b/xen/common/tmem_xen.c
@@ -14,10 +14,10 @@
#include <xen/cpu.h>
#include <xen/init.h>
-bool_t __read_mostly opt_tmem = 0;
+bool __read_mostly opt_tmem;
boolean_param("tmem", opt_tmem);
-bool_t __read_mostly opt_tmem_compress = 0;
+bool __read_mostly opt_tmem_compress;
boolean_param("tmem_compress", opt_tmem_compress);
atomic_t freeable_page_count = ATOMIC_INIT(0);
@@ -32,14 +32,14 @@ static DEFINE_PER_CPU_READ_MOSTLY(void *, scratch_page);
#if defined(CONFIG_ARM)
static inline void *cli_get_page(xen_pfn_t cmfn, unsigned long *pcli_mfn,
- struct page_info **pcli_pfp, bool_t cli_write)
+ struct page_info **pcli_pfp, bool cli_write)
{
ASSERT_UNREACHABLE();
return NULL;
}
static inline void cli_put_page(void *cli_va, struct page_info *cli_pfp,
- unsigned long cli_mfn, bool_t mark_dirty)
+ unsigned long cli_mfn, bool mark_dirty)
{
ASSERT_UNREACHABLE();
}
@@ -47,7 +47,7 @@ static inline void cli_put_page(void *cli_va, struct page_info *cli_pfp,
#include <asm/p2m.h>
static inline void *cli_get_page(xen_pfn_t cmfn, unsigned long *pcli_mfn,
- struct page_info **pcli_pfp, bool_t cli_write)
+ struct page_info **pcli_pfp, bool cli_write)
{
p2m_type_t t;
struct page_info *page;
@@ -72,7 +72,7 @@ static inline void *cli_get_page(xen_pfn_t cmfn, unsigned long *pcli_mfn,
}
static inline void cli_put_page(void *cli_va, struct page_info *cli_pfp,
- unsigned long cli_mfn, bool_t mark_dirty)
+ unsigned long cli_mfn, bool mark_dirty)
{
if ( mark_dirty )
{
diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h
index dc5888c..542c0b3 100644
--- a/xen/include/xen/tmem_xen.h
+++ b/xen/include/xen/tmem_xen.h
@@ -35,27 +35,27 @@ extern atomic_t freeable_page_count;
extern int tmem_init(void);
#define tmem_hash hash_long
-extern bool_t opt_tmem_compress;
-static inline bool_t tmem_compression_enabled(void)
+extern bool opt_tmem_compress;
+static inline bool tmem_compression_enabled(void)
{
return opt_tmem_compress;
}
#ifdef CONFIG_TMEM
-extern bool_t opt_tmem;
-static inline bool_t tmem_enabled(void)
+extern bool opt_tmem;
+static inline bool tmem_enabled(void)
{
return opt_tmem;
}
static inline void tmem_disable(void)
{
- opt_tmem = 0;
+ opt_tmem = false;
}
#else
-static inline bool_t tmem_enabled(void)
+static inline bool tmem_enabled(void)
{
- return 0;
+ return false;
}
static inline void tmem_disable(void)
@@ -266,7 +266,7 @@ struct tmem_global {
struct list_head ephemeral_page_list; /* All pages in ephemeral pools. */
struct list_head client_list;
struct tmem_pool *shared_pools[MAX_GLOBAL_SHARED_POOLS];
- bool_t shared_auth;
+ bool shared_auth;
long eph_count; /* Atomicity depends on eph_lists_spinlock. */
atomic_t client_weight_total;
};
@@ -286,7 +286,7 @@ struct client {
domid_t cli_id;
xen_tmem_client_t info;
/* For save/restore/migration. */
- bool_t was_frozen;
+ bool was_frozen;
struct list_head persistent_invalidated_list;
struct tmem_page_descriptor *cur_pgp;
/* Statistics collection. */
@@ -307,9 +307,9 @@ struct client {
#define is_shared(_p) (_p->shared)
struct tmem_pool {
- bool_t shared;
- bool_t persistent;
- bool_t is_dying;
+ bool shared;
+ bool persistent;
+ bool is_dying;
struct client *client;
uint64_t uuid[2]; /* 0 for private, non-zero for shared. */
uint32_t pool_id;
--
2.1.4
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply related [flat|nested] 12+ messages in thread
* [PATCH 2/4] xen/flask: Switch to using bool
2017-06-28 11:16 [PATCH 1/4] xen/tmem: Switch to using bool Andrew Cooper
@ 2017-06-28 11:16 ` Andrew Cooper
2017-08-10 14:37 ` Daniel De Graaf
2017-06-28 11:16 ` [PATCH 3/4] xen/efi: " Andrew Cooper
` (3 subsequent siblings)
4 siblings, 1 reply; 12+ messages in thread
From: Andrew Cooper @ 2017-06-28 11:16 UTC (permalink / raw)
To: Xen-devel; +Cc: Andrew Cooper, Daniel De Graaf
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Daniel De Graaf <dgdegra@tycho.nsa.gov>
---
xen/xsm/flask/flask_op.c | 4 ++--
xen/xsm/flask/include/avc.h | 2 +-
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/xen/xsm/flask/flask_op.c b/xen/xsm/flask/flask_op.c
index 719c2d7..15aa96b 100644
--- a/xen/xsm/flask/flask_op.c
+++ b/xen/xsm/flask/flask_op.c
@@ -29,7 +29,7 @@ enum flask_bootparam_t __read_mostly flask_bootparam = FLASK_BOOTPARAM_ENFORCING
static void parse_flask_param(char *s);
custom_param("flask", parse_flask_param);
-bool_t __read_mostly flask_enforcing = 1;
+bool __read_mostly flask_enforcing = true;
#define MAX_POLICY_SIZE 0x4000000
@@ -453,7 +453,7 @@ static int flask_security_load(struct xen_flask_load *load)
{
int ret;
void *buf = NULL;
- bool_t is_reload = ss_initialized;
+ bool is_reload = ss_initialized;
ret = domain_has_security(current->domain, SECURITY__LOAD_POLICY);
if ( ret )
diff --git a/xen/xsm/flask/include/avc.h b/xen/xsm/flask/include/avc.h
index c153c8e..bfc69f4 100644
--- a/xen/xsm/flask/include/avc.h
+++ b/xen/xsm/flask/include/avc.h
@@ -17,7 +17,7 @@
#include "av_permissions.h"
#include "security.h"
-extern bool_t flask_enforcing;
+extern bool flask_enforcing;
/*
* An entry in the AVC.
--
2.1.4
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply related [flat|nested] 12+ messages in thread
* [PATCH 3/4] xen/efi: Switch to using bool
2017-06-28 11:16 [PATCH 1/4] xen/tmem: Switch to using bool Andrew Cooper
2017-06-28 11:16 ` [PATCH 2/4] xen/flask: " Andrew Cooper
@ 2017-06-28 11:16 ` Andrew Cooper
2017-06-28 11:53 ` Jan Beulich
2017-06-28 11:16 ` [PATCH 4/4] x86/shadow: " Andrew Cooper
` (2 subsequent siblings)
4 siblings, 1 reply; 12+ messages in thread
From: Andrew Cooper @ 2017-06-28 11:16 UTC (permalink / raw)
To: Xen-devel; +Cc: Andrew Cooper, Jan Beulich
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
---
xen/arch/arm/efi/efi-boot.h | 7 +++---
xen/arch/x86/efi/efi-boot.h | 4 ++--
xen/arch/x86/efi/stub.c | 6 ++---
xen/common/efi/boot.c | 58 ++++++++++++++++++++++-----------------------
xen/common/efi/runtime.c | 4 ++--
xen/include/xen/efi.h | 4 ++--
6 files changed, 42 insertions(+), 41 deletions(-)
diff --git a/xen/arch/arm/efi/efi-boot.h b/xen/arch/arm/efi/efi-boot.h
index 2986c83..56de26e 100644
--- a/xen/arch/arm/efi/efi-boot.h
+++ b/xen/arch/arm/efi/efi-boot.h
@@ -559,7 +559,7 @@ static void __init efi_arch_load_addr_check(EFI_LOADED_IMAGE *loaded_image)
blexit(L"Xen must be loaded at a 4 KByte boundary.");
}
-static bool_t __init efi_arch_use_config_file(EFI_SYSTEM_TABLE *SystemTable)
+static bool __init efi_arch_use_config_file(EFI_SYSTEM_TABLE *SystemTable)
{
/*
* For arm, we may get a device tree from GRUB (or other bootloader)
@@ -578,12 +578,13 @@ static bool_t __init efi_arch_use_config_file(EFI_SYSTEM_TABLE *SystemTable)
* We either have no FDT, or one without modules, so we must have a
* Xen EFI configuration file to specify modules. (dom0 required)
*/
- return 1;
+ return true;
}
PrintStr(L"Using modules provided by bootloader in FDT\r\n");
/* We have modules already defined in fdt, just add space. */
fdt = fdt_increase_size(&dtbfile, EFI_PAGE_SIZE);
- return 0;
+
+ return false;
}
static void __init efi_arch_console_init(UINTN cols, UINTN rows)
diff --git a/xen/arch/x86/efi/efi-boot.h b/xen/arch/x86/efi/efi-boot.h
index 34537d4..bedac5c 100644
--- a/xen/arch/x86/efi/efi-boot.h
+++ b/xen/arch/x86/efi/efi-boot.h
@@ -662,9 +662,9 @@ static void __init efi_arch_load_addr_check(EFI_LOADED_IMAGE *loaded_image)
trampoline_xen_phys_start = xen_phys_start;
}
-static bool_t __init efi_arch_use_config_file(EFI_SYSTEM_TABLE *SystemTable)
+static bool __init efi_arch_use_config_file(EFI_SYSTEM_TABLE *SystemTable)
{
- return 1; /* x86 always uses a config file */
+ return true; /* x86 always uses a config file */
}
static void efi_arch_flush_dcache_area(const void *vaddr, UINTN size) { }
diff --git a/xen/arch/x86/efi/stub.c b/xen/arch/x86/efi/stub.c
index 17da050..0c481e3 100644
--- a/xen/arch/x86/efi/stub.c
+++ b/xen/arch/x86/efi/stub.c
@@ -52,9 +52,9 @@ void __init efi_init_memory(void) { }
void efi_update_l4_pgtable(unsigned int l4idx, l4_pgentry_t l4e) { }
-bool_t efi_rs_using_pgtables(void)
+bool efi_rs_using_pgtables(void)
{
- return 0;
+ return false;
}
unsigned long efi_get_time(void)
@@ -64,7 +64,7 @@ unsigned long efi_get_time(void)
}
void efi_halt_system(void) { }
-void efi_reset_system(bool_t warm) { }
+void efi_reset_system(bool warm) { }
int efi_get_info(uint32_t idx, union xenpf_efi_info *info)
{
diff --git a/xen/common/efi/boot.c b/xen/common/efi/boot.c
index daf0c80..11bdc7a 100644
--- a/xen/common/efi/boot.c
+++ b/xen/common/efi/boot.c
@@ -113,11 +113,11 @@ static char *get_value(const struct file *cfg, const char *section,
static char *split_string(char *s);
static CHAR16 *s2w(union string *str);
static char *w2s(const union string *str);
-static bool_t read_file(EFI_FILE_HANDLE dir_handle, CHAR16 *name,
- struct file *file, char *options);
+static bool read_file(EFI_FILE_HANDLE dir_handle, CHAR16 *name,
+ struct file *file, char *options);
static size_t wstrlen(const CHAR16 * s);
static int set_color(u32 mask, int bpp, u8 *pos, u8 *sz);
-static bool_t match_guid(const EFI_GUID *guid1, const EFI_GUID *guid2);
+static bool match_guid(const EFI_GUID *guid1, const EFI_GUID *guid2);
static void efi_init(EFI_HANDLE ImageHandle, EFI_SYSTEM_TABLE *SystemTable);
static void efi_console_set_mode(void);
@@ -138,7 +138,7 @@ static SIMPLE_TEXT_OUTPUT_INTERFACE *__initdata StdOut;
static SIMPLE_TEXT_OUTPUT_INTERFACE *__initdata StdErr;
static UINT32 __initdata mdesc_ver;
-static bool_t __initdata map_bs;
+static bool __initdata map_bs;
static struct file __initdata cfg;
static struct file __initdata kernel;
@@ -307,7 +307,7 @@ static char *__init w2s(const union string *str)
return str->s;
}
-static bool_t __init match_guid(const EFI_GUID *guid1, const EFI_GUID *guid2)
+static bool __init match_guid(const EFI_GUID *guid1, const EFI_GUID *guid2)
{
return guid1->Data1 == guid2->Data1 &&
guid1->Data2 == guid2->Data2 &&
@@ -378,12 +378,12 @@ static unsigned int __init get_argv(unsigned int argc, CHAR16 **argv,
CHAR16 **options)
{
CHAR16 *ptr = (CHAR16 *)(argv + argc + 1), *prev = NULL;
- bool_t prev_sep = TRUE;
+ bool prev_sep = true;
for ( ; cmdsize > sizeof(*cmdline) && *cmdline;
cmdsize -= sizeof(*cmdline), ++cmdline )
{
- bool_t cur_sep = *cmdline == L' ' || *cmdline == L'\t';
+ bool cur_sep = *cmdline == L' ' || *cmdline == L'\t';
if ( !prev_sep )
{
@@ -538,8 +538,8 @@ static char * __init split_string(char *s)
return NULL;
}
-static bool_t __init read_file(EFI_FILE_HANDLE dir_handle, CHAR16 *name,
- struct file *file, char *options)
+static bool __init read_file(EFI_FILE_HANDLE dir_handle, CHAR16 *name,
+ struct file *file, char *options)
{
EFI_FILE_HANDLE FileHandle = NULL;
UINT64 size;
@@ -551,7 +551,7 @@ static bool_t __init read_file(EFI_FILE_HANDLE dir_handle, CHAR16 *name,
ret = dir_handle->Open(dir_handle, &FileHandle, name,
EFI_FILE_MODE_READ, 0);
if ( file == &cfg && ret == EFI_NOT_FOUND )
- return 0;
+ return false;
if ( EFI_ERROR(ret) )
what = L"Open";
else
@@ -611,27 +611,27 @@ static bool_t __init read_file(EFI_FILE_HANDLE dir_handle, CHAR16 *name,
efi_arch_flush_dcache_area(file->ptr, file->size);
- return 1;
+ return true;
}
static void __init pre_parse(const struct file *cfg)
{
char *ptr = cfg->ptr, *end = ptr + cfg->size;
- bool_t start = 1, comment = 0;
+ bool start = true, comment = false;
for ( ; ptr < end; ++ptr )
{
if ( iscntrl(*ptr) )
{
- comment = 0;
- start = 1;
+ comment = false;
+ start = true;
*ptr = 0;
}
else if ( comment || (start && isspace(*ptr)) )
*ptr = 0;
else if ( *ptr == '#' || (start && *ptr == ';') )
{
- comment = 1;
+ comment = true;
*ptr = 0;
}
else
@@ -647,7 +647,7 @@ static char *__init get_value(const struct file *cfg, const char *section,
{
char *ptr = cfg->ptr, *end = ptr + cfg->size;
size_t slen = section ? strlen(section) : 0, ilen = strlen(item);
- bool_t match = !slen;
+ bool match = !slen;
for ( ; ptr < end; ++ptr )
{
@@ -1000,7 +1000,7 @@ static void __init efi_exit_boot(EFI_HANDLE ImageHandle, EFI_SYSTEM_TABLE *Syste
{
EFI_STATUS status;
UINTN info_size = 0, map_key;
- bool_t retry;
+ bool retry;
efi_bs->GetMemoryMap(&info_size, NULL, &map_key,
&efi_mdesc_size, &mdesc_ver);
@@ -1009,7 +1009,7 @@ static void __init efi_exit_boot(EFI_HANDLE ImageHandle, EFI_SYSTEM_TABLE *Syste
if ( !efi_memmap )
blexit(L"Unable to allocate memory for EFI memory map");
- for ( retry = 0; ; retry = 1 )
+ for ( retry = false; ; retry = true )
{
efi_memmap_size = info_size;
status = SystemTable->BootServices->GetMemoryMap(&efi_memmap_size,
@@ -1071,9 +1071,9 @@ efi_start(EFI_HANDLE ImageHandle, EFI_SYSTEM_TABLE *SystemTable)
EFI_SHIM_LOCK_PROTOCOL *shim_lock;
EFI_GRAPHICS_OUTPUT_PROTOCOL *gop = NULL;
union string section = { NULL }, name;
- bool_t base_video = 0;
+ bool base_video = false;
char *option_str;
- bool_t use_cfg_file;
+ bool use_cfg_file;
__set_bit(EFI_BOOT, &efi_flags);
__set_bit(EFI_LOADER, &efi_flags);
@@ -1115,9 +1115,9 @@ efi_start(EFI_HANDLE ImageHandle, EFI_SYSTEM_TABLE *SystemTable)
if ( *ptr == L'/' || *ptr == L'-' )
{
if ( wstrcmp(ptr + 1, L"basevideo") == 0 )
- base_video = 1;
+ base_video = true;
else if ( wstrcmp(ptr + 1, L"mapbs") == 0 )
- map_bs = 1;
+ map_bs = true;
else if ( wstrncmp(ptr + 1, L"cfg=", 4) == 0 )
cfg_file_name = ptr + 5;
else if ( i + 1 < argc && wstrcmp(ptr + 1, L"cfg") == 0 )
@@ -1304,14 +1304,14 @@ efi_start(EFI_HANDLE ImageHandle, EFI_SYSTEM_TABLE *SystemTable)
#ifndef CONFIG_ARM /* TODO - runtime service support */
-static bool_t __initdata efi_map_uc;
+static bool __initdata efi_map_uc;
static void __init parse_efi_param(char *s)
{
char *ss;
do {
- bool_t val = !!strncmp(s, "no-", 3);
+ bool val = strncmp(s, "no-", 3);
if ( !val )
s += 3;
@@ -1337,8 +1337,8 @@ custom_param("efi", parse_efi_param);
#ifndef USE_SET_VIRTUAL_ADDRESS_MAP
static __init void copy_mapping(unsigned long mfn, unsigned long end,
- bool_t (*is_valid)(unsigned long smfn,
- unsigned long emfn))
+ bool (*is_valid)(unsigned long smfn,
+ unsigned long emfn))
{
unsigned long next;
@@ -1366,7 +1366,7 @@ static __init void copy_mapping(unsigned long mfn, unsigned long end,
}
}
-static bool_t __init ram_range_valid(unsigned long smfn, unsigned long emfn)
+static bool __init ram_range_valid(unsigned long smfn, unsigned long emfn)
{
unsigned long sz = pfn_to_pdx(emfn - 1) / PDX_GROUP_COUNT + 1;
@@ -1375,9 +1375,9 @@ static bool_t __init ram_range_valid(unsigned long smfn, unsigned long emfn)
pfn_to_pdx(smfn) / PDX_GROUP_COUNT) < sz;
}
-static bool_t __init rt_range_valid(unsigned long smfn, unsigned long emfn)
+static bool __init rt_range_valid(unsigned long smfn, unsigned long emfn)
{
- return 1;
+ return true;
}
#endif
diff --git a/xen/common/efi/runtime.c b/xen/common/efi/runtime.c
index 20bc532..c38f00a 100644
--- a/xen/common/efi/runtime.c
+++ b/xen/common/efi/runtime.c
@@ -136,7 +136,7 @@ void efi_rs_leave(struct efi_rs_state *state)
stts();
}
-bool_t efi_rs_using_pgtables(void)
+bool efi_rs_using_pgtables(void)
{
return efi_l4_pgtable &&
(smp_processor_id() == efi_rs_on_cpu) &&
@@ -177,7 +177,7 @@ void efi_halt_system(void)
printk(XENLOG_WARNING "EFI: could not halt system (%#lx)\n", status);
}
-void efi_reset_system(bool_t warm)
+void efi_reset_system(bool warm)
{
EFI_STATUS status;
struct efi_rs_state state = efi_rs_enter();
diff --git a/xen/include/xen/efi.h b/xen/include/xen/efi.h
index 68c68a8..44b7d3e 100644
--- a/xen/include/xen/efi.h
+++ b/xen/include/xen/efi.h
@@ -33,10 +33,10 @@ struct compat_pf_efi_runtime_call;
bool efi_enabled(unsigned int feature);
void efi_init_memory(void);
-bool_t efi_rs_using_pgtables(void);
+bool efi_rs_using_pgtables(void);
unsigned long efi_get_time(void);
void efi_halt_system(void);
-void efi_reset_system(bool_t warm);
+void efi_reset_system(bool warm);
#ifndef COMPAT
int efi_get_info(uint32_t idx, union xenpf_efi_info *);
int efi_runtime_call(struct xenpf_efi_runtime_call *);
--
2.1.4
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply related [flat|nested] 12+ messages in thread
* [PATCH 4/4] x86/shadow: Switch to using bool
2017-06-28 11:16 [PATCH 1/4] xen/tmem: Switch to using bool Andrew Cooper
2017-06-28 11:16 ` [PATCH 2/4] xen/flask: " Andrew Cooper
2017-06-28 11:16 ` [PATCH 3/4] xen/efi: " Andrew Cooper
@ 2017-06-28 11:16 ` Andrew Cooper
2017-06-28 13:55 ` Tim Deegan
2017-06-28 13:08 ` [PATCH 1/4] xen/tmem: " Wei Liu
2017-06-28 13:53 ` Konrad Rzeszutek Wilk
4 siblings, 1 reply; 12+ messages in thread
From: Andrew Cooper @ 2017-06-28 11:16 UTC (permalink / raw)
To: Xen-devel; +Cc: Andrew Cooper, Tim Deegan
sh_pin() has boolean properties, so switch its return type.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Tim Deegan <tim@xen.org>
---
xen/arch/x86/mm/shadow/common.c | 4 ++--
xen/arch/x86/mm/shadow/multi.c | 38 +++++++++++++++++++-------------------
xen/arch/x86/mm/shadow/none.c | 6 +++---
xen/arch/x86/mm/shadow/private.h | 12 ++++++------
xen/include/asm-x86/shadow.h | 2 +-
5 files changed, 31 insertions(+), 31 deletions(-)
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 2e64a77..36f5746 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -40,7 +40,7 @@
DEFINE_PER_CPU(uint32_t,trace_shadow_path_flags);
-static int sh_enable_log_dirty(struct domain *, bool_t log_global);
+static int sh_enable_log_dirty(struct domain *, bool log_global);
static int sh_disable_log_dirty(struct domain *);
static void sh_clean_dirty_bitmap(struct domain *);
@@ -3553,7 +3553,7 @@ shadow_write_p2m_entry(struct domain *d, unsigned long gfn,
/* Shadow specific code which is called in paging_log_dirty_enable().
* Return 0 if no problem found.
*/
-static int sh_enable_log_dirty(struct domain *d, bool_t log_global)
+static int sh_enable_log_dirty(struct domain *d, bool log_global)
{
int ret;
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index f65ffc6..cbf5a35 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -367,9 +367,9 @@ static void sh_audit_gw(struct vcpu *v, const walk_t *gw)
/*
* Write a new value into the guest pagetable, and update the shadows
- * appropriately. Returns 0 if we page-faulted, 1 for success.
+ * appropriately. Returns false if we page-faulted, true for success.
*/
-static bool_t
+static bool
sh_write_guest_entry(struct vcpu *v, intpte_t *p, intpte_t new, mfn_t gmfn)
{
#if CONFIG_PAGING_LEVELS == GUEST_PAGING_LEVELS
@@ -383,17 +383,17 @@ sh_write_guest_entry(struct vcpu *v, intpte_t *p, intpte_t new, mfn_t gmfn)
return !failed;
#else
- return 0;
+ return false;
#endif
}
/*
* Cmpxchg a new value into the guest pagetable, and update the shadows
- * appropriately. Returns 0 if we page-faulted, 1 if not.
+ * appropriately. Returns false if we page-faulted, true if not.
* N.B. caller should check the value of "old" to see if the cmpxchg itself
* was successful.
*/
-static bool_t
+static bool
sh_cmpxchg_guest_entry(struct vcpu *v, intpte_t *p, intpte_t *old,
intpte_t new, mfn_t gmfn)
{
@@ -410,7 +410,7 @@ sh_cmpxchg_guest_entry(struct vcpu *v, intpte_t *p, intpte_t *old,
return !failed;
#else
- return 0;
+ return false;
#endif
}
@@ -530,7 +530,7 @@ _sh_propagate(struct vcpu *v,
gfn_t target_gfn = guest_l1e_get_gfn(guest_entry);
u32 pass_thru_flags;
u32 gflags, sflags;
- bool_t mmio_mfn;
+ bool mmio_mfn;
/* We don't shadow PAE l3s */
ASSERT(GUEST_PAGING_LEVELS > 3 || level != 3);
@@ -3620,7 +3620,7 @@ static int sh_page_fault(struct vcpu *v,
* instruction should be issued on the hardware, or 0 if it's safe not
* to do so.
*/
-static bool_t sh_invlpg(struct vcpu *v, unsigned long va)
+static bool sh_invlpg(struct vcpu *v, unsigned long va)
{
mfn_t sl1mfn;
shadow_l2e_t sl2e;
@@ -3645,7 +3645,7 @@ static bool_t sh_invlpg(struct vcpu *v, unsigned long va)
if ( !(shadow_l4e_get_flags(
sh_linear_l4_table(v)[shadow_l4_linear_offset(va)])
& _PAGE_PRESENT) )
- return 0;
+ return false;
/* This must still be a copy-from-user because we don't have the
* paging lock, and the higher-level shadows might disappear
* under our feet. */
@@ -3654,16 +3654,16 @@ static bool_t sh_invlpg(struct vcpu *v, unsigned long va)
sizeof (sl3e)) != 0 )
{
perfc_incr(shadow_invlpg_fault);
- return 0;
+ return false;
}
if ( !(shadow_l3e_get_flags(sl3e) & _PAGE_PRESENT) )
- return 0;
+ return false;
}
#else /* SHADOW_PAGING_LEVELS == 3 */
if ( !(l3e_get_flags(v->arch.paging.shadow.l3table[shadow_l3_linear_offset(va)])
& _PAGE_PRESENT) )
// no need to flush anything if there's no SL2...
- return 0;
+ return false;
#endif
/* This must still be a copy-from-user because we don't have the shadow
@@ -3673,14 +3673,14 @@ static bool_t sh_invlpg(struct vcpu *v, unsigned long va)
sizeof (sl2e)) != 0 )
{
perfc_incr(shadow_invlpg_fault);
- return 0;
+ return false;
}
// If there's nothing shadowed for this particular sl2e, then
// there is no need to do an invlpg, either...
//
if ( !(shadow_l2e_get_flags(sl2e) & _PAGE_PRESENT) )
- return 0;
+ return false;
// Check to see if the SL2 is a splintered superpage...
// If so, then we'll need to flush the entire TLB (because that's
@@ -3691,7 +3691,7 @@ static bool_t sh_invlpg(struct vcpu *v, unsigned long va)
== SH_type_fl1_shadow )
{
flush_tlb_local();
- return 0;
+ return false;
}
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
@@ -3718,13 +3718,13 @@ static bool_t sh_invlpg(struct vcpu *v, unsigned long va)
{
perfc_incr(shadow_invlpg_fault);
paging_unlock(d);
- return 0;
+ return false;
}
if ( !(shadow_l2e_get_flags(sl2e) & _PAGE_PRESENT) )
{
paging_unlock(d);
- return 0;
+ return false;
}
sl1mfn = shadow_l2e_get_mfn(sl2e);
@@ -3742,12 +3742,12 @@ static bool_t sh_invlpg(struct vcpu *v, unsigned long va)
}
paging_unlock(d);
/* Need the invlpg, to pick up the disappeareance of the sl1e */
- return 1;
+ return true;
}
}
#endif
- return 1;
+ return true;
}
diff --git a/xen/arch/x86/mm/shadow/none.c b/xen/arch/x86/mm/shadow/none.c
index 41ce593..9e6ad23 100644
--- a/xen/arch/x86/mm/shadow/none.c
+++ b/xen/arch/x86/mm/shadow/none.c
@@ -1,7 +1,7 @@
#include <xen/mm.h>
#include <asm/shadow.h>
-static int _enable_log_dirty(struct domain *d, bool_t log_global)
+static int _enable_log_dirty(struct domain *d, bool log_global)
{
ASSERT(is_pv_domain(d));
return -EOPNOTSUPP;
@@ -37,10 +37,10 @@ static int _page_fault(struct vcpu *v, unsigned long va,
return 0;
}
-static bool_t _invlpg(struct vcpu *v, unsigned long va)
+static bool _invlpg(struct vcpu *v, unsigned long va)
{
ASSERT_UNREACHABLE();
- return 1;
+ return true;
}
static unsigned long _gva_to_gfn(struct vcpu *v, struct p2m_domain *p2m,
diff --git a/xen/arch/x86/mm/shadow/private.h b/xen/arch/x86/mm/shadow/private.h
index 472676c..2e6a060 100644
--- a/xen/arch/x86/mm/shadow/private.h
+++ b/xen/arch/x86/mm/shadow/private.h
@@ -624,13 +624,13 @@ prev_pinned_shadow(struct page_info *page,
/* Pin a shadow page: take an extra refcount, set the pin bit,
* and put the shadow at the head of the list of pinned shadows.
- * Returns 0 for failure, 1 for success. */
-static inline int sh_pin(struct domain *d, mfn_t smfn)
+ * Returns false for failure, true for success. */
+static inline bool sh_pin(struct domain *d, mfn_t smfn)
{
struct page_info *sp[4];
struct page_list_head *pin_list;
unsigned int i, pages;
- bool_t already_pinned;
+ bool already_pinned;
ASSERT(mfn_valid(smfn));
sp[0] = mfn_to_page(smfn);
@@ -641,7 +641,7 @@ static inline int sh_pin(struct domain *d, mfn_t smfn)
pin_list = &d->arch.paging.shadow.pinned_shadows;
if ( already_pinned && sp[0] == page_list_first(pin_list) )
- return 1;
+ return true;
/* Treat the up-to-four pages of the shadow as a unit in the list ops */
for ( i = 1; i < pages; i++ )
@@ -661,7 +661,7 @@ static inline int sh_pin(struct domain *d, mfn_t smfn)
{
/* Not pinned: pin it! */
if ( !sh_get_ref(d, smfn, 0) )
- return 0;
+ return false;
sp[0]->u.sh.pinned = 1;
}
@@ -669,7 +669,7 @@ static inline int sh_pin(struct domain *d, mfn_t smfn)
for ( i = pages; i > 0; i-- )
page_list_add(sp[i - 1], pin_list);
- return 1;
+ return true;
}
/* Unpin a shadow page: unset the pin bit, take the shadow off the list
diff --git a/xen/include/asm-x86/shadow.h b/xen/include/asm-x86/shadow.h
index 7e1ed3b..668930a 100644
--- a/xen/include/asm-x86/shadow.h
+++ b/xen/include/asm-x86/shadow.h
@@ -102,7 +102,7 @@ int shadow_set_allocation(struct domain *d, unsigned int pages,
({ ASSERT_UNREACHABLE(); -EOPNOTSUPP; })
static inline void sh_remove_shadows(struct domain *d, mfn_t gmfn,
- bool_t fast, bool_t all) {}
+ bool fast, bool all) {}
static inline void shadow_blow_tables_per_domain(struct domain *d) {}
--
2.1.4
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply related [flat|nested] 12+ messages in thread
* Re: [PATCH 3/4] xen/efi: Switch to using bool
2017-06-28 11:16 ` [PATCH 3/4] xen/efi: " Andrew Cooper
@ 2017-06-28 11:53 ` Jan Beulich
0 siblings, 0 replies; 12+ messages in thread
From: Jan Beulich @ 2017-06-28 11:53 UTC (permalink / raw)
To: andrew.cooper3; +Cc: xen-devel
>>> Andrew Cooper <andrew.cooper3@citrix.com> 06/28/17 1:17 PM >>>
>Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH 1/4] xen/tmem: Switch to using bool
2017-06-28 11:16 [PATCH 1/4] xen/tmem: Switch to using bool Andrew Cooper
` (2 preceding siblings ...)
2017-06-28 11:16 ` [PATCH 4/4] x86/shadow: " Andrew Cooper
@ 2017-06-28 13:08 ` Wei Liu
2017-06-28 13:53 ` Konrad Rzeszutek Wilk
4 siblings, 0 replies; 12+ messages in thread
From: Wei Liu @ 2017-06-28 13:08 UTC (permalink / raw)
To: Andrew Cooper; +Cc: Wei Liu, Xen-devel
There doesn't seem to be a cover letter.
All four patches:
Reviewed-by: Wei Liu <wei.liu2@citrix.com>
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH 1/4] xen/tmem: Switch to using bool
2017-06-28 11:16 [PATCH 1/4] xen/tmem: Switch to using bool Andrew Cooper
` (3 preceding siblings ...)
2017-06-28 13:08 ` [PATCH 1/4] xen/tmem: " Wei Liu
@ 2017-06-28 13:53 ` Konrad Rzeszutek Wilk
2017-06-28 13:54 ` Andrew Cooper
4 siblings, 1 reply; 12+ messages in thread
From: Konrad Rzeszutek Wilk @ 2017-06-28 13:53 UTC (permalink / raw)
To: Andrew Cooper; +Cc: Xen-devel
On Wed, Jun 28, 2017 at 12:16:19PM +0100, Andrew Cooper wrote:
> * Drop redundant initialisers
> * Style corrections while changing client_over_quota()
> * Drop all write-only bools from do_tmem_op()
s/write-only/useless write-only/
>
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
> ---
> CC: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
> ---
> xen/common/tmem.c | 39 ++++++++++++++++-----------------------
> xen/common/tmem_control.c | 16 ++++++++--------
> xen/common/tmem_xen.c | 12 ++++++------
> xen/include/xen/tmem_xen.h | 24 ++++++++++++------------
> 4 files changed, 42 insertions(+), 49 deletions(-)
>
> diff --git a/xen/common/tmem.c b/xen/common/tmem.c
> index 306dda6..c955cf7 100644
> --- a/xen/common/tmem.c
> +++ b/xen/common/tmem.c
> @@ -71,7 +71,7 @@ struct tmem_page_descriptor {
> pagesize_t size; /* 0 == PAGE_SIZE (pfp), -1 == data invalid,
> else compressed data (cdata). */
> uint32_t index;
> - bool_t eviction_attempted; /* CHANGE TO lifetimes? (settable). */
> + bool eviction_attempted; /* CHANGE TO lifetimes? (settable). */
> union {
> struct page_info *pfp; /* Page frame pointer. */
> char *cdata; /* Compressed data. */
> @@ -884,39 +884,41 @@ static void client_flush(struct client *client)
> client_free(client);
> }
>
> -static bool_t client_over_quota(struct client *client)
> +static bool client_over_quota(const struct client *client)
> {
> int total = _atomic_read(tmem_global.client_weight_total);
>
> ASSERT(client != NULL);
> if ( (total == 0) || (client->info.weight == 0) ||
> (client->eph_count == 0) )
> - return 0;
> - return ( ((tmem_global.eph_count*100L) / client->eph_count ) >
> - ((total*100L) / client->info.weight) );
> + return false;
> +
> + return (((tmem_global.eph_count * 100L) / client->eph_count) >
> + ((total * 100L) / client->info.weight));
> }
>
> /************ MEMORY REVOCATION ROUTINES *******************************/
>
> -static bool_t tmem_try_to_evict_pgp(struct tmem_page_descriptor *pgp, bool_t *hold_pool_rwlock)
> +static bool tmem_try_to_evict_pgp(struct tmem_page_descriptor *pgp,
> + bool *hold_pool_rwlock)
> {
> struct tmem_object_root *obj = pgp->us.obj;
> struct tmem_pool *pool = obj->pool;
>
> if ( pool->is_dying )
> - return 0;
> + return false;
> if ( spin_trylock(&obj->obj_spinlock) )
> {
> if ( obj->pgp_count > 1 )
> - return 1;
> + return true;
> if ( write_trylock(&pool->pool_rwlock) )
> {
> *hold_pool_rwlock = 1;
> - return 1;
> + return true;
> }
> spin_unlock(&obj->obj_spinlock);
> }
> - return 0;
> + return false;
> }
>
> int tmem_evict(void)
> @@ -926,7 +928,7 @@ int tmem_evict(void)
> struct tmem_object_root *obj;
> struct tmem_pool *pool;
> int ret = 0;
> - bool_t hold_pool_rwlock = 0;
> + bool hold_pool_rwlock = false;
>
> tmem_stats.evict_attempts++;
> spin_lock(&eph_lists_spinlock);
> @@ -995,7 +997,7 @@ int tmem_evict(void)
> * is a minimum amount of memory (1MB) available BEFORE any data structure
> * locks are held.
> */
> -static inline bool_t tmem_ensure_avail_pages(void)
> +static inline bool tmem_ensure_avail_pages(void)
> {
> int failed_evict = 10;
> unsigned long free_mem;
> @@ -1004,12 +1006,12 @@ static inline bool_t tmem_ensure_avail_pages(void)
> free_mem = (tmem_page_list_pages + total_free_pages())
> >> (20 - PAGE_SHIFT);
> if ( free_mem )
> - return 1;
> + return true;
> if ( !tmem_evict() )
> failed_evict--;
> } while ( failed_evict > 0 );
>
> - return 0;
> + return false;
> }
>
> /************ TMEM CORE OPERATIONS ************************************/
> @@ -1879,9 +1881,6 @@ long do_tmem_op(tmem_cli_op_t uops)
> struct tmem_pool *pool = NULL;
> struct xen_tmem_oid *oidp;
> int rc = 0;
> - bool_t succ_get = 0, succ_put = 0;
> - bool_t non_succ_get = 0, non_succ_put = 0;
> - bool_t flush = 0, flush_obj = 0;
>
> if ( !tmem_initialized )
> return -ENODEV;
> @@ -1965,22 +1964,16 @@ long do_tmem_op(tmem_cli_op_t uops)
> tmem_cli_buf_null);
> else
> rc = -ENOMEM;
> - if (rc == 1) succ_put = 1;
> - else non_succ_put = 1;
> break;
> case TMEM_GET_PAGE:
> rc = do_tmem_get(pool, oidp, op.u.gen.index, op.u.gen.cmfn,
> tmem_cli_buf_null);
> - if (rc == 1) succ_get = 1;
> - else non_succ_get = 1;
> break;
> case TMEM_FLUSH_PAGE:
> - flush = 1;
> rc = do_tmem_flush_page(pool, oidp, op.u.gen.index);
> break;
> case TMEM_FLUSH_OBJECT:
> rc = do_tmem_flush_object(pool, oidp);
> - flush_obj = 1;
> break;
> default:
> tmem_client_warn("tmem: op %d not implemented\n", op.cmd);
> diff --git a/xen/common/tmem_control.c b/xen/common/tmem_control.c
> index 2d980e3..30bf6fb 100644
> --- a/xen/common/tmem_control.c
> +++ b/xen/common/tmem_control.c
> @@ -19,8 +19,8 @@
> static int tmemc_freeze_pools(domid_t cli_id, int arg)
> {
> struct client *client;
> - bool_t freeze = (arg == XEN_SYSCTL_TMEM_OP_FREEZE) ? 1 : 0;
> - bool_t destroy = (arg == XEN_SYSCTL_TMEM_OP_DESTROY) ? 1 : 0;
> + bool freeze = arg == XEN_SYSCTL_TMEM_OP_FREEZE;
> + bool destroy = arg == XEN_SYSCTL_TMEM_OP_DESTROY;
> char *s;
>
> s = destroy ? "destroyed" : ( freeze ? "frozen" : "thawed" );
> @@ -96,12 +96,12 @@ static int tmemc_flush_mem(domid_t cli_id, uint32_t kb)
> #define BSIZE 1024
>
> static int tmemc_list_client(struct client *c, tmem_cli_va_param_t buf,
> - int off, uint32_t len, bool_t use_long)
> + int off, uint32_t len, bool use_long)
> {
> char info[BSIZE];
> int i, n = 0, sum = 0;
> struct tmem_pool *p;
> - bool_t s;
> + bool s;
>
> n = scnprintf(info,BSIZE,"C=CI:%d,ww:%d,co:%d,fr:%d,"
> "Tc:%"PRIu64",Ge:%ld,Pp:%ld,Gp:%ld%c",
> @@ -149,7 +149,7 @@ static int tmemc_list_client(struct client *c, tmem_cli_va_param_t buf,
> }
>
> static int tmemc_list_shared(tmem_cli_va_param_t buf, int off, uint32_t len,
> - bool_t use_long)
> + bool use_long)
> {
> char info[BSIZE];
> int i, n = 0, sum = 0;
> @@ -188,7 +188,7 @@ static int tmemc_list_shared(tmem_cli_va_param_t buf, int off, uint32_t len,
> }
>
> static int tmemc_list_global_perf(tmem_cli_va_param_t buf, int off,
> - uint32_t len, bool_t use_long)
> + uint32_t len, bool use_long)
> {
> char info[BSIZE];
> int n = 0, sum = 0;
> @@ -204,7 +204,7 @@ static int tmemc_list_global_perf(tmem_cli_va_param_t buf, int off,
> }
>
> static int tmemc_list_global(tmem_cli_va_param_t buf, int off, uint32_t len,
> - bool_t use_long)
> + bool use_long)
> {
> char info[BSIZE];
> int n = 0, sum = off;
> @@ -238,7 +238,7 @@ static int tmemc_list_global(tmem_cli_va_param_t buf, int off, uint32_t len,
> }
>
> static int tmemc_list(domid_t cli_id, tmem_cli_va_param_t buf, uint32_t len,
> - bool_t use_long)
> + bool use_long)
> {
> struct client *client;
> int off = 0;
> diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c
> index 725ae93..20f74b2 100644
> --- a/xen/common/tmem_xen.c
> +++ b/xen/common/tmem_xen.c
> @@ -14,10 +14,10 @@
> #include <xen/cpu.h>
> #include <xen/init.h>
>
> -bool_t __read_mostly opt_tmem = 0;
> +bool __read_mostly opt_tmem;
> boolean_param("tmem", opt_tmem);
>
> -bool_t __read_mostly opt_tmem_compress = 0;
> +bool __read_mostly opt_tmem_compress;
> boolean_param("tmem_compress", opt_tmem_compress);
>
> atomic_t freeable_page_count = ATOMIC_INIT(0);
> @@ -32,14 +32,14 @@ static DEFINE_PER_CPU_READ_MOSTLY(void *, scratch_page);
>
> #if defined(CONFIG_ARM)
> static inline void *cli_get_page(xen_pfn_t cmfn, unsigned long *pcli_mfn,
> - struct page_info **pcli_pfp, bool_t cli_write)
> + struct page_info **pcli_pfp, bool cli_write)
> {
> ASSERT_UNREACHABLE();
> return NULL;
> }
>
> static inline void cli_put_page(void *cli_va, struct page_info *cli_pfp,
> - unsigned long cli_mfn, bool_t mark_dirty)
> + unsigned long cli_mfn, bool mark_dirty)
> {
> ASSERT_UNREACHABLE();
> }
> @@ -47,7 +47,7 @@ static inline void cli_put_page(void *cli_va, struct page_info *cli_pfp,
> #include <asm/p2m.h>
>
> static inline void *cli_get_page(xen_pfn_t cmfn, unsigned long *pcli_mfn,
> - struct page_info **pcli_pfp, bool_t cli_write)
> + struct page_info **pcli_pfp, bool cli_write)
> {
> p2m_type_t t;
> struct page_info *page;
> @@ -72,7 +72,7 @@ static inline void *cli_get_page(xen_pfn_t cmfn, unsigned long *pcli_mfn,
> }
>
> static inline void cli_put_page(void *cli_va, struct page_info *cli_pfp,
> - unsigned long cli_mfn, bool_t mark_dirty)
> + unsigned long cli_mfn, bool mark_dirty)
> {
> if ( mark_dirty )
> {
> diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h
> index dc5888c..542c0b3 100644
> --- a/xen/include/xen/tmem_xen.h
> +++ b/xen/include/xen/tmem_xen.h
> @@ -35,27 +35,27 @@ extern atomic_t freeable_page_count;
> extern int tmem_init(void);
> #define tmem_hash hash_long
>
> -extern bool_t opt_tmem_compress;
> -static inline bool_t tmem_compression_enabled(void)
> +extern bool opt_tmem_compress;
> +static inline bool tmem_compression_enabled(void)
> {
> return opt_tmem_compress;
> }
>
> #ifdef CONFIG_TMEM
> -extern bool_t opt_tmem;
> -static inline bool_t tmem_enabled(void)
> +extern bool opt_tmem;
> +static inline bool tmem_enabled(void)
> {
> return opt_tmem;
> }
>
> static inline void tmem_disable(void)
> {
> - opt_tmem = 0;
> + opt_tmem = false;
> }
> #else
> -static inline bool_t tmem_enabled(void)
> +static inline bool tmem_enabled(void)
> {
> - return 0;
> + return false;
> }
>
> static inline void tmem_disable(void)
> @@ -266,7 +266,7 @@ struct tmem_global {
> struct list_head ephemeral_page_list; /* All pages in ephemeral pools. */
> struct list_head client_list;
> struct tmem_pool *shared_pools[MAX_GLOBAL_SHARED_POOLS];
> - bool_t shared_auth;
> + bool shared_auth;
> long eph_count; /* Atomicity depends on eph_lists_spinlock. */
> atomic_t client_weight_total;
> };
> @@ -286,7 +286,7 @@ struct client {
> domid_t cli_id;
> xen_tmem_client_t info;
> /* For save/restore/migration. */
> - bool_t was_frozen;
> + bool was_frozen;
> struct list_head persistent_invalidated_list;
> struct tmem_page_descriptor *cur_pgp;
> /* Statistics collection. */
> @@ -307,9 +307,9 @@ struct client {
> #define is_shared(_p) (_p->shared)
>
> struct tmem_pool {
> - bool_t shared;
> - bool_t persistent;
> - bool_t is_dying;
> + bool shared;
> + bool persistent;
> + bool is_dying;
> struct client *client;
> uint64_t uuid[2]; /* 0 for private, non-zero for shared. */
> uint32_t pool_id;
> --
> 2.1.4
>
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH 1/4] xen/tmem: Switch to using bool
2017-06-28 13:53 ` Konrad Rzeszutek Wilk
@ 2017-06-28 13:54 ` Andrew Cooper
0 siblings, 0 replies; 12+ messages in thread
From: Andrew Cooper @ 2017-06-28 13:54 UTC (permalink / raw)
To: Konrad Rzeszutek Wilk; +Cc: Xen-devel
On 28/06/17 14:53, Konrad Rzeszutek Wilk wrote:
> On Wed, Jun 28, 2017 at 12:16:19PM +0100, Andrew Cooper wrote:
>> * Drop redundant initialisers
>> * Style corrections while changing client_over_quota()
>> * Drop all write-only bools from do_tmem_op()
> s/write-only/useless write-only/
:) The useless was implicit, but I will change it if you really want.
>
>> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
> Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Thanks.
~Andrew
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH 4/4] x86/shadow: Switch to using bool
2017-06-28 11:16 ` [PATCH 4/4] x86/shadow: " Andrew Cooper
@ 2017-06-28 13:55 ` Tim Deegan
2017-06-30 15:40 ` [PATCH v2 " Andrew Cooper
0 siblings, 1 reply; 12+ messages in thread
From: Tim Deegan @ 2017-06-28 13:55 UTC (permalink / raw)
To: Andrew Cooper; +Cc: Xen-devel
Hi,
At 12:16 +0100 on 28 Jun (1498652182), Andrew Cooper wrote:
> sh_pin() has boolean properties, so switch its return type.
>
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Good idea, thanks.
> -static bool_t
> +static bool
> sh_write_guest_entry(struct vcpu *v, intpte_t *p, intpte_t new, mfn_t gmfn)
Can you please update paging.h too? We need matching changes to
write_guest_entry and cmpxchg_guest_entry in struct shadow_paging_mode,
and invlpg in struct paging_mode.
I'm a little surprised that the compiler doesn't complain. I suppose
the implicit promotion to int makes it all equivalent.
> @@ -3620,7 +3620,7 @@ static int sh_page_fault(struct vcpu *v,
> * instruction should be issued on the hardware, or 0 if it's safe not
> * to do so.
> */
> -static bool_t sh_invlpg(struct vcpu *v, unsigned long va)
> +static bool sh_invlpg(struct vcpu *v, unsigned long va)
This comment needs to be updated too.
> @@ -102,7 +102,7 @@ int shadow_set_allocation(struct domain *d, unsigned int pages,
> ({ ASSERT_UNREACHABLE(); -EOPNOTSUPP; })
>
> static inline void sh_remove_shadows(struct domain *d, mfn_t gmfn,
> - bool_t fast, bool_t all) {}
> + bool fast, bool all) {}
Actually, please make these ints, to match the main implementation.
Cheers,
Tim.
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 12+ messages in thread
* [PATCH v2 4/4] x86/shadow: Switch to using bool
2017-06-28 13:55 ` Tim Deegan
@ 2017-06-30 15:40 ` Andrew Cooper
2017-06-30 15:44 ` Tim Deegan
0 siblings, 1 reply; 12+ messages in thread
From: Andrew Cooper @ 2017-06-30 15:40 UTC (permalink / raw)
To: Xen-devel; +Cc: Andrew Cooper, Tim Deegan
* sh_pin() has boolean properties, so switch its return type.
* sh_remove_shadows() uses ints everywhere other than its stub.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Tim Deegan <tim@xen.org>
v2:
* Switch sh_remove_shadows() back to using ints.
* Fix more comments and prototypes.
---
xen/arch/x86/mm/shadow/common.c | 4 ++--
xen/arch/x86/mm/shadow/multi.c | 42 ++++++++++++++++++++--------------------
xen/arch/x86/mm/shadow/none.c | 6 +++---
xen/arch/x86/mm/shadow/private.h | 16 ++++++++-------
xen/include/asm-x86/paging.h | 33 +++++++++++++++++--------------
xen/include/asm-x86/shadow.h | 2 +-
6 files changed, 54 insertions(+), 49 deletions(-)
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 2e64a77..36f5746 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -40,7 +40,7 @@
DEFINE_PER_CPU(uint32_t,trace_shadow_path_flags);
-static int sh_enable_log_dirty(struct domain *, bool_t log_global);
+static int sh_enable_log_dirty(struct domain *, bool log_global);
static int sh_disable_log_dirty(struct domain *);
static void sh_clean_dirty_bitmap(struct domain *);
@@ -3553,7 +3553,7 @@ shadow_write_p2m_entry(struct domain *d, unsigned long gfn,
/* Shadow specific code which is called in paging_log_dirty_enable().
* Return 0 if no problem found.
*/
-static int sh_enable_log_dirty(struct domain *d, bool_t log_global)
+static int sh_enable_log_dirty(struct domain *d, bool log_global)
{
int ret;
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index f65ffc6..c9c2252 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -367,9 +367,9 @@ static void sh_audit_gw(struct vcpu *v, const walk_t *gw)
/*
* Write a new value into the guest pagetable, and update the shadows
- * appropriately. Returns 0 if we page-faulted, 1 for success.
+ * appropriately. Returns false if we page-faulted, true for success.
*/
-static bool_t
+static bool
sh_write_guest_entry(struct vcpu *v, intpte_t *p, intpte_t new, mfn_t gmfn)
{
#if CONFIG_PAGING_LEVELS == GUEST_PAGING_LEVELS
@@ -383,17 +383,17 @@ sh_write_guest_entry(struct vcpu *v, intpte_t *p, intpte_t new, mfn_t gmfn)
return !failed;
#else
- return 0;
+ return false;
#endif
}
/*
* Cmpxchg a new value into the guest pagetable, and update the shadows
- * appropriately. Returns 0 if we page-faulted, 1 if not.
+ * appropriately. Returns false if we page-faulted, true if not.
* N.B. caller should check the value of "old" to see if the cmpxchg itself
* was successful.
*/
-static bool_t
+static bool
sh_cmpxchg_guest_entry(struct vcpu *v, intpte_t *p, intpte_t *old,
intpte_t new, mfn_t gmfn)
{
@@ -410,7 +410,7 @@ sh_cmpxchg_guest_entry(struct vcpu *v, intpte_t *p, intpte_t *old,
return !failed;
#else
- return 0;
+ return false;
#endif
}
@@ -530,7 +530,7 @@ _sh_propagate(struct vcpu *v,
gfn_t target_gfn = guest_l1e_get_gfn(guest_entry);
u32 pass_thru_flags;
u32 gflags, sflags;
- bool_t mmio_mfn;
+ bool mmio_mfn;
/* We don't shadow PAE l3s */
ASSERT(GUEST_PAGING_LEVELS > 3 || level != 3);
@@ -3616,11 +3616,11 @@ static int sh_page_fault(struct vcpu *v,
/*
- * Called when the guest requests an invlpg. Returns 1 if the invlpg
- * instruction should be issued on the hardware, or 0 if it's safe not
+ * Called when the guest requests an invlpg. Returns true if the invlpg
+ * instruction should be issued on the hardware, or false if it's safe not
* to do so.
*/
-static bool_t sh_invlpg(struct vcpu *v, unsigned long va)
+static bool sh_invlpg(struct vcpu *v, unsigned long va)
{
mfn_t sl1mfn;
shadow_l2e_t sl2e;
@@ -3645,7 +3645,7 @@ static bool_t sh_invlpg(struct vcpu *v, unsigned long va)
if ( !(shadow_l4e_get_flags(
sh_linear_l4_table(v)[shadow_l4_linear_offset(va)])
& _PAGE_PRESENT) )
- return 0;
+ return false;
/* This must still be a copy-from-user because we don't have the
* paging lock, and the higher-level shadows might disappear
* under our feet. */
@@ -3654,16 +3654,16 @@ static bool_t sh_invlpg(struct vcpu *v, unsigned long va)
sizeof (sl3e)) != 0 )
{
perfc_incr(shadow_invlpg_fault);
- return 0;
+ return false;
}
if ( !(shadow_l3e_get_flags(sl3e) & _PAGE_PRESENT) )
- return 0;
+ return false;
}
#else /* SHADOW_PAGING_LEVELS == 3 */
if ( !(l3e_get_flags(v->arch.paging.shadow.l3table[shadow_l3_linear_offset(va)])
& _PAGE_PRESENT) )
// no need to flush anything if there's no SL2...
- return 0;
+ return false;
#endif
/* This must still be a copy-from-user because we don't have the shadow
@@ -3673,14 +3673,14 @@ static bool_t sh_invlpg(struct vcpu *v, unsigned long va)
sizeof (sl2e)) != 0 )
{
perfc_incr(shadow_invlpg_fault);
- return 0;
+ return false;
}
// If there's nothing shadowed for this particular sl2e, then
// there is no need to do an invlpg, either...
//
if ( !(shadow_l2e_get_flags(sl2e) & _PAGE_PRESENT) )
- return 0;
+ return false;
// Check to see if the SL2 is a splintered superpage...
// If so, then we'll need to flush the entire TLB (because that's
@@ -3691,7 +3691,7 @@ static bool_t sh_invlpg(struct vcpu *v, unsigned long va)
== SH_type_fl1_shadow )
{
flush_tlb_local();
- return 0;
+ return false;
}
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
@@ -3718,13 +3718,13 @@ static bool_t sh_invlpg(struct vcpu *v, unsigned long va)
{
perfc_incr(shadow_invlpg_fault);
paging_unlock(d);
- return 0;
+ return false;
}
if ( !(shadow_l2e_get_flags(sl2e) & _PAGE_PRESENT) )
{
paging_unlock(d);
- return 0;
+ return false;
}
sl1mfn = shadow_l2e_get_mfn(sl2e);
@@ -3742,12 +3742,12 @@ static bool_t sh_invlpg(struct vcpu *v, unsigned long va)
}
paging_unlock(d);
/* Need the invlpg, to pick up the disappeareance of the sl1e */
- return 1;
+ return true;
}
}
#endif
- return 1;
+ return true;
}
diff --git a/xen/arch/x86/mm/shadow/none.c b/xen/arch/x86/mm/shadow/none.c
index 41ce593..9e6ad23 100644
--- a/xen/arch/x86/mm/shadow/none.c
+++ b/xen/arch/x86/mm/shadow/none.c
@@ -1,7 +1,7 @@
#include <xen/mm.h>
#include <asm/shadow.h>
-static int _enable_log_dirty(struct domain *d, bool_t log_global)
+static int _enable_log_dirty(struct domain *d, bool log_global)
{
ASSERT(is_pv_domain(d));
return -EOPNOTSUPP;
@@ -37,10 +37,10 @@ static int _page_fault(struct vcpu *v, unsigned long va,
return 0;
}
-static bool_t _invlpg(struct vcpu *v, unsigned long va)
+static bool _invlpg(struct vcpu *v, unsigned long va)
{
ASSERT_UNREACHABLE();
- return 1;
+ return true;
}
static unsigned long _gva_to_gfn(struct vcpu *v, struct p2m_domain *p2m,
diff --git a/xen/arch/x86/mm/shadow/private.h b/xen/arch/x86/mm/shadow/private.h
index 472676c..46d9bab 100644
--- a/xen/arch/x86/mm/shadow/private.h
+++ b/xen/arch/x86/mm/shadow/private.h
@@ -622,15 +622,17 @@ prev_pinned_shadow(struct page_info *page,
pos ? (tmp = prev_pinned_shadow(pos, (dom)), 1) : 0; \
pos = tmp )
-/* Pin a shadow page: take an extra refcount, set the pin bit,
+/*
+ * Pin a shadow page: take an extra refcount, set the pin bit,
* and put the shadow at the head of the list of pinned shadows.
- * Returns 0 for failure, 1 for success. */
-static inline int sh_pin(struct domain *d, mfn_t smfn)
+ * Returns false for failure, true for success.
+ */
+static inline bool sh_pin(struct domain *d, mfn_t smfn)
{
struct page_info *sp[4];
struct page_list_head *pin_list;
unsigned int i, pages;
- bool_t already_pinned;
+ bool already_pinned;
ASSERT(mfn_valid(smfn));
sp[0] = mfn_to_page(smfn);
@@ -641,7 +643,7 @@ static inline int sh_pin(struct domain *d, mfn_t smfn)
pin_list = &d->arch.paging.shadow.pinned_shadows;
if ( already_pinned && sp[0] == page_list_first(pin_list) )
- return 1;
+ return true;
/* Treat the up-to-four pages of the shadow as a unit in the list ops */
for ( i = 1; i < pages; i++ )
@@ -661,7 +663,7 @@ static inline int sh_pin(struct domain *d, mfn_t smfn)
{
/* Not pinned: pin it! */
if ( !sh_get_ref(d, smfn, 0) )
- return 0;
+ return false;
sp[0]->u.sh.pinned = 1;
}
@@ -669,7 +671,7 @@ static inline int sh_pin(struct domain *d, mfn_t smfn)
for ( i = pages; i > 0; i-- )
page_list_add(sp[i - 1], pin_list);
- return 1;
+ return true;
}
/* Unpin a shadow page: unset the pin bit, take the shadow off the list
diff --git a/xen/include/asm-x86/paging.h b/xen/include/asm-x86/paging.h
index f262c9e..44e86d6 100644
--- a/xen/include/asm-x86/paging.h
+++ b/xen/include/asm-x86/paging.h
@@ -93,9 +93,9 @@ struct shadow_paging_mode {
unsigned long new,
unsigned int bytes,
struct sh_emulate_ctxt *sh_ctxt);
- bool_t (*write_guest_entry )(struct vcpu *v, intpte_t *p,
+ bool (*write_guest_entry )(struct vcpu *v, intpte_t *p,
intpte_t new, mfn_t gmfn);
- bool_t (*cmpxchg_guest_entry )(struct vcpu *v, intpte_t *p,
+ bool (*cmpxchg_guest_entry )(struct vcpu *v, intpte_t *p,
intpte_t *old, intpte_t new,
mfn_t gmfn);
mfn_t (*make_monitor_table )(struct vcpu *v);
@@ -115,7 +115,7 @@ struct shadow_paging_mode {
struct paging_mode {
int (*page_fault )(struct vcpu *v, unsigned long va,
struct cpu_user_regs *regs);
- bool_t (*invlpg )(struct vcpu *v, unsigned long va);
+ bool (*invlpg )(struct vcpu *v, unsigned long va);
unsigned long (*gva_to_gfn )(struct vcpu *v,
struct p2m_domain *p2m,
unsigned long va,
@@ -292,11 +292,13 @@ static inline void paging_update_paging_modes(struct vcpu *v)
}
-/* Write a new value into the guest pagetable, and update the
- * paging-assistance state appropriately. Returns 0 if we page-faulted,
- * 1 for success. */
-static inline bool_t paging_write_guest_entry(struct vcpu *v, intpte_t *p,
- intpte_t new, mfn_t gmfn)
+/*
+ * Write a new value into the guest pagetable, and update the
+ * paging-assistance state appropriately. Returns false if we page-faulted,
+ * true for success.
+ */
+static inline bool paging_write_guest_entry(
+ struct vcpu *v, intpte_t *p, intpte_t new, mfn_t gmfn)
{
#ifdef CONFIG_SHADOW_PAGING
if ( unlikely(paging_mode_shadow(v->domain)) && paging_get_hostmode(v) )
@@ -307,13 +309,14 @@ static inline bool_t paging_write_guest_entry(struct vcpu *v, intpte_t *p,
}
-/* Cmpxchg a new value into the guest pagetable, and update the
- * paging-assistance state appropriately. Returns 0 if we page-faulted,
- * 1 if not. N.B. caller should check the value of "old" to see if the
- * cmpxchg itself was successful. */
-static inline bool_t paging_cmpxchg_guest_entry(struct vcpu *v, intpte_t *p,
- intpte_t *old, intpte_t new,
- mfn_t gmfn)
+/*
+ * Cmpxchg a new value into the guest pagetable, and update the
+ * paging-assistance state appropriately. Returns false if we page-faulted,
+ * true if not. N.B. caller should check the value of "old" to see if the
+ * cmpxchg itself was successful.
+ */
+static inline bool paging_cmpxchg_guest_entry(
+ struct vcpu *v, intpte_t *p, intpte_t *old, intpte_t new, mfn_t gmfn)
{
#ifdef CONFIG_SHADOW_PAGING
if ( unlikely(paging_mode_shadow(v->domain)) && paging_get_hostmode(v) )
diff --git a/xen/include/asm-x86/shadow.h b/xen/include/asm-x86/shadow.h
index 7e1ed3b..678b5d4 100644
--- a/xen/include/asm-x86/shadow.h
+++ b/xen/include/asm-x86/shadow.h
@@ -102,7 +102,7 @@ int shadow_set_allocation(struct domain *d, unsigned int pages,
({ ASSERT_UNREACHABLE(); -EOPNOTSUPP; })
static inline void sh_remove_shadows(struct domain *d, mfn_t gmfn,
- bool_t fast, bool_t all) {}
+ int fast, int all) {}
static inline void shadow_blow_tables_per_domain(struct domain *d) {}
--
2.1.4
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply related [flat|nested] 12+ messages in thread
* Re: [PATCH v2 4/4] x86/shadow: Switch to using bool
2017-06-30 15:40 ` [PATCH v2 " Andrew Cooper
@ 2017-06-30 15:44 ` Tim Deegan
0 siblings, 0 replies; 12+ messages in thread
From: Tim Deegan @ 2017-06-30 15:44 UTC (permalink / raw)
To: Andrew Cooper; +Cc: Xen-devel
At 16:40 +0100 on 30 Jun (1498840853), Andrew Cooper wrote:
> * sh_pin() has boolean properties, so switch its return type.
> * sh_remove_shadows() uses ints everywhere other than its stub.
>
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Tim Deegan <tim@xen.org>
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH 2/4] xen/flask: Switch to using bool
2017-06-28 11:16 ` [PATCH 2/4] xen/flask: " Andrew Cooper
@ 2017-08-10 14:37 ` Daniel De Graaf
0 siblings, 0 replies; 12+ messages in thread
From: Daniel De Graaf @ 2017-08-10 14:37 UTC (permalink / raw)
To: Andrew Cooper, Xen-devel
On 06/28/2017 07:16 AM, Andrew Cooper wrote:
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Daniel De Graaf <dgdegra@tycho.nsa.gov>
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 12+ messages in thread
end of thread, other threads:[~2017-08-10 14:37 UTC | newest]
Thread overview: 12+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-06-28 11:16 [PATCH 1/4] xen/tmem: Switch to using bool Andrew Cooper
2017-06-28 11:16 ` [PATCH 2/4] xen/flask: " Andrew Cooper
2017-08-10 14:37 ` Daniel De Graaf
2017-06-28 11:16 ` [PATCH 3/4] xen/efi: " Andrew Cooper
2017-06-28 11:53 ` Jan Beulich
2017-06-28 11:16 ` [PATCH 4/4] x86/shadow: " Andrew Cooper
2017-06-28 13:55 ` Tim Deegan
2017-06-30 15:40 ` [PATCH v2 " Andrew Cooper
2017-06-30 15:44 ` Tim Deegan
2017-06-28 13:08 ` [PATCH 1/4] xen/tmem: " Wei Liu
2017-06-28 13:53 ` Konrad Rzeszutek Wilk
2017-06-28 13:54 ` Andrew Cooper
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.