* [PATCH] x86/shadow: adjust callback arrays
@ 2021-04-12 10:42 Jan Beulich
2021-04-15 15:59 ` Tim Deegan
0 siblings, 1 reply; 4+ messages in thread
From: Jan Beulich @ 2021-04-12 10:42 UTC (permalink / raw)
To: xen-devel
Cc: Tim Deegan, George Dunlap, Andrew Cooper, Wei Liu, Roger Pau Monné
Some of them have entries with stale comments. Rather than correcting
these comments, re-arrange how these arrays get populated, shrinking
their sizes at the same time (by omitting trailing NULL entries): Use
dedicated element initializers, serving the purpose of what the
comments did so far. This then also makes these arrays independent of
the actual ordering of the individual SH_type_*.
While tightening respective ASSERT()s in hash_{vcpu,domain}_foreach(),
also tighten related ones in shadow_hash_{insert,delete}().
Signed-off-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -1565,7 +1565,7 @@ void shadow_hash_insert(struct domain *d
ASSERT(paging_locked_by_me(d));
ASSERT(d->arch.paging.shadow.hash_table);
- ASSERT(t);
+ ASSERT(t >= SH_type_min_shadow && t <= SH_type_max_shadow);
sh_hash_audit(d);
@@ -1590,7 +1590,7 @@ void shadow_hash_delete(struct domain *d
ASSERT(paging_locked_by_me(d));
ASSERT(d->arch.paging.shadow.hash_table);
- ASSERT(t);
+ ASSERT(t >= SH_type_min_shadow && t <= SH_type_max_shadow);
sh_hash_audit(d);
@@ -1668,7 +1668,7 @@ static void hash_vcpu_foreach(struct vcp
{
if ( callback_mask & (1 << x->u.sh.type) )
{
- ASSERT(x->u.sh.type < SH_type_unused);
+ ASSERT(x->u.sh.type <= SH_type_max_shadow);
ASSERT(callbacks[x->u.sh.type] != NULL);
done = callbacks[x->u.sh.type](v, page_to_mfn(x),
callback_mfn);
@@ -1715,7 +1715,7 @@ static void hash_domain_foreach(struct d
{
if ( callback_mask & (1 << x->u.sh.type) )
{
- ASSERT(x->u.sh.type < SH_type_unused);
+ ASSERT(x->u.sh.type <= SH_type_max_shadow);
ASSERT(callbacks[x->u.sh.type] != NULL);
done = callbacks[x->u.sh.type](d, page_to_mfn(x),
callback_mfn);
@@ -1819,26 +1819,16 @@ int sh_remove_write_access(struct domain
unsigned long fault_addr)
{
/* Dispatch table for getting per-type functions */
- static const hash_domain_callback_t callbacks[SH_type_unused] = {
- NULL, /* none */
+ static const hash_domain_callback_t callbacks[] = {
#ifdef CONFIG_HVM
- SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 2), /* l1_32 */
- SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 2), /* fl1_32 */
- NULL, /* l2_32 */
- SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 3), /* l1_pae */
- SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 3), /* fl1_pae */
- NULL, /* l2_pae */
+ [SH_type_l1_32_shadow] = SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 2),
+ [SH_type_fl1_32_shadow] = SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 2),
+ [SH_type_l1_pae_shadow] = SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 3),
+ [SH_type_fl1_pae_shadow] = SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 3),
#endif
- SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 4), /* l1_64 */
- SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 4), /* fl1_64 */
- NULL, /* l2_64 */
- NULL, /* l2h_64 */
- NULL, /* l3_64 */
- NULL, /* l4_64 */
- NULL, /* p2m */
- NULL /* unused */
+ [SH_type_l1_64_shadow] = SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 4),
+ [SH_type_fl1_64_shadow] = SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 4),
};
-
static const unsigned int callback_mask = SHF_L1_ANY | SHF_FL1_ANY;
struct page_info *pg = mfn_to_page(gmfn);
#if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC
@@ -2044,26 +2034,16 @@ int sh_remove_all_mappings(struct domain
struct page_info *page = mfn_to_page(gmfn);
/* Dispatch table for getting per-type functions */
- static const hash_domain_callback_t callbacks[SH_type_unused] = {
- NULL, /* none */
+ static const hash_domain_callback_t callbacks[] = {
#ifdef CONFIG_HVM
- SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 2), /* l1_32 */
- SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 2), /* fl1_32 */
- NULL, /* l2_32 */
- SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 3), /* l1_pae */
- SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 3), /* fl1_pae */
- NULL, /* l2_pae */
+ [SH_type_l1_32_shadow] = SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 2),
+ [SH_type_fl1_32_shadow] = SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 2),
+ [SH_type_l1_pae_shadow] = SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 3),
+ [SH_type_fl1_pae_shadow] = SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 3),
#endif
- SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 4), /* l1_64 */
- SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 4), /* fl1_64 */
- NULL, /* l2_64 */
- NULL, /* l2h_64 */
- NULL, /* l3_64 */
- NULL, /* l4_64 */
- NULL, /* p2m */
- NULL /* unused */
+ [SH_type_l1_64_shadow] = SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 4),
+ [SH_type_fl1_64_shadow] = SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 4),
};
-
static const unsigned int callback_mask = SHF_L1_ANY | SHF_FL1_ANY;
perfc_incr(shadow_mappings);
@@ -2189,45 +2169,27 @@ void sh_remove_shadows(struct domain *d,
/* Dispatch table for getting per-type functions: each level must
* be called with the function to remove a lower-level shadow. */
- static const hash_domain_callback_t callbacks[SH_type_unused] = {
- NULL, /* none */
+ static const hash_domain_callback_t callbacks[] = {
#ifdef CONFIG_HVM
- NULL, /* l1_32 */
- NULL, /* fl1_32 */
- SHADOW_INTERNAL_NAME(sh_remove_l1_shadow, 2), /* l2_32 */
- NULL, /* l1_pae */
- NULL, /* fl1_pae */
- SHADOW_INTERNAL_NAME(sh_remove_l1_shadow, 3), /* l2_pae */
+ [SH_type_l2_32_shadow] = SHADOW_INTERNAL_NAME(sh_remove_l1_shadow, 2),
+ [SH_type_l2_pae_shadow] = SHADOW_INTERNAL_NAME(sh_remove_l1_shadow, 3),
#endif
- NULL, /* l1_64 */
- NULL, /* fl1_64 */
- SHADOW_INTERNAL_NAME(sh_remove_l1_shadow, 4), /* l2_64 */
- SHADOW_INTERNAL_NAME(sh_remove_l1_shadow, 4), /* l2h_64 */
- SHADOW_INTERNAL_NAME(sh_remove_l2_shadow, 4), /* l3_64 */
- SHADOW_INTERNAL_NAME(sh_remove_l3_shadow, 4), /* l4_64 */
- NULL, /* p2m */
- NULL /* unused */
+ [SH_type_l2_64_shadow] = SHADOW_INTERNAL_NAME(sh_remove_l1_shadow, 4),
+ [SH_type_l2h_64_shadow] = SHADOW_INTERNAL_NAME(sh_remove_l1_shadow, 4),
+ [SH_type_l3_64_shadow] = SHADOW_INTERNAL_NAME(sh_remove_l2_shadow, 4),
+ [SH_type_l4_64_shadow] = SHADOW_INTERNAL_NAME(sh_remove_l3_shadow, 4),
};
/* Another lookup table, for choosing which mask to use */
- static const unsigned int masks[SH_type_unused] = {
- 0, /* none */
+ static const unsigned int masks[SH_type_max_shadow + 1] = {
#ifdef CONFIG_HVM
- SHF_L2_32, /* l1_32 */
- 0, /* fl1_32 */
- 0, /* l2_32 */
- SHF_L2_PAE, /* l1_pae */
- 0, /* fl1_pae */
- 0, /* l2_pae */
+ [SH_type_l1_32_shadow] = SHF_L2_32,
+ [SH_type_l1_pae_shadow] = SHF_L2_PAE,
#endif
- SHF_L2H_64 | SHF_L2_64, /* l1_64 */
- 0, /* fl1_64 */
- SHF_L3_64, /* l2_64 */
- SHF_L3_64, /* l2h_64 */
- SHF_L4_64, /* l3_64 */
- 0, /* l4_64 */
- 0, /* p2m */
- 0 /* unused */
+ [SH_type_l1_64_shadow] = SHF_L2H_64 | SHF_L2_64,
+ [SH_type_l2_64_shadow] = SHF_L3_64,
+ [SH_type_l2h_64_shadow] = SHF_L3_64,
+ [SH_type_l3_64_shadow] = SHF_L4_64,
};
ASSERT(!(all && fast));
@@ -2356,24 +2318,8 @@ static int sh_clear_up_pointer(struct vc
void sh_reset_l3_up_pointers(struct vcpu *v)
{
- static const hash_vcpu_callback_t callbacks[SH_type_unused] = {
- NULL, /* none */
-#ifdef CONFIG_HVM
- NULL, /* l1_32 */
- NULL, /* fl1_32 */
- NULL, /* l2_32 */
- NULL, /* l1_pae */
- NULL, /* fl1_pae */
- NULL, /* l2_pae */
-#endif
- NULL, /* l1_64 */
- NULL, /* fl1_64 */
- NULL, /* l2_64 */
- NULL, /* l2h_64 */
- sh_clear_up_pointer, /* l3_64 */
- NULL, /* l4_64 */
- NULL, /* p2m */
- NULL /* unused */
+ static const hash_vcpu_callback_t callbacks[] = {
+ [SH_type_l3_64_shadow] = sh_clear_up_pointer,
};
static const unsigned int callback_mask = SHF_L3_64;
@@ -3381,25 +3327,23 @@ int shadow_domctl(struct domain *d,
void shadow_audit_tables(struct vcpu *v)
{
/* Dispatch table for getting per-type functions */
- static const hash_vcpu_callback_t callbacks[SH_type_unused] = {
- NULL, /* none */
+ static const hash_vcpu_callback_t callbacks[] = {
#if SHADOW_AUDIT & (SHADOW_AUDIT_ENTRIES | SHADOW_AUDIT_ENTRIES_FULL)
# ifdef CONFIG_HVM
- SHADOW_INTERNAL_NAME(sh_audit_l1_table, 2), /* l1_32 */
- SHADOW_INTERNAL_NAME(sh_audit_fl1_table, 2), /* fl1_32 */
- SHADOW_INTERNAL_NAME(sh_audit_l2_table, 2), /* l2_32 */
- SHADOW_INTERNAL_NAME(sh_audit_l1_table, 3), /* l1_pae */
- SHADOW_INTERNAL_NAME(sh_audit_fl1_table, 3), /* fl1_pae */
- SHADOW_INTERNAL_NAME(sh_audit_l2_table, 3), /* l2_pae */
+ [SH_type_l1_32_shadow] = SHADOW_INTERNAL_NAME(sh_audit_l1_table, 2),
+ [SH_type_fl1_32_shadow] = SHADOW_INTERNAL_NAME(sh_audit_fl1_table, 2),
+ [SH_type_l2_32_shadow] = SHADOW_INTERNAL_NAME(sh_audit_l2_table, 2),
+ [SH_type_l1_pae_shadow] = SHADOW_INTERNAL_NAME(sh_audit_l1_table, 3),
+ [SH_type_fl1_pae_shadow] = SHADOW_INTERNAL_NAME(sh_audit_fl1_table, 3),
+ [SH_type_l2_pae_shadow] = SHADOW_INTERNAL_NAME(sh_audit_l2_table, 3),
# endif
- SHADOW_INTERNAL_NAME(sh_audit_l1_table, 4), /* l1_64 */
- SHADOW_INTERNAL_NAME(sh_audit_fl1_table, 4), /* fl1_64 */
- SHADOW_INTERNAL_NAME(sh_audit_l2_table, 4), /* l2_64 */
- SHADOW_INTERNAL_NAME(sh_audit_l2_table, 4), /* l2h_64 */
- SHADOW_INTERNAL_NAME(sh_audit_l3_table, 4), /* l3_64 */
- SHADOW_INTERNAL_NAME(sh_audit_l4_table, 4), /* l4_64 */
+ [SH_type_l1_64_shadow] = SHADOW_INTERNAL_NAME(sh_audit_l1_table, 4),
+ [SH_type_fl1_64_shadow] = SHADOW_INTERNAL_NAME(sh_audit_fl1_table, 4),
+ [SH_type_l2_64_shadow] = SHADOW_INTERNAL_NAME(sh_audit_l2_table, 4),
+ [SH_type_l2h_64_shadow] = SHADOW_INTERNAL_NAME(sh_audit_l2_table, 4),
+ [SH_type_l3_64_shadow] = SHADOW_INTERNAL_NAME(sh_audit_l3_table, 4),
+ [SH_type_l4_64_shadow] = SHADOW_INTERNAL_NAME(sh_audit_l4_table, 4),
#endif
- NULL /* All the rest */
};
unsigned int mask;
@@ -3427,7 +3371,9 @@ void shadow_audit_tables(struct vcpu *v)
}
}
- HASH_CALLBACKS_CHECK(SHF_page_type_mask);
+ HASH_CALLBACKS_CHECK(SHADOW_AUDIT & (SHADOW_AUDIT_ENTRIES |
+ SHADOW_AUDIT_ENTRIES_FULL)
+ ? SHF_page_type_mask : 0);
hash_vcpu_foreach(v, mask, callbacks, INVALID_MFN);
}
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH] x86/shadow: adjust callback arrays
2021-04-12 10:42 [PATCH] x86/shadow: adjust callback arrays Jan Beulich
@ 2021-04-15 15:59 ` Tim Deegan
2021-04-15 16:03 ` Jan Beulich
0 siblings, 1 reply; 4+ messages in thread
From: Tim Deegan @ 2021-04-15 15:59 UTC (permalink / raw)
To: Jan Beulich
Cc: xen-devel, George Dunlap, Andrew Cooper, Wei Liu, Roger Pau Monné
At 12:42 +0200 on 12 Apr (1618231332), Jan Beulich wrote:
> Some of them have entries with stale comments. Rather than correcting
> these comments, re-arrange how these arrays get populated, shrinking
> their sizes at the same time (by omitting trailing NULL entries): Use
> dedicated element initializers, serving the purpose of what the
> comments did so far. This then also makes these arrays independent of
> the actual ordering of the individual SH_type_*.
>
> While tightening respective ASSERT()s in hash_{vcpu,domain}_foreach(),
> also tighten related ones in shadow_hash_{insert,delete}().
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
Looks good, but please leave the arrays at full size. With the full
array, a bug could lead to an assertion failure or NULL deref; with
a short array it could mean following a bogus function pointer.
With that change, Acked-by: Tim Deegan <tim@xen.org>
Cheers,
Tim.
> --- a/xen/arch/x86/mm/shadow/common.c
> +++ b/xen/arch/x86/mm/shadow/common.c
> @@ -1565,7 +1565,7 @@ void shadow_hash_insert(struct domain *d
>
> ASSERT(paging_locked_by_me(d));
> ASSERT(d->arch.paging.shadow.hash_table);
> - ASSERT(t);
> + ASSERT(t >= SH_type_min_shadow && t <= SH_type_max_shadow);
>
> sh_hash_audit(d);
>
> @@ -1590,7 +1590,7 @@ void shadow_hash_delete(struct domain *d
>
> ASSERT(paging_locked_by_me(d));
> ASSERT(d->arch.paging.shadow.hash_table);
> - ASSERT(t);
> + ASSERT(t >= SH_type_min_shadow && t <= SH_type_max_shadow);
>
> sh_hash_audit(d);
>
> @@ -1668,7 +1668,7 @@ static void hash_vcpu_foreach(struct vcp
> {
> if ( callback_mask & (1 << x->u.sh.type) )
> {
> - ASSERT(x->u.sh.type < SH_type_unused);
> + ASSERT(x->u.sh.type <= SH_type_max_shadow);
> ASSERT(callbacks[x->u.sh.type] != NULL);
> done = callbacks[x->u.sh.type](v, page_to_mfn(x),
> callback_mfn);
> @@ -1715,7 +1715,7 @@ static void hash_domain_foreach(struct d
> {
> if ( callback_mask & (1 << x->u.sh.type) )
> {
> - ASSERT(x->u.sh.type < SH_type_unused);
> + ASSERT(x->u.sh.type <= SH_type_max_shadow);
> ASSERT(callbacks[x->u.sh.type] != NULL);
> done = callbacks[x->u.sh.type](d, page_to_mfn(x),
> callback_mfn);
> @@ -1819,26 +1819,16 @@ int sh_remove_write_access(struct domain
> unsigned long fault_addr)
> {
> /* Dispatch table for getting per-type functions */
> - static const hash_domain_callback_t callbacks[SH_type_unused] = {
> - NULL, /* none */
> + static const hash_domain_callback_t callbacks[] = {
> #ifdef CONFIG_HVM
> - SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 2), /* l1_32 */
> - SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 2), /* fl1_32 */
> - NULL, /* l2_32 */
> - SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 3), /* l1_pae */
> - SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 3), /* fl1_pae */
> - NULL, /* l2_pae */
> + [SH_type_l1_32_shadow] = SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 2),
> + [SH_type_fl1_32_shadow] = SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 2),
> + [SH_type_l1_pae_shadow] = SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 3),
> + [SH_type_fl1_pae_shadow] = SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 3),
> #endif
> - SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 4), /* l1_64 */
> - SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 4), /* fl1_64 */
> - NULL, /* l2_64 */
> - NULL, /* l2h_64 */
> - NULL, /* l3_64 */
> - NULL, /* l4_64 */
> - NULL, /* p2m */
> - NULL /* unused */
> + [SH_type_l1_64_shadow] = SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 4),
> + [SH_type_fl1_64_shadow] = SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 4),
> };
> -
> static const unsigned int callback_mask = SHF_L1_ANY | SHF_FL1_ANY;
> struct page_info *pg = mfn_to_page(gmfn);
> #if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC
> @@ -2044,26 +2034,16 @@ int sh_remove_all_mappings(struct domain
> struct page_info *page = mfn_to_page(gmfn);
>
> /* Dispatch table for getting per-type functions */
> - static const hash_domain_callback_t callbacks[SH_type_unused] = {
> - NULL, /* none */
> + static const hash_domain_callback_t callbacks[] = {
> #ifdef CONFIG_HVM
> - SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 2), /* l1_32 */
> - SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 2), /* fl1_32 */
> - NULL, /* l2_32 */
> - SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 3), /* l1_pae */
> - SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 3), /* fl1_pae */
> - NULL, /* l2_pae */
> + [SH_type_l1_32_shadow] = SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 2),
> + [SH_type_fl1_32_shadow] = SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 2),
> + [SH_type_l1_pae_shadow] = SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 3),
> + [SH_type_fl1_pae_shadow] = SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 3),
> #endif
> - SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 4), /* l1_64 */
> - SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 4), /* fl1_64 */
> - NULL, /* l2_64 */
> - NULL, /* l2h_64 */
> - NULL, /* l3_64 */
> - NULL, /* l4_64 */
> - NULL, /* p2m */
> - NULL /* unused */
> + [SH_type_l1_64_shadow] = SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 4),
> + [SH_type_fl1_64_shadow] = SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 4),
> };
> -
> static const unsigned int callback_mask = SHF_L1_ANY | SHF_FL1_ANY;
>
> perfc_incr(shadow_mappings);
> @@ -2189,45 +2169,27 @@ void sh_remove_shadows(struct domain *d,
>
> /* Dispatch table for getting per-type functions: each level must
> * be called with the function to remove a lower-level shadow. */
> - static const hash_domain_callback_t callbacks[SH_type_unused] = {
> - NULL, /* none */
> + static const hash_domain_callback_t callbacks[] = {
> #ifdef CONFIG_HVM
> - NULL, /* l1_32 */
> - NULL, /* fl1_32 */
> - SHADOW_INTERNAL_NAME(sh_remove_l1_shadow, 2), /* l2_32 */
> - NULL, /* l1_pae */
> - NULL, /* fl1_pae */
> - SHADOW_INTERNAL_NAME(sh_remove_l1_shadow, 3), /* l2_pae */
> + [SH_type_l2_32_shadow] = SHADOW_INTERNAL_NAME(sh_remove_l1_shadow, 2),
> + [SH_type_l2_pae_shadow] = SHADOW_INTERNAL_NAME(sh_remove_l1_shadow, 3),
> #endif
> - NULL, /* l1_64 */
> - NULL, /* fl1_64 */
> - SHADOW_INTERNAL_NAME(sh_remove_l1_shadow, 4), /* l2_64 */
> - SHADOW_INTERNAL_NAME(sh_remove_l1_shadow, 4), /* l2h_64 */
> - SHADOW_INTERNAL_NAME(sh_remove_l2_shadow, 4), /* l3_64 */
> - SHADOW_INTERNAL_NAME(sh_remove_l3_shadow, 4), /* l4_64 */
> - NULL, /* p2m */
> - NULL /* unused */
> + [SH_type_l2_64_shadow] = SHADOW_INTERNAL_NAME(sh_remove_l1_shadow, 4),
> + [SH_type_l2h_64_shadow] = SHADOW_INTERNAL_NAME(sh_remove_l1_shadow, 4),
> + [SH_type_l3_64_shadow] = SHADOW_INTERNAL_NAME(sh_remove_l2_shadow, 4),
> + [SH_type_l4_64_shadow] = SHADOW_INTERNAL_NAME(sh_remove_l3_shadow, 4),
> };
>
> /* Another lookup table, for choosing which mask to use */
> - static const unsigned int masks[SH_type_unused] = {
> - 0, /* none */
> + static const unsigned int masks[SH_type_max_shadow + 1] = {
> #ifdef CONFIG_HVM
> - SHF_L2_32, /* l1_32 */
> - 0, /* fl1_32 */
> - 0, /* l2_32 */
> - SHF_L2_PAE, /* l1_pae */
> - 0, /* fl1_pae */
> - 0, /* l2_pae */
> + [SH_type_l1_32_shadow] = SHF_L2_32,
> + [SH_type_l1_pae_shadow] = SHF_L2_PAE,
> #endif
> - SHF_L2H_64 | SHF_L2_64, /* l1_64 */
> - 0, /* fl1_64 */
> - SHF_L3_64, /* l2_64 */
> - SHF_L3_64, /* l2h_64 */
> - SHF_L4_64, /* l3_64 */
> - 0, /* l4_64 */
> - 0, /* p2m */
> - 0 /* unused */
> + [SH_type_l1_64_shadow] = SHF_L2H_64 | SHF_L2_64,
> + [SH_type_l2_64_shadow] = SHF_L3_64,
> + [SH_type_l2h_64_shadow] = SHF_L3_64,
> + [SH_type_l3_64_shadow] = SHF_L4_64,
> };
>
> ASSERT(!(all && fast));
> @@ -2356,24 +2318,8 @@ static int sh_clear_up_pointer(struct vc
>
> void sh_reset_l3_up_pointers(struct vcpu *v)
> {
> - static const hash_vcpu_callback_t callbacks[SH_type_unused] = {
> - NULL, /* none */
> -#ifdef CONFIG_HVM
> - NULL, /* l1_32 */
> - NULL, /* fl1_32 */
> - NULL, /* l2_32 */
> - NULL, /* l1_pae */
> - NULL, /* fl1_pae */
> - NULL, /* l2_pae */
> -#endif
> - NULL, /* l1_64 */
> - NULL, /* fl1_64 */
> - NULL, /* l2_64 */
> - NULL, /* l2h_64 */
> - sh_clear_up_pointer, /* l3_64 */
> - NULL, /* l4_64 */
> - NULL, /* p2m */
> - NULL /* unused */
> + static const hash_vcpu_callback_t callbacks[] = {
> + [SH_type_l3_64_shadow] = sh_clear_up_pointer,
> };
> static const unsigned int callback_mask = SHF_L3_64;
>
> @@ -3381,25 +3327,23 @@ int shadow_domctl(struct domain *d,
> void shadow_audit_tables(struct vcpu *v)
> {
> /* Dispatch table for getting per-type functions */
> - static const hash_vcpu_callback_t callbacks[SH_type_unused] = {
> - NULL, /* none */
> + static const hash_vcpu_callback_t callbacks[] = {
> #if SHADOW_AUDIT & (SHADOW_AUDIT_ENTRIES | SHADOW_AUDIT_ENTRIES_FULL)
> # ifdef CONFIG_HVM
> - SHADOW_INTERNAL_NAME(sh_audit_l1_table, 2), /* l1_32 */
> - SHADOW_INTERNAL_NAME(sh_audit_fl1_table, 2), /* fl1_32 */
> - SHADOW_INTERNAL_NAME(sh_audit_l2_table, 2), /* l2_32 */
> - SHADOW_INTERNAL_NAME(sh_audit_l1_table, 3), /* l1_pae */
> - SHADOW_INTERNAL_NAME(sh_audit_fl1_table, 3), /* fl1_pae */
> - SHADOW_INTERNAL_NAME(sh_audit_l2_table, 3), /* l2_pae */
> + [SH_type_l1_32_shadow] = SHADOW_INTERNAL_NAME(sh_audit_l1_table, 2),
> + [SH_type_fl1_32_shadow] = SHADOW_INTERNAL_NAME(sh_audit_fl1_table, 2),
> + [SH_type_l2_32_shadow] = SHADOW_INTERNAL_NAME(sh_audit_l2_table, 2),
> + [SH_type_l1_pae_shadow] = SHADOW_INTERNAL_NAME(sh_audit_l1_table, 3),
> + [SH_type_fl1_pae_shadow] = SHADOW_INTERNAL_NAME(sh_audit_fl1_table, 3),
> + [SH_type_l2_pae_shadow] = SHADOW_INTERNAL_NAME(sh_audit_l2_table, 3),
> # endif
> - SHADOW_INTERNAL_NAME(sh_audit_l1_table, 4), /* l1_64 */
> - SHADOW_INTERNAL_NAME(sh_audit_fl1_table, 4), /* fl1_64 */
> - SHADOW_INTERNAL_NAME(sh_audit_l2_table, 4), /* l2_64 */
> - SHADOW_INTERNAL_NAME(sh_audit_l2_table, 4), /* l2h_64 */
> - SHADOW_INTERNAL_NAME(sh_audit_l3_table, 4), /* l3_64 */
> - SHADOW_INTERNAL_NAME(sh_audit_l4_table, 4), /* l4_64 */
> + [SH_type_l1_64_shadow] = SHADOW_INTERNAL_NAME(sh_audit_l1_table, 4),
> + [SH_type_fl1_64_shadow] = SHADOW_INTERNAL_NAME(sh_audit_fl1_table, 4),
> + [SH_type_l2_64_shadow] = SHADOW_INTERNAL_NAME(sh_audit_l2_table, 4),
> + [SH_type_l2h_64_shadow] = SHADOW_INTERNAL_NAME(sh_audit_l2_table, 4),
> + [SH_type_l3_64_shadow] = SHADOW_INTERNAL_NAME(sh_audit_l3_table, 4),
> + [SH_type_l4_64_shadow] = SHADOW_INTERNAL_NAME(sh_audit_l4_table, 4),
> #endif
> - NULL /* All the rest */
> };
> unsigned int mask;
>
> @@ -3427,7 +3371,9 @@ void shadow_audit_tables(struct vcpu *v)
> }
> }
>
> - HASH_CALLBACKS_CHECK(SHF_page_type_mask);
> + HASH_CALLBACKS_CHECK(SHADOW_AUDIT & (SHADOW_AUDIT_ENTRIES |
> + SHADOW_AUDIT_ENTRIES_FULL)
> + ? SHF_page_type_mask : 0);
> hash_vcpu_foreach(v, mask, callbacks, INVALID_MFN);
> }
>
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH] x86/shadow: adjust callback arrays
2021-04-15 15:59 ` Tim Deegan
@ 2021-04-15 16:03 ` Jan Beulich
2021-04-15 16:12 ` Tim Deegan
0 siblings, 1 reply; 4+ messages in thread
From: Jan Beulich @ 2021-04-15 16:03 UTC (permalink / raw)
To: Tim Deegan
Cc: xen-devel, George Dunlap, Andrew Cooper, Wei Liu, Roger Pau Monné
On 15.04.2021 17:59, Tim Deegan wrote:
> At 12:42 +0200 on 12 Apr (1618231332), Jan Beulich wrote:
>> Some of them have entries with stale comments. Rather than correcting
>> these comments, re-arrange how these arrays get populated, shrinking
>> their sizes at the same time (by omitting trailing NULL entries): Use
>> dedicated element initializers, serving the purpose of what the
>> comments did so far. This then also makes these arrays independent of
>> the actual ordering of the individual SH_type_*.
>>
>> While tightening respective ASSERT()s in hash_{vcpu,domain}_foreach(),
>> also tighten related ones in shadow_hash_{insert,delete}().
>>
>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>
> Looks good, but please leave the arrays at full size. With the full
> array, a bug could lead to an assertion failure or NULL deref; with
> a short array it could mean following a bogus function pointer.
>
> With that change, Acked-by: Tim Deegan <tim@xen.org>
Thanks, but let me ask back about which of the two possible meanings
of "full" you mean: Dimensioned by SH_type_unused, or dimensioned
by SH_type_max_shadow + 1? The former would leave the arrays as they
are now, while the latter would shrink them a little.
Jan
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH] x86/shadow: adjust callback arrays
2021-04-15 16:03 ` Jan Beulich
@ 2021-04-15 16:12 ` Tim Deegan
0 siblings, 0 replies; 4+ messages in thread
From: Tim Deegan @ 2021-04-15 16:12 UTC (permalink / raw)
To: Jan Beulich
Cc: xen-devel, George Dunlap, Andrew Cooper, Wei Liu, Roger Pau Monné
At 18:03 +0200 on 15 Apr (1618509812), Jan Beulich wrote:
> On 15.04.2021 17:59, Tim Deegan wrote:
> > At 12:42 +0200 on 12 Apr (1618231332), Jan Beulich wrote:
> >> Some of them have entries with stale comments. Rather than correcting
> >> these comments, re-arrange how these arrays get populated, shrinking
> >> their sizes at the same time (by omitting trailing NULL entries): Use
> >> dedicated element initializers, serving the purpose of what the
> >> comments did so far. This then also makes these arrays independent of
> >> the actual ordering of the individual SH_type_*.
> >>
> >> While tightening respective ASSERT()s in hash_{vcpu,domain}_foreach(),
> >> also tighten related ones in shadow_hash_{insert,delete}().
> >>
> >> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> >
> > Looks good, but please leave the arrays at full size. With the full
> > array, a bug could lead to an assertion failure or NULL deref; with
> > a short array it could mean following a bogus function pointer.
> >
> > With that change, Acked-by: Tim Deegan <tim@xen.org>
>
> Thanks, but let me ask back about which of the two possible meanings
> of "full" you mean: Dimensioned by SH_type_unused, or dimensioned
> by SH_type_max_shadow + 1? The former would leave the arrays as they
> are now, while the latter would shrink them a little.
As they are now, please.
Cheers,
Tim.
^ permalink raw reply [flat|nested] 4+ messages in thread
end of thread, other threads:[~2021-04-15 16:12 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-04-12 10:42 [PATCH] x86/shadow: adjust callback arrays Jan Beulich
2021-04-15 15:59 ` Tim Deegan
2021-04-15 16:03 ` Jan Beulich
2021-04-15 16:12 ` Tim Deegan
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).