File 19136-page-info-rearrange.patch of Package xen
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1233313843 0
# Node ID 162cdb596b9a7e49994b9305f34fadf92cfb3933
# Parent 6fe44eb28f525fc5879c43882ca089b9c636b3f6
x86: re-arrange struct page_info members
By combining the overlay fields that are 8 bytes long (on x86-64) into
a union separate from the one used for the 4 byte wide fields, no
unnecessary padding will be inserted while at the same time avoiding
the use of __attribute__((__packed__)) on any of the sub-structures (which
risks misaligning structure members without immediately noticing).
Signed-off-by: Jan Beulich <jbeulich@novell.com>
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -1505,7 +1505,7 @@ mfn_t shadow_alloc(struct domain *d,
while ( i != order )
{
i--;
- sp->u.sh.order = i;
+ sp->v.free.order = i;
page_list_add_tail(sp, &d->arch.paging.shadow.freelists[i]);
sp += 1 << i;
}
@@ -1532,7 +1532,7 @@ mfn_t shadow_alloc(struct domain *d,
sp[i].u.sh.type = shadow_type;
sp[i].u.sh.pinned = 0;
sp[i].u.sh.count = 0;
- sp[i].u.sh.back = backpointer;
+ sp[i].v.sh.back = backpointer;
set_next_shadow(&sp[i], NULL);
perfc_incr(shadow_alloc_count);
}
@@ -1592,20 +1592,20 @@ void shadow_free(struct domain *d, mfn_t
if ( (mfn_x(shadow_page_to_mfn(sp)) & mask) ) {
/* Merge with predecessor block? */
if ( ((sp-mask)->u.sh.type != PGT_none) ||
- ((sp-mask)->u.sh.order != order) )
+ ((sp-mask)->v.free.order != order) )
break;
sp -= mask;
page_list_del(sp, &d->arch.paging.shadow.freelists[order]);
} else {
/* Merge with successor block? */
if ( ((sp+mask)->u.sh.type != PGT_none) ||
- ((sp+mask)->u.sh.order != order) )
+ ((sp+mask)->v.free.order != order) )
break;
page_list_del(sp + mask, &d->arch.paging.shadow.freelists[order]);
}
}
- sp->u.sh.order = order;
+ sp->v.free.order = order;
page_list_add_tail(sp, &d->arch.paging.shadow.freelists[order]);
}
@@ -1788,7 +1788,7 @@ static unsigned int sh_set_allocation(st
sp[j].u.sh.count = 0;
sp[j].tlbflush_timestamp = 0; /* Not in any TLB */
}
- sp->u.sh.order = order;
+ sp->v.free.order = order;
page_list_add_tail(sp, &d->arch.paging.shadow.freelists[order]);
}
else if ( d->arch.paging.shadow.total_pages > pages )
@@ -1867,17 +1867,17 @@ static void sh_hash_audit_bucket(struct
BUG_ON( sp->u.sh.type == 0 );
BUG_ON( sp->u.sh.type > SH_type_max_shadow );
/* Wrong bucket? */
- BUG_ON( sh_hash(sp->u.sh.back, sp->u.sh.type) != bucket );
+ BUG_ON( sh_hash(sp->v.sh.back, sp->u.sh.type) != bucket );
/* Duplicate entry? */
for ( x = next_shadow(sp); x; x = next_shadow(x) )
- BUG_ON( x->u.sh.back == sp->u.sh.back &&
+ BUG_ON( x->v.sh.back == sp->v.sh.back &&
x->u.sh.type == sp->u.sh.type );
/* Follow the backpointer to the guest pagetable */
if ( sp->u.sh.type != SH_type_fl1_32_shadow
&& sp->u.sh.type != SH_type_fl1_pae_shadow
&& sp->u.sh.type != SH_type_fl1_64_shadow )
{
- struct page_info *gpg = mfn_to_page(_mfn(sp->u.sh.back));
+ struct page_info *gpg = mfn_to_page(_mfn(sp->v.sh.back));
/* Bad shadow flags on guest page? */
BUG_ON( !(gpg->shadow_flags & (1<<sp->u.sh.type)) );
/* Bad type count on guest page? */
@@ -1893,7 +1893,7 @@ static void sh_hash_audit_bucket(struct
{
SHADOW_ERROR("MFN %#"PRpgmfn" shadowed (by %#"PRI_mfn")"
" and not OOS but has typecount %#lx\n",
- sp->u.sh.back,
+ sp->v.sh.back,
mfn_x(shadow_page_to_mfn(sp)),
gpg->u.inuse.type_info);
BUG();
@@ -1907,7 +1907,7 @@ static void sh_hash_audit_bucket(struct
{
SHADOW_ERROR("MFN %#"PRpgmfn" shadowed (by %#"PRI_mfn")"
" but has typecount %#lx\n",
- sp->u.sh.back, mfn_x(shadow_page_to_mfn(sp)),
+ sp->v.sh.back, mfn_x(shadow_page_to_mfn(sp)),
gpg->u.inuse.type_info);
BUG();
}
@@ -1993,7 +1993,7 @@ mfn_t shadow_hash_lookup(struct vcpu *v,
prev = NULL;
while(sp)
{
- if ( sp->u.sh.back == n && sp->u.sh.type == t )
+ if ( sp->v.sh.back == n && sp->u.sh.type == t )
{
/* Pull-to-front if 'sp' isn't already the head item */
if ( unlikely(sp != d->arch.paging.shadow.hash_table[key]) )
@@ -2160,7 +2160,7 @@ void sh_destroy_shadow(struct vcpu *v, m
t == SH_type_fl1_64_shadow ||
t == SH_type_monitor_table ||
(is_pv_32on64_vcpu(v) && t == SH_type_l4_64_shadow) ||
- (page_get_owner(mfn_to_page(_mfn(sp->u.sh.back)))
+ (page_get_owner(mfn_to_page(_mfn(sp->v.sh.back)))
== v->domain));
/* The down-shifts here are so that the switch statement is on nice
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -1272,7 +1272,7 @@ static int shadow_set_l2e(struct vcpu *v
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
{
struct shadow_page_info *sp = mfn_to_shadow_page(sl1mfn);
- mfn_t gl1mfn = _mfn(sp->u.sh.back);
+ mfn_t gl1mfn = _mfn(sp->v.sh.back);
/* If the shadow is a fl1 then the backpointer contains
the GFN instead of the GMFN, and it's definitely not
@@ -2220,7 +2220,7 @@ void sh_destroy_l4_shadow(struct vcpu *v
ASSERT(t == SH_type_l4_shadow);
/* Record that the guest page isn't shadowed any more (in this type) */
- gmfn = _mfn(mfn_to_shadow_page(smfn)->u.sh.back);
+ gmfn = _mfn(mfn_to_shadow_page(smfn)->v.sh.back);
delete_shadow_status(v, gmfn, t, smfn);
shadow_demote(v, gmfn, t);
/* Decrement refcounts of all the old entries */
@@ -2249,7 +2249,7 @@ void sh_destroy_l3_shadow(struct vcpu *v
ASSERT(t == SH_type_l3_shadow);
/* Record that the guest page isn't shadowed any more (in this type) */
- gmfn = _mfn(mfn_to_shadow_page(smfn)->u.sh.back);
+ gmfn = _mfn(mfn_to_shadow_page(smfn)->v.sh.back);
delete_shadow_status(v, gmfn, t, smfn);
shadow_demote(v, gmfn, t);
@@ -2284,7 +2284,7 @@ void sh_destroy_l2_shadow(struct vcpu *v
#endif
/* Record that the guest page isn't shadowed any more (in this type) */
- gmfn = _mfn(mfn_to_shadow_page(smfn)->u.sh.back);
+ gmfn = _mfn(mfn_to_shadow_page(smfn)->v.sh.back);
delete_shadow_status(v, gmfn, t, smfn);
shadow_demote(v, gmfn, t);
@@ -2314,12 +2314,12 @@ void sh_destroy_l1_shadow(struct vcpu *v
/* Record that the guest page isn't shadowed any more (in this type) */
if ( t == SH_type_fl1_shadow )
{
- gfn_t gfn = _gfn(mfn_to_shadow_page(smfn)->u.sh.back);
+ gfn_t gfn = _gfn(mfn_to_shadow_page(smfn)->v.sh.back);
delete_fl1_shadow_status(v, gfn, smfn);
}
else
{
- mfn_t gmfn = _mfn(mfn_to_shadow_page(smfn)->u.sh.back);
+ mfn_t gmfn = _mfn(mfn_to_shadow_page(smfn)->v.sh.back);
delete_shadow_status(v, gmfn, t, smfn);
shadow_demote(v, gmfn, t);
}
@@ -2643,7 +2643,7 @@ static int validate_gl1e(struct vcpu *v,
result |= shadow_set_l1e(v, sl1p, new_sl1e, sl1mfn);
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
- gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->u.sh.back);
+ gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->v.sh.back);
if ( mfn_valid(gl1mfn)
&& mfn_is_out_of_sync(gl1mfn) )
{
@@ -3134,7 +3134,7 @@ static int sh_page_fault(struct vcpu *v,
sizeof(sl2e)) != 0)
|| !(shadow_l2e_get_flags(sl2e) & _PAGE_PRESENT)
|| !mfn_valid(gl1mfn = _mfn(mfn_to_shadow_page(
- shadow_l2e_get_mfn(sl2e))->u.sh.back))
+ shadow_l2e_get_mfn(sl2e))->v.sh.back))
|| unlikely(mfn_is_out_of_sync(gl1mfn)) )
{
/* Hit the slow path as if there had been no
@@ -3660,7 +3660,7 @@ sh_invlpg(struct vcpu *v, unsigned long
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
/* Check to see if the SL1 is out of sync. */
{
- mfn_t gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->u.sh.back);
+ mfn_t gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->v.sh.back);
struct page_info *pg = mfn_to_page(gl1mfn);
if ( mfn_valid(gl1mfn)
&& page_is_out_of_sync(pg) )
@@ -3690,7 +3690,7 @@ sh_invlpg(struct vcpu *v, unsigned long
}
sl1mfn = shadow_l2e_get_mfn(sl2e);
- gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->u.sh.back);
+ gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->v.sh.back);
pg = mfn_to_page(gl1mfn);
if ( likely(sh_mfn_is_a_page_table(gl1mfn)
@@ -4995,7 +4995,7 @@ int sh_audit_l1_table(struct vcpu *v, mf
int done = 0;
/* Follow the backpointer */
- gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->u.sh.back);
+ gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->v.sh.back);
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
/* Out-of-sync l1 shadows can contain anything: just check the OOS hash */
@@ -5085,7 +5085,7 @@ int sh_audit_l2_table(struct vcpu *v, mf
int done = 0;
/* Follow the backpointer */
- gl2mfn = _mfn(mfn_to_shadow_page(sl2mfn)->u.sh.back);
+ gl2mfn = _mfn(mfn_to_shadow_page(sl2mfn)->v.sh.back);
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
/* Only L1's may be out of sync. */
@@ -5134,7 +5134,7 @@ int sh_audit_l3_table(struct vcpu *v, mf
int done = 0;
/* Follow the backpointer */
- gl3mfn = _mfn(mfn_to_shadow_page(sl3mfn)->u.sh.back);
+ gl3mfn = _mfn(mfn_to_shadow_page(sl3mfn)->v.sh.back);
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
/* Only L1's may be out of sync. */
@@ -5181,7 +5181,7 @@ int sh_audit_l4_table(struct vcpu *v, mf
int done = 0;
/* Follow the backpointer */
- gl4mfn = _mfn(mfn_to_shadow_page(sl4mfn)->u.sh.back);
+ gl4mfn = _mfn(mfn_to_shadow_page(sl4mfn)->v.sh.back);
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
/* Only L1's may be out of sync. */
--- a/xen/arch/x86/mm/shadow/private.h
+++ b/xen/arch/x86/mm/shadow/private.h
@@ -588,7 +588,7 @@ static inline int sh_get_ref(struct vcpu
if ( unlikely(nx >= 1U<<26) )
{
SHADOW_PRINTK("shadow ref overflow, gmfn=%" PRpgmfn " smfn=%lx\n",
- sp->u.sh.back, mfn_x(smfn));
+ sp->v.sh.back, mfn_x(smfn));
return 0;
}
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -15,7 +15,7 @@
* 1. 'struct page_info' contains a 'struct page_list_entry list'.
* 2. Provide a PFN_ORDER() macro for accessing the order of a free page.
*/
-#define PFN_ORDER(_pfn) ((_pfn)->u.free.order)
+#define PFN_ORDER(_pfn) ((_pfn)->v.free.order)
/*
* This definition is solely for the use in struct page_info (and
@@ -59,8 +59,6 @@ struct page_info
/* Page is in use: ((count_info & PGC_count_mask) != 0). */
struct {
- /* Owner of this page (NULL if page is anonymous). */
- u32 _domain; /* pickled format */
/* Type reference count and various PGT_xxx flags and fields. */
unsigned long type_info;
} inuse;
@@ -70,18 +68,10 @@ struct page_info
unsigned long type:5; /* What kind of shadow is this? */
unsigned long pinned:1; /* Is the shadow pinned? */
unsigned long count:26; /* Reference count */
- union {
- /* When in use, GMFN of guest page we're a shadow of. */
- __mfn_t back;
- /* When free, order of the freelist we're on. */
- unsigned int order;
- };
} sh;
/* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
struct {
- /* Order-size of the free chunk this page is the head of. */
- u32 order;
/* Mask of possibly-tainted TLBs. */
cpumask_t cpumask;
} free;
@@ -89,6 +79,28 @@ struct page_info
} u;
union {
+
+ /* Page is in use, but not as a shadow. */
+ struct {
+ /* Owner of this page (NULL if page is anonymous). */
+ u32 _domain; /* pickled format */
+ } inuse;
+
+ /* Page is in use as a shadow. */
+ struct {
+ /* GMFN of guest page we're a shadow of. */
+ __mfn_t back;
+ } sh;
+
+ /* Page is on a free list (including shadow code free lists). */
+ struct {
+ /* Order-size of the free chunk this page is the head of. */
+ unsigned int order;
+ } free;
+
+ } v;
+
+ union {
/*
* Timestamp from 'TLB clock', used to avoid extra safety flushes.
* Only valid for: a) free pages, and b) pages with zero type count
@@ -224,8 +236,8 @@ static inline u32 pickle_domptr(struct d
/* OOS fixup entries */
#define SHADOW_OOS_FIXUPS 2
-#define page_get_owner(_p) (unpickle_domptr((_p)->u.inuse._domain))
-#define page_set_owner(_p,_d) ((_p)->u.inuse._domain = pickle_domptr(_d))
+#define page_get_owner(_p) (unpickle_domptr((_p)->v.inuse._domain))
+#define page_set_owner(_p,_d) ((_p)->v.inuse._domain = pickle_domptr(_d))
#define maddr_get_owner(ma) (page_get_owner(maddr_to_page((ma))))
#define vaddr_get_owner(va) (page_get_owner(virt_to_page((va))))