File xsa410-09.patch of Package xen.26345
From: Roger Pau Monné <roger.pau@citrix.com>
Subject: x86/p2m: free the paging memory pool preemptively
The paging memory pool is currently freed in two different places:
from {shadow,hap}_teardown() via domain_relinquish_resources() and
from {shadow,hap}_final_teardown() via complete_domain_destroy().
While the former does handle preemption, the latter doesn't.
Attempt to move as much p2m related freeing as possible to happen
before the call to {shadow,hap}_teardown(), so that most memory can be
freed in a preemptive way. In order to avoid causing issues to
existing callers leave the root p2m page tables set and free them in
{hap,shadow}_final_teardown(). Also modify {hap,shadow}_free to free
the page immediately if the domain is dying, so that pages don't
accumulate in the pool when {shadow,hap}_final_teardown() get called.
This is part of XSA-410.
Reported-by: Julien Grall <jgrall@amazon.com>
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Tim Deegan <tim@xen.org>
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -537,18 +537,8 @@ void hap_final_teardown(struct domain *d
unsigned int i;
if ( hvm_altp2m_supported() )
- {
- d->arch.altp2m_active = 0;
-
- if ( d->arch.altp2m_eptp )
- {
- free_xenheap_page(d->arch.altp2m_eptp);
- d->arch.altp2m_eptp = NULL;
- }
-
for ( i = 0; i < MAX_ALTP2M; i++ )
p2m_teardown(d->arch.altp2m_p2m[i], true);
- }
/* Destroy nestedp2m's first */
for (i = 0; i < MAX_NESTEDP2M; i++) {
@@ -563,6 +553,8 @@ void hap_final_teardown(struct domain *d
paging_lock(d);
hap_set_allocation(d, 0, NULL);
ASSERT(d->arch.paging.hap.p2m_pages == 0);
+ ASSERT(d->arch.paging.hap.free_pages == 0);
+ ASSERT(d->arch.paging.hap.total_pages == 0);
paging_unlock(d);
}
@@ -570,6 +562,7 @@ void hap_teardown(struct domain *d, bool
{
struct vcpu *v;
mfn_t mfn;
+ unsigned int i;
ASSERT(d->is_dying);
ASSERT(d != current->domain);
@@ -591,6 +584,27 @@ void hap_teardown(struct domain *d, bool
}
}
+ paging_unlock(d);
+
+ /* Leave the root pt in case we get further attempts to modify the p2m. */
+ if ( hvm_altp2m_supported() )
+ {
+ d->arch.altp2m_active = 0;
+
+ FREE_XENHEAP_PAGE(d->arch.altp2m_eptp);
+
+ for ( i = 0; i < MAX_ALTP2M; i++ )
+ p2m_teardown(d->arch.altp2m_p2m[i], false);
+ }
+
+ /* Destroy nestedp2m's after altp2m. */
+ for ( i = 0; i < MAX_NESTEDP2M; i++ )
+ p2m_teardown(d->arch.nested_p2m[i], false);
+
+ p2m_teardown(p2m_get_hostp2m(d), false);
+
+ paging_lock(d);
+
if ( d->arch.paging.hap.total_pages != 0 )
{
hap_set_allocation(d, 0, preempted);
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -3319,6 +3319,19 @@ void shadow_teardown(struct domain *d, b
}
}
+ paging_unlock(d);
+
+ p2m_teardown(p2m_get_hostp2m(d), false);
+
+ paging_lock(d);
+
+ /*
+ * Reclaim all shadow memory so that shadow_set_allocation() doesn't find
+ * in-use pages, as _shadow_prealloc() will no longer try to reclaim pages
+ * because the domain is dying.
+ */
+ shadow_blow_tables(d);
+
#if (SHADOW_OPTIMIZATIONS & (SHOPT_VIRTUAL_TLB|SHOPT_OUT_OF_SYNC))
/* Free the virtual-TLB array attached to each vcpu */
for_each_vcpu(d, v)
@@ -3436,6 +3449,9 @@ void shadow_final_teardown(struct domain
d->arch.paging.shadow.total_pages,
d->arch.paging.shadow.free_pages,
d->arch.paging.shadow.p2m_pages);
+ ASSERT(!d->arch.paging.shadow.total_pages);
+ ASSERT(!d->arch.paging.shadow.free_pages);
+ ASSERT(!d->arch.paging.shadow.p2m_pages);
paging_unlock(d);
}