File 5c011aaf-x86-dont-enable-shadow-with-too-small-allocation.patch of Package xen.10697
# Commit 2634b997afabfdc5a972e07e536dfbc6febb4385
# Date 2018-11-30 12:10:39 +0100
# Author Jan Beulich <jbeulich@suse.com>
# Committer Jan Beulich <jbeulich@suse.com>
x86/shadow: don't enable shadow mode with too small a shadow allocation
We've had more than one report of host crashes after failed migration,
and in at least one case the evidence pointed at a shadow allocation
pool that had been shrunk too far. Instead of just checking whether the
pool is empty, check whether it is smaller than what
shadow_set_allocation() would minimally bump it to if it were invoked
in the first place.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Tim Deegan <tim@xen.org>
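
For reference, the arithmetic behind the new lower bound, as an
illustrative standalone sketch only (not part of the patch): the
128-pages-per-vCPU figure and the one-page-per-MB p2m estimate come
from the comments in the diff below; nr_vcpus and tot_pages here are
stand-ins for the corresponding domain fields.

    #include <stdio.h>

    /* 128 shadow pages per vCPU, plus one extra vCPU's worth so the
     * result is never zero (mirrors shadow_min_acceptable_pages()). */
    static unsigned int min_acceptable_pages(unsigned int nr_vcpus)
    {
        return (nr_vcpus + 1) * 128;
    }

    /* Add one page per megabyte of guest RAM for the p2m table;
     * tot_pages counts 4 KiB pages, so 256 of them make up 1 MB
     * (mirrors the new sh_min_allocation() helper). */
    static unsigned int min_allocation(unsigned int nr_vcpus,
                                       unsigned long tot_pages)
    {
        return min_acceptable_pages(nr_vcpus) + tot_pages / 256;
    }

    int main(void)
    {
        /* Example: 4 vCPUs and 2 GiB of RAM (524288 pages of 4 KiB)
         * need at least (4 + 1) * 128 + 2048 = 2688 pool pages. */
        printf("minimum shadow pool: %u pages\n",
               min_allocation(4, 524288UL));
        return 0;
    }

With the change, shadow_enable() (re)sizes the pool whenever it is
below this bound plus the pages already used for the p2m table, rather
than only when it is still empty.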
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -1229,7 +1229,7 @@ int shadow_cmpxchg_guest_entry(struct vc
* allow for more than ninety allocated pages per vcpu. We round that
* up to 128 pages, or half a megabyte per vcpu, and add 1 more vcpu's
* worth to make sure we never return zero. */
-static unsigned int shadow_min_acceptable_pages(struct domain *d)
+static unsigned int shadow_min_acceptable_pages(const struct domain *d)
{
u32 vcpu_count = 1;
struct vcpu *v;
@@ -1665,6 +1665,15 @@ shadow_free_p2m_page(struct domain *d, s
paging_unlock(d);
}
+static unsigned int sh_min_allocation(const struct domain *d)
+{
+ /*
+ * Don't allocate less than the minimum acceptable, plus one page per
+ * megabyte of RAM (for the p2m table).
+ */
+ return shadow_min_acceptable_pages(d) + (d->tot_pages / 256);
+}
+
/* Set the pool of shadow pages to the required number of pages.
* Input will be rounded up to at least shadow_min_acceptable_pages(),
* plus space for the p2m table.
@@ -1686,9 +1695,7 @@ static unsigned int sh_set_allocation(st
else
pages -= d->arch.paging.shadow.p2m_pages;
- /* Don't allocate less than the minimum acceptable, plus one page per
- * megabyte of RAM (for the p2m table) */
- lower_bound = shadow_min_acceptable_pages(d) + (d->tot_pages / 256);
+ lower_bound = sh_min_allocation(d);
if ( pages < lower_bound )
pages = lower_bound;
}
@@ -2992,7 +2999,7 @@ int shadow_enable(struct domain *d, u32
/* Init the shadow memory allocation if the user hasn't done so */
old_pages = d->arch.paging.shadow.total_pages;
- if ( old_pages == 0 )
+ if ( old_pages < sh_min_allocation(d) + d->arch.paging.shadow.p2m_pages )
{
unsigned int r;
paging_lock(d);