File 564f0639-x86-P2M-consolidate-handling-of-types-without-valid-MFN.patch of Package xen.4507
References: bsc#958848
# Commit c35eefded2992fc9b979f99190422527650872fd
# Date 2015-11-20 12:38:33 +0100
# Author Jan Beulich <jbeulich@suse.com>
# Committer Jan Beulich <jbeulich@suse.com>
x86/P2M: consolidate handling of types not requiring a valid MFN
As noted regarding the mixture of checks in p2m_pt_set_entry(),
introduce a new P2M type group that can be used everywhere we
just care about accepting operations with either a valid MFN or a
type that permits use without a (valid) MFN.
Note that p2m_mmio_dm is not included in P2M_INVALID_MFN_TYPES, as for
the intended purpose that one ought to be treated similarly to
p2m_invalid (perhaps the two should ultimately get folded anyway).
Note further that PoD superpages now get INVALID_MFN used when creating
page table entries (was _mfn(0) before).
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: George Dunlap <george.dunlap@citrix.com>
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -357,8 +357,7 @@ ept_set_entry(struct p2m_domain *p2m, un
* Read-then-write is OK because we hold the p2m lock. */
old_entry = *ept_entry;
- if ( mfn_valid(mfn_x(mfn)) || direct_mmio || p2m_is_paged(p2mt) ||
- (p2mt == p2m_ram_paging_in) )
+ if ( mfn_valid(mfn_x(mfn)) || p2m_allows_invalid_mfn(p2mt) )
{
/* Construct the new entry, and then write it once */
new_entry.emt = epte_get_entry_emt(p2m->domain, gfn, mfn, &ipat,
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -727,7 +727,7 @@ p2m_pod_zero_check_superpage(struct p2m_
}
/* Try to remove the page, restoring old mapping if it fails. */
- set_p2m_entry(p2m, gfn, _mfn(0), PAGE_ORDER_2M,
+ set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), PAGE_ORDER_2M,
p2m_populate_on_demand, p2m->default_access);
/* Make none of the MFNs are used elsewhere... for example, mapped
@@ -843,7 +843,7 @@ p2m_pod_zero_check(struct p2m_domain *p2
}
/* Try to remove the page, restoring old mapping if it fails. */
- set_p2m_entry(p2m, gfns[i], _mfn(0), PAGE_ORDER_4K,
+ set_p2m_entry(p2m, gfns[i], _mfn(INVALID_MFN), PAGE_ORDER_4K,
p2m_populate_on_demand, p2m->default_access);
/* See if the page was successfully unmapped. (Allow one refcount
@@ -1044,7 +1044,7 @@ p2m_pod_demand_populate(struct p2m_domai
* NOTE: In a fine-grained p2m locking scenario this operation
* may need to promote its locking from gfn->1g superpage
*/
- set_p2m_entry(p2m, gfn_aligned, _mfn(0), PAGE_ORDER_2M,
+ set_p2m_entry(p2m, gfn_aligned, _mfn(INVALID_MFN), PAGE_ORDER_2M,
p2m_populate_on_demand, p2m->default_access);
return 0;
}
@@ -1125,7 +1125,7 @@ remap_and_retry:
* need promoting the gfn lock from gfn->2M superpage */
gfn_aligned = (gfn>>order)<<order;
for(i=0; i<(1<<order); i++)
- set_p2m_entry(p2m, gfn_aligned+i, _mfn(0), PAGE_ORDER_4K,
+ set_p2m_entry(p2m, gfn_aligned + i, _mfn(INVALID_MFN), PAGE_ORDER_4K,
p2m_populate_on_demand, p2m->default_access);
if ( tb_init_done )
{
@@ -1180,7 +1180,7 @@ guest_physmap_mark_populate_on_demand(st
}
/* Now, actually do the two-way mapping */
- if ( !set_p2m_entry(p2m, gfn, _mfn(0), order,
+ if ( !set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), order,
p2m_populate_on_demand, p2m->default_access) )
rc = -EINVAL;
else
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -337,7 +337,7 @@ p2m_set_entry(struct p2m_domain *p2m, un
}
ASSERT(!mfn_valid(mfn) || p2mt != p2m_mmio_direct);
- l3e_content = mfn_valid(mfn)
+ l3e_content = mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt)
? l3e_from_pfn(mfn_x(mfn),
p2m_type_to_flags(p2mt, mfn) | _PAGE_PSE)
: l3e_empty();
@@ -367,8 +367,7 @@ p2m_set_entry(struct p2m_domain *p2m, un
ASSERT(p2m_entry);
old_mfn = l1e_get_pfn(*p2m_entry);
- if ( mfn_valid(mfn) || (p2mt == p2m_mmio_direct)
- || p2m_is_paging(p2mt) )
+ if ( mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt) )
entry_content = p2m_l1e_from_pfn(mfn_x(mfn),
p2m_type_to_flags(p2mt, mfn));
else
@@ -397,7 +396,7 @@ p2m_set_entry(struct p2m_domain *p2m, un
}
ASSERT(!mfn_valid(mfn) || p2mt != p2m_mmio_direct);
- if ( mfn_valid(mfn) || p2m_is_pod(p2mt) )
+ if ( mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt) )
l2e_content = l2e_from_pfn(mfn_x(mfn),
p2m_type_to_flags(p2mt, mfn) |
_PAGE_PSE);
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -158,6 +158,11 @@ typedef unsigned int p2m_query_t;
| p2m_to_mask(p2m_ram_logdirty) )
#define P2M_SHARED_TYPES (p2m_to_mask(p2m_ram_shared))
+/* Valid types not necessarily associated with a (valid) MFN. */
+#define P2M_INVALID_MFN_TYPES (P2M_POD_TYPES \
+ | p2m_to_mask(p2m_mmio_direct) \
+ | P2M_PAGING_TYPES)
+
/* Broken type: the frame backing this pfn has failed in hardware
* and must not be touched. */
#define P2M_BROKEN_TYPES (p2m_to_mask(p2m_ram_broken))
@@ -181,6 +186,8 @@ typedef unsigned int p2m_query_t;
#define p2m_is_shared(_t) (p2m_to_mask(_t) & P2M_SHARED_TYPES)
#define p2m_is_broken(_t) (p2m_to_mask(_t) & P2M_BROKEN_TYPES)
+#define p2m_allows_invalid_mfn(t) (p2m_to_mask(t) & P2M_INVALID_MFN_TYPES)
+
/* Per-p2m-table state */
struct p2m_domain {
/* Lock that protects updates to the p2m */