References: bsc#976058
# Commit 77eb5dbeff78bbe549793325520f59ab46a187f8
# Date 2016-05-02 09:20:17 +0200
# Author Jan Beulich <jbeulich@suse.com>
# Committer Jan Beulich <jbeulich@suse.com>
x86/shadow: account for ioreq server pages before complaining about not found mapping

prepare_ring_for_helper(), just like share_xen_page_with_guest(),
takes a write reference on the page; that reference hence needs to be
accounted for in the same way when determining whether to log a
complaint.
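
For reference, a minimal sketch of how that write reference comes
about. This is illustrative only (hypothetical function name, error
paths trimmed), not a verbatim copy of prepare_ring_for_helper():

    /* Sketch: how an ioreq server page gains one general reference and
     * one writable type reference. */
    static int grab_helper_page(struct domain *d, unsigned long gmfn,
                                struct page_info **_page, void **_va)
    {
        p2m_type_t p2mt;
        struct page_info *page = get_page_from_gfn(d, gmfn, &p2mt,
                                                   P2M_UNSHARE);
        void *va;

        if ( !page )
            return -EINVAL;

        /* The write (type) reference that sh_remove_all_mappings() must
         * now account for, matching share_xen_page_with_guest()'s. */
        if ( !get_page_type(page, PGT_writable_page) )
        {
            put_page(page);
            return -EINVAL;
        }

        va = __map_domain_page_global(page);
        if ( !va )
        {
            put_page_and_type(page);
            return -ENOMEM;
        }

        *_page = page;
        *_va = va;
        return 0;
    }
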
This requires using recursive locking for the ioreq server lock, as the
offending invocation of sh_remove_all_mappings() occurs down the call
stack from hvm_set_ioreq_server_state(). (While the conversion is
strictly needed only there, convert all other lock uses too, for
consistency.)
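
To illustrate the deadlock being avoided, here is a simplified view of
the call chain together with a sketch of what a recursive spinlock
does. The struct and function names below are illustrative, not Xen's
actual spinlock internals:

    /*
     * Offending call chain (simplified):
     *   hvm_set_ioreq_server_state()      <- takes ioreq_server.lock
     *     -> ... -> sh_remove_all_mappings()
     *       -> is_ioreq_server_page()     <- takes ioreq_server.lock again
     *
     * A plain spin_lock() self-deadlocks on the second acquisition; a
     * recursive lock instead tracks its owner and a nesting depth:
     */
    typedef struct {
        spinlock_t lock;      /* underlying non-recursive lock */
        unsigned int owner;   /* CPU holding it; init to OWNER_NONE */
        unsigned int depth;   /* nesting count */
    } rspinlock_sketch_t;

    #define OWNER_NONE (~0u)

    static void rspin_lock_sketch(rspinlock_sketch_t *l)
    {
        if ( l->owner != smp_processor_id() )  /* first acquisition */
        {
            spin_lock(&l->lock);
            l->owner = smp_processor_id();
        }
        l->depth++;                            /* nested re-acquisition */
    }

    static void rspin_unlock_sketch(rspinlock_sketch_t *l)
    {
        if ( --l->depth == 0 )                 /* outermost unlock */
        {
            l->owner = OWNER_NONE;
            spin_unlock(&l->lock);
        }
    }
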
At the same time, improve the usefulness of the shadow error message:
log all values involved in triggering it, as well as the GFN (to make
it easier to tell which guest page the problem is with; in cases like
the one here the GFN is invariant across invocations, while the MFN
obviously can [and will] vary).
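
As a made-up example of the improved diagnostic (all values invented
for illustration, and assuming SHADOW_ERROR's usual "sh error: %s(): "
prefix), the message changes from

    (XEN) sh error: sh_remove_all_mappings(): can't find all mappings of mfn 23ab45: c=00000003 t=7400000000000001

to something like

    (XEN) sh error: sh_remove_all_mappings(): can't find all mappings of mfn 23ab45 (gfn 102): c=3 t=7400000000000001 x=0 i=1

where x and i report the Xen-heap and ioreq-server page checks feeding
the (now relaxed) complaint condition.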
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Paul Durrant <paul.durrant@citrix.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
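
To aid review of the shadow/common.c hunk below, here is a stand-alone
paraphrase of the updated "are the leftover references expected?" test,
using a hypothetical helper name; it is not part of the patch itself:

    /* Sketch: when may sh_remove_all_mappings() stay silent about
     * references it could not hunt down? */
    static bool_t residual_refs_expected(const struct vcpu *v,
                                         const struct page_info *page)
    {
        /* Only external (HVM) guests have the extra helper mappings. */
        if ( !shadow_mode_external(v->domain) )
            return 0;

        /* At most three untyped references: the guest's own, qemu's
         * mapping of guest RAM, and the HVM restore helper's. */
        if ( (page->count_info & PGC_count_mask) > 3 )
            return 0;

        /* Exactly one writable type reference is expected iff the page
         * is a Xen heap page (share_xen_page_with_guest()) or an ioreq
         * server page (prepare_ring_for_helper()); otherwise none. */
        return (page->u.inuse.type_info & PGT_count_mask) ==
               (is_xen_heap_page(page) ||
                is_ioreq_server_page(v->domain, page));
    }
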
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -521,6 +521,30 @@ static int hvm_map_ioreq_page(
     return 0;
 }
 
+bool_t is_ioreq_server_page(struct domain *d, const struct page_info *page)
+{
+    const struct hvm_ioreq_server *s;
+    bool_t found = 0;
+
+    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+
+    list_for_each_entry ( s,
+                          &d->arch.hvm_domain.ioreq_server.list,
+                          list_entry )
+    {
+        if ( (s->ioreq.va && s->ioreq.page == page) ||
+             (s->bufioreq.va && s->bufioreq.page == page) )
+        {
+            found = 1;
+            break;
+        }
+    }
+
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+
+    return found;
+}
+
 static void hvm_remove_ioreq_gmfn(
     struct domain *d, struct hvm_ioreq_page *iorp)
 {
@@ -1005,7 +1029,7 @@ static int hvm_create_ioreq_server(struc
         goto fail1;
 
     domain_pause(d);
-    spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     rc = -EEXIST;
     if ( is_default && d->arch.hvm_domain.default_ioreq_server != NULL )
@@ -1028,14 +1052,14 @@ static int hvm_create_ioreq_server(struc
     if ( id )
         *id = s->id;
 
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
     domain_unpause(d);
 
     return 0;
 
  fail3:
  fail2:
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
     domain_unpause(d);
 
     xfree(s);
@@ -1048,7 +1072,7 @@ static int hvm_destroy_ioreq_server(stru
     struct hvm_ioreq_server *s;
     int rc;
 
-    spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     rc = -ENOENT;
     list_for_each_entry ( s,
@@ -1077,7 +1101,7 @@ static int hvm_destroy_ioreq_server(stru
         break;
     }
 
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     return rc;
 }
@@ -1090,7 +1114,7 @@ static int hvm_get_ioreq_server_info(str
     struct hvm_ioreq_server *s;
     int rc;
 
-    spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     rc = -ENOENT;
     list_for_each_entry ( s,
@@ -1115,7 +1139,7 @@ static int hvm_get_ioreq_server_info(str
         break;
     }
 
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     return rc;
 }
@@ -1126,7 +1150,7 @@ static int hvm_map_io_range_to_ioreq_ser
     struct hvm_ioreq_server *s;
     int rc;
 
-    spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     rc = -ENOENT;
     list_for_each_entry ( s,
@@ -1166,7 +1190,7 @@ static int hvm_map_io_range_to_ioreq_ser
         }
     }
 
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     return rc;
 }
@@ -1177,7 +1201,7 @@ static int hvm_unmap_io_range_from_ioreq
     struct hvm_ioreq_server *s;
     int rc;
 
-    spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     rc = -ENOENT;
     list_for_each_entry ( s,
@@ -1217,7 +1241,7 @@ static int hvm_unmap_io_range_from_ioreq
         }
     }
 
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     return rc;
 }
@@ -1228,7 +1252,7 @@ static int hvm_set_ioreq_server_state(st
     struct list_head *entry;
     int rc;
 
-    spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     rc = -ENOENT;
     list_for_each ( entry,
@@ -1257,7 +1281,7 @@ static int hvm_set_ioreq_server_state(st
         break;
     }
 
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     return rc;
 }
@@ -1266,7 +1290,7 @@ static int hvm_all_ioreq_servers_add_vcp
     struct hvm_ioreq_server *s;
     int rc;
 
-    spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     list_for_each_entry ( s,
                           &d->arch.hvm_domain.ioreq_server.list,
@@ -1279,7 +1303,7 @@ static int hvm_all_ioreq_servers_add_vcp
             goto fail;
     }
 
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     return 0;
 
@@ -1289,7 +1313,7 @@ static int hvm_all_ioreq_servers_add_vcp
                           list_entry )
         hvm_ioreq_server_remove_vcpu(s, v);
 
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     return rc;
 }
@@ -1298,21 +1322,21 @@ static void hvm_all_ioreq_servers_remove
 {
     struct hvm_ioreq_server *s;
 
-    spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     list_for_each_entry ( s,
                           &d->arch.hvm_domain.ioreq_server.list,
                           list_entry )
         hvm_ioreq_server_remove_vcpu(s, v);
 
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 }
 
 static void hvm_destroy_all_ioreq_servers(struct domain *d)
 {
     struct hvm_ioreq_server *s, *next;
 
-    spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     /* No need to domain_pause() as the domain is being torn down */
 
@@ -1335,7 +1359,7 @@ static void hvm_destroy_all_ioreq_server
         xfree(s);
     }
 
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 }
 
 static int hvm_replace_event_channel(struct vcpu *v, domid_t remote_domid,
@@ -1358,7 +1382,7 @@ static int hvm_set_dm_domain(struct doma
     struct hvm_ioreq_server *s;
     int rc = 0;
 
-    spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     /*
      * Lack of ioreq server is not a failure. HVM_PARAM_DM_DOMAIN will
@@ -1407,7 +1431,7 @@ static int hvm_set_dm_domain(struct doma
         domain_unpause(d);
 
  done:
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     return rc;
 }
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -2394,7 +2394,8 @@ int sh_remove_write_access_from_sl1p(str
 /* Remove all mappings of a guest frame from the shadow tables.
  * Returns non-zero if we need to flush TLBs. */
 
-int sh_remove_all_mappings(struct vcpu *v, mfn_t gmfn)
+static int sh_remove_all_mappings(struct vcpu *v, mfn_t gmfn,
+                                  unsigned long gfn)
 {
     struct page_info *page = mfn_to_page(gmfn);
 
@@ -2446,19 +2447,25 @@ int sh_remove_all_mappings(struct vcpu *
     /* If that didn't catch the mapping, something is very wrong */
     if ( !sh_check_page_has_no_refs(page) )
     {
-        /* Don't complain if we're in HVM and there are some extra mappings:
+        /*
+         * Don't complain if we're in HVM and there are some extra mappings:
          * The qemu helper process has an untyped mapping of this dom's RAM
          * and the HVM restore program takes another.
-         * Also allow one typed refcount for xenheap pages, to match
-         * share_xen_page_with_guest(). */
+         * Also allow one typed refcount for
+         * - Xen heap pages, to match share_xen_page_with_guest(),
+         * - ioreq server pages, to match prepare_ring_for_helper().
+         */
         if ( !(shadow_mode_external(v->domain)
               && (page->count_info & PGC_count_mask) <= 3
               && ((page->u.inuse.type_info & PGT_count_mask)
-                  == !!is_xen_heap_page(page))) )
+                  == (is_xen_heap_page(page) ||
+                      is_ioreq_server_page(v->domain, page)))) )
         {
-            SHADOW_ERROR("can't find all mappings of mfn %lx: "
-                         "c=%08lx t=%08lx\n", mfn_x(gmfn),
-                         page->count_info, page->u.inuse.type_info);
+            SHADOW_ERROR("can't find all mappings of mfn %lx (gfn %lx): "
+                         "c=%lx t=%lx x=%d i=%d\n", mfn_x(gmfn), gfn,
+                         page->count_info, page->u.inuse.type_info,
+                         !!is_xen_heap_page(page),
+                         is_ioreq_server_page(v->domain, page));
         }
     }
 
@@ -3337,7 +3344,7 @@ static void sh_unshadow_for_p2m_change(s
     if ( (p2m_is_valid(p2mt) || p2m_is_grant(p2mt)) && mfn_valid(mfn) )
     {
         sh_remove_all_shadows_and_parents(v, mfn);
-        if ( sh_remove_all_mappings(v, mfn) )
+        if ( sh_remove_all_mappings(v, mfn, gfn) )
             flush_tlb_mask(d->domain_dirty_cpumask);
     }
 }
@@ -3372,7 +3379,8 @@ static void sh_unshadow_for_p2m_change(s
                 {
                     /* This GFN->MFN mapping has gone away */
                     sh_remove_all_shadows_and_parents(v, omfn);
-                    if ( sh_remove_all_mappings(v, omfn) )
+                    if ( sh_remove_all_mappings(v, omfn,
+                                                gfn + (i << PAGE_SHIFT)) )
                         cpumask_or(&flushmask, &flushmask,
                                    d->domain_dirty_cpumask);
                 }
@@ -3588,7 +3596,8 @@ int shadow_track_dirty_vram(struct domai
                     dirty = 1;
                     /* TODO: Heuristics for finding the single mapping of
                      * this gmfn */
-                    flush_tlb |= sh_remove_all_mappings(d->vcpu[0], mfn);
+                    flush_tlb |= sh_remove_all_mappings(d->vcpu[0], mfn,
+                                                        begin_pfn + i);
                 }
                 else
                 {
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -361,6 +361,7 @@ void hvm_cpuid(unsigned int input, unsig
 void hvm_migrate_timers(struct vcpu *v);
 bool_t hvm_has_dm(struct domain *d);
 bool_t hvm_io_pending(struct vcpu *v);
+bool_t is_ioreq_server_page(struct domain *d, const struct page_info *page);
 void hvm_do_resume(struct vcpu *v);
 void hvm_migrate_pirqs(struct vcpu *v);
 
--- a/xen/include/asm-x86/shadow.h
+++ b/xen/include/asm-x86/shadow.h
@@ -91,18 +91,6 @@ void shadow_clean_dirty_bitmap(struct do
  * has changed, and when bringing up a VCPU for the first time. */
 void shadow_update_paging_modes(struct vcpu *v);
 
-
-/* Remove all mappings of the guest page from the shadows.
- * This is called from common code. It does not flush TLBs. */
-int sh_remove_all_mappings(struct vcpu *v, mfn_t target_mfn);
-static inline void
-shadow_drop_references(struct domain *d, struct page_info *p)
-{
-    if ( unlikely(shadow_mode_enabled(d)) )
-        /* See the comment about locking in sh_remove_all_mappings */
-        sh_remove_all_mappings(d->vcpu[0], _mfn(page_to_mfn(p)));
-}
-
 /* Remove all shadows of the guest mfn. */
 void sh_remove_shadows(struct vcpu *v, mfn_t gmfn, int fast, int all);
 static inline void shadow_remove_all_shadows(struct vcpu *v, mfn_t gmfn)