Sign Up
Log In
Log In
or
Sign Up
Places
All Projects
Status Monitor
Collapse sidebar
openSUSE:12.2:ARM
xen
23842-mem_event_use_different_ringbuffers_for_s...
Overview
Repositories
Revisions
Requests
Users
Attributes
Meta
File 23842-mem_event_use_different_ringbuffers_for_share_paging_and_access.patch of Package xen
changeset: 23842:483c5f8319ad user: Olaf Hering <olaf@aepfle.de> date: Fri Sep 16 12:19:26 2011 +0100 files: tools/libxc/Makefile tools/libxc/xc_mem_access.c tools/libxc/xc_mem_event.c tools/libxc/xc_mem_paging.c tools/libxc/xc_memshr.c tools/libxc/xenctrl.h tools/tests/xen-access/xen-access.c tools/xenpaging/xenpaging.c xen/arch/ia64/xen/dom0_ops.c xen/arch/x86/hvm/hvm.c xen/arch/x86/mm/mem_event.c xen/arch/x86/mm/mem_paging.c xen/arch/x86/mm/mem_sharing.c xen/arch/x86/mm/p2m.c xen/include/public/domctl.h xen/include/xen/sched.h description: mem_event: use different ringbuffers for share, paging and access Up to now a single ring buffer was used for mem_share, xenpaging and xen-access. Each helper would have to cooperate and pull only its own requests from the ring. Unfortunately this was not implemented. And even if it was, it would make the whole concept fragile because a crash or early exit of one helper would stall the others. What happened up to now is that active xenpaging + memory_sharing would push memsharing requests in the buffer. xenpaging is not prepared for such requests. This patch creates an independent ring buffer for mem_share, xenpaging and xen-access and also adds new functions to enable xenpaging and xen-access. The xc_mem_event_enable/xc_mem_event_disable functions will be removed. The various XEN_DOMCTL_MEM_EVENT_* macros were cleaned up. Due to the removal the API changed, so the SONAME will be changed too. 
Signed-off-by: Olaf Hering <olaf@aepfle.de> Acked-by: Tim Deegan <tim@xen.org> Acked-by: Ian Jackson <ian.jackson@eu.citrix.com> Committed-by: Tim Deegan <tim@xen.org> --- tools/libxc/Makefile | 2 tools/libxc/xc_mem_access.c | 21 ++++ tools/libxc/xc_mem_event.c | 15 --- tools/libxc/xc_mem_paging.c | 33 +++++-- tools/libxc/xc_memshr.c | 16 +-- tools/libxc/xenctrl.h | 9 + tools/tests/xen-access/xen-access.c | 4 tools/xenpaging/xenpaging.c | 4 xen/arch/ia64/xen/dom0_ops.c | 2 xen/arch/x86/hvm/hvm.c | 4 xen/arch/x86/mm/mem_event.c | 163 ++++++++++++++++++++---------------- xen/arch/x86/mm/mem_sharing.c | 22 ++-- xen/arch/x86/mm/p2m.c | 18 +-- xen/include/public/domctl.h | 43 +++++---- xen/include/xen/sched.h | 6 + 15 files changed, 206 insertions(+), 156 deletions(-) Index: xen-4.1.2-testing/tools/libxc/Makefile =================================================================== --- xen-4.1.2-testing.orig/tools/libxc/Makefile +++ xen-4.1.2-testing/tools/libxc/Makefile @@ -1,7 +1,7 @@ XEN_ROOT = $(CURDIR)/../.. 
include $(XEN_ROOT)/tools/Rules.mk -MAJOR = 4.0 +MAJOR = 4.2 MINOR = 0 CTRL_SRCS-y := Index: xen-4.1.2-testing/tools/libxc/xc_mem_access.c =================================================================== --- xen-4.1.2-testing.orig/tools/libxc/xc_mem_access.c +++ xen-4.1.2-testing/tools/libxc/xc_mem_access.c @@ -24,12 +24,29 @@ #include "xc_private.h" +int xc_mem_access_enable(xc_interface *xch, domid_t domain_id, + void *shared_page, void *ring_page) +{ + return xc_mem_event_control(xch, domain_id, + XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE, + XEN_DOMCTL_MEM_EVENT_OP_ACCESS, + shared_page, ring_page, INVALID_MFN); +} + +int xc_mem_access_disable(xc_interface *xch, domid_t domain_id) +{ + return xc_mem_event_control(xch, domain_id, + XEN_DOMCTL_MEM_EVENT_OP_ACCESS_DISABLE, + XEN_DOMCTL_MEM_EVENT_OP_ACCESS, + NULL, NULL, INVALID_MFN); +} + int xc_mem_access_resume(xc_interface *xch, domid_t domain_id, unsigned long gfn) { return xc_mem_event_control(xch, domain_id, XEN_DOMCTL_MEM_EVENT_OP_ACCESS_RESUME, - XEN_DOMCTL_MEM_EVENT_OP_ACCESS, NULL, NULL, - gfn); + XEN_DOMCTL_MEM_EVENT_OP_ACCESS, + NULL, NULL, gfn); } /* Index: xen-4.1.2-testing/tools/libxc/xc_mem_event.c =================================================================== --- xen-4.1.2-testing.orig/tools/libxc/xc_mem_event.c +++ xen-4.1.2-testing/tools/libxc/xc_mem_event.c @@ -42,18 +42,3 @@ int xc_mem_event_control(xc_interface *x return do_domctl(xch, &domctl); } -int xc_mem_event_enable(xc_interface *xch, domid_t domain_id, - void *shared_page, void *ring_page) -{ - return xc_mem_event_control(xch, domain_id, - XEN_DOMCTL_MEM_EVENT_OP_ENABLE, 0, - shared_page, ring_page, INVALID_MFN); -} - -int xc_mem_event_disable(xc_interface *xch, domid_t domain_id) -{ - return xc_mem_event_control(xch, domain_id, - XEN_DOMCTL_MEM_EVENT_OP_DISABLE, 0, - NULL, NULL, INVALID_MFN); -} - Index: xen-4.1.2-testing/tools/libxc/xc_mem_paging.c =================================================================== --- 
xen-4.1.2-testing.orig/tools/libxc/xc_mem_paging.c +++ xen-4.1.2-testing/tools/libxc/xc_mem_paging.c @@ -24,36 +24,53 @@ #include "xc_private.h" +int xc_mem_paging_enable(xc_interface *xch, domid_t domain_id, + void *shared_page, void *ring_page) +{ + return xc_mem_event_control(xch, domain_id, + XEN_DOMCTL_MEM_EVENT_OP_PAGING_ENABLE, + XEN_DOMCTL_MEM_EVENT_OP_PAGING, + shared_page, ring_page, INVALID_MFN); +} + +int xc_mem_paging_disable(xc_interface *xch, domid_t domain_id) +{ + return xc_mem_event_control(xch, domain_id, + XEN_DOMCTL_MEM_EVENT_OP_PAGING_DISABLE, + XEN_DOMCTL_MEM_EVENT_OP_PAGING, + NULL, NULL, INVALID_MFN); +} + int xc_mem_paging_nominate(xc_interface *xch, domid_t domain_id, unsigned long gfn) { return xc_mem_event_control(xch, domain_id, XEN_DOMCTL_MEM_EVENT_OP_PAGING_NOMINATE, - XEN_DOMCTL_MEM_EVENT_OP_PAGING, NULL, NULL, - gfn); + XEN_DOMCTL_MEM_EVENT_OP_PAGING, + NULL, NULL, gfn); } int xc_mem_paging_evict(xc_interface *xch, domid_t domain_id, unsigned long gfn) { return xc_mem_event_control(xch, domain_id, XEN_DOMCTL_MEM_EVENT_OP_PAGING_EVICT, - XEN_DOMCTL_MEM_EVENT_OP_PAGING, NULL, NULL, - gfn); + XEN_DOMCTL_MEM_EVENT_OP_PAGING, + NULL, NULL, gfn); } int xc_mem_paging_prep(xc_interface *xch, domid_t domain_id, unsigned long gfn) { return xc_mem_event_control(xch, domain_id, XEN_DOMCTL_MEM_EVENT_OP_PAGING_PREP, - XEN_DOMCTL_MEM_EVENT_OP_PAGING, NULL, NULL, - gfn); + XEN_DOMCTL_MEM_EVENT_OP_PAGING, + NULL, NULL, gfn); } int xc_mem_paging_resume(xc_interface *xch, domid_t domain_id, unsigned long gfn) { return xc_mem_event_control(xch, domain_id, XEN_DOMCTL_MEM_EVENT_OP_PAGING_RESUME, - XEN_DOMCTL_MEM_EVENT_OP_PAGING, NULL, NULL, - gfn); + XEN_DOMCTL_MEM_EVENT_OP_PAGING, + NULL, NULL, gfn); } Index: xen-4.1.2-testing/tools/libxc/xc_memshr.c =================================================================== --- xen-4.1.2-testing.orig/tools/libxc/xc_memshr.c +++ xen-4.1.2-testing/tools/libxc/xc_memshr.c @@ -36,7 +36,7 @@ int 
xc_memshr_control(xc_interface *xch, domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION; domctl.domain = (domid_t)domid; op = &(domctl.u.mem_sharing_op); - op->op = XEN_DOMCTL_MEM_SHARING_OP_CONTROL; + op->op = XEN_DOMCTL_MEM_EVENT_OP_SHARING_CONTROL; op->u.enable = enable; return do_domctl(xch, &domctl); @@ -55,7 +55,7 @@ int xc_memshr_nominate_gfn(xc_interface domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION; domctl.domain = (domid_t)domid; op = &(domctl.u.mem_sharing_op); - op->op = XEN_DOMCTL_MEM_SHARING_OP_NOMINATE_GFN; + op->op = XEN_DOMCTL_MEM_EVENT_OP_SHARING_NOMINATE_GFN; op->u.nominate.u.gfn = gfn; ret = do_domctl(xch, &domctl); @@ -77,7 +77,7 @@ int xc_memshr_nominate_gref(xc_interface domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION; domctl.domain = (domid_t)domid; op = &(domctl.u.mem_sharing_op); - op->op = XEN_DOMCTL_MEM_SHARING_OP_NOMINATE_GREF; + op->op = XEN_DOMCTL_MEM_EVENT_OP_SHARING_NOMINATE_GREF; op->u.nominate.u.grant_ref = gref; ret = do_domctl(xch, &domctl); @@ -97,7 +97,7 @@ int xc_memshr_share(xc_interface *xch, domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION; domctl.domain = 0; op = &(domctl.u.mem_sharing_op); - op->op = XEN_DOMCTL_MEM_SHARING_OP_SHARE; + op->op = XEN_DOMCTL_MEM_EVENT_OP_SHARING_SHARE; op->u.share.source_handle = source_handle; op->u.share.client_handle = client_handle; @@ -114,7 +114,7 @@ int xc_memshr_domain_resume(xc_interface domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION; domctl.domain = (domid_t)domid; op = &(domctl.u.mem_sharing_op); - op->op = XEN_DOMCTL_MEM_SHARING_OP_RESUME; + op->op = XEN_DOMCTL_MEM_EVENT_OP_SHARING_RESUME; return do_domctl(xch, &domctl); } @@ -130,7 +130,7 @@ int xc_memshr_debug_gfn(xc_interface *xc domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION; domctl.domain = (domid_t)domid; op = &(domctl.u.mem_sharing_op); - op->op = XEN_DOMCTL_MEM_SHARING_OP_DEBUG_GFN; + op->op = XEN_DOMCTL_MEM_EVENT_OP_SHARING_DEBUG_GFN; op->u.debug.u.gfn = gfn; return 
do_domctl(xch, &domctl); @@ -147,7 +147,7 @@ int xc_memshr_debug_mfn(xc_interface *xc domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION; domctl.domain = (domid_t)domid; op = &(domctl.u.mem_sharing_op); - op->op = XEN_DOMCTL_MEM_SHARING_OP_DEBUG_MFN; + op->op = XEN_DOMCTL_MEM_EVENT_OP_SHARING_DEBUG_MFN; op->u.debug.u.mfn = mfn; return do_domctl(xch, &domctl); @@ -164,7 +164,7 @@ int xc_memshr_debug_gref(xc_interface *x domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION; domctl.domain = (domid_t)domid; op = &(domctl.u.mem_sharing_op); - op->op = XEN_DOMCTL_MEM_SHARING_OP_DEBUG_GREF; + op->op = XEN_DOMCTL_MEM_EVENT_OP_SHARING_DEBUG_GREF; op->u.debug.u.gref = gref; return do_domctl(xch, &domctl); Index: xen-4.1.2-testing/tools/libxc/xenctrl.h =================================================================== --- xen-4.1.2-testing.orig/tools/libxc/xenctrl.h +++ xen-4.1.2-testing/tools/libxc/xenctrl.h @@ -1734,16 +1734,19 @@ int xc_mem_event_control(xc_interface *x unsigned int mode, void *shared_page, void *ring_page, unsigned long gfn); -int xc_mem_event_enable(xc_interface *xch, domid_t domain_id, +int xc_mem_paging_enable(xc_interface *xch, domid_t domain_id, void *shared_page, void *ring_page); -int xc_mem_event_disable(xc_interface *xch, domid_t domain_id); - +int xc_mem_paging_disable(xc_interface *xch, domid_t domain_id); int xc_mem_paging_nominate(xc_interface *xch, domid_t domain_id, unsigned long gfn); int xc_mem_paging_evict(xc_interface *xch, domid_t domain_id, unsigned long gfn); int xc_mem_paging_prep(xc_interface *xch, domid_t domain_id, unsigned long gfn); int xc_mem_paging_resume(xc_interface *xch, domid_t domain_id, unsigned long gfn); + +int xc_mem_access_enable(xc_interface *xch, domid_t domain_id, + void *shared_page, void *ring_page); +int xc_mem_access_disable(xc_interface *xch, domid_t domain_id); int xc_mem_access_resume(xc_interface *xch, domid_t domain_id, unsigned long gfn); Index: 
xen-4.1.2-testing/tools/tests/xen-access/xen-access.c =================================================================== --- xen-4.1.2-testing.orig/tools/tests/xen-access/xen-access.c +++ xen-4.1.2-testing/tools/tests/xen-access/xen-access.c @@ -241,7 +241,7 @@ xenaccess_t *xenaccess_init(xc_interface mem_event_ring_lock_init(&xenaccess->mem_event); /* Initialise Xen */ - rc = xc_mem_event_enable(xenaccess->xc_handle, xenaccess->mem_event.domain_id, + rc = xc_mem_access_enable(xenaccess->xc_handle, xenaccess->mem_event.domain_id, xenaccess->mem_event.shared_page, xenaccess->mem_event.ring_page); if ( rc != 0 ) @@ -351,7 +351,7 @@ int xenaccess_teardown(xc_interface *xch return 0; /* Tear down domain xenaccess in Xen */ - rc = xc_mem_event_disable(xenaccess->xc_handle, xenaccess->mem_event.domain_id); + rc = xc_mem_access_disable(xenaccess->xc_handle, xenaccess->mem_event.domain_id); if ( rc != 0 ) { ERROR("Error tearing down domain xenaccess in xen"); Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.c =================================================================== --- xen-4.1.2-testing.orig/tools/xenpaging/xenpaging.c +++ xen-4.1.2-testing/tools/xenpaging/xenpaging.c @@ -234,7 +234,7 @@ static xenpaging_t *xenpaging_init(domid PAGE_SIZE); /* Initialise Xen */ - rc = xc_mem_event_enable(xch, paging->mem_event.domain_id, + rc = xc_mem_paging_enable(xch, paging->mem_event.domain_id, paging->mem_event.shared_page, paging->mem_event.ring_page); if ( rc != 0 ) @@ -353,7 +353,7 @@ static int xenpaging_teardown(xenpaging_ xch = paging->xc_handle; paging->xc_handle = NULL; /* Tear down domain paging in Xen */ - rc = xc_mem_event_disable(xch, paging->mem_event.domain_id); + rc = xc_mem_paging_disable(xch, paging->mem_event.domain_id); if ( rc != 0 ) { ERROR("Error tearing down domain paging in xen"); Index: xen-4.1.2-testing/xen/arch/ia64/xen/dom0_ops.c =================================================================== --- 
xen-4.1.2-testing.orig/xen/arch/ia64/xen/dom0_ops.c +++ xen-4.1.2-testing/xen/arch/ia64/xen/dom0_ops.c @@ -688,7 +688,7 @@ long arch_do_domctl(xen_domctl_t *op, XE switch(mec->op) { - case XEN_DOMCTL_MEM_SHARING_OP_CONTROL: + case XEN_DOMCTL_MEM_EVENT_OP_SHARING_CONTROL: { if (mec->u.enable) { ret = -EINVAL; /* not implemented */ Index: xen-4.1.2-testing/xen/arch/x86/hvm/hvm.c =================================================================== --- xen-4.1.2-testing.orig/xen/arch/x86/hvm/hvm.c +++ xen-4.1.2-testing/xen/arch/x86/hvm/hvm.c @@ -3909,7 +3909,7 @@ static int hvm_memory_event_traps(long p if ( (p & HVMPME_onchangeonly) && (value == old) ) return 1; - rc = mem_event_check_ring(d, &d->mem_event); + rc = mem_event_check_ring(d, &d->mem_access); if ( rc ) return rc; @@ -3932,7 +3932,7 @@ static int hvm_memory_event_traps(long p req.gla_valid = 1; } - mem_event_put_request(d, &d->mem_event, &req); + mem_event_put_request(d, &d->mem_access, &req); return 1; } Index: xen-4.1.2-testing/xen/arch/x86/mm/mem_event.c =================================================================== --- xen-4.1.2-testing.orig/xen/arch/x86/mm/mem_event.c +++ xen-4.1.2-testing/xen/arch/x86/mm/mem_event.c @@ -37,24 +37,52 @@ #define mem_event_ring_lock(_med) spin_lock(&(_med)->ring_lock) #define mem_event_ring_unlock(_med) spin_unlock(&(_med)->ring_lock) -static int mem_event_enable(struct domain *d, struct mem_event_domain *med, mfn_t ring_mfn, mfn_t shared_mfn) +static int mem_event_enable(struct domain *d, + xen_domctl_mem_event_op_t *mec, + struct mem_event_domain *med) { int rc; + struct domain *dom_mem_event = current->domain; + struct vcpu *v = current; + unsigned long ring_addr = mec->ring_addr; + unsigned long shared_addr = mec->shared_addr; + l1_pgentry_t l1e; + unsigned long gfn; + p2m_type_t p2mt; + mfn_t ring_mfn; + mfn_t shared_mfn; + + /* Only one helper at a time. If the helper crashed, + * the ring is in an undefined state and so is the guest. 
+ */ + if ( med->ring_page ) + return -EBUSY; + + /* Get MFN of ring page */ + guest_get_eff_l1e(v, ring_addr, &l1e); + gfn = l1e_get_pfn(l1e); + ring_mfn = gfn_to_mfn(p2m_get_hostp2m(dom_mem_event), gfn, &p2mt); + + if ( unlikely(!mfn_valid(mfn_x(ring_mfn))) ) + return -EINVAL; + + /* Get MFN of shared page */ + guest_get_eff_l1e(v, shared_addr, &l1e); + gfn = l1e_get_pfn(l1e); + shared_mfn = gfn_to_mfn(p2m_get_hostp2m(dom_mem_event), gfn, &p2mt); + + if ( unlikely(!mfn_valid(mfn_x(shared_mfn))) ) + return -EINVAL; /* Map ring and shared pages */ med->ring_page = map_domain_page(mfn_x(ring_mfn)); - if ( med->ring_page == NULL ) - goto err; - med->shared_page = map_domain_page(mfn_x(shared_mfn)); - if ( med->shared_page == NULL ) - goto err_ring; /* Allocate event channel */ rc = alloc_unbound_xen_event_channel(d->vcpu[0], current->domain->domain_id); if ( rc < 0 ) - goto err_shared; + goto err; ((mem_event_shared_page_t *)med->shared_page)->port = rc; med->xen_port = rc; @@ -71,14 +99,14 @@ static int mem_event_enable(struct domai return 0; - err_shared: + err: unmap_domain_page(med->shared_page); med->shared_page = NULL; - err_ring: + unmap_domain_page(med->ring_page); med->ring_page = NULL; - err: - return 1; + + return rc; } static int mem_event_disable(struct mem_event_domain *med) @@ -220,86 +248,79 @@ int mem_event_domctl(struct domain *d, x rc = -ENOSYS; - switch ( mec-> mode ) + switch ( mec->mode ) { - case 0: + case XEN_DOMCTL_MEM_EVENT_OP_PAGING: { + struct mem_event_domain *med = &d->mem_paging; + rc = -ENODEV; + /* Only HAP is supported */ + if ( !hap_enabled(d) ) + break; + + /* Currently only EPT is supported */ + if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ) + break; + switch( mec->op ) { - case XEN_DOMCTL_MEM_EVENT_OP_ENABLE: + case XEN_DOMCTL_MEM_EVENT_OP_PAGING_ENABLE: { - struct domain *dom_mem_event = current->domain; - struct vcpu *v = current; - struct mem_event_domain *med = &d->mem_event; - unsigned long ring_addr = mec->ring_addr; - 
unsigned long shared_addr = mec->shared_addr; - l1_pgentry_t l1e; - unsigned long gfn; - p2m_type_t p2mt; - mfn_t ring_mfn; - mfn_t shared_mfn; - - /* Only one xenpaging at a time. If xenpaging crashed, - * the cache is in an undefined state and so is the guest - */ - rc = -EBUSY; - if ( med->ring_page ) - break; - - /* Currently only EPT is supported */ - rc = -ENODEV; - if ( !(hap_enabled(d) && - (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)) ) - break; - - /* Get MFN of ring page */ - guest_get_eff_l1e(v, ring_addr, &l1e); - gfn = l1e_get_pfn(l1e); - ring_mfn = gfn_to_mfn(p2m_get_hostp2m(dom_mem_event), gfn, &p2mt); - - rc = -EINVAL; - if ( unlikely(!mfn_valid(mfn_x(ring_mfn))) ) - break; - - /* Get MFN of shared page */ - guest_get_eff_l1e(v, shared_addr, &l1e); - gfn = l1e_get_pfn(l1e); - shared_mfn = gfn_to_mfn(p2m_get_hostp2m(dom_mem_event), gfn, &p2mt); - - rc = -EINVAL; - if ( unlikely(!mfn_valid(mfn_x(shared_mfn))) ) - break; - - rc = -EINVAL; - if ( mem_event_enable(d, med, ring_mfn, shared_mfn) != 0 ) - break; - - rc = 0; + rc = mem_event_enable(d, mec, med); } break; - case XEN_DOMCTL_MEM_EVENT_OP_DISABLE: + case XEN_DOMCTL_MEM_EVENT_OP_PAGING_DISABLE: { - rc = mem_event_disable(&d->mem_event); + rc = mem_event_disable(med); } break; default: - rc = -ENOSYS; - break; + { + if ( med->ring_page ) + rc = mem_paging_domctl(d, mec, u_domctl); } break; + } } - case XEN_DOMCTL_MEM_EVENT_OP_PAGING: - { - rc = mem_paging_domctl(d, mec, u_domctl); - break; - } + break; + case XEN_DOMCTL_MEM_EVENT_OP_ACCESS: { - rc = mem_access_domctl(d, mec, u_domctl); + struct mem_event_domain *med = &d->mem_access; + rc = -ENODEV; + /* Only HAP is supported */ + if ( !hap_enabled(d) ) + break; + + /* Currently only EPT is supported */ + if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ) + break; + + switch( mec->op ) + { + case XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE: + { + rc = mem_event_enable(d, mec, med); + } + break; + + case XEN_DOMCTL_MEM_EVENT_OP_ACCESS_DISABLE: + { + 
rc = mem_event_disable(&d->mem_access); + } + break; + + default: + { + if ( med->ring_page ) + rc = mem_access_domctl(d, mec, u_domctl); + } break; + } } + break; } return rc; Index: xen-4.1.2-testing/xen/arch/x86/mm/mem_sharing.c =================================================================== --- xen-4.1.2-testing.orig/xen/arch/x86/mm/mem_sharing.c +++ xen-4.1.2-testing/xen/arch/x86/mm/mem_sharing.c @@ -322,12 +322,12 @@ static struct page_info* mem_sharing_all req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED; } - if(mem_event_check_ring(d, &d->mem_event)) return page; + if(mem_event_check_ring(d, &d->mem_share)) return page; req.gfn = gfn; req.p2mt = p2m_ram_shared; req.vcpu_id = v->vcpu_id; - mem_event_put_request(d, &d->mem_event, &req); + mem_event_put_request(d, &d->mem_share, &req); return page; } @@ -342,7 +342,7 @@ int mem_sharing_sharing_resume(struct do mem_event_response_t rsp; /* Get request off the ring */ - mem_event_get_response(&d->mem_event, &rsp); + mem_event_get_response(&d->mem_share, &rsp); /* Unpause domain/vcpu */ if( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED ) @@ -739,7 +739,7 @@ int mem_sharing_domctl(struct domain *d, switch(mec->op) { - case XEN_DOMCTL_MEM_SHARING_OP_CONTROL: + case XEN_DOMCTL_MEM_EVENT_OP_SHARING_CONTROL: { d->arch.hvm_domain.mem_sharing_enabled = mec->u.enable; mem_sharing_audit(); @@ -747,7 +747,7 @@ int mem_sharing_domctl(struct domain *d, } break; - case XEN_DOMCTL_MEM_SHARING_OP_NOMINATE_GFN: + case XEN_DOMCTL_MEM_EVENT_OP_SHARING_NOMINATE_GFN: { unsigned long gfn = mec->u.nominate.u.gfn; shr_handle_t handle; @@ -759,7 +759,7 @@ int mem_sharing_domctl(struct domain *d, } break; - case XEN_DOMCTL_MEM_SHARING_OP_NOMINATE_GREF: + case XEN_DOMCTL_MEM_EVENT_OP_SHARING_NOMINATE_GREF: { grant_ref_t gref = mec->u.nominate.u.grant_ref; unsigned long gfn; @@ -776,7 +776,7 @@ int mem_sharing_domctl(struct domain *d, } break; - case XEN_DOMCTL_MEM_SHARING_OP_SHARE: + case XEN_DOMCTL_MEM_EVENT_OP_SHARING_SHARE: { shr_handle_t sh = 
mec->u.share.source_handle; shr_handle_t ch = mec->u.share.client_handle; @@ -785,7 +785,7 @@ int mem_sharing_domctl(struct domain *d, } break; - case XEN_DOMCTL_MEM_SHARING_OP_RESUME: + case XEN_DOMCTL_MEM_EVENT_OP_SHARING_RESUME: { if(!mem_sharing_enabled(d)) return -EINVAL; @@ -794,7 +794,7 @@ int mem_sharing_domctl(struct domain *d, } break; - case XEN_DOMCTL_MEM_SHARING_OP_DEBUG_GFN: + case XEN_DOMCTL_MEM_EVENT_OP_SHARING_DEBUG_GFN: { unsigned long gfn = mec->u.debug.u.gfn; rc = mem_sharing_debug_gfn(d, gfn); @@ -802,7 +802,7 @@ int mem_sharing_domctl(struct domain *d, } break; - case XEN_DOMCTL_MEM_SHARING_OP_DEBUG_MFN: + case XEN_DOMCTL_MEM_EVENT_OP_SHARING_DEBUG_MFN: { unsigned long mfn = mec->u.debug.u.mfn; rc = mem_sharing_debug_mfn(mfn); @@ -810,7 +810,7 @@ int mem_sharing_domctl(struct domain *d, } break; - case XEN_DOMCTL_MEM_SHARING_OP_DEBUG_GREF: + case XEN_DOMCTL_MEM_EVENT_OP_SHARING_DEBUG_GREF: { grant_ref_t gref = mec->u.debug.u.gref; rc = mem_sharing_debug_gref(d, gref); Index: xen-4.1.2-testing/xen/arch/x86/mm/p2m.c =================================================================== --- xen-4.1.2-testing.orig/xen/arch/x86/mm/p2m.c +++ xen-4.1.2-testing/xen/arch/x86/mm/p2m.c @@ -2923,7 +2923,7 @@ void p2m_mem_paging_drop_page(struct p2m struct domain *d = p2m->domain; /* Check that there's space on the ring for this request */ - if ( mem_event_check_ring(d, &d->mem_event) == 0) + if ( mem_event_check_ring(d, &d->mem_paging) == 0) { /* Send release notification to pager */ memset(&req, 0, sizeof(req)); @@ -2931,7 +2931,7 @@ void p2m_mem_paging_drop_page(struct p2m req.gfn = gfn; req.vcpu_id = v->vcpu_id; - mem_event_put_request(d, &d->mem_event, &req); + mem_event_put_request(d, &d->mem_paging, &req); } } @@ -2943,7 +2943,7 @@ void p2m_mem_paging_populate(struct p2m_ struct domain *d = p2m->domain; /* Check that there's space on the ring for this request */ - if ( mem_event_check_ring(d, &d->mem_event) ) + if ( mem_event_check_ring(d, 
&d->mem_paging) ) return; memset(&req, 0, sizeof(req)); @@ -2970,7 +2970,7 @@ void p2m_mem_paging_populate(struct p2m_ else if ( p2mt != p2m_ram_paging_out && p2mt != p2m_ram_paged ) { /* gfn is already on its way back and vcpu is not paused */ - mem_event_put_req_producers(&d->mem_event); + mem_event_put_req_producers(&d->mem_paging); return; } @@ -2979,7 +2979,7 @@ void p2m_mem_paging_populate(struct p2m_ req.p2mt = p2mt; req.vcpu_id = v->vcpu_id; - mem_event_put_request(d, &d->mem_event, &req); + mem_event_put_request(d, &d->mem_paging, &req); } int p2m_mem_paging_prep(struct p2m_domain *p2m, unsigned long gfn) @@ -3008,7 +3008,7 @@ void p2m_mem_paging_resume(struct p2m_do mfn_t mfn; /* Pull the response off the ring */ - mem_event_get_response(&d->mem_event, &rsp); + mem_event_get_response(&d->mem_paging, &rsp); /* Fix p2m entry if the page was not dropped */ if ( !(rsp.flags & MEM_EVENT_FLAG_DROP_PAGE) ) @@ -3055,7 +3055,7 @@ void p2m_mem_access_check(unsigned long p2m_unlock(p2m); /* Otherwise, check if there is a memory event listener, and send the message along */ - res = mem_event_check_ring(d, &d->mem_event); + res = mem_event_check_ring(d, &d->mem_access); if ( res < 0 ) { /* No listener */ @@ -3099,7 +3099,7 @@ void p2m_mem_access_check(unsigned long req.vcpu_id = v->vcpu_id; - mem_event_put_request(d, &d->mem_event, &req); + mem_event_put_request(d, &d->mem_access, &req); /* VCPU paused, mem event request sent */ } @@ -3109,7 +3109,7 @@ void p2m_mem_access_resume(struct p2m_do struct domain *d = p2m->domain; mem_event_response_t rsp; - mem_event_get_response(&d->mem_event, &rsp); + mem_event_get_response(&d->mem_access, &rsp); /* Unpause domain */ if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED ) Index: xen-4.1.2-testing/xen/include/public/domctl.h =================================================================== --- xen-4.1.2-testing.orig/xen/include/public/domctl.h +++ xen-4.1.2-testing/xen/include/public/domctl.h @@ -707,20 +707,18 @@ struct 
xen_domctl_gdbsx_domstatus { /* XEN_DOMCTL_mem_event_op */ -/* Add and remove memory handlers */ -#define XEN_DOMCTL_MEM_EVENT_OP_ENABLE 0 -#define XEN_DOMCTL_MEM_EVENT_OP_DISABLE 1 - /* +* Domain memory paging * Page memory in and out. */ #define XEN_DOMCTL_MEM_EVENT_OP_PAGING 1 -/* Domain memory paging */ -#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_NOMINATE 0 -#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_EVICT 1 -#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_PREP 2 -#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_RESUME 3 +#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_ENABLE 0 +#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_DISABLE 1 +#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_NOMINATE 2 +#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_EVICT 3 +#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_PREP 4 +#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_RESUME 5 /* * Access permissions. @@ -733,11 +731,14 @@ struct xen_domctl_gdbsx_domstatus { * ACCESS_RESUME mode for the following domctl. */ #define XEN_DOMCTL_MEM_EVENT_OP_ACCESS 2 -#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS_RESUME 0 + +#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE 0 +#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS_DISABLE 1 +#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS_RESUME 2 struct xen_domctl_mem_event_op { - uint32_t op; /* XEN_DOMCTL_MEM_EVENT_OP_* */ - uint32_t mode; /* XEN_DOMCTL_MEM_EVENT_ENABLE_* */ + uint32_t op; /* XEN_DOMCTL_MEM_EVENT_OP_*_* */ + uint32_t mode; /* XEN_DOMCTL_MEM_EVENT_OP_* */ /* OP_ENABLE */ uint64_aligned_t shared_addr; /* IN: Virtual address of shared page */ @@ -754,14 +755,16 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_e */ /* XEN_DOMCTL_mem_sharing_op */ -#define XEN_DOMCTL_MEM_SHARING_OP_CONTROL 0 -#define XEN_DOMCTL_MEM_SHARING_OP_NOMINATE_GFN 1 -#define XEN_DOMCTL_MEM_SHARING_OP_NOMINATE_GREF 2 -#define XEN_DOMCTL_MEM_SHARING_OP_SHARE 3 -#define XEN_DOMCTL_MEM_SHARING_OP_RESUME 4 -#define XEN_DOMCTL_MEM_SHARING_OP_DEBUG_GFN 5 -#define XEN_DOMCTL_MEM_SHARING_OP_DEBUG_MFN 6 -#define XEN_DOMCTL_MEM_SHARING_OP_DEBUG_GREF 7 +#define 
XEN_DOMCTL_MEM_EVENT_OP_SHARING 3 + +#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_CONTROL 0 +#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_NOMINATE_GFN 1 +#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_NOMINATE_GREF 2 +#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_SHARE 3 +#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_RESUME 4 +#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_DEBUG_GFN 5 +#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_DEBUG_MFN 6 +#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_DEBUG_GREF 7 #define XEN_DOMCTL_MEM_SHARING_S_HANDLE_INVALID (-10) #define XEN_DOMCTL_MEM_SHARING_C_HANDLE_INVALID (-9) Index: xen-4.1.2-testing/xen/include/xen/sched.h =================================================================== --- xen-4.1.2-testing.orig/xen/include/xen/sched.h +++ xen-4.1.2-testing/xen/include/xen/sched.h @@ -326,8 +326,12 @@ struct domain /* Non-migratable and non-restoreable? */ bool_t disable_migrate; + /* Memory sharing support */ + struct mem_event_domain mem_share; /* Memory paging support */ - struct mem_event_domain mem_event; + struct mem_event_domain mem_paging; + /* Memory access support */ + struct mem_event_domain mem_access; /* Currently computed from union of all vcpu cpu-affinity masks. */ nodemask_t node_affinity;
Locations
Projects
Search
Status Monitor
Help
OpenBuildService.org
Documentation
API Documentation
Code of Conduct
Contact
Support
@OBShq
Terms
openSUSE Build Service is sponsored by
The Open Build Service is an
openSUSE project
.
Sign Up
Log In
Places
Places
All Projects
Status Monitor