xen: privcmd: support autotranslated physmap guests.
PVH and ARM only support the batch interface. To map a foreign page to a process, the PFN must be allocated and the autotranslated path uses ballooning for that purpose. The returned PFN is then mapped to the foreign page. xen_unmap_domain_mfn_range() is introduced to unmap these pages via the privcmd close call. Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com> Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com> [v1: Fix up privcmd_close] Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> Signed-off-by: Ian Campbell <ian.campbell@citrix.com> [v2: used for ARM too]
This commit is contained in:
parent
9a032e393a
commit
d71f513985
@@ -33,11 +33,14 @@
|
|||||||
#include <xen/features.h>
|
#include <xen/features.h>
|
||||||
#include <xen/page.h>
|
#include <xen/page.h>
|
||||||
#include <xen/xen-ops.h>
|
#include <xen/xen-ops.h>
|
||||||
|
#include <xen/balloon.h>
|
||||||
|
|
||||||
#include "privcmd.h"
|
#include "privcmd.h"
|
||||||
|
|
||||||
MODULE_LICENSE("GPL");
|
MODULE_LICENSE("GPL");
|
||||||
|
|
||||||
|
#define PRIV_VMA_LOCKED ((void *)1)
|
||||||
|
|
||||||
#ifndef HAVE_ARCH_PRIVCMD_MMAP
|
#ifndef HAVE_ARCH_PRIVCMD_MMAP
|
||||||
static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
|
static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
|
||||||
#endif
|
#endif
|
||||||
@@ -199,6 +202,10 @@ static long privcmd_ioctl_mmap(void __user *udata)
|
|||||||
if (!xen_initial_domain())
|
if (!xen_initial_domain())
|
||||||
return -EPERM;
|
return -EPERM;
|
||||||
|
|
||||||
|
/* We only support privcmd_ioctl_mmap_batch for auto translated. */
|
||||||
|
if (xen_feature(XENFEAT_auto_translated_physmap))
|
||||||
|
return -ENOSYS;
|
||||||
|
|
||||||
if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
|
if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
|
||||||
return -EFAULT;
|
return -EFAULT;
|
||||||
|
|
||||||
@@ -246,6 +253,7 @@ struct mmap_batch_state {
|
|||||||
domid_t domain;
|
domid_t domain;
|
||||||
unsigned long va;
|
unsigned long va;
|
||||||
struct vm_area_struct *vma;
|
struct vm_area_struct *vma;
|
||||||
|
int index;
|
||||||
/* A tristate:
|
/* A tristate:
|
||||||
* 0 for no errors
|
* 0 for no errors
|
||||||
* 1 if at least one error has happened (and no
|
* 1 if at least one error has happened (and no
|
||||||
@@ -260,15 +268,24 @@ struct mmap_batch_state {
|
|||||||
xen_pfn_t __user *user_mfn;
|
xen_pfn_t __user *user_mfn;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
/* auto translated dom0 note: if domU being created is PV, then mfn is
|
||||||
|
* mfn(addr on bus). If it's auto xlated, then mfn is pfn (input to HAP).
|
||||||
|
*/
|
||||||
static int mmap_batch_fn(void *data, void *state)
|
static int mmap_batch_fn(void *data, void *state)
|
||||||
{
|
{
|
||||||
xen_pfn_t *mfnp = data;
|
xen_pfn_t *mfnp = data;
|
||||||
struct mmap_batch_state *st = state;
|
struct mmap_batch_state *st = state;
|
||||||
|
struct vm_area_struct *vma = st->vma;
|
||||||
|
struct page **pages = vma->vm_private_data;
|
||||||
|
struct page *cur_page = NULL;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
|
if (xen_feature(XENFEAT_auto_translated_physmap))
|
||||||
|
cur_page = pages[st->index++];
|
||||||
|
|
||||||
ret = xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
|
ret = xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
|
||||||
st->vma->vm_page_prot, st->domain,
|
st->vma->vm_page_prot, st->domain,
|
||||||
NULL);
|
&cur_page);
|
||||||
|
|
||||||
/* Store error code for second pass. */
|
/* Store error code for second pass. */
|
||||||
*(st->err++) = ret;
|
*(st->err++) = ret;
|
||||||
@@ -304,6 +321,32 @@ static int mmap_return_errors_v1(void *data, void *state)
|
|||||||
return __put_user(*mfnp, st->user_mfn++);
|
return __put_user(*mfnp, st->user_mfn++);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Allocate pfns that are then mapped with gmfns from foreign domid. Update
|
||||||
|
* the vma with the page info to use later.
|
||||||
|
* Returns: 0 if success, otherwise -errno
|
||||||
|
*/
|
||||||
|
static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
|
||||||
|
{
|
||||||
|
int rc;
|
||||||
|
struct page **pages;
|
||||||
|
|
||||||
|
pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
|
||||||
|
if (pages == NULL)
|
||||||
|
return -ENOMEM;
|
||||||
|
|
||||||
|
rc = alloc_xenballooned_pages(numpgs, pages, 0);
|
||||||
|
if (rc != 0) {
|
||||||
|
pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
|
||||||
|
numpgs, rc);
|
||||||
|
kfree(pages);
|
||||||
|
return -ENOMEM;
|
||||||
|
}
|
||||||
|
BUG_ON(vma->vm_private_data != PRIV_VMA_LOCKED);
|
||||||
|
vma->vm_private_data = pages;
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
static struct vm_operations_struct privcmd_vm_ops;
|
static struct vm_operations_struct privcmd_vm_ops;
|
||||||
|
|
||||||
static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
|
static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
|
||||||
@@ -371,10 +414,18 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
|
|||||||
up_write(&mm->mmap_sem);
|
up_write(&mm->mmap_sem);
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
if (xen_feature(XENFEAT_auto_translated_physmap)) {
|
||||||
|
ret = alloc_empty_pages(vma, m.num);
|
||||||
|
if (ret < 0) {
|
||||||
|
up_write(&mm->mmap_sem);
|
||||||
|
goto out;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
state.domain = m.dom;
|
state.domain = m.dom;
|
||||||
state.vma = vma;
|
state.vma = vma;
|
||||||
state.va = m.addr;
|
state.va = m.addr;
|
||||||
|
state.index = 0;
|
||||||
state.global_error = 0;
|
state.global_error = 0;
|
||||||
state.err = err_array;
|
state.err = err_array;
|
||||||
|
|
||||||
@@ -439,6 +490,19 @@ static long privcmd_ioctl(struct file *file,
|
|||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void privcmd_close(struct vm_area_struct *vma)
|
||||||
|
{
|
||||||
|
struct page **pages = vma->vm_private_data;
|
||||||
|
int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
|
||||||
|
|
||||||
|
if (!xen_feature(XENFEAT_auto_translated_physmap || !numpgs || !pages))
|
||||||
|
return;
|
||||||
|
|
||||||
|
xen_unmap_domain_mfn_range(vma, numpgs, pages);
|
||||||
|
free_xenballooned_pages(numpgs, pages);
|
||||||
|
kfree(pages);
|
||||||
|
}
|
||||||
|
|
||||||
static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
|
static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
|
||||||
{
|
{
|
||||||
printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
|
printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
|
||||||
@@ -449,6 +513,7 @@ static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
|
|||||||
}
|
}
|
||||||
|
|
||||||
static struct vm_operations_struct privcmd_vm_ops = {
|
static struct vm_operations_struct privcmd_vm_ops = {
|
||||||
|
.close = privcmd_close,
|
||||||
.fault = privcmd_fault
|
.fault = privcmd_fault
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -466,7 +531,7 @@ static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
|
|||||||
|
|
||||||
static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
|
static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
|
||||||
{
|
{
|
||||||
return (xchg(&vma->vm_private_data, (void *)1) == NULL);
|
return !cmpxchg(&vma->vm_private_data, NULL, PRIV_VMA_LOCKED);
|
||||||
}
|
}
|
||||||
|
|
||||||
const struct file_operations xen_privcmd_fops = {
|
const struct file_operations xen_privcmd_fops = {
|
||||||
|
Loading…
Reference in New Issue
Block a user