Diffstat (limited to 'drivers/xen')
-rw-r--r--  drivers/xen/Kconfig                |   5
-rw-r--r--  drivers/xen/Makefile               |   3
-rw-r--r--  drivers/xen/balloon.c              |  27
-rw-r--r--  drivers/xen/gntdev-dmabuf.c        |   8
-rw-r--r--  drivers/xen/privcmd.c              |  33
-rw-r--r--  drivers/xen/swiotlb-xen.c          | 119
-rw-r--r--  drivers/xen/xenbus/xenbus_client.c | 171
7 files changed, 189 insertions, 177 deletions
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 727f11eb46b2..ea6c1e7e3e42 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -52,9 +52,7 @@ config XEN_BALLOON_MEMORY_HOTPLUG
config XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
int "Hotplugged memory limit (in GiB) for a PV guest"
- default 512 if X86_64
- default 4 if X86_32
- range 0 64 if X86_32
+ default 512
depends on XEN_HAVE_PVMMU
depends on XEN_BALLOON_MEMORY_HOTPLUG
help
@@ -179,6 +177,7 @@ config XEN_GRANT_DMA_ALLOC
config SWIOTLB_XEN
def_bool y
+ select DMA_OPS
select SWIOTLB
config XEN_PCIDEV_BACKEND
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 0d322f3d90cd..c25c9a699b48 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -5,8 +5,7 @@ obj-y += mem-reservation.o
obj-y += events/
obj-y += xenbus/
-nostackp := $(call cc-option, -fno-stack-protector)
-CFLAGS_features.o := $(nostackp)
+CFLAGS_features.o := -fno-stack-protector
dom0-$(CONFIG_ARM64) += arm-device.o
dom0-$(CONFIG_PCI) += pci.o
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 77c57568e5d7..37ffccda8bb8 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -58,7 +58,6 @@
#include <linux/sysctl.h>
#include <asm/page.h>
-#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/xen/hypervisor.h>
@@ -266,20 +265,6 @@ static struct resource *additional_memory_resource(phys_addr_t size)
return NULL;
}
-#ifdef CONFIG_SPARSEMEM
- {
- unsigned long limit = 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT);
- unsigned long pfn = res->start >> PAGE_SHIFT;
-
- if (pfn > limit) {
- pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n",
- pfn, limit);
- release_memory_resource(res);
- return NULL;
- }
- }
-#endif
-
return res;
}
@@ -568,11 +553,13 @@ static int add_ballooned_pages(int nr_pages)
if (xen_hotplug_unpopulated) {
st = reserve_additional_memory();
if (st != BP_ECANCELED) {
+ int rc;
+
mutex_unlock(&balloon_mutex);
- wait_event(balloon_wq,
+ rc = wait_event_interruptible(balloon_wq,
!list_empty(&ballooned_pages));
mutex_lock(&balloon_mutex);
- return 0;
+ return rc ? -ENOMEM : 0;
}
}
@@ -630,6 +617,12 @@ int alloc_xenballooned_pages(int nr_pages, struct page **pages)
out_undo:
mutex_unlock(&balloon_mutex);
free_xenballooned_pages(pgno, pages);
+ /*
+ * NB: free_xenballooned_pages will only subtract pgno pages, but since
+ * target_unpopulated is incremented with nr_pages at the start we need
+ * to remove the remaining ones also, or accounting will be screwed.
+ */
+ balloon_stats.target_unpopulated -= nr_pages - pgno;
return ret;
}
EXPORT_SYMBOL(alloc_xenballooned_pages);
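[Note: the balloon.c hunks above share one pattern worth calling out: waiting for ballooned pages must drop balloon_mutex before sleeping, and an interrupted sleep must surface as an error instead of a false success. A minimal sketch of the unlock/wait/relock shape, kernel context assumed, with a hypothetical helper name:

    /*
     * Sketch only -- hypothetical helper, not code from this patch.
     * Drop the lock before sleeping so concurrent balloon work can
     * satisfy the condition, then fold a signal (-ERESTARTSYS) into
     * the -ENOMEM that callers of add_ballooned_pages() expect.
     */
    static int wait_for_ballooned_pages(void)
    {
        int rc;

        mutex_unlock(&balloon_mutex);
        rc = wait_event_interruptible(balloon_wq,
                                      !list_empty(&ballooned_pages));
        mutex_lock(&balloon_mutex);

        return rc ? -ENOMEM : 0;
    }
]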
diff --git a/drivers/xen/gntdev-dmabuf.c b/drivers/xen/gntdev-dmabuf.c
index 75d3bb948bf3..b1b6eebafd5d 100644
--- a/drivers/xen/gntdev-dmabuf.c
+++ b/drivers/xen/gntdev-dmabuf.c
@@ -613,6 +613,14 @@ dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
goto fail_detach;
}
+ /* Check that we have zero offset. */
+ if (sgt->sgl->offset) {
+ ret = ERR_PTR(-EINVAL);
+ pr_debug("DMA buffer has %d bytes offset, user-space expects 0\n",
+ sgt->sgl->offset);
+ goto fail_unmap;
+ }
+
/* Check number of pages that imported buffer has. */
if (attach->dmabuf->size != gntdev_dmabuf->nr_pages << PAGE_SHIFT) {
ret = ERR_PTR(-EINVAL);
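[Note: the new offset check complements the existing size check: together they reject any imported buffer whose first segment does not start on a page boundary, or whose backing size disagrees with the advertised page count. A standalone sketch of the combined validation, hypothetical helper name, kernel context assumed:

    /* Sketch, not part of the patch: both invariants in one place. */
    static int dmabuf_imp_validate(struct sg_table *sgt, size_t size,
                                   unsigned int nr_pages)
    {
        /* User space maps the pages directly and assumes zero offset. */
        if (sgt->sgl->offset)
            return -EINVAL;

        /* The buffer must cover exactly the advertised pages. */
        if (size != (size_t)nr_pages << PAGE_SHIFT)
            return -EINVAL;

        return 0;
    }
]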
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index a250d118144a..63abe6c3642b 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -25,7 +25,6 @@
#include <linux/miscdevice.h>
#include <linux/moduleparam.h>
-#include <asm/pgalloc.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
@@ -580,13 +579,13 @@ out_unlock:
static int lock_pages(
struct privcmd_dm_op_buf kbufs[], unsigned int num,
- struct page *pages[], unsigned int nr_pages)
+ struct page *pages[], unsigned int nr_pages, unsigned int *pinned)
{
unsigned int i;
for (i = 0; i < num; i++) {
unsigned int requested;
- int pinned;
+ int page_count;
requested = DIV_ROUND_UP(
offset_in_page(kbufs[i].uptr) + kbufs[i].size,
@@ -594,14 +593,15 @@ static int lock_pages(
if (requested > nr_pages)
return -ENOSPC;
- pinned = get_user_pages_fast(
+ page_count = pin_user_pages_fast(
(unsigned long) kbufs[i].uptr,
requested, FOLL_WRITE, pages);
- if (pinned < 0)
- return pinned;
+ if (page_count < 0)
+ return page_count;
- nr_pages -= pinned;
- pages += pinned;
+ *pinned += page_count;
+ nr_pages -= page_count;
+ pages += page_count;
}
return 0;
@@ -609,15 +609,7 @@ static int lock_pages(
static void unlock_pages(struct page *pages[], unsigned int nr_pages)
{
- unsigned int i;
-
- if (!pages)
- return;
-
- for (i = 0; i < nr_pages; i++) {
- if (pages[i])
- put_page(pages[i]);
- }
+ unpin_user_pages_dirty_lock(pages, nr_pages, true);
}
static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
@@ -630,6 +622,7 @@ static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
struct xen_dm_op_buf *xbufs = NULL;
unsigned int i;
long rc;
+ unsigned int pinned = 0;
if (copy_from_user(&kdata, udata, sizeof(kdata)))
return -EFAULT;
@@ -683,9 +676,11 @@ static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
goto out;
}
- rc = lock_pages(kbufs, kdata.num, pages, nr_pages);
- if (rc)
+ rc = lock_pages(kbufs, kdata.num, pages, nr_pages, &pinned);
+ if (rc < 0) {
+ nr_pages = pinned;
goto out;
+ }
for (i = 0; i < kdata.num; i++) {
set_xen_guest_handle(xbufs[i].h, kbufs[i].uptr);
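[Note: the privcmd change above converts get_user_pages_fast()/put_page() to the pin_user_pages API, plus bookkeeping so a mid-loop failure only unpins what was actually pinned. The shape of one iteration, as a sketch with a hypothetical wrapper, kernel context assumed:

    /*
     * Sketch only: pin one user buffer and account for it, so that on
     * failure the caller can pass *pinned (not the full array size) to
     * unpin_user_pages_dirty_lock(), which also dirties the pages.
     */
    static int pin_one_buf(void __user *uptr, size_t size,
                           struct page **pages, unsigned int *pinned)
    {
        unsigned int requested = DIV_ROUND_UP(
                offset_in_page(uptr) + size, PAGE_SIZE);
        int page_count = pin_user_pages_fast((unsigned long)uptr,
                                             requested, FOLL_WRITE, pages);

        if (page_count < 0)
            return page_count;

        *pinned += page_count;
        return 0;
    }
]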
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index b6d27762c6f8..39a0f2e0847c 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -52,37 +52,39 @@ static unsigned long xen_io_tlb_nslabs;
* Quick lookup value of the bus address of the IOTLB.
*/
-static u64 start_dma_addr;
-
-/*
- * Both of these functions should avoid XEN_PFN_PHYS because phys_addr_t
- * can be 32bit when dma_addr_t is 64bit leading to a loss in
- * information if the shift is done before casting to 64bit.
- */
-static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
+static inline phys_addr_t xen_phys_to_bus(struct device *dev, phys_addr_t paddr)
{
unsigned long bfn = pfn_to_bfn(XEN_PFN_DOWN(paddr));
- dma_addr_t dma = (dma_addr_t)bfn << XEN_PAGE_SHIFT;
+ phys_addr_t baddr = (phys_addr_t)bfn << XEN_PAGE_SHIFT;
- dma |= paddr & ~XEN_PAGE_MASK;
+ baddr |= paddr & ~XEN_PAGE_MASK;
+ return baddr;
+}
- return dma;
+static inline dma_addr_t xen_phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ return phys_to_dma(dev, xen_phys_to_bus(dev, paddr));
}
-static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
+static inline phys_addr_t xen_bus_to_phys(struct device *dev,
+ phys_addr_t baddr)
{
unsigned long xen_pfn = bfn_to_pfn(XEN_PFN_DOWN(baddr));
- dma_addr_t dma = (dma_addr_t)xen_pfn << XEN_PAGE_SHIFT;
- phys_addr_t paddr = dma;
-
- paddr |= baddr & ~XEN_PAGE_MASK;
+ phys_addr_t paddr = (xen_pfn << XEN_PAGE_SHIFT) |
+ (baddr & ~XEN_PAGE_MASK);
return paddr;
}
-static inline dma_addr_t xen_virt_to_bus(void *address)
+static inline phys_addr_t xen_dma_to_phys(struct device *dev,
+ dma_addr_t dma_addr)
{
- return xen_phys_to_bus(virt_to_phys(address));
+ return xen_bus_to_phys(dev, dma_to_phys(dev, dma_addr));
+}
+
+static inline dma_addr_t xen_virt_to_bus(struct device *dev, void *address)
+{
+ return xen_phys_to_dma(dev, virt_to_phys(address));
}
static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
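[Note: the reworked helpers above split what used to be a single conversion into two explicit layers: Xen's pfn-to-bfn remapping (guest physical to machine) and the architecture's phys_to_dma()/dma_to_phys() (machine address to the device's view). xen_phys_to_dma() is just the composition; a sketch of using it end to end, assuming a kernel-virtual buffer and a hypothetical caller:

    /* Sketch: translate a kernel buffer to a device address in two hops. */
    static dma_addr_t buf_to_dev_addr(struct device *dev, void *buf)
    {
        phys_addr_t phys = virt_to_phys(buf);           /* guest physical */
        phys_addr_t baddr = xen_phys_to_bus(dev, phys); /* machine (bus) */

        return phys_to_dma(dev, baddr);                 /* device's view */
    }

which is exactly what the new xen_virt_to_bus() does via xen_phys_to_dma().]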
@@ -99,11 +101,11 @@ static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
return 0;
}
-static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
+static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
{
- unsigned long bfn = XEN_PFN_DOWN(dma_addr);
+ unsigned long bfn = XEN_PFN_DOWN(dma_to_phys(dev, dma_addr));
unsigned long xen_pfn = bfn_to_local_pfn(bfn);
- phys_addr_t paddr = XEN_PFN_PHYS(xen_pfn);
+ phys_addr_t paddr = (phys_addr_t)xen_pfn << XEN_PAGE_SHIFT;
/* If the address is outside our domain, it CAN
* have the same virtual address as another address
@@ -241,7 +243,6 @@ retry:
m_ret = XEN_SWIOTLB_EFIXUP;
goto error;
}
- start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
if (early) {
if (swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs,
verbose))
@@ -307,12 +308,12 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
if (hwdev && hwdev->coherent_dma_mask)
dma_mask = hwdev->coherent_dma_mask;
- /* At this point dma_handle is the physical address, next we are
+ /* At this point dma_handle is the dma address, next we are
* going to set it to the machine address.
* Do not use virt_to_phys(ret) because on ARM it doesn't correspond
* to *dma_handle. */
- phys = *dma_handle;
- dev_addr = xen_phys_to_bus(phys);
+ phys = dma_to_phys(hwdev, *dma_handle);
+ dev_addr = xen_phys_to_dma(hwdev, phys);
if (((dev_addr + size - 1 <= dma_mask)) &&
!range_straddles_page_boundary(phys, size))
*dma_handle = dev_addr;
@@ -322,6 +323,7 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
return NULL;
}
+ *dma_handle = phys_to_dma(hwdev, *dma_handle);
SetPageXenRemapped(virt_to_page(ret));
}
memset(ret, 0, size);
@@ -335,23 +337,30 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
int order = get_order(size);
phys_addr_t phys;
u64 dma_mask = DMA_BIT_MASK(32);
+ struct page *page;
if (hwdev && hwdev->coherent_dma_mask)
dma_mask = hwdev->coherent_dma_mask;
/* do not use virt_to_phys because on ARM it doesn't return you the
* physical address */
- phys = xen_bus_to_phys(dev_addr);
+ phys = xen_dma_to_phys(hwdev, dev_addr);
/* Convert the size to actually allocated. */
size = 1UL << (order + XEN_PAGE_SHIFT);
+ if (is_vmalloc_addr(vaddr))
+ page = vmalloc_to_page(vaddr);
+ else
+ page = virt_to_page(vaddr);
+
if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
range_straddles_page_boundary(phys, size)) &&
- TestClearPageXenRemapped(virt_to_page(vaddr)))
+ TestClearPageXenRemapped(page))
xen_destroy_contiguous_region(phys, order);
- xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
+ xen_free_coherent_pages(hwdev, size, vaddr, phys_to_dma(hwdev, phys),
+ attrs);
}
/*
@@ -367,7 +376,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
unsigned long attrs)
{
phys_addr_t map, phys = page_to_phys(page) + offset;
- dma_addr_t dev_addr = xen_phys_to_bus(phys);
+ dma_addr_t dev_addr = xen_phys_to_dma(dev, phys);
BUG_ON(dir == DMA_NONE);
/*
@@ -386,13 +395,13 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
*/
trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
- map = swiotlb_tbl_map_single(dev, start_dma_addr, phys,
- size, size, dir, attrs);
+ map = swiotlb_tbl_map_single(dev, virt_to_phys(xen_io_tlb_start),
+ phys, size, size, dir, attrs);
if (map == (phys_addr_t)DMA_MAPPING_ERROR)
return DMA_MAPPING_ERROR;
phys = map;
- dev_addr = xen_phys_to_bus(map);
+ dev_addr = xen_phys_to_dma(dev, map);
/*
* Ensure that the address returned is DMA'ble
@@ -404,8 +413,12 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
}
done:
- if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- xen_dma_sync_for_device(dev_addr, phys, size, dir);
+ if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
+ if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dev_addr))))
+ arch_sync_dma_for_device(phys, size, dir);
+ else
+ xen_dma_sync_for_device(dev, dev_addr, size, dir);
+ }
return dev_addr;
}
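[Note: the same pfn_valid() dispatch now appears in the map, unmap and both sync paths below: memory backed by local RAM can use the architecture's cache maintenance directly, while foreign pages granted by another domain must go through the Xen-specific helpers. A sketch of the dispatch factored into one place (hypothetical helper; the patch open-codes it at each call site):

    /* Sketch only: unify the local/foreign choice, device direction. */
    static void sync_for_device(struct device *dev, dma_addr_t handle,
                                phys_addr_t paddr, size_t size,
                                enum dma_data_direction dir)
    {
        if (pfn_valid(PFN_DOWN(dma_to_phys(dev, handle))))
            arch_sync_dma_for_device(paddr, size, dir);
        else
            xen_dma_sync_for_device(dev, handle, size, dir);
    }
]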
@@ -420,15 +433,19 @@ done:
static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
- phys_addr_t paddr = xen_bus_to_phys(dev_addr);
+ phys_addr_t paddr = xen_dma_to_phys(hwdev, dev_addr);
BUG_ON(dir == DMA_NONE);
- if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- xen_dma_sync_for_cpu(dev_addr, paddr, size, dir);
+ if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
+ if (pfn_valid(PFN_DOWN(dma_to_phys(hwdev, dev_addr))))
+ arch_sync_dma_for_cpu(paddr, size, dir);
+ else
+ xen_dma_sync_for_cpu(hwdev, dev_addr, size, dir);
+ }
/* NOTE: We use dev_addr here, not paddr! */
- if (is_xen_swiotlb_buffer(dev_addr))
+ if (is_xen_swiotlb_buffer(hwdev, dev_addr))
swiotlb_tbl_unmap_single(hwdev, paddr, size, size, dir, attrs);
}
@@ -436,12 +453,16 @@ static void
xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir)
{
- phys_addr_t paddr = xen_bus_to_phys(dma_addr);
+ phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);
- if (!dev_is_dma_coherent(dev))
- xen_dma_sync_for_cpu(dma_addr, paddr, size, dir);
+ if (!dev_is_dma_coherent(dev)) {
+ if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
+ arch_sync_dma_for_cpu(paddr, size, dir);
+ else
+ xen_dma_sync_for_cpu(dev, dma_addr, size, dir);
+ }
- if (is_xen_swiotlb_buffer(dma_addr))
+ if (is_xen_swiotlb_buffer(dev, dma_addr))
swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
}
@@ -449,13 +470,17 @@ static void
xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir)
{
- phys_addr_t paddr = xen_bus_to_phys(dma_addr);
+ phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);
- if (is_xen_swiotlb_buffer(dma_addr))
+ if (is_xen_swiotlb_buffer(dev, dma_addr))
swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
- if (!dev_is_dma_coherent(dev))
- xen_dma_sync_for_device(dma_addr, paddr, size, dir);
+ if (!dev_is_dma_coherent(dev)) {
+ if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
+ arch_sync_dma_for_device(paddr, size, dir);
+ else
+ xen_dma_sync_for_device(dev, dma_addr, size, dir);
+ }
}
/*
@@ -536,7 +561,7 @@ xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
static int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
- return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
+ return xen_virt_to_bus(hwdev, xen_io_tlb_end - 1) <= mask;
}
const struct dma_map_ops xen_swiotlb_dma_ops = {
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 040d2a43e8e3..786fbb7d8be0 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -69,11 +69,27 @@ struct xenbus_map_node {
unsigned int nr_handles;
};
+struct map_ring_valloc {
+ struct xenbus_map_node *node;
+
+ /* Why do we need two arrays? See comment of __xenbus_map_ring */
+ union {
+ unsigned long addrs[XENBUS_MAX_RING_GRANTS];
+ pte_t *ptes[XENBUS_MAX_RING_GRANTS];
+ };
+ phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
+
+ struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS];
+ struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
+
+ unsigned int idx; /* HVM only. */
+};
+
static DEFINE_SPINLOCK(xenbus_valloc_lock);
static LIST_HEAD(xenbus_valloc_pages);
struct xenbus_ring_ops {
- int (*map)(struct xenbus_device *dev,
+ int (*map)(struct xenbus_device *dev, struct map_ring_valloc *info,
grant_ref_t *gnt_refs, unsigned int nr_grefs,
void **vaddr);
int (*unmap)(struct xenbus_device *dev, void *vaddr);
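[Note: struct map_ring_valloc gathers what were previously on-stack arrays in __xenbus_map_ring and the PV/HVM mappers into one heap allocation; the union works because addrs[] is only used on the HVM path and ptes[] only on the PV path, never both in one mapping. A sketch of the resulting allocation pattern, as used by xenbus_map_ring_valloc() further down:

    /*
     * Sketch: one kzalloc replaces several XENBUS_MAX_RING_GRANTS-sized
     * arrays that previously lived on the kernel stack.
     */
    struct map_ring_valloc *info = kzalloc(sizeof(*info), GFP_KERNEL);

    if (!info)
        return -ENOMEM;
    /* ... use info->map / info->unmap / info->phys_addrs ... */
    kfree(info);
]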
@@ -440,8 +456,7 @@ EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
* Map @nr_grefs pages of memory into this domain from another
* domain's grant table. xenbus_map_ring_valloc allocates @nr_grefs
* pages of virtual address space, maps the pages to that address, and
- * sets *vaddr to that address. Returns 0 on success, and GNTST_*
- * (see xen/include/interface/grant_table.h) or -ENOMEM / -EINVAL on
+ * sets *vaddr to that address. Returns 0 on success, and -errno on
* error. If an error is returned, device will switch to
* XenbusStateClosing and the error message will be saved in XenStore.
*/
@@ -449,12 +464,25 @@ int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
unsigned int nr_grefs, void **vaddr)
{
int err;
+ struct map_ring_valloc *info;
+
+ *vaddr = NULL;
+
+ if (nr_grefs > XENBUS_MAX_RING_GRANTS)
+ return -EINVAL;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
- err = ring_ops->map(dev, gnt_refs, nr_grefs, vaddr);
- /* Some hypervisors are buggy and can return 1. */
- if (err > 0)
- err = GNTST_general_error;
+ info->node = kzalloc(sizeof(*info->node), GFP_KERNEL);
+ if (!info->node)
+ err = -ENOMEM;
+ else
+ err = ring_ops->map(dev, info, gnt_refs, nr_grefs, vaddr);
+ kfree(info->node);
+ kfree(info);
return err;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
@@ -466,62 +494,57 @@ static int __xenbus_map_ring(struct xenbus_device *dev,
grant_ref_t *gnt_refs,
unsigned int nr_grefs,
grant_handle_t *handles,
- phys_addr_t *addrs,
+ struct map_ring_valloc *info,
unsigned int flags,
bool *leaked)
{
- struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS];
- struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
int i, j;
- int err = GNTST_okay;
if (nr_grefs > XENBUS_MAX_RING_GRANTS)
return -EINVAL;
for (i = 0; i < nr_grefs; i++) {
- memset(&map[i], 0, sizeof(map[i]));
- gnttab_set_map_op(&map[i], addrs[i], flags, gnt_refs[i],
- dev->otherend_id);
+ gnttab_set_map_op(&info->map[i], info->phys_addrs[i], flags,
+ gnt_refs[i], dev->otherend_id);
handles[i] = INVALID_GRANT_HANDLE;
}
- gnttab_batch_map(map, i);
+ gnttab_batch_map(info->map, i);
for (i = 0; i < nr_grefs; i++) {
- if (map[i].status != GNTST_okay) {
- err = map[i].status;
- xenbus_dev_fatal(dev, map[i].status,
+ if (info->map[i].status != GNTST_okay) {
+ xenbus_dev_fatal(dev, info->map[i].status,
"mapping in shared page %d from domain %d",
gnt_refs[i], dev->otherend_id);
goto fail;
} else
- handles[i] = map[i].handle;
+ handles[i] = info->map[i].handle;
}
- return GNTST_okay;
+ return 0;
fail:
for (i = j = 0; i < nr_grefs; i++) {
if (handles[i] != INVALID_GRANT_HANDLE) {
- memset(&unmap[j], 0, sizeof(unmap[j]));
- gnttab_set_unmap_op(&unmap[j], (phys_addr_t)addrs[i],
+ gnttab_set_unmap_op(&info->unmap[j],
+ info->phys_addrs[i],
GNTMAP_host_map, handles[i]);
j++;
}
}
- if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, j))
+ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, info->unmap, j))
BUG();
*leaked = false;
for (i = 0; i < j; i++) {
- if (unmap[i].status != GNTST_okay) {
+ if (info->unmap[i].status != GNTST_okay) {
*leaked = true;
break;
}
}
- return err;
+ return -ENOENT;
}
/**
@@ -566,21 +589,12 @@ static int xenbus_unmap_ring(struct xenbus_device *dev, grant_handle_t *handles,
return err;
}
-struct map_ring_valloc_hvm
-{
- unsigned int idx;
-
- /* Why do we need two arrays? See comment of __xenbus_map_ring */
- phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
- unsigned long addrs[XENBUS_MAX_RING_GRANTS];
-};
-
static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn,
unsigned int goffset,
unsigned int len,
void *data)
{
- struct map_ring_valloc_hvm *info = data;
+ struct map_ring_valloc *info = data;
unsigned long vaddr = (unsigned long)gfn_to_virt(gfn);
info->phys_addrs[info->idx] = vaddr;
@@ -589,39 +603,28 @@ static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn,
info->idx++;
}
-static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
- grant_ref_t *gnt_ref,
- unsigned int nr_grefs,
- void **vaddr)
+static int xenbus_map_ring_hvm(struct xenbus_device *dev,
+ struct map_ring_valloc *info,
+ grant_ref_t *gnt_ref,
+ unsigned int nr_grefs,
+ void **vaddr)
{
- struct xenbus_map_node *node;
+ struct xenbus_map_node *node = info->node;
int err;
void *addr;
bool leaked = false;
- struct map_ring_valloc_hvm info = {
- .idx = 0,
- };
unsigned int nr_pages = XENBUS_PAGES(nr_grefs);
- if (nr_grefs > XENBUS_MAX_RING_GRANTS)
- return -EINVAL;
-
- *vaddr = NULL;
-
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
- return -ENOMEM;
-
err = alloc_xenballooned_pages(nr_pages, node->hvm.pages);
if (err)
goto out_err;
gnttab_foreach_grant(node->hvm.pages, nr_grefs,
xenbus_map_ring_setup_grant_hvm,
- &info);
+ info);
err = __xenbus_map_ring(dev, gnt_ref, nr_grefs, node->handles,
- info.phys_addrs, GNTMAP_host_map, &leaked);
+ info, GNTMAP_host_map, &leaked);
node->nr_handles = nr_grefs;
if (err)
@@ -641,11 +644,13 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
spin_unlock(&xenbus_valloc_lock);
*vaddr = addr;
+ info->node = NULL;
+
return 0;
out_xenbus_unmap_ring:
if (!leaked)
- xenbus_unmap_ring(dev, node->handles, nr_grefs, info.addrs);
+ xenbus_unmap_ring(dev, node->handles, nr_grefs, info->addrs);
else
pr_alert("leaking %p size %u page(s)",
addr, nr_pages);
@@ -653,7 +658,6 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
if (!leaked)
free_xenballooned_pages(nr_pages, node->hvm.pages);
out_err:
- kfree(node);
return err;
}
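[Note: observe the ownership hand-off above: xenbus_map_ring_valloc() allocates info->node and unconditionally kfree()s it afterwards, so a successful mapper must clear info->node once the node is queued on xenbus_valloc_pages. Since kfree(NULL) is a no-op, the single cleanup path stays correct in both outcomes. As a generic sketch with hypothetical names:

    /* Sketch only (hypothetical names): consume-on-success ownership. */
    static int map_and_publish(struct map_ring_valloc *info)
    {
        int err = do_the_mapping(info->node);   /* hypothetical */

        if (err)
            return err;   /* caller still owns and frees info->node */

        publish_node(info->node); /* hypothetical: add to global list */
        info->node = NULL;        /* caller's kfree(info->node) is a no-op */
        return 0;
    }
]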
@@ -676,40 +680,28 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
#ifdef CONFIG_XEN_PV
-static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
- grant_ref_t *gnt_refs,
- unsigned int nr_grefs,
- void **vaddr)
+static int xenbus_map_ring_pv(struct xenbus_device *dev,
+ struct map_ring_valloc *info,
+ grant_ref_t *gnt_refs,
+ unsigned int nr_grefs,
+ void **vaddr)
{
- struct xenbus_map_node *node;
+ struct xenbus_map_node *node = info->node;
struct vm_struct *area;
- pte_t *ptes[XENBUS_MAX_RING_GRANTS];
- phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
int err = GNTST_okay;
int i;
bool leaked;
- *vaddr = NULL;
-
- if (nr_grefs > XENBUS_MAX_RING_GRANTS)
- return -EINVAL;
-
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
+ area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, info->ptes);
+ if (!area)
return -ENOMEM;
- area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, ptes);
- if (!area) {
- kfree(node);
- return -ENOMEM;
- }
-
for (i = 0; i < nr_grefs; i++)
- phys_addrs[i] = arbitrary_virt_to_machine(ptes[i]).maddr;
+ info->phys_addrs[i] =
+ arbitrary_virt_to_machine(info->ptes[i]).maddr;
err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
- phys_addrs,
- GNTMAP_host_map | GNTMAP_contains_pte,
+ info, GNTMAP_host_map | GNTMAP_contains_pte,
&leaked);
if (err)
goto failed;
@@ -722,6 +714,8 @@ static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
spin_unlock(&xenbus_valloc_lock);
*vaddr = area->addr;
+ info->node = NULL;
+
return 0;
failed:
@@ -730,11 +724,10 @@ failed:
else
pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs);
- kfree(node);
return err;
}
-static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
+static int xenbus_unmap_ring_pv(struct xenbus_device *dev, void *vaddr)
{
struct xenbus_map_node *node;
struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
@@ -798,12 +791,12 @@ static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
}
static const struct xenbus_ring_ops ring_ops_pv = {
- .map = xenbus_map_ring_valloc_pv,
- .unmap = xenbus_unmap_ring_vfree_pv,
+ .map = xenbus_map_ring_pv,
+ .unmap = xenbus_unmap_ring_pv,
};
#endif
-struct unmap_ring_vfree_hvm
+struct unmap_ring_hvm
{
unsigned int idx;
unsigned long addrs[XENBUS_MAX_RING_GRANTS];
@@ -814,19 +807,19 @@ static void xenbus_unmap_ring_setup_grant_hvm(unsigned long gfn,
unsigned int len,
void *data)
{
- struct unmap_ring_vfree_hvm *info = data;
+ struct unmap_ring_hvm *info = data;
info->addrs[info->idx] = (unsigned long)gfn_to_virt(gfn);
info->idx++;
}
-static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
+static int xenbus_unmap_ring_hvm(struct xenbus_device *dev, void *vaddr)
{
int rv;
struct xenbus_map_node *node;
void *addr;
- struct unmap_ring_vfree_hvm info = {
+ struct unmap_ring_hvm info = {
.idx = 0,
};
unsigned int nr_pages;
@@ -887,8 +880,8 @@ enum xenbus_state xenbus_read_driver_state(const char *path)
EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
static const struct xenbus_ring_ops ring_ops_hvm = {
- .map = xenbus_map_ring_valloc_hvm,
- .unmap = xenbus_unmap_ring_vfree_hvm,
+ .map = xenbus_map_ring_hvm,
+ .unmap = xenbus_unmap_ring_hvm,
};
void __init xenbus_ring_ops_init(void)