Diffstat (limited to 'drivers/xen/xenbus')
-rw-r--r-- drivers/xen/xenbus/xenbus_client.c | 82
-rw-r--r-- drivers/xen/xenbus/xenbus_probe.c  | 91
2 files changed, 133 insertions(+), 40 deletions(-)
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index df6890681231..d6fdd2d209d3 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -363,50 +363,92 @@ static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err,
__xenbus_switch_state(dev, XenbusStateClosing, 1);
}
-/**
- * xenbus_grant_ring
+/*
+ * xenbus_setup_ring
* @dev: xenbus device
+ * @gfp: GFP mask to use when allocating the ring pages
- * @vaddr: starting virtual address of the ring
+ * @vaddr: pointer to starting virtual address of the ring
* @nr_pages: number of pages to be granted
* @grefs: grant reference array to be filled in
*
- * Grant access to the given @vaddr to the peer of the given device.
- * Then fill in @grefs with grant references. Return 0 on success, or
- * -errno on error. On error, the device will switch to
- * XenbusStateClosing, and the error will be saved in the store.
+ * Allocate physically contiguous pages for a shared ring buffer and grant it
+ * to the peer of the given device. The ring buffer is initially filled with
+ * zeroes. The virtual address of the ring is stored at @vaddr and the
+ * grant references are stored in the @grefs array. In case of error, @vaddr
+ * will be set to NULL and @grefs will be filled with INVALID_GRANT_REF.
*/
-int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
+int xenbus_setup_ring(struct xenbus_device *dev, gfp_t gfp, void **vaddr,
unsigned int nr_pages, grant_ref_t *grefs)
{
- int err;
- unsigned int i;
+ unsigned long ring_size = nr_pages * XEN_PAGE_SIZE;
grant_ref_t gref_head;
+ unsigned int i;
+ void *addr;
+ int ret;
- err = gnttab_alloc_grant_references(nr_pages, &gref_head);
- if (err) {
- xenbus_dev_fatal(dev, err, "granting access to ring page");
- return err;
+ *vaddr = addr = alloc_pages_exact(ring_size, gfp | __GFP_ZERO);
+ if (!*vaddr) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = gnttab_alloc_grant_references(nr_pages, &gref_head);
+ if (ret) {
+ xenbus_dev_fatal(dev, ret, "granting access to %u ring pages",
+ nr_pages);
+ goto err;
}
for (i = 0; i < nr_pages; i++) {
unsigned long gfn;
- if (is_vmalloc_addr(vaddr))
- gfn = pfn_to_gfn(vmalloc_to_pfn(vaddr));
+ if (is_vmalloc_addr(*vaddr))
+ gfn = pfn_to_gfn(vmalloc_to_pfn(addr));
else
- gfn = virt_to_gfn(vaddr);
+ gfn = virt_to_gfn(addr);
grefs[i] = gnttab_claim_grant_reference(&gref_head);
gnttab_grant_foreign_access_ref(grefs[i], dev->otherend_id,
gfn, 0);
-
- vaddr = vaddr + XEN_PAGE_SIZE;
+
+ addr += XEN_PAGE_SIZE;
}
return 0;
+
+ err:
+ if (*vaddr)
+ free_pages_exact(*vaddr, ring_size);
+ for (i = 0; i < nr_pages; i++)
+ grefs[i] = INVALID_GRANT_REF;
+ *vaddr = NULL;
+
+ return ret;
}
-EXPORT_SYMBOL_GPL(xenbus_grant_ring);
+EXPORT_SYMBOL_GPL(xenbus_setup_ring);
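
A minimal frontend usage sketch for the new helper (not part of this
patch; the "example" ring types, the two-page ring and the info struct
are illustrative assumptions, with the ring layout coming from
DEFINE_RING_TYPES() in xen/interface/io/ring.h):

	struct example_info {
		struct example_front_ring ring;
		grant_ref_t ring_ref[2];
	};

	static int example_alloc_ring(struct xenbus_device *dev,
				      struct example_info *info)
	{
		struct example_sring *sring;
		int err;

		/* Allocate zeroed, physically contiguous pages and grant
		 * them to dev->otherend_id in a single call. */
		err = xenbus_setup_ring(dev, GFP_KERNEL, (void **)&sring,
					2, info->ring_ref);
		if (err)
			return err; /* ring_ref[] already INVALID_GRANT_REF */

		SHARED_RING_INIT(sring);
		FRONT_RING_INIT(&info->ring, sring, 2 * XEN_PAGE_SIZE);

		return 0;
	}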
+/*
+ * xenbus_teardown_ring
+ * @vaddr: pointer to starting virtual address of the ring
+ * @nr_pages: number of pages
+ * @grefs: grant reference array
+ *
+ * Remove grants for the shared ring buffer and free the associated memory.
+ * On return the grant reference array is filled with INVALID_GRANT_REF.
+ */
+void xenbus_teardown_ring(void **vaddr, unsigned int nr_pages,
+ grant_ref_t *grefs)
+{
+ unsigned int i;
+
+ for (i = 0; i < nr_pages; i++) {
+ if (grefs[i] != INVALID_GRANT_REF) {
+ gnttab_end_foreign_access(grefs[i], 0);
+ grefs[i] = INVALID_GRANT_REF;
+ }
+ }
+
+ if (*vaddr)
+ free_pages_exact(*vaddr, nr_pages * XEN_PAGE_SIZE);
+ *vaddr = NULL;
+}
+EXPORT_SYMBOL_GPL(xenbus_teardown_ring);
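
The matching disconnect path for the sketch above would then be (again
an assumption, mirroring how converted frontends such as xen-blkfront
call the helper):

	static void example_free_ring(struct example_info *info)
	{
		/* Revokes every valid grant, resets ring_ref[] to
		 * INVALID_GRANT_REF, frees the ring pages and NULLs
		 * the ring pointer. */
		xenbus_teardown_ring((void **)&info->ring.sring, 2,
				     info->ring_ref);
	}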
/**
* Allocate an event channel for the given xenbus_device, assigning the newly
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index fe360c33ce71..d367f2bd2b93 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -65,6 +65,7 @@
#include "xenbus.h"
+static int xs_init_irq;
int xen_store_evtchn;
EXPORT_SYMBOL_GPL(xen_store_evtchn);
@@ -750,6 +751,20 @@ static void xenbus_probe(void)
{
xenstored_ready = 1;
+ if (!xen_store_interface) {
+ xen_store_interface = xen_remap(xen_store_gfn << XEN_PAGE_SHIFT,
+ XEN_PAGE_SIZE);
+ /*
+ * Now it is safe to free the IRQ used for xenstore late
+ * initialization. No need to unbind: it is about to be
+ * bound again from xb_init_comms. Note that calling
+ * unbind_from_irqhandler now would result in xen_evtchn_close()
+ * being called and the event channel not being enabled again
+ * afterwards, resulting in missed event notifications.
+ */
+ free_irq(xs_init_irq, &xb_waitq);
+ }
+
/*
* In the HVM case, xenbus_init() deferred its call to
* xs_init() in case callbacks were not operational yet.
@@ -798,20 +813,22 @@ static int __init xenbus_probe_initcall(void)
{
/*
* Probe XenBus here in the XS_PV case, and also XS_HVM unless we
- * need to wait for the platform PCI device to come up.
+ * need to wait for the platform PCI device to come up or
+ * xen_store_interface is not ready.
*/
if (xen_store_domain_type == XS_PV ||
(xen_store_domain_type == XS_HVM &&
- !xs_hvm_defer_init_for_callback()))
+ !xs_hvm_defer_init_for_callback() &&
+ xen_store_interface != NULL))
xenbus_probe();
/*
- * For XS_LOCAL, spawn a thread which will wait for xenstored
- * or a xenstore-stubdom to be started, then probe. It will be
- * triggered when communication starts happening, by waiting
- * on xb_waitq.
+ * For XS_LOCAL or when xen_store_interface is not ready, spawn a
+ * thread which will wait for xenstored or a xenstore-stubdom to be
+ * started, then probe. It will be triggered when communication
+ * starts happening, by waiting on xb_waitq.
*/
- if (xen_store_domain_type == XS_LOCAL) {
+ if (xen_store_domain_type == XS_LOCAL || xen_store_interface == NULL) {
struct task_struct *probe_task;
probe_task = kthread_run(xenbus_probe_thread, NULL,
@@ -907,10 +924,25 @@ static struct notifier_block xenbus_resume_nb = {
.notifier_call = xenbus_resume_cb,
};
+static irqreturn_t xenbus_late_init(int irq, void *unused)
+{
+ int err;
+ uint64_t v = 0;
+
+ err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
+ if (err || !v || !~v)
+ return IRQ_HANDLED;
+ xen_store_gfn = (unsigned long)v;
+
+ wake_up(&xb_waitq);
+ return IRQ_HANDLED;
+}
+
static int __init xenbus_init(void)
{
int err;
uint64_t v = 0;
+ bool wait = false;
xen_store_domain_type = XS_UNKNOWN;
if (!xen_domain())
@@ -957,25 +989,44 @@ static int __init xenbus_init(void)
* been properly initialized. Instead of attempting to map a
* wrong guest physical address return error.
*
- * Also recognize all bits set as an invalid value.
+ * Also recognize all bits set as an invalid/uninitialized value.
*/
- if (!v || !~v) {
+ if (!v) {
err = -ENOENT;
goto out_error;
}
- /* Avoid truncation on 32-bit. */
+ if (v == ~0ULL) {
+ wait = true;
+ } else {
+ /* Avoid truncation on 32-bit. */
#if BITS_PER_LONG == 32
- if (v > ULONG_MAX) {
- pr_err("%s: cannot handle HVM_PARAM_STORE_PFN=%llx > ULONG_MAX\n",
- __func__, v);
- err = -EINVAL;
- goto out_error;
- }
+ if (v > ULONG_MAX) {
+ pr_err("%s: cannot handle HVM_PARAM_STORE_PFN=%llx > ULONG_MAX\n",
+ __func__, v);
+ err = -EINVAL;
+ goto out_error;
+ }
#endif
- xen_store_gfn = (unsigned long)v;
- xen_store_interface =
- xen_remap(xen_store_gfn << XEN_PAGE_SHIFT,
- XEN_PAGE_SIZE);
+ xen_store_gfn = (unsigned long)v;
+ xen_store_interface =
+ xen_remap(xen_store_gfn << XEN_PAGE_SHIFT,
+ XEN_PAGE_SIZE);
+ if (xen_store_interface->connection != XENSTORE_CONNECTED)
+ wait = true;
+ }
+ if (wait) {
+ err = bind_evtchn_to_irqhandler(xen_store_evtchn,
+ xenbus_late_init,
+ 0, "xenstore_late_init",
+ &xb_waitq);
+ if (err < 0) {
+ pr_err("xenstore_late_init couldn't bind irq err=%d\n",
+ err);
+ return err;
+ }
+
+ xs_init_irq = err;
+ }
break;
default:
pr_warn("Xenstore state unknown\n");
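
For reference, the three HVM_PARAM_STORE_PFN states distinguished above
can be summarized in a small hypothetical helper (an illustration, not
part of the patch):

	static int example_check_store_pfn(uint64_t v, bool *defer)
	{
		if (!v)
			return -ENOENT;	/* never set: hard error */

		/* All bits set is the "not ready yet" sentinel: bind
		 * xenbus_late_init() to the event channel and wait. */
		*defer = (v == ~0ULL);

		/* Otherwise v is a valid PFN that can be mapped now. */
		return 0;
	}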