Diffstat (limited to 'drivers/block/xen-blkback/blkback.c')
-rw-r--r-- drivers/block/xen-blkback/blkback.c | 126
1 file changed, 115 insertions(+), 11 deletions(-)
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index a5cf7f1e871c..c362f4ad80ab 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -239,9 +239,9 @@ static void put_persistent_gnt(struct xen_blkif_ring *ring,
atomic_dec(&ring->persistent_gnt_in_use);
}
-static void free_persistent_gnts(struct xen_blkif_ring *ring, struct rb_root *root,
- unsigned int num)
+static void free_persistent_gnts(struct xen_blkif_ring *ring)
{
+ struct rb_root *root = &ring->persistent_gnts;
struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
struct persistent_gnt *persistent_gnt;
@@ -249,6 +249,9 @@ static void free_persistent_gnts(struct xen_blkif_ring *ring, struct rb_root *ro
int segs_to_unmap = 0;
struct gntab_unmap_queue_data unmap_data;
+ if (RB_EMPTY_ROOT(root))
+ return;
+
unmap_data.pages = pages;
unmap_data.unmap_ops = unmap;
unmap_data.kunmap_ops = NULL;
@@ -277,9 +280,11 @@ static void free_persistent_gnts(struct xen_blkif_ring *ring, struct rb_root *ro
rb_erase(&persistent_gnt->node, root);
kfree(persistent_gnt);
- num--;
+ ring->persistent_gnt_c--;
}
- BUG_ON(num != 0);
+
+ BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
+ BUG_ON(ring->persistent_gnt_c != 0);
}
void xen_blkbk_unmap_purged_grants(struct work_struct *work)
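The rewritten teardown above takes sole ownership of the ring's red-black tree and grant counter, returning early when the tree is already empty. A minimal sketch of the underlying drain idiom, using a hypothetical demo_gnt node in place of struct persistent_gnt (the real function also batches the gnttab unmap operations through gntab_unmap_queue_data, omitted here):

#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/bug.h>

struct demo_gnt {			/* hypothetical stand-in node */
	struct rb_node node;
};

static void demo_drain(struct rb_root *root, unsigned int *count)
{
	struct rb_node *n;

	if (RB_EMPTY_ROOT(root))	/* nothing cached: bail out early */
		return;

	while ((n = rb_first(root))) {
		struct demo_gnt *g = rb_entry(n, struct demo_gnt, node);

		rb_erase(n, root);	/* unlink before freeing */
		kfree(g);
		(*count)--;		/* keep the cached count in sync */
	}

	BUG_ON(*count != 0);		/* tree and counter must agree */
}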
@@ -631,12 +636,7 @@ purge_gnt_list:
void xen_blkbk_free_caches(struct xen_blkif_ring *ring)
{
/* Free all persistent grant pages */
- if (!RB_EMPTY_ROOT(&ring->persistent_gnts))
- free_persistent_gnts(ring, &ring->persistent_gnts,
- ring->persistent_gnt_c);
-
- BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
- ring->persistent_gnt_c = 0;
+ free_persistent_gnts(ring);
/* Since we are shutting down remove all pages from the buffer */
gnttab_page_cache_shrink(&ring->free_pages, 0 /* All */);
@@ -891,7 +891,7 @@ next:
out:
for (i = last_map; i < num; i++) {
/* Don't zap current batch's valid persistent grants. */
- if(i >= map_until)
+ if (i >= map_until)
pages[i]->persistent_gnt = NULL;
pages[i]->handle = BLKBACK_INVALID_HANDLE;
}
@@ -1072,7 +1072,111 @@ static void end_block_io_op(struct bio *bio)
bio_put(bio);
}
+static void blkif_get_x86_32_req(struct blkif_request *dst,
+ const struct blkif_x86_32_request *src)
+{
+ unsigned int i, n;
+
+ dst->operation = READ_ONCE(src->operation);
+
+ switch (dst->operation) {
+ case BLKIF_OP_READ:
+ case BLKIF_OP_WRITE:
+ case BLKIF_OP_WRITE_BARRIER:
+ case BLKIF_OP_FLUSH_DISKCACHE:
+ dst->u.rw.nr_segments = READ_ONCE(src->u.rw.nr_segments);
+ dst->u.rw.handle = src->u.rw.handle;
+ dst->u.rw.id = src->u.rw.id;
+ dst->u.rw.sector_number = src->u.rw.sector_number;
+ n = min_t(unsigned int, BLKIF_MAX_SEGMENTS_PER_REQUEST,
+ dst->u.rw.nr_segments);
+ for (i = 0; i < n; i++)
+ dst->u.rw.seg[i] = src->u.rw.seg[i];
+ break;
+
+ case BLKIF_OP_DISCARD:
+ dst->u.discard.flag = src->u.discard.flag;
+ dst->u.discard.id = src->u.discard.id;
+ dst->u.discard.sector_number = src->u.discard.sector_number;
+ dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
+ break;
+
+ case BLKIF_OP_INDIRECT:
+ dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
+ dst->u.indirect.nr_segments =
+ READ_ONCE(src->u.indirect.nr_segments);
+ dst->u.indirect.handle = src->u.indirect.handle;
+ dst->u.indirect.id = src->u.indirect.id;
+ dst->u.indirect.sector_number = src->u.indirect.sector_number;
+ n = min(MAX_INDIRECT_PAGES,
+ INDIRECT_PAGES(dst->u.indirect.nr_segments));
+ for (i = 0; i < n; i++)
+ dst->u.indirect.indirect_grefs[i] =
+ src->u.indirect.indirect_grefs[i];
+ break;
+
+ default:
+ /*
+ * Don't know how to translate this op. Only get the
+ * ID so failure can be reported to the frontend.
+ */
+ dst->u.other.id = src->u.other.id;
+ break;
+ }
+}
+
+static void blkif_get_x86_64_req(struct blkif_request *dst,
+ const struct blkif_x86_64_request *src)
+{
+ unsigned int i, n;
+
+ dst->operation = READ_ONCE(src->operation);
+
+ switch (dst->operation) {
+ case BLKIF_OP_READ:
+ case BLKIF_OP_WRITE:
+ case BLKIF_OP_WRITE_BARRIER:
+ case BLKIF_OP_FLUSH_DISKCACHE:
+ dst->u.rw.nr_segments = READ_ONCE(src->u.rw.nr_segments);
+ dst->u.rw.handle = src->u.rw.handle;
+ dst->u.rw.id = src->u.rw.id;
+ dst->u.rw.sector_number = src->u.rw.sector_number;
+ n = min_t(unsigned int, BLKIF_MAX_SEGMENTS_PER_REQUEST,
+ dst->u.rw.nr_segments);
+ for (i = 0; i < n; i++)
+ dst->u.rw.seg[i] = src->u.rw.seg[i];
+ break;
+
+ case BLKIF_OP_DISCARD:
+ dst->u.discard.flag = src->u.discard.flag;
+ dst->u.discard.id = src->u.discard.id;
+ dst->u.discard.sector_number = src->u.discard.sector_number;
+ dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
+ break;
+
+ case BLKIF_OP_INDIRECT:
+ dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
+ dst->u.indirect.nr_segments =
+ READ_ONCE(src->u.indirect.nr_segments);
+ dst->u.indirect.handle = src->u.indirect.handle;
+ dst->u.indirect.id = src->u.indirect.id;
+ dst->u.indirect.sector_number = src->u.indirect.sector_number;
+ n = min(MAX_INDIRECT_PAGES,
+ INDIRECT_PAGES(dst->u.indirect.nr_segments));
+ for (i = 0; i < n; i++)
+ dst->u.indirect.indirect_grefs[i] =
+ src->u.indirect.indirect_grefs[i];
+ break;
+ default:
+ /*
+ * Don't know how to translate this op. Only get the
+ * ID so failure can be reported to the frontend.
+ */
+ dst->u.other.id = src->u.other.id;
+ break;
+ }
+}
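Both translation helpers snapshot guest-controlled fields with READ_ONCE() and clamp them before they size any copy loop, so a frontend rewriting the shared ring mid-request cannot widen the bounds after the check. A minimal sketch of that snapshot-and-clamp pattern, assuming a hypothetical demo_req layout and a DEMO_MAX_SEGS stand-in for BLKIF_MAX_SEGMENTS_PER_REQUEST:

#include <linux/compiler.h>	/* READ_ONCE() */
#include <linux/kernel.h>	/* min_t() */

#define DEMO_MAX_SEGS 11	/* stand-in for BLKIF_MAX_SEGMENTS_PER_REQUEST */

struct demo_req {		/* hypothetical shared-ring request */
	unsigned int nr_segments;	/* guest-writable at any time */
};

static unsigned int demo_nr_segments(const struct demo_req *src)
{
	/*
	 * Read the untrusted field exactly once so every later use sees
	 * the same value (no TOCTOU through the shared ring)...
	 */
	unsigned int nr = READ_ONCE(src->nr_segments);

	/* ...and bound it before it drives any loop or indexing. */
	return min_t(unsigned int, DEMO_MAX_SEGS, nr);
}

The indirect path in the hunk above applies the same bound to the descriptor pages, clamping INDIRECT_PAGES(nr_segments) against MAX_INDIRECT_PAGES before copying indirect_grefs.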
/*
* Function to copy the 'struct blkif_request' from the ring buffer