Diffstat (limited to 'drivers/block/xen-blkback/blkback.c')
-rw-r--r--  drivers/block/xen-blkback/blkback.c | 23 ++++++++++++++++++-----
1 file changed, 18 insertions(+), 5 deletions(-)
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 713fc9ff1149..ced96777b677 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -84,6 +84,13 @@ MODULE_PARM_DESC(max_persistent_grants,
                  "Maximum number of grants to map persistently");
 
 /*
+ * Maximum order of pages to be used for the shared ring between front and
+ * backend, 4KB page granularity is used.
+ */
+unsigned int xen_blkif_max_ring_order = XENBUS_MAX_RING_PAGE_ORDER;
+module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, S_IRUGO);
+MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");
+/*
  * The LRU mechanism to clean the lists of persistent grants needs to
  * be executed periodically. The time interval between consecutive executions
  * of the purge mechanism is set in ms.
@@ -729,7 +736,7 @@ static void xen_blkbk_unmap_and_respond(struct pending_req *req)
 	struct grant_page **pages = req->segments;
 	unsigned int invcount;
 
-	invcount = xen_blkbk_unmap_prepare(blkif, pages, req->nr_pages,
+	invcount = xen_blkbk_unmap_prepare(blkif, pages, req->nr_segs,
 					   req->unmap, req->unmap_pages);
 
 	work->data = req;
@@ -915,7 +922,7 @@ static int xen_blkbk_map_seg(struct pending_req *pending_req)
 	int rc;
 
 	rc = xen_blkbk_map(pending_req->blkif, pending_req->segments,
-			   pending_req->nr_pages,
+			   pending_req->nr_segs,
 			   (pending_req->operation != BLKIF_OP_READ));
 
 	return rc;
@@ -931,7 +938,7 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
 	int indirect_grefs, rc, n, nseg, i;
 	struct blkif_request_segment *segments = NULL;
 
-	nseg = pending_req->nr_pages;
+	nseg = pending_req->nr_segs;
 	indirect_grefs = INDIRECT_PAGES(nseg);
 	BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);
 
@@ -1251,7 +1258,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
 	pending_req->id        = req->u.rw.id;
 	pending_req->operation = req_operation;
 	pending_req->status    = BLKIF_RSP_OKAY;
-	pending_req->nr_pages  = nseg;
+	pending_req->nr_segs   = nseg;
 
 	if (req->operation != BLKIF_OP_INDIRECT) {
 		preq.dev               = req->u.rw.handle;
@@ -1372,7 +1379,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
 
  fail_flush:
 	xen_blkbk_unmap(blkif, pending_req->segments,
-			pending_req->nr_pages);
+			pending_req->nr_segs);
  fail_response:
 	/* Haven't submitted any bio's yet. */
 	make_response(blkif, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
@@ -1438,6 +1445,12 @@ static int __init xen_blkif_init(void)
 	if (!xen_domain())
 		return -ENODEV;
 
+	if (xen_blkif_max_ring_order > XENBUS_MAX_RING_PAGE_ORDER) {
+		pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
+			xen_blkif_max_ring_order, XENBUS_MAX_RING_PAGE_ORDER);
+		xen_blkif_max_ring_order = XENBUS_MAX_RING_PAGE_ORDER;
+	}
+
 	rc = xen_blkif_interface_init();
 	if (rc)
 		goto failed_init;
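For context, here is a minimal frontend-side sketch of how the limit controlled by the new module parameter might be consumed; it is not part of this patch. It assumes the backend advertises its clamped value through a "max-ring-page-order" xenstore node (as the companion xenbus.c change in this series does), and negotiate_ring_order is a hypothetical helper name:

#include <linux/kernel.h>
#include <xen/xenbus.h>

/*
 * Hypothetical helper (illustration only, not in the patch): read the
 * backend's advertised "max-ring-page-order" from xenstore and clamp
 * the order the frontend will request for its shared ring.
 */
static unsigned int negotiate_ring_order(struct xenbus_device *dev,
					 unsigned int wanted_order)
{
	unsigned int backend_max;

	/* xenbus_scanf() returns the number of values parsed. */
	if (xenbus_scanf(XBT_NIL, dev->otherend,
			 "max-ring-page-order", "%u", &backend_max) != 1)
		return 0;	/* node absent: fall back to a one-page ring */

	return min(wanted_order, backend_max);
}

The backend's own limit can be set at load time with, e.g., "modprobe xen-blkback max_ring_page_order=2"; values above XENBUS_MAX_RING_PAGE_ORDER are clamped by the new check in xen_blkif_init() above.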