| author | Anthony PERARD <anthony.perard@citrix.com> | 2010-09-23 12:28:45 +0100 |
|---|---|---|
| committer | Alexander Graf <agraf@suse.de> | 2011-05-08 10:09:59 +0200 |
| commit | 209cd7abe26cffe2cab680696771514757457f55 (patch) | |
| tree | 5dd25b6699367efc3a9686809c08f974b3566c6f | |
| parent | 67b724e69e8d65a4ac4355e3673969b5a3f26afd (diff) | |
xen: Replace some tab-indents with spaces (clean-up).
It also puts braces around blocks consisting of a single statement.
Signed-off-by: Anthony PERARD <anthony.perard@citrix.com>
Acked-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Alexander Graf <agraf@suse.de>
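For readers skimming the diff below, a minimal sketch of the style conventions this patch applies (the function is a hypothetical example, not code taken from the patch): four-space indentation instead of tabs, and braces around every conditional body, even when it is a single statement.

```c
/* Hypothetical example illustrating the coding style applied by this patch:
 * spaces (not tabs) for indentation, and braces around every block, even
 * when the body is a single statement. */
static int check_value(int val)
{
    if (val < 0) {          /* braces kept even for a one-line body */
        return -1;
    }
    return 0;
}
```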
| -rw-r--r-- | hw/xen_backend.c | 411 |
| -rw-r--r-- | hw/xen_disk.c | 492 |
| -rw-r--r-- | hw/xen_nic.c | 265 |
3 files changed, 634 insertions, 534 deletions
diff --git a/hw/xen_backend.c b/hw/xen_backend.c index a2e408fa0e..9f4ec4b40e 100644 --- a/hw/xen_backend.c +++ b/hw/xen_backend.c @@ -58,8 +58,9 @@ int xenstore_write_str(const char *base, const char *node, const char *val) char abspath[XEN_BUFSIZE]; snprintf(abspath, sizeof(abspath), "%s/%s", base, node); - if (!xs_write(xenstore, 0, abspath, val, strlen(val))) - return -1; + if (!xs_write(xenstore, 0, abspath, val, strlen(val))) { + return -1; + } return 0; } @@ -94,8 +95,9 @@ int xenstore_read_int(const char *base, const char *node, int *ival) int rc = -1; val = xenstore_read_str(base, node); - if (val && 1 == sscanf(val, "%d", ival)) - rc = 0; + if (val && 1 == sscanf(val, "%d", ival)) { + rc = 0; + } qemu_free(val); return rc; } @@ -134,16 +136,16 @@ int xenstore_read_fe_int(struct XenDevice *xendev, const char *node, int *ival) const char *xenbus_strstate(enum xenbus_state state) { - static const char *const name[] = { - [ XenbusStateUnknown ] = "Unknown", - [ XenbusStateInitialising ] = "Initialising", - [ XenbusStateInitWait ] = "InitWait", - [ XenbusStateInitialised ] = "Initialised", - [ XenbusStateConnected ] = "Connected", - [ XenbusStateClosing ] = "Closing", - [ XenbusStateClosed ] = "Closed", - }; - return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID"; + static const char *const name[] = { + [ XenbusStateUnknown ] = "Unknown", + [ XenbusStateInitialising ] = "Initialising", + [ XenbusStateInitWait ] = "InitWait", + [ XenbusStateInitialised ] = "Initialised", + [ XenbusStateConnected ] = "Connected", + [ XenbusStateClosing ] = "Closing", + [ XenbusStateClosed ] = "Closed", + }; + return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID"; } int xen_be_set_state(struct XenDevice *xendev, enum xenbus_state state) @@ -151,10 +153,11 @@ int xen_be_set_state(struct XenDevice *xendev, enum xenbus_state state) int rc; rc = xenstore_write_be_int(xendev, "state", state); - if (rc < 0) - return rc; + if (rc < 0) { + return rc; + } xen_be_printf(xendev, 1, "backend state: %s -> %s\n", - xenbus_strstate(xendev->be_state), xenbus_strstate(state)); + xenbus_strstate(xendev->be_state), xenbus_strstate(state)); xendev->be_state = state; return 0; } @@ -166,13 +169,16 @@ struct XenDevice *xen_be_find_xendev(const char *type, int dom, int dev) struct XenDevice *xendev; QTAILQ_FOREACH(xendev, &xendevs, next) { - if (xendev->dom != dom) - continue; - if (xendev->dev != dev) - continue; - if (strcmp(xendev->type, type) != 0) - continue; - return xendev; + if (xendev->dom != dom) { + continue; + } + if (xendev->dev != dev) { + continue; + } + if (strcmp(xendev->type, type) != 0) { + continue; + } + return xendev; } return NULL; } @@ -187,8 +193,9 @@ static struct XenDevice *xen_be_get_xendev(const char *type, int dom, int dev, char *dom0; xendev = xen_be_find_xendev(type, dom, dev); - if (xendev) - return xendev; + if (xendev) { + return xendev; + } /* init new xendev */ xendev = qemu_mallocz(ops->size); @@ -199,9 +206,9 @@ static struct XenDevice *xen_be_get_xendev(const char *type, int dom, int dev, dom0 = xs_get_domain_path(xenstore, 0); snprintf(xendev->be, sizeof(xendev->be), "%s/backend/%s/%d/%d", - dom0, xendev->type, xendev->dom, xendev->dev); + dom0, xendev->type, xendev->dom, xendev->dev); snprintf(xendev->name, sizeof(xendev->name), "%s-%d", - xendev->type, xendev->dev); + xendev->type, xendev->dev); free(dom0); xendev->debug = debug; @@ -209,28 +216,29 @@ static struct XenDevice *xen_be_get_xendev(const char *type, int dom, int dev, xendev->evtchndev = xc_evtchn_open(); if 
(xendev->evtchndev < 0) { - xen_be_printf(NULL, 0, "can't open evtchn device\n"); - qemu_free(xendev); - return NULL; + xen_be_printf(NULL, 0, "can't open evtchn device\n"); + qemu_free(xendev); + return NULL; } fcntl(xc_evtchn_fd(xendev->evtchndev), F_SETFD, FD_CLOEXEC); if (ops->flags & DEVOPS_FLAG_NEED_GNTDEV) { - xendev->gnttabdev = xc_gnttab_open(); - if (xendev->gnttabdev < 0) { - xen_be_printf(NULL, 0, "can't open gnttab device\n"); - xc_evtchn_close(xendev->evtchndev); - qemu_free(xendev); - return NULL; - } + xendev->gnttabdev = xc_gnttab_open(); + if (xendev->gnttabdev < 0) { + xen_be_printf(NULL, 0, "can't open gnttab device\n"); + xc_evtchn_close(xendev->evtchndev); + qemu_free(xendev); + return NULL; + } } else { - xendev->gnttabdev = -1; + xendev->gnttabdev = -1; } QTAILQ_INSERT_TAIL(&xendevs, xendev, next); - if (xendev->ops->alloc) - xendev->ops->alloc(xendev); + if (xendev->ops->alloc) { + xendev->ops->alloc(xendev); + } return xendev; } @@ -251,28 +259,33 @@ static struct XenDevice *xen_be_del_xendev(int dom, int dev) xendev = xnext; xnext = xendev->next.tqe_next; - if (xendev->dom != dom) - continue; - if (xendev->dev != dev && dev != -1) - continue; - - if (xendev->ops->free) - xendev->ops->free(xendev); - - if (xendev->fe) { - char token[XEN_BUFSIZE]; - snprintf(token, sizeof(token), "fe:%p", xendev); - xs_unwatch(xenstore, xendev->fe, token); - qemu_free(xendev->fe); - } - - if (xendev->evtchndev >= 0) - xc_evtchn_close(xendev->evtchndev); - if (xendev->gnttabdev >= 0) - xc_gnttab_close(xendev->gnttabdev); - - QTAILQ_REMOVE(&xendevs, xendev, next); - qemu_free(xendev); + if (xendev->dom != dom) { + continue; + } + if (xendev->dev != dev && dev != -1) { + continue; + } + + if (xendev->ops->free) { + xendev->ops->free(xendev); + } + + if (xendev->fe) { + char token[XEN_BUFSIZE]; + snprintf(token, sizeof(token), "fe:%p", xendev); + xs_unwatch(xenstore, xendev->fe, token); + qemu_free(xendev->fe); + } + + if (xendev->evtchndev >= 0) { + xc_evtchn_close(xendev->evtchndev); + } + if (xendev->gnttabdev >= 0) { + xc_gnttab_close(xendev->gnttabdev); + } + + QTAILQ_REMOVE(&xendevs, xendev, next); + qemu_free(xendev); } return NULL; } @@ -285,14 +298,16 @@ static struct XenDevice *xen_be_del_xendev(int dom, int dev) static void xen_be_backend_changed(struct XenDevice *xendev, const char *node) { if (node == NULL || strcmp(node, "online") == 0) { - if (xenstore_read_be_int(xendev, "online", &xendev->online) == -1) - xendev->online = 0; + if (xenstore_read_be_int(xendev, "online", &xendev->online) == -1) { + xendev->online = 0; + } } if (node) { - xen_be_printf(xendev, 2, "backend update: %s\n", node); - if (xendev->ops->backend_changed) - xendev->ops->backend_changed(xendev, node); + xen_be_printf(xendev, 2, "backend update: %s\n", node); + if (xendev->ops->backend_changed) { + xendev->ops->backend_changed(xendev, node); + } } } @@ -301,25 +316,29 @@ static void xen_be_frontend_changed(struct XenDevice *xendev, const char *node) int fe_state; if (node == NULL || strcmp(node, "state") == 0) { - if (xenstore_read_fe_int(xendev, "state", &fe_state) == -1) - fe_state = XenbusStateUnknown; - if (xendev->fe_state != fe_state) - xen_be_printf(xendev, 1, "frontend state: %s -> %s\n", - xenbus_strstate(xendev->fe_state), - xenbus_strstate(fe_state)); - xendev->fe_state = fe_state; + if (xenstore_read_fe_int(xendev, "state", &fe_state) == -1) { + fe_state = XenbusStateUnknown; + } + if (xendev->fe_state != fe_state) { + xen_be_printf(xendev, 1, "frontend state: %s -> %s\n", + 
xenbus_strstate(xendev->fe_state), + xenbus_strstate(fe_state)); + } + xendev->fe_state = fe_state; } if (node == NULL || strcmp(node, "protocol") == 0) { - qemu_free(xendev->protocol); - xendev->protocol = xenstore_read_fe_str(xendev, "protocol"); - if (xendev->protocol) - xen_be_printf(xendev, 1, "frontend protocol: %s\n", xendev->protocol); + qemu_free(xendev->protocol); + xendev->protocol = xenstore_read_fe_str(xendev, "protocol"); + if (xendev->protocol) { + xen_be_printf(xendev, 1, "frontend protocol: %s\n", xendev->protocol); + } } if (node) { - xen_be_printf(xendev, 2, "frontend update: %s\n", node); - if (xendev->ops->frontend_changed) - xendev->ops->frontend_changed(xendev, node); + xen_be_printf(xendev, 2, "frontend update: %s\n", node); + if (xendev->ops->frontend_changed) { + xendev->ops->frontend_changed(xendev, node); + } } } @@ -340,28 +359,28 @@ static int xen_be_try_setup(struct XenDevice *xendev) int be_state; if (xenstore_read_be_int(xendev, "state", &be_state) == -1) { - xen_be_printf(xendev, 0, "reading backend state failed\n"); - return -1; + xen_be_printf(xendev, 0, "reading backend state failed\n"); + return -1; } if (be_state != XenbusStateInitialising) { - xen_be_printf(xendev, 0, "initial backend state is wrong (%s)\n", - xenbus_strstate(be_state)); - return -1; + xen_be_printf(xendev, 0, "initial backend state is wrong (%s)\n", + xenbus_strstate(be_state)); + return -1; } xendev->fe = xenstore_read_be_str(xendev, "frontend"); if (xendev->fe == NULL) { - xen_be_printf(xendev, 0, "reading frontend path failed\n"); - return -1; + xen_be_printf(xendev, 0, "reading frontend path failed\n"); + return -1; } /* setup frontend watch */ snprintf(token, sizeof(token), "fe:%p", xendev); if (!xs_watch(xenstore, xendev->fe, token)) { - xen_be_printf(xendev, 0, "watching frontend path (%s) failed\n", - xendev->fe); - return -1; + xen_be_printf(xendev, 0, "watching frontend path (%s) failed\n", + xendev->fe); + return -1; } xen_be_set_state(xendev, XenbusStateInitialising); @@ -383,15 +402,16 @@ static int xen_be_try_init(struct XenDevice *xendev) int rc = 0; if (!xendev->online) { - xen_be_printf(xendev, 1, "not online\n"); - return -1; + xen_be_printf(xendev, 1, "not online\n"); + return -1; } - if (xendev->ops->init) - rc = xendev->ops->init(xendev); + if (xendev->ops->init) { + rc = xendev->ops->init(xendev); + } if (rc != 0) { - xen_be_printf(xendev, 1, "init() failed\n"); - return rc; + xen_be_printf(xendev, 1, "init() failed\n"); + return rc; } xenstore_write_be_str(xendev, "hotplug-status", "connected"); @@ -411,20 +431,21 @@ static int xen_be_try_connect(struct XenDevice *xendev) int rc = 0; if (xendev->fe_state != XenbusStateInitialised && - xendev->fe_state != XenbusStateConnected) { - if (xendev->ops->flags & DEVOPS_FLAG_IGNORE_STATE) { - xen_be_printf(xendev, 2, "frontend not ready, ignoring\n"); - } else { - xen_be_printf(xendev, 2, "frontend not ready (yet)\n"); - return -1; - } + xendev->fe_state != XenbusStateConnected) { + if (xendev->ops->flags & DEVOPS_FLAG_IGNORE_STATE) { + xen_be_printf(xendev, 2, "frontend not ready, ignoring\n"); + } else { + xen_be_printf(xendev, 2, "frontend not ready (yet)\n"); + return -1; + } } - if (xendev->ops->connect) - rc = xendev->ops->connect(xendev); + if (xendev->ops->connect) { + rc = xendev->ops->connect(xendev); + } if (rc != 0) { - xen_be_printf(xendev, 0, "connect() failed\n"); - return rc; + xen_be_printf(xendev, 0, "connect() failed\n"); + return rc; } xen_be_set_state(xendev, XenbusStateConnected); @@ -440,10 
+461,12 @@ static void xen_be_disconnect(struct XenDevice *xendev, enum xenbus_state state) { if (xendev->be_state != XenbusStateClosing && xendev->be_state != XenbusStateClosed && - xendev->ops->disconnect) - xendev->ops->disconnect(xendev); - if (xendev->be_state != state) + xendev->ops->disconnect) { + xendev->ops->disconnect(xendev); + } + if (xendev->be_state != state) { xen_be_set_state(xendev, state); + } } /* @@ -451,8 +474,9 @@ static void xen_be_disconnect(struct XenDevice *xendev, enum xenbus_state state) */ static int xen_be_try_reset(struct XenDevice *xendev) { - if (xendev->fe_state != XenbusStateInitialising) + if (xendev->fe_state != XenbusStateInitialising) { return -1; + } xen_be_printf(xendev, 1, "device reset (for re-connect)\n"); xen_be_set_state(xendev, XenbusStateInitialising); @@ -468,31 +492,32 @@ void xen_be_check_state(struct XenDevice *xendev) /* frontend may request shutdown from almost anywhere */ if (xendev->fe_state == XenbusStateClosing || - xendev->fe_state == XenbusStateClosed) { - xen_be_disconnect(xendev, xendev->fe_state); - return; + xendev->fe_state == XenbusStateClosed) { + xen_be_disconnect(xendev, xendev->fe_state); + return; } /* check for possible backend state transitions */ for (;;) { - switch (xendev->be_state) { - case XenbusStateUnknown: - rc = xen_be_try_setup(xendev); - break; - case XenbusStateInitialising: - rc = xen_be_try_init(xendev); - break; - case XenbusStateInitWait: - rc = xen_be_try_connect(xendev); - break; + switch (xendev->be_state) { + case XenbusStateUnknown: + rc = xen_be_try_setup(xendev); + break; + case XenbusStateInitialising: + rc = xen_be_try_init(xendev); + break; + case XenbusStateInitWait: + rc = xen_be_try_connect(xendev); + break; case XenbusStateClosed: rc = xen_be_try_reset(xendev); break; - default: - rc = -1; - } - if (rc != 0) - break; + default: + rc = -1; + } + if (rc != 0) { + break; + } } } @@ -511,26 +536,28 @@ static int xenstore_scan(const char *type, int dom, struct XenDevOps *ops) snprintf(path, sizeof(path), "%s/backend/%s/%d", dom0, type, dom); free(dom0); if (!xs_watch(xenstore, path, token)) { - xen_be_printf(NULL, 0, "xen be: watching backend path (%s) failed\n", path); - return -1; + xen_be_printf(NULL, 0, "xen be: watching backend path (%s) failed\n", path); + return -1; } /* look for backends */ dev = xs_directory(xenstore, 0, path, &cdev); - if (!dev) - return 0; + if (!dev) { + return 0; + } for (j = 0; j < cdev; j++) { - xendev = xen_be_get_xendev(type, dom, atoi(dev[j]), ops); - if (xendev == NULL) - continue; - xen_be_check_state(xendev); + xendev = xen_be_get_xendev(type, dom, atoi(dev[j]), ops); + if (xendev == NULL) { + continue; + } + xen_be_check_state(xendev); } free(dev); return 0; } static void xenstore_update_be(char *watch, char *type, int dom, - struct XenDevOps *ops) + struct XenDevOps *ops) { struct XenDevice *xendev; char path[XEN_BUFSIZE], *dom0; @@ -539,25 +566,28 @@ static void xenstore_update_be(char *watch, char *type, int dom, dom0 = xs_get_domain_path(xenstore, 0); len = snprintf(path, sizeof(path), "%s/backend/%s/%d", dom0, type, dom); free(dom0); - if (strncmp(path, watch, len) != 0) - return; + if (strncmp(path, watch, len) != 0) { + return; + } if (sscanf(watch+len, "/%u/%255s", &dev, path) != 2) { - strcpy(path, ""); - if (sscanf(watch+len, "/%u", &dev) != 1) - dev = -1; + strcpy(path, ""); + if (sscanf(watch+len, "/%u", &dev) != 1) { + dev = -1; + } + } + if (dev == -1) { + return; } - if (dev == -1) - return; if (0) { - /* FIXME: detect devices being 
deleted from xenstore ... */ - xen_be_del_xendev(dom, dev); + /* FIXME: detect devices being deleted from xenstore ... */ + xen_be_del_xendev(dom, dev); } xendev = xen_be_get_xendev(type, dom, dev, ops); if (xendev != NULL) { - xen_be_backend_changed(xendev, path); - xen_be_check_state(xendev); + xen_be_backend_changed(xendev, path); + xen_be_check_state(xendev); } } @@ -567,10 +597,12 @@ static void xenstore_update_fe(char *watch, struct XenDevice *xendev) unsigned int len; len = strlen(xendev->fe); - if (strncmp(xendev->fe, watch, len) != 0) - return; - if (watch[len] != '/') - return; + if (strncmp(xendev->fe, watch, len) != 0) { + return; + } + if (watch[len] != '/') { + return; + } node = watch + len + 1; xen_be_frontend_changed(xendev, node); @@ -584,14 +616,17 @@ static void xenstore_update(void *unused) unsigned int dom, count; vec = xs_read_watch(xenstore, &count); - if (vec == NULL) - goto cleanup; + if (vec == NULL) { + goto cleanup; + } if (sscanf(vec[XS_WATCH_TOKEN], "be:%" PRIxPTR ":%d:%" PRIxPTR, - &type, &dom, &ops) == 3) - xenstore_update_be(vec[XS_WATCH_PATH], (void*)type, dom, (void*)ops); - if (sscanf(vec[XS_WATCH_TOKEN], "fe:%" PRIxPTR, &ptr) == 1) - xenstore_update_fe(vec[XS_WATCH_PATH], (void*)ptr); + &type, &dom, &ops) == 3) { + xenstore_update_be(vec[XS_WATCH_PATH], (void*)type, dom, (void*)ops); + } + if (sscanf(vec[XS_WATCH_TOKEN], "fe:%" PRIxPTR, &ptr) == 1) { + xenstore_update_fe(vec[XS_WATCH_PATH], (void*)ptr); + } cleanup: free(vec); @@ -604,14 +639,15 @@ static void xen_be_evtchn_event(void *opaque) port = xc_evtchn_pending(xendev->evtchndev); if (port != xendev->local_port) { - xen_be_printf(xendev, 0, "xc_evtchn_pending returned %d (expected %d)\n", - port, xendev->local_port); - return; + xen_be_printf(xendev, 0, "xc_evtchn_pending returned %d (expected %d)\n", + port, xendev->local_port); + return; } xc_evtchn_unmask(xendev->evtchndev, port); - if (xendev->ops->event) - xendev->ops->event(xendev); + if (xendev->ops->event) { + xendev->ops->event(xendev); + } } /* -------------------------------------------------------------------- */ @@ -620,17 +656,18 @@ int xen_be_init(void) { xenstore = xs_daemon_open(); if (!xenstore) { - xen_be_printf(NULL, 0, "can't connect to xenstored\n"); - return -1; + xen_be_printf(NULL, 0, "can't connect to xenstored\n"); + return -1; } - if (qemu_set_fd_handler(xs_fileno(xenstore), xenstore_update, NULL, NULL) < 0) - goto err; + if (qemu_set_fd_handler(xs_fileno(xenstore), xenstore_update, NULL, NULL) < 0) { + goto err; + } xen_xc = xc_interface_open(); if (xen_xc == -1) { - xen_be_printf(NULL, 0, "can't open xen interface\n"); - goto err; + xen_be_printf(NULL, 0, "can't open xen interface\n"); + goto err; } return 0; @@ -649,24 +686,26 @@ int xen_be_register(const char *type, struct XenDevOps *ops) int xen_be_bind_evtchn(struct XenDevice *xendev) { - if (xendev->local_port != -1) - return 0; + if (xendev->local_port != -1) { + return 0; + } xendev->local_port = xc_evtchn_bind_interdomain - (xendev->evtchndev, xendev->dom, xendev->remote_port); + (xendev->evtchndev, xendev->dom, xendev->remote_port); if (xendev->local_port == -1) { - xen_be_printf(xendev, 0, "xc_evtchn_bind_interdomain failed\n"); - return -1; + xen_be_printf(xendev, 0, "xc_evtchn_bind_interdomain failed\n"); + return -1; } xen_be_printf(xendev, 2, "bind evtchn port %d\n", xendev->local_port); qemu_set_fd_handler(xc_evtchn_fd(xendev->evtchndev), - xen_be_evtchn_event, NULL, xendev); + xen_be_evtchn_event, NULL, xendev); return 0; } void 
xen_be_unbind_evtchn(struct XenDevice *xendev) { - if (xendev->local_port == -1) - return; + if (xendev->local_port == -1) { + return; + } qemu_set_fd_handler(xc_evtchn_fd(xendev->evtchndev), NULL, NULL, NULL); xc_evtchn_unbind(xendev->evtchndev, xendev->local_port); xen_be_printf(xendev, 2, "unbind evtchn port %d\n", xendev->local_port); @@ -690,17 +729,21 @@ void xen_be_printf(struct XenDevice *xendev, int msg_level, const char *fmt, ... va_list args; if (xendev) { - if (msg_level > xendev->debug) + if (msg_level > xendev->debug) { return; + } qemu_log("xen be: %s: ", xendev->name); - if (msg_level == 0) + if (msg_level == 0) { fprintf(stderr, "xen be: %s: ", xendev->name); + } } else { - if (msg_level > debug) + if (msg_level > debug) { return; + } qemu_log("xen be core: "); - if (msg_level == 0) + if (msg_level == 0) { fprintf(stderr, "xen be core: "); + } } va_start(args, fmt); qemu_log_vprintf(fmt, args); diff --git a/hw/xen_disk.c b/hw/xen_disk.c index 558bf8ae25..2b3a8feac7 100644 --- a/hw/xen_disk.c +++ b/hw/xen_disk.c @@ -120,17 +120,18 @@ static struct ioreq *ioreq_start(struct XenBlkDev *blkdev) struct ioreq *ioreq = NULL; if (QLIST_EMPTY(&blkdev->freelist)) { - if (blkdev->requests_total >= max_requests) - goto out; - /* allocate new struct */ - ioreq = qemu_mallocz(sizeof(*ioreq)); - ioreq->blkdev = blkdev; - blkdev->requests_total++; + if (blkdev->requests_total >= max_requests) { + goto out; + } + /* allocate new struct */ + ioreq = qemu_mallocz(sizeof(*ioreq)); + ioreq->blkdev = blkdev; + blkdev->requests_total++; qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST); } else { - /* get one from freelist */ - ioreq = QLIST_FIRST(&blkdev->freelist); - QLIST_REMOVE(ioreq, list); + /* get one from freelist */ + ioreq = QLIST_FIRST(&blkdev->freelist); + QLIST_REMOVE(ioreq, list); qemu_iovec_reset(&ioreq->v); } QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list); @@ -173,30 +174,32 @@ static int ioreq_parse(struct ioreq *ioreq) int i; xen_be_printf(&blkdev->xendev, 3, - "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n", - ioreq->req.operation, ioreq->req.nr_segments, - ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number); + "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n", + ioreq->req.operation, ioreq->req.nr_segments, + ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number); switch (ioreq->req.operation) { case BLKIF_OP_READ: - ioreq->prot = PROT_WRITE; /* to memory */ - break; + ioreq->prot = PROT_WRITE; /* to memory */ + break; case BLKIF_OP_WRITE_BARRIER: if (!ioreq->req.nr_segments) { ioreq->presync = 1; return 0; } - if (!syncwrite) - ioreq->presync = ioreq->postsync = 1; - /* fall through */ + if (!syncwrite) { + ioreq->presync = ioreq->postsync = 1; + } + /* fall through */ case BLKIF_OP_WRITE: - ioreq->prot = PROT_READ; /* from memory */ - if (syncwrite) - ioreq->postsync = 1; - break; + ioreq->prot = PROT_READ; /* from memory */ + if (syncwrite) { + ioreq->postsync = 1; + } + break; default: - xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n", - ioreq->req.operation); - goto err; + xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n", + ioreq->req.operation); + goto err; }; if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') { @@ -206,29 +209,29 @@ static int ioreq_parse(struct ioreq *ioreq) ioreq->start = ioreq->req.sector_number * blkdev->file_blk; for (i = 0; i < ioreq->req.nr_segments; i++) { - if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) { - xen_be_printf(&blkdev->xendev, 0, 
"error: nr_segments too big\n"); - goto err; - } - if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) { - xen_be_printf(&blkdev->xendev, 0, "error: first > last sector\n"); - goto err; - } - if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) { - xen_be_printf(&blkdev->xendev, 0, "error: page crossing\n"); - goto err; - } - - ioreq->domids[i] = blkdev->xendev.dom; - ioreq->refs[i] = ioreq->req.seg[i].gref; - - mem = ioreq->req.seg[i].first_sect * blkdev->file_blk; - len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk; + if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) { + xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n"); + goto err; + } + if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) { + xen_be_printf(&blkdev->xendev, 0, "error: first > last sector\n"); + goto err; + } + if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) { + xen_be_printf(&blkdev->xendev, 0, "error: page crossing\n"); + goto err; + } + + ioreq->domids[i] = blkdev->xendev.dom; + ioreq->refs[i] = ioreq->req.seg[i].gref; + + mem = ioreq->req.seg[i].first_sect * blkdev->file_blk; + len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk; qemu_iovec_add(&ioreq->v, (void*)mem, len); } if (ioreq->start + ioreq->v.size > blkdev->file_size) { - xen_be_printf(&blkdev->xendev, 0, "error: access beyond end of file\n"); - goto err; + xen_be_printf(&blkdev->xendev, 0, "error: access beyond end of file\n"); + goto err; } return 0; @@ -242,26 +245,31 @@ static void ioreq_unmap(struct ioreq *ioreq) int gnt = ioreq->blkdev->xendev.gnttabdev; int i; - if (ioreq->v.niov == 0) + if (ioreq->v.niov == 0) { return; + } if (batch_maps) { - if (!ioreq->pages) - return; - if (xc_gnttab_munmap(gnt, ioreq->pages, ioreq->v.niov) != 0) - xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n", - strerror(errno)); - ioreq->blkdev->cnt_map -= ioreq->v.niov; - ioreq->pages = NULL; + if (!ioreq->pages) { + return; + } + if (xc_gnttab_munmap(gnt, ioreq->pages, ioreq->v.niov) != 0) { + xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n", + strerror(errno)); + } + ioreq->blkdev->cnt_map -= ioreq->v.niov; + ioreq->pages = NULL; } else { - for (i = 0; i < ioreq->v.niov; i++) { - if (!ioreq->page[i]) - continue; - if (xc_gnttab_munmap(gnt, ioreq->page[i], 1) != 0) - xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n", - strerror(errno)); - ioreq->blkdev->cnt_map--; - ioreq->page[i] = NULL; - } + for (i = 0; i < ioreq->v.niov; i++) { + if (!ioreq->page[i]) { + continue; + } + if (xc_gnttab_munmap(gnt, ioreq->page[i], 1) != 0) { + xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n", + strerror(errno)); + } + ioreq->blkdev->cnt_map--; + ioreq->page[i] = NULL; + } } } @@ -270,35 +278,37 @@ static int ioreq_map(struct ioreq *ioreq) int gnt = ioreq->blkdev->xendev.gnttabdev; int i; - if (ioreq->v.niov == 0) + if (ioreq->v.niov == 0) { return 0; + } if (batch_maps) { - ioreq->pages = xc_gnttab_map_grant_refs - (gnt, ioreq->v.niov, ioreq->domids, ioreq->refs, ioreq->prot); - if (ioreq->pages == NULL) { - xen_be_printf(&ioreq->blkdev->xendev, 0, - "can't map %d grant refs (%s, %d maps)\n", - ioreq->v.niov, strerror(errno), ioreq->blkdev->cnt_map); - return -1; - } - for (i = 0; i < ioreq->v.niov; i++) - ioreq->v.iov[i].iov_base = ioreq->pages + i * XC_PAGE_SIZE + - (uintptr_t)ioreq->v.iov[i].iov_base; - ioreq->blkdev->cnt_map += ioreq->v.niov; + ioreq->pages = 
xc_gnttab_map_grant_refs + (gnt, ioreq->v.niov, ioreq->domids, ioreq->refs, ioreq->prot); + if (ioreq->pages == NULL) { + xen_be_printf(&ioreq->blkdev->xendev, 0, + "can't map %d grant refs (%s, %d maps)\n", + ioreq->v.niov, strerror(errno), ioreq->blkdev->cnt_map); + return -1; + } + for (i = 0; i < ioreq->v.niov; i++) { + ioreq->v.iov[i].iov_base = ioreq->pages + i * XC_PAGE_SIZE + + (uintptr_t)ioreq->v.iov[i].iov_base; + } + ioreq->blkdev->cnt_map += ioreq->v.niov; } else { - for (i = 0; i < ioreq->v.niov; i++) { - ioreq->page[i] = xc_gnttab_map_grant_ref - (gnt, ioreq->domids[i], ioreq->refs[i], ioreq->prot); - if (ioreq->page[i] == NULL) { - xen_be_printf(&ioreq->blkdev->xendev, 0, - "can't map grant ref %d (%s, %d maps)\n", - ioreq->refs[i], strerror(errno), ioreq->blkdev->cnt_map); - ioreq_unmap(ioreq); - return -1; - } - ioreq->v.iov[i].iov_base = ioreq->page[i] + (uintptr_t)ioreq->v.iov[i].iov_base; - ioreq->blkdev->cnt_map++; - } + for (i = 0; i < ioreq->v.niov; i++) { + ioreq->page[i] = xc_gnttab_map_grant_ref + (gnt, ioreq->domids[i], ioreq->refs[i], ioreq->prot); + if (ioreq->page[i] == NULL) { + xen_be_printf(&ioreq->blkdev->xendev, 0, + "can't map grant ref %d (%s, %d maps)\n", + ioreq->refs[i], strerror(errno), ioreq->blkdev->cnt_map); + ioreq_unmap(ioreq); + return -1; + } + ioreq->v.iov[i].iov_base = ioreq->page[i] + (uintptr_t)ioreq->v.iov[i].iov_base; + ioreq->blkdev->cnt_map++; + } } return 0; } @@ -309,54 +319,58 @@ static int ioreq_runio_qemu_sync(struct ioreq *ioreq) int i, rc, len = 0; off_t pos; - if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) - goto err_no_map; - if (ioreq->presync) - bdrv_flush(blkdev->bs); + if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) { + goto err_no_map; + } + if (ioreq->presync) { + bdrv_flush(blkdev->bs); + } switch (ioreq->req.operation) { case BLKIF_OP_READ: - pos = ioreq->start; - for (i = 0; i < ioreq->v.niov; i++) { - rc = bdrv_read(blkdev->bs, pos / BLOCK_SIZE, - ioreq->v.iov[i].iov_base, - ioreq->v.iov[i].iov_len / BLOCK_SIZE); - if (rc != 0) { - xen_be_printf(&blkdev->xendev, 0, "rd I/O error (%p, len %zd)\n", - ioreq->v.iov[i].iov_base, - ioreq->v.iov[i].iov_len); - goto err; - } - len += ioreq->v.iov[i].iov_len; - pos += ioreq->v.iov[i].iov_len; - } - break; + pos = ioreq->start; + for (i = 0; i < ioreq->v.niov; i++) { + rc = bdrv_read(blkdev->bs, pos / BLOCK_SIZE, + ioreq->v.iov[i].iov_base, + ioreq->v.iov[i].iov_len / BLOCK_SIZE); + if (rc != 0) { + xen_be_printf(&blkdev->xendev, 0, "rd I/O error (%p, len %zd)\n", + ioreq->v.iov[i].iov_base, + ioreq->v.iov[i].iov_len); + goto err; + } + len += ioreq->v.iov[i].iov_len; + pos += ioreq->v.iov[i].iov_len; + } + break; case BLKIF_OP_WRITE: case BLKIF_OP_WRITE_BARRIER: - if (!ioreq->req.nr_segments) + if (!ioreq->req.nr_segments) { break; - pos = ioreq->start; - for (i = 0; i < ioreq->v.niov; i++) { - rc = bdrv_write(blkdev->bs, pos / BLOCK_SIZE, - ioreq->v.iov[i].iov_base, - ioreq->v.iov[i].iov_len / BLOCK_SIZE); - if (rc != 0) { - xen_be_printf(&blkdev->xendev, 0, "wr I/O error (%p, len %zd)\n", - ioreq->v.iov[i].iov_base, - ioreq->v.iov[i].iov_len); - goto err; - } - len += ioreq->v.iov[i].iov_len; - pos += ioreq->v.iov[i].iov_len; - } - break; + } + pos = ioreq->start; + for (i = 0; i < ioreq->v.niov; i++) { + rc = bdrv_write(blkdev->bs, pos / BLOCK_SIZE, + ioreq->v.iov[i].iov_base, + ioreq->v.iov[i].iov_len / BLOCK_SIZE); + if (rc != 0) { + xen_be_printf(&blkdev->xendev, 0, "wr I/O error (%p, len %zd)\n", + ioreq->v.iov[i].iov_base, + ioreq->v.iov[i].iov_len); + 
goto err; + } + len += ioreq->v.iov[i].iov_len; + pos += ioreq->v.iov[i].iov_len; + } + break; default: - /* unknown operation (shouldn't happen -- parse catches this) */ - goto err; + /* unknown operation (shouldn't happen -- parse catches this) */ + goto err; } - if (ioreq->postsync) - bdrv_flush(blkdev->bs); + if (ioreq->postsync) { + bdrv_flush(blkdev->bs); + } ioreq->status = BLKIF_RSP_OKAY; ioreq_unmap(ioreq); @@ -382,8 +396,9 @@ static void qemu_aio_complete(void *opaque, int ret) } ioreq->aio_inflight--; - if (ioreq->aio_inflight > 0) + if (ioreq->aio_inflight > 0) { return; + } ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY; ioreq_unmap(ioreq); @@ -395,12 +410,14 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq) { struct XenBlkDev *blkdev = ioreq->blkdev; - if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) - goto err_no_map; + if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) { + goto err_no_map; + } ioreq->aio_inflight++; - if (ioreq->presync) - bdrv_flush(blkdev->bs); /* FIXME: aio_flush() ??? */ + if (ioreq->presync) { + bdrv_flush(blkdev->bs); /* FIXME: aio_flush() ??? */ + } switch (ioreq->req.operation) { case BLKIF_OP_READ: @@ -408,23 +425,25 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq) bdrv_aio_readv(blkdev->bs, ioreq->start / BLOCK_SIZE, &ioreq->v, ioreq->v.size / BLOCK_SIZE, qemu_aio_complete, ioreq); - break; + break; case BLKIF_OP_WRITE: case BLKIF_OP_WRITE_BARRIER: - if (!ioreq->req.nr_segments) + if (!ioreq->req.nr_segments) { break; + } ioreq->aio_inflight++; bdrv_aio_writev(blkdev->bs, ioreq->start / BLOCK_SIZE, &ioreq->v, ioreq->v.size / BLOCK_SIZE, qemu_aio_complete, ioreq); - break; + break; default: - /* unknown operation (shouldn't happen -- parse catches this) */ - goto err; + /* unknown operation (shouldn't happen -- parse catches this) */ + goto err; } - if (ioreq->postsync) - bdrv_flush(blkdev->bs); /* FIXME: aio_flush() ??? */ + if (ioreq->postsync) { + bdrv_flush(blkdev->bs); /* FIXME: aio_flush() ??? */ + } qemu_aio_complete(ioreq, 0); return 0; @@ -452,36 +471,37 @@ static int blk_send_response_one(struct ioreq *ioreq) /* Place on the response ring for the relevant domain. */ switch (blkdev->protocol) { case BLKIF_PROTOCOL_NATIVE: - dst = RING_GET_RESPONSE(&blkdev->rings.native, blkdev->rings.native.rsp_prod_pvt); - break; + dst = RING_GET_RESPONSE(&blkdev->rings.native, blkdev->rings.native.rsp_prod_pvt); + break; case BLKIF_PROTOCOL_X86_32: dst = RING_GET_RESPONSE(&blkdev->rings.x86_32_part, blkdev->rings.x86_32_part.rsp_prod_pvt); - break; + break; case BLKIF_PROTOCOL_X86_64: dst = RING_GET_RESPONSE(&blkdev->rings.x86_64_part, blkdev->rings.x86_64_part.rsp_prod_pvt); - break; + break; default: - dst = NULL; + dst = NULL; } memcpy(dst, &resp, sizeof(resp)); blkdev->rings.common.rsp_prod_pvt++; RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify); if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) { - /* - * Tail check for pending requests. Allows frontend to avoid - * notifications if requests are already in flight (lower - * overheads and promotes batching). - */ - RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests); + /* + * Tail check for pending requests. Allows frontend to avoid + * notifications if requests are already in flight (lower + * overheads and promotes batching). 
+ */ + RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests); } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) { - have_requests = 1; + have_requests = 1; } - if (have_requests) - blkdev->more_work++; + if (have_requests) { + blkdev->more_work++; + } return send_notify; } @@ -493,28 +513,29 @@ static void blk_send_response_all(struct XenBlkDev *blkdev) while (!QLIST_EMPTY(&blkdev->finished)) { ioreq = QLIST_FIRST(&blkdev->finished); - send_notify += blk_send_response_one(ioreq); - ioreq_release(ioreq); + send_notify += blk_send_response_one(ioreq); + ioreq_release(ioreq); + } + if (send_notify) { + xen_be_send_notify(&blkdev->xendev); } - if (send_notify) - xen_be_send_notify(&blkdev->xendev); } static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc) { switch (blkdev->protocol) { case BLKIF_PROTOCOL_NATIVE: - memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc), - sizeof(ioreq->req)); - break; + memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc), + sizeof(ioreq->req)); + break; case BLKIF_PROTOCOL_X86_32: blkif_get_x86_32_req(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc)); - break; + break; case BLKIF_PROTOCOL_X86_64: blkif_get_x86_64_req(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc)); - break; + break; } return 0; } @@ -530,12 +551,14 @@ static void blk_handle_requests(struct XenBlkDev *blkdev) rp = blkdev->rings.common.sring->req_prod; xen_rmb(); /* Ensure we see queued requests up to 'rp'. */ - if (use_aio) + if (use_aio) { blk_send_response_all(blkdev); + } while (rc != rp) { /* pull request from ring */ - if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) + if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) { break; + } ioreq = ioreq_start(blkdev); if (ioreq == NULL) { blkdev->more_work++; @@ -546,8 +569,9 @@ static void blk_handle_requests(struct XenBlkDev *blkdev) /* parse them */ if (ioreq_parse(ioreq) != 0) { - if (blk_send_response_one(ioreq)) + if (blk_send_response_one(ioreq)) { xen_be_send_notify(&blkdev->xendev); + } ioreq_release(ioreq); continue; } @@ -560,11 +584,13 @@ static void blk_handle_requests(struct XenBlkDev *blkdev) ioreq_runio_qemu_sync(ioreq); } } - if (!use_aio) + if (!use_aio) { blk_send_response_all(blkdev); + } - if (blkdev->more_work && blkdev->requests_inflight < max_requests) + if (blkdev->more_work && blkdev->requests_inflight < max_requests) { qemu_bh_schedule(blkdev->bh); + } } /* ------------------------------------------------------------- */ @@ -583,8 +609,9 @@ static void blk_alloc(struct XenDevice *xendev) QLIST_INIT(&blkdev->finished); QLIST_INIT(&blkdev->freelist); blkdev->bh = qemu_bh_new(blk_bh, blkdev); - if (xen_mode != XEN_EMULATE) + if (xen_mode != XEN_EMULATE) { batch_maps = 1; + } } static int blk_init(struct XenDevice *xendev) @@ -595,44 +622,50 @@ static int blk_init(struct XenDevice *xendev) /* read xenstore entries */ if (blkdev->params == NULL) { - blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params"); + blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params"); h = strchr(blkdev->params, ':'); - if (h != NULL) { - blkdev->fileproto = blkdev->params; - blkdev->filename = h+1; - *h = 0; - } else { - blkdev->fileproto = "<unset>"; - blkdev->filename = blkdev->params; - } - } - if (blkdev->mode == NULL) - blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode"); - if (blkdev->type == NULL) - blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type"); - if (blkdev->dev == NULL) - 
blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev"); - if (blkdev->devtype == NULL) - blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type"); + if (h != NULL) { + blkdev->fileproto = blkdev->params; + blkdev->filename = h+1; + *h = 0; + } else { + blkdev->fileproto = "<unset>"; + blkdev->filename = blkdev->params; + } + } + if (blkdev->mode == NULL) { + blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode"); + } + if (blkdev->type == NULL) { + blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type"); + } + if (blkdev->dev == NULL) { + blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev"); + } + if (blkdev->devtype == NULL) { + blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type"); + } /* do we have all we need? */ if (blkdev->params == NULL || - blkdev->mode == NULL || - blkdev->type == NULL || - blkdev->dev == NULL) - return -1; + blkdev->mode == NULL || + blkdev->type == NULL || + blkdev->dev == NULL) { + return -1; + } /* read-only ? */ if (strcmp(blkdev->mode, "w") == 0) { - qflags = BDRV_O_RDWR; + qflags = BDRV_O_RDWR; } else { - qflags = 0; - info |= VDISK_READONLY; + qflags = 0; + info |= VDISK_READONLY; } /* cdrom ? */ - if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom")) - info |= VDISK_CDROM; + if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom")) { + info |= VDISK_CDROM; + } /* init qemu block driver */ index = (blkdev->xendev.dev - 202 * 256) / 16; @@ -649,7 +682,7 @@ static int blk_init(struct XenDevice *xendev) } else { /* setup via qemu cmdline -> already setup for us */ xen_be_printf(&blkdev->xendev, 2, "get configured bdrv (cmdline setup)\n"); - blkdev->bs = blkdev->dinfo->bdrv; + blkdev->bs = blkdev->dinfo->bdrv; } blkdev->file_blk = BLOCK_SIZE; blkdev->file_size = bdrv_getlength(blkdev->bs); @@ -657,21 +690,21 @@ static int blk_init(struct XenDevice *xendev) xen_be_printf(&blkdev->xendev, 1, "bdrv_getlength: %d (%s) | drv %s\n", (int)blkdev->file_size, strerror(-blkdev->file_size), blkdev->bs->drv ? blkdev->bs->drv->format_name : "-"); - blkdev->file_size = 0; + blkdev->file_size = 0; } have_barriers = blkdev->bs->drv && blkdev->bs->drv->bdrv_flush ? 
1 : 0; xen_be_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\"," - " size %" PRId64 " (%" PRId64 " MB)\n", - blkdev->type, blkdev->fileproto, blkdev->filename, - blkdev->file_size, blkdev->file_size >> 20); + " size %" PRId64 " (%" PRId64 " MB)\n", + blkdev->type, blkdev->fileproto, blkdev->filename, + blkdev->file_size, blkdev->file_size >> 20); /* fill info */ xenstore_write_be_int(&blkdev->xendev, "feature-barrier", have_barriers); xenstore_write_be_int(&blkdev->xendev, "info", info); xenstore_write_be_int(&blkdev->xendev, "sector-size", blkdev->file_blk); xenstore_write_be_int(&blkdev->xendev, "sectors", - blkdev->file_size / blkdev->file_blk); + blkdev->file_size / blkdev->file_blk); return 0; } @@ -679,57 +712,62 @@ static int blk_connect(struct XenDevice *xendev) { struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev); - if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1) - return -1; + if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1) { + return -1; + } if (xenstore_read_fe_int(&blkdev->xendev, "event-channel", - &blkdev->xendev.remote_port) == -1) - return -1; + &blkdev->xendev.remote_port) == -1) { + return -1; + } blkdev->protocol = BLKIF_PROTOCOL_NATIVE; if (blkdev->xendev.protocol) { - if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) + if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) { blkdev->protocol = BLKIF_PROTOCOL_X86_32; - if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) + } + if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) { blkdev->protocol = BLKIF_PROTOCOL_X86_64; + } } blkdev->sring = xc_gnttab_map_grant_ref(blkdev->xendev.gnttabdev, - blkdev->xendev.dom, - blkdev->ring_ref, - PROT_READ | PROT_WRITE); - if (!blkdev->sring) - return -1; + blkdev->xendev.dom, + blkdev->ring_ref, + PROT_READ | PROT_WRITE); + if (!blkdev->sring) { + return -1; + } blkdev->cnt_map++; switch (blkdev->protocol) { case BLKIF_PROTOCOL_NATIVE: { - blkif_sring_t *sring_native = blkdev->sring; - BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE); - break; + blkif_sring_t *sring_native = blkdev->sring; + BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE); + break; } case BLKIF_PROTOCOL_X86_32: { - blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring; + blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring; BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, XC_PAGE_SIZE); - break; + break; } case BLKIF_PROTOCOL_X86_64: { - blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring; + blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring; BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, XC_PAGE_SIZE); - break; + break; } } xen_be_bind_evtchn(&blkdev->xendev); xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, " - "remote port %d, local port %d\n", - blkdev->xendev.protocol, blkdev->ring_ref, - blkdev->xendev.remote_port, blkdev->xendev.local_port); + "remote port %d, local port %d\n", + blkdev->xendev.protocol, blkdev->ring_ref, + blkdev->xendev.remote_port, blkdev->xendev.local_port); return 0; } @@ -743,14 +781,14 @@ static void blk_disconnect(struct XenDevice *xendev) bdrv_close(blkdev->bs); bdrv_delete(blkdev->bs); } - blkdev->bs = NULL; + blkdev->bs = NULL; } xen_be_unbind_evtchn(&blkdev->xendev); if (blkdev->sring) { - xc_gnttab_munmap(blkdev->xendev.gnttabdev, blkdev->sring, 1); - blkdev->cnt_map--; - blkdev->sring = NULL; + xc_gnttab_munmap(blkdev->xendev.gnttabdev, blkdev->sring, 1); + 
blkdev->cnt_map--; + blkdev->sring = NULL; } } @@ -760,10 +798,10 @@ static int blk_free(struct XenDevice *xendev) struct ioreq *ioreq; while (!QLIST_EMPTY(&blkdev->freelist)) { - ioreq = QLIST_FIRST(&blkdev->freelist); + ioreq = QLIST_FIRST(&blkdev->freelist); QLIST_REMOVE(ioreq, list); qemu_iovec_destroy(&ioreq->v); - qemu_free(ioreq); + qemu_free(ioreq); } qemu_free(blkdev->params); diff --git a/hw/xen_nic.c b/hw/xen_nic.c index 08055b83ff..ff86491cfa 100644 --- a/hw/xen_nic.c +++ b/hw/xen_nic.c @@ -74,20 +74,23 @@ static void net_tx_response(struct XenNetDev *netdev, netif_tx_request_t *txp, i resp->status = st; #if 0 - if (txp->flags & NETTXF_extra_info) - RING_GET_RESPONSE(&netdev->tx_ring, ++i)->status = NETIF_RSP_NULL; + if (txp->flags & NETTXF_extra_info) { + RING_GET_RESPONSE(&netdev->tx_ring, ++i)->status = NETIF_RSP_NULL; + } #endif netdev->tx_ring.rsp_prod_pvt = ++i; RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netdev->tx_ring, notify); - if (notify) - xen_be_send_notify(&netdev->xendev); + if (notify) { + xen_be_send_notify(&netdev->xendev); + } if (i == netdev->tx_ring.req_cons) { - int more_to_do; - RING_FINAL_CHECK_FOR_REQUESTS(&netdev->tx_ring, more_to_do); - if (more_to_do) - netdev->tx_work++; + int more_to_do; + RING_FINAL_CHECK_FOR_REQUESTS(&netdev->tx_ring, more_to_do); + if (more_to_do) { + netdev->tx_work++; + } } } @@ -101,10 +104,11 @@ static void net_tx_error(struct XenNetDev *netdev, netif_tx_request_t *txp, RING RING_IDX cons = netdev->tx_ring.req_cons; do { - make_tx_response(netif, txp, NETIF_RSP_ERROR); - if (cons >= end) - break; - txp = RING_GET_REQUEST(&netdev->tx_ring, cons++); + make_tx_response(netif, txp, NETIF_RSP_ERROR); + if (cons >= end) { + break; + } + txp = RING_GET_REQUEST(&netdev->tx_ring, cons++); } while (1); netdev->tx_ring.req_cons = cons; netif_schedule_work(netif); @@ -122,75 +126,78 @@ static void net_tx_packets(struct XenNetDev *netdev) void *tmpbuf = NULL; for (;;) { - rc = netdev->tx_ring.req_cons; - rp = netdev->tx_ring.sring->req_prod; - xen_rmb(); /* Ensure we see queued requests up to 'rp'. */ + rc = netdev->tx_ring.req_cons; + rp = netdev->tx_ring.sring->req_prod; + xen_rmb(); /* Ensure we see queued requests up to 'rp'. */ - while ((rc != rp)) { - if (RING_REQUEST_CONS_OVERFLOW(&netdev->tx_ring, rc)) - break; - memcpy(&txreq, RING_GET_REQUEST(&netdev->tx_ring, rc), sizeof(txreq)); - netdev->tx_ring.req_cons = ++rc; + while ((rc != rp)) { + if (RING_REQUEST_CONS_OVERFLOW(&netdev->tx_ring, rc)) { + break; + } + memcpy(&txreq, RING_GET_REQUEST(&netdev->tx_ring, rc), sizeof(txreq)); + netdev->tx_ring.req_cons = ++rc; #if 1 - /* should not happen in theory, we don't announce the * - * feature-{sg,gso,whatelse} flags in xenstore (yet?) */ - if (txreq.flags & NETTXF_extra_info) { - xen_be_printf(&netdev->xendev, 0, "FIXME: extra info flag\n"); - net_tx_error(netdev, &txreq, rc); - continue; - } - if (txreq.flags & NETTXF_more_data) { - xen_be_printf(&netdev->xendev, 0, "FIXME: more data flag\n"); - net_tx_error(netdev, &txreq, rc); - continue; - } + /* should not happen in theory, we don't announce the * + * feature-{sg,gso,whatelse} flags in xenstore (yet?) 
*/ + if (txreq.flags & NETTXF_extra_info) { + xen_be_printf(&netdev->xendev, 0, "FIXME: extra info flag\n"); + net_tx_error(netdev, &txreq, rc); + continue; + } + if (txreq.flags & NETTXF_more_data) { + xen_be_printf(&netdev->xendev, 0, "FIXME: more data flag\n"); + net_tx_error(netdev, &txreq, rc); + continue; + } #endif - if (txreq.size < 14) { - xen_be_printf(&netdev->xendev, 0, "bad packet size: %d\n", txreq.size); - net_tx_error(netdev, &txreq, rc); - continue; - } - - if ((txreq.offset + txreq.size) > XC_PAGE_SIZE) { - xen_be_printf(&netdev->xendev, 0, "error: page crossing\n"); - net_tx_error(netdev, &txreq, rc); - continue; - } - - xen_be_printf(&netdev->xendev, 3, "tx packet ref %d, off %d, len %d, flags 0x%x%s%s%s%s\n", - txreq.gref, txreq.offset, txreq.size, txreq.flags, - (txreq.flags & NETTXF_csum_blank) ? " csum_blank" : "", - (txreq.flags & NETTXF_data_validated) ? " data_validated" : "", - (txreq.flags & NETTXF_more_data) ? " more_data" : "", - (txreq.flags & NETTXF_extra_info) ? " extra_info" : ""); - - page = xc_gnttab_map_grant_ref(netdev->xendev.gnttabdev, - netdev->xendev.dom, - txreq.gref, PROT_READ); - if (page == NULL) { - xen_be_printf(&netdev->xendev, 0, "error: tx gref dereference failed (%d)\n", + if (txreq.size < 14) { + xen_be_printf(&netdev->xendev, 0, "bad packet size: %d\n", txreq.size); + net_tx_error(netdev, &txreq, rc); + continue; + } + + if ((txreq.offset + txreq.size) > XC_PAGE_SIZE) { + xen_be_printf(&netdev->xendev, 0, "error: page crossing\n"); + net_tx_error(netdev, &txreq, rc); + continue; + } + + xen_be_printf(&netdev->xendev, 3, "tx packet ref %d, off %d, len %d, flags 0x%x%s%s%s%s\n", + txreq.gref, txreq.offset, txreq.size, txreq.flags, + (txreq.flags & NETTXF_csum_blank) ? " csum_blank" : "", + (txreq.flags & NETTXF_data_validated) ? " data_validated" : "", + (txreq.flags & NETTXF_more_data) ? " more_data" : "", + (txreq.flags & NETTXF_extra_info) ? 
" extra_info" : ""); + + page = xc_gnttab_map_grant_ref(netdev->xendev.gnttabdev, + netdev->xendev.dom, + txreq.gref, PROT_READ); + if (page == NULL) { + xen_be_printf(&netdev->xendev, 0, "error: tx gref dereference failed (%d)\n", txreq.gref); - net_tx_error(netdev, &txreq, rc); - continue; - } - if (txreq.flags & NETTXF_csum_blank) { + net_tx_error(netdev, &txreq, rc); + continue; + } + if (txreq.flags & NETTXF_csum_blank) { /* have read-only mapping -> can't fill checksum in-place */ - if (!tmpbuf) + if (!tmpbuf) { tmpbuf = qemu_malloc(XC_PAGE_SIZE); + } memcpy(tmpbuf, page + txreq.offset, txreq.size); - net_checksum_calculate(tmpbuf, txreq.size); + net_checksum_calculate(tmpbuf, txreq.size); qemu_send_packet(&netdev->nic->nc, tmpbuf, txreq.size); } else { qemu_send_packet(&netdev->nic->nc, page + txreq.offset, txreq.size); } - xc_gnttab_munmap(netdev->xendev.gnttabdev, page, 1); - net_tx_response(netdev, &txreq, NETIF_RSP_OKAY); - } - if (!netdev->tx_work) - break; - netdev->tx_work = 0; + xc_gnttab_munmap(netdev->xendev.gnttabdev, page, 1); + net_tx_response(netdev, &txreq, NETIF_RSP_OKAY); + } + if (!netdev->tx_work) { + break; + } + netdev->tx_work = 0; } qemu_free(tmpbuf); } @@ -198,9 +205,9 @@ static void net_tx_packets(struct XenNetDev *netdev) /* ------------------------------------------------------------- */ static void net_rx_response(struct XenNetDev *netdev, - netif_rx_request_t *req, int8_t st, - uint16_t offset, uint16_t size, - uint16_t flags) + netif_rx_request_t *req, int8_t st, + uint16_t offset, uint16_t size, + uint16_t flags) { RING_IDX i = netdev->rx_ring.rsp_prod_pvt; netif_rx_response_t *resp; @@ -211,16 +218,18 @@ static void net_rx_response(struct XenNetDev *netdev, resp->flags = flags; resp->id = req->id; resp->status = (int16_t)size; - if (st < 0) - resp->status = (int16_t)st; + if (st < 0) { + resp->status = (int16_t)st; + } xen_be_printf(&netdev->xendev, 3, "rx response: idx %d, status %d, flags 0x%x\n", - i, resp->status, resp->flags); + i, resp->status, resp->flags); netdev->rx_ring.rsp_prod_pvt = ++i; RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netdev->rx_ring, notify); - if (notify) - xen_be_send_notify(&netdev->xendev); + if (notify) { + xen_be_send_notify(&netdev->xendev); + } } #define NET_IP_ALIGN 2 @@ -230,17 +239,18 @@ static int net_rx_ok(VLANClientState *nc) struct XenNetDev *netdev = DO_UPCAST(NICState, nc, nc)->opaque; RING_IDX rc, rp; - if (netdev->xendev.be_state != XenbusStateConnected) - return 0; + if (netdev->xendev.be_state != XenbusStateConnected) { + return 0; + } rc = netdev->rx_ring.req_cons; rp = netdev->rx_ring.sring->req_prod; xen_rmb(); if (rc == rp || RING_REQUEST_CONS_OVERFLOW(&netdev->rx_ring, rc)) { - xen_be_printf(&netdev->xendev, 2, "%s: no rx buffers (%d/%d)\n", - __FUNCTION__, rc, rp); - return 0; + xen_be_printf(&netdev->xendev, 2, "%s: no rx buffers (%d/%d)\n", + __FUNCTION__, rc, rp); + return 0; } return 1; } @@ -252,34 +262,35 @@ static ssize_t net_rx_packet(VLANClientState *nc, const uint8_t *buf, size_t siz RING_IDX rc, rp; void *page; - if (netdev->xendev.be_state != XenbusStateConnected) - return -1; + if (netdev->xendev.be_state != XenbusStateConnected) { + return -1; + } rc = netdev->rx_ring.req_cons; rp = netdev->rx_ring.sring->req_prod; xen_rmb(); /* Ensure we see queued requests up to 'rp'. 
*/ if (rc == rp || RING_REQUEST_CONS_OVERFLOW(&netdev->rx_ring, rc)) { - xen_be_printf(&netdev->xendev, 2, "no buffer, drop packet\n"); - return -1; + xen_be_printf(&netdev->xendev, 2, "no buffer, drop packet\n"); + return -1; } if (size > XC_PAGE_SIZE - NET_IP_ALIGN) { - xen_be_printf(&netdev->xendev, 0, "packet too big (%lu > %ld)", - (unsigned long)size, XC_PAGE_SIZE - NET_IP_ALIGN); - return -1; + xen_be_printf(&netdev->xendev, 0, "packet too big (%lu > %ld)", + (unsigned long)size, XC_PAGE_SIZE - NET_IP_ALIGN); + return -1; } memcpy(&rxreq, RING_GET_REQUEST(&netdev->rx_ring, rc), sizeof(rxreq)); netdev->rx_ring.req_cons = ++rc; page = xc_gnttab_map_grant_ref(netdev->xendev.gnttabdev, - netdev->xendev.dom, - rxreq.gref, PROT_WRITE); + netdev->xendev.dom, + rxreq.gref, PROT_WRITE); if (page == NULL) { - xen_be_printf(&netdev->xendev, 0, "error: rx gref dereference failed (%d)\n", + xen_be_printf(&netdev->xendev, 0, "error: rx gref dereference failed (%d)\n", rxreq.gref); - net_rx_response(netdev, &rxreq, NETIF_RSP_ERROR, 0, 0, 0); - return -1; + net_rx_response(netdev, &rxreq, NETIF_RSP_ERROR, 0, 0, 0); + return -1; } memcpy(page + NET_IP_ALIGN, buf, size); xc_gnttab_munmap(netdev->xendev.gnttabdev, page, 1); @@ -302,15 +313,18 @@ static int net_init(struct XenDevice *xendev) struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev); /* read xenstore entries */ - if (netdev->mac == NULL) - netdev->mac = xenstore_read_be_str(&netdev->xendev, "mac"); + if (netdev->mac == NULL) { + netdev->mac = xenstore_read_be_str(&netdev->xendev, "mac"); + } /* do we have all we need? */ - if (netdev->mac == NULL) - return -1; + if (netdev->mac == NULL) { + return -1; + } - if (net_parse_macaddr(netdev->conf.macaddr.a, netdev->mac) < 0) + if (net_parse_macaddr(netdev->conf.macaddr.a, netdev->mac) < 0) { return -1; + } netdev->conf.vlan = qemu_find_vlan(netdev->xendev.dev, 1); netdev->conf.peer = NULL; @@ -334,41 +348,46 @@ static int net_connect(struct XenDevice *xendev) int rx_copy; if (xenstore_read_fe_int(&netdev->xendev, "tx-ring-ref", - &netdev->tx_ring_ref) == -1) - return -1; + &netdev->tx_ring_ref) == -1) { + return -1; + } if (xenstore_read_fe_int(&netdev->xendev, "rx-ring-ref", - &netdev->rx_ring_ref) == -1) - return 1; + &netdev->rx_ring_ref) == -1) { + return 1; + } if (xenstore_read_fe_int(&netdev->xendev, "event-channel", - &netdev->xendev.remote_port) == -1) - return -1; + &netdev->xendev.remote_port) == -1) { + return -1; + } - if (xenstore_read_fe_int(&netdev->xendev, "request-rx-copy", &rx_copy) == -1) - rx_copy = 0; + if (xenstore_read_fe_int(&netdev->xendev, "request-rx-copy", &rx_copy) == -1) { + rx_copy = 0; + } if (rx_copy == 0) { - xen_be_printf(&netdev->xendev, 0, "frontend doesn't support rx-copy.\n"); - return -1; + xen_be_printf(&netdev->xendev, 0, "frontend doesn't support rx-copy.\n"); + return -1; } netdev->txs = xc_gnttab_map_grant_ref(netdev->xendev.gnttabdev, - netdev->xendev.dom, - netdev->tx_ring_ref, - PROT_READ | PROT_WRITE); + netdev->xendev.dom, + netdev->tx_ring_ref, + PROT_READ | PROT_WRITE); netdev->rxs = xc_gnttab_map_grant_ref(netdev->xendev.gnttabdev, - netdev->xendev.dom, - netdev->rx_ring_ref, - PROT_READ | PROT_WRITE); - if (!netdev->txs || !netdev->rxs) - return -1; + netdev->xendev.dom, + netdev->rx_ring_ref, + PROT_READ | PROT_WRITE); + if (!netdev->txs || !netdev->rxs) { + return -1; + } BACK_RING_INIT(&netdev->tx_ring, netdev->txs, XC_PAGE_SIZE); BACK_RING_INIT(&netdev->rx_ring, netdev->rxs, XC_PAGE_SIZE); 
xen_be_bind_evtchn(&netdev->xendev); xen_be_printf(&netdev->xendev, 1, "ok: tx-ring-ref %d, rx-ring-ref %d, " - "remote port %d, local port %d\n", - netdev->tx_ring_ref, netdev->rx_ring_ref, - netdev->xendev.remote_port, netdev->xendev.local_port); + "remote port %d, local port %d\n", + netdev->tx_ring_ref, netdev->rx_ring_ref, + netdev->xendev.remote_port, netdev->xendev.local_port); net_tx_packets(netdev); return 0; @@ -381,12 +400,12 @@ static void net_disconnect(struct XenDevice *xendev) xen_be_unbind_evtchn(&netdev->xendev); if (netdev->txs) { - xc_gnttab_munmap(netdev->xendev.gnttabdev, netdev->txs, 1); - netdev->txs = NULL; + xc_gnttab_munmap(netdev->xendev.gnttabdev, netdev->txs, 1); + netdev->txs = NULL; } if (netdev->rxs) { - xc_gnttab_munmap(netdev->xendev.gnttabdev, netdev->rxs, 1); - netdev->rxs = NULL; + xc_gnttab_munmap(netdev->xendev.gnttabdev, netdev->rxs, 1); + netdev->rxs = NULL; } if (netdev->nic) { qemu_del_vlan_client(&netdev->nic->nc); |