author     Jeff Layton <jlayton@redhat.com>    2012-03-23 14:40:56 -0400
committer  Jeff Layton <jlayton@redhat.com>    2012-03-23 14:40:56 -0400
commit     da82f7e755d2808ba726c9b23267d5bb23980e94
tree       16fe36b382416b4bc01edd886f3f1670c3aac4db /fs/cifs/file.c
parent     597b027f694481ffeebcffe634c24b807198d46c
cifs: convert cifs_iovec_write to use async writes
Signed-off-by: Jeff Layton <jlayton@redhat.com>
Reviewed-by: Pavel Shilovsky <piastry@etersoft.ru>
Diffstat (limited to 'fs/cifs/file.c')
-rw-r--r--  fs/cifs/file.c | 223
1 file changed, 138 insertions(+), 85 deletions(-)
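
Note: the patch below replaces the synchronous CIFSSMBWrite2() retry loop in
cifs_iovec_write() with per-chunk cifs_writedata requests that are submitted
asynchronously, kept on a local list, and then collected in submission order.
The following stand-alone user-space sketch is only an analogue of that
submit-then-collect shape, not the kernel implementation: every name in it is
hypothetical, pthreads stand in for the kernel workqueue, and a dummy
send_chunk() stands in for cifs_async_writev(). Build it with "cc -pthread".

/* Illustrative analogue of the chunked submit-then-collect pattern. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>

#define CHUNK_SIZE 16          /* stand-in for the negotiated wsize */

struct wreq {
	const char *buf;        /* chunk of caller data */
	size_t bytes;           /* length of this chunk */
	off_t offset;           /* file offset for this chunk */
	ssize_t result;         /* bytes "written", or negative error */
	pthread_t thread;       /* stand-in for the async completion */
	struct wreq *next;      /* list of in-flight requests */
};

/* worker: pretend to write one chunk (the "async send" of the pattern) */
static void *send_chunk(void *arg)
{
	struct wreq *req = arg;

	/* a real implementation would issue the network write here */
	req->result = (ssize_t)req->bytes;
	return NULL;
}

static ssize_t chunked_async_write(const char *buf, size_t len, off_t offset)
{
	struct wreq *head = NULL, **tail = &head, *req, *next;
	ssize_t total = 0;
	int rc = 0;

	/* submission loop: one request per CHUNK_SIZE bytes */
	while (len > 0) {
		size_t cur = len < CHUNK_SIZE ? len : CHUNK_SIZE;

		req = calloc(1, sizeof(*req));
		if (!req) {
			rc = -1;
			break;
		}
		req->buf = buf;
		req->bytes = cur;
		req->offset = offset;
		if (pthread_create(&req->thread, NULL, send_chunk, req)) {
			free(req);
			rc = -1;
			break;
		}
		*tail = req;
		tail = &req->next;

		buf += cur;
		offset += cur;
		len -= cur;
	}

	/* if at least one chunk was sent, ignore a later submission error */
	if (head)
		rc = 0;

	/* collection loop: wait for each request in submission order */
	for (req = head; req; req = next) {
		next = req->next;
		pthread_join(req->thread, NULL);
		if (!rc) {
			if (req->result < 0)
				rc = -1;
			else
				total += req->result;
		}
		free(req);
	}

	return total ? total : rc;
}

int main(void)
{
	const char data[] = "forty-two bytes of data to write in chunks";
	ssize_t n = chunked_async_write(data, strlen(data), 0);

	printf("wrote %zd bytes\n", n);
	return 0;
}

As in the patch, a submission failure after at least one successful send is
ignored at first, and whatever error a collected request reports (or the
signal-interrupted wait, in the kernel code) becomes the return value only if
nothing was written at all.
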
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 6883b08f848c..daaaca82eeb2 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -2106,24 +2106,79 @@ size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
return num_pages;
}
+static void
+cifs_uncached_marshal_iov(struct kvec *iov, struct cifs_writedata *wdata)
+{
+ int i;
+ size_t bytes = wdata->bytes;
+
+ /* marshal up the pages into iov array */
+ for (i = 0; i < wdata->nr_pages; i++) {
+ iov[i + 1].iov_len = min(bytes, PAGE_SIZE);
+ iov[i + 1].iov_base = kmap(wdata->pages[i]);
+ bytes -= iov[i + 1].iov_len;
+ }
+}
+
+static void
+cifs_uncached_writev_complete(struct work_struct *work)
+{
+ int i;
+ struct cifs_writedata *wdata = container_of(work,
+ struct cifs_writedata, work);
+ struct inode *inode = wdata->cfile->dentry->d_inode;
+ struct cifsInodeInfo *cifsi = CIFS_I(inode);
+
+ spin_lock(&inode->i_lock);
+ cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
+ if (cifsi->server_eof > inode->i_size)
+ i_size_write(inode, cifsi->server_eof);
+ spin_unlock(&inode->i_lock);
+
+ complete(&wdata->done);
+
+ if (wdata->result != -EAGAIN) {
+ for (i = 0; i < wdata->nr_pages; i++)
+ put_page(wdata->pages[i]);
+ }
+
+ kref_put(&wdata->refcount, cifs_writedata_release);
+}
+
+/* attempt to send write to server, retry on any -EAGAIN errors */
+static int
+cifs_uncached_retry_writev(struct cifs_writedata *wdata)
+{
+ int rc;
+
+ do {
+ if (wdata->cfile->invalidHandle) {
+ rc = cifs_reopen_file(wdata->cfile, false);
+ if (rc != 0)
+ continue;
+ }
+ rc = cifs_async_writev(wdata);
+ } while (rc == -EAGAIN);
+
+ return rc;
+}
+
static ssize_t
cifs_iovec_write(struct file *file, const struct iovec *iov,
unsigned long nr_segs, loff_t *poffset)
{
- unsigned int written;
- unsigned long num_pages, npages, i;
+ unsigned long nr_pages, i;
size_t copied, len, cur_len;
ssize_t total_written = 0;
- struct kvec *to_send;
- struct page **pages;
+ loff_t offset = *poffset;
struct iov_iter it;
- struct inode *inode;
struct cifsFileInfo *open_file;
- struct cifs_tcon *pTcon;
+ struct cifs_tcon *tcon;
struct cifs_sb_info *cifs_sb;
- struct cifs_io_parms io_parms;
- int xid, rc;
- __u32 pid;
+ struct cifs_writedata *wdata, *tmp;
+ struct list_head wdata_list;
+ int rc;
+ pid_t pid;
len = iov_length(iov, nr_segs);
if (!len)
@@ -2133,105 +2188,103 @@ cifs_iovec_write(struct file *file, const struct iovec *iov,
if (rc)
return rc;
+ INIT_LIST_HEAD(&wdata_list);
cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
- num_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
-
- pages = kmalloc(sizeof(struct pages *)*num_pages, GFP_KERNEL);
- if (!pages)
- return -ENOMEM;
-
- to_send = kmalloc(sizeof(struct kvec)*(num_pages + 1), GFP_KERNEL);
- if (!to_send) {
- kfree(pages);
- return -ENOMEM;
- }
-
- rc = cifs_write_allocate_pages(pages, num_pages);
- if (rc) {
- kfree(pages);
- kfree(to_send);
- return rc;
- }
-
- xid = GetXid();
open_file = file->private_data;
+ tcon = tlink_tcon(open_file->tlink);
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
pid = open_file->pid;
else
pid = current->tgid;
- pTcon = tlink_tcon(open_file->tlink);
- inode = file->f_path.dentry->d_inode;
-
iov_iter_init(&it, iov, nr_segs, len, 0);
- npages = num_pages;
-
do {
- size_t save_len = cur_len;
- for (i = 0; i < npages; i++) {
- copied = min_t(const size_t, cur_len, PAGE_CACHE_SIZE);
- copied = iov_iter_copy_from_user(pages[i], &it, 0,
- copied);
+ size_t save_len;
+
+ nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
+ wdata = cifs_writedata_alloc(nr_pages,
+ cifs_uncached_writev_complete);
+ if (!wdata) {
+ rc = -ENOMEM;
+ break;
+ }
+
+ rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
+ if (rc) {
+ kfree(wdata);
+ break;
+ }
+
+ save_len = cur_len;
+ for (i = 0; i < nr_pages; i++) {
+ copied = min_t(const size_t, cur_len, PAGE_SIZE);
+ copied = iov_iter_copy_from_user(wdata->pages[i], &it,
+ 0, copied);
cur_len -= copied;
iov_iter_advance(&it, copied);
- to_send[i+1].iov_base = kmap(pages[i]);
- to_send[i+1].iov_len = copied;
}
-
cur_len = save_len - cur_len;
- do {
- if (open_file->invalidHandle) {
- rc = cifs_reopen_file(open_file, false);
- if (rc != 0)
- break;
- }
- io_parms.netfid = open_file->netfid;
- io_parms.pid = pid;
- io_parms.tcon = pTcon;
- io_parms.offset = *poffset;
- io_parms.length = cur_len;
- rc = CIFSSMBWrite2(xid, &io_parms, &written, to_send,
- npages, 0);
- } while (rc == -EAGAIN);
-
- for (i = 0; i < npages; i++)
- kunmap(pages[i]);
-
- if (written) {
- len -= written;
- total_written += written;
- spin_lock(&inode->i_lock);
- cifs_update_eof(CIFS_I(inode), *poffset, written);
- spin_unlock(&inode->i_lock);
- *poffset += written;
- } else if (rc < 0) {
- if (!total_written)
- total_written = rc;
+ wdata->sync_mode = WB_SYNC_ALL;
+ wdata->nr_pages = nr_pages;
+ wdata->offset = (__u64)offset;
+ wdata->cfile = cifsFileInfo_get(open_file);
+ wdata->pid = pid;
+ wdata->bytes = cur_len;
+ wdata->marshal_iov = cifs_uncached_marshal_iov;
+ rc = cifs_uncached_retry_writev(wdata);
+ if (rc) {
+ kref_put(&wdata->refcount, cifs_writedata_release);
break;
}
- /* get length and number of kvecs of the next write */
- npages = get_numpages(cifs_sb->wsize, len, &cur_len);
+ list_add_tail(&wdata->list, &wdata_list);
+ offset += cur_len;
+ len -= cur_len;
} while (len > 0);
- if (total_written > 0) {
- spin_lock(&inode->i_lock);
- if (*poffset > inode->i_size)
- i_size_write(inode, *poffset);
- spin_unlock(&inode->i_lock);
+ /*
+ * If at least one write was successfully sent, then discard any rc
+ * value from the later writes. If the other write succeeds, then
+ * we'll end up returning whatever was written. If it fails, then
+ * we'll get a new rc value from that.
+ */
+ if (!list_empty(&wdata_list))
+ rc = 0;
+
+ /*
+ * Wait for and collect replies for any successful sends in order of
+ * increasing offset. Once an error is hit or we get a fatal signal
+ * while waiting, then return without waiting for any more replies.
+ */
+restart_loop:
+ list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
+ if (!rc) {
+ /* FIXME: freezable too? */
+ rc = wait_for_completion_killable(&wdata->done);
+ if (rc)
+ rc = -EINTR;
+ else if (wdata->result)
+ rc = wdata->result;
+ else
+ total_written += wdata->bytes;
+
+ /* resend call if it's a retryable error */
+ if (rc == -EAGAIN) {
+ rc = cifs_uncached_retry_writev(wdata);
+ goto restart_loop;
+ }
+ }
+ list_del_init(&wdata->list);
+ kref_put(&wdata->refcount, cifs_writedata_release);
}
- cifs_stats_bytes_written(pTcon, total_written);
- mark_inode_dirty_sync(inode);
+ if (total_written > 0)
+ *poffset += total_written;
- for (i = 0; i < num_pages; i++)
- put_page(pages[i]);
- kfree(to_send);
- kfree(pages);
- FreeXid(xid);
- return total_written;
+ cifs_stats_bytes_written(tcon, total_written);
+ return total_written ? total_written : (ssize_t)rc;
}
ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,