summaryrefslogtreecommitdiff
path: root/open-vm-tools/modules/linux/vmhgfs/file.c
diff options
context:
space:
mode:
Diffstat (limited to 'open-vm-tools/modules/linux/vmhgfs/file.c')
-rw-r--r--open-vm-tools/modules/linux/vmhgfs/file.c376
1 files changed, 329 insertions, 47 deletions
diff --git a/open-vm-tools/modules/linux/vmhgfs/file.c b/open-vm-tools/modules/linux/vmhgfs/file.c
index 3ddbfefd..bbde3f4b 100644
--- a/open-vm-tools/modules/linux/vmhgfs/file.c
+++ b/open-vm-tools/modules/linux/vmhgfs/file.c
@@ -1,5 +1,5 @@
/*********************************************************
- * Copyright (C) 2006 VMware, Inc. All rights reserved.
+ * Copyright (C) 2006-2015 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -47,6 +47,31 @@
#include "vm_assert.h"
#include "vm_basic_types.h"
+/*
+ * Before Linux 2.6.33 only O_DSYNC semantics were implemented, but using
+ * the O_SYNC flag. We continue to use the existing numerical value
+ * for O_DSYNC semantics now, but using the correct symbolic name for it.
+ * This new value is used to request true POSIX O_SYNC semantics. It is
+ * defined in this strange way to make sure applications compiled against
+ * new headers get at least O_DSYNC semantics on older kernels.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 33)
+#define HGFS_FILECTL_SYNC(flags) ((flags) & O_DSYNC)
+#else
+#define HGFS_FILECTL_SYNC(flags) ((flags) & O_SYNC)
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
+typedef struct iov_iter *hgfs_iov;
+#define HGFS_IOV_TO_COUNT(iov, nr_segs) (iov_iter_count(iov))
+#define HGFS_IOV_TO_SEGS(iov, nr_segs) (0)
+#define HGFS_IOCB_TO_POS(iocb, pos) (iocb->ki_pos)
+#else
+typedef const struct iovec *hgfs_iov;
+#define HGFS_IOV_TO_COUNT(iov, nr_segs) (iov_length(iov, nr_segs))
+#define HGFS_IOV_TO_SEGS(iov, nr_segs) (nr_segs)
+#define HGFS_IOCB_TO_POS(iocb, pos) (pos)
+#endif
+
/* Private functions. */
static int HgfsPackOpenRequest(struct inode *inode,
struct file *file,
@@ -61,14 +86,21 @@ static int HgfsUnpackOpenReply(HgfsReq *req,
static int HgfsOpen(struct inode *inode,
struct file *file);
#if defined VMW_USE_AIO
-static ssize_t HgfsAioRead(struct kiocb *iocb,
- const struct iovec *iov,
- unsigned long numSegs,
- loff_t offset);
-static ssize_t HgfsAioWrite(struct kiocb *iocb,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
+static ssize_t HgfsFileRead(struct kiocb *iocb,
+ struct iov_iter *to);
+static ssize_t HgfsFileWrite(struct kiocb *iocb,
+ struct iov_iter *from);
+#else // LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
+static ssize_t HgfsFileRead(struct kiocb *iocb,
const struct iovec *iov,
unsigned long numSegs,
loff_t offset);
+static ssize_t HgfsFileWrite(struct kiocb *iocb,
+ const struct iovec *iov,
+ unsigned long numSegs,
+ loff_t offset);
+#endif // LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
#else
static ssize_t HgfsRead(struct file *file,
char __user *buf,
@@ -83,6 +115,15 @@ static ssize_t HgfsWrite(struct file *file,
static loff_t HgfsSeek(struct file *file,
loff_t offset,
int origin);
+static int HgfsFlush(struct file *file
+#if !defined VMW_FLUSH_HAS_1_ARG
+ ,fl_owner_t id
+#endif
+ );
+
+#if !defined VMW_FSYNC_31
+static int HgfsDoFsync(struct inode *inode);
+#endif
static int HgfsFsync(struct file *file,
#if defined VMW_FSYNC_OLD
@@ -125,9 +166,19 @@ struct file_operations HgfsFileFileOperations = {
.owner = THIS_MODULE,
.open = HgfsOpen,
.llseek = HgfsSeek,
+ .flush = HgfsFlush,
#if defined VMW_USE_AIO
- .aio_read = HgfsAioRead,
- .aio_write = HgfsAioWrite,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
+ .read = new_sync_read,
+ .write = new_sync_write,
+ .read_iter = HgfsFileRead,
+ .write_iter = HgfsFileWrite,
+#else // LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
+ .read = do_sync_read,
+ .write = do_sync_write,
+ .aio_read = HgfsFileRead,
+ .aio_write = HgfsFileWrite,
+#endif // LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
#else
.read = HgfsRead,
.write = HgfsWrite,
@@ -642,7 +693,51 @@ out:
/*
*----------------------------------------------------------------------
*
- * HgfsAioRead --
+ * HgfsGenericFileRead --
+ *
+ * Called when the kernel initiates an asynchronous read from a file in
+ * our filesystem. Our function is just a thin wrapper around
+ * system generic read function.
+ *
+ *
+ * Results:
+ * Returns the number of bytes read on success, or an error on
+ * failure.
+ *
+ * Side effects:
+ * None
+ *
+ *----------------------------------------------------------------------
+ */
+
+static ssize_t
+HgfsGenericFileRead(struct kiocb *iocb, // IN: I/O control block
+ hgfs_iov iov, // IN: Array of I/O vectors
+ unsigned long iovSegs, // IN: Count of I/O vectors
+ loff_t pos) // IN: Position at which to read
+{
+ ssize_t result;
+
+ LOG(8, (KERN_DEBUG "VMware hgfs: %s(%lu@%Ld)\n",
+ __func__, (unsigned long)HGFS_IOV_TO_COUNT(iov, iovSegs),
+ (long long) pos));
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
+ result = generic_file_read_iter(iocb, iov);
+#else
+ result = generic_file_aio_read(iocb, iov, iovSegs, pos);
+#endif
+
+ LOG(8, (KERN_DEBUG "VMware hgfs: %s return %"FMTSZ"d\n",
+ __func__, result));
+ return result;
+}
+
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * HgfsFileRead --
*
* Called when the kernel initiates an asynchronous read to a file in
* our filesystem. Our function is just a thin wrapper around
@@ -658,35 +753,90 @@ out:
*----------------------------------------------------------------------
*/
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
static ssize_t
-HgfsAioRead(struct kiocb *iocb, // IN: I/O control block
- const struct iovec *iov, // OUT: Array of I/O buffers
- unsigned long numSegs, // IN: Number of buffers
- loff_t offset) // IN: Offset at which to read
+HgfsFileRead(struct kiocb *iocb, // IN: I/O control block
+ struct iov_iter *iov) // OUT: Array of I/O buffers
+#else // LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
+static ssize_t
+HgfsFileRead(struct kiocb *iocb, // IN: I/O control block
+ const struct iovec *iov, // OUT: Array of I/O buffers
+ unsigned long numSegs, // IN: Number of buffers
+ loff_t offset) // IN: Offset at which to read
+#endif // LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
{
- int result;
+ ssize_t result;
struct dentry *readDentry;
+ loff_t pos;
+ unsigned long iovSegs;
ASSERT(iocb);
ASSERT(iocb->ki_filp);
ASSERT(iocb->ki_filp->f_dentry);
ASSERT(iov);
+ pos = HGFS_IOCB_TO_POS(iocb, offset);
+ iovSegs = HGFS_IOV_TO_SEGS(iov, numSegs);
+
readDentry = iocb->ki_filp->f_dentry;
- LOG(4, (KERN_DEBUG "VMware hgfs: %s(%s/%s, %lu@%lu)\n",
+ LOG(4, (KERN_DEBUG "VMware hgfs: %s(%s/%s)\n",
__func__, readDentry->d_parent->d_name.name,
- readDentry->d_name.name,
- (unsigned long) iov_length(iov, numSegs), (unsigned long) offset));
+ readDentry->d_name.name));
result = HgfsRevalidate(readDentry);
if (result) {
- LOG(4, (KERN_DEBUG "VMware hgfs: HgfsAioRead: invalid dentry\n"));
+ LOG(4, (KERN_DEBUG "VMware hgfs: %s: invalid dentry\n", __func__));
goto out;
}
- result = generic_file_aio_read(iocb, iov, numSegs, offset);
- out:
+ result = HgfsGenericFileRead(iocb, iov, iovSegs, pos);
+
+out:
+ return result;
+}
+
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * HgfsGenericFileWrite --
+ *
+ * Called when the kernel initiates an asynchronous write to a file in
+ * our filesystem. Our function is just a thin wrapper around
+ * system generic write function.
+ *
+ *
+ * Results:
+ * Returns the number of bytes written on success, or an error on
+ * failure.
+ *
+ * Side effects:
+ * None
+ *
+ *----------------------------------------------------------------------
+ */
+
+static ssize_t
+HgfsGenericFileWrite(struct kiocb *iocb, // IN: I/O control block
+ hgfs_iov iov, // IN: Array of I/O vectors
+ unsigned long iovSegs, // IN: Count of I/O vectors
+ loff_t pos) // IN: Position at which to write
+{
+ ssize_t result;
+
+ LOG(8, (KERN_DEBUG "VMware hgfs: %s(%lu@%Ld)\n",
+ __func__, (unsigned long)HGFS_IOV_TO_COUNT(iov, iovSegs),
+ (long long) pos));
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
+ result = generic_file_write_iter(iocb, iov);
+#else
+ result = generic_file_aio_write(iocb, iov, iovSegs, pos);
+#endif
+
+ LOG(8, (KERN_DEBUG "VMware hgfs: %s return %"FMTSZ"d\n",
+ __func__, result));
return result;
}
@@ -694,7 +844,7 @@ HgfsAioRead(struct kiocb *iocb, // IN: I/O control block
/*
*----------------------------------------------------------------------
*
- * HgfsAioWrite --
+ * HgfsFileWrite --
*
* Called when the kernel initiates an asynchronous write to a file in
* our filesystem. Our function is just a thin wrapper around
@@ -713,34 +863,60 @@ HgfsAioRead(struct kiocb *iocb, // IN: I/O control block
*----------------------------------------------------------------------
*/
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
+ssize_t
+HgfsFileWrite(struct kiocb *iocb, // IN: I/O control block
+ struct iov_iter *iov) // IN: Array of I/O buffers
+#else // LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
static ssize_t
-HgfsAioWrite(struct kiocb *iocb, // IN: I/O control block
- const struct iovec *iov, // IN: Array of I/O buffers
- unsigned long numSegs, // IN: Number of buffers
- loff_t offset) // IN: Offset at which to read
+HgfsFileWrite(struct kiocb *iocb, // IN: I/O control block
+ const struct iovec *iov, // IN: Array of I/O buffers
+ unsigned long numSegs, // IN: Number of buffers
+ loff_t offset) // IN: Offset at which to write
+#endif // LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
{
- int result;
+ ssize_t result;
struct dentry *writeDentry;
+ loff_t pos;
+ unsigned long iovSegs;
ASSERT(iocb);
ASSERT(iocb->ki_filp);
ASSERT(iocb->ki_filp->f_dentry);
ASSERT(iov);
+ pos = HGFS_IOCB_TO_POS(iocb, offset);
+ iovSegs = HGFS_IOV_TO_SEGS(iov, numSegs);
+
writeDentry = iocb->ki_filp->f_dentry;
- LOG(4, (KERN_DEBUG "VMware hgfs: %s(%s/%s, %lu@%Ld)\n",
+ LOG(4, (KERN_DEBUG "VMware hgfs: %s(%s/%s)\n",
__func__, writeDentry->d_parent->d_name.name,
- writeDentry->d_name.name,
- (unsigned long) iov_length(iov, numSegs), (long long) offset));
+ writeDentry->d_name.name));
result = HgfsRevalidate(writeDentry);
if (result) {
- LOG(4, (KERN_DEBUG "VMware hgfs: HgfsAioWrite: invalid dentry\n"));
+ LOG(4, (KERN_DEBUG "VMware hgfs: %s: invalid dentry\n", __func__));
goto out;
}
- result = generic_file_aio_write(iocb, iov, numSegs, offset);
+ result = HgfsGenericFileWrite(iocb, iov, iovSegs, pos);
+
+ if (result >= 0) {
+ if (IS_SYNC(writeDentry->d_inode) ||
+ HGFS_FILECTL_SYNC(iocb->ki_filp->f_flags)) {
+ int error;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+ error = vfs_fsync(iocb->ki_filp, 0);
+#else
+ error = HgfsDoFsync(writeDentry->d_inode);
+#endif
+ if (error < 0) {
+ result = error;
+ }
+ }
+ }
+
out:
return result;
}
@@ -896,6 +1072,101 @@ HgfsSeek(struct file *file, // IN: File to seek
}
+#if !defined VMW_FSYNC_31
+/*
+ *----------------------------------------------------------------------
+ *
+ * HgfsDoFsync --
+ *
+ * Helper for HgfsFlush() and HgfsFsync().
+ *
+ * The hgfs protocol doesn't support fsync explicitly yet.
+ * So for now, we flush all the pages to presumably honor the
+ * intent of an app calling fsync() which is to get the
+ * data onto persistent storage. As things stand now we're at
+ * the whim of the hgfs server code running on the host to fsync or
+ * not if and when it pleases.
+ *
+ *
+ * Results:
+ * Returns zero on success. Otherwise an error.
+ *
+ * Side effects:
+ * None.
+ *
+ *----------------------------------------------------------------------
+ */
+
+static int
+HgfsDoFsync(struct inode *inode) // IN: File we operate on
+{
+ int ret;
+
+ LOG(4, (KERN_DEBUG "VMware hgfs: %s(%"FMT64"u)\n",
+ __func__, INODE_GET_II_P(inode)->hostFileId));
+
+ ret = compat_filemap_write_and_wait(inode->i_mapping);
+
+ LOG(4, (KERN_DEBUG "VMware hgfs: %s: returns %d\n",
+ __func__, ret));
+
+ return ret;
+}
+#endif
+
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * HgfsFlush --
+ *
+ * Called when user process calls fflush() on an hgfs file.
+ * Flush all dirty pages and check for write errors.
+ *
+ *
+ * Results:
+ * Returns zero on success. (Currently always succeeds).
+ *
+ * Side effects:
+ * None.
+ *
+ *----------------------------------------------------------------------
+ */
+
+static int
+HgfsFlush(struct file *file // IN: file to flush
+#if !defined VMW_FLUSH_HAS_1_ARG
+ ,fl_owner_t id // IN: id not used
+#endif
+ )
+{
+ int ret = 0;
+
+ LOG(4, (KERN_DEBUG "VMware hgfs: %s(%s/%s)\n",
+ __func__, file->f_dentry->d_parent->d_name.name,
+ file->f_dentry->d_name.name));
+
+ if ((file->f_mode & FMODE_WRITE) == 0) {
+ goto exit;
+ }
+
+
+ /* Flush writes to the server and return any errors */
+ LOG(6, (KERN_DEBUG "VMware hgfs: %s: calling vfs_sync ... \n",
+ __func__));
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+ ret = vfs_fsync(file, 0);
+#else
+ ret = HgfsDoFsync(file->f_dentry->d_inode);
+#endif
+
+exit:
+ LOG(4, (KERN_DEBUG "VMware hgfs: %s: returns %d\n",
+ __func__, ret));
+ return ret;
+}
+
+
/*
*----------------------------------------------------------------------
*
@@ -903,21 +1174,13 @@ HgfsSeek(struct file *file, // IN: File to seek
*
* Called when user process calls fsync() on hgfs file.
*
- * The hgfs protocol doesn't support fsync yet, so for now, we punt
- * and just return success. This is a little less sketchy than it
- * might sound, because hgfs skips the buffer cache in the guest
- * anyway (we always write to the host immediately).
- *
- * In the future we might want to try harder though, since
- * presumably the intent of an app calling fsync() is to get the
+ * The hgfs protocol doesn't support fsync explicitly yet,
+ * so for now, we flush all the pages to presumably honor the
+ * intent of an app calling fsync() which is to get the
* data onto persistent storage, and as things stand now we're at
* the whim of the hgfs server code running on the host to fsync or
* not if and when it pleases.
*
- * Note that do_fsync will call filemap_fdatawrite() before us and
- * filemap_fdatawait() after us, so there's no need to do anything
- * here w.r.t. writing out dirty pages.
- *
* Results:
* Returns zero on success. (Currently always succeeds).
*
@@ -937,18 +1200,37 @@ HgfsFsync(struct file *file, // IN: File we operate on
#endif
int datasync) // IN: fdatasync or fsync
{
- LOG(6, (KERN_DEBUG "VMware hgfs: %s(%s/%s, %lld, %lld, %d)\n",
+ int ret = 0;
+ loff_t startRange;
+ loff_t endRange;
+ struct inode *inode;
+
+#if defined VMW_FSYNC_31
+ startRange = start;
+ endRange = end;
+#else
+ startRange = 0;
+ endRange = MAX_INT64;
+#endif
+
+ LOG(4, (KERN_DEBUG "VMware hgfs: %s(%s/%s, %lld, %lld, %d)\n",
__func__,
file->f_dentry->d_parent->d_name.name,
file->f_dentry->d_name.name,
+ startRange, endRange,
+ datasync));
+
+ /* Flush writes to the server and return any errors */
+ inode = file->f_dentry->d_inode;
#if defined VMW_FSYNC_31
- start, end,
+ ret = filemap_write_and_wait_range(inode->i_mapping, startRange, endRange);
#else
- (loff_t)0, (loff_t)0,
+ ret = HgfsDoFsync(inode);
#endif
- datasync));
- return 0;
+ LOG(4, (KERN_DEBUG "VMware hgfs: %s: written pages %lld, %lld returns %d)\n",
+ __func__, startRange, endRange, ret));
+ return ret;
}