@@ -183,6 +183,7 @@ menuconfig MISC_FILESYSTEMS
if MISC_FILESYSTEMS
+source "fs/orangefs/Kconfig"
source "fs/adfs/Kconfig"
source "fs/affs/Kconfig"
source "fs/ecryptfs/Kconfig"
@@ -105,6 +105,7 @@ obj-$(CONFIG_AUTOFS4_FS) += autofs4/
obj-$(CONFIG_ADFS_FS) += adfs/
obj-$(CONFIG_FUSE_FS) += fuse/
obj-$(CONFIG_OVERLAY_FS) += overlayfs/
+obj-$(CONFIG_ORANGEFS_FS) += orangefs/
obj-$(CONFIG_UDF_FS) += udf/
obj-$(CONFIG_SUN_OPENPROMFS) += openpromfs/
obj-$(CONFIG_OMFS_FS) += omfs/
new file mode 100644
@@ -0,0 +1,6 @@
+config ORANGEFS_FS
+ tristate "ORANGEFS (Powered by PVFS) support"
+ select FS_POSIX_ACL
+ help
+ Orange is a parallel file system designed for use on high end
+ computing (HEC) systems.
new file mode 100644
@@ -0,0 +1,9 @@
+#
+# Makefile for the ORANGEFS filesystem.
+#
+
+obj-$(CONFIG_ORANGEFS_FS) += orangefs.o
+
+orangefs-objs := acl.o file.o pvfs2-cache.o pvfs2-utils.o xattr.o dcache.o \
+ inode.o pvfs2-mod.o super.o devpvfs2-req.o namei.o symlink.o\
+ dir.o pvfs2-bufmap.o pvfs2-proc.o waitqueue.o
new file mode 100644
@@ -0,0 +1,903 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * Changes by Acxiom Corporation to add protocol version to kernel
+ * communication, Copyright Acxiom Corporation, 2005.
+ *
+ * See COPYING in top-level directory.
+ */
+
+#include "protocol.h"
+#include "pvfs2-kernel.h"
+#include "pvfs2-dev-proto.h"
+#include "pvfs2-bufmap.h"
+
+/* this file implements the /dev/pvfs2-req device node */
+
+static int open_access_count;
+
+#define DUMP_DEVICE_ERROR() \
+do { \
+ gossip_err("*****************************************************\n");\
+ gossip_err("PVFS2 Device Error: You cannot open the device file "); \
+ gossip_err("\n/dev/%s more than once. Please make sure that\nthere " \
+ "are no ", PVFS2_REQDEVICE_NAME); \
+ gossip_err("instances of a program using this device\ncurrently " \
+ "running. (You must verify this!)\n"); \
+ gossip_err("For example, you can use the lsof program as follows:\n");\
+ gossip_err("'lsof | grep %s' (run this as root)\n", \
+ PVFS2_REQDEVICE_NAME); \
+ gossip_err(" open_access_count = %d\n", open_access_count); \
+ gossip_err("*****************************************************\n");\
+} while (0)
+
+static int hash_func(uint64_t tag, int table_size)
+{
+ return tag % ((unsigned int)table_size);
+}
+
+static void pvfs2_devreq_add_op(struct pvfs2_kernel_op *op)
+{
+ int index = hash_func(op->tag, hash_table_size);
+
+ spin_lock(&htable_ops_in_progress_lock);
+ list_add_tail(&op->list, &htable_ops_in_progress[index]);
+ spin_unlock(&htable_ops_in_progress_lock);
+}
+
+static struct pvfs2_kernel_op *pvfs2_devreq_remove_op(uint64_t tag)
+{
+ struct pvfs2_kernel_op *op, *next;
+ int index;
+
+ index = hash_func(tag, hash_table_size);
+
+ spin_lock(&htable_ops_in_progress_lock);
+ list_for_each_entry_safe(op,
+ next,
+ &htable_ops_in_progress[index],
+ list) {
+ if (op->tag == tag) {
+ list_del(&op->list);
+ spin_unlock(&htable_ops_in_progress_lock);
+ return op;
+ }
+ }
+
+ spin_unlock(&htable_ops_in_progress_lock);
+ return NULL;
+}
+
+static int pvfs2_devreq_open(struct inode *inode, struct file *file)
+{
+ int ret = -EINVAL;
+
+ if (!(file->f_flags & O_NONBLOCK)) {
+ gossip_err("pvfs2: device cannot be opened in blocking mode\n");
+ return ret;
+ }
+ ret = -EACCES;
+ gossip_debug(GOSSIP_DEV_DEBUG, "pvfs2-client-core: opening device\n");
+ mutex_lock(&devreq_mutex);
+
+ if (open_access_count == 0) {
+ ret = generic_file_open(inode, file);
+ if (ret == 0)
+ open_access_count++;
+ } else {
+ DUMP_DEVICE_ERROR();
+ }
+ mutex_unlock(&devreq_mutex);
+
+ gossip_debug(GOSSIP_DEV_DEBUG,
+ "pvfs2-client-core: open device complete (ret = %d)\n",
+ ret);
+ return ret;
+}
+
+static ssize_t pvfs2_devreq_read(struct file *file,
+ char __user *buf,
+ size_t count, loff_t *offset)
+{
+ int ret = 0;
+ ssize_t len = 0;
+ struct pvfs2_kernel_op *cur_op = NULL;
+ static int32_t magic = PVFS2_DEVREQ_MAGIC;
+ int32_t proto_ver = PVFS_KERNEL_PROTO_VERSION;
+
+ if (!(file->f_flags & O_NONBLOCK)) {
+ /* We do not support blocking reads/opens any more */
+ gossip_err("pvfs2: blocking reads are not supported! (pvfs2-client-core bug)\n");
+ return -EINVAL;
+ } else {
+ struct pvfs2_kernel_op *op = NULL, *temp = NULL;
+ /* get next op (if any) from top of list */
+ spin_lock(&pvfs2_request_list_lock);
+ list_for_each_entry_safe(op, temp, &pvfs2_request_list, list) {
+ int32_t fsid = fsid_of_op(op);
+ /*
+ * Check if this op's fsid is known and needs
+ * remounting
+ */
+ if (fsid != PVFS_FS_ID_NULL &&
+ fs_mount_pending(fsid) == 1) {
+ gossip_debug(GOSSIP_DEV_DEBUG,
+ "Skipping op tag %llu %s\n",
+ llu(op->tag),
+ get_opname_string(op));
+ continue;
+ } else {
+ /*
+ * op does not belong to any particular fsid
+ * or already mounted.. let it through
+ */
+ cur_op = op;
+ spin_lock(&cur_op->lock);
+ list_del(&cur_op->list);
+ cur_op->op_linger_tmp--;
+ /*
+ * if there is a trailer, re-add it to
+ * the request list.
+ */
+ if (cur_op->op_linger == 2 &&
+ cur_op->op_linger_tmp == 1) {
+ if (cur_op->upcall.trailer_size <= 0 ||
+ cur_op->upcall.trailer_buf == NULL)
+ gossip_err("BUG:trailer_size is %ld and trailer buf is %p\n", (long)cur_op->upcall.trailer_size, cur_op->upcall.trailer_buf);
+ /* re-add it to the head of the list */
+ list_add(&cur_op->list,
+ &pvfs2_request_list);
+ }
+ spin_unlock(&cur_op->lock);
+ break;
+ }
+ }
+ spin_unlock(&pvfs2_request_list_lock);
+ }
+
+ if (cur_op) {
+ spin_lock(&cur_op->lock);
+
+ gossip_debug(GOSSIP_DEV_DEBUG,
+ "client-core: reading op tag %llu %s\n",
+ llu(cur_op->tag), get_opname_string(cur_op));
+ if (op_state_in_progress(cur_op) || op_state_serviced(cur_op)) {
+ if (cur_op->op_linger == 1)
+ gossip_err("WARNING: Current op already queued...skipping\n");
+ } else if (cur_op->op_linger == 1 ||
+ (cur_op->op_linger == 2 &&
+ cur_op->op_linger_tmp == 0)) {
+ /*
+ * atomically move the operation to the
+ * htable_ops_in_progress
+ */
+ set_op_state_inprogress(cur_op);
+ pvfs2_devreq_add_op(cur_op);
+ }
+
+ spin_unlock(&cur_op->lock);
+
+ /* 2 cases
+ * a) OPs with no trailers
+ * b) OPs with trailers, Stage 1
+ * Either way push the upcall out
+ */
+ if (cur_op->op_linger == 1 ||
+ (cur_op->op_linger == 2 && cur_op->op_linger_tmp == 1)) {
+ len = MAX_ALIGNED_DEV_REQ_UPSIZE;
+ if ((size_t) len <= count) {
+ ret = copy_to_user(buf,
+ &proto_ver,
+ sizeof(int32_t));
+ if (ret == 0) {
+ ret = copy_to_user(buf + sizeof(int32_t),
+ &magic,
+ sizeof(int32_t));
+ if (ret == 0) {
+ ret = copy_to_user(buf+2 * sizeof(int32_t),
+ &cur_op->tag,
+ sizeof(uint64_t));
+ if (ret == 0) {
+ ret = copy_to_user(
+ buf +
+ 2 *
+ sizeof(int32_t) +
+ sizeof(uint64_t),
+ &cur_op->upcall,
+ sizeof(struct pvfs2_upcall_s));
+ }
+ }
+ }
+
+ if (ret) {
+ gossip_err("Failed to copy data to user space\n");
+ len = -EFAULT;
+ }
+ } else {
+ gossip_err
+ ("Failed to copy data to user space\n");
+ len = -EIO;
+ }
+ }
+ /* Stage 2: Push the trailer out */
+ else if (cur_op->op_linger == 2 && cur_op->op_linger_tmp == 0) {
+ len = cur_op->upcall.trailer_size;
+ if ((size_t) len <= count) {
+ ret = copy_to_user(buf,
+ cur_op->upcall.trailer_buf,
+ len);
+ if (ret) {
+ gossip_err("Failed to copy trailer to user space\n");
+ len = -EFAULT;
+ }
+ } else {
+ gossip_err("Read buffer for trailer is too small (%ld as opposed to %ld)\n",
+ (long)count,
+ (long)len);
+ len = -EIO;
+ }
+ } else {
+ gossip_err("cur_op: %p (op_linger %d), (op_linger_tmp %d), erroneous request list?\n",
+ cur_op,
+ cur_op->op_linger,
+ cur_op->op_linger_tmp);
+ len = 0;
+ }
+ } else if (file->f_flags & O_NONBLOCK) {
+ /*
+ * if in non-blocking mode, return EAGAIN since no requests are
+ * ready yet
+ */
+ len = -EAGAIN;
+ }
+ return len;
+}
+
+/* Function for writev() callers into the device */
+static ssize_t pvfs2_devreq_writev(struct file *file,
+ const struct iovec *iov,
+ size_t count,
+ loff_t *offset)
+{
+ struct pvfs2_kernel_op *op = NULL;
+ void *buffer = NULL;
+ void *ptr = NULL;
+ unsigned long i = 0;
+ static int max_downsize = MAX_ALIGNED_DEV_REQ_DOWNSIZE;
+ int ret = 0, num_remaining = max_downsize;
+ int notrailer_count = 4; /* num elements in iovec without trailer */
+ int payload_size = 0;
+ int32_t magic = 0;
+ int32_t proto_ver = 0;
+ uint64_t tag = 0;
+ ssize_t total_returned_size = 0;
+
+ /* Either there is a trailer or there isn't */
+ if (count != notrailer_count && count != (notrailer_count + 1)) {
+ gossip_err("Error: Number of iov vectors is (%ld) and notrailer count is %d\n",
+ count,
+ notrailer_count);
+ return -EPROTO;
+ }
+ buffer = dev_req_alloc();
+ if (!buffer)
+ return -ENOMEM;
+ ptr = buffer;
+
+ for (i = 0; i < notrailer_count; i++) {
+ if (iov[i].iov_len > num_remaining) {
+ gossip_err
+ ("writev error: Freeing buffer and returning\n");
+ dev_req_release(buffer);
+ return -EMSGSIZE;
+ }
+ ret = copy_from_user(ptr, iov[i].iov_base, iov[i].iov_len);
+ if (ret) {
+ gossip_err("Failed to copy data from user space\n");
+ dev_req_release(buffer);
+ return -EIO;
+ }
+ num_remaining -= iov[i].iov_len;
+ ptr += iov[i].iov_len;
+ payload_size += iov[i].iov_len;
+ }
+ total_returned_size = payload_size;
+
+	/* these elements are currently 8 byte aligned (8 bytes for (version +
+	 * magic) 8 bytes for tag). If you add another element, either
+	 * make it 8 bytes big, or use get_unaligned when assigning.
+	 */
+ ptr = buffer;
+ proto_ver = *((int32_t *) ptr);
+ ptr += sizeof(int32_t);
+
+ magic = *((int32_t *) ptr);
+ ptr += sizeof(int32_t);
+
+ tag = *((uint64_t *) ptr);
+ ptr += sizeof(uint64_t);
+
+ if (magic != PVFS2_DEVREQ_MAGIC) {
+ gossip_err("Error: Device magic number does not match.\n");
+ dev_req_release(buffer);
+ return -EPROTO;
+ }
+ if (proto_ver != PVFS_KERNEL_PROTO_VERSION) {
+ gossip_err("Error: Device protocol version numbers do not match.\n");
+ gossip_err("Please check that your pvfs2 module and pvfs2-client versions are consistent.\n");
+ dev_req_release(buffer);
+ return -EPROTO;
+ }
+
+ op = pvfs2_devreq_remove_op(tag);
+ if (op) {
+ /* Increase ref count! */
+ get_op(op);
+ /* cut off magic and tag from payload size */
+ payload_size -= (2 * sizeof(int32_t) + sizeof(uint64_t));
+ if (payload_size <= sizeof(struct pvfs2_downcall))
+ /* copy the passed in downcall into the op */
+ memcpy(&op->downcall,
+ ptr,
+ sizeof(struct pvfs2_downcall));
+ else
+ gossip_debug(GOSSIP_DEV_DEBUG,
+ "writev: Ignoring %d bytes\n",
+ payload_size);
+
+ /* Do not allocate needlessly if client-core forgets
+ * to reset trailer size on op errors.
+ */
+ if (op->downcall.status == 0 && op->downcall.trailer_size > 0) {
+ gossip_debug(GOSSIP_DEV_DEBUG,
+ "writev: trailer size %ld\n",
+ (unsigned long)op->downcall.trailer_size);
+ if (count != (notrailer_count + 1)) {
+ gossip_err("Error: trailer size (%ld) is non-zero, no trailer elements though? (%ld)\n", (unsigned long)op->downcall.trailer_size, count);
+ dev_req_release(buffer);
+ put_op(op);
+ return -EPROTO;
+ }
+ if (iov[notrailer_count].iov_len >
+ op->downcall.trailer_size) {
+ gossip_err("writev error: trailer size (%ld) != iov_len (%ld)\n", (unsigned long)op->downcall.trailer_size, (unsigned long)iov[notrailer_count].iov_len);
+ dev_req_release(buffer);
+ put_op(op);
+ return -EMSGSIZE;
+ }
+ /* Allocate a buffer large enough to hold the
+ * trailer bytes.
+ */
+ op->downcall.trailer_buf =
+ vmalloc(op->downcall.trailer_size);
+ if (op->downcall.trailer_buf != NULL) {
+ gossip_debug(GOSSIP_DEV_DEBUG, "vmalloc: %p\n",
+ op->downcall.trailer_buf);
+ ret = copy_from_user(op->downcall.trailer_buf,
+ iov[notrailer_count].
+ iov_base,
+ iov[notrailer_count].
+ iov_len);
+ if (ret) {
+ gossip_err("Failed to copy trailer data from user space\n");
+ dev_req_release(buffer);
+ gossip_debug(GOSSIP_DEV_DEBUG,
+ "vfree: %p\n",
+ op->downcall.trailer_buf);
+ vfree(op->downcall.trailer_buf);
+ op->downcall.trailer_buf = NULL;
+ put_op(op);
+ return -EIO;
+ }
+ } else {
+ /* Change downcall status */
+ op->downcall.status = -ENOMEM;
+ gossip_err("writev: could not vmalloc for trailer!\n");
+ }
+ }
+
+ /* if this operation is an I/O operation and if it was
+ * initiated on behalf of a *synchronous* VFS I/O operation,
+ * only then we need to wait
+ * for all data to be copied before we can return to avoid
+ * buffer corruption and races that can pull the buffers
+ * out from under us.
+ *
+ * Essentially we're synchronizing with other parts of the
+ * vfs implicitly by not allowing the user space
+ * application reading/writing this device to return until
+ * the buffers are done being used.
+ */
+ if ((op->upcall.type == PVFS2_VFS_OP_FILE_IO &&
+ op->upcall.req.io.async_vfs_io == PVFS_VFS_SYNC_IO) ||
+ op->upcall.type == PVFS2_VFS_OP_FILE_IOX) {
+ int timed_out = 0;
+ DECLARE_WAITQUEUE(wait_entry, current);
+
+ /* tell the vfs op waiting on a waitqueue
+ * that this op is done
+ */
+ spin_lock(&op->lock);
+ set_op_state_serviced(op);
+ spin_unlock(&op->lock);
+
+ add_wait_queue_exclusive(&op->io_completion_waitq,
+ &wait_entry);
+ wake_up_interruptible(&op->waitq);
+
+ while (1) {
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ spin_lock(&op->lock);
+ if (op->io_completed) {
+ spin_unlock(&op->lock);
+ break;
+ }
+ spin_unlock(&op->lock);
+
+ if (!signal_pending(current)) {
+ int timeout =
+ MSECS_TO_JIFFIES(1000 *
+ op_timeout_secs);
+ if (!schedule_timeout(timeout)) {
+ gossip_debug(GOSSIP_DEV_DEBUG, "*** I/O wait time is up\n");
+ timed_out = 1;
+ break;
+ }
+ continue;
+ }
+
+ gossip_debug(GOSSIP_DEV_DEBUG, "*** signal on I/O wait -- aborting\n");
+ break;
+ }
+
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&op->io_completion_waitq,
+ &wait_entry);
+
+ /* NOTE: for I/O operations we handle releasing the op
+ * object except in the case of timeout. the reason we
+ * can't free the op in timeout cases is that the op
+ * service logic in the vfs retries operations using
+ * the same op ptr, thus it can't be freed.
+ */
+ if (!timed_out)
+ op_release(op);
+ } else {
+
+ /*
+ * tell the vfs op waiting on a waitqueue that
+ * this op is done
+ */
+ spin_lock(&op->lock);
+ set_op_state_serviced(op);
+ spin_unlock(&op->lock);
+ /*
+ for every other operation (i.e. non-I/O), we need to
+ wake up the callers for downcall completion
+ notification
+ */
+ wake_up_interruptible(&op->waitq);
+ }
+ } else {
+ /* ignore downcalls that we're not interested in */
+ gossip_debug(GOSSIP_DEV_DEBUG,
+ "WARNING: No one's waiting for tag %llu\n",
+ llu(tag));
+ }
+ dev_req_release(buffer);
+
+ return total_returned_size;
+}
+
+static ssize_t pvfs2_devreq_write_iter(struct kiocb *iocb,
+ struct iov_iter *iter)
+{
+ return pvfs2_devreq_writev(iocb->ki_filp,
+ iter->iov,
+ iter->nr_segs,
+ &iocb->ki_pos);
+}
+
+/* Returns whether any FS are still pending remounted */
+static int mark_all_pending_mounts(void)
+{
+ int unmounted = 1;
+ struct pvfs2_sb_info_s *pvfs2_sb = NULL;
+
+ spin_lock(&pvfs2_superblocks_lock);
+ list_for_each_entry(pvfs2_sb, &pvfs2_superblocks, list) {
+		/* All of these file systems require a remount */
+ pvfs2_sb->mount_pending = 1;
+ unmounted = 0;
+ }
+ spin_unlock(&pvfs2_superblocks_lock);
+ return unmounted;
+}
+
+/*
+ * Determine if a given file system needs to be remounted or not
+ * Returns -1 on error
+ * 0 if already mounted
+ * 1 if needs remount
+ */
+int fs_mount_pending(int32_t fsid)
+{
+ int mount_pending = -1;
+ struct pvfs2_sb_info_s *pvfs2_sb = NULL;
+
+ spin_lock(&pvfs2_superblocks_lock);
+ list_for_each_entry(pvfs2_sb, &pvfs2_superblocks, list) {
+ if (pvfs2_sb->fs_id == fsid) {
+ mount_pending = pvfs2_sb->mount_pending;
+ break;
+ }
+ }
+ spin_unlock(&pvfs2_superblocks_lock);
+ return mount_pending;
+}
+
+/*
+ * NOTE: gets called when the last reference to this device is dropped.
+ * Using the open_access_count variable, we enforce a reference count
+ * on this file so that it can be opened by only one process at a time.
+ * the devreq_mutex is used to make sure all i/o has completed
+ * before we call pvfs_bufmap_finalize, and similar such tricky
+ * situations
+ */
+static int pvfs2_devreq_release(struct inode *inode, struct file *file)
+{
+ int unmounted = 0;
+
+ gossip_debug(GOSSIP_DEV_DEBUG,
+ "%s:pvfs2-client-core: exiting, closing device\n",
+ __func__);
+
+ mutex_lock(&devreq_mutex);
+ pvfs_bufmap_finalize();
+
+ open_access_count--;
+
+ unmounted = mark_all_pending_mounts();
+ gossip_debug(GOSSIP_DEV_DEBUG, "PVFS2 Device Close: Filesystem(s) %s\n",
+ (unmounted ? "UNMOUNTED" : "MOUNTED"));
+ mutex_unlock(&devreq_mutex);
+
+ /*
+ * Walk through the list of ops in the request list, mark them
+ * as purged and wake them up.
+ */
+ purge_waiting_ops();
+ /*
+ * Walk through the hash table of in progress operations; mark
+ * them as purged and wake them up
+ */
+ purge_inprogress_ops();
+ gossip_debug(GOSSIP_DEV_DEBUG,
+ "pvfs2-client-core: device close complete\n");
+ return 0;
+}
+
+int is_daemon_in_service(void)
+{
+ int in_service;
+
+ /*
+ * What this function does is checks if client-core is alive
+ * based on the access count we maintain on the device.
+ */
+ mutex_lock(&devreq_mutex);
+ in_service = open_access_count == 1 ? 0 : -EIO;
+ mutex_unlock(&devreq_mutex);
+ return in_service;
+}
+
+static inline long check_ioctl_command(unsigned int command)
+{
+ /* Check for valid ioctl codes */
+ if (_IOC_TYPE(command) != PVFS_DEV_MAGIC) {
+ gossip_err("device ioctl magic numbers don't match! Did you rebuild pvfs2-client-core/libpvfs2? [cmd %x, magic %x != %x]\n",
+ command,
+ _IOC_TYPE(command),
+ PVFS_DEV_MAGIC);
+ return -EINVAL;
+ }
+ /* and valid ioctl commands */
+ if (_IOC_NR(command) >= PVFS_DEV_MAXNR || _IOC_NR(command) <= 0) {
+ gossip_err("Invalid ioctl command number [%d >= %d]\n",
+ _IOC_NR(command), PVFS_DEV_MAXNR);
+ return -ENOIOCTLCMD;
+ }
+ return 0;
+}
+
+static long dispatch_ioctl_command(unsigned int command, unsigned long arg)
+{
+ static int32_t magic = PVFS2_DEVREQ_MAGIC;
+ static int32_t max_up_size = MAX_ALIGNED_DEV_REQ_UPSIZE;
+ static int32_t max_down_size = MAX_ALIGNED_DEV_REQ_DOWNSIZE;
+ struct PVFS_dev_map_desc user_desc;
+ int ret = 0;
+ struct dev_mask_info_t mask_info = { 0 };
+ struct list_head *tmp = NULL;
+ struct pvfs2_sb_info_s *pvfs2_sb = NULL;
+
+
+ /* mtmoore: add locking here */
+
+ switch (command) {
+ case PVFS_DEV_GET_MAGIC:
+ return ((put_user(magic, (int32_t __user *) arg) == -EFAULT) ?
+ -EIO :
+ 0);
+ case PVFS_DEV_GET_MAX_UPSIZE:
+ return ((put_user(max_up_size,
+ (int32_t __user *) arg) == -EFAULT) ?
+ -EIO :
+ 0);
+ case PVFS_DEV_GET_MAX_DOWNSIZE:
+ return ((put_user(max_down_size,
+ (int32_t __user *) arg) == -EFAULT) ?
+ -EIO :
+ 0);
+ case PVFS_DEV_MAP:
+ ret = copy_from_user(&user_desc,
+ (struct PVFS_dev_map_desc __user *)
+ arg,
+ sizeof(struct PVFS_dev_map_desc));
+ return ret ? -EIO : pvfs_bufmap_initialize(&user_desc);
+ case PVFS_DEV_REMOUNT_ALL:
+ gossip_debug(GOSSIP_DEV_DEBUG,
+ "pvfs2_devreq_ioctl: got PVFS_DEV_REMOUNT_ALL\n");
+
+ /*
+ * remount all mounted pvfs2 volumes to regain the lost
+ * dynamic mount tables (if any) -- NOTE: this is done
+ * without keeping the superblock list locked due to the
+ * upcall/downcall waiting. also, the request semaphore is
+ * used to ensure that no operations will be serviced until
+ * all of the remounts are serviced (to avoid ops between
+ * mounts to fail)
+ */
+ ret = mutex_lock_interruptible(&request_mutex);
+ if (ret < 0)
+ return ret;
+ gossip_debug(GOSSIP_DEV_DEBUG,
+ "pvfs2_devreq_ioctl: priority remount in progress\n");
+ list_for_each(tmp, &pvfs2_superblocks) {
+ pvfs2_sb =
+ list_entry(tmp, struct pvfs2_sb_info_s, list);
+ if (pvfs2_sb && (pvfs2_sb->sb)) {
+ gossip_debug(GOSSIP_DEV_DEBUG,
+ "Remounting SB %p\n",
+ pvfs2_sb);
+
+ ret = pvfs2_remount(pvfs2_sb->sb);
+ if (ret) {
+ gossip_debug(GOSSIP_DEV_DEBUG,
+ "SB %p remount failed\n",
+ pvfs2_sb);
+ break;
+ }
+ }
+ }
+ gossip_debug(GOSSIP_DEV_DEBUG,
+ "pvfs2_devreq_ioctl: priority remount complete\n");
+ mutex_unlock(&request_mutex);
+ return ret;
+ case PVFS_DEV_DEBUG:
+ ret = copy_from_user(&mask_info,
+ (void __user *)arg,
+ sizeof(mask_info));
+ if (ret != 0)
+ return -EIO;
+
+ if (mask_info.mask_type == KERNEL_MASK) {
+ if ((mask_info.mask_value == 0)
+ && (kernel_mask_set_mod_init)) {
+ /*
+ * the kernel debug mask was set when the
+ * kernel module was loaded; don't override
+ * it if the client-core was started without
+ * a value for PVFS2_KMODMASK.
+ */
+ return 0;
+ }
+ ret = PVFS_proc_kmod_mask_to_eventlog(
+ mask_info.
+ mask_value,
+ kernel_debug_string);
+ gossip_debug_mask = mask_info.mask_value;
+ pr_info("PVFS: kernel debug mask has been modified to \"%s\" (0x%08llx)\n",
+ kernel_debug_string,
+ llu(gossip_debug_mask));
+ } else if (mask_info.mask_type == CLIENT_MASK) {
+ ret = PVFS_proc_mask_to_eventlog(mask_info.mask_value,
+ client_debug_string);
+ pr_info("PVFS: client debug mask has been modified to \"%s\" (0x%08llx)\n",
+ client_debug_string,
+ llu(mask_info.mask_value));
+ } else {
+ gossip_lerr("Invalid mask type....\n");
+ return -EINVAL;
+ }
+
+ return ret;
+ break;
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return -ENOIOCTLCMD;
+}
+
+static long pvfs2_devreq_ioctl(struct file *file,
+ unsigned int command, unsigned long arg)
+{
+ long ret;
+
+ /* Check for properly constructed commands */
+ ret = check_ioctl_command(command);
+ if (ret < 0)
+ return (int)ret;
+
+ return (int)dispatch_ioctl_command(command, arg);
+}
+
+#ifdef CONFIG_COMPAT /* CONFIG_COMPAT is in .config */
+
+/* Compat structure for the PVFS_DEV_MAP ioctl */
+struct PVFS_dev_map_desc32 {
+ compat_uptr_t ptr;
+ int32_t total_size;
+ int32_t size;
+ int32_t count;
+};
+
+static unsigned long translate_dev_map26(unsigned long args, long *error)
+{
+ struct PVFS_dev_map_desc32 __user *p32 = (void __user *)args;
+ /*
+ * Depending on the architecture, allocate some space on the
+ * user-call-stack based on our expected layout.
+ */
+ struct PVFS_dev_map_desc __user *p =
+ compat_alloc_user_space(sizeof(*p));
+ u32 addr;
+
+ *error = 0;
+ /* get the ptr from the 32 bit user-space */
+ if (get_user(addr, &p32->ptr))
+ goto err;
+ /* try to put that into a 64-bit layout */
+ if (put_user(compat_ptr(addr), &p->ptr))
+ goto err;
+ /* copy the remaining fields */
+ if (copy_in_user(&p->total_size, &p32->total_size, sizeof(int32_t)))
+ goto err;
+ if (copy_in_user(&p->size, &p32->size, sizeof(int32_t)))
+ goto err;
+ if (copy_in_user(&p->count, &p32->count, sizeof(int32_t)))
+ goto err;
+ return (unsigned long)p;
+err:
+ *error = -EFAULT;
+ return 0;
+}
+
+/*
+ * 32 bit user-space apps' ioctl handlers when kernel modules
+ * is compiled as a 64 bit one
+ */
+static long pvfs2_devreq_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long args)
+{
+ long ret;
+ unsigned long arg = args;
+
+ /* Check for properly constructed commands */
+ ret = check_ioctl_command(cmd);
+ if (ret < 0)
+ return ret;
+ if (cmd == PVFS_DEV_MAP) {
+ /*
+ * convert the arguments to what we expect internally
+ * in kernel space
+ */
+ arg = translate_dev_map26(args, &ret);
+ if (ret < 0) {
+ gossip_err("Could not translate dev map\n");
+ return ret;
+ }
+ }
+ /* no other ioctl requires translation */
+ return dispatch_ioctl_command(cmd, arg);
+}
+
+static int pvfs2_ioctl32_init(void)
+{
+ return 0;
+}
+
+static void pvfs2_ioctl32_cleanup(void)
+{
+ return;
+}
+
+#endif /* CONFIG_COMPAT is in .config */
+
+/* the assigned character device major number */
+static int pvfs2_dev_major;
+
+/*
+ * Initialize pvfs2 device specific state:
+ * Must be called at module load time only
+ */
+int pvfs2_dev_init(void)
+{
+ int ret;
+
+ /* register the ioctl32 sub-system */
+ ret = pvfs2_ioctl32_init();
+ if (ret < 0)
+ return ret;
+
+ /* register pvfs2-req device */
+ pvfs2_dev_major = register_chrdev(0,
+ PVFS2_REQDEVICE_NAME,
+ &pvfs2_devreq_file_operations);
+ if (pvfs2_dev_major < 0) {
+ gossip_debug(GOSSIP_INIT_DEBUG,
+ "Failed to register /dev/%s (error %d)\n",
+ PVFS2_REQDEVICE_NAME, pvfs2_dev_major);
+ pvfs2_ioctl32_cleanup();
+ return pvfs2_dev_major;
+ }
+
+ gossip_debug(GOSSIP_INIT_DEBUG,
+ "*** /dev/%s character device registered ***\n",
+ PVFS2_REQDEVICE_NAME);
+ gossip_debug(GOSSIP_INIT_DEBUG, "'mknod /dev/%s c %d 0'.\n",
+ PVFS2_REQDEVICE_NAME, pvfs2_dev_major);
+ return 0;
+}
+
+void pvfs2_dev_cleanup(void)
+{
+ unregister_chrdev(pvfs2_dev_major, PVFS2_REQDEVICE_NAME);
+ gossip_debug(GOSSIP_INIT_DEBUG,
+ "*** /dev/%s character device unregistered ***\n",
+ PVFS2_REQDEVICE_NAME);
+ /* unregister the ioctl32 sub-system */
+ pvfs2_ioctl32_cleanup();
+ return;
+}
+
+static unsigned int pvfs2_devreq_poll(struct file *file,
+ struct poll_table_struct *poll_table)
+{
+ int poll_revent_mask = 0;
+
+ if (open_access_count == 1) {
+ poll_wait(file, &pvfs2_request_list_waitq, poll_table);
+
+ spin_lock(&pvfs2_request_list_lock);
+ if (!list_empty(&pvfs2_request_list))
+ poll_revent_mask |= POLL_IN;
+ spin_unlock(&pvfs2_request_list_lock);
+ }
+ return poll_revent_mask;
+}
+
+const struct file_operations pvfs2_devreq_file_operations = {
+ .owner = THIS_MODULE,
+ .read = pvfs2_devreq_read,
+ .write_iter = pvfs2_devreq_write_iter,
+ .open = pvfs2_devreq_open,
+ .release = pvfs2_devreq_release,
+ .unlocked_ioctl = pvfs2_devreq_ioctl,
+
+#ifdef CONFIG_COMPAT /* CONFIG_COMPAT is in .config */
+ .compat_ioctl = pvfs2_devreq_compat_ioctl,
+#endif
+ .poll = pvfs2_devreq_poll
+};
new file mode 100644
@@ -0,0 +1,990 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * See COPYING in top-level directory.
+ */
+
+/*
+ * Linux VFS file operations.
+ */
+
+#include "protocol.h"
+#include "pvfs2-kernel.h"
+#include "pvfs2-bufmap.h"
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+
+#define wake_up_daemon_for_return(op) \
+do { \
+ spin_lock(&op->lock); \
+ op->io_completed = 1; \
+ spin_unlock(&op->lock); \
+ wake_up_interruptible(&op->io_completion_waitq);\
+} while (0)
+
+/*
+ * Copy to client-core's address space from the buffers specified
+ * by the iovec, up to total_size bytes.
+ * NOTE: the iovec can either contain addresses (which
+ * can further be kernel-space or user-space addresses),
+ * or it can contain pointers to struct pages.
+ */
+static int precopy_buffers(struct pvfs2_bufmap *bufmap,
+ int buffer_index,
+ const struct iovec *vec,
+ unsigned long nr_segs,
+ size_t total_size,
+ int from_user)
+{
+ int ret = 0;
+
+ /*
+ * copy data from application/kernel by pulling it out
+ * of the iovec.
+ */
+ /* Are we copying from User Virtual Addresses? */
+ if (from_user)
+ ret = pvfs_bufmap_copy_iovec_from_user(
+ bufmap,
+ buffer_index,
+ vec,
+ nr_segs,
+ total_size);
+ /* Are we copying from Kernel Virtual Addresses? */
+ else
+ ret = pvfs_bufmap_copy_iovec_from_kernel(
+ bufmap,
+ buffer_index,
+ vec,
+ nr_segs,
+ total_size);
+ if (ret < 0)
+ gossip_err("%s: Failed to copy-in buffers. Please make sure that the pvfs2-client is running. %ld\n",
+ __func__,
+ (long)ret);
+ return ret;
+}
+
+/*
+ * Copy from client-core's address space to the buffers specified
+ * by the iovec, up to total_size bytes.
+ * NOTE: the iovec can either contain addresses (which
+ * can further be kernel-space or user-space addresses),
+ * or it can contain pointers to struct pages.
+ */
+static int postcopy_buffers(struct pvfs2_bufmap *bufmap,
+ int buffer_index,
+ const struct iovec *vec,
+ int nr_segs,
+ size_t total_size,
+ int to_user)
+{
+ int ret = 0;
+
+ /*
+ * copy data to application/kernel by pushing it out to
+ * the iovec. NOTE; target buffers can be addresses or
+ * struct page pointers.
+ */
+ if (total_size) {
+ /* Are we copying to User Virtual Addresses? */
+ if (to_user)
+ ret = pvfs_bufmap_copy_to_user_iovec(
+ bufmap,
+ buffer_index,
+ vec,
+ nr_segs,
+ total_size);
+ /* Are we copying to Kern Virtual Addresses? */
+ else
+ ret = pvfs_bufmap_copy_to_kernel_iovec(
+ bufmap,
+ buffer_index,
+ vec,
+ nr_segs,
+ total_size);
+ if (ret < 0)
+ gossip_err("%s: Failed to copy-out buffers. Please make sure that the pvfs2-client is running (%ld)\n",
+ __func__,
+ (long)ret);
+ }
+ return ret;
+}
+
+/*
+ * Post and wait for the I/O upcall to finish
+ */
+static ssize_t wait_for_direct_io(enum PVFS_io_type type, struct inode *inode,
+ loff_t *offset, struct iovec *vec, unsigned long nr_segs,
+ size_t total_size, loff_t readahead_size, int to_user)
+{
+ struct pvfs2_inode_s *pvfs2_inode = PVFS2_I(inode);
+ struct pvfs2_khandle *handle = &pvfs2_inode->refn.khandle;
+ struct pvfs2_bufmap *bufmap = NULL;
+ struct pvfs2_kernel_op *new_op = NULL;
+ int buffer_index = -1;
+ ssize_t ret;
+
+ new_op = op_alloc(PVFS2_VFS_OP_FILE_IO);
+ if (!new_op) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ /* synchronous I/O */
+ new_op->upcall.req.io.async_vfs_io = PVFS_VFS_SYNC_IO;
+ new_op->upcall.req.io.readahead_size = readahead_size;
+ new_op->upcall.req.io.io_type = type;
+ new_op->upcall.req.io.refn = pvfs2_inode->refn;
+
+populate_shared_memory:
+ /* get a shared buffer index */
+ ret = pvfs_bufmap_get(&bufmap, &buffer_index);
+ if (ret < 0) {
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "%s: pvfs_bufmap_get failure (%ld)\n",
+ __func__, (long)ret);
+ goto out;
+ }
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "%s(%pU): GET op %p -> buffer_index %d\n",
+ __func__,
+ handle,
+ new_op,
+ buffer_index);
+
+ new_op->uses_shared_memory = 1;
+ new_op->upcall.req.io.buf_index = buffer_index;
+ new_op->upcall.req.io.count = total_size;
+ new_op->upcall.req.io.offset = *offset;
+
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "%s(%pU): copy_to_user %d nr_segs %lu, offset: %llu total_size: %zd\n",
+ __func__,
+ handle,
+ to_user,
+ nr_segs,
+ llu(*offset),
+ total_size);
+ /*
+ * Stage 1: copy the buffers into client-core's address space
+ * precopy_buffers only pertains to writes.
+ */
+ if (type == PVFS_IO_WRITE) {
+ ret = precopy_buffers(bufmap,
+ buffer_index,
+ vec,
+ nr_segs,
+ total_size,
+ to_user);
+ if (ret < 0)
+ goto out;
+ }
+
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "%s(%pU): Calling post_io_request with tag (%llu)\n",
+ __func__,
+ handle,
+ llu(new_op->tag));
+
+ /* Stage 2: Service the I/O operation */
+ ret = service_operation(new_op,
+ type == PVFS_IO_WRITE ? "file_write" : "file_read",
+ get_interruptible_flag(inode));
+
+ /*
+ * If service_operation() returns -EAGAIN #and# the operation was
+ * purged from pvfs2_request_list or htable_ops_in_progress, then
+ * we know that the client was restarted, causing the shared memory
+ * area to be wiped clean. To restart a write operation in this
+ * case, we must re-copy the data from the user's iovec to a NEW
+ * shared memory location. To restart a read operation, we must get
+ * a new shared memory location.
+ */
+ if (ret == -EAGAIN && op_state_purged(new_op)) {
+ pvfs_bufmap_put(bufmap, buffer_index);
+ gossip_debug(GOSSIP_WAIT_DEBUG,
+ "%s:going to repopulate_shared_memory.\n",
+ __func__);
+ goto populate_shared_memory;
+ }
+
+ if (ret < 0) {
+ handle_io_error(); /* defined in pvfs2-kernel.h */
+ /*
+ don't write an error to syslog on signaled operation
+ termination unless we've got debugging turned on, as
+ this can happen regularly (i.e. ctrl-c)
+ */
+ if (ret == -EINTR)
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "%s: returning error %ld\n", __func__,
+ (long)ret);
+ else
+ gossip_err("%s: error in %s handle %pU, returning %zd\n",
+ __func__,
+ type == PVFS_IO_READ ?
+ "read from" : "write to",
+ handle, ret);
+ goto out;
+ }
+
+ /*
+ * Stage 3: Post copy buffers from client-core's address space
+ * postcopy_buffers only pertains to reads.
+ */
+ if (type == PVFS_IO_READ) {
+ ret = postcopy_buffers(bufmap,
+ buffer_index,
+ vec,
+ nr_segs,
+ new_op->downcall.resp.io.amt_complete,
+ to_user);
+ if (ret < 0) {
+ /*
+ * put error codes in downcall so that handle_io_error()
+ * preserves it properly
+ */
+ new_op->downcall.status = ret;
+ handle_io_error();
+ goto out;
+ }
+ }
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "%s(%pU): Amount written as returned by the sys-io call:%d\n",
+ __func__,
+ handle,
+ (int)new_op->downcall.resp.io.amt_complete);
+
+ ret = new_op->downcall.resp.io.amt_complete;
+
+ /*
+ tell the device file owner waiting on I/O that this read has
+ completed and it can return now. in this exact case, on
+ wakeup the daemon will free the op, so we *cannot* touch it
+ after this.
+ */
+ wake_up_daemon_for_return(new_op);
+ new_op = NULL;
+
+out:
+ if (buffer_index >= 0) {
+ pvfs_bufmap_put(bufmap, buffer_index);
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "%s(%pU): PUT buffer_index %d\n",
+ __func__, handle, buffer_index);
+ buffer_index = -1;
+ }
+ if (new_op) {
+ op_release(new_op);
+ new_op = NULL;
+ }
+ return ret;
+}
+
+/*
+ * The reason we need to do this is to be able to support readv and writev
+ * that are larger than (pvfs_bufmap_size_query()) Default is
+ * PVFS2_BUFMAP_DEFAULT_DESC_SIZE MB. What that means is that we will
+ * create a new io vec descriptor for those memory addresses that
+ * go beyond the limit. Return value for this routine is negative in case
+ * of errors and 0 in case of success.
+ *
+ * Further, the new_nr_segs pointer is updated to hold the new value
+ * of number of iovecs, the new_vec pointer is updated to hold the pointer
+ * to the new split iovec, and the size array is an array of integers holding
+ * the number of iovecs that straddle pvfs_bufmap_size_query().
+ * The max_new_nr_segs value is computed by the caller and returned.
+ * (It will be (count of all iov_len/ block_size) + 1).
+ */
/*
 * Split the caller's iovec so that no single descriptor straddles a
 * pvfs_bufmap_size_query() boundary.  All outputs are only valid when
 * 0 is returned; *new_vec and *seg_array are kmalloc'd and must be
 * freed by the caller.  Returns a negative errno on failure.
 */
static int split_iovecs(unsigned long max_new_nr_segs, /* IN */
		 unsigned long nr_segs, /* IN */
		 const struct iovec *original_iovec, /* IN */
		 unsigned long *new_nr_segs, /* OUT */
		 struct iovec **new_vec, /* OUT */
		 unsigned long *seg_count, /* OUT */
		 unsigned long **seg_array) /* OUT */
{
	unsigned long seg;
	unsigned long count = 0;	/* bytes accumulated in current block */
	unsigned long begin_seg;
	unsigned long tmpnew_nr_segs = 0;	/* entries filled in new_iovec */
	struct iovec *new_iovec = NULL;
	struct iovec *orig_iovec;	/* mutable copy of original_iovec */
	unsigned long *sizes = NULL;	/* per-block segment counts */
	unsigned long sizes_count = 0;

	/* reject NULL outputs and empty/unbounded requests up front */
	if (nr_segs <= 0 ||
	    original_iovec == NULL ||
	    new_nr_segs == NULL ||
	    new_vec == NULL ||
	    seg_count == NULL ||
	    seg_array == NULL ||
	    max_new_nr_segs <= 0) {
		gossip_err("Invalid parameters to split_iovecs\n");
		return -EINVAL;
	}
	*new_nr_segs = 0;
	*new_vec = NULL;
	*seg_count = 0;
	*seg_array = NULL;
	/* copy the passed in iovec descriptor to a temp structure */
	orig_iovec = kmalloc(nr_segs * sizeof(*orig_iovec),
			     PVFS2_BUFMAP_GFP_FLAGS);
	if (orig_iovec == NULL) {
		gossip_err(
		    "split_iovecs: Could not allocate memory for %lu bytes!\n",
		    (unsigned long)(nr_segs * sizeof(*orig_iovec)));
		return -ENOMEM;
	}
	new_iovec = kzalloc(max_new_nr_segs * sizeof(*new_iovec),
			    PVFS2_BUFMAP_GFP_FLAGS);
	if (new_iovec == NULL) {
		kfree(orig_iovec);
		gossip_err(
		    "split_iovecs: Could not allocate memory for %lu bytes!\n",
		    (unsigned long)(max_new_nr_segs * sizeof(*new_iovec)));
		return -ENOMEM;
	}
	sizes = kzalloc(max_new_nr_segs * sizeof(*sizes),
			PVFS2_BUFMAP_GFP_FLAGS);
	if (sizes == NULL) {
		kfree(new_iovec);
		kfree(orig_iovec);
		gossip_err(
		    "split_iovecs: Could not allocate memory for %lu bytes!\n",
		    (unsigned long)(max_new_nr_segs * sizeof(*sizes)));
		return -ENOMEM;
	}
	/* copy the passed in iovec to a temp structure */
	memcpy(orig_iovec, original_iovec, nr_segs * sizeof(*orig_iovec));
	begin_seg = 0;
repeat:
	/*
	 * Walk the remaining segments; whenever a segment would push the
	 * running count over the block size, split it, advance the block
	 * bookkeeping, and restart from the (shrunk) segment.
	 */
	for (seg = begin_seg; seg < nr_segs; seg++) {
		if (tmpnew_nr_segs >= max_new_nr_segs ||
		    sizes_count >= max_new_nr_segs) {
			kfree(sizes);
			kfree(orig_iovec);
			kfree(new_iovec);
			gossip_err
			    ("split_iovecs: exceeded the index limit (%lu)\n",
			    tmpnew_nr_segs);
			return -EINVAL;
		}
		if (count + orig_iovec[seg].iov_len <
		    pvfs_bufmap_size_query()) {
			/* segment fits in the current block: copy as-is */
			count += orig_iovec[seg].iov_len;
			memcpy(&new_iovec[tmpnew_nr_segs],
			       &orig_iovec[seg],
			       sizeof(*new_iovec));
			tmpnew_nr_segs++;
			sizes[sizes_count]++;
		} else {
			/*
			 * segment crosses the block boundary: emit the part
			 * that fills the block, then shrink orig_iovec[seg]
			 * in place and restart the scan at that segment.
			 */
			new_iovec[tmpnew_nr_segs].iov_base =
			    orig_iovec[seg].iov_base;
			new_iovec[tmpnew_nr_segs].iov_len =
			    (pvfs_bufmap_size_query() - count);
			tmpnew_nr_segs++;
			sizes[sizes_count]++;
			sizes_count++;
			begin_seg = seg;
			orig_iovec[seg].iov_base +=
			    (pvfs_bufmap_size_query() - count);
			orig_iovec[seg].iov_len -=
			    (pvfs_bufmap_size_query() - count);
			count = 0;
			break;
		}
	}
	/* if the inner loop broke early there is still data to place */
	if (seg != nr_segs)
		goto repeat;
	else
		sizes_count++;	/* account for the final (partial) block */

	*new_nr_segs = tmpnew_nr_segs;
	/* new_iovec is freed by the caller */
	*new_vec = new_iovec;
	*seg_count = sizes_count;
	/* seg_array is also freed by the caller */
	*seg_array = sizes;
	kfree(orig_iovec);
	return 0;
}
+
+static long bound_max_iovecs(const struct iovec *curr, unsigned long nr_segs,
+ ssize_t *total_count)
+{
+ unsigned long i;
+ long max_nr_iovecs;
+ ssize_t total;
+ ssize_t count;
+
+ total = 0;
+ count = 0;
+ max_nr_iovecs = 0;
+ for (i = 0; i < nr_segs; i++) {
+ const struct iovec *iv = &curr[i];
+ count += iv->iov_len;
+ if (unlikely((ssize_t) (count | iv->iov_len) < 0))
+ return -EINVAL;
+ if (total + iv->iov_len < pvfs_bufmap_size_query()) {
+ total += iv->iov_len;
+ max_nr_iovecs++;
+ } else {
+ total =
+ (total + iv->iov_len - pvfs_bufmap_size_query());
+ max_nr_iovecs += (total / pvfs_bufmap_size_query() + 2);
+ }
+ }
+ *total_count = count;
+ return max_nr_iovecs;
+}
+
+/*
+ * Common entry point for read/write/readv/writev
+ * This function will dispatch it to either the direct I/O
+ * or buffered I/O path depending on the mount options and/or
+ * augmented/extended metadata attached to the file.
+ * Note: File extended attributes override any mount options.
+ */
+static ssize_t do_readv_writev(enum PVFS_io_type type, struct file *file,
+ loff_t *offset, const struct iovec *iov, unsigned long nr_segs)
+{
+ struct inode *inode = file->f_mapping->host;
+ struct pvfs2_inode_s *pvfs2_inode = PVFS2_I(inode);
+ struct pvfs2_khandle *handle = &pvfs2_inode->refn.khandle;
+ ssize_t ret;
+ ssize_t total_count;
+ unsigned int to_free;
+ size_t count;
+ unsigned long seg;
+ unsigned long new_nr_segs = 0;
+ unsigned long max_new_nr_segs = 0;
+ unsigned long seg_count = 0;
+ unsigned long *seg_array = NULL;
+ struct iovec *iovecptr = NULL;
+ struct iovec *ptr = NULL;
+
+ total_count = 0;
+ ret = -EINVAL;
+ count = 0;
+ to_free = 0;
+
+ /* Compute total and max number of segments after split */
+ max_new_nr_segs = bound_max_iovecs(iov, nr_segs, &count);
+ if (max_new_nr_segs < 0) {
+ gossip_lerr("%s: could not bound iovec %lu\n",
+ __func__,
+ max_new_nr_segs);
+ goto out;
+ }
+
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "%s-BEGIN(%pU): count(%d) after estimate_max_iovecs.\n",
+ __func__,
+ handle,
+ (int)count);
+
+ if (type == PVFS_IO_WRITE) {
+ if (file->f_flags & O_APPEND) {
+ /*
+ * Make sure generic_write_checks sees an uptodate
+ * inode size.
+ */
+ ret = pvfs2_inode_getattr(inode, PVFS_ATTR_SYS_SIZE);
+ if (ret != 0)
+ goto out;
+ } else if (file->f_pos > i_size_read(inode))
+ pvfs2_i_size_write(inode, file->f_pos);
+
+
+ ret = generic_write_checks(file,
+ offset,
+ &count,
+ S_ISBLK(inode->i_mode));
+ if (ret != 0) {
+ gossip_err("%s: failed generic argument checks.\n",
+ __func__);
+ goto out;
+ }
+
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "%s(%pU): proceeding with offset : %llu, "
+ "size %d\n",
+ __func__,
+ handle,
+ llu(*offset),
+ (int)count);
+ }
+
+ if (count == 0) {
+ ret = 0;
+ goto out;
+ }
+
+ /*
+ * if the total size of data transfer requested is greater than
+ * the kernel-set blocksize of PVFS2, then we split the iovecs
+ * such that no iovec description straddles a block size limit
+ */
+
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "%s: pvfs_bufmap_size:%d\n",
+ __func__,
+ pvfs_bufmap_size_query());
+
+ if (count > pvfs_bufmap_size_query()) {
+ /*
+ * Split up the given iovec description such that
+ * no iovec descriptor straddles over the block-size limitation.
+ * This makes us our job easier to stage the I/O.
+ * In addition, this function will also compute an array
+ * with seg_count entries that will store the number of
+ * segments that straddle the block-size boundaries.
+ */
+ ret = split_iovecs(max_new_nr_segs, /* IN */
+ nr_segs, /* IN */
+ iov, /* IN */
+ &new_nr_segs, /* OUT */
+ &iovecptr, /* OUT */
+ &seg_count, /* OUT */
+ &seg_array); /* OUT */
+ if (ret < 0) {
+ gossip_err("%s: Failed to split iovecs to satisfy larger than blocksize readv/writev request %zd\n",
+ __func__,
+ ret);
+ goto out;
+ }
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "%s: Splitting iovecs from %lu to %lu"
+ " [max_new %lu]\n",
+ __func__,
+ nr_segs,
+ new_nr_segs,
+ max_new_nr_segs);
+ /* We must free seg_array and iovecptr */
+ to_free = 1;
+ } else {
+ new_nr_segs = nr_segs;
+ /* use the given iovec description */
+ iovecptr = (struct iovec *)iov;
+ /* There is only 1 element in the seg_array */
+ seg_count = 1;
+ /* and its value is the number of segments passed in */
+ seg_array = &nr_segs;
+ /* We dont have to free up anything */
+ to_free = 0;
+ }
+ ptr = iovecptr;
+
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "%s(%pU) %zd@%llu\n",
+ __func__,
+ handle,
+ count,
+ llu(*offset));
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "%s(%pU): new_nr_segs: %lu, seg_count: %lu\n",
+ __func__,
+ handle,
+ new_nr_segs, seg_count);
+
+/* PVFS2_KERNEL_DEBUG is a CFLAGS define. */
+#ifdef PVFS2_KERNEL_DEBUG
+ for (seg = 0; seg < new_nr_segs; seg++)
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "%s: %d) %p to %p [%d bytes]\n",
+ __func__,
+ (int)seg + 1,
+ iovecptr[seg].iov_base,
+ iovecptr[seg].iov_base + iovecptr[seg].iov_len,
+ (int)iovecptr[seg].iov_len);
+ for (seg = 0; seg < seg_count; seg++)
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "%s: %zd) %lu\n",
+ __func__,
+ seg + 1,
+ seg_array[seg]);
+#endif
+ seg = 0;
+ while (total_count < count) {
+ size_t each_count;
+ size_t amt_complete;
+
+ /* how much to transfer in this loop iteration */
+ each_count =
+ (((count - total_count) > pvfs_bufmap_size_query()) ?
+ pvfs_bufmap_size_query() :
+ (count - total_count));
+
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "%s(%pU): size of each_count(%d)\n",
+ __func__,
+ handle,
+ (int)each_count);
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "%s(%pU): BEFORE wait_for_io: offset is %d\n",
+ __func__,
+ handle,
+ (int)*offset);
+
+ ret = wait_for_direct_io(type, inode, offset, ptr,
+ seg_array[seg], each_count, 0, 1);
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "%s(%pU): return from wait_for_io:%d\n",
+ __func__,
+ handle,
+ (int)ret);
+
+ if (ret < 0)
+ goto out;
+
+ /* advance the iovec pointer */
+ ptr += seg_array[seg];
+ seg++;
+ *offset += ret;
+ total_count += ret;
+ amt_complete = ret;
+
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "%s(%pU): AFTER wait_for_io: offset is %d\n",
+ __func__,
+ handle,
+ (int)*offset);
+
+ /*
+ * if we got a short I/O operations,
+ * fall out and return what we got so far
+ */
+ if (amt_complete < each_count)
+ break;
+ } /*end while */
+
+ if (total_count > 0)
+ ret = total_count;
+out:
+ if (to_free) {
+ kfree(iovecptr);
+ kfree(seg_array);
+ }
+ if (ret > 0) {
+ if (type == PVFS_IO_READ) {
+ file_accessed(file);
+ } else {
+ SetMtimeFlag(pvfs2_inode);
+ inode->i_mtime = CURRENT_TIME;
+ mark_inode_dirty_sync(inode);
+ }
+ }
+
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "%s(%pU): Value(%d) returned.\n",
+ __func__,
+ handle,
+ (int)ret);
+
+ return ret;
+}
+
+/*
+ * Read data from a specified offset in a file (referenced by inode).
+ * Data may be placed either in a user or kernel buffer.
+ */
+ssize_t pvfs2_inode_read(struct inode *inode,
+ char __user *buf,
+ size_t count,
+ loff_t *offset,
+ loff_t readahead_size)
+{
+ struct pvfs2_inode_s *pvfs2_inode = PVFS2_I(inode);
+ size_t bufmap_size;
+ struct iovec vec;
+ ssize_t ret = -EINVAL;
+
+ g_pvfs2_stats.reads++;
+
+ vec.iov_base = buf;
+ vec.iov_len = count;
+
+ bufmap_size = pvfs_bufmap_size_query();
+ if (count > bufmap_size) {
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "%s: count is too large (%zd/%zd)!\n",
+ __func__, count, bufmap_size);
+ return -EINVAL;
+ }
+
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "%s(%pU) %zd@%llu\n",
+ __func__,
+ &pvfs2_inode->refn.khandle,
+ count,
+ llu(*offset));
+
+ ret = wait_for_direct_io(PVFS_IO_READ, inode, offset, &vec, 1,
+ count, readahead_size, 0);
+ if (ret > 0)
+ *offset += ret;
+
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "%s(%pU): Value(%zd) returned.\n",
+ __func__,
+ &pvfs2_inode->refn.khandle,
+ ret);
+
+ return ret;
+}
+
+static ssize_t pvfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+ struct file *file = iocb->ki_filp;
+ loff_t pos = *(&iocb->ki_pos);
+ ssize_t rc = 0;
+ unsigned long nr_segs = iter->nr_segs;
+
+ BUG_ON(iocb->private);
+
+ gossip_debug(GOSSIP_FILE_DEBUG,"pvfs2_file_read_iter\n");
+
+ g_pvfs2_stats.reads++;
+
+ rc = do_readv_writev(PVFS_IO_READ,
+ file,
+ &pos,
+ iter->iov,
+ nr_segs);
+ iocb->ki_pos = pos;
+
+ return rc;
+}
+
+static ssize_t pvfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+ struct file *file = iocb->ki_filp;
+ loff_t pos = *(&iocb->ki_pos);
+ unsigned long nr_segs = iter->nr_segs;
+ ssize_t rc;
+
+ BUG_ON(iocb->private);
+
+ gossip_debug(GOSSIP_FILE_DEBUG,"pvfs2_file_write_iter\n");
+
+ g_pvfs2_stats.writes++;
+
+ rc = do_readv_writev(PVFS_IO_WRITE,
+ file,
+ &pos,
+ iter->iov,
+ nr_segs);
+ iocb->ki_pos = pos;
+
+ return rc;
+}
+
+/*
+ * Perform a miscellaneous operation on a file.
+ */
+long pvfs2_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int ret = -ENOTTY;
+ uint64_t val = 0;
+ unsigned long uval;
+
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "pvfs2_ioctl: called with cmd %d\n",
+ cmd);
+
+ /*
+ * we understand some general ioctls on files, such as the immutable
+ * and append flags
+ */
+ if (cmd == FS_IOC_GETFLAGS) {
+ val = 0;
+ ret = pvfs2_xattr_get_default(file->f_path.dentry,
+ "user.pvfs2.meta_hint",
+ &val,
+ sizeof(val),
+ 0);
+ if (ret < 0 && ret != -ENODATA)
+ return ret;
+ else if (ret == -ENODATA)
+ val = 0;
+ uval = val;
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "pvfs2_ioctl: FS_IOC_GETFLAGS: %llu\n",
+ (unsigned long long)uval);
+ return put_user(uval, (int __user *)arg);
+ } else if (cmd == FS_IOC_SETFLAGS) {
+ ret = 0;
+ if (get_user(uval, (int __user *)arg))
+ return -EFAULT;
+ /*
+ * PVFS_MIRROR_FL is set internally when the mirroring mode
+ * is turned on for a file. The user is not allowed to turn
+ * on this bit, but the bit is present if the user first gets
+ * the flags and then updates the flags with some new
+ * settings. So, we ignore it in the following edit. bligon.
+ */
+ if ((uval & ~PVFS_MIRROR_FL) &
+ (~(FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NOATIME_FL))) {
+ gossip_err("pvfs2_ioctl: the FS_IOC_SETFLAGS only supports setting one of FS_IMMUTABLE_FL|FS_APPEND_FL|FS_NOATIME_FL\n");
+ return -EINVAL;
+ }
+ val = uval;
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "pvfs2_ioctl: FS_IOC_SETFLAGS: %llu\n",
+ (unsigned long long)val);
+ ret = pvfs2_xattr_set_default(file->f_path.dentry,
+ "user.pvfs2.meta_hint",
+ &val,
+ sizeof(val),
+ 0,
+ 0);
+ }
+
+ return ret;
+}
+
+/*
+ * Memory map a region of a file.
+ */
+static int pvfs2_file_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "pvfs2_file_mmap: called on %s\n",
+ (file ?
+ (char *)file->f_path.dentry->d_name.name :
+ (char *)"Unknown"));
+
+ /* set the sequential readahead hint */
+ vma->vm_flags |= VM_SEQ_READ;
+ vma->vm_flags &= ~VM_RAND_READ;
+ return generic_file_mmap(file, vma);
+}
+
+#define mapping_nrpages(idata) ((idata)->nrpages)
+
+/*
+ * Called to notify the module that there are no more references to
+ * this file (i.e. no processes have it open).
+ *
+ * \note Not called when each file is closed.
+ */
+int pvfs2_file_release(struct inode *inode, struct file *file)
+{
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "pvfs2_file_release: called on %s\n",
+ file->f_path.dentry->d_name.name);
+
+ pvfs2_flush_inode(inode);
+
+ /*
+ remove all associated inode pages from the page cache and mmap
+ readahead cache (if any); this forces an expensive refresh of
+ data for the next caller of mmap (or 'get_block' accesses)
+ */
+ if (file->f_path.dentry->d_inode &&
+ file->f_path.dentry->d_inode->i_mapping &&
+ mapping_nrpages(&file->f_path.dentry->d_inode->i_data))
+ truncate_inode_pages(file->f_path.dentry->d_inode->i_mapping,
+ 0);
+ return 0;
+}
+
+/*
+ * Push all data for a specific file onto permanent storage.
+ */
+int pvfs2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
+{
+ int ret = -EINVAL;
+ struct pvfs2_inode_s *pvfs2_inode =
+ PVFS2_I(file->f_path.dentry->d_inode);
+ struct pvfs2_kernel_op *new_op = NULL;
+
+ /* required call */
+ filemap_write_and_wait_range(file->f_mapping, start, end);
+
+ new_op = op_alloc(PVFS2_VFS_OP_FSYNC);
+ if (!new_op)
+ return -ENOMEM;
+ new_op->upcall.req.fsync.refn = pvfs2_inode->refn;
+
+ ret = service_operation(new_op,
+ "pvfs2_fsync",
+ get_interruptible_flag(file->f_path.dentry->d_inode));
+
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "pvfs2_fsync got return value of %d\n",
+ ret);
+
+ op_release(new_op);
+
+ pvfs2_flush_inode(file->f_path.dentry->d_inode);
+ return ret;
+}
+
+/*
+ * Change the file pointer position for an instance of an open file.
+ *
+ * \note If .llseek is overriden, we must acquire lock as described in
+ * Documentation/filesystems/Locking.
+ *
+ * Future upgrade could support SEEK_DATA and SEEK_HOLE but would
+ * require much changes to the FS
+ */
+loff_t pvfs2_file_llseek(struct file *file, loff_t offset, int origin)
+{
+ int ret = -EINVAL;
+ struct inode *inode = file->f_path.dentry->d_inode;
+
+ if (!inode) {
+ gossip_err("pvfs2_file_llseek: invalid inode (NULL)\n");
+ return ret;
+ }
+
+ if (origin == PVFS2_SEEK_END) {
+ /*
+ * revalidate the inode's file size.
+ * NOTE: We are only interested in file size here,
+ * so we set mask accordingly.
+ */
+ ret = pvfs2_inode_getattr(inode, PVFS_ATTR_SYS_SIZE);
+ if (ret) {
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "%s:%s:%d calling make bad inode\n",
+ __FILE__,
+ __func__,
+ __LINE__);
+ pvfs2_make_bad_inode(inode);
+ return ret;
+ }
+ }
+
+ gossip_debug(GOSSIP_FILE_DEBUG,
+ "pvfs2_file_llseek: offset is %ld | origin is %d | "
+ "inode size is %lu\n",
+ (long)offset,
+ origin,
+ (unsigned long)file->f_path.dentry->d_inode->i_size);
+
+ return generic_file_llseek(file, offset, origin);
+}
+
/*
 * File locking is not implemented for PVFS2; always report -ENOSYS
 * regardless of the requested flags or lock.
 */
int pvfs2_lock(struct file *f, int flags, struct file_lock *lock)
{
	return -ENOSYS;
}
+
/** PVFS2 implementation of VFS file operations */
const struct file_operations pvfs2_file_operations = {
	.llseek = pvfs2_file_llseek,
	/* sync read/write are routed through the _iter handlers below */
	.read = new_sync_read,
	.write = new_sync_write,
	.read_iter = pvfs2_file_read_iter,
	.write_iter = pvfs2_file_write_iter,
	.lock = pvfs2_lock,		/* always returns -ENOSYS */
	.unlocked_ioctl = pvfs2_ioctl,
	.mmap = pvfs2_file_mmap,
	.open = generic_file_open,
	.release = pvfs2_file_release,
	.fsync = pvfs2_fsync,
};
new file mode 100644
@@ -0,0 +1,537 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * See COPYING in top-level directory.
+ */
+
+/*
+ * Linux VFS extended attribute operations.
+ */
+
+#include "protocol.h"
+#include "pvfs2-kernel.h"
+#include "pvfs2-bufmap.h"
+#include <linux/posix_acl_xattr.h>
+#include <linux/xattr.h>
+
+
#define SYSTEM_PVFS2_KEY "system.pvfs2."
#define SYSTEM_PVFS2_KEY_LEN 13

/*
 * Decide whether a key may appear in listxattr output.
 *
 * Returns 0 for keys that must stay hidden (anything beginning with
 * SYSTEM_PVFS2_KEY) and 1 for keys that are safe to show the caller.
 * Keys shorter than the reserved prefix cannot be reserved.
 */
static int is_reserved_key(const char *key, size_t size)
{
	if (size < SYSTEM_PVFS2_KEY_LEN)
		return 1;
	if (strncmp(key, SYSTEM_PVFS2_KEY, SYSTEM_PVFS2_KEY_LEN) == 0)
		return 0;
	return 1;
}
+
+static inline int convert_to_internal_xattr_flags(int setxattr_flags)
+{
+ int internal_flag = 0;
+
+ if (setxattr_flags & XATTR_REPLACE) {
+ /* Attribute must exist! */
+ internal_flag = PVFS_XATTR_REPLACE;
+ } else if (setxattr_flags & XATTR_CREATE) {
+ /* Attribute must not exist */
+ internal_flag = PVFS_XATTR_CREATE;
+ }
+ return internal_flag;
+}
+
+
+/*
+ * Tries to get a specified key's attributes of a given
+ * file into a user-specified buffer. Note that the getxattr
+ * interface allows for the users to probe the size of an
+ * extended attribute by passing in a value of 0 to size.
+ * Thus our return value is always the size of the attribute
+ * unless the key does not exist for the file and/or if
+ * there were errors in fetching the attribute value.
+ */
+ssize_t pvfs2_inode_getxattr(struct inode *inode, const char *prefix,
+ const char *name, void *buffer, size_t size)
+{
+ struct pvfs2_inode_s *pvfs2_inode = PVFS2_I(inode);
+ struct pvfs2_kernel_op *new_op = NULL;
+ ssize_t ret = -ENOMEM;
+ ssize_t length = 0;
+ int fsuid;
+ int fsgid;
+
+ gossip_debug(GOSSIP_XATTR_DEBUG,
+ "%s: prefix %s name %s, buffer_size %zd\n",
+ __func__, prefix, name, size);
+
+ if (name == NULL || (size > 0 && buffer == NULL)) {
+ gossip_err("pvfs2_inode_getxattr: bogus NULL pointers\n");
+ return -EINVAL;
+ }
+ if (size < 0 ||
+ (strlen(name) + strlen(prefix)) >= PVFS_MAX_XATTR_NAMELEN) {
+ gossip_err("Invalid size (%d) or key length (%d)\n",
+ (int)size,
+ (int)(strlen(name) + strlen(prefix)));
+ return -EINVAL;
+ }
+
+ fsuid = from_kuid(current_user_ns(), current_fsuid());
+ fsgid = from_kgid(current_user_ns(), current_fsgid());
+
+ gossip_debug(GOSSIP_XATTR_DEBUG,
+ "getxattr on inode %pU, name %s "
+ "(uid %o, gid %o)\n",
+ get_khandle_from_ino(inode),
+ name,
+ fsuid,
+ fsgid);
+
+ down_read(&pvfs2_inode->xattr_sem);
+
+ new_op = op_alloc(PVFS2_VFS_OP_GETXATTR);
+ if (!new_op)
+ goto out_unlock;
+
+ new_op->upcall.req.getxattr.refn = pvfs2_inode->refn;
+ ret = snprintf((char *)new_op->upcall.req.getxattr.key,
+ PVFS_MAX_XATTR_NAMELEN, "%s%s", prefix, name);
+
+ /*
+ * NOTE: Although keys are meant to be NULL terminated textual
+ * strings, I am going to explicitly pass the length just in case
+ * we change this later on...
+ */
+ new_op->upcall.req.getxattr.key_sz = ret + 1;
+
+ ret = service_operation(new_op, "pvfs2_inode_getxattr",
+ get_interruptible_flag(inode));
+ if (ret != 0) {
+ if (ret == -ENOENT) {
+ ret = -ENODATA;
+ gossip_debug(GOSSIP_XATTR_DEBUG,
+ "pvfs2_inode_getxattr: inode %pU key %s"
+ " does not exist!\n",
+ get_khandle_from_ino(inode),
+ (char *)new_op->upcall.req.getxattr.key);
+ }
+ goto out_release_op;
+ }
+
+ /*
+ * Length returned includes null terminator.
+ */
+ length = new_op->downcall.resp.getxattr.val_sz - 1;
+
+ /*
+ * Just return the length of the queried attribute.
+ */
+ if (size == 0) {
+ ret = length;
+ goto out_release_op;
+ }
+
+ /*
+ * Check to see if key length is > provided buffer size.
+ */
+ if (length > size) {
+ ret = -ERANGE;
+ goto out_release_op;
+ }
+
+ memset(buffer, 0, size);
+ memcpy(buffer, new_op->downcall.resp.getxattr.val, length);
+ gossip_debug(GOSSIP_XATTR_DEBUG,
+ "pvfs2_inode_getxattr: inode %pU "
+ "key %s key_sz %d, val_len %d\n",
+ get_khandle_from_ino(inode),
+ (char *)new_op->
+ upcall.req.getxattr.key,
+ (int)new_op->
+ upcall.req.getxattr.key_sz,
+ (int)ret);
+
+ ret = length;
+
+out_release_op:
+ op_release(new_op);
+out_unlock:
+ up_read(&pvfs2_inode->xattr_sem);
+ return ret;
+}
+
+static int pvfs2_inode_removexattr(struct inode *inode,
+ const char *prefix,
+ const char *name,
+ int flags)
+{
+ struct pvfs2_inode_s *pvfs2_inode = PVFS2_I(inode);
+ struct pvfs2_kernel_op *new_op = NULL;
+ int ret = -ENOMEM;
+
+ down_write(&pvfs2_inode->xattr_sem);
+ new_op = op_alloc(PVFS2_VFS_OP_REMOVEXATTR);
+ if (!new_op)
+ goto out_unlock;
+
+ new_op->upcall.req.removexattr.refn = pvfs2_inode->refn;
+ /*
+ * NOTE: Although keys are meant to be NULL terminated
+ * textual strings, I am going to explicitly pass the
+ * length just in case we change this later on...
+ */
+ ret = snprintf((char *)new_op->upcall.req.removexattr.key,
+ PVFS_MAX_XATTR_NAMELEN,
+ "%s%s",
+ (prefix ? prefix : ""),
+ name);
+ new_op->upcall.req.removexattr.key_sz = ret + 1;
+
+ gossip_debug(GOSSIP_XATTR_DEBUG,
+ "pvfs2_inode_removexattr: key %s, key_sz %d\n",
+ (char *)new_op->upcall.req.removexattr.key,
+ (int)new_op->upcall.req.removexattr.key_sz);
+
+ ret = service_operation(new_op,
+ "pvfs2_inode_removexattr",
+ get_interruptible_flag(inode));
+ if (ret == -ENOENT) {
+ /*
+ * Request to replace a non-existent attribute is an error.
+ */
+ if (flags & XATTR_REPLACE)
+ ret = -ENODATA;
+ else
+ ret = 0;
+ }
+
+ gossip_debug(GOSSIP_XATTR_DEBUG,
+ "pvfs2_inode_removexattr: returning %d\n", ret);
+
+ op_release(new_op);
+out_unlock:
+ up_write(&pvfs2_inode->xattr_sem);
+ return ret;
+}
+
+/*
+ * Tries to set an attribute for a given key on a file.
+ *
+ * Returns a -ve number on error and 0 on success. Key is text, but value
+ * can be binary!
+ */
+int pvfs2_inode_setxattr(struct inode *inode, const char *prefix,
+ const char *name, const void *value, size_t size, int flags)
+{
+ struct pvfs2_inode_s *pvfs2_inode = PVFS2_I(inode);
+ struct pvfs2_kernel_op *new_op;
+ int internal_flag = 0;
+ int ret = -ENOMEM;
+
+ gossip_debug(GOSSIP_XATTR_DEBUG,
+ "%s: prefix %s, name %s, buffer_size %zd\n",
+ __func__, prefix, name, size);
+
+ if (size < 0 ||
+ size >= PVFS_MAX_XATTR_VALUELEN ||
+ flags < 0) {
+ gossip_err("pvfs2_inode_setxattr: bogus values of size(%d), flags(%d)\n",
+ (int)size,
+ flags);
+ return -EINVAL;
+ }
+
+ if (name == NULL ||
+ (size > 0 && value == NULL)) {
+ gossip_err("pvfs2_inode_setxattr: bogus NULL pointers!\n");
+ return -EINVAL;
+ }
+
+ internal_flag = convert_to_internal_xattr_flags(flags);
+
+ if (prefix) {
+ if (strlen(name) + strlen(prefix) >= PVFS_MAX_XATTR_NAMELEN) {
+ gossip_err
+ ("pvfs2_inode_setxattr: bogus key size (%d)\n",
+ (int)(strlen(name) + strlen(prefix)));
+ return -EINVAL;
+ }
+ } else {
+ if (strlen(name) >= PVFS_MAX_XATTR_NAMELEN) {
+ gossip_err
+ ("pvfs2_inode_setxattr: bogus key size (%d)\n",
+ (int)(strlen(name)));
+ return -EINVAL;
+ }
+ }
+
+ /* This is equivalent to a removexattr */
+ if (size == 0 && value == NULL) {
+ gossip_debug(GOSSIP_XATTR_DEBUG,
+ "removing xattr (%s%s)\n",
+ prefix,
+ name);
+ return pvfs2_inode_removexattr(inode, prefix, name, flags);
+ }
+
+ gossip_debug(GOSSIP_XATTR_DEBUG,
+ "setxattr on inode %pU, name %s\n",
+ get_khandle_from_ino(inode),
+ name);
+
+ down_write(&pvfs2_inode->xattr_sem);
+ new_op = op_alloc(PVFS2_VFS_OP_SETXATTR);
+ if (!new_op)
+ goto out_unlock;
+
+
+ new_op->upcall.req.setxattr.refn = pvfs2_inode->refn;
+ new_op->upcall.req.setxattr.flags = internal_flag;
+ /*
+ * NOTE: Although keys are meant to be NULL terminated textual
+ * strings, I am going to explicitly pass the length just in
+ * case we change this later on...
+ */
+ ret = snprintf((char *)new_op->upcall.req.setxattr.keyval.key,
+ PVFS_MAX_XATTR_NAMELEN,
+ "%s%s",
+ prefix, name);
+ new_op->upcall.req.setxattr.keyval.key_sz = ret + 1;
+ memcpy(new_op->upcall.req.setxattr.keyval.val, value, size);
+ new_op->upcall.req.setxattr.keyval.val[size] = '\0';
+ /* For some reason, val_sz should include the \0 at the end
+ * as well.
+ */
+ new_op->upcall.req.setxattr.keyval.val_sz = size + 1;
+
+ gossip_debug(GOSSIP_XATTR_DEBUG,
+ "pvfs2_inode_setxattr: key %s, key_sz %d "
+ " value size %zd\n",
+ (char *)new_op->upcall.req.setxattr.keyval.key,
+ (int)new_op->upcall.req.setxattr.keyval.key_sz,
+ size + 1);
+
+ ret = service_operation(new_op,
+ "pvfs2_inode_setxattr",
+ get_interruptible_flag(inode));
+
+ gossip_debug(GOSSIP_XATTR_DEBUG,
+ "pvfs2_inode_setxattr: returning %d\n",
+ ret);
+
+ /* when request is serviced properly, free req op struct */
+ op_release(new_op);
+out_unlock:
+ up_write(&pvfs2_inode->xattr_sem);
+ return ret;
+}
+
+/*
+ * Tries to get a specified object's keys into a user-specified buffer of a
+ * given size. Note that like the previous instances of xattr routines, this
+ * also allows you to pass in a NULL pointer and 0 size to probe the size for
+ * subsequent memory allocations. Thus our return value is always the size of
+ * all the keys unless there were errors in fetching the keys!
+ */
+ssize_t pvfs2_listxattr(struct dentry *dentry, char *buffer, size_t size)
+{
+ struct inode *inode = dentry->d_inode;
+ struct pvfs2_inode_s *pvfs2_inode = PVFS2_I(inode);
+ struct pvfs2_kernel_op *new_op;
+ uint64_t token = PVFS_ITERATE_START;
+ ssize_t ret = -ENOMEM;
+ ssize_t total = 0;
+ ssize_t length = 0;
+ int count_keys = 0;
+ int key_size;
+ int i = 0;
+
+ if (size > 0 && buffer == NULL) {
+ gossip_err("%s: bogus NULL pointers\n", __func__);
+ return -EINVAL;
+ }
+ if (size < 0) {
+ gossip_err("Invalid size (%d)\n", (int)size);
+ return -EINVAL;
+ }
+
+
+ down_read(&pvfs2_inode->xattr_sem);
+ new_op = op_alloc(PVFS2_VFS_OP_LISTXATTR);
+ if (!new_op)
+ goto out_unlock;
+
+ if (buffer && size > 0)
+ memset(buffer, 0, size);
+
+try_again:
+ key_size = 0;
+ new_op->upcall.req.listxattr.refn = pvfs2_inode->refn;
+ new_op->upcall.req.listxattr.token = token;
+ new_op->upcall.req.listxattr.requested_count =
+ (size == 0) ? 0 : PVFS_MAX_XATTR_LISTLEN;
+ ret = service_operation(new_op, __func__,
+ get_interruptible_flag(inode));
+ if (ret != 0)
+ goto done;
+
+ if (size == 0) {
+ /*
+ * This is a bit of a big upper limit, but I did not want to
+ * spend too much time getting this correct, since users end
+ * up allocating memory rather than us...
+ */
+ total = new_op->downcall.resp.listxattr.returned_count *
+ PVFS_MAX_XATTR_NAMELEN;
+ goto done;
+ }
+
+ length = new_op->downcall.resp.listxattr.keylen;
+ if (length == 0)
+ goto done;
+
+ /*
+ * Check to see how much can be fit in the buffer. Fit only whole keys.
+ */
+ for (i = 0; i < new_op->downcall.resp.listxattr.returned_count; i++) {
+ if (total + new_op->downcall.resp.listxattr.lengths[i] > size)
+ goto done;
+
+ /*
+ * Since many dumb programs try to setxattr() on our reserved
+ * xattrs this is a feeble attempt at defeating those by not
+ * listing them in the output of listxattr.. sigh
+ */
+ if (is_reserved_key(new_op->downcall.resp.listxattr.key + key_size,
+ new_op->downcall.resp.listxattr.lengths[i])) {
+ gossip_debug(GOSSIP_XATTR_DEBUG, "Copying key %d -> %s\n",
+ i, new_op->downcall.resp.listxattr.key +
+ key_size);
+ memcpy(buffer + total,
+ new_op->downcall.resp.listxattr.key + key_size,
+ new_op->downcall.resp.listxattr.lengths[i]);
+ total += new_op->downcall.resp.listxattr.lengths[i];
+ count_keys++;
+ } else {
+ gossip_debug(GOSSIP_XATTR_DEBUG, "[RESERVED] key %d -> %s\n",
+ i, new_op->downcall.resp.listxattr.key +
+ key_size);
+ }
+ key_size += new_op->downcall.resp.listxattr.lengths[i];
+ }
+
+ /*
+ * Since the buffer was large enough, we might have to continue
+ * fetching more keys!
+ */
+ token = new_op->downcall.resp.listxattr.token;
+ if (token != PVFS_ITERATE_END)
+ goto try_again;
+
+done:
+ gossip_debug(GOSSIP_XATTR_DEBUG, "%s: returning %d"
+ " [size of buffer %ld] (filled in %d keys)\n",
+ __func__,
+ ret ? (int)ret : (int)total,
+ (long)size,
+ count_keys);
+ op_release(new_op);
+ if (ret == 0)
+ ret = total;
+out_unlock:
+ up_read(&pvfs2_inode->xattr_sem);
+ return ret;
+}
+
+int pvfs2_xattr_set_default(struct dentry *dentry,
+ const char *name,
+ const void *buffer,
+ size_t size,
+ int flags,
+ int handler_flags)
+{
+ return pvfs2_inode_setxattr(dentry->d_inode,
+ PVFS2_XATTR_NAME_DEFAULT_PREFIX,
+ name,
+ buffer,
+ size,
+ flags);
+}
+
+int pvfs2_xattr_get_default(struct dentry *dentry,
+ const char *name,
+ void *buffer,
+ size_t size,
+ int handler_flags)
+{
+ return pvfs2_inode_getxattr(dentry->d_inode,
+ PVFS2_XATTR_NAME_DEFAULT_PREFIX,
+ name,
+ buffer,
+ size);
+
+}
+
+static int pvfs2_xattr_set_trusted(struct dentry *dentry,
+ const char *name,
+ const void *buffer,
+ size_t size,
+ int flags,
+ int handler_flags)
+{
+ return pvfs2_inode_setxattr(dentry->d_inode,
+ PVFS2_XATTR_NAME_TRUSTED_PREFIX,
+ name,
+ buffer,
+ size,
+ flags);
+}
+
+static int pvfs2_xattr_get_trusted(struct dentry *dentry,
+ const char *name,
+ void *buffer,
+ size_t size,
+ int handler_flags)
+{
+ return pvfs2_inode_getxattr(dentry->d_inode,
+ PVFS2_XATTR_NAME_TRUSTED_PREFIX,
+ name,
+ buffer,
+ size);
+}
+
/* Routes "trusted."-prefixed xattr requests to the PVFS2 helpers. */
static struct xattr_handler pvfs2_xattr_trusted_handler = {
	.prefix = PVFS2_XATTR_NAME_TRUSTED_PREFIX,
	.get = pvfs2_xattr_get_trusted,
	.set = pvfs2_xattr_set_trusted,
};
+
/* Catch-all handler for xattr keys not claimed by a more specific prefix. */
static struct xattr_handler pvfs2_xattr_default_handler = {
	/*
	 * NOTE: this is set to be the empty string.
	 * so that all un-prefixed xattrs keys get caught
	 * here!
	 */
	.prefix = PVFS2_XATTR_NAME_DEFAULT_PREFIX,
	.get = pvfs2_xattr_get_default,
	.set = pvfs2_xattr_set_default,
};
+
/*
 * Handler table wired into the superblock; entries are tried in order
 * and the NULL entry terminates the list for the VFS.
 */
const struct xattr_handler *pvfs2_xattr_handlers[] = {
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
	&pvfs2_xattr_trusted_handler,
	&pvfs2_xattr_default_handler,
	NULL
};