@@ -14,7 +14,8 @@ target_core_mod-y := target_core_configfs.o \
target_core_ua.o \
target_core_rd.o \
target_core_stat.o \
- target_core_xcopy.o
+ target_core_xcopy.o \
+ target_core_cluster.o
obj-$(CONFIG_TARGET_CORE) += target_core_mod.o
new file mode 100644
@@ -0,0 +1,89 @@
+/*
+ * Target core cluster API
+ *
+ * Copyright (C) 2015 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/list.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_cluster.h>
+
+static LIST_HEAD(cluster_api_list);
+static DEFINE_MUTEX(cluster_api_mutex);
+
+int core_cluster_api_register(struct se_cluster_api *api)
+{
+ struct se_cluster_api *a;
+
+ INIT_LIST_HEAD(&api->api_list);
+
+ mutex_lock(&cluster_api_mutex);
+ list_for_each_entry(a, &cluster_api_list, api_list) {
+ if (!strcmp(a->name, api->name)) {
+			pr_err("%p is already registered with duplicate name %s, unable to process request\n",
+			       a, a->name);
+ mutex_unlock(&cluster_api_mutex);
+ return -EEXIST;
+ }
+ }
+
+ list_add_tail(&api->api_list, &cluster_api_list);
+ mutex_unlock(&cluster_api_mutex);
+ return 0;
+}
+EXPORT_SYMBOL(core_cluster_api_register);
+
+void core_cluster_api_unregister(struct se_cluster_api *api)
+{
+ mutex_lock(&cluster_api_mutex);
+ list_del(&api->api_list);
+ mutex_unlock(&cluster_api_mutex);
+}
+EXPORT_SYMBOL(core_cluster_api_unregister);
+
+int core_cluster_attach(struct se_device *dev, char *name)
+{
+ struct se_cluster_api *api;
+ int ret = -EINVAL;
+
+ mutex_lock(&cluster_api_mutex);
+ list_for_each_entry(api, &cluster_api_list, api_list) {
+ if (!strcmp(api->name, name)) {
+			ret = api->attach_device(dev);
+			if (!ret) {
+				if (!try_module_get(api->owner)) {
+					api->detach_device(dev);
+					ret = -EBUSY;
+				} else {
+					/* Publish only after taking a module reference. */
+					dev->cluster_api = api;
+				}
+			}
+ break;
+ }
+ }
+ mutex_unlock(&cluster_api_mutex);
+ return ret;
+}
+
+void core_cluster_detach(struct se_device *dev)
+{
+ mutex_lock(&cluster_api_mutex);
+ if (dev->cluster_api) {
+ dev->cluster_api->detach_device(dev);
+ module_put(dev->cluster_api->owner);
+ dev->cluster_api = NULL;
+ }
+ mutex_unlock(&cluster_api_mutex);
+}
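
Not part of the patch itself: a minimal sketch of how a cluster backend module could plug into the registration API above. All of the example_* names are hypothetical; only struct se_cluster_api, its callbacks, dev->cluster_dev_data, and core_cluster_api_register()/core_cluster_api_unregister() come from this series.

#include <linux/module.h>

#include <target/target_core_base.h>
#include <target/target_core_cluster.h>

static int example_attach_device(struct se_device *dev)
{
	/* Backend-private state can be hung off dev->cluster_dev_data. */
	dev->cluster_dev_data = NULL;
	return 0;
}

static int example_detach_device(struct se_device *dev)
{
	dev->cluster_dev_data = NULL;
	return 0;
}

static int example_reset_device(struct se_device *dev, u32 timeout)
{
	/* Stop and clean up commands on all nodes within @timeout seconds. */
	return 0;
}

static struct se_cluster_api example_cluster_api = {
	.name		= "example",
	.owner		= THIS_MODULE,
	.attach_device	= example_attach_device,
	.detach_device	= example_detach_device,
	.reset_device	= example_reset_device,
};

static int __init example_cluster_module_init(void)
{
	return core_cluster_api_register(&example_cluster_api);
}

static void __exit example_cluster_module_exit(void)
{
	core_cluster_api_unregister(&example_cluster_api);
}

module_init(example_cluster_module_init);
module_exit(example_cluster_module_exit);
MODULE_LICENSE("GPL");

Loading such a module makes "example" selectable through the configfs attribute added below.
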
@@ -40,6 +40,7 @@
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
+#include <target/target_core_cluster.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
@@ -1762,6 +1763,42 @@ static struct target_core_configfs_attribute target_core_attr_dev_lba_map = {
.store = target_core_store_dev_lba_map,
};
+static ssize_t target_core_store_dev_cluster_api(
+ void *p,
+ const char *page,
+ size_t count)
+{
+ struct se_device *dev = p;
+ char name[16], newline;
+ int ret;
+
+ memset(name, 0, sizeof(name));
+	ret = sscanf(page, "%15s %c", name, &newline);
+	if (ret < 1 || !strlen(name))
+		return -EINVAL;
+
+ ret = core_cluster_attach(dev, name);
+ if (ret)
+ return ret;
+ return count;
+}
+
+static ssize_t target_core_show_dev_cluster_api(void *p, char *page)
+{
+ struct se_device *dev = p;
+
+ return snprintf(page, PAGE_SIZE, "%s\n", dev->cluster_api ?
+ dev->cluster_api->name : "none");
+}
+
+static struct target_core_configfs_attribute target_core_attr_dev_cluster_api = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "cluster_api",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = target_core_show_dev_cluster_api,
+ .store = target_core_store_dev_cluster_api,
+};
+
static struct configfs_attribute *target_core_dev_attrs[] = {
&target_core_attr_dev_info.attr,
&target_core_attr_dev_control.attr,
@@ -1770,6 +1807,7 @@ static struct configfs_attribute *target_core_dev_attrs[] = {
&target_core_attr_dev_enable.attr,
&target_core_attr_dev_alua_lu_gp.attr,
&target_core_attr_dev_lba_map.attr,
+ &target_core_attr_dev_cluster_api.attr,
NULL,
};
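
With the attribute wired into target_core_dev_attrs[], userspace attaches one of the registered cluster APIs by writing its name to the device's cluster_api file in the configfs core device directory (typically /sys/kernel/config/target/core/$HBA/$DEV/cluster_api); reading the file back reports the attached API's name, or "none" if nothing is attached.
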
@@ -40,6 +40,7 @@
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
+#include <target/target_core_cluster.h>
#include <target/target_core_fabric.h>
#include "target_core_internal.h"
@@ -1638,6 +1639,9 @@ void target_free_device(struct se_device *dev)
WARN_ON(!list_empty(&dev->dev_sep_list));
+ if (dev->cluster_api)
+ core_cluster_detach(dev);
+
if (dev->dev_flags & DF_CONFIGURED) {
destroy_workqueue(dev->tmr_wq);
@@ -818,6 +818,9 @@ struct se_device {
struct se_lun xcopy_lun;
/* Protection Information */
int prot_length;
+	/* cluster API template and backend per-device data */
+ struct se_cluster_api *cluster_api;
+ void *cluster_dev_data;
};
struct se_hba {
new file mode 100644
@@ -0,0 +1,33 @@
+#ifndef TARGET_CORE_CLUSTER_H
+#define TARGET_CORE_CLUSTER_H
+
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/blkdev.h>
+
+struct se_device;
+
+struct se_cluster_api {
+ char *name;
+ struct module *owner;
+ struct list_head api_list;
+
+ int (*attach_device)(struct se_device *dev);
+ int (*detach_device)(struct se_device *dev);
+ /**
+	 * reset_device - stop and clean up running commands on all nodes
+	 * @dev: se_device to execute the reset for
+	 * @timeout: timeout for the reset operation, in seconds
+	 *
+	 * Return 0 on success or a -Exyz error code. If the operation
+	 * takes longer than @timeout seconds, -ETIMEDOUT should be returned.
+ */
+ int (*reset_device)(struct se_device *dev, u32 timeout);
+};
+
+extern int core_cluster_api_register(struct se_cluster_api *);
+extern void core_cluster_api_unregister(struct se_cluster_api *api);
+extern int core_cluster_attach(struct se_device *dev, char *name);
+extern void core_cluster_detach(struct se_device *dev);
+
+#endif /* TARGET_CORE_CLUSTER_H */
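
Also not part of the patch: a sketch of how core code might drive the reset_device() hook once a cluster API has been attached. The helper name and the 30-second timeout are made up for illustration; this section does not add a caller for reset_device() itself.

static int example_cluster_lun_reset(struct se_device *dev)
{
	/* Only devices with an attached cluster API can do a cluster-wide reset. */
	if (!dev->cluster_api || !dev->cluster_api->reset_device)
		return -ENOSYS;

	/* Ask every node to stop and clean up commands for this device. */
	return dev->cluster_api->reset_device(dev, 30);
}
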