Message ID: 1468947453-5433-6-git-send-email-prasanna.kalever@redhat.com (mailing list archive)
State: New, archived
Prasanna Kumar Kalever <prasanna.kalever@redhat.com> writes: > This patch adds a way to specify multiple volfile servers to the gluster > block backend of QEMU with tcp|rdma transport types and their port numbers. > > Problem: > > Currently VM Image on gluster volume is specified like this: > > file=gluster[+tcp]://host[:port]/testvol/a.img > > Say we have three hosts in a trusted pool with replica 3 volume in action. > When the host mentioned in the command above goes down for some reason, > the other two hosts are still available. But there's currently no way > to tell QEMU about them. > > Solution: > > New way of specifying VM Image on gluster volume with volfile servers: > (We still support old syntax to maintain backward compatibility) > > Basic command line syntax looks like: > > Pattern I: > -drive driver=gluster, > volume=testvol,path=/path/a.raw,[debug=N,] > server.0.type=tcp, > server.0.host=1.2.3.4, > server.0.port=24007, > server.1.type=unix, > server.1.socket=/path/socketfile > > Pattern II: > 'json:{"driver":"qcow2","file":{"driver":"gluster", > "volume":"testvol","path":"/path/a.qcow2",["debug":N,] > "server":[{hostinfo_1}, ...{hostinfo_N}]}}' > > driver => 'gluster' (protocol name) > volume => name of gluster volume where our VM image resides > path => absolute path of image in gluster volume > [debug] => libgfapi loglevel [(0 - 9) default 4 -> Error] > > {hostinfo} => {{type:"tcp",host:"1.2.3.4"[,port=24007]}, > {type:"unix",socket:"/path/sockfile"}} > > type => transport type used to connect to gluster management daemon, > it can be tcp|unix > host => host address (hostname/ipv4/ipv6 addresses/socket path) > port => port number on which glusterd is listening. > socket => path to socket file > > Examples: > 1. > -drive driver=qcow2,file.driver=gluster, > file.volume=testvol,file.path=/path/a.qcow2,file.debug=9, > file.server.0.type=tcp, > file.server.0.host=1.2.3.4, > file.server.0.port=24007, > file.server.1.type=tcp, > file.server.1.socket=/var/run/glusterd.socket > 2. > 'json:{"driver":"qcow2","file":{"driver":"gluster","volume":"testvol", > "path":"/path/a.qcow2","debug":9,"server": > [{type:"tcp",host:"1.2.3.4",port=24007}, > {type:"unix",socket:"/var/run/glusterd.socket"}] } }' I tried using this as argument for -drive, like this: $ qemu-system-x86_64 -drive 'json:{"driver":"qcow2","file":{"driver":"gluster", ... but I get a "Must specify either driver or file" error. What am I doing wrong? > This patch gives a mechanism to provide all the server addresses, which are in > replica set, so in case host1 is down VM can still boot from any of the > active hosts. 
> > This is equivalent to the backup-volfile-servers option supported by > mount.glusterfs (FUSE way of mounting gluster volume) > > credits: sincere thanks to all the supporters > > Signed-off-by: Prasanna Kumar Kalever <prasanna.kalever@redhat.com> > --- > block/gluster.c | 397 +++++++++++++++++++++++++++++++++++++++++++++------ > qapi/block-core.json | 2 +- > 2 files changed, 358 insertions(+), 41 deletions(-) > > diff --git a/block/gluster.c b/block/gluster.c > index c4ca59e..0524789 100644 > --- a/block/gluster.c > +++ b/block/gluster.c > @@ -11,15 +11,27 @@ > #include <glusterfs/api/glfs.h> > #include "block/block_int.h" > #include "qapi/error.h" > +#include "qapi/qmp/qerror.h" > #include "qemu/uri.h" > #include "qemu/error-report.h" > > #define GLUSTER_OPT_FILENAME "filename" > +#define GLUSTER_OPT_VOLUME "volume" > +#define GLUSTER_OPT_PATH "path" > +#define GLUSTER_OPT_TYPE "type" > +#define GLUSTER_OPT_SERVER_PATTERN "server." > +#define GLUSTER_OPT_HOST "host" > +#define GLUSTER_OPT_PORT "port" > +#define GLUSTER_OPT_TO "to" > +#define GLUSTER_OPT_IPV4 "ipv4" > +#define GLUSTER_OPT_IPV6 "ipv6" > +#define GLUSTER_OPT_SOCKET "socket" > #define GLUSTER_OPT_DEBUG "debug" > #define GLUSTER_DEFAULT_PORT 24007 > #define GLUSTER_DEBUG_DEFAULT 4 > #define GLUSTER_DEBUG_MAX 9 > > +#define GERR_INDEX_HINT "hint: check in 'server' array index '%d'\n" > > typedef struct GlusterAIOCB { > int64_t size; > @@ -83,6 +95,92 @@ static QemuOptsList runtime_opts = { > }, > }; > > +static QemuOptsList runtime_json_opts = { > + .name = "gluster_json", > + .head = QTAILQ_HEAD_INITIALIZER(runtime_json_opts.head), > + .desc = { > + { > + .name = GLUSTER_OPT_VOLUME, > + .type = QEMU_OPT_STRING, > + .help = "name of gluster volume where VM image resides", > + }, > + { > + .name = GLUSTER_OPT_PATH, > + .type = QEMU_OPT_STRING, > + .help = "absolute path to image file in gluster volume", > + }, > + { > + .name = GLUSTER_OPT_DEBUG, > + .type = QEMU_OPT_NUMBER, > + .help = "Gluster log level, valid range is 0-9", > + }, > + { /* end of list */ } > + }, > +}; > + > +static QemuOptsList runtime_type_opts = { > + .name = "gluster_type", > + .head = QTAILQ_HEAD_INITIALIZER(runtime_type_opts.head), > + .desc = { > + { > + .name = GLUSTER_OPT_TYPE, > + .type = QEMU_OPT_STRING, > + .help = "tcp|unix", > + }, > + { /* end of list */ } > + }, > +}; > + > +static QemuOptsList runtime_unix_opts = { > + .name = "gluster_unix", > + .head = QTAILQ_HEAD_INITIALIZER(runtime_unix_opts.head), > + .desc = { > + { > + .name = GLUSTER_OPT_SOCKET, > + .type = QEMU_OPT_STRING, > + .help = "socket file path)", > + }, > + { /* end of list */ } > + }, > +}; > + > +static QemuOptsList runtime_tcp_opts = { > + .name = "gluster_tcp", > + .head = QTAILQ_HEAD_INITIALIZER(runtime_tcp_opts.head), > + .desc = { > + { > + .name = GLUSTER_OPT_TYPE, > + .type = QEMU_OPT_STRING, > + .help = "tcp|unix", > + }, > + { > + .name = GLUSTER_OPT_HOST, > + .type = QEMU_OPT_STRING, > + .help = "host address (hostname/ipv4/ipv6 addresses)", > + }, > + { > + .name = GLUSTER_OPT_PORT, > + .type = QEMU_OPT_NUMBER, > + .help = "port number on which glusterd is listening (default 24007)", > + }, > + { > + .name = "to", > + .type = QEMU_OPT_NUMBER, > + .help = "max port number, not supported by gluster", > + }, > + { > + .name = "ipv4", > + .type = QEMU_OPT_BOOL, > + .help = "ipv4 bool value, not supported by gluster", > + }, > + { > + .name = "ipv6", > + .type = QEMU_OPT_BOOL, > + .help = "ipv6 bool value, not supported by gluster", > + }, > + { /* end of list */ 
} > + }, > +}; > > static int parse_volume_options(BlockdevOptionsGluster *gconf, char *path) > { > @@ -155,7 +253,8 @@ static int qemu_gluster_parse_uri(BlockdevOptionsGluster *gconf, > return -EINVAL; > } > > - gconf->server = gsconf = g_new0(GlusterServer, 1); > + gconf->server = g_new0(GlusterServerList, 1); > + gconf->server->value = gsconf = g_new0(GlusterServer, 1); > > /* transport */ > if (!uri->scheme || !strcmp(uri->scheme, "gluster")) { > @@ -212,39 +311,34 @@ out: > return ret; > } > > -static struct glfs *qemu_gluster_init(BlockdevOptionsGluster *gconf, > - const char *filename, Error **errp) > +static struct glfs *qemu_gluster_glfs_init(BlockdevOptionsGluster *gconf, > + Error **errp) > { > - struct glfs *glfs = NULL; > + struct glfs *glfs; > int ret; > int old_errno; > - > - ret = qemu_gluster_parse_uri(gconf, filename); > - if (ret < 0) { > - error_setg(errp, "Invalid URI"); > - error_append_hint(errp, "Usage: file=gluster[+transport]://" > - "[host[:port]]/volume/path[?socket=...]\n"); > - errno = -ret; > - goto out; > - } > + GlusterServerList *server; > > glfs = glfs_new(gconf->volume); > if (!glfs) { > goto out; > } > > - if (gconf->server->type == GLUSTER_TRANSPORT_UNIX) { > - ret = glfs_set_volfile_server(glfs, > - GlusterTransport_lookup[gconf->server->type], > - gconf->server->u.q_unix.path, 0); > - } else { > - ret = glfs_set_volfile_server(glfs, > - GlusterTransport_lookup[gconf->server->type], > - gconf->server->u.tcp.host, > - atoi(gconf->server->u.tcp.port)); > - } > - if (ret < 0) { > - goto out; > + for (server = gconf->server; server; server = server->next) { > + if (server->value->type == GLUSTER_TRANSPORT_UNIX) { > + ret = glfs_set_volfile_server(glfs, > + GlusterTransport_lookup[server->value->type], > + server->value->u.q_unix.path, 0); > + } else { > + ret = glfs_set_volfile_server(glfs, > + GlusterTransport_lookup[server->value->type], > + server->value->u.tcp.host, > + atoi(server->value->u.tcp.port)); > + } > + > + if (ret < 0) { > + goto out; > + } > } > > ret = glfs_set_logging(glfs, "-", gconf->debug_level); > @@ -254,18 +348,21 @@ static struct glfs *qemu_gluster_init(BlockdevOptionsGluster *gconf, > > ret = glfs_init(glfs); > if (ret) { > - if (gconf->server->type == GLUSTER_TRANSPORT_UNIX) { > - error_setg(errp, > - "Gluster connection for volume %s, path %s failed on " > - "socket %s ", gconf->volume, gconf->path, > - gconf->server->u.q_unix.path); > - } else { > - error_setg(errp, > - "Gluster connection for volume %s, path %s failed on " > - "host %s and port %s ", gconf->volume, gconf->path, > - gconf->server->u.tcp.host, gconf->server->u.tcp.port); > + error_setg(errp, "Gluster connection for volume %s, path %s failed" > + " to connect", gconf->volume, gconf->path); > + for (server = gconf->server; server; server = server->next) { > + if (server->value->type == GLUSTER_TRANSPORT_UNIX) { > + error_append_hint(errp, "hint: failed on socket %s ", > + server->value->u.q_unix.path); > + } else { > + error_append_hint(errp, "hint: failed on host %s and port %s ", > + server->value->u.tcp.host, > + server->value->u.tcp.port); > + } > } > > + error_append_hint(errp, "Please refer to gluster logs for more info\n"); > + > /* glfs_init sometimes doesn't set errno although docs suggest that */ > if (errno == 0) { > errno = EINVAL; > @@ -284,6 +381,226 @@ out: > return NULL; > } > > +static int qapi_enum_parse(const char *opt) > +{ > + int i; > + > + if (!opt) { > + return GLUSTER_TRANSPORT__MAX; > + } > + > + for (i = 0; i < GLUSTER_TRANSPORT__MAX; 
i++) { > + if (!strcmp(opt, GlusterTransport_lookup[i])) { > + return i; > + } > + } > + > + return i; > +} > + > +/* > + * Convert the json formatted command line into qapi. > +*/ > +static int qemu_gluster_parse_json(BlockdevOptionsGluster *gconf, > + QDict *options, Error **errp) > +{ > + QemuOpts *opts; > + GlusterServer *gsconf; > + GlusterServerList *curr = NULL; > + QDict *backing_options = NULL; > + Error *local_err = NULL; > + char *str = NULL; > + const char *ptr; > + size_t num_servers; > + int i; > + > + /* create opts info from runtime_json_opts list */ > + opts = qemu_opts_create(&runtime_json_opts, NULL, 0, &error_abort); > + qemu_opts_absorb_qdict(opts, options, &local_err); > + if (local_err) { > + goto out; > + } > + > + num_servers = qdict_array_entries(options, GLUSTER_OPT_SERVER_PATTERN); > + if (num_servers < 1) { > + error_setg(&local_err, QERR_MISSING_PARAMETER, "server"); > + goto out; > + } > + > + ptr = qemu_opt_get(opts, GLUSTER_OPT_VOLUME); > + if (!ptr) { > + error_setg(&local_err, QERR_MISSING_PARAMETER, GLUSTER_OPT_VOLUME); > + goto out; > + } > + gconf->volume = g_strdup(ptr); > + > + ptr = qemu_opt_get(opts, GLUSTER_OPT_PATH); > + if (!ptr) { > + error_setg(&local_err, QERR_MISSING_PARAMETER, GLUSTER_OPT_PATH); > + goto out; > + } > + gconf->path = g_strdup(ptr); > + qemu_opts_del(opts); > + > + for (i = 0; i < num_servers; i++) { > + str = g_strdup_printf(GLUSTER_OPT_SERVER_PATTERN"%d.", i); > + qdict_extract_subqdict(options, &backing_options, str); > + > + /* create opts info from runtime_type_opts list */ > + opts = qemu_opts_create(&runtime_type_opts, NULL, 0, &error_abort); > + qemu_opts_absorb_qdict(opts, backing_options, &local_err); > + if (local_err) { > + goto out; > + } > + > + ptr = qemu_opt_get(opts, GLUSTER_OPT_TYPE); > + gsconf = g_new0(GlusterServer, 1); > + gsconf->type = qapi_enum_parse(ptr); > + if (!ptr) { > + error_setg(&local_err, QERR_MISSING_PARAMETER, GLUSTER_OPT_TYPE); > + error_append_hint(&local_err, GERR_INDEX_HINT, i); > + goto out; > + > + } > + if (gsconf->type == GLUSTER_TRANSPORT__MAX) { > + error_setg(&local_err, QERR_INVALID_PARAMETER_VALUE, > + GLUSTER_OPT_TYPE, "tcp or unix"); > + error_append_hint(&local_err, GERR_INDEX_HINT, i); > + goto out; > + } > + qemu_opts_del(opts); > + > + if (gsconf->type == GLUSTER_TRANSPORT_TCP) { > + /* create opts info from runtime_tcp_opts list */ > + opts = qemu_opts_create(&runtime_tcp_opts, NULL, 0, &error_abort); > + qemu_opts_absorb_qdict(opts, backing_options, &local_err); > + if (local_err) { > + goto out; > + } > + > + ptr = qemu_opt_get(opts, GLUSTER_OPT_HOST); > + if (!ptr) { > + error_setg(&local_err, QERR_MISSING_PARAMETER, > + GLUSTER_OPT_HOST); > + error_append_hint(&local_err, GERR_INDEX_HINT, i); > + goto out; > + } > + gsconf->u.tcp.host = g_strdup(ptr); > + ptr = qemu_opt_get(opts, GLUSTER_OPT_PORT); > + if (!ptr) { > + error_setg(&local_err, QERR_MISSING_PARAMETER, > + GLUSTER_OPT_PORT); > + error_append_hint(&local_err, GERR_INDEX_HINT, i); > + goto out; > + } > + gsconf->u.tcp.port = g_strdup(ptr); > + > + /* defend for unsupported fields in InetSocketAddress, > + * i.e. 
@ipv4, @ipv6 and @to > + */ > + ptr = qemu_opt_get(opts, GLUSTER_OPT_TO); > + if (ptr) { > + gsconf->u.tcp.has_to = true; > + } > + ptr = qemu_opt_get(opts, GLUSTER_OPT_IPV4); > + if (ptr) { > + gsconf->u.tcp.has_ipv4 = true; > + } > + ptr = qemu_opt_get(opts, GLUSTER_OPT_IPV6); > + if (ptr) { > + gsconf->u.tcp.has_ipv6 = true; > + } > + if (gsconf->u.tcp.has_to) { > + error_setg(&local_err, "Parameter 'to' not supported"); > + goto out; > + } > + if (gsconf->u.tcp.has_ipv4 || gsconf->u.tcp.has_ipv6) { > + error_setg(&local_err, "Parameters 'ipv4/ipv6' not supported"); > + goto out; > + } > + qemu_opts_del(opts); > + } else { > + /* create opts info from runtime_unix_opts list */ > + opts = qemu_opts_create(&runtime_unix_opts, NULL, 0, &error_abort); > + qemu_opts_absorb_qdict(opts, backing_options, &local_err); > + if (local_err) { > + goto out; > + } > + > + ptr = qemu_opt_get(opts, GLUSTER_OPT_SOCKET); > + if (!ptr) { > + error_setg(&local_err, QERR_MISSING_PARAMETER, > + GLUSTER_OPT_SOCKET); > + error_append_hint(&local_err, GERR_INDEX_HINT, i); > + goto out; > + } > + gsconf->u.q_unix.path = g_strdup(ptr); > + qemu_opts_del(opts); > + } > + > + if (gconf->server == NULL) { > + gconf->server = g_new0(GlusterServerList, 1); > + gconf->server->value = gsconf; > + curr = gconf->server; > + } else { > + curr->next = g_new0(GlusterServerList, 1); > + curr->next->value = gsconf; > + curr = curr->next; > + } > + > + qdict_del(backing_options, str); > + g_free(str); > + str = NULL; > + } > + > + return 0; > + > +out: > + error_propagate(errp, local_err); > + qemu_opts_del(opts); > + if (str) { > + qdict_del(backing_options, str); > + g_free(str); > + } > + errno = EINVAL; > + return -errno; > +} > + > +static struct glfs *qemu_gluster_init(BlockdevOptionsGluster *gconf, > + const char *filename, > + QDict *options, Error **errp) > +{ > + int ret; > + if (filename) { > + ret = qemu_gluster_parse_uri(gconf, filename); > + if (ret < 0) { > + error_setg(errp, "invalid URI"); > + error_append_hint(errp, "Usage: file=gluster[+transport]://" > + "[host[:port]]/volume/path[?socket=...]\n"); > + errno = -ret; > + return NULL; > + } > + } else { > + ret = qemu_gluster_parse_json(gconf, options, errp); > + if (ret < 0) { > + error_append_hint(errp, "Usage: " > + "-drive driver=qcow2,file.driver=gluster," > + "file.volume=testvol,file.path=/path/a.qcow2" > + "[,file.debug=9],file.server.0.type=tcp," > + "file.server.0.host=1.2.3.4," > + "file.server.0.port=24007," > + "file.server.1.transport=unix," > + "file.server.1.socket=/var/run/glusterd.socket ..." > + "\n"); > + errno = -ret; > + return NULL; > + } > + > + } > + > + return qemu_gluster_glfs_init(gconf, errp); > +} If @filename is non-null, this function doesn't touch @options. Perhaps the function should be split into one that takes just @filename and one that takes just @options. If yes, followup patch. 
> + > static void qemu_gluster_complete_aio(void *opaque) > { > GlusterAIOCB *acb = (GlusterAIOCB *)opaque; > @@ -383,7 +700,7 @@ static int qemu_gluster_open(BlockDriverState *bs, QDict *options, > gconf = g_new0(BlockdevOptionsGluster, 1); > gconf->debug_level = s->debug_level; > gconf->has_debug_level = true; > - s->glfs = qemu_gluster_init(gconf, filename, errp); > + s->glfs = qemu_gluster_init(gconf, filename, options, errp); > if (!s->glfs) { > ret = -errno; > goto out; > @@ -454,7 +771,7 @@ static int qemu_gluster_reopen_prepare(BDRVReopenState *state, > gconf = g_new0(BlockdevOptionsGluster, 1); > gconf->debug_level = s->debug_level; > gconf->has_debug_level = true; > - reop_s->glfs = qemu_gluster_init(gconf, state->bs->filename, errp); > + reop_s->glfs = qemu_gluster_init(gconf, state->bs->filename, NULL, errp); > if (reop_s->glfs == NULL) { > ret = -errno; > goto exit; > @@ -601,7 +918,7 @@ static int qemu_gluster_create(const char *filename, > } > gconf->has_debug_level = true; > > - glfs = qemu_gluster_init(gconf, filename, errp); > + glfs = qemu_gluster_init(gconf, filename, NULL, errp); > if (!glfs) { > ret = -errno; > goto out; > @@ -981,7 +1298,7 @@ static BlockDriver bdrv_gluster = { > .format_name = "gluster", > .protocol_name = "gluster", > .instance_size = sizeof(BDRVGlusterState), > - .bdrv_needs_filename = true, > + .bdrv_needs_filename = false, > .bdrv_file_open = qemu_gluster_open, > .bdrv_reopen_prepare = qemu_gluster_reopen_prepare, > .bdrv_reopen_commit = qemu_gluster_reopen_commit, > @@ -1009,7 +1326,7 @@ static BlockDriver bdrv_gluster_tcp = { > .format_name = "gluster", > .protocol_name = "gluster+tcp", > .instance_size = sizeof(BDRVGlusterState), > - .bdrv_needs_filename = true, > + .bdrv_needs_filename = false, > .bdrv_file_open = qemu_gluster_open, > .bdrv_reopen_prepare = qemu_gluster_reopen_prepare, > .bdrv_reopen_commit = qemu_gluster_reopen_commit, > diff --git a/qapi/block-core.json b/qapi/block-core.json > index 1fa0674..5f8179b 100644 > --- a/qapi/block-core.json > +++ b/qapi/block-core.json > @@ -2111,7 +2111,7 @@ > { 'struct': 'BlockdevOptionsGluster', > 'data': { 'volume': 'str', > 'path': 'str', > - 'server': 'GlusterServer', > + 'server': ['GlusterServer'], > '*debug_level': 'int' } } > > ## Reviewed-by: Markus Armbruster <armbru@redhat.com>
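For illustration, the split suggested above could look roughly like this; qemu_gluster_parse() is a hypothetical name, and the two parsers it dispatches to are the ones this patch already defines:

    /* Sketch only: a single parse entry point so qemu_gluster_init()
     * no longer has to care which input form it was given. */
    static int qemu_gluster_parse(BlockdevOptionsGluster *gconf,
                                  const char *filename,
                                  QDict *options, Error **errp)
    {
        if (filename) {
            /* legacy URI form: gluster[+transport]://[host[:port]]/volume/path */
            int ret = qemu_gluster_parse_uri(gconf, filename);
            if (ret < 0) {
                error_setg(errp, "invalid URI");
                error_append_hint(errp, "Usage: file=gluster[+transport]://"
                                  "[host[:port]]/volume/path[?socket=...]\n");
            }
            return ret;
        }
        /* new form: volume/path plus a server.N.* array in the options QDict */
        return qemu_gluster_parse_json(gconf, options, errp);
    }

qemu_gluster_init() would then shrink to calling this helper followed by qemu_gluster_glfs_init().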
One more... Prasanna Kumar Kalever <prasanna.kalever@redhat.com> writes: > This patch adds a way to specify multiple volfile servers to the gluster > block backend of QEMU with tcp|rdma transport types and their port numbers. > > Problem: > > Currently VM Image on gluster volume is specified like this: > > file=gluster[+tcp]://host[:port]/testvol/a.img > > Say we have three hosts in a trusted pool with replica 3 volume in action. > When the host mentioned in the command above goes down for some reason, > the other two hosts are still available. But there's currently no way > to tell QEMU about them. > > Solution: > > New way of specifying VM Image on gluster volume with volfile servers: > (We still support old syntax to maintain backward compatibility) > > Basic command line syntax looks like: > > Pattern I: > -drive driver=gluster, > volume=testvol,path=/path/a.raw,[debug=N,] > server.0.type=tcp, > server.0.host=1.2.3.4, > server.0.port=24007, > server.1.type=unix, > server.1.socket=/path/socketfile > > Pattern II: > 'json:{"driver":"qcow2","file":{"driver":"gluster", > "volume":"testvol","path":"/path/a.qcow2",["debug":N,] > "server":[{hostinfo_1}, ...{hostinfo_N}]}}' > > driver => 'gluster' (protocol name) > volume => name of gluster volume where our VM image resides > path => absolute path of image in gluster volume > [debug] => libgfapi loglevel [(0 - 9) default 4 -> Error] > > {hostinfo} => {{type:"tcp",host:"1.2.3.4"[,port=24007]}, > {type:"unix",socket:"/path/sockfile"}} > > type => transport type used to connect to gluster management daemon, > it can be tcp|unix > host => host address (hostname/ipv4/ipv6 addresses/socket path) > port => port number on which glusterd is listening. > socket => path to socket file > > Examples: > 1. > -drive driver=qcow2,file.driver=gluster, > file.volume=testvol,file.path=/path/a.qcow2,file.debug=9, > file.server.0.type=tcp, > file.server.0.host=1.2.3.4, > file.server.0.port=24007, > file.server.1.type=tcp, > file.server.1.socket=/var/run/glusterd.socket > 2. > 'json:{"driver":"qcow2","file":{"driver":"gluster","volume":"testvol", > "path":"/path/a.qcow2","debug":9,"server": > [{type:"tcp",host:"1.2.3.4",port=24007}, > {type:"unix",socket:"/var/run/glusterd.socket"}] } }' This example is 1. confusing, and 2. wrong :) It's wrong, because several member names lack quotes. Also, the value of port should be a string. It confused me, because I didn't realize that this is the non-option image argument. Two ways to fix that. One, add context: $ qemu-system-x86_64 'json:{"file":{"driver":"gluster","volume":"sample","path":"/fedora23.qcow2","server":[{"type":"tcp","host":"192.168.1.220","port":"24007"},{"type":"unix","socket":"/var/run/glusterd.socket"}]},"driver":"qcow2"}' Two, use -drive: -drive 'file=json:{"file":{"driver":"gluster",,"volume":"sample",,"path":"/fedora23.qcow2",,"server":[{"type":"tcp",,"host":"192.168.1.220",,"port":"24007"},,{"type":"unix",,"socket":"/var/run/glusterd.socket"}]},,"driver":"qcow2"}' Exquisitely ugly due to the necessary comma escaping. Hopefully, the maintainer can touch this up on commit. > This patch gives a mechanism to provide all the server addresses, which are in > replica set, so in case host1 is down VM can still boot from any of the > active hosts. > > This is equivalent to the backup-volfile-servers option supported by > mount.glusterfs (FUSE way of mounting gluster volume) > > credits: sincere thanks to all the supporters > > Signed-off-by: Prasanna Kumar Kalever <prasanna.kalever@redhat.com> R-by stands.
On Wed, Jul 20, 2016 at 12:03 AM, Markus Armbruster <armbru@redhat.com> wrote: > > One more... > > Prasanna Kumar Kalever <prasanna.kalever@redhat.com> writes: > > > This patch adds a way to specify multiple volfile servers to the gluster > > block backend of QEMU with tcp|rdma transport types and their port numbers. > > > > Problem: > > > > Currently VM Image on gluster volume is specified like this: > > > > file=gluster[+tcp]://host[:port]/testvol/a.img > > > > Say we have three hosts in a trusted pool with replica 3 volume in action. > > When the host mentioned in the command above goes down for some reason, > > the other two hosts are still available. But there's currently no way > > to tell QEMU about them. > > > > Solution: > > > > New way of specifying VM Image on gluster volume with volfile servers: > > (We still support old syntax to maintain backward compatibility) > > > > Basic command line syntax looks like: > > > > Pattern I: > > -drive driver=gluster, > > volume=testvol,path=/path/a.raw,[debug=N,] > > server.0.type=tcp, > > server.0.host=1.2.3.4, > > server.0.port=24007, > > server.1.type=unix, > > server.1.socket=/path/socketfile > > > > Pattern II: > > 'json:{"driver":"qcow2","file":{"driver":"gluster", > > "volume":"testvol","path":"/path/a.qcow2",["debug":N,] > > "server":[{hostinfo_1}, ...{hostinfo_N}]}}' > > > > driver => 'gluster' (protocol name) > > volume => name of gluster volume where our VM image resides > > path => absolute path of image in gluster volume > > [debug] => libgfapi loglevel [(0 - 9) default 4 -> Error] > > > > {hostinfo} => {{type:"tcp",host:"1.2.3.4"[,port=24007]}, > > {type:"unix",socket:"/path/sockfile"}} > > > > type => transport type used to connect to gluster management daemon, > > it can be tcp|unix > > host => host address (hostname/ipv4/ipv6 addresses/socket path) > > port => port number on which glusterd is listening. > > socket => path to socket file > > > > Examples: > > 1. > > -drive driver=qcow2,file.driver=gluster, > > file.volume=testvol,file.path=/path/a.qcow2,file.debug=9, > > file.server.0.type=tcp, > > file.server.0.host=1.2.3.4, > > file.server.0.port=24007, > > file.server.1.type=tcp, > > file.server.1.socket=/var/run/glusterd.socket > > 2. > > 'json:{"driver":"qcow2","file":{"driver":"gluster","volume":"testvol", > > "path":"/path/a.qcow2","debug":9,"server": > > [{type:"tcp",host:"1.2.3.4",port=24007}, > > {type:"unix",socket:"/var/run/glusterd.socket"}] } }' > > This example is 1. confusing, and 2. wrong :) > > It's wrong, because several member names lack quotes. Also, the value > of port should be a string. > > It confused me, because I didn't realize that this is the non-option > image argument. Two ways to fix that. One, add context: > > $ qemu-system-x86_64 'json:{"file":{"driver":"gluster","volume":"sample","path":"/fedora23.qcow2","server":[{"type":"tcp","host":"192.168.1.220","port":"24007"},{"type":"unix","socket":"/var/run/glusterd.socket"}]},"driver":"qcow2"}' > > Two, use -drive: > > -drive 'file=json:{"file":{"driver":"gluster",,"volume":"sample",,"path":"/fedora23.qcow2",,"server":[{"type":"tcp",,"host":"192.168.1.220",,"port":"24007"},,{"type":"unix",,"socket":"/var/run/glusterd.socket"}]},,"driver":"qcow2"}' > > Exquisitely ugly due to the necessary comma escaping. or Examples: 1. 
-drive driver=qcow2,file.driver=gluster, file.volume=testvol,file.path=/path/a.qcow2,file.debug=9, file.server.0.type=tcp, file.server.0.host=1.2.3.4, file.server.0.port=24007, file.server.1.type=unix, file.server.1.socket=/var/run/glusterd.socket 2. 'json:{"driver":"qcow2","file":{"driver":"gluster","volume":"testvol", "path":"/path/a.qcow2","debug":9,"server": [{"type":"tcp","host":"1.2.3.4","port":"24007"}, {"type":"unix","socket":"/var/run/glusterd.socket"} ]}}' I have tested this and they are working now after a small alteration sorry about that -- Prasanna > > Hopefully, the maintainer can touch this up on commit. > > > This patch gives a mechanism to provide all the server addresses, which are in > > replica set, so in case host1 is down VM can still boot from any of the > > active hosts. > > > > This is equivalent to the backup-volfile-servers option supported by > > mount.glusterfs (FUSE way of mounting gluster volume) > > > > credits: sincere thanks to all the supporters > > > > Signed-off-by: Prasanna Kumar Kalever <prasanna.kalever@redhat.com> > > R-by stands.
On Tue, Jul 19, 2016 at 10:27:33PM +0530, Prasanna Kumar Kalever wrote: > This patch adds a way to specify multiple volfile servers to the gluster > block backend of QEMU with tcp|rdma transport types and their port numbers. > > Problem: > > Currently VM Image on gluster volume is specified like this: > > file=gluster[+tcp]://host[:port]/testvol/a.img > > Say we have three hosts in a trusted pool with replica 3 volume in action. > When the host mentioned in the command above goes down for some reason, > the other two hosts are still available. But there's currently no way > to tell QEMU about them. > > Solution: > > New way of specifying VM Image on gluster volume with volfile servers: > (We still support old syntax to maintain backward compatibility) > > Basic command line syntax looks like: > > Pattern I: > -drive driver=gluster, > volume=testvol,path=/path/a.raw,[debug=N,] > server.0.type=tcp, > server.0.host=1.2.3.4, > server.0.port=24007, > server.1.type=unix, > server.1.socket=/path/socketfile > > Pattern II: > 'json:{"driver":"qcow2","file":{"driver":"gluster", > "volume":"testvol","path":"/path/a.qcow2",["debug":N,] > "server":[{hostinfo_1}, ...{hostinfo_N}]}}' > > driver => 'gluster' (protocol name) > volume => name of gluster volume where our VM image resides > path => absolute path of image in gluster volume > [debug] => libgfapi loglevel [(0 - 9) default 4 -> Error] > > {hostinfo} => {{type:"tcp",host:"1.2.3.4"[,port=24007]}, > {type:"unix",socket:"/path/sockfile"}} > > type => transport type used to connect to gluster management daemon, > it can be tcp|unix > host => host address (hostname/ipv4/ipv6 addresses/socket path) > port => port number on which glusterd is listening. > socket => path to socket file > > Examples: > 1. > -drive driver=qcow2,file.driver=gluster, > file.volume=testvol,file.path=/path/a.qcow2,file.debug=9, > file.server.0.type=tcp, > file.server.0.host=1.2.3.4, > file.server.0.port=24007, > file.server.1.type=tcp, > file.server.1.socket=/var/run/glusterd.socket > 2. > 'json:{"driver":"qcow2","file":{"driver":"gluster","volume":"testvol", > "path":"/path/a.qcow2","debug":9,"server": > [{type:"tcp",host:"1.2.3.4",port=24007}, > {type:"unix",socket:"/var/run/glusterd.socket"}] } }' > I will use your revised wording in the commit, from your follow-up email. > This patch gives a mechanism to provide all the server addresses, which are in > replica set, so in case host1 is down VM can still boot from any of the > active hosts. > > This is equivalent to the backup-volfile-servers option supported by > mount.glusterfs (FUSE way of mounting gluster volume) > > credits: sincere thanks to all the supporters > > Signed-off-by: Prasanna Kumar Kalever <prasanna.kalever@redhat.com> > --- > block/gluster.c | 397 +++++++++++++++++++++++++++++++++++++++++++++------ > qapi/block-core.json | 2 +- > 2 files changed, 358 insertions(+), 41 deletions(-) > > diff --git a/block/gluster.c b/block/gluster.c > index c4ca59e..0524789 100644 > --- a/block/gluster.c > +++ b/block/gluster.c > @@ -11,15 +11,27 @@ > #include <glusterfs/api/glfs.h> > #include "block/block_int.h" > #include "qapi/error.h" > +#include "qapi/qmp/qerror.h" > #include "qemu/uri.h" > #include "qemu/error-report.h" > > #define GLUSTER_OPT_FILENAME "filename" > +#define GLUSTER_OPT_VOLUME "volume" > +#define GLUSTER_OPT_PATH "path" > +#define GLUSTER_OPT_TYPE "type" > +#define GLUSTER_OPT_SERVER_PATTERN "server." 
> +#define GLUSTER_OPT_HOST "host" > +#define GLUSTER_OPT_PORT "port" > +#define GLUSTER_OPT_TO "to" > +#define GLUSTER_OPT_IPV4 "ipv4" > +#define GLUSTER_OPT_IPV6 "ipv6" > +#define GLUSTER_OPT_SOCKET "socket" > #define GLUSTER_OPT_DEBUG "debug" > #define GLUSTER_DEFAULT_PORT 24007 > #define GLUSTER_DEBUG_DEFAULT 4 > #define GLUSTER_DEBUG_MAX 9 > > +#define GERR_INDEX_HINT "hint: check in 'server' array index '%d'\n" > > typedef struct GlusterAIOCB { > int64_t size; > @@ -83,6 +95,92 @@ static QemuOptsList runtime_opts = { > }, > }; > > +static QemuOptsList runtime_json_opts = { > + .name = "gluster_json", > + .head = QTAILQ_HEAD_INITIALIZER(runtime_json_opts.head), > + .desc = { > + { > + .name = GLUSTER_OPT_VOLUME, > + .type = QEMU_OPT_STRING, > + .help = "name of gluster volume where VM image resides", > + }, > + { > + .name = GLUSTER_OPT_PATH, > + .type = QEMU_OPT_STRING, > + .help = "absolute path to image file in gluster volume", > + }, > + { > + .name = GLUSTER_OPT_DEBUG, > + .type = QEMU_OPT_NUMBER, > + .help = "Gluster log level, valid range is 0-9", > + }, > + { /* end of list */ } > + }, > +}; > + > +static QemuOptsList runtime_type_opts = { > + .name = "gluster_type", > + .head = QTAILQ_HEAD_INITIALIZER(runtime_type_opts.head), > + .desc = { > + { > + .name = GLUSTER_OPT_TYPE, > + .type = QEMU_OPT_STRING, > + .help = "tcp|unix", > + }, > + { /* end of list */ } > + }, > +}; > + > +static QemuOptsList runtime_unix_opts = { > + .name = "gluster_unix", > + .head = QTAILQ_HEAD_INITIALIZER(runtime_unix_opts.head), > + .desc = { > + { > + .name = GLUSTER_OPT_SOCKET, > + .type = QEMU_OPT_STRING, > + .help = "socket file path)", > + }, > + { /* end of list */ } > + }, > +}; > + > +static QemuOptsList runtime_tcp_opts = { > + .name = "gluster_tcp", > + .head = QTAILQ_HEAD_INITIALIZER(runtime_tcp_opts.head), > + .desc = { > + { > + .name = GLUSTER_OPT_TYPE, > + .type = QEMU_OPT_STRING, > + .help = "tcp|unix", > + }, > + { > + .name = GLUSTER_OPT_HOST, > + .type = QEMU_OPT_STRING, > + .help = "host address (hostname/ipv4/ipv6 addresses)", > + }, > + { > + .name = GLUSTER_OPT_PORT, > + .type = QEMU_OPT_NUMBER, > + .help = "port number on which glusterd is listening (default 24007)", Per checkpatch.pl, exceeds 80 chars; no need for respin, will fix on commit. 
> + }, > + { > + .name = "to", > + .type = QEMU_OPT_NUMBER, > + .help = "max port number, not supported by gluster", > + }, > + { > + .name = "ipv4", > + .type = QEMU_OPT_BOOL, > + .help = "ipv4 bool value, not supported by gluster", > + }, > + { > + .name = "ipv6", > + .type = QEMU_OPT_BOOL, > + .help = "ipv6 bool value, not supported by gluster", > + }, > + { /* end of list */ } > + }, > +}; > > static int parse_volume_options(BlockdevOptionsGluster *gconf, char *path) > { > @@ -155,7 +253,8 @@ static int qemu_gluster_parse_uri(BlockdevOptionsGluster *gconf, > return -EINVAL; > } > > - gconf->server = gsconf = g_new0(GlusterServer, 1); > + gconf->server = g_new0(GlusterServerList, 1); > + gconf->server->value = gsconf = g_new0(GlusterServer, 1); > > /* transport */ > if (!uri->scheme || !strcmp(uri->scheme, "gluster")) { > @@ -212,39 +311,34 @@ out: > return ret; > } > > -static struct glfs *qemu_gluster_init(BlockdevOptionsGluster *gconf, > - const char *filename, Error **errp) > +static struct glfs *qemu_gluster_glfs_init(BlockdevOptionsGluster *gconf, > + Error **errp) > { > - struct glfs *glfs = NULL; > + struct glfs *glfs; > int ret; > int old_errno; > - > - ret = qemu_gluster_parse_uri(gconf, filename); > - if (ret < 0) { > - error_setg(errp, "Invalid URI"); > - error_append_hint(errp, "Usage: file=gluster[+transport]://" > - "[host[:port]]/volume/path[?socket=...]\n"); > - errno = -ret; > - goto out; > - } > + GlusterServerList *server; > > glfs = glfs_new(gconf->volume); > if (!glfs) { > goto out; > } > > - if (gconf->server->type == GLUSTER_TRANSPORT_UNIX) { > - ret = glfs_set_volfile_server(glfs, > - GlusterTransport_lookup[gconf->server->type], > - gconf->server->u.q_unix.path, 0); > - } else { > - ret = glfs_set_volfile_server(glfs, > - GlusterTransport_lookup[gconf->server->type], > - gconf->server->u.tcp.host, > - atoi(gconf->server->u.tcp.port)); > - } > - if (ret < 0) { > - goto out; > + for (server = gconf->server; server; server = server->next) { > + if (server->value->type == GLUSTER_TRANSPORT_UNIX) { > + ret = glfs_set_volfile_server(glfs, > + GlusterTransport_lookup[server->value->type], Per checkpatch.pl, exceeds 80 chars; no need for respin, will fix on commit. > + server->value->u.q_unix.path, 0); > + } else { > + ret = glfs_set_volfile_server(glfs, > + GlusterTransport_lookup[server->value->type], Per checkpatch.pl, exceeds 80 chars; no need for respin, will fix on commit. 
> + server->value->u.tcp.host, > + atoi(server->value->u.tcp.port)); > + } > + > + if (ret < 0) { > + goto out; > + } > } > > ret = glfs_set_logging(glfs, "-", gconf->debug_level); > @@ -254,18 +348,21 @@ static struct glfs *qemu_gluster_init(BlockdevOptionsGluster *gconf, > > ret = glfs_init(glfs); > if (ret) { > - if (gconf->server->type == GLUSTER_TRANSPORT_UNIX) { > - error_setg(errp, > - "Gluster connection for volume %s, path %s failed on " > - "socket %s ", gconf->volume, gconf->path, > - gconf->server->u.q_unix.path); > - } else { > - error_setg(errp, > - "Gluster connection for volume %s, path %s failed on " > - "host %s and port %s ", gconf->volume, gconf->path, > - gconf->server->u.tcp.host, gconf->server->u.tcp.port); > + error_setg(errp, "Gluster connection for volume %s, path %s failed" > + " to connect", gconf->volume, gconf->path); > + for (server = gconf->server; server; server = server->next) { > + if (server->value->type == GLUSTER_TRANSPORT_UNIX) { > + error_append_hint(errp, "hint: failed on socket %s ", > + server->value->u.q_unix.path); > + } else { > + error_append_hint(errp, "hint: failed on host %s and port %s ", > + server->value->u.tcp.host, > + server->value->u.tcp.port); > + } > } > > + error_append_hint(errp, "Please refer to gluster logs for more info\n"); > + > /* glfs_init sometimes doesn't set errno although docs suggest that */ > if (errno == 0) { > errno = EINVAL; > @@ -284,6 +381,226 @@ out: > return NULL; > } > > +static int qapi_enum_parse(const char *opt) > +{ > + int i; > + > + if (!opt) { > + return GLUSTER_TRANSPORT__MAX; > + } > + > + for (i = 0; i < GLUSTER_TRANSPORT__MAX; i++) { > + if (!strcmp(opt, GlusterTransport_lookup[i])) { > + return i; > + } > + } > + > + return i; > +} > + > +/* > + * Convert the json formatted command line into qapi. 
> +*/ > +static int qemu_gluster_parse_json(BlockdevOptionsGluster *gconf, > + QDict *options, Error **errp) > +{ > + QemuOpts *opts; > + GlusterServer *gsconf; > + GlusterServerList *curr = NULL; > + QDict *backing_options = NULL; > + Error *local_err = NULL; > + char *str = NULL; > + const char *ptr; > + size_t num_servers; > + int i; > + > + /* create opts info from runtime_json_opts list */ > + opts = qemu_opts_create(&runtime_json_opts, NULL, 0, &error_abort); > + qemu_opts_absorb_qdict(opts, options, &local_err); > + if (local_err) { > + goto out; > + } > + > + num_servers = qdict_array_entries(options, GLUSTER_OPT_SERVER_PATTERN); > + if (num_servers < 1) { > + error_setg(&local_err, QERR_MISSING_PARAMETER, "server"); > + goto out; > + } > + > + ptr = qemu_opt_get(opts, GLUSTER_OPT_VOLUME); > + if (!ptr) { > + error_setg(&local_err, QERR_MISSING_PARAMETER, GLUSTER_OPT_VOLUME); > + goto out; > + } > + gconf->volume = g_strdup(ptr); > + > + ptr = qemu_opt_get(opts, GLUSTER_OPT_PATH); > + if (!ptr) { > + error_setg(&local_err, QERR_MISSING_PARAMETER, GLUSTER_OPT_PATH); > + goto out; > + } > + gconf->path = g_strdup(ptr); > + qemu_opts_del(opts); > + > + for (i = 0; i < num_servers; i++) { > + str = g_strdup_printf(GLUSTER_OPT_SERVER_PATTERN"%d.", i); > + qdict_extract_subqdict(options, &backing_options, str); > + > + /* create opts info from runtime_type_opts list */ > + opts = qemu_opts_create(&runtime_type_opts, NULL, 0, &error_abort); > + qemu_opts_absorb_qdict(opts, backing_options, &local_err); > + if (local_err) { > + goto out; > + } > + > + ptr = qemu_opt_get(opts, GLUSTER_OPT_TYPE); > + gsconf = g_new0(GlusterServer, 1); > + gsconf->type = qapi_enum_parse(ptr); > + if (!ptr) { > + error_setg(&local_err, QERR_MISSING_PARAMETER, GLUSTER_OPT_TYPE); > + error_append_hint(&local_err, GERR_INDEX_HINT, i); > + goto out; > + > + } > + if (gsconf->type == GLUSTER_TRANSPORT__MAX) { > + error_setg(&local_err, QERR_INVALID_PARAMETER_VALUE, > + GLUSTER_OPT_TYPE, "tcp or unix"); > + error_append_hint(&local_err, GERR_INDEX_HINT, i); > + goto out; > + } > + qemu_opts_del(opts); > + > + if (gsconf->type == GLUSTER_TRANSPORT_TCP) { > + /* create opts info from runtime_tcp_opts list */ > + opts = qemu_opts_create(&runtime_tcp_opts, NULL, 0, &error_abort); > + qemu_opts_absorb_qdict(opts, backing_options, &local_err); > + if (local_err) { > + goto out; > + } > + > + ptr = qemu_opt_get(opts, GLUSTER_OPT_HOST); > + if (!ptr) { > + error_setg(&local_err, QERR_MISSING_PARAMETER, > + GLUSTER_OPT_HOST); > + error_append_hint(&local_err, GERR_INDEX_HINT, i); > + goto out; > + } > + gsconf->u.tcp.host = g_strdup(ptr); > + ptr = qemu_opt_get(opts, GLUSTER_OPT_PORT); > + if (!ptr) { > + error_setg(&local_err, QERR_MISSING_PARAMETER, > + GLUSTER_OPT_PORT); > + error_append_hint(&local_err, GERR_INDEX_HINT, i); > + goto out; > + } > + gsconf->u.tcp.port = g_strdup(ptr); > + > + /* defend for unsupported fields in InetSocketAddress, > + * i.e. 
@ipv4, @ipv6 and @to > + */ > + ptr = qemu_opt_get(opts, GLUSTER_OPT_TO); > + if (ptr) { > + gsconf->u.tcp.has_to = true; > + } > + ptr = qemu_opt_get(opts, GLUSTER_OPT_IPV4); > + if (ptr) { > + gsconf->u.tcp.has_ipv4 = true; > + } > + ptr = qemu_opt_get(opts, GLUSTER_OPT_IPV6); > + if (ptr) { > + gsconf->u.tcp.has_ipv6 = true; > + } > + if (gsconf->u.tcp.has_to) { > + error_setg(&local_err, "Parameter 'to' not supported"); > + goto out; > + } > + if (gsconf->u.tcp.has_ipv4 || gsconf->u.tcp.has_ipv6) { > + error_setg(&local_err, "Parameters 'ipv4/ipv6' not supported"); > + goto out; > + } > + qemu_opts_del(opts); > + } else { > + /* create opts info from runtime_unix_opts list */ > + opts = qemu_opts_create(&runtime_unix_opts, NULL, 0, &error_abort); > + qemu_opts_absorb_qdict(opts, backing_options, &local_err); > + if (local_err) { > + goto out; > + } > + > + ptr = qemu_opt_get(opts, GLUSTER_OPT_SOCKET); > + if (!ptr) { > + error_setg(&local_err, QERR_MISSING_PARAMETER, > + GLUSTER_OPT_SOCKET); > + error_append_hint(&local_err, GERR_INDEX_HINT, i); > + goto out; > + } > + gsconf->u.q_unix.path = g_strdup(ptr); > + qemu_opts_del(opts); > + } > + > + if (gconf->server == NULL) { > + gconf->server = g_new0(GlusterServerList, 1); > + gconf->server->value = gsconf; > + curr = gconf->server; > + } else { > + curr->next = g_new0(GlusterServerList, 1); > + curr->next->value = gsconf; > + curr = curr->next; > + } > + > + qdict_del(backing_options, str); > + g_free(str); > + str = NULL; > + } > + > + return 0; > + > +out: > + error_propagate(errp, local_err); > + qemu_opts_del(opts); > + if (str) { > + qdict_del(backing_options, str); > + g_free(str); > + } > + errno = EINVAL; > + return -errno; > +} > + > +static struct glfs *qemu_gluster_init(BlockdevOptionsGluster *gconf, > + const char *filename, > + QDict *options, Error **errp) > +{ > + int ret; > + if (filename) { > + ret = qemu_gluster_parse_uri(gconf, filename); > + if (ret < 0) { > + error_setg(errp, "invalid URI"); > + error_append_hint(errp, "Usage: file=gluster[+transport]://" > + "[host[:port]]/volume/path[?socket=...]\n"); > + errno = -ret; > + return NULL; > + } > + } else { > + ret = qemu_gluster_parse_json(gconf, options, errp); > + if (ret < 0) { > + error_append_hint(errp, "Usage: " > + "-drive driver=qcow2,file.driver=gluster," > + "file.volume=testvol,file.path=/path/a.qcow2" > + "[,file.debug=9],file.server.0.type=tcp," > + "file.server.0.host=1.2.3.4," > + "file.server.0.port=24007," > + "file.server.1.transport=unix," > + "file.server.1.socket=/var/run/glusterd.socket ..." 
> + "\n"); > + errno = -ret; > + return NULL; > + } > + > + } > + > + return qemu_gluster_glfs_init(gconf, errp); > +} > + > static void qemu_gluster_complete_aio(void *opaque) > { > GlusterAIOCB *acb = (GlusterAIOCB *)opaque; > @@ -383,7 +700,7 @@ static int qemu_gluster_open(BlockDriverState *bs, QDict *options, > gconf = g_new0(BlockdevOptionsGluster, 1); > gconf->debug_level = s->debug_level; > gconf->has_debug_level = true; > - s->glfs = qemu_gluster_init(gconf, filename, errp); > + s->glfs = qemu_gluster_init(gconf, filename, options, errp); > if (!s->glfs) { > ret = -errno; > goto out; > @@ -454,7 +771,7 @@ static int qemu_gluster_reopen_prepare(BDRVReopenState *state, > gconf = g_new0(BlockdevOptionsGluster, 1); > gconf->debug_level = s->debug_level; > gconf->has_debug_level = true; > - reop_s->glfs = qemu_gluster_init(gconf, state->bs->filename, errp); > + reop_s->glfs = qemu_gluster_init(gconf, state->bs->filename, NULL, errp); > if (reop_s->glfs == NULL) { > ret = -errno; > goto exit; > @@ -601,7 +918,7 @@ static int qemu_gluster_create(const char *filename, > } > gconf->has_debug_level = true; > > - glfs = qemu_gluster_init(gconf, filename, errp); > + glfs = qemu_gluster_init(gconf, filename, NULL, errp); > if (!glfs) { > ret = -errno; > goto out; > @@ -981,7 +1298,7 @@ static BlockDriver bdrv_gluster = { > .format_name = "gluster", > .protocol_name = "gluster", > .instance_size = sizeof(BDRVGlusterState), > - .bdrv_needs_filename = true, > + .bdrv_needs_filename = false, > .bdrv_file_open = qemu_gluster_open, > .bdrv_reopen_prepare = qemu_gluster_reopen_prepare, > .bdrv_reopen_commit = qemu_gluster_reopen_commit, > @@ -1009,7 +1326,7 @@ static BlockDriver bdrv_gluster_tcp = { > .format_name = "gluster", > .protocol_name = "gluster+tcp", > .instance_size = sizeof(BDRVGlusterState), > - .bdrv_needs_filename = true, > + .bdrv_needs_filename = false, > .bdrv_file_open = qemu_gluster_open, > .bdrv_reopen_prepare = qemu_gluster_reopen_prepare, > .bdrv_reopen_commit = qemu_gluster_reopen_commit, > diff --git a/qapi/block-core.json b/qapi/block-core.json > index 1fa0674..5f8179b 100644 > --- a/qapi/block-core.json > +++ b/qapi/block-core.json > @@ -2111,7 +2111,7 @@ > { 'struct': 'BlockdevOptionsGluster', > 'data': { 'volume': 'str', > 'path': 'str', > - 'server': 'GlusterServer', > + 'server': ['GlusterServer'], > '*debug_level': 'int' } } > > ## > -- > 2.7.4 > > With fixups: Reviewed-by: Jeff Cody <jcody@redhat.com>
On 07/19/2016 10:57 AM, Prasanna Kumar Kalever wrote: > This patch adds a way to specify multiple volfile servers to the gluster > block backend of QEMU with tcp|rdma transport types and their port numbers. If rdma is deprecated, we don't need to mention it here. > > Problem: > > Currently VM Image on gluster volume is specified like this: > > file=gluster[+tcp]://host[:port]/testvol/a.img > > Say we have three hosts in a trusted pool with replica 3 volume in action. > When the host mentioned in the command above goes down for some reason, > the other two hosts are still available. But there's currently no way > to tell QEMU about them. > > Solution: > > New way of specifying VM Image on gluster volume with volfile servers: > (We still support old syntax to maintain backward compatibility) > > Basic command line syntax looks like: > > Pattern I: > -drive driver=gluster, > volume=testvol,path=/path/a.raw,[debug=N,] > server.0.type=tcp, > server.0.host=1.2.3.4, > server.0.port=24007, > server.1.type=unix, > server.1.socket=/path/socketfile So, I haven't checked this yet, but if I'm correct, the old syntax was: -drive driver=gluster, volume=testvol,path=/path/a.raw, server.type=tcp, server.host=1.2.3.4, server.port=24007 Is that syntax still supported? That is, if I only specify one server, does 'server.FOO' mean the same as 'server.0.FOO'? Or am I completely wrong? Maybe listing the old syntax for comparison is in order, especially since you claim above that the old syntax is still supported. > > Pattern II: > 'json:{"driver":"qcow2","file":{"driver":"gluster", > "volume":"testvol","path":"/path/a.qcow2",["debug":N,] > "server":[{hostinfo_1}, ...{hostinfo_N}]}}' > > driver => 'gluster' (protocol name) > volume => name of gluster volume where our VM image resides > path => absolute path of image in gluster volume > [debug] => libgfapi loglevel [(0 - 9) default 4 -> Error] > > {hostinfo} => {{type:"tcp",host:"1.2.3.4"[,port=24007]}, > {type:"unix",socket:"/path/sockfile"}} > > type => transport type used to connect to gluster management daemon, > it can be tcp|unix > host => host address (hostname/ipv4/ipv6 addresses/socket path) You dropped ipv6 in the last patch, if you want to update this comment. > port => port number on which glusterd is listening. > socket => path to socket file > > Examples: > 1. > -drive driver=qcow2,file.driver=gluster, > file.volume=testvol,file.path=/path/a.qcow2,file.debug=9, > file.server.0.type=tcp, > file.server.0.host=1.2.3.4, > file.server.0.port=24007, > file.server.1.type=tcp, > file.server.1.socket=/var/run/glusterd.socket > 2. > 'json:{"driver":"qcow2","file":{"driver":"gluster","volume":"testvol", > "path":"/path/a.qcow2","debug":9,"server": > [{type:"tcp",host:"1.2.3.4",port=24007}, > {type:"unix",socket:"/var/run/glusterd.socket"}] } }' > > This patch gives a mechanism to provide all the server addresses, which are in > replica set, so in case host1 is down VM can still boot from any of the > active hosts. 
> > This is equivalent to the backup-volfile-servers option supported by > mount.glusterfs (FUSE way of mounting gluster volume) > > credits: sincere thanks to all the supporters > > Signed-off-by: Prasanna Kumar Kalever <prasanna.kalever@redhat.com> > --- > block/gluster.c | 397 +++++++++++++++++++++++++++++++++++++++++++++------ > qapi/block-core.json | 2 +- > 2 files changed, 358 insertions(+), 41 deletions(-) > > +static QemuOptsList runtime_tcp_opts = { > + .name = "gluster_tcp", > + .head = QTAILQ_HEAD_INITIALIZER(runtime_tcp_opts.head), > + .desc = { > + { > + .name = GLUSTER_OPT_TYPE, > + .type = QEMU_OPT_STRING, > + .help = "tcp|unix", > + }, > + { > + .name = GLUSTER_OPT_HOST, > + .type = QEMU_OPT_STRING, > + .help = "host address (hostname/ipv4/ipv6 addresses)", > + }, Awkward to state ipv6 here, > + { > + .name = GLUSTER_OPT_PORT, > + .type = QEMU_OPT_NUMBER, > + .help = "port number on which glusterd is listening (default 24007)", > + }, > + { > + .name = "to", > + .type = QEMU_OPT_NUMBER, > + .help = "max port number, not supported by gluster", > + }, > + { > + .name = "ipv4", > + .type = QEMU_OPT_BOOL, > + .help = "ipv4 bool value, not supported by gluster", > + }, > + { > + .name = "ipv6", > + .type = QEMU_OPT_BOOL, > + .help = "ipv6 bool value, not supported by gluster", but then to state it is not supported here. Do we actually have to provide QemuOpt listings for the options that we don't actually support? That is, must QemuOpt precisely match the InetAddressSocket struct we are parsing into, or can it be merely a subset where we omit the portions we won't use? > @@ -284,6 +381,226 @@ out: > return NULL; > } > > +static int qapi_enum_parse(const char *opt) > +{ > + int i; > + > + if (!opt) { > + return GLUSTER_TRANSPORT__MAX; > + } > + > + for (i = 0; i < GLUSTER_TRANSPORT__MAX; i++) { > + if (!strcmp(opt, GlusterTransport_lookup[i])) { > + return i; > + } > + } > + > + return i; Is this duplicating any functionality that we already have? > +} > + > +/* > + * Convert the json formatted command line into qapi. > +*/ > +static int qemu_gluster_parse_json(BlockdevOptionsGluster *gconf, > + QDict *options, Error **errp) Indentation is off. > +{ > + QemuOpts *opts; > + GlusterServer *gsconf; > + GlusterServerList *curr = NULL; > + QDict *backing_options = NULL; > + Error *local_err = NULL; > + char *str = NULL; > + const char *ptr; > + size_t num_servers; > + int i; > + > + /* create opts info from runtime_json_opts list */ > + opts = qemu_opts_create(&runtime_json_opts, NULL, 0, &error_abort); > + qemu_opts_absorb_qdict(opts, options, &local_err); In fact, if we were to use opts_visitor_new(), could we let the already-existing generated QAPI code parse from QemuOpts into QAPI form without having to do it by hand here? Hmm, maybe not... 
> + if (local_err) { > + goto out; > + } > + > + num_servers = qdict_array_entries(options, GLUSTER_OPT_SERVER_PATTERN); > + if (num_servers < 1) { > + error_setg(&local_err, QERR_MISSING_PARAMETER, "server"); > + goto out; > + } > + > + ptr = qemu_opt_get(opts, GLUSTER_OPT_VOLUME); > + if (!ptr) { > + error_setg(&local_err, QERR_MISSING_PARAMETER, GLUSTER_OPT_VOLUME); > + goto out; > + } > + gconf->volume = g_strdup(ptr); > + > + ptr = qemu_opt_get(opts, GLUSTER_OPT_PATH); > + if (!ptr) { > + error_setg(&local_err, QERR_MISSING_PARAMETER, GLUSTER_OPT_PATH); > + goto out; > + } > + gconf->path = g_strdup(ptr); > + qemu_opts_del(opts); > + > + for (i = 0; i < num_servers; i++) { > + str = g_strdup_printf(GLUSTER_OPT_SERVER_PATTERN"%d.", i); > + qdict_extract_subqdict(options, &backing_options, str); ...since the OptsVisitor doesn't handle structs, but we are definitely parsing a sub-struct. I also wonder if Dan's work on a new string-based qmp-input-visitor, and/or his qdict_crumple() work, could reduce the amount of effort needed here. It's not to say that your patch is wrong, only that we may have follow-up patches that can improve it. > + > + /* create opts info from runtime_type_opts list */ > + opts = qemu_opts_create(&runtime_type_opts, NULL, 0, &error_abort); > + qemu_opts_absorb_qdict(opts, backing_options, &local_err); > + if (local_err) { > + goto out; > + } > + > + ptr = qemu_opt_get(opts, GLUSTER_OPT_TYPE); > + gsconf = g_new0(GlusterServer, 1); > + gsconf->type = qapi_enum_parse(ptr); > + if (!ptr) { > + error_setg(&local_err, QERR_MISSING_PARAMETER, GLUSTER_OPT_TYPE); > + error_append_hint(&local_err, GERR_INDEX_HINT, i); Nothing else in our code base uses GERR_INDEX_HINT. I'd MUCH prefer open-coding the string instead of relying on some unknown string literal from glib, since we have less control over whether glib will always keep the format markers lined up with what we are using. > + /* defend for unsupported fields in InetSocketAddress, > + * i.e. @ipv4, @ipv6 and @to > + */ > + ptr = qemu_opt_get(opts, GLUSTER_OPT_TO); > + if (ptr) { > + gsconf->u.tcp.has_to = true; > + } > + ptr = qemu_opt_get(opts, GLUSTER_OPT_IPV4); > + if (ptr) { > + gsconf->u.tcp.has_ipv4 = true; > + } > + ptr = qemu_opt_get(opts, GLUSTER_OPT_IPV6); > + if (ptr) { > + gsconf->u.tcp.has_ipv6 = true; > + } > + if (gsconf->u.tcp.has_to) { > + error_setg(&local_err, "Parameter 'to' not supported"); > + goto out; > + } > + if (gsconf->u.tcp.has_ipv4 || gsconf->u.tcp.has_ipv6) { > + error_setg(&local_err, "Parameters 'ipv4/ipv6' not supported"); I know we are rejecting ipv6 until gluster supports it, but do we really have to reject ipv4? On the other hand, it's always backwards-compatible to relax this restriction later on, but harder to add it in after a release where it was not present. > +++ b/qapi/block-core.json > @@ -2111,7 +2111,7 @@ > { 'struct': 'BlockdevOptionsGluster', > 'data': { 'volume': 'str', > 'path': 'str', > - 'server': 'GlusterServer', > + 'server': ['GlusterServer'], Documentation should probably be tweaked to mention that 'server' is now a list of servers, and should not be empty. For that matter, did you test that the error message is sane when there are no servers listed?
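To make the GERR_INDEX_HINT point concrete: the macro is defined at the top of block/gluster.c in this patch (it is not a glib literal), so open-coding it as suggested would look roughly like this:

    /* open-coded hint in place of error_append_hint(&local_err, GERR_INDEX_HINT, i) */
    error_setg(&local_err, QERR_MISSING_PARAMETER, GLUSTER_OPT_TYPE);
    error_append_hint(&local_err,
                      "hint: check in 'server' array index '%d'\n", i);
    goto out;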
diff --git a/block/gluster.c b/block/gluster.c
index c4ca59e..0524789 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -11,15 +11,27 @@
 #include <glusterfs/api/glfs.h>
 #include "block/block_int.h"
 #include "qapi/error.h"
+#include "qapi/qmp/qerror.h"
 #include "qemu/uri.h"
 #include "qemu/error-report.h"
 
 #define GLUSTER_OPT_FILENAME        "filename"
+#define GLUSTER_OPT_VOLUME          "volume"
+#define GLUSTER_OPT_PATH            "path"
+#define GLUSTER_OPT_TYPE            "type"
+#define GLUSTER_OPT_SERVER_PATTERN  "server."
+#define GLUSTER_OPT_HOST            "host"
+#define GLUSTER_OPT_PORT            "port"
+#define GLUSTER_OPT_TO              "to"
+#define GLUSTER_OPT_IPV4            "ipv4"
+#define GLUSTER_OPT_IPV6            "ipv6"
+#define GLUSTER_OPT_SOCKET          "socket"
 #define GLUSTER_OPT_DEBUG           "debug"
 #define GLUSTER_DEFAULT_PORT        24007
 #define GLUSTER_DEBUG_DEFAULT       4
 #define GLUSTER_DEBUG_MAX           9
 
+#define GERR_INDEX_HINT "hint: check in 'server' array index '%d'\n"
 
 typedef struct GlusterAIOCB {
     int64_t size;
@@ -83,6 +95,92 @@ static QemuOptsList runtime_opts = {
     },
 };
 
+static QemuOptsList runtime_json_opts = {
+    .name = "gluster_json",
+    .head = QTAILQ_HEAD_INITIALIZER(runtime_json_opts.head),
+    .desc = {
+        {
+            .name = GLUSTER_OPT_VOLUME,
+            .type = QEMU_OPT_STRING,
+            .help = "name of gluster volume where VM image resides",
+        },
+        {
+            .name = GLUSTER_OPT_PATH,
+            .type = QEMU_OPT_STRING,
+            .help = "absolute path to image file in gluster volume",
+        },
+        {
+            .name = GLUSTER_OPT_DEBUG,
+            .type = QEMU_OPT_NUMBER,
+            .help = "Gluster log level, valid range is 0-9",
+        },
+        { /* end of list */ }
+    },
+};
+
+static QemuOptsList runtime_type_opts = {
+    .name = "gluster_type",
+    .head = QTAILQ_HEAD_INITIALIZER(runtime_type_opts.head),
+    .desc = {
+        {
+            .name = GLUSTER_OPT_TYPE,
+            .type = QEMU_OPT_STRING,
+            .help = "tcp|unix",
+        },
+        { /* end of list */ }
+    },
+};
+
+static QemuOptsList runtime_unix_opts = {
+    .name = "gluster_unix",
+    .head = QTAILQ_HEAD_INITIALIZER(runtime_unix_opts.head),
+    .desc = {
+        {
+            .name = GLUSTER_OPT_SOCKET,
+            .type = QEMU_OPT_STRING,
+            .help = "socket file path",
+        },
+        { /* end of list */ }
+    },
+};
+
+static QemuOptsList runtime_tcp_opts = {
+    .name = "gluster_tcp",
+    .head = QTAILQ_HEAD_INITIALIZER(runtime_tcp_opts.head),
+    .desc = {
+        {
+            .name = GLUSTER_OPT_TYPE,
+            .type = QEMU_OPT_STRING,
+            .help = "tcp|unix",
+        },
+        {
+            .name = GLUSTER_OPT_HOST,
+            .type = QEMU_OPT_STRING,
+            .help = "host address (hostname/ipv4/ipv6 addresses)",
+        },
+        {
+            .name = GLUSTER_OPT_PORT,
+            .type = QEMU_OPT_NUMBER,
+            .help = "port number on which glusterd is listening (default 24007)",
+        },
+        {
+            .name = "to",
+            .type = QEMU_OPT_NUMBER,
+            .help = "max port number, not supported by gluster",
+        },
+        {
+            .name = "ipv4",
+            .type = QEMU_OPT_BOOL,
+            .help = "ipv4 bool value, not supported by gluster",
+        },
+        {
+            .name = "ipv6",
+            .type = QEMU_OPT_BOOL,
+            .help = "ipv6 bool value, not supported by gluster",
+        },
+        { /* end of list */ }
+    },
+};
 
 static int parse_volume_options(BlockdevOptionsGluster *gconf, char *path)
 {
@@ -155,7 +253,8 @@ static int qemu_gluster_parse_uri(BlockdevOptionsGluster *gconf,
         return -EINVAL;
     }
 
-    gconf->server = gsconf = g_new0(GlusterServer, 1);
+    gconf->server = g_new0(GlusterServerList, 1);
+    gconf->server->value = gsconf = g_new0(GlusterServer, 1);
 
     /* transport */
     if (!uri->scheme || !strcmp(uri->scheme, "gluster")) {
@@ -212,39 +311,34 @@ out:
     return ret;
 }
 
-static struct glfs *qemu_gluster_init(BlockdevOptionsGluster *gconf,
-                                      const char *filename, Error **errp)
+static struct glfs *qemu_gluster_glfs_init(BlockdevOptionsGluster *gconf,
+                                           Error **errp)
 {
-    struct glfs *glfs = NULL;
+    struct glfs *glfs;
     int ret;
     int old_errno;
-
-    ret = qemu_gluster_parse_uri(gconf, filename);
-    if (ret < 0) {
-        error_setg(errp, "Invalid URI");
-        error_append_hint(errp, "Usage: file=gluster[+transport]://"
-                                "[host[:port]]/volume/path[?socket=...]\n");
-        errno = -ret;
-        goto out;
-    }
+    GlusterServerList *server;
 
     glfs = glfs_new(gconf->volume);
     if (!glfs) {
         goto out;
     }
 
-    if (gconf->server->type == GLUSTER_TRANSPORT_UNIX) {
-        ret = glfs_set_volfile_server(glfs,
-                                      GlusterTransport_lookup[gconf->server->type],
-                                      gconf->server->u.q_unix.path, 0);
-    } else {
-        ret = glfs_set_volfile_server(glfs,
-                                      GlusterTransport_lookup[gconf->server->type],
-                                      gconf->server->u.tcp.host,
-                                      atoi(gconf->server->u.tcp.port));
-    }
-    if (ret < 0) {
-        goto out;
+    for (server = gconf->server; server; server = server->next) {
+        if (server->value->type == GLUSTER_TRANSPORT_UNIX) {
+            ret = glfs_set_volfile_server(glfs,
+                                          GlusterTransport_lookup[server->value->type],
+                                          server->value->u.q_unix.path, 0);
+        } else {
+            ret = glfs_set_volfile_server(glfs,
+                                          GlusterTransport_lookup[server->value->type],
+                                          server->value->u.tcp.host,
+                                          atoi(server->value->u.tcp.port));
+        }
+
+        if (ret < 0) {
+            goto out;
+        }
     }
 
     ret = glfs_set_logging(glfs, "-", gconf->debug_level);
@@ -254,18 +348,21 @@ static struct glfs *qemu_gluster_init(BlockdevOptionsGluster *gconf,
 
     ret = glfs_init(glfs);
     if (ret) {
-        if (gconf->server->type == GLUSTER_TRANSPORT_UNIX) {
-            error_setg(errp,
-                       "Gluster connection for volume %s, path %s failed on "
-                       "socket %s ", gconf->volume, gconf->path,
-                       gconf->server->u.q_unix.path);
-        } else {
-            error_setg(errp,
-                       "Gluster connection for volume %s, path %s failed on "
-                       "host %s and port %s ", gconf->volume, gconf->path,
-                       gconf->server->u.tcp.host, gconf->server->u.tcp.port);
+        error_setg(errp, "Gluster connection for volume %s, path %s failed"
+                         " to connect", gconf->volume, gconf->path);
+        for (server = gconf->server; server; server = server->next) {
+            if (server->value->type == GLUSTER_TRANSPORT_UNIX) {
+                error_append_hint(errp, "hint: failed on socket %s ",
+                                  server->value->u.q_unix.path);
+            } else {
+                error_append_hint(errp, "hint: failed on host %s and port %s ",
+                                  server->value->u.tcp.host,
+                                  server->value->u.tcp.port);
+            }
         }
 
+        error_append_hint(errp, "Please refer to gluster logs for more info\n");
+
         /* glfs_init sometimes doesn't set errno although docs suggest that */
         if (errno == 0) {
             errno = EINVAL;
@@ -284,6 +381,226 @@ out:
     return NULL;
 }
 
+static int qapi_enum_parse(const char *opt)
+{
+    int i;
+
+    if (!opt) {
+        return GLUSTER_TRANSPORT__MAX;
+    }
+
+    for (i = 0; i < GLUSTER_TRANSPORT__MAX; i++) {
+        if (!strcmp(opt, GlusterTransport_lookup[i])) {
+            return i;
+        }
+    }
+
+    return i;
+}
+
+/*
+ * Convert the json formatted command line into qapi.
+ */
+static int qemu_gluster_parse_json(BlockdevOptionsGluster *gconf,
+                                   QDict *options, Error **errp)
+{
+    QemuOpts *opts;
+    GlusterServer *gsconf;
+    GlusterServerList *curr = NULL;
+    QDict *backing_options = NULL;
+    Error *local_err = NULL;
+    char *str = NULL;
+    const char *ptr;
+    size_t num_servers;
+    int i;
+
+    /* create opts info from runtime_json_opts list */
+    opts = qemu_opts_create(&runtime_json_opts, NULL, 0, &error_abort);
+    qemu_opts_absorb_qdict(opts, options, &local_err);
+    if (local_err) {
+        goto out;
+    }
+
+    num_servers = qdict_array_entries(options, GLUSTER_OPT_SERVER_PATTERN);
+    if (num_servers < 1) {
+        error_setg(&local_err, QERR_MISSING_PARAMETER, "server");
+        goto out;
+    }
+
+    ptr = qemu_opt_get(opts, GLUSTER_OPT_VOLUME);
+    if (!ptr) {
+        error_setg(&local_err, QERR_MISSING_PARAMETER, GLUSTER_OPT_VOLUME);
+        goto out;
+    }
+    gconf->volume = g_strdup(ptr);
+
+    ptr = qemu_opt_get(opts, GLUSTER_OPT_PATH);
+    if (!ptr) {
+        error_setg(&local_err, QERR_MISSING_PARAMETER, GLUSTER_OPT_PATH);
+        goto out;
+    }
+    gconf->path = g_strdup(ptr);
+    qemu_opts_del(opts);
+
+    for (i = 0; i < num_servers; i++) {
+        str = g_strdup_printf(GLUSTER_OPT_SERVER_PATTERN"%d.", i);
+        qdict_extract_subqdict(options, &backing_options, str);
+
+        /* create opts info from runtime_type_opts list */
+        opts = qemu_opts_create(&runtime_type_opts, NULL, 0, &error_abort);
+        qemu_opts_absorb_qdict(opts, backing_options, &local_err);
+        if (local_err) {
+            goto out;
+        }
+
+        ptr = qemu_opt_get(opts, GLUSTER_OPT_TYPE);
+        gsconf = g_new0(GlusterServer, 1);
+        gsconf->type = qapi_enum_parse(ptr);
+        if (!ptr) {
+            error_setg(&local_err, QERR_MISSING_PARAMETER, GLUSTER_OPT_TYPE);
+            error_append_hint(&local_err, GERR_INDEX_HINT, i);
+            goto out;
+
+        }
+        if (gsconf->type == GLUSTER_TRANSPORT__MAX) {
+            error_setg(&local_err, QERR_INVALID_PARAMETER_VALUE,
+                       GLUSTER_OPT_TYPE, "tcp or unix");
+            error_append_hint(&local_err, GERR_INDEX_HINT, i);
+            goto out;
+        }
+        qemu_opts_del(opts);
+
+        if (gsconf->type == GLUSTER_TRANSPORT_TCP) {
+            /* create opts info from runtime_tcp_opts list */
+            opts = qemu_opts_create(&runtime_tcp_opts, NULL, 0, &error_abort);
+            qemu_opts_absorb_qdict(opts, backing_options, &local_err);
+            if (local_err) {
+                goto out;
+            }
+
+            ptr = qemu_opt_get(opts, GLUSTER_OPT_HOST);
+            if (!ptr) {
+                error_setg(&local_err, QERR_MISSING_PARAMETER,
+                           GLUSTER_OPT_HOST);
+                error_append_hint(&local_err, GERR_INDEX_HINT, i);
+                goto out;
+            }
+            gsconf->u.tcp.host = g_strdup(ptr);
+            ptr = qemu_opt_get(opts, GLUSTER_OPT_PORT);
+            if (!ptr) {
+                error_setg(&local_err, QERR_MISSING_PARAMETER,
+                           GLUSTER_OPT_PORT);
+                error_append_hint(&local_err, GERR_INDEX_HINT, i);
+                goto out;
+            }
+            gsconf->u.tcp.port = g_strdup(ptr);
+
+            /* defend for unsupported fields in InetSocketAddress,
+             * i.e. @ipv4, @ipv6 and @to
+             */
+            ptr = qemu_opt_get(opts, GLUSTER_OPT_TO);
+            if (ptr) {
+                gsconf->u.tcp.has_to = true;
+            }
+            ptr = qemu_opt_get(opts, GLUSTER_OPT_IPV4);
+            if (ptr) {
+                gsconf->u.tcp.has_ipv4 = true;
+            }
+            ptr = qemu_opt_get(opts, GLUSTER_OPT_IPV6);
+            if (ptr) {
+                gsconf->u.tcp.has_ipv6 = true;
+            }
+            if (gsconf->u.tcp.has_to) {
+                error_setg(&local_err, "Parameter 'to' not supported");
+                goto out;
+            }
+            if (gsconf->u.tcp.has_ipv4 || gsconf->u.tcp.has_ipv6) {
+                error_setg(&local_err, "Parameters 'ipv4/ipv6' not supported");
+                goto out;
+            }
+            qemu_opts_del(opts);
+        } else {
+            /* create opts info from runtime_unix_opts list */
+            opts = qemu_opts_create(&runtime_unix_opts, NULL, 0, &error_abort);
+            qemu_opts_absorb_qdict(opts, backing_options, &local_err);
+            if (local_err) {
+                goto out;
+            }
+
+            ptr = qemu_opt_get(opts, GLUSTER_OPT_SOCKET);
+            if (!ptr) {
+                error_setg(&local_err, QERR_MISSING_PARAMETER,
+                           GLUSTER_OPT_SOCKET);
+                error_append_hint(&local_err, GERR_INDEX_HINT, i);
+                goto out;
+            }
+            gsconf->u.q_unix.path = g_strdup(ptr);
+            qemu_opts_del(opts);
+        }
+
+        if (gconf->server == NULL) {
+            gconf->server = g_new0(GlusterServerList, 1);
+            gconf->server->value = gsconf;
+            curr = gconf->server;
+        } else {
+            curr->next = g_new0(GlusterServerList, 1);
+            curr->next->value = gsconf;
+            curr = curr->next;
+        }
+
+        qdict_del(backing_options, str);
+        g_free(str);
+        str = NULL;
+    }
+
+    return 0;
+
+out:
+    error_propagate(errp, local_err);
+    qemu_opts_del(opts);
+    if (str) {
+        qdict_del(backing_options, str);
+        g_free(str);
+    }
+    errno = EINVAL;
+    return -errno;
+}
+
+static struct glfs *qemu_gluster_init(BlockdevOptionsGluster *gconf,
+                                      const char *filename,
+                                      QDict *options, Error **errp)
+{
+    int ret;
+    if (filename) {
+        ret = qemu_gluster_parse_uri(gconf, filename);
+        if (ret < 0) {
+            error_setg(errp, "invalid URI");
+            error_append_hint(errp, "Usage: file=gluster[+transport]://"
+                                    "[host[:port]]/volume/path[?socket=...]\n");
+            errno = -ret;
+            return NULL;
+        }
+    } else {
+        ret = qemu_gluster_parse_json(gconf, options, errp);
+        if (ret < 0) {
+            error_append_hint(errp, "Usage: "
+                              "-drive driver=qcow2,file.driver=gluster,"
+                              "file.volume=testvol,file.path=/path/a.qcow2"
+                              "[,file.debug=9],file.server.0.type=tcp,"
+                              "file.server.0.host=1.2.3.4,"
+                              "file.server.0.port=24007,"
+                              "file.server.1.type=unix,"
+                              "file.server.1.socket=/var/run/glusterd.socket ..."
+                              "\n");
+            errno = -ret;
+            return NULL;
+        }
+
+    }
+
+    return qemu_gluster_glfs_init(gconf, errp);
+}
+
 static void qemu_gluster_complete_aio(void *opaque)
 {
     GlusterAIOCB *acb = (GlusterAIOCB *)opaque;
@@ -383,7 +700,7 @@ static int qemu_gluster_open(BlockDriverState *bs, QDict *options,
     gconf = g_new0(BlockdevOptionsGluster, 1);
     gconf->debug_level = s->debug_level;
     gconf->has_debug_level = true;
-    s->glfs = qemu_gluster_init(gconf, filename, errp);
+    s->glfs = qemu_gluster_init(gconf, filename, options, errp);
     if (!s->glfs) {
         ret = -errno;
         goto out;
@@ -454,7 +771,7 @@ static int qemu_gluster_reopen_prepare(BDRVReopenState *state,
     gconf = g_new0(BlockdevOptionsGluster, 1);
     gconf->debug_level = s->debug_level;
     gconf->has_debug_level = true;
-    reop_s->glfs = qemu_gluster_init(gconf, state->bs->filename, errp);
+    reop_s->glfs = qemu_gluster_init(gconf, state->bs->filename, NULL, errp);
     if (reop_s->glfs == NULL) {
         ret = -errno;
         goto exit;
@@ -601,7 +918,7 @@ static int qemu_gluster_create(const char *filename,
     }
     gconf->has_debug_level = true;
 
-    glfs = qemu_gluster_init(gconf, filename, errp);
+    glfs = qemu_gluster_init(gconf, filename, NULL, errp);
     if (!glfs) {
         ret = -errno;
         goto out;
@@ -981,7 +1298,7 @@ static BlockDriver bdrv_gluster = {
     .format_name                  = "gluster",
     .protocol_name                = "gluster",
     .instance_size                = sizeof(BDRVGlusterState),
-    .bdrv_needs_filename          = true,
+    .bdrv_needs_filename          = false,
     .bdrv_file_open               = qemu_gluster_open,
     .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
@@ -1009,7 +1326,7 @@ static BlockDriver bdrv_gluster_tcp = {
     .format_name                  = "gluster",
     .protocol_name                = "gluster+tcp",
     .instance_size                = sizeof(BDRVGlusterState),
-    .bdrv_needs_filename          = true,
+    .bdrv_needs_filename          = false,
     .bdrv_file_open               = qemu_gluster_open,
     .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
     .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
diff --git a/qapi/block-core.json b/qapi/block-core.json
index 1fa0674..5f8179b 100644
--- a/qapi/block-core.json
+++ b/qapi/block-core.json
@@ -2111,7 +2111,7 @@
 { 'struct': 'BlockdevOptionsGluster',
   'data': { 'volume': 'str',
             'path': 'str',
-            'server': 'GlusterServer',
+            'server': ['GlusterServer'],
             '*debug_level': 'int' } }
 
 ##