--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -1901,7 +1901,7 @@ int xc_tmem_control_oid(xc_interface *xch, int32_t pool_id, uint32_t subop,
int xc_tmem_control(xc_interface *xch,
int32_t pool_id, uint32_t subop, uint32_t cli_id,
uint32_t len, uint32_t arg, void *buf);
-int xc_tmem_auth(xc_interface *xch, int cli_id, char *uuid_str, int arg1);
+int xc_tmem_auth(xc_interface *xch, int cli_id, char *uuid_str, int enable);
int xc_tmem_save(xc_interface *xch, int dom, int live, int fd, int field_marker);
int xc_tmem_save_extra(xc_interface *xch, int dom, int fd, int field_marker);
void xc_tmem_save_done(xc_interface *xch, int dom);
--- a/tools/libxc/xc_tmem.c
+++ b/tools/libxc/xc_tmem.c
@@ -22,30 +22,6 @@
#include <assert.h>
#include <xen/tmem.h>
-static int do_tmem_op(xc_interface *xch, tmem_op_t *op)
-{
- int ret;
- DECLARE_HYPERCALL_BOUNCE(op, sizeof(*op), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
-
- if ( xc_hypercall_bounce_pre(xch, op) )
- {
- PERROR("Could not bounce buffer for tmem op hypercall");
- return -EFAULT;
- }
-
- ret = xencall1(xch->xcall, __HYPERVISOR_tmem_op,
- HYPERCALL_BUFFER_AS_ARG(op));
- if ( ret < 0 )
- {
- if ( errno == EACCES )
- DPRINTF("tmem operation failed -- need to"
- " rebuild the user-space tool set?\n");
- }
- xc_hypercall_bounce_post(xch, op);
-
- return ret;
-}
-
int xc_tmem_control(xc_interface *xch,
int32_t pool_id,
uint32_t cmd,
@@ -69,7 +45,8 @@ int xc_tmem_control(xc_interface *xch,
sysctl.u.tmem_op.oid.oid[1] = 0;
sysctl.u.tmem_op.oid.oid[2] = 0;
- if ( cmd == XEN_SYSCTL_TMEM_OP_SET_CLIENT_INFO )
+ if ( cmd == XEN_SYSCTL_TMEM_OP_SET_CLIENT_INFO ||
+ cmd == XEN_SYSCTL_TMEM_OP_SET_AUTH )
HYPERCALL_BOUNCE_SET_DIR(buf, XC_HYPERCALL_BUFFER_BOUNCE_IN);
if ( len )
{
@@ -176,22 +153,25 @@ static int xc_tmem_uuid_parse(char *uuid_str, uint64_t *uuid_lo, uint64_t *uuid_
int xc_tmem_auth(xc_interface *xch,
int cli_id,
char *uuid_str,
- int arg1)
+ int enable)
{
- tmem_op_t op;
-
- op.cmd = TMEM_AUTH;
- op.pool_id = 0;
- op.u.creat.arg1 = cli_id;
- op.u.creat.flags = arg1;
- if ( xc_tmem_uuid_parse(uuid_str, &op.u.creat.uuid[0],
- &op.u.creat.uuid[1]) < 0 )
+ xen_tmem_pool_info_t pool = {
+ .flags.u.auth = enable,
+ .id = 0,
+ .n_pages = 0,
+ .uuid[0] = 0,
+ .uuid[1] = 0,
+ };
+ if ( xc_tmem_uuid_parse(uuid_str, &pool.uuid[0],
+ &pool.uuid[1]) < 0 )
{
PERROR("Can't parse uuid, use xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx");
return -1;
}
-
- return do_tmem_op(xch, &op);
+ return xc_tmem_control(xch, 0 /* pool_id */,
+ XEN_SYSCTL_TMEM_OP_SET_AUTH,
+ cli_id, sizeof(pool),
+ 0 /* arg */, &pool);
}
/* Save/restore/live migrate */
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -1604,8 +1604,8 @@ fail:
/************ TMEM CONTROL OPERATIONS ************************************/
-static int tmemc_shared_pool_auth(domid_t cli_id, uint64_t uuid_lo,
- uint64_t uuid_hi, bool_t auth)
+int tmemc_shared_pool_auth(domid_t cli_id, uint64_t uuid_lo,
+ uint64_t uuid_hi, bool auth)
{
struct client *client;
int i, free = -1;
@@ -1908,7 +1908,8 @@ long do_tmem_op(tmem_cli_op_t uops)
/* Acquire write lock for all commands at first. */
write_lock(&tmem_rwlock);
- if ( op.cmd == TMEM_CONTROL || op.cmd == TMEM_RESTORE_NEW )
+ if ( op.cmd == TMEM_CONTROL || op.cmd == TMEM_RESTORE_NEW ||
+ op.cmd == TMEM_AUTH )
{
rc = -EOPNOTSUPP;
}
--- a/xen/common/tmem_control.c
+++ b/xen/common/tmem_control.c
@@ -448,6 +448,51 @@ static int tmemc_set_pools(int cli_id,
return rc ? : idx;
}
+static int tmemc_auth_pools(int cli_id,
+ XEN_GUEST_HANDLE(xen_tmem_pool_info_t) pools,
+ uint32_t len)
+{
+ unsigned int i, idx;
+ int rc = 0;
+ unsigned int nr = len / sizeof(xen_tmem_pool_info_t);
+ struct client *client = tmem_client_from_cli_id(cli_id);
+
+ if ( len % sizeof(xen_tmem_pool_info_t) )
+ return -EINVAL;
+
+ if ( nr > MAX_POOLS_PER_DOMAIN )
+ return -E2BIG;
+
+ if ( !guest_handle_okay(pools, nr) )
+ return -EINVAL;
+
+ if ( !client )
+ {
+ client = client_create(cli_id);
+ if ( !client )
+ return -ENOMEM;
+ }
+
+ for ( idx = 0, i = 0; i < nr; i++ )
+ {
+ xen_tmem_pool_info_t pool;
+
+ if ( __copy_from_guest_offset(&pool, pools, i, 1) )
+ return -EFAULT;
+
+ rc = tmemc_shared_pool_auth(cli_id, pool.uuid[0], pool.uuid[1],
+ pool.flags.u.auth);
+
+ if ( rc < 0 )
+ break;
+
+ idx++;
+ }
+
+ /* And how many we have processed. */
+ return rc ? : idx;
+}
+
int tmem_control(struct xen_sysctl_tmem_op *op)
{
int ret;
@@ -487,6 +532,9 @@ int tmem_control(struct xen_sysctl_tmem_op *op)
case XEN_SYSCTL_TMEM_OP_SET_POOLS: /* TMEM_RESTORE_NEW */
ret = tmemc_set_pools(op->cli_id, op->u.pool, op->len);
break;
+ case XEN_SYSCTL_TMEM_OP_SET_AUTH: /* TMEM_AUTH */
+ ret = tmemc_auth_pools(op->cli_id, op->u.pool, op->len);
+ break;
default:
ret = do_tmem_control(op);
break;
--- a/xen/include/public/sysctl.h
+++ b/xen/include/public/sysctl.h
@@ -772,6 +772,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_sysctl_psr_cat_op_t);
#define XEN_SYSCTL_TMEM_OP_QUERY_FREEABLE_MB 8
#define XEN_SYSCTL_TMEM_OP_SET_POOLS 9
#define XEN_SYSCTL_TMEM_OP_SAVE_BEGIN 10
+#define XEN_SYSCTL_TMEM_OP_SET_AUTH 11
#define XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_PAGE 19
#define XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_INV 20
#define XEN_SYSCTL_TMEM_OP_SAVE_END 21
@@ -824,7 +825,8 @@ struct xen_tmem_pool_info {
struct {
uint32_t persist:1, /* See TMEM_POOL_PERSIST. */
shared:1, /* See TMEM_POOL_SHARED. */
- rsv:2,
+ auth:1, /* See TMEM_POOL_AUTH. */
+ rsv1:1,
pagebits:8, /* TMEM_POOL_PAGESIZE_[SHIFT,MASK]. */
rsv2:12,
version:8; /* TMEM_POOL_VERSION_[SHIFT,MASK]. */
--- a/xen/include/public/tmem.h
+++ b/xen/include/public/tmem.h
@@ -51,10 +51,9 @@
#define TMEM_XCHG 10
#endif
-/* Privileged commands to HYPERVISOR_tmem_op() */
-#define TMEM_AUTH 101
-#define TMEM_RESTORE_NEW 102 /* Now called via XEN_SYSCTL_tmem_op as
-                                XEN_SYSCTL_TMEM_OP_SET_POOL. */
+/* Privileged commands now called via XEN_SYSCTL_tmem_op. */
+#define TMEM_AUTH 101 /* as XEN_SYSCTL_TMEM_OP_SET_AUTH. */
+#define TMEM_RESTORE_NEW 102 /* as XEN_SYSCTL_TMEM_OP_SET_POOLS. */
/* Bits for HYPERVISOR_tmem_op(TMEM_NEW_POOL) */
#define TMEM_POOL_PERSIST 1
@@ -93,7 +92,7 @@ struct tmem_op {
uint64_t uuid[2];
uint32_t flags;
uint32_t arg1;
- } creat; /* for cmd == TMEM_NEW_POOL, TMEM_AUTH */
+ } creat; /* for cmd == TMEM_NEW_POOL. */
struct {
#if __XEN_INTERFACE_VERSION__ < 0x00040600
uint64_t oid[3];
--- a/xen/include/xen/tmem_control.h
+++ b/xen/include/xen/tmem_control.h
@@ -22,6 +22,8 @@ struct client *client_create(domid_t cli_id);
int do_tmem_new_pool(domid_t this_cli_id, uint32_t d_poolid, uint32_t flags,
uint64_t uuid_lo, uint64_t uuid_hi);
+int tmemc_shared_pool_auth(domid_t cli_id, uint64_t uuid_lo,
+ uint64_t uuid_hi, bool auth);
#endif /* CONFIG_TMEM */
#endif /* __XEN_TMEM_CONTROL_H__ */
--- a/xen/include/xen/tmem_xen.h
+++ b/xen/include/xen/tmem_xen.h
@@ -198,7 +198,6 @@ static inline int tmem_get_tmemop_from_client(tmem_op_t *op, tmem_cli_op_t uops)
switch ( cop.cmd )
{
case TMEM_NEW_POOL: u = XLAT_tmem_op_u_creat; break;
- case TMEM_AUTH: u = XLAT_tmem_op_u_creat; break;
default: u = XLAT_tmem_op_u_gen ; break;
}
XLAT_tmem_op(op, &cop);
This patch adds a new sub-op, XEN_SYSCTL_TMEM_OP_SET_AUTH, which
surprisingly (or maybe not) looks like XEN_SYSCTL_TMEM_OP_SET_POOLS.

This hypercall came about, as explained in docs/misc/tmem-internals.html:

"When tmem was first proposed to the linux kernel mailing list (LKML),
there was concern expressed about security of shared ephemeral pools.
The initial tmem implementation only required a client to provide a
128-bit UUID to identify a shared pool, and the linux-side tmem
implementation obtained this UUID from the superblock of the shared
filesystem (in ocfs2).  It was pointed out on LKML that the UUID was
essentially a security key and any malicious domain that guessed it
would have access to any data from the shared filesystem that found
its way into tmem. ..
As a result, a Xen boot option -- tmem_shared_auth -- was added.  The
option defaults to disabled, but when it is enabled, management tools
must explicitly authenticate (or may explicitly deny) shared pool
access to any client.  On Xen, this is done with the xm
tmem-shared-auth command."

However the implementation has some rather large holes:

a) The hypercall could be called by any guest.

b) If the ->domain id value is 0xFFFF, one can toggle the
   tmem_global.shared_auth knob on or off.  Combined with a), that
   made it pretty bad.

c) If one toggles tmem_global.shared_auth off, the
   'tmem_shared_auth=1' boot parameter is ignored and one can join any
   shared pool (if the UUID is known)!

d) If 'tmem_shared_auth=1' and tmem_global.shared_auth is set to 1,
   one can only join a shared pool if its UUID has been set via
   'xl tmem-shared-auth'.  Otherwise the join fails and a non-shared
   pool is created instead (without any error reported to the guest).
   Not exactly sure whether the shared pool creation at that point
   should error out or not.

e) If a guest is migrated, the policy values (which UUIDs can be
   shared, whether tmem_global.shared_auth is set, etc.) are
   completely ignored.

This patch only fixes a) and only allows the hypercall to be called by
the control domain.  Subsequent patches will fix the remaining issues.

We also have to call client_create, as the guest at this point may not
have made any tmem hypercalls - and hence the '->tmem' pointer in
'struct domain' is still NULL.  Calling client_create here fixes that.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
---
Cc: Ian Jackson <ian.jackson@eu.citrix.com>
Cc: Wei Liu <wei.liu2@citrix.com>
---
 tools/libxc/include/xenctrl.h  |  2 +-
 tools/libxc/xc_tmem.c          | 52 +++++++++++++-----------------------------
 xen/common/tmem.c              |  7 +++---
 xen/common/tmem_control.c      | 48 ++++++++++++++++++++++++++++++++++++++
 xen/include/public/sysctl.h    |  4 +++-
 xen/include/public/tmem.h      |  9 ++++----
 xen/include/xen/tmem_control.h |  2 ++
 xen/include/xen/tmem_xen.h     |  1 -
 8 files changed, 78 insertions(+), 47 deletions(-)
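
For reference, and not part of the patch itself: a minimal sketch of how a
privileged toolstack could drive the reworked xc_tmem_auth() path after this
change.  The domain id (1) and the UUID string below are arbitrary example
values.

#include <stdio.h>
#include <xenctrl.h>

int main(void)
{
    /*
     * Open a libxc handle.  Opening succeeds in any domain, but the
     * SET_AUTH sysctl issued below is restricted to the control domain.
     */
    xc_interface *xch = xc_interface_open(NULL, NULL, 0);
    int rc;

    if ( !xch )
        return 1;

    /*
     * Authorize (enable = 1) the shared pool identified by the example UUID
     * for the client with domain id 1.  With this patch the call is routed
     * through xc_tmem_control() as XEN_SYSCTL_TMEM_OP_SET_AUTH instead of
     * the old TMEM_AUTH tmem_op hypercall.
     */
    rc = xc_tmem_auth(xch, 1, "abcdef01-2345-6789-abcd-ef0123456789", 1);
    if ( rc < 0 )
        fprintf(stderr, "tmem shared pool auth failed\n");

    xc_interface_close(xch);
    return rc < 0 ? 1 : 0;
}

This is roughly the path that 'xl tmem-shared-auth' takes via libxl.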