@@ -385,16 +385,18 @@ static int xc_tmem_restore_new_pool(
uint64_t uuid_lo,
uint64_t uuid_hi)
{
- tmem_op_t op;
-
- op.cmd = TMEM_RESTORE_NEW;
- op.pool_id = pool_id;
- op.u.creat.arg1 = cli_id;
- op.u.creat.flags = flags;
- op.u.creat.uuid[0] = uuid_lo;
- op.u.creat.uuid[1] = uuid_hi;
-
- return do_tmem_op(xch, &op);
+ xen_tmem_pool_info_t pool = {
+ .flags.raw = flags,
+ .id = pool_id,
+ .n_pages = 0,
+ .uuid[0] = uuid_lo,
+ .uuid[1] = uuid_hi,
+ };
+
+ return xc_tmem_control(xch, pool_id,
+ XEN_SYSCTL_TMEM_OP_SET_POOLS,
+ cli_id, sizeof(pool),
+ 0 /* arg */, &pool);
}
int xc_tmem_restore(xc_interface *xch, int dom, int io_fd)
@@ -804,7 +804,7 @@ static void pool_flush(struct tmem_pool *pool, domid_t cli_id)
/************ CLIENT MANIPULATION OPERATIONS **************************/
-static struct client *client_create(domid_t cli_id)
+struct client *client_create(domid_t cli_id)
{
struct client *client = xzalloc(struct client);
int i, shift;
@@ -1435,9 +1435,9 @@ static int do_tmem_destroy_pool(uint32_t pool_id)
return 1;
}
-static int do_tmem_new_pool(domid_t this_cli_id,
- uint32_t d_poolid, uint32_t flags,
- uint64_t uuid_lo, uint64_t uuid_hi)
+int do_tmem_new_pool(domid_t this_cli_id,
+ uint32_t d_poolid, uint32_t flags,
+ uint64_t uuid_lo, uint64_t uuid_hi)
{
struct client *client;
domid_t cli_id;
@@ -1908,7 +1908,7 @@ long do_tmem_op(tmem_cli_op_t uops)
/* Acquire write lock for all commands at first. */
write_lock(&tmem_rwlock);
- if ( op.cmd == TMEM_CONTROL )
+ if ( op.cmd == TMEM_CONTROL || op.cmd == TMEM_RESTORE_NEW )
{
rc = -EOPNOTSUPP;
}
@@ -1917,11 +1917,6 @@ long do_tmem_op(tmem_cli_op_t uops)
rc = tmemc_shared_pool_auth(op.u.creat.arg1,op.u.creat.uuid[0],
op.u.creat.uuid[1],op.u.creat.flags);
}
- else if ( op.cmd == TMEM_RESTORE_NEW )
- {
- rc = do_tmem_new_pool(op.u.creat.arg1, op.pool_id, op.u.creat.flags,
- op.u.creat.uuid[0], op.u.creat.uuid[1]);
- }
else {
/*
* For other commands, create per-client tmem structure dynamically on
@@ -402,6 +402,52 @@ static int tmemc_get_pool(int cli_id,
return rc ? : idx;
}
+static int tmemc_set_pools(int cli_id,
+ XEN_GUEST_HANDLE(xen_tmem_pool_info_t) pools,
+ uint32_t len)
+{
+ unsigned int i, idx;
+ int rc = 0;
+ unsigned int nr = len / sizeof(xen_tmem_pool_info_t);
+ struct client *client = tmem_client_from_cli_id(cli_id);
+
+ if ( len % sizeof(xen_tmem_pool_info_t) )
+ return -EINVAL;
+
+ if ( nr > MAX_POOLS_PER_DOMAIN )
+ return -E2BIG;
+
+ if ( !guest_handle_okay(pools, nr) )
+ return -EINVAL;
+
+ if ( !client )
+ {
+ client = client_create(cli_id);
+ if ( !client )
+ return -ENOMEM;
+ }
+ for ( idx = 0, i = 0; i < nr; i++ )
+ {
+ xen_tmem_pool_info_t pool;
+
+ if ( __copy_from_guest_offset(&pool, pools, i, 1 ) )
+ return -EFAULT;
+
+ rc = do_tmem_new_pool(cli_id, pool.id, pool.flags.raw,
+ pool.uuid[0], pool.uuid[1]);
+ if ( rc < 0 )
+ break;
+
+ pool.id = rc;
+ if ( __copy_to_guest_offset(pools, idx, &pool, 1) )
+ return -EFAULT;
+ idx++;
+ }
+
+ /* And how many we have processed. */
+ return rc ? : idx;
+}
+
int tmem_control(struct xen_sysctl_tmem_op *op)
{
int ret;
@@ -438,6 +484,9 @@ int tmem_control(struct xen_sysctl_tmem_op *op)
case XEN_SYSCTL_TMEM_OP_GET_POOLS:
ret = tmemc_get_pool(op->cli_id, op->u.pool, op->len);
break;
+ case XEN_SYSCTL_TMEM_OP_SET_POOLS: /* TMEM_RESTORE_NEW */
+ ret = tmemc_set_pools(op->cli_id, op->u.pool, op->len);
+ break;
default:
ret = do_tmem_control(op);
break;
@@ -770,6 +770,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_sysctl_psr_cat_op_t);
#define XEN_SYSCTL_TMEM_OP_SET_CLIENT_INFO 6
#define XEN_SYSCTL_TMEM_OP_GET_POOLS 7
#define XEN_SYSCTL_TMEM_OP_QUERY_FREEABLE_MB 8
+#define XEN_SYSCTL_TMEM_OP_SET_POOLS 9
#define XEN_SYSCTL_TMEM_OP_SAVE_BEGIN 10
#define XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_PAGE 19
#define XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_INV 20
@@ -53,7 +53,8 @@
/* Privileged commands to HYPERVISOR_tmem_op() */
#define TMEM_AUTH 101
-#define TMEM_RESTORE_NEW 102
+#define TMEM_RESTORE_NEW 102 /* Now called via XEN_SYSCTL_tmem_op as
+ XEN_SYSCTL_TMEM_OP_SET_POOLS. */
/* Bits for HYPERVISOR_tmem_op(TMEM_NEW_POOL) */
#define TMEM_POOL_PERSIST 1
@@ -92,7 +93,7 @@ struct tmem_op {
uint64_t uuid[2];
uint32_t flags;
uint32_t arg1;
- } creat; /* for cmd == TMEM_NEW_POOL, TMEM_AUTH, TMEM_RESTORE_NEW */
+ } creat; /* for cmd == TMEM_NEW_POOL, TMEM_AUTH */
struct {
#if __XEN_INTERFACE_VERSION__ < 0x00040600
uint64_t oid[3];
@@ -18,6 +18,10 @@ extern rwlock_t tmem_rwlock;
int tmem_evict(void);
int do_tmem_control(struct xen_sysctl_tmem_op *op);
+struct client *client_create(domid_t cli_id);
+int do_tmem_new_pool(domid_t this_cli_id, uint32_t d_poolid, uint32_t flags,
+ uint64_t uuid_lo, uint64_t uuid_hi);
+
#endif /* CONFIG_TMEM */
#endif /* __XEN_TMEM_CONTROL_H__ */
@@ -199,7 +199,6 @@ static inline int tmem_get_tmemop_from_client(tmem_op_t *op, tmem_cli_op_t uops)
{
case TMEM_NEW_POOL: u = XLAT_tmem_op_u_creat; break;
case TMEM_AUTH: u = XLAT_tmem_op_u_creat; break;
- case TMEM_RESTORE_NEW:u = XLAT_tmem_op_u_creat; break;
default: u = XLAT_tmem_op_u_gen ; break;
}
XLAT_tmem_op(op, &cop);
This used to be done under TMEM_RESTORE_NEW which was a hypercall accessible by the guest. However there are a couple of reasons not to do it: - No checking of domid on TMEM_RESTORE_NEW which meant that any guest could create TMEM pools for other guests. - The guest can already create pools using TMEM_NEW_POOL (which is limited to guest doing the hypercall) - This functionality is only needed during migration - there is no need for the guest to have this functionality. However, to move this we also have to allocate the 'struct domain' ->tmem pointer. It is by default set to NULL and would be initialized via the guest do_tmem() hypercalls. Presumably that was the initial reason that TMEM_RESTORE_NEW was in the guest accessible hypercalls. Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> --- Cc: Ian Jackson <ian.jackson@eu.citrix.com> Cc: Wei Liu <wei.liu2@citrix.com> --- tools/libxc/xc_tmem.c | 22 ++++++++++--------- xen/common/tmem.c | 15 +++++-------- xen/common/tmem_control.c | 49 ++++++++++++++++++++++++++++++++++++++++++ xen/include/public/sysctl.h | 1 + xen/include/public/tmem.h | 5 +++-- xen/include/xen/tmem_control.h | 4 ++++ xen/include/xen/tmem_xen.h | 1 - 7 files changed, 74 insertions(+), 23 deletions(-)