@@ -12,6 +12,22 @@
CHECK_vcpu_get_physid;
#undef xen_vcpu_get_physid
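+/*
+ * Populate callback handed to map_guest_area() below: seed the just-mapped
+ * compat runstate area and record that this vCPU uses the compat layout.
+ */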
+static void cf_check
+runstate_area_populate(void *map, struct vcpu *v)
+{
+    if ( is_pv_vcpu(v) )
+        v->arch.pv.need_update_runstate_area = false;
+
+    v->runstate_guest_area_compat = true;
+
+    if ( v == current )
+    {
+        struct compat_vcpu_runstate_info *info = map;
+
+        XLAT_vcpu_runstate_info(info, &v->runstate);
+    }
+}
+
int
compat_vcpu_op(int cmd, unsigned int vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg)
{
@@ -58,6 +74,25 @@ compat_vcpu_op(int cmd, unsigned int vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg)
break;
}
+    case VCPUOP_register_runstate_phys_area:
+    {
+        struct compat_vcpu_register_runstate_memory_area area;
+
+        rc = -EFAULT;
+        if ( copy_from_guest(&area.addr.p, arg, 1) )
+            break;
+
+        rc = map_guest_area(v, area.addr.p,
+                            sizeof(struct compat_vcpu_runstate_info),
+                            &v->runstate_guest_area,
+                            runstate_area_populate);
+        if ( rc == -ERESTART )
+            rc = hypercall_create_continuation(__HYPERVISOR_vcpu_op, "iih",
+                                               cmd, vcpuid, arg);
+
+        break;
+    }
+
case VCPUOP_register_vcpu_time_memory_area:
{
struct compat_vcpu_register_time_memory_area area = { .addr.p = 0 };
@@ -1830,6 +1830,26 @@ bool update_runstate_area(struct vcpu *v)
return rc;
}
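+/*
+ * Populate callback handed to map_guest_area() below: seed the just-mapped
+ * (native format) runstate area for the registering vCPU.
+ */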
+static void cf_check
+runstate_area_populate(void *map, struct vcpu *v)
+{
+#ifdef CONFIG_PV
+    if ( is_pv_vcpu(v) )
+        v->arch.pv.need_update_runstate_area = false;
+#endif
+
+#ifdef CONFIG_COMPAT
+    v->runstate_guest_area_compat = false;
+#endif
+
+    if ( v == current )
+    {
+        struct vcpu_runstate_info *info = map;
+
+        *info = v->runstate;
+    }
+}
+
long common_vcpu_op(int cmd, struct vcpu *v, XEN_GUEST_HANDLE_PARAM(void) arg)
{
long rc = 0;
@@ -2012,6 +2032,25 @@ long common_vcpu_op(int cmd, struct vcpu *v, XEN_GUEST_HANDLE_PARAM(void) arg)
break;
}
+    case VCPUOP_register_runstate_phys_area:
+    {
+        struct vcpu_register_runstate_memory_area area;
+
+        rc = -EFAULT;
+        if ( copy_from_guest(&area.addr.p, arg, 1) )
+            break;
+
+        rc = map_guest_area(v, area.addr.p,
+                            sizeof(struct vcpu_runstate_info),
+                            &v->runstate_guest_area,
+                            runstate_area_populate);
+        if ( rc == -ERESTART )
+            rc = hypercall_create_continuation(__HYPERVISOR_vcpu_op, "iih",
+                                               cmd, vcpuid, arg);
+
+        break;
+    }
+
default:
rc = -ENOSYS;
break;
@@ -110,6 +110,8 @@ DEFINE_XEN_GUEST_HANDLE(vcpu_runstate_info_t);
* runstate.state will always be RUNSTATE_running and
* runstate.state_entry_time will indicate the system time at which the
* VCPU was last scheduled to run.
+ *  3. New code should prefer VCPUOP_register_runstate_phys_area, and only
+ *     fall back to the operation here for backwards compatibility.
* @extra_arg == pointer to vcpu_register_runstate_memory_area structure.
*/
#define VCPUOP_register_runstate_memory_area 5
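As a guest-side illustration of note 3 above (not part of the patch): a kernel
would try the physical-address registration first and fall back to the
linear-address operation when the hypervisor does not know the new sub-op
(unknown commands come back as -ENOSYS, see the default case in
common_vcpu_op()).  This is only a sketch, assuming a typical guest environment
that provides a HYPERVISOR_vcpu_op() hypercall wrapper; "cpu", "runstate_gpa"
and "this_cpu_runstate" are placeholder names.

    struct vcpu_register_runstate_memory_area area = {
        .addr.p = runstate_gpa,   /* guest physical address of the area */
    };
    int rc = HYPERVISOR_vcpu_op(VCPUOP_register_runstate_phys_area, cpu, &area);

    if ( rc == -ENOSYS )          /* hypervisor too old: use linear address */
    {
        area.addr.v = &this_cpu_runstate;
        rc = HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area, cpu,
                                &area);
    }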
@@ -221,6 +223,19 @@ struct vcpu_register_time_memory_area {
typedef struct vcpu_register_time_memory_area vcpu_register_time_memory_area_t;
DEFINE_XEN_GUEST_HANDLE(vcpu_register_time_memory_area_t);
+/*
+ * Like the respective VCPUOP_register_*_memory_area, just using the "addr.p"
+ * field of the supplied struct as a guest physical address (i.e. in GFN space).
+ * The respective area may not cross a page boundary. Pass ~0 to unregister an
+ * area. Note that as long as an area is registered by physical address, the
+ * linear address based area will not be serviced (updated) by the hypervisor.
+ *
+ * Note that an area registered via VCPUOP_register_runstate_phys_area will be
+ * updated in the same manner as one registered via virtual address with
+ * VMASST_TYPE_runstate_update_flag engaged by the domain.
+ */
+#define VCPUOP_register_runstate_phys_area 14
+
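To make the registration conventions above concrete (again not part of the
patch): a minimal guest-side sketch, assuming a HYPERVISOR_vcpu_op() wrapper
plus placeholder virt_to_gfn(), PAGE_SHIFT/PAGE_SIZE and "runstate"/"cpu"
definitions supplied by the guest environment.

    /*
     * "runstate" is a per-vCPU struct vcpu_runstate_info placed so that it
     * does not cross a page boundary, as required above.
     */
    struct vcpu_register_runstate_memory_area area = {
        .addr.p = ((uint64_t)virt_to_gfn(&runstate) << PAGE_SHIFT) +
                  ((unsigned long)&runstate & (PAGE_SIZE - 1)),
    };
    int rc = HYPERVISOR_vcpu_op(VCPUOP_register_runstate_phys_area, cpu, &area);

    /* Passing ~0 later tears the physical-address based registration down. */
    area.addr.p = ~0ULL;
    rc = HYPERVISOR_vcpu_op(VCPUOP_register_runstate_phys_area, cpu, &area);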
#endif /* __XEN_PUBLIC_VCPU_H__ */
/*