@@ -184,10 +184,13 @@ A directory of all current cpupools.
The individual cpupools. Each entry is a directory with the name being the
cpupool-id (e.g. /cpupool/0/).
-#### /cpupool/*/sched-gran = ("cpu" | "core" | "socket")
+#### /cpupool/*/sched-gran = ("cpu" | "core" | "socket") [w]
The scheduling granularity of a cpupool.
+Writing a value is allowed only for cpupools with no cpu assigned, and only
+if the architecture supports different scheduling granularities.
+
#### /params/
A directory of runtime parameters.
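
As a usage illustration for the node made writable above, here is a minimal
dom0-side sketch. It assumes the libxenhypfs client library and its
xenhypfs_open()/xenhypfs_write()/xenhypfs_close() calls, none of which are
part of this patch; whether the write is accepted is decided by the
hypervisor-side checks added below.

    /*
     * Hedged sketch, not part of this patch: switch cpupool 1 to core
     * scheduling via the new writable hypfs node, using the libxenhypfs
     * client library (assumed API: xenhypfs_open/_write/_close).
     */
    #include <stdio.h>
    #include <xenhypfs.h>

    int main(void)
    {
        xenhypfs_handle *fshdl = xenhypfs_open(NULL, 0);
        int rc;

        if ( !fshdl )
            return 1;

        /* Rejected by the hypervisor unless the pool has no cpus assigned. */
        rc = xenhypfs_write(fshdl, "/cpupool/1/sched-gran", "core");
        if ( rc )
            fprintf(stderr, "setting sched-gran failed\n");

        xenhypfs_close(fshdl);

        return rc ? 1 : 0;
    }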
@@ -77,7 +77,7 @@ static void sched_gran_print(enum sched_gran mode, unsigned int gran)
}
#ifdef CONFIG_HAS_SCHED_GRANULARITY
-static int __init sched_select_granularity(const char *str)
+static int sched_gran_get(const char *str, enum sched_gran *mode)
{
unsigned int i;
@@ -85,36 +85,43 @@ static int __init sched_select_granularity(const char *str)
{
if ( strcmp(sg_name[i].name, str) == 0 )
{
- opt_sched_granularity = sg_name[i].mode;
+ *mode = sg_name[i].mode;
return 0;
}
}
return -EINVAL;
}
+
+static int __init sched_select_granularity(const char *str)
+{
+ return sched_gran_get(str, &opt_sched_granularity);
+}
custom_param("sched-gran", sched_select_granularity);
+#elif defined(CONFIG_HYPFS)
+static int sched_gran_get(const char *str, enum sched_gran *mode)
+{
+ return -EINVAL;
+}
#endif
-static unsigned int __init cpupool_check_granularity(void)
+static unsigned int cpupool_check_granularity(enum sched_gran mode)
{
unsigned int cpu;
unsigned int siblings, gran = 0;
- if ( opt_sched_granularity == SCHED_GRAN_cpu )
+ if ( mode == SCHED_GRAN_cpu )
return 1;
for_each_online_cpu ( cpu )
{
- siblings = cpumask_weight(sched_get_opt_cpumask(opt_sched_granularity,
- cpu));
+ siblings = cpumask_weight(sched_get_opt_cpumask(mode, cpu));
if ( gran == 0 )
gran = siblings;
else if ( gran != siblings )
return 0;
}
- sched_disable_smt_switching = true;
-
return gran;
}
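
To make the check above concrete: for any granularity other than cpu, every
online cpu must report the same sibling count at the chosen level, and that
common count becomes the granularity; any asymmetry yields 0 and the value is
rejected. A self-contained sketch of just this reduction, with invented names
(common_gran(), the demo arrays) that are not Xen code:

    #include <stdio.h>

    /* Return the sibling count shared by all cpus, or 0 on a mismatch. */
    static unsigned int common_gran(const unsigned int *siblings,
                                    unsigned int n_cpus)
    {
        unsigned int gran = 0;

        for ( unsigned int cpu = 0; cpu < n_cpus; cpu++ )
        {
            if ( gran == 0 )
                gran = siblings[cpu];      /* first cpu sets the candidate */
            else if ( gran != siblings[cpu] )
                return 0;                  /* asymmetric topology: reject */
        }

        return gran;
    }

    int main(void)
    {
        unsigned int smt2[]  = { 2, 2, 2, 2 };  /* SMT-2 on every core */
        unsigned int mixed[] = { 2, 2, 1, 2 };  /* one core lost a sibling */

        printf("%u %u\n", common_gran(smt2, 4), common_gran(mixed, 4));

        return 0;
    }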
@@ -126,7 +133,7 @@ static void __init cpupool_gran_init(void)
while ( gran == 0 )
{
- gran = cpupool_check_granularity();
+ gran = cpupool_check_granularity(opt_sched_granularity);
if ( gran == 0 )
{
@@ -152,6 +159,13 @@ static void __init cpupool_gran_init(void)
if ( fallback )
warning_add(fallback);
+    /*
+     * Set only here at boot: cpupool_check_granularity() is now also used
+     * to validate runtime writes, so it must stay free of side effects.
+     */
+    if ( opt_sched_granularity != SCHED_GRAN_cpu )
+        sched_disable_smt_switching = true;
+
sched_granularity = gran;
sched_gran_print(opt_sched_granularity, sched_granularity);
}
@@ -1126,17 +1136,57 @@ static unsigned int hypfs_gran_getsize(const struct hypfs_entry *entry)
return strlen(gran) + 1;
}
+static int cpupool_gran_write(struct hypfs_entry_leaf *leaf,
+ XEN_GUEST_HANDLE_PARAM(const_void) uaddr,
+ unsigned int ulen)
+{
+ const struct hypfs_dyndir_id *data;
+ struct cpupool *cpupool;
+ enum sched_gran gran;
+ unsigned int sched_gran = 0;
+ char name[SCHED_GRAN_NAME_LEN];
+ int ret = 0;
+
+ if ( ulen > SCHED_GRAN_NAME_LEN )
+ return -ENOSPC;
+
+ if ( copy_from_guest(name, uaddr, ulen) )
+ return -EFAULT;
+
+    /* Require the input to be a string with the NUL as its last byte. */
+    if ( memchr(name, 0, ulen) == (name + ulen - 1) )
+        sched_gran = sched_gran_get(name, &gran) ?
+                     0 : cpupool_check_granularity(gran);
+ if ( sched_gran == 0 )
+ return -EINVAL;
+
+ data = hypfs_get_dyndata();
+ cpupool = data->data;
+ ASSERT(cpupool);
+
+    /* Granularity can only change while the pool has no cpus assigned. */
+    if ( !cpumask_empty(cpupool->cpu_valid) )
+ ret = -EBUSY;
+ else
+ {
+ cpupool->gran = gran;
+ cpupool->sched_gran = sched_gran;
+ }
+
+ return ret;
+}
+
static const struct hypfs_funcs cpupool_gran_funcs = {
.enter = hypfs_node_enter,
.exit = hypfs_node_exit,
.read = cpupool_gran_read,
- .write = hypfs_write_deny,
+ .write = cpupool_gran_write,
.getsize = hypfs_gran_getsize,
.findentry = hypfs_leaf_findentry,
};
static HYPFS_VARSIZE_INIT(cpupool_gran, XEN_HYPFS_TYPE_STRING, "sched-gran",
- 0, &cpupool_gran_funcs);
+ SCHED_GRAN_NAME_LEN, &cpupool_gran_funcs);
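
One remark on the HYPFS_VARSIZE_INIT() change just above: raising the
maximum-size argument from 0 to SCHED_GRAN_NAME_LEN appears to be what lets
the generic hypfs write machinery pass user input of up to that many bytes
through to cpupool_gran_write() at all; with the previous 0 no write would
have been accepted for this node.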
static char granstr[SCHED_GRAN_NAME_LEN] = {
[0 ... SCHED_GRAN_NAME_LEN - 2] = '?',
[SCHED_GRAN_NAME_LEN - 1] = 0