@@ -72,6 +72,26 @@ enabled - BOOLEAN
Default: 1 (enabled)
+path_manager - STRING
+ Set the default path manager name to use for each new MPTCP
+ socket. In-kernel path management will control subflow
+ connections and address advertisements according to
+ per-namespace values configured over the MPTCP netlink
+ API. Userspace path management puts per-MPTCP-connection subflow
+ connection decisions and address advertisements under control of
+ a privileged userspace program, at the cost of more netlink
+ traffic to propagate all of the related events and commands.
+ User-defined BPF-based path managers can also be set via this
+ sysctl.
+
+ This is a per-namespace sysctl.
+
+ * "kernel" - In-kernel path manager
+ * "userspace" - Userspace path manager
+ * all other strings - BPF-based path managers
+
+ Default: "kernel"
+
pm_type - INTEGER
Set the default path manager type to use for each new MPTCP
socket. In-kernel path management will control subflow
@@ -84,6 +104,8 @@ pm_type - INTEGER
This is a per-namespace sysctl.
+	(Deprecated, use path_manager instead.)
+
* 0 - In-kernel path manager
* 1 - Userspace path manager
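As a quick illustration of the documentation above (this snippet is not part of the patch), the new entry behaves like any other per-namespace string sysctl; the procfs path is the only assumption here:

/* Not part of the patch: a minimal userspace sketch that reads the
 * current per-namespace default and then switches it to the userspace
 * path manager. Assumes /proc is mounted in the usual place and that
 * the caller has the privileges needed to write the file.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *p = "/proc/sys/net/mptcp/path_manager";
	char cur[16] = { 0 };
	int fd;

	fd = open(p, O_RDONLY);
	if (fd < 0 || read(fd, cur, sizeof(cur) - 1) < 0) {
		perror("read path_manager");
		return 1;
	}
	close(fd);
	printf("current default: %s", cur);	/* e.g. "kernel\n" */

	fd = open(p, O_WRONLY);
	if (fd < 0 || write(fd, "userspace\n", 10) < 0) {
		perror("write path_manager");
		return 1;
	}
	close(fd);
	return 0;
}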
@@ -39,6 +39,7 @@ struct mptcp_pernet {
u8 allow_join_initial_addr_port;
u8 pm_type;
char scheduler[MPTCP_SCHED_NAME_MAX];
+ char path_manager[MPTCP_PM_NAME_MAX];
};
static struct mptcp_pernet *mptcp_get_pernet(const struct net *net)
@@ -83,6 +84,11 @@ int mptcp_get_pm_type(const struct net *net)
return mptcp_get_pernet(net)->pm_type;
}
+const char *mptcp_get_path_manager(const struct net *net)
+{
+ return mptcp_get_pernet(net)->path_manager;
+}
+
const char *mptcp_get_scheduler(const struct net *net)
{
return mptcp_get_pernet(net)->scheduler;
@@ -101,6 +107,7 @@ static void mptcp_pernet_set_defaults(struct mptcp_pernet *pernet)
pernet->stale_loss_cnt = 4;
pernet->pm_type = MPTCP_PM_TYPE_KERNEL;
strscpy(pernet->scheduler, "default", sizeof(pernet->scheduler));
+ strscpy(pernet->path_manager, "kernel", sizeof(pernet->path_manager));
}
#ifdef CONFIG_SYSCTL
@@ -174,6 +181,42 @@ static int proc_blackhole_detect_timeout(const struct ctl_table *table,
return ret;
}
+static int mptcp_set_path_manager(char *path_manager, const char *name)
+{
+ struct mptcp_pm_ops *pm;
+ int ret = 0;
+
+ rcu_read_lock();
+ pm = mptcp_pm_find(name);
+ if (pm)
+ strscpy(path_manager, name, MPTCP_PM_NAME_MAX);
+ else
+ ret = -ENOENT;
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static int proc_path_manager(const struct ctl_table *ctl, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
+{
+ char (*path_manager)[MPTCP_PM_NAME_MAX] = ctl->data;
+ char val[MPTCP_PM_NAME_MAX];
+ const struct ctl_table tbl = {
+ .data = val,
+ .maxlen = MPTCP_PM_NAME_MAX,
+ };
+ int ret;
+
+ strscpy(val, *path_manager, MPTCP_PM_NAME_MAX);
+
+ ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
+ if (write && ret == 0)
+ ret = mptcp_set_path_manager(*path_manager, val);
+
+ return ret;
+}
+
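One consequence of the -ENOENT path above, shown here as a userspace sketch rather than as part of the patch: writing a name that mptcp_pm_find() does not know about is refused and the previous value is kept. The bogus name below is purely illustrative.

/* Sketch only: an unregistered path manager name should be rejected
 * with ENOENT, leaving the sysctl unchanged. "no-such-pm" is an
 * illustrative bogus name.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/proc/sys/net/mptcp/path_manager", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "no-such-pm\n", 11) < 0 && errno == ENOENT)
		puts("unregistered path manager rejected, as expected");
	close(fd);
	return 0;
}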
static struct ctl_table mptcp_sysctl_table[] = {
{
.procname = "enabled",
@@ -253,6 +296,12 @@ static struct ctl_table mptcp_sysctl_table[] = {
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
},
+ {
+ .procname = "path_manager",
+ .maxlen = MPTCP_PM_NAME_MAX,
+ .mode = 0644,
+ .proc_handler = proc_path_manager,
+ },
};
static int mptcp_pernet_new_table(struct net *net, struct mptcp_pernet *pernet)
@@ -278,6 +327,7 @@ static int mptcp_pernet_new_table(struct net *net, struct mptcp_pernet *pernet)
table[8].data = &pernet->close_timeout;
table[9].data = &pernet->blackhole_timeout;
table[10].data = &pernet->syn_retrans_before_tcp_fallback;
+ table[11].data = &pernet->path_manager;
hdr = register_net_sysctl_sz(net, MPTCP_SYSCTL_PATH, table,
ARRAY_SIZE(mptcp_sysctl_table));
@@ -694,6 +694,7 @@ int mptcp_allow_join_id0(const struct net *net);
unsigned int mptcp_stale_loss_cnt(const struct net *net);
unsigned int mptcp_close_timeout(const struct sock *sk);
int mptcp_get_pm_type(const struct net *net);
+const char *mptcp_get_path_manager(const struct net *net);
const char *mptcp_get_scheduler(const struct net *net);
void mptcp_active_disable(struct sock *sk);
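For context on how the new getter is meant to be consumed, here is a hypothetical sketch, not taken from this diff: a caller can resolve the per-namespace default by name under RCU, mirroring the lookup in mptcp_set_path_manager(). The function name, the include location and the debug print are assumptions.

/* Hypothetical consumer, for illustration only: resolve the
 * per-namespace default path manager by name. Only mptcp_pm_find()
 * and mptcp_get_path_manager() are taken from the patch; the helper
 * name and the pr_debug() are illustrative.
 */
#include <linux/printk.h>
#include <linux/rcupdate.h>
#include "protocol.h"	/* assumed location of the declarations */

static void example_log_default_pm(const struct net *net)
{
	struct mptcp_pm_ops *ops;

	rcu_read_lock();
	ops = mptcp_pm_find(mptcp_get_path_manager(net));
	pr_debug("default path manager '%s' is %sregistered\n",
		 mptcp_get_path_manager(net), ops ? "" : "not ");
	rcu_read_unlock();
}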