--- a/include/linux/bpfilter.h
+++ b/include/linux/bpfilter.h
@@ -10,6 +10,8 @@ int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval,
unsigned int optlen);
int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval,
int __user *optlen);
+void bpfilter_umh_cleanup(struct umd_info *info);
+
struct bpfilter_umh_ops {
struct umd_info info;
/* since ip_getsockopt() can run in parallel, serialize access to umh */
@@ -18,7 +20,6 @@ struct bpfilter_umh_ops {
char __user *optval,
unsigned int optlen, bool is_set);
int (*start)(void);
- bool stop;
};
extern struct bpfilter_umh_ops bpfilter_ops;
#endif
--- a/net/bpfilter/bpfilter_kern.c
+++ b/net/bpfilter/bpfilter_kern.c
@@ -18,10 +18,11 @@ static void shutdown_umh(void)
struct umd_info *info = &bpfilter_ops.info;
struct pid *tgid = info->tgid;
- if (bpfilter_ops.stop)
- return;
-
- kill_pid_info(SIGKILL, SEND_SIG_PRIV, tgid);
+ if (tgid) {
+ kill_pid_info(SIGKILL, SEND_SIG_PRIV, tgid);
+ wait_event(tgid->wait_pidfd, !pid_task(tgid, PIDTYPE_TGID));
+ bpfilter_umh_cleanup(info);
+ }
}
static void __stop_umh(void)
@@ -77,7 +78,6 @@ static int start_umh(void)
err = fork_usermode_driver(&bpfilter_ops.info);
if (err)
return err;
- bpfilter_ops.stop = false;
pr_info("Loaded bpfilter_umh pid %d\n", pid_nr(bpfilter_ops.info.tgid));
/* health check that usermode process started correctly */
@@ -100,16 +100,11 @@ static int __init load_umh(void)
return err;
mutex_lock(&bpfilter_ops.lock);
- if (!bpfilter_ops.stop) {
- err = -EFAULT;
- goto out;
- }
err = start_umh();
if (!err && IS_ENABLED(CONFIG_INET)) {
bpfilter_ops.sockopt = &__bpfilter_process_sockopt;
bpfilter_ops.start = &start_umh;
}
-out:
mutex_unlock(&bpfilter_ops.lock);
if (err)
umd_unload_blob(&bpfilter_ops.info);
--- a/net/ipv4/bpfilter/sockopt.c
+++ b/net/ipv4/bpfilter/sockopt.c
@@ -12,16 +12,14 @@
struct bpfilter_umh_ops bpfilter_ops;
EXPORT_SYMBOL_GPL(bpfilter_ops);
-static void bpfilter_umh_cleanup(struct umd_info *info)
+void bpfilter_umh_cleanup(struct umd_info *info)
{
- mutex_lock(&bpfilter_ops.lock);
- bpfilter_ops.stop = true;
fput(info->pipe_to_umh);
fput(info->pipe_from_umh);
put_pid(info->tgid);
info->tgid = NULL;
- mutex_unlock(&bpfilter_ops.lock);
}
+EXPORT_SYMBOL_GPL(bpfilter_umh_cleanup);
static int bpfilter_mbox_request(struct sock *sk, int optname,
char __user *optval,
@@ -39,7 +37,11 @@ static int bpfilter_mbox_request(struct sock *sk, int optname,
goto out;
}
}
- if (bpfilter_ops.stop) {
+ if (bpfilter_ops.info.tgid &&
+ !pid_has_task(bpfilter_ops.info.tgid, PIDTYPE_TGID))
+ bpfilter_umh_cleanup(&bpfilter_ops.info);
+
+ if (!bpfilter_ops.info.tgid) {
err = bpfilter_ops.start();
if (err)
goto out;
@@ -70,9 +72,8 @@ int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval,
static int __init bpfilter_sockopt_init(void)
{
mutex_init(&bpfilter_ops.lock);
- bpfilter_ops.stop = true;
+ bpfilter_ops.info.tgid = NULL;
bpfilter_ops.info.driver_name = "bpfilter_umh";
- bpfilter_ops.info.cleanup = &bpfilter_umh_cleanup;
return 0;
}
Instead of relying on the exit_umh cleanup callback, use the fact that a
struct pid can be tested to see if a process still exists, and that
struct pid has a wait queue that is notified when the process dies.

Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
---
 include/linux/bpfilter.h     |  3 ++-
 net/bpfilter/bpfilter_kern.c | 15 +++++----------
 net/ipv4/bpfilter/sockopt.c  | 15 ++++++++-------
 3 files changed, 15 insertions(+), 18 deletions(-)
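For reference, a minimal sketch (a hypothetical helper, not taken from the
patch) of the pattern relied on here: signal the user-mode helper, sleep on
the pid's wait_pidfd wait queue until pid_task() reports the process is gone,
then drop the struct pid reference. The function name and the exact set of
includes are assumptions for illustration only.

#include <linux/pid.h>
#include <linux/sched/signal.h>
#include <linux/wait.h>

/* Hypothetical: synchronously stop a user-mode helper tracked by @tgid. */
static void example_stop_umh_sync(struct pid *tgid)
{
	if (!tgid)
		return;

	/* Ask the helper thread group to exit. */
	kill_pid_info(SIGKILL, SEND_SIG_PRIV, tgid);

	/*
	 * struct pid's wait_pidfd queue is woken when the task exits;
	 * pid_task() returns NULL once no task uses this pid anymore.
	 */
	wait_event(tgid->wait_pidfd, !pid_task(tgid, PIDTYPE_TGID));

	/* Drop the reference taken when the pid was recorded. */
	put_pid(tgid);
}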