@@ -132,9 +132,10 @@ extern struct pid *find_vpid(int nr);
*/
extern struct pid *find_get_pid(int nr);
extern struct pid *find_ge_pid(int nr, struct pid_namespace *);
+struct task_struct *find_get_tgid_task(int *id, struct pid_namespace *);
extern struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
- size_t set_tid_size);
+ size_t set_tid_size, bool group_leader);
extern void free_pid(struct pid *pid);
extern void disable_pid_allocation(struct pid_namespace *ns);
@@ -2254,7 +2254,7 @@ static __latent_entropy struct task_struct *copy_process(
if (pid != &init_struct_pid) {
pid = alloc_pid(p->nsproxy->pid_ns_for_children, args->set_tid,
- args->set_tid_size);
+ args->set_tid_size, !(clone_flags & CLONE_THREAD));
if (IS_ERR(pid)) {
retval = PTR_ERR(pid);
goto bad_fork_cleanup_thread;
@@ -157,7 +157,7 @@ void free_pid(struct pid *pid)
}
struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
- size_t set_tid_size)
+ size_t set_tid_size, bool group_leader)
{
struct pid *pid;
enum pid_type type;
@@ -272,6 +272,8 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
for ( ; upid >= pid->numbers; --upid) {
/* Make the PID visible to find_pid_ns. */
idr_replace(&upid->ns->idr, pid, upid->nr);
+ if (group_leader)
+ idr_set_tag(&upid->ns->idr, upid->nr);
upid->ns->pid_allocated++;
}
spin_unlock_irq(&pidmap_lock);
@@ -331,6 +333,10 @@ static struct pid **task_pid_ptr(struct task_struct *task, enum pid_type type)
void attach_pid(struct task_struct *task, enum pid_type type)
{
struct pid *pid = *task_pid_ptr(task, type);
+ struct pid_namespace *pid_ns = ns_of_pid(pid);
+ pid_t pid_nr = pid_nr_ns(pid, pid_ns);
+
+ WARN_ON(type == PIDTYPE_TGID && !idr_get_tag(&pid_ns->idr, pid_nr));
hlist_add_head_rcu(&task->pid_links[type], &pid->tasks[type]);
}
@@ -520,6 +526,38 @@ struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
return idr_get_next(&ns->idr, &nr);
}
+/*
+ * Used by proc to find the first thread group leader task with an id greater
+ * than or equal to *id.
+ *
+ * Use the idr tag hint to find the next best pid. The tag does not guarantee a
+ * linked task exists, so retry until a suitable entry is found.
+ */
+struct task_struct *find_get_tgid_task(int *id, struct pid_namespace *ns)
+{
+ struct pid *pid;
+ struct task_struct *t;
+ unsigned int nr = *id;
+
+ rcu_read_lock();
+
+ do {
+ pid = idr_get_next_tag(&ns->idr, nr);
+ if (!pid) {
+ rcu_read_unlock();
+ return NULL;
+ }
+ t = pid_task(pid, PIDTYPE_TGID);
+ nr = pid_nr_ns(pid, ns) + 1;
+ } while (!t);
+
+ *id = pid_nr_ns(pid, ns);
+ get_task_struct(t);
+ rcu_read_unlock();
+
+ return t;
+}
+
struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags)
{
struct fd f;
Searching the pid_namespace for group leader tasks is a fairly inefficient
operation. Listing the root directory of a procfs mount performs a linear scan
of allocated pids, checking each entry for an associated PIDTYPE_TGID task to
determine whether to populate a directory entry. This can cause a significant
increase in readdir() syscall latency in namespaces that contain one or more
processes with large thread counts.

To facilitate improved TGID pid searches, tag the ids of pid entries that are
likely to have an associated PIDTYPE_TGID task. To keep the code simple and
avoid having to maintain synchronization between tag state and post-fork
pid-task association changes, the tag is applied to all pids allocated for
tasks cloned without CLONE_THREAD. This means it is possible for a pid to
remain tagged in the idr tree after being disassociated from the group leader
task. For example, a process that does a setsid() followed by fork() and
exit() (to daemonize) will remain associated with the original pid for the
session, but link with the child pid as the group leader. On the other hand,
the only place other than fork() where a tgid association occurs is in the
exec() path, which kills all other tasks in the group and associates the
current task with the preexisting leader pid. Therefore, the semantics of the
tag are that false positives (tagged pids without a PIDTYPE_TGID task) are
possible, but false negatives (untagged pids with a PIDTYPE_TGID task) should
never occur. This is an effective optimization because false positives are
fairly uncommon and add no extra overhead (i.e. we already have to check
pid_task() for tagged entries), while the tag still filters out thread pids
that are guaranteed not to have a TGID task association.

Tag entries in the pid allocation path when the caller specifies that the pid
associates with a new thread group. Since false negatives are not allowed,
warn in the event that a PIDTYPE_TGID task is ever attached to an untagged
pid. Finally, create a helper to implement the task search based on the tag
semantics defined above (modeled on the search logic currently implemented by
next_tgid() in procfs).

Signed-off-by: Brian Foster <bfoster@redhat.com>
---
 include/linux/pid.h |  3 ++-
 kernel/fork.c       |  2 +-
 kernel/pid.c        | 40 +++++++++++++++++++++++++++++++++++++++-
 3 files changed, 42 insertions(+), 3 deletions(-)
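
A note on the idr tag interface: idr_set_tag(), idr_get_tag() and
idr_get_next_tag() are introduced by an earlier patch in this series and are
not in mainline. The sketch below records the semantics this patch assumes;
the prototypes are inferred from the call sites above and are illustrative
only, not authoritative definitions.

/*
 * Assumed idr tag helpers (added by an earlier patch in the series);
 * prototypes inferred from the call sites in alloc_pid(), attach_pid() and
 * find_get_tgid_task() above.
 */
#include <linux/idr.h>
#include <linux/types.h>

/* Tag an already-allocated id so tagged-only walks can skip untagged entries. */
void idr_set_tag(struct idr *idr, unsigned long id);

/* Return true if the given id is currently tagged. */
bool idr_get_tag(const struct idr *idr, unsigned long id);

/* Return the entry at the lowest tagged id >= id, or NULL if none remains. */
void *idr_get_next_tag(struct idr *idr, unsigned long id);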
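
To make the false-positive case concrete, the snippet below is a hypothetical
userspace illustration (ordinary daemonization, not part of this patch or of
the kernel sources) of how a tagged pid can end up without a PIDTYPE_TGID task
while still being pinned as a session id:

/*
 * Userspace illustration only: after the daemonizing leader exits, its pid
 * stays allocated as the session id of the child, so the pid remains tagged
 * in the idr even though no PIDTYPE_TGID task is attached to it anymore.
 */
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	/*
	 * setsid() only succeeds if the caller is not already a process group
	 * leader, so real daemons typically fork() once before this point.
	 */
	setsid();		/* become session leader (this pid was tagged at fork time) */

	if (fork() > 0)
		exit(0);	/* leader exits; its pid survives as the session
				 * id but no longer has a PIDTYPE_TGID task */

	pause();		/* child carries on under its own (tagged) pid */
	return 0;
}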
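
Since the helper is modeled on the search logic of next_tgid() in procfs, here
is a hedged sketch (not part of this patch) of how that iterator in
fs/proc/base.c might be reworked on top of find_get_tgid_task() in a
follow-up; the tgid_iter layout matches the existing procfs structure:

/*
 * Possible follow-up in fs/proc/base.c (sketch only, not part of this patch):
 * next_tgid() rebuilt on top of find_get_tgid_task().
 */
#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <linux/sched/task.h>

struct tgid_iter {
	unsigned int tgid;
	struct task_struct *task;
};

static struct tgid_iter next_tgid(struct pid_namespace *ns, struct tgid_iter iter)
{
	int id = iter.tgid;

	if (iter.task)
		put_task_struct(iter.task);

	/*
	 * find_get_tgid_task() walks only tagged pids and returns a referenced
	 * group leader task with pid >= id (updating id to the pid actually
	 * found), or NULL once the namespace is exhausted.
	 */
	iter.task = find_get_tgid_task(&id, ns);
	iter.tgid = id;
	return iter;
}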