--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -9,7 +9,7 @@
#include <linux/threads.h>
#include <linux/nsproxy.h>
#include <linux/ns_common.h>
-#include <linux/idr.h>
+#include <linux/xarray.h>
/* MAX_PID_NS_LEVEL is needed for limiting size of 'struct pid' */
#define MAX_PID_NS_LEVEL 32
@@ -17,7 +17,7 @@
struct fs_pin;
struct pid_namespace {
- struct idr idr;
+ struct xarray xa;
unsigned int pid_next;
struct rcu_head rcu;
unsigned int pid_allocated;
@@ -38,6 +38,8 @@ extern struct pid_namespace init_pid_ns;
#define PIDNS_ADDING (1U << 31)
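+/*
+ * Free slots are tracked by the XArray itself (XA_FLAGS_TRACK_FREE) so
+ * __xa_alloc_cyclic() can hand out pids, and the lock is irq-safe
+ * (XA_FLAGS_LOCK_IRQ) because free_pid() takes it with irqs disabled.
+ */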
+#define PID_XA_FLAGS (XA_FLAGS_TRACK_FREE | XA_FLAGS_LOCK_IRQ)
+
#ifdef CONFIG_PID_NS
static inline struct pid_namespace *get_pid_ns(struct pid_namespace *ns)
{
@@ -85,7 +87,7 @@ static inline int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd)
extern struct pid_namespace *task_active_pid_ns(struct task_struct *tsk);
void pidhash_init(void);
-void pid_idr_init(void);
+void pid_init(void);
static inline bool task_is_in_init_pid_ns(struct task_struct *tsk)
{
--- a/include/linux/threads.h
+++ b/include/linux/threads.h
@@ -38,7 +38,7 @@
* Define a minimum number of pids per cpu. Heuristically based
* on original pid max of 32k for 32 cpus. Also, increase the
* minimum settable value for pid_max on the running system based
- * on similar defaults. See kernel/pid.c:pid_idr_init() for details.
+ * on similar defaults. See kernel/pid.c:pid_init() for details.
*/
#define PIDS_PER_CPU_DEFAULT 1024
#define PIDS_PER_CPU_MIN 8
--- a/init/main.c
+++ b/init/main.c
@@ -73,7 +73,6 @@
#include <linux/sched.h>
#include <linux/sched/init.h>
#include <linux/signal.h>
-#include <linux/idr.h>
#include <linux/kgdb.h>
#include <linux/ftrace.h>
#include <linux/async.h>
@@ -1100,7 +1099,7 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
late_time_init();
sched_clock_init();
calibrate_delay();
- pid_idr_init();
+ pid_init();
anon_vma_init();
#ifdef CONFIG_X86
if (efi_enabled(EFI_RUNTIME_SERVICES))
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -41,7 +41,7 @@
#include <linux/anon_inodes.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
-#include <linux/idr.h>
+#include <linux/xarray.h>
#include <net/sock.h>
#include <uapi/linux/pidfd.h>
@@ -66,15 +66,9 @@ int pid_max = PID_MAX_DEFAULT;
int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;
-/*
- * PID-map pages start out as NULL, they get allocated upon
- * first use and are never deallocated. This way a low pid_max
- * value does not cause lots of bitmaps to be allocated, but
- * the scheme scales to up to 4 million PIDs, runtime.
- */
struct pid_namespace init_pid_ns = {
.ns.count = REFCOUNT_INIT(2),
- .idr = IDR_INIT(init_pid_ns.idr),
+ .xa = XARRAY_INIT(init_pid_ns.xa, PID_XA_FLAGS),
.pid_next = 0,
.pid_allocated = PIDNS_ADDING,
.level = 0,
@@ -118,7 +112,7 @@ void free_pid(struct pid *pid)
struct upid *upid = pid->numbers + i;
struct pid_namespace *ns = upid->ns;
- xa_lock_irqsave(&ns->idr.idr_rt, flags);
+ xa_lock_irqsave(&ns->xa, flags);
switch (--ns->pid_allocated) {
case 2:
case 1:
@@ -135,8 +129,8 @@ void free_pid(struct pid *pid)
break;
}
- idr_remove(&ns->idr, upid->nr);
- xa_unlock_irqrestore(&ns->idr.idr_rt, flags);
+ __xa_erase(&ns->xa, upid->nr);
+ xa_unlock_irqrestore(&ns->xa, flags);
}
call_rcu(&pid->rcu, delayed_put_pid);
@@ -147,7 +141,7 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
{
struct pid *pid;
enum pid_type type;
- int i, nr;
+ int i;
struct pid_namespace *tmp;
struct upid *upid;
int retval = -ENOMEM;
@@ -191,18 +185,17 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
set_tid_size--;
}
- idr_preload(GFP_KERNEL);
- xa_lock_irq(&tmp->idr.idr_rt);
+ xa_lock_irq(&tmp->xa);
if (tid) {
- nr = idr_alloc(&tmp->idr, NULL, tid,
- tid + 1, GFP_ATOMIC);
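+ /* Reserve the slot with NULL; find_pid_ns() must not see a partial pid. */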
+ retval = __xa_insert(&tmp->xa, tid, NULL, GFP_KERNEL);
+
/*
- * If ENOSPC is returned it means that the PID is
- * alreay in use. Return EEXIST in that case.
+ * If EBUSY is returned it means that the PID is already
+ * in use. Return EEXIST in that case.
*/
- if (nr == -ENOSPC)
- nr = -EEXIST;
+ if (retval == -EBUSY)
+ retval = -EEXIST;
} else {
int pid_min = 1;
/*
@@ -216,19 +209,18 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
* Store a null pointer so find_pid_ns does not find
* a partially initialized PID (see below).
*/
- nr = idr_alloc_cyclic(&tmp->idr, NULL, pid_min,
- pid_max, GFP_ATOMIC);
- tmp->pid_next = nr + 1;
+ retval = __xa_alloc_cyclic(&tmp->xa, &tid, NULL,
+ XA_LIMIT(pid_min, pid_max),
+ &tmp->pid_next, GFP_KERNEL);
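+ /* -EBUSY from __xa_alloc_cyclic() means the pid space is exhausted. */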
+ if (retval == -EBUSY)
+ retval = -EAGAIN;
}
- xa_unlock_irq(&tmp->idr.idr_rt);
- idr_preload_end();
+ xa_unlock_irq(&tmp->xa);
- if (nr < 0) {
- retval = (nr == -ENOSPC) ? -EAGAIN : nr;
+ if (retval < 0)
goto out_free;
- }
- pid->numbers[i].nr = nr;
+ pid->numbers[i].nr = tid;
pid->numbers[i].ns = tmp;
tmp = tmp->parent;
}
@@ -256,17 +248,17 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
for ( ; upid >= pid->numbers; --upid) {
tmp = upid->ns;
- xa_lock_irq(&tmp->idr.idr_rt);
+ xa_lock_irq(&tmp->xa);
if (tmp == ns && !(tmp->pid_allocated & PIDNS_ADDING)) {
- xa_unlock_irq(&tmp->idr.idr_rt);
+ xa_unlock_irq(&tmp->xa);
put_pid_ns(ns);
goto out_free;
}
/* Make the PID visible to find_pid_ns. */
- idr_replace(&tmp->idr, pid, upid->nr);
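+ /* No GFP flags needed: the slot was already reserved with NULL above. */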
+ __xa_store(&tmp->xa, upid->nr, pid, 0);
tmp->pid_allocated++;
- xa_unlock_irq(&tmp->idr.idr_rt);
+ xa_unlock_irq(&tmp->xa);
}
return pid;
@@ -276,14 +268,14 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
upid = pid->numbers + i;
tmp = upid->ns;
- xa_lock_irq(&tmp->idr.idr_rt);
+ xa_lock_irq(&tmp->xa);
/* On failure to allocate the first pid, reset the state */
if (tmp == ns && tmp->pid_allocated == PIDNS_ADDING)
ns->pid_next = 0;
- idr_remove(&tmp->idr, upid->nr);
- xa_unlock_irq(&tmp->idr.idr_rt);
+ __xa_erase(&tmp->xa, upid->nr);
+ xa_unlock_irq(&tmp->xa);
}
kmem_cache_free(ns->pid_cachep, pid);
@@ -292,14 +284,14 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
void disable_pid_allocation(struct pid_namespace *ns)
{
- xa_lock_irq(&ns->idr.idr_rt);
+ xa_lock_irq(&ns->xa);
ns->pid_allocated &= ~PIDNS_ADDING;
- xa_unlock_irq(&ns->idr.idr_rt);
+ xa_unlock_irq(&ns->xa);
}
struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
- return idr_find(&ns->idr, nr);
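+ /* Lockless lookup, safe under rcu_read_lock(), like idr_find() was. */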
+ return xa_load(&ns->xa, nr);
}
EXPORT_SYMBOL_GPL(find_pid_ns);
@@ -508,7 +500,9 @@ EXPORT_SYMBOL_GPL(task_active_pid_ns);
*/
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
- return idr_get_next(&ns->idr, &nr);
+ unsigned long index = nr;
+
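+ /* xa_find() needs an unsigned long index it can advance. */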
+ return xa_find(&ns->xa, &index, ULONG_MAX, XA_PRESENT);
}
struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags)
@@ -649,7 +643,7 @@ SYSCALL_DEFINE2(pidfd_open, pid_t, pid, unsigned int, flags)
* take it we can leave the interrupts enabled. For now it is easier to be safe
* than to prove it can't happen.
*/
-void __init pid_idr_init(void)
+void __init pid_init(void)
{
/* Verify no one has done anything silly: */
BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_ADDING);
@@ -661,8 +655,6 @@ void __init pid_idr_init(void)
PIDS_PER_CPU_MIN * num_possible_cpus());
pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min);
- idr_init(&init_pid_ns.idr);
-
init_pid_ns.pid_cachep = KMEM_CACHE(pid,
SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT);
}
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -22,7 +22,7 @@
#include <linux/export.h>
#include <linux/sched/task.h>
#include <linux/sched/signal.h>
-#include <linux/idr.h>
+#include <linux/xarray.h>
static DEFINE_MUTEX(pid_caches_mutex);
static struct kmem_cache *pid_ns_cachep;
@@ -92,15 +92,15 @@ static struct pid_namespace *create_pid_namespace(struct user_namespace *user_ns
if (ns == NULL)
goto out_dec;
- idr_init(&ns->idr);
+ xa_init_flags(&ns->xa, PID_XA_FLAGS);
ns->pid_cachep = create_pid_cachep(level);
if (ns->pid_cachep == NULL)
- goto out_free_idr;
+ goto out_free_xa;
err = ns_alloc_inum(&ns->ns);
if (err)
- goto out_free_idr;
+ goto out_free_xa;
ns->ns.ops = &pidns_operations;
refcount_set(&ns->ns.count, 1);
@@ -112,8 +112,8 @@ static struct pid_namespace *create_pid_namespace(struct user_namespace *user_ns
return ns;
-out_free_idr:
- idr_destroy(&ns->idr);
+out_free_xa:
+ xa_destroy(&ns->xa);
kmem_cache_free(pid_ns_cachep, ns);
out_dec:
dec_pid_namespaces(ucounts);
@@ -135,7 +135,7 @@ static void destroy_pid_namespace(struct pid_namespace *ns)
{
ns_free_inum(&ns->ns);
- idr_destroy(&ns->idr);
+ xa_destroy(&ns->xa);
call_rcu(&ns->rcu, delayed_free_pidns);
}
@@ -165,7 +165,7 @@ EXPORT_SYMBOL_GPL(put_pid_ns);
void zap_pid_ns_processes(struct pid_namespace *pid_ns)
{
- int nr;
+ unsigned long nr;
int rc;
struct task_struct *task, *me = current;
int init_pids = thread_group_leader(me) ? 1 : 2;
@@ -198,8 +198,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
*/
rcu_read_lock();
read_lock(&tasklist_lock);
- nr = 2;
- idr_for_each_entry_continue(&pid_ns->idr, pid, nr) {
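+ /* Start at pid 2; the namespace's init task (pid 1) is not signalled here. */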
+ xa_for_each_range(&pid_ns->xa, nr, pid, 2, ULONG_MAX) {
task = pid_task(pid, PIDTYPE_PID);
if (task && !__fatal_signal_pending(task))
group_send_sig_info(SIGKILL, SEND_SIG_PRIV, task, PIDTYPE_MAX);