@@ -103,7 +103,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
* if platform didn't set the present map already, do it now
* boot cpu is set to present already by init/main.c
*/
- if (num_present_cpus() <= 1)
+ if (num_present_cpus_le(2))
init_cpu_present(cpu_possible_mask);
}
@@ -44,7 +44,7 @@ int machine_kexec_prepare(struct kimage *image)
* and implements CPU hotplug for the current HW. If not, we won't be
* able to kexec reliably, so fail the prepare operation.
*/
- if (num_possible_cpus() > 1 && platform_can_secondary_boot() &&
+ if (num_possible_cpus_gt(1) && platform_can_secondary_boot() &&
!platform_can_cpu_hotplug())
return -EINVAL;
@@ -120,7 +120,7 @@ void exynos_set_delayed_reset_assertion(bool enable)
if (of_machine_is_compatible("samsung,exynos4")) {
unsigned int tmp, core_id;
- for (core_id = 0; core_id < num_possible_cpus(); core_id++) {
+ for (core_id = 0; num_possible_cpus_gt(core_id); core_id++) {
tmp = pmu_raw_readl(EXYNOS_ARM_CORE_OPTION(core_id));
if (enable)
tmp |= S5P_USE_DELAYED_RESET_ASSERTION;
@@ -296,7 +296,7 @@ static int __init b15_rac_init(void)
if (!dn)
return -ENODEV;
- if (WARN(num_possible_cpus() > 4, "RAC only supports 4 CPUs\n"))
+ if (WARN(num_possible_cpus_gt(4), "RAC only supports 4 CPUs\n"))
goto out;
b15_rac_base = of_iomap(dn, 0);
@@ -1099,7 +1099,7 @@ static bool have_cpu_die(void)
bool cpus_are_stuck_in_kernel(void)
{
- bool smp_spin_tables = (num_possible_cpus() > 1 && !have_cpu_die());
+ bool smp_spin_tables = (num_possible_cpus_gt(1) && !have_cpu_die());
return !!cpus_stuck_in_kernel || smp_spin_tables ||
is_protected_kvm_enabled();
@@ -384,7 +384,7 @@ static int asids_update_limit(void)
* Expect allocation after rollover to fail if we don't have at least
* one more ASID than CPUs. ASID #0 is reserved for init_mm.
*/
- WARN_ON(num_available_asids - 1 <= num_possible_cpus());
+ WARN_ON(num_possible_cpus_gt(num_available_asids - 2));
pr_info("ASID allocator initialised with %lu entries\n",
num_available_asids);
@@ -176,7 +176,7 @@ int asid_allocator_init(struct asid_info *info,
* Expect allocation after rollover to fail if we don't have at least
* one more ASID than CPUs. ASID #0 is always reserved.
*/
- WARN_ON(NUM_CTXT_ASIDS(info) - 1 <= num_possible_cpus());
+ WARN_ON(num_possible_cpus_gt(NUM_CTXT_ASIDS(info) - 2));
atomic64_set(&info->generation, ASID_FIRST_VERSION(info));
info->map = kcalloc(BITS_TO_LONGS(NUM_CTXT_ASIDS(info)),
sizeof(*info->map), GFP_KERNEL);
@@ -28,7 +28,7 @@ static void asid_flush_cpu_ctxt(void)
static int asids_init(void)
{
- BUG_ON(((1 << CONFIG_CPU_ASID_BITS) - 1) <= num_possible_cpus());
+ BUG_ON(num_possible_cpus_gt((1 << CONFIG_CPU_ASID_BITS) - 2));
if (asid_allocator_init(&asid_info, CONFIG_CPU_ASID_BITS, 1,
asid_flush_cpu_ctxt))
@@ -202,7 +202,7 @@ setup_ptcg_sem(int max_purges, int nptcg_from)
goto resetsema;
}
if (kp_override) {
- need_ptcg_sem = num_possible_cpus() > nptcg;
+ need_ptcg_sem = num_possible_cpus_gt(nptcg);
return;
}
@@ -221,7 +221,7 @@ setup_ptcg_sem(int max_purges, int nptcg_from)
}
if (palo_override) {
if (nptcg != PALO_MAX_TLB_PURGES)
- need_ptcg_sem = (num_possible_cpus() > nptcg);
+ need_ptcg_sem = num_possible_cpus_gt(nptcg);
return;
}
@@ -238,7 +238,7 @@ setup_ptcg_sem(int max_purges, int nptcg_from)
need_ptcg_sem = 0;
return;
} else
- need_ptcg_sem = (num_possible_cpus() > nptcg);
+ need_ptcg_sem = num_possible_cpus_gt(nptcg);
resetsema:
spinaphore_init(&ptcg_sem, max_purges);
@@ -29,7 +29,7 @@ void __init setup_pit_timer(void)
static int __init init_pit_clocksource(void)
{
- if (num_possible_cpus() > 1 || /* PIT does not scale! */
+ if (num_possible_cpus_gt(1) || /* PIT does not scale! */
!clockevent_state_periodic(&i8253_clockevent))
return 0;
@@ -135,7 +135,7 @@ static DEFINE_RWLOCK(pmuint_rwlock);
/* Copied from op_model_mipsxx.c */
static unsigned int vpe_shift(void)
{
- if (num_possible_cpus() > 1)
+ if (num_possible_cpus_gt(1))
return 1;
return 0;
@@ -704,7 +704,7 @@ static unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev)
* event_id.
*/
#ifdef CONFIG_MIPS_MT_SMP
- if (num_possible_cpus() > 1)
+ if (num_possible_cpus_gt(1))
return ((unsigned int)pev->range << 24) |
(pev->cntr_mask & 0xffff00) |
(pev->event_id & 0xff);
@@ -54,7 +54,7 @@ int __init rtlx_module_init(void)
return -ENODEV;
}
- if (num_possible_cpus() - aprp_cpu_index() < 1) {
+ if (num_possible_cpus_le(aprp_cpu_index() + 1)) {
pr_warn("No TCs reserved for AP/SP, not initializing RTLX.\n"
"Pass maxcpus=<n> argument as kernel argument\n");
@@ -248,7 +248,7 @@ int mips_smp_ipi_allocate(const struct cpumask *mask)
* setup, if we're running with only a single CPU.
*/
if (!ipidomain) {
- BUG_ON(num_present_cpus() > 1);
+ BUG_ON(num_present_cpus_gt(1));
return 0;
}
@@ -314,7 +314,7 @@ int mips_smp_ipi_free(const struct cpumask *mask)
static int __init mips_smp_ipi_init(void)
{
- if (num_possible_cpus() == 1)
+ if (num_possible_cpus_eq(1))
return 0;
mips_smp_ipi_allocate(cpu_possible_mask);
@@ -98,7 +98,7 @@ int __init vpe_module_init(void)
return -ENODEV;
}
- if (num_possible_cpus() - aprp_cpu_index() < 1) {
+ if (num_possible_cpus_le(aprp_cpu_index() + 1)) {
pr_warn("No VPEs reserved for AP/SP, not initialize VPE loader\n"
"Pass maxcpus=<n> argument as kernel argument\n");
return -ENODEV;
@@ -194,7 +194,7 @@ static struct clocksource clocksource_mfgpt = {
int __init init_mfgpt_clocksource(void)
{
- if (num_possible_cpus() > 1) /* MFGPT does not scale! */
+ if (num_possible_cpus_gt(1)) /* MFGPT does not scale! */
return 0;
return clocksource_register_hz(&clocksource_mfgpt, MFGPT_TICK_RATE);
@@ -274,7 +274,7 @@ static int mmid_init(void)
* one more MMID than CPUs.
*/
num_mmids = asid_first_version(0);
- WARN_ON(num_mmids <= num_possible_cpus());
+ WARN_ON(num_possible_cpus_gt(num_mmids - 1));
atomic64_set(&mmid_version, asid_first_version(0));
mmid_map = kcalloc(BITS_TO_LONGS(num_mmids), sizeof(*mmid_map),
@@ -363,7 +363,7 @@ static struct work_registers build_get_work_registers(u32 **p)
return r;
}
- if (num_possible_cpus() > 1) {
+ if (num_possible_cpus_gt(1)) {
/* Get smp_processor_id */
UASM_i_CPUID_MFC0(p, K0, SMP_CPUID_REG);
UASM_i_SRL_SAFE(p, K0, K0, SMP_CPUID_REGSHIFT);
@@ -172,7 +172,7 @@ static void *cpuinfo_start(struct seq_file *m, loff_t *pos)
{
unsigned long i = *pos;
- return i < num_possible_cpus() ? (void *) (i + 1) : NULL;
+ return num_possible_cpus_gt(i) ? (void *) (i + 1) : NULL;
}
static void *cpuinfo_next(struct seq_file *m, void *v, loff_t *pos)
@@ -280,7 +280,7 @@ static int smp_85xx_kick_cpu(int nr)
int primary = nr;
#endif
- WARN_ON(nr < 0 || nr >= num_possible_cpus());
+ WARN_ON(nr < 0 || num_possible_cpus_le(nr + 1));
pr_debug("kick CPU #%d\n", nr);
@@ -365,7 +365,7 @@ static int dlpar_offline_cpu(struct device_node *dn)
cpu_maps_update_begin();
break;
}
- if (cpu == num_possible_cpus()) {
+ if (num_possible_cpus_eq(cpu)) {
pr_warn("Could not find cpu to offline with physical id 0x%x\n",
thread);
}
@@ -408,7 +408,7 @@ static int dlpar_online_cpu(struct device_node *dn)
break;
}
- if (cpu == num_possible_cpus())
+ if (num_possible_cpus_eq(cpu))
printk(KERN_WARNING "Could not find cpu to online "
"with physical id 0x%x\n", thread);
}
@@ -1440,7 +1440,7 @@ struct mpic * __init mpic_alloc(struct device_node *node,
* The MPIC driver will crash if there are more cores than we
* can initialize, so we may as well catch that problem here.
*/
- BUG_ON(num_possible_cpus() > MPIC_MAX_CPUS);
+ BUG_ON(num_possible_cpus_gt(MPIC_MAX_CPUS));
/* Map the per-CPU registers */
for_each_possible_cpu(i) {
@@ -2747,7 +2747,7 @@ static void dump_all_pacas(void)
{
int cpu;
- if (num_possible_cpus() == 0) {
+ if (num_possible_cpus_eq(0)) {
printf("No possible cpus, use 'dp #' to dump individual cpus\n");
return;
}
@@ -2809,7 +2809,7 @@ static void dump_all_xives(void)
{
int cpu;
- if (num_possible_cpus() == 0) {
+ if (num_possible_cpus_eq(0)) {
printf("No possible cpus, use 'dx #' to dump individual cpus\n");
return;
}
@@ -3692,7 +3692,7 @@ symbol_lookup(void)
ptr >= (void __percpu *)__per_cpu_start &&
ptr < (void __percpu *)__per_cpu_end)
{
- if (scanhex(&cpu) && cpu < num_possible_cpus()) {
+ if (scanhex(&cpu) && num_possible_cpus_gt(cpu)) {
addr = (unsigned long)per_cpu_ptr(ptr, cpu);
} else {
cpu = raw_smp_processor_id();
@@ -36,7 +36,7 @@ void kvm_riscv_stage2_vmid_detect(void)
__kvm_riscv_hfence_gvma_all();
/* We don't use VMID bits if they are not sufficient */
- if ((1UL << vmid_bits) < num_possible_cpus())
+ if (num_possible_cpus_gt(1UL << vmid_bits))
vmid_bits = 0;
}
@@ -885,7 +885,7 @@ static void __mark_core_id(struct mdesc_handle *hp, u64 node,
{
const u64 *id = mdesc_get_property(hp, node, "id", NULL);
- if (*id < num_possible_cpus())
+ if (num_possible_cpus_gt(*id))
cpu_data(*id).core_id = core_id;
}
@@ -894,7 +894,7 @@ static void __mark_max_cache_id(struct mdesc_handle *hp, u64 node,
{
const u64 *id = mdesc_get_property(hp, node, "id", NULL);
- if (*id < num_possible_cpus()) {
+ if (num_possible_cpus_gt(*id)) {
cpu_data(*id).max_cache_id = max_cache_id;
/**
@@ -986,7 +986,7 @@ static void set_sock_ids_by_socket(struct mdesc_handle *hp, u64 mp)
continue;
id = mdesc_get_property(hp, t, "id", NULL);
- if (*id < num_possible_cpus())
+ if (num_possible_cpus_gt(*id))
cpu_data(*id).sock_id = idx;
}
idx++;
@@ -1007,7 +1007,7 @@ __init int amd_pmu_init(void)
if (ret)
return ret;
- if (num_possible_cpus() == 1) {
+ if (num_possible_cpus_eq(1)) {
/*
* No point in allocating data structures to serialize
* against other CPUs, when there is only the one CPU.
@@ -574,7 +574,7 @@ void __init_or_module alternatives_smp_module_add(struct module *mod,
if (!uniproc_patched)
goto unlock;
- if (num_possible_cpus() == 1)
+ if (num_possible_cpus_eq(1))
/* Don't bother remembering, we'll never have to undo it. */
goto smp_unlock;
@@ -620,7 +620,7 @@ void alternatives_enable_smp(void)
struct smp_alt_module *mod;
/* Why bother if there are no other CPUs? */
- BUG_ON(num_possible_cpus() == 1);
+ BUG_ON(num_possible_cpus_eq(1));
mutex_lock(&text_mutex);
@@ -833,14 +833,14 @@ void __init alternative_instructions(void)
#ifdef CONFIG_SMP
/* Patch to UP if other cpus not imminent. */
- if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
+ if (!noreplace_smp && (num_present_cpus_eq(1) || setup_max_cpus <= 1)) {
uniproc_patched = true;
alternatives_smp_module_add(NULL, "core kernel",
__smp_locks, __smp_locks_end,
_text, _etext);
}
- if (!uniproc_patched || num_possible_cpus() == 1) {
+ if (!uniproc_patched || num_possible_cpus_eq(1)) {
free_init_pages("SMP alternatives",
(unsigned long)__smp_locks,
(unsigned long)__smp_locks_end);
@@ -1020,7 +1020,7 @@ void __init setup_boot_APIC_clock(void)
if (disable_apic_timer) {
pr_info("Disabling APIC timer\n");
/* No broadcast on UP ! */
- if (num_possible_cpus() > 1) {
+ if (num_possible_cpus_gt(1)) {
lapic_clockevent.mult = 1;
setup_APIC_timer();
}
@@ -1029,7 +1029,7 @@ void __init setup_boot_APIC_clock(void)
if (calibrate_APIC_clock()) {
/* No broadcast on UP ! */
- if (num_possible_cpus() > 1)
+ if (num_possible_cpus_gt(1))
setup_APIC_timer();
return;
}
@@ -189,7 +189,7 @@ static void physflat_init_apic_ldr(void)
static int physflat_probe(void)
{
- if (apic == &apic_physflat || num_possible_cpus() > 8 ||
+ if (apic == &apic_physflat || num_possible_cpus_gt(8) ||
jailhouse_paravirt())
return 1;
@@ -138,7 +138,7 @@ void __init default_setup_apic_routing(void)
{
int version = boot_cpu_apic_version;
- if (num_possible_cpus() > 8) {
+ if (num_possible_cpus_gt(8)) {
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_INTEL:
if (!APIC_XAPIC(version)) {
@@ -310,7 +310,7 @@ static ssize_t mce_chrdev_write(struct file *filp, const char __user *ubuf,
if (copy_from_user(&m, ubuf, usize))
return -EFAULT;
- if (m.extcpu >= num_possible_cpus() || !cpu_online(m.extcpu))
+ if (num_possible_cpus_le(m.extcpu + 1) || !cpu_online(m.extcpu))
return -EINVAL;
/*
@@ -737,7 +737,7 @@ static void __init hpet_select_clockevents(void)
hc->irq = irq;
hc->mode = HPET_MODE_CLOCKEVT;
- if (++hpet_base.nr_clockevents == num_possible_cpus())
+ if (num_possible_cpus_eq(++hpet_base.nr_clockevents))
break;
}
@@ -57,7 +57,7 @@ static int __init init_pit_clocksource(void)
* - when HPET is enabled
* - when local APIC timer is active (PIT is switched off)
*/
- if (num_possible_cpus() > 1 || is_hpet_enabled() ||
+ if (num_possible_cpus_gt(1) || is_hpet_enabled() ||
!clockevent_state_periodic(&i8253_clockevent))
return 0;
@@ -1057,7 +1057,7 @@ void __init kvm_spinlock_init(void)
goto out;
}
- if (num_possible_cpus() == 1) {
+ if (num_possible_cpus_eq(1)) {
pr_info("PV spinlocks disabled, single CPU\n");
goto out;
}
@@ -205,7 +205,7 @@ static void __init kvmclock_init_mem(void)
struct page *p;
int r;
- if (HVC_BOOT_ARRAY_SIZE >= num_possible_cpus())
+ if (num_possible_cpus_le(HVC_BOOT_ARRAY_SIZE + 1))
return;
ncpus = num_possible_cpus() - HVC_BOOT_ARRAY_SIZE;
@@ -1223,7 +1223,7 @@ int unsynchronized_tsc(void)
*/
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
/* assume multi socket systems are not synchronized: */
- if (num_possible_cpus() > 1)
+ if (num_possible_cpus_gt(1))
return 1;
}
@@ -254,7 +254,7 @@ static void __init xen_pv_smp_prepare_cpus(unsigned int max_cpus)
cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));
/* Restrict the possible_map according to max_cpus. */
- while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
+ while (num_possible_cpus_gt(max(1U, max_cpus))) {
for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
continue;
set_cpu_possible(cpu, false);
@@ -125,7 +125,7 @@ PV_CALLEE_SAVE_REGS_THUNK(xen_vcpu_stolen);
void __init xen_init_spinlocks(void)
{
/* Don't need to use pvqspinlock code if there is only 1 vCPU. */
- if (num_possible_cpus() == 1 || nopvspin)
+ if (num_possible_cpus_eq(1) || nopvspin)
xen_pvspin = false;
if (!xen_pvspin) {
@@ -1178,7 +1178,7 @@ static void __init exynos4x12_core_down_clock(void)
PWR_CTRL1_USE_CORE1_WFE | PWR_CTRL1_USE_CORE0_WFE |
PWR_CTRL1_USE_CORE1_WFI | PWR_CTRL1_USE_CORE0_WFI);
/* On Exynos4412 enable it also on core 2 and 3 */
- if (num_possible_cpus() == 4)
+ if (num_possible_cpus_eq(4))
tmp |= PWR_CTRL1_USE_CORE3_WFE | PWR_CTRL1_USE_CORE2_WFE |
PWR_CTRL1_USE_CORE3_WFI | PWR_CTRL1_USE_CORE2_WFI;
writel_relaxed(tmp, reg_base + PWR_CTRL1);
@@ -302,8 +302,7 @@ static int __init ingenic_tcu_init(struct device_node *np)
(u32 *)&tcu->pwm_channels_mask);
/* Verify that we have at least num_possible_cpus() + 1 free channels */
- if (hweight8(tcu->pwm_channels_mask) >
- soc_info->num_channels - num_possible_cpus() + 1) {
+ if (num_possible_cpus_gt(soc_info->num_channels + 1 - hweight8(tcu->pwm_channels_mask))) {
pr_crit("%s: Invalid PWM channel mask: 0x%02lx\n", __func__,
tcu->pwm_channels_mask);
ret = -EINVAL;
@@ -593,7 +593,7 @@ static int __init pcc_cpufreq_init(void)
return ret;
}
- if (num_present_cpus() > 4) {
+ if (num_present_cpus_gt(4)) {
pcc_cpufreq_driver.flags |= CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING;
pr_err("%s: Too many CPUs, dynamic performance scaling disabled\n",
__func__);
@@ -1293,7 +1293,7 @@ static int mv_xor_probe(struct platform_device *pdev)
struct mv_xor_device *xordev;
struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct resource *res;
- unsigned int max_engines, max_channels;
+ unsigned int max_channels;
int i, ret;
dev_notice(&pdev->dev, "Marvell shared XOR driver\n");
@@ -1362,7 +1362,6 @@ static int mv_xor_probe(struct platform_device *pdev)
* separate engines when possible. For dual-CPU Armada 3700
* SoC with single XOR engine allow using its both channels.
*/
- max_engines = num_present_cpus();
if (xordev->xor_type == XOR_ARMADA_37XX)
max_channels = num_present_cpus();
else
@@ -1370,7 +1369,7 @@ static int mv_xor_probe(struct platform_device *pdev)
MV_XOR_MAX_CHANNELS,
DIV_ROUND_UP(num_present_cpus(), 2));
- if (mv_xor_engine_count >= max_engines)
+ if (num_present_cpus_le(mv_xor_engine_count + 1))
return 0;
if (pdev->dev.of_node) {
@@ -80,7 +80,7 @@ static struct pci_driver i810_pci_driver = {
static int __init i810_init(void)
{
- if (num_possible_cpus() > 1) {
+ if (num_possible_cpus_gt(1)) {
pr_err("drm/i810 does not support SMP\n");
return -EINVAL;
}
@@ -430,7 +430,7 @@ static u8 gic_get_cpumask(struct gic_chip_data *gic)
break;
}
- if (!mask && num_possible_cpus() > 1)
+ if (!mask && num_possible_cpus_gt(1))
pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");
return mask;
@@ -537,7 +537,7 @@ static netdev_tx_t cfv_netdev_tx(struct sk_buff *skb, struct net_device *netdev)
*
* Flow-on is triggered when sufficient buffers are freed
*/
- if (unlikely(cfv->vq_tx->num_free <= num_present_cpus())) {
+ if (unlikely(!num_present_cpus_le(cfv->vq_tx->num_free))) {
flow_off = true;
cfv->stats.tx_full_ring++;
}
@@ -649,7 +649,7 @@ int cn23xx_setup_octeon_vf_device(struct octeon_device *oct)
rings_per_vf);
oct->sriov_info.rings_per_vf = rings_per_vf;
} else {
- if (rings_per_vf > num_present_cpus()) {
+ if (num_present_cpus_le(rings_per_vf)) {
dev_warn(&oct->pci_dev->dev,
"PF configured rings_per_vf:%d greater than num_cpu:%d. Using rings_per_vf:%d equal to num cpus\n",
rings_per_vf,
@@ -1239,7 +1239,7 @@ static int hns_nic_init_affinity_mask(int q_num, int ring_idx,
* The cpu mask set by ring index according to the ring flag
* which indicate the ring is tx or rx.
*/
- if (q_num == num_possible_cpus()) {
+ if (num_possible_cpus_eq(q_num)) {
if (is_tx_ring(ring))
cpu = ring_idx;
else
@@ -3315,7 +3315,7 @@ static int mvpp2_setup_txqs(struct mvpp2_port *port)
goto err_cleanup;
/* Assign this queue to a CPU */
- if (queue < num_possible_cpus())
+ if (num_possible_cpus_gt(queue))
netif_set_xps_queue(port->dev, cpumask_of(queue), queue);
}
@@ -429,7 +429,7 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
* This issue is not present on PCI-Express devices or pre-AR5416
* devices (legacy, 802.11abg).
*/
- if (num_possible_cpus() > 1)
+ if (num_possible_cpus_gt(1))
ah->config.serialize_regmode = SER_REG_MODE_AUTO;
if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_AUTO) {
@@ -1536,7 +1536,7 @@ mwifiex_reinit_sw(struct mwifiex_adapter *adapter)
adapter->cmd_wait_q.status = 0;
adapter->scan_wait_q_woken = false;
- if ((num_possible_cpus() > 1) || adapter->iface_type == MWIFIEX_USB)
+ if (num_possible_cpus_gt(1) || adapter->iface_type == MWIFIEX_USB)
adapter->rx_work_enabled = true;
adapter->workqueue =
@@ -1691,7 +1691,7 @@ mwifiex_add_card(void *card, struct completion *fw_done,
adapter->cmd_wait_q.status = 0;
adapter->scan_wait_q_woken = false;
- if ((num_possible_cpus() > 1) || adapter->iface_type == MWIFIEX_USB)
+ if (num_possible_cpus_gt(1) || adapter->iface_type == MWIFIEX_USB)
adapter->rx_work_enabled = true;
adapter->workqueue =
@@ -312,8 +312,7 @@ int cw1200_queue_put(struct cw1200_queue *queue,
* Leave extra queue slots so we don't overflow.
*/
if (queue->overfull == false &&
- queue->num_queued >=
- (queue->capacity - (num_present_cpus() - 1))) {
+ num_present_cpus_gt(queue->capacity - queue->num_queued)) {
queue->overfull = true;
__cw1200_queue_lock(queue);
mod_timer(&queue->gc, jiffies);
@@ -17,7 +17,7 @@ static int nd_region_probe(struct device *dev)
struct nd_region *nd_region = to_nd_region(dev);
if (nd_region->num_lanes > num_online_cpus()
- && nd_region->num_lanes < num_possible_cpus()
+ && num_possible_cpus_gt(nd_region->num_lanes)
&& !test_and_set_bit(0, &once)) {
dev_dbg(dev, "online cpus (%d) < concurrent i/o lanes (%d) < possible cpus (%d)\n",
num_online_cpus(), nd_region->num_lanes,
@@ -79,7 +79,7 @@ static int io_queue_count_set(const char *val, const struct kernel_param *kp)
int ret;
ret = kstrtouint(val, 10, &n);
- if (ret != 0 || n > num_possible_cpus())
+ if (ret != 0 || num_possible_cpus_le(n))
return -EINVAL;
return param_set_uint(val, kp);
}
@@ -637,7 +637,7 @@ int armpmu_request_irq(int irq, int cpu)
err = irq_force_affinity(irq, cpumask_of(cpu));
- if (err && num_possible_cpus() > 1) {
+ if (err && num_possible_cpus_gt(1)) {
pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
irq, cpu);
goto err_out;
@@ -297,7 +297,7 @@ static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn
int i, bus_number;
if (bus_no < 0 || bus_no > 1 || cpu < 0 || cpu >= nr_cpu_ids ||
- cpu >= num_possible_cpus())
+ num_possible_cpus_le(cpu + 1))
return NULL;
bus_number = isst_cpu_info[cpu].bus_info[bus_no];
@@ -362,7 +362,7 @@ struct pci_dev *isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
struct pci_dev *pci_dev;
if (bus_no < 0 || bus_no > 1 || cpu < 0 || cpu >= nr_cpu_ids ||
- cpu >= num_possible_cpus())
+ num_possible_cpus_le(cpu + 1))
return NULL;
pci_dev = isst_cpu_info[cpu].pci_dev[bus_no];
@@ -442,7 +442,7 @@ static long isst_if_proc_phyid_req(u8 *cmd_ptr, int *write_only, int resume)
cpu_map = (struct isst_if_cpu_map *)cmd_ptr;
if (cpu_map->logical_cpu >= nr_cpu_ids ||
- cpu_map->logical_cpu >= num_possible_cpus())
+ num_possible_cpus_le(cpu_map->logical_cpu + 1))
return -EINVAL;
*write_only = 0;
@@ -181,7 +181,7 @@ static void __init a72_b53_rac_enable_all(struct device_node *np)
if (IS_ENABLED(CONFIG_CACHE_B15_RAC))
return;
- if (WARN(num_possible_cpus() > 4, "RAC only supports 4 CPUs\n"))
+ if (WARN(num_possible_cpus_gt(4), "RAC only supports 4 CPUs\n"))
return;
pref_dist = cbc_readl(RAC_CONFIG1_REG);
@@ -60,7 +60,7 @@ static inline struct dpaa2_io *service_select_by_cpu(struct dpaa2_io *d,
if (d)
return d;
- if (cpu != DPAA2_IO_ANY_CPU && cpu >= num_possible_cpus())
+ if (cpu != DPAA2_IO_ANY_CPU && num_possible_cpus_le(cpu + 1))
return NULL;
/*
@@ -140,7 +140,7 @@ struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc,
return NULL;
/* check if CPU is out of range (-1 means any cpu) */
- if (desc->cpu != DPAA2_IO_ANY_CPU && desc->cpu >= num_possible_cpus()) {
+ if (desc->cpu != DPAA2_IO_ANY_CPU && num_possible_cpus_le(desc->cpu + 1)) {
kfree(obj);
return NULL;
}
@@ -241,7 +241,7 @@ static int dw_spi_bt1_sys_init(struct platform_device *pdev,
* though, but still tends to be not fast enough at low CPU
* frequencies.
*/
- if (num_possible_cpus() > 1)
+ if (num_possible_cpus_gt(1))
dws->max_mem_freq = 10000000U;
else
dws->max_mem_freq = 20000000U;
@@ -431,7 +431,7 @@ static ssize_t remove_cpu_store(struct device *dev,
if (kstrtoull(buf, 0, &cpu) < 0)
return -EINVAL;
- if (cpu >= num_possible_cpus() || cpu == 0 || !cpu_is_hotpluggable(cpu))
+ if (num_possible_cpus_le(cpu + 1) || cpu == 0 || !cpu_is_hotpluggable(cpu))
return -EINVAL;
if (cpu_online(cpu))
@@ -211,7 +211,7 @@ pwork_threads_store(
if (ret)
return ret;
- if (val < -1 || val > num_possible_cpus())
+ if (val < -1 || num_possible_cpus_le(val))
return -EINVAL;
xfs_globals.pwork_threads = val;
@@ -945,8 +945,19 @@ static inline unsigned int num_online_cpus(void)
return atomic_read(&__num_online_cpus);
}
#define num_possible_cpus() cpumask_weight(cpu_possible_mask)
+#define num_possible_cpus_eq(n) cpumask_weight_eq(cpu_possible_mask, (n))
+#define num_possible_cpus_gt(n) cpumask_weight_gt(cpu_possible_mask, (n))
+#define num_possible_cpus_le(n) cpumask_weight_le(cpu_possible_mask, (n))
+
#define num_present_cpus() cpumask_weight(cpu_present_mask)
+#define num_present_cpus_eq(n) cpumask_weight_eq(cpu_present_mask, (n))
+#define num_present_cpus_gt(n) cpumask_weight_gt(cpu_present_mask, (n))
+#define num_present_cpus_le(n) cpumask_weight_le(cpu_present_mask, (n))
+
#define num_active_cpus() cpumask_weight(cpu_active_mask)
+#define num_active_cpus_eq(n) cpumask_weight_eq(cpu_active_mask, (n))
+#define num_active_cpus_gt(n) cpumask_weight_gt(cpu_active_mask, (n))
+#define num_active_cpus_le(n) cpumask_weight_le(cpu_active_mask, (n))
static inline bool cpu_online(unsigned int cpu)
{
@@ -976,9 +987,21 @@ static inline bool cpu_dying(unsigned int cpu)
#else
#define num_online_cpus() 1U
+
#define num_possible_cpus() 1U
+#define num_possible_cpus_eq(n) (1U == (n))
+#define num_possible_cpus_gt(n) (1U > (n))
+#define num_possible_cpus_le(n) (1U < (n))
+
#define num_present_cpus() 1U
+#define num_present_cpus_eq(n) (1U == (n))
+#define num_present_cpus_gt(n) (1U > (n))
+#define num_present_cpus_le(n) (1U < (n))
+
#define num_active_cpus() 1U
+#define num_active_cpus_eq(n) (1U == (n))
+#define num_active_cpus_gt(n) (1U > (n))
+#define num_active_cpus_le(n) (1U < (n))
static inline bool cpu_online(unsigned int cpu)
{
@@ -191,7 +191,7 @@ static inline
int kdb_process_cpu(const struct task_struct *p)
{
unsigned int cpu = task_cpu(p);
- if (cpu > num_possible_cpus())
+ if (num_possible_cpus_le(cpu))
cpu = 0;
return cpu;
}
@@ -108,7 +108,7 @@ kdb_bt_cpu(unsigned long cpu)
{
struct task_struct *kdb_tsk;
- if (cpu >= num_possible_cpus() || !cpu_online(cpu)) {
+ if (num_possible_cpus_le(cpu + 1) || !cpu_online(cpu)) {
kdb_printf("WARNING: no process for cpu %ld\n", cpu);
return;
}
@@ -1005,7 +1005,7 @@ static void __init log_buf_add_cpu(void)
* set_cpu_possible() after setup_arch() but just in
* case lets ensure this is valid.
*/
- if (num_possible_cpus() == 1)
+ if (num_possible_cpus_eq(1))
return;
cpu_extra = (num_possible_cpus() - 1) * __LOG_CPU_MAX_BUF_LEN;
@@ -639,7 +639,7 @@ static int __init reboot_setup(char *str)
if (isdigit(str[0])) {
int cpu = simple_strtoul(str, NULL, 0);
- if (cpu >= num_possible_cpus()) {
+ if (num_possible_cpus_le(cpu + 1)) {
pr_err("Ignoring the CPU number in reboot= option. "
"CPU %d exceeds possible cpu number %d\n",
cpu, num_possible_cpus());
@@ -844,7 +844,7 @@ static ssize_t cpu_store(struct kobject *kobj, struct kobj_attribute *attr,
if (rc)
return rc;
- if (cpunum >= num_possible_cpus())
+ if (num_possible_cpus_le(cpunum + 1))
return -ERANGE;
reboot_default = 0;
@@ -448,7 +448,7 @@ void clockevents_register_device(struct clock_event_device *dev)
clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED);
if (!dev->cpumask) {
- WARN_ON(num_possible_cpus() > 1);
+ WARN_ON(num_possible_cpus_gt(1));
dev->cpumask = cpumask_of(smp_processor_id());
}
@@ -2936,7 +2936,7 @@ static struct pcpu_alloc_info * __init __flatten pcpu_build_alloc_info(
* greater-than comparison ensures upa==1 always
* passes the following check.
*/
- if (wasted > num_possible_cpus() / 3)
+ if (num_possible_cpus_le(wasted * 3))
continue;
/* and then don't consume more memory */
@@ -3193,7 +3193,7 @@ int __init pcpu_page_first_chunk(size_t reserved_size,
/* allocate pages */
j = 0;
- for (unit = 0; unit < num_possible_cpus(); unit++) {
+ for (unit = 0; num_possible_cpus_gt(unit); unit++) {
unsigned int cpu = ai->groups[0].cpu_map[unit];
for (i = 0; i < unit_pages; i++) {
void *ptr;
@@ -3215,7 +3215,7 @@ int __init pcpu_page_first_chunk(size_t reserved_size,
vm.size = num_possible_cpus() * ai->unit_size;
vm_area_register_early(&vm, PAGE_SIZE);
- for (unit = 0; unit < num_possible_cpus(); unit++) {
+ for (unit = 0; num_possible_cpus_gt(unit); unit++) {
unsigned long unit_addr =
(unsigned long)vm.addr + unit * ai->unit_size;
@@ -3929,7 +3929,7 @@ static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
* to a larger limit. Thus disabled by default.
*/
shared = 0;
- if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1)
+ if (cachep->size <= PAGE_SIZE && num_possible_cpus_gt(1))
shared = 8;
#if DEBUG
Add num_{possible,present,active}_cpus_{eq,gt,le} and replace num_*_cpus()
with one of the new functions where appropriate. This allows num_*_cpus_*()
to return as soon as the result of the comparison is known, instead of
always computing the full cpumask weight first.

Signed-off-by: Yury Norov <yury.norov@gmail.com>
---
 arch/arc/kernel/smp.c                         |  2 +-
 arch/arm/kernel/machine_kexec.c               |  2 +-
 arch/arm/mach-exynos/exynos.c                 |  2 +-
 arch/arm/mm/cache-b15-rac.c                   |  2 +-
 arch/arm64/kernel/smp.c                       |  2 +-
 arch/arm64/mm/context.c                       |  2 +-
 arch/csky/mm/asid.c                           |  2 +-
 arch/csky/mm/context.c                        |  2 +-
 arch/ia64/mm/tlb.c                            |  6 ++---
 arch/mips/kernel/i8253.c                      |  2 +-
 arch/mips/kernel/perf_event_mipsxx.c          |  4 ++--
 arch/mips/kernel/rtlx-cmp.c                   |  2 +-
 arch/mips/kernel/smp.c                        |  4 ++--
 arch/mips/kernel/vpe-cmp.c                    |  2 +-
 .../loongson2ef/common/cs5536/cs5536_mfgpt.c  |  2 +-
 arch/mips/mm/context.c                        |  2 +-
 arch/mips/mm/tlbex.c                          |  2 +-
 arch/nios2/kernel/cpuinfo.c                   |  2 +-
 arch/powerpc/platforms/85xx/smp.c             |  2 +-
 arch/powerpc/platforms/pseries/hotplug-cpu.c  |  4 ++--
 arch/powerpc/sysdev/mpic.c                    |  2 +-
 arch/powerpc/xmon/xmon.c                      |  6 ++---
 arch/riscv/kvm/vmid.c                         |  2 +-
 arch/sparc/kernel/mdesc.c                     |  6 ++---
 arch/x86/events/amd/core.c                    |  2 +-
 arch/x86/kernel/alternative.c                 |  8 +++----
 arch/x86/kernel/apic/apic.c                   |  4 ++--
 arch/x86/kernel/apic/apic_flat_64.c           |  2 +-
 arch/x86/kernel/apic/probe_32.c               |  2 +-
 arch/x86/kernel/cpu/mce/dev-mcelog.c          |  2 +-
 arch/x86/kernel/hpet.c                        |  2 +-
 arch/x86/kernel/i8253.c                       |  2 +-
 arch/x86/kernel/kvm.c                         |  2 +-
 arch/x86/kernel/kvmclock.c                    |  2 +-
 arch/x86/kernel/tsc.c                         |  2 +-
 arch/x86/xen/smp_pv.c                         |  2 +-
 arch/x86/xen/spinlock.c                       |  2 +-
 drivers/clk/samsung/clk-exynos4.c             |  2 +-
 drivers/clocksource/ingenic-timer.c           |  3 +--
 drivers/cpufreq/pcc-cpufreq.c                 |  2 +-
 drivers/dma/mv_xor.c                          |  5 ++--
 drivers/gpu/drm/i810/i810_drv.c               |  2 +-
 drivers/irqchip/irq-gic.c                     |  2 +-
 drivers/net/caif/caif_virtio.c                |  2 +-
 .../cavium/liquidio/cn23xx_vf_device.c        |  2 +-
 drivers/net/ethernet/hisilicon/hns/hns_enet.c |  2 +-
 .../net/ethernet/marvell/mvpp2/mvpp2_main.c   |  2 +-
 drivers/net/wireless/ath/ath9k/hw.c           |  2 +-
 drivers/net/wireless/marvell/mwifiex/main.c   |  4 ++--
 drivers/net/wireless/st/cw1200/queue.c        |  3 +--
 drivers/nvdimm/region.c                       |  2 +-
 drivers/nvme/host/pci.c                       |  2 +-
 drivers/perf/arm_pmu.c                        |  2 +-
 .../intel/speed_select_if/isst_if_common.c    |  6 ++---
 drivers/soc/bcm/brcmstb/biuctrl.c             |  2 +-
 drivers/soc/fsl/dpio/dpio-service.c           |  4 ++--
 drivers/spi/spi-dw-bt1.c                      |  2 +-
 drivers/virt/acrn/hsm.c                       |  2 +-
 fs/xfs/xfs_sysfs.c                            |  2 +-
 include/linux/cpumask.h                       | 23 +++++++++++++++++++
 include/linux/kdb.h                           |  2 +-
 kernel/debug/kdb/kdb_bt.c                     |  2 +-
 kernel/printk/printk.c                        |  2 +-
 kernel/reboot.c                               |  4 ++--
 kernel/time/clockevents.c                     |  2 +-
 mm/percpu.c                                   |  6 ++---
 mm/slab.c                                     |  2 +-
 67 files changed, 110 insertions(+), 90 deletions(-)
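For reviewers, a minimal sketch of the semantics the conversions above rely on, written as ordinary kernel C. The example_*() helpers below are illustrative stand-ins, not the code being merged; they assume the cpumask_weight_{eq,gt,le}() comparators added earlier in this series stop walking the mask once the outcome is decided. Note that, matching the !SMP fallbacks in the cpumask.h hunk, *_le(n) is used throughout as the early-exit spelling of "weight < n", so callers express "weight <= n" as *_le(n + 1).

/*
 * Illustrative only -- not part of this patch.  Shows the comparison each
 * new helper answers, and how an early-exit weight check can avoid
 * counting every set bit in the mask.
 */
#include <linux/cpumask.h>

static inline bool example_possible_eq(unsigned int n)
{
	return num_possible_cpus() == n;	/* num_possible_cpus_eq(n) */
}

static inline bool example_possible_gt(unsigned int n)
{
	return num_possible_cpus() > n;		/* num_possible_cpus_gt(n) */
}

static inline bool example_possible_le(unsigned int n)
{
	return num_possible_cpus() < n;		/* num_possible_cpus_le(n): "less than" */
}

/*
 * Hypothetical early-exit comparison over a CPU mask: bail out as soon as
 * more than @num set bits have been seen, instead of computing the full
 * weight first.  The real helpers come from the preceding bitmap patches
 * and may be implemented differently.
 */
static bool example_weight_gt(const struct cpumask *mask, unsigned int num)
{
	unsigned int cpu, seen = 0;

	for_each_cpu(cpu, mask) {
		if (++seen > num)
			return true;
	}
	return false;
}

With an implementation along these lines, a check such as num_possible_cpus_gt(1) can return after the second possible CPU is found, so large NR_CPUS configurations no longer scan the whole mask just to compare the final weight against a small constant.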