@@ -89,10 +89,10 @@
#endif /* CONFIG_SMP */
-#define __my_cpu_type(var) typeof(var) __percpu_seg_override
-#define __my_cpu_ptr(ptr) (__my_cpu_type(*(ptr))*)(__force uintptr_t)(ptr)
-#define __my_cpu_var(var) (*__my_cpu_ptr(&(var)))
-#define __percpu_arg(x) __percpu_prefix "%" #x
+#define __my_cpu_type(var) typeof(var)
+#define __my_cpu_ptr(ptr) (ptr)
+#define __my_cpu_var(var) (var)
+#define __percpu_arg(x) "%" #x
#define __force_percpu_arg(x) __force_percpu_prefix "%" #x
/*
@@ -148,7 +148,7 @@
do { \
__pcpu_type_##size pto_val__ = __pcpu_cast_##size(_val); \
if (0) { \
- typeof(_var) pto_tmp__; \
+ __typeof_unqual__(_var) pto_tmp__; \
pto_tmp__ = (_val); \
(void)pto_tmp__; \
} \
@@ -173,7 +173,7 @@ do { \
((val) == 1 || (val) == -1)) ? \
(int)(val) : 0; \
if (0) { \
- typeof(var) pao_tmp__; \
+ __typeof_unqual__(var) pao_tmp__; \
pao_tmp__ = (val); \
(void)pao_tmp__; \
} \
@@ -223,7 +223,7 @@ do { \
*/
#define raw_percpu_xchg_op(_var, _nval) \
({ \
- typeof(_var) pxo_old__ = raw_cpu_read(_var); \
+ __typeof_unqual__(_var) pxo_old__ = raw_cpu_read(_var); \
raw_cpu_write(_var, _nval); \
pxo_old__; \
})
@@ -235,7 +235,7 @@ do { \
*/
#define this_percpu_xchg_op(_var, _nval) \
({ \
- typeof(_var) pxo_old__ = this_cpu_read(_var); \
+ __typeof_unqual__(_var) pxo_old__ = this_cpu_read(_var); \
do { } while (!this_cpu_try_cmpxchg(_var, &pxo_old__, _nval)); \
pxo_old__; \
})
@@ -560,9 +560,10 @@ void early_setup_idt(void)
void __head startup_64_setup_gdt_idt(void)
{
void *handler = NULL;
+ struct desc_struct *gdt = (struct desc_struct *)(uintptr_t)init_per_cpu_var(gdt_page.gdt);
struct desc_ptr startup_gdt_descr = {
- .address = (unsigned long)&RIP_REL_REF(init_per_cpu_var(gdt_page.gdt)),
+ .address = (unsigned long)&RIP_REL_REF(*gdt),
.size = GDT_SIZE - 1,
};
@@ -1223,6 +1223,6 @@ EXPORT_SYMBOL_GPL(__devm_alloc_percpu);
void devm_free_percpu(struct device *dev, void __percpu *pdata)
{
WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match,
- (__force void *)pdata));
+ (__force void *)(uintptr_t)pdata));
}
EXPORT_SYMBOL_GPL(devm_free_percpu);
@@ -100,7 +100,7 @@ struct kioctx {
unsigned long user_id;
- struct __percpu kioctx_cpu *cpu;
+ struct kioctx_cpu __percpu *cpu;
/*
* For percpu reqs_available, number of slots we move to/from global
@@ -57,7 +57,7 @@ static inline void __chk_io_ptr(const volatile void __iomem *ptr) { }
# define __user BTF_TYPE_TAG(user)
# endif
# define __iomem
-# define __percpu BTF_TYPE_TAG(percpu)
+# define __percpu __percpu_seg_override BTF_TYPE_TAG(percpu)
# define __rcu BTF_TYPE_TAG(rcu)
# define __chk_user_ptr(x) (void)0
@@ -33,7 +33,7 @@ struct disk_stats {
#define part_stat_read(part, field) \
({ \
- typeof((part)->bd_stats->field) res = 0; \
+ __typeof_unqual__((part)->bd_stats->field) res = 0; \
unsigned int _cpu; \
for_each_possible_cpu(_cpu) \
res += per_cpu_ptr((part)->bd_stats, _cpu)->field; \
@@ -233,13 +233,13 @@ do { \
#define per_cpu_ptr(ptr, cpu) \
({ \
__verify_pcpu_ptr(ptr); \
- SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu))); \
+ (__typeof_unqual__(*(ptr)) *)(uintptr_t)SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu))); \
})
#define raw_cpu_ptr(ptr) \
({ \
__verify_pcpu_ptr(ptr); \
- arch_raw_cpu_ptr(ptr); \
+ (__typeof_unqual__(*(ptr)) *)(uintptr_t)arch_raw_cpu_ptr(ptr); \
})
#ifdef CONFIG_DEBUG_PREEMPT
@@ -315,7 +315,7 @@ static __always_inline void __this_cpu_preempt_check(const char *op) { }
#define __pcpu_size_call_return(stem, variable) \
({ \
- typeof(variable) pscr_ret__; \
+ __typeof_unqual__(variable) pscr_ret__; \
__verify_pcpu_ptr(&(variable)); \
switch(sizeof(variable)) { \
case 1: pscr_ret__ = stem##1(variable); break; \
@@ -330,7 +330,7 @@ static __always_inline void __this_cpu_preempt_check(const char *op) { }
#define __pcpu_size_call_return2(stem, variable, ...) \
({ \
- typeof(variable) pscr2_ret__; \
+ __typeof_unqual__(variable) pscr2_ret__; \
__verify_pcpu_ptr(&(variable)); \
switch(sizeof(variable)) { \
case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break; \
@@ -10,6 +10,7 @@
#include <linux/types.h>
#include <linux/once.h>
+#include <linux/percpu.h>
#include <linux/random.h>
struct rnd_state {
@@ -849,7 +849,7 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr,
cpu_events = alloc_percpu(typeof(*cpu_events));
if (!cpu_events)
- return (void __percpu __force *)ERR_PTR(-ENOMEM);
+ return (void __percpu __force *)(uintptr_t)ERR_PTR(-ENOMEM);
cpus_read_lock();
for_each_online_cpu(cpu) {
@@ -868,7 +868,7 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr,
return cpu_events;
unregister_wide_hw_breakpoint(cpu_events);
- return (void __percpu __force *)ERR_PTR(err);
+ return (void __percpu __force *)(uintptr_t)ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);
@@ -184,7 +184,7 @@ EXPORT_SYMBOL_GPL(__percpu_down_read);
#define per_cpu_sum(var) \
({ \
- typeof(var) __sum = 0; \
+ __typeof_unqual__(var) __sum = 0; \
int cpu; \
compiletime_assert_atomic_type(__sum); \
for_each_possible_cpu(cpu) \
@@ -377,7 +377,7 @@ struct workqueue_struct {
/* hot fields used during command issue, aligned to cacheline */
unsigned int flags ____cacheline_aligned; /* WQ: WQ_* flags */
- struct pool_workqueue __percpu __rcu **cpu_pwq; /* I: per-cpu pwqs */
+	struct pool_workqueue __rcu * __percpu *cpu_pwq; /* I: per-cpu pwqs */
struct wq_node_nr_active *node_nr_active[]; /* I: per-node nr_active */
};
@@ -175,7 +175,7 @@ int __percpu_counter_init_many(struct percpu_counter *fbc, s64 amount,
INIT_LIST_HEAD(&fbc[i].list);
#endif
fbc[i].count = amount;
- fbc[i].counters = (void *)counters + (i * counter_size);
+ fbc[i].counters = (void __percpu *)counters + (i * counter_size);
debug_percpu_counter_activate(&fbc[i]);
}
@@ -10651,7 +10651,7 @@ noinline void netdev_core_stats_inc(struct net_device *dev, u32 offset)
return;
}
- field = (__force unsigned long __percpu *)((__force void *)p + offset);
+	field = (unsigned long __percpu *)((void __percpu *)p + offset);
this_cpu_inc(*field);
}
EXPORT_SYMBOL_GPL(netdev_core_stats_inc);