@@ -162,8 +162,8 @@ extern int _test_and_change_bit(int nr, volatile unsigned long * p);
*/
extern int _find_first_zero_bit_le(const unsigned long *p, unsigned size);
extern int _find_next_zero_bit_le(const unsigned long *p, int size, int offset);
-extern int _find_first_bit_le(const unsigned long *p, unsigned size);
-extern int _find_next_bit_le(const unsigned long *p, int size, int offset);
+extern int _find_first_bit_le(const volatile unsigned long *p, unsigned size);
+extern int _find_next_bit_le(const volatile unsigned long *p, int size, int offset);
/*
* Big endian assembly bitops. nr = 0 -> byte 3 bit 0.
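
The hunk above touches the ARM assembler bitops prototypes (arch/arm/include/asm/bitops.h): only _find_first_bit_le()/_find_next_bit_le() gain the qualifier, so they can accept the volatile-qualified masks introduced further down. A minimal user-space sketch of what the qualifier actually buys — not kernel code; wait_for_bit and setter are invented names, and the atomic builtin assumes GCC or Clang:

#include <pthread.h>
#include <stdio.h>

static unsigned long bitmap[1];                 /* one-word "bitmap" */

/* Plain pointer: the compiler may hoist the load of *p out of the
 * loop and spin forever on a stale cached value. */
static int wait_for_bit_broken(const unsigned long *p, int nr)
{
        while (!(*p & (1UL << nr)))
                ;
        return nr;
}

/* volatile qualifies the access: every iteration reloads *p. */
static int wait_for_bit(const volatile unsigned long *p, int nr)
{
        while (!(*p & (1UL << nr)))
                ;
        return nr;
}

static void *setter(void *arg)
{
        (void)arg;
        __atomic_fetch_or(&bitmap[0], 1UL << 3, __ATOMIC_RELAXED);
        return NULL;
}

int main(void)
{
        pthread_t t;

        (void)wait_for_bit_broken;              /* kept only for contrast */
        pthread_create(&t, NULL, setter, NULL);
        printf("bit %d set\n", wait_for_bit(bitmap, 3));
        pthread_join(t, NULL);
        return 0;
}

Build with -pthread; the "broken" variant is deliberately left uncalled.
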
@@ -52,7 +52,7 @@ static inline int fill_gva_list(u64 gva_list[], int offset,
return gva_n - offset;
}
-static void hyperv_flush_tlb_others(const struct cpumask *cpus,
+static void hyperv_flush_tlb_others(const volatile struct cpumask *cpus,
const struct flush_tlb_info *info)
{
int cpu, vcpu, gva_n, max_gvas;
@@ -201,7 +201,7 @@ struct pv_mmu_ops {
void (*flush_tlb_user)(void);
void (*flush_tlb_kernel)(void);
void (*flush_tlb_one_user)(unsigned long addr);
- void (*flush_tlb_others)(const struct cpumask *cpus,
+ void (*flush_tlb_others)(const volatile struct cpumask *cpus,
const struct flush_tlb_info *info);
void (*tlb_remove_table)(struct mmu_gather *tlb, void *table);
@@ -208,7 +208,7 @@ struct flush_tlb_info {
void flush_tlb_local(void);
void flush_tlb_one_user(unsigned long addr);
void flush_tlb_one_kernel(unsigned long addr);
-void flush_tlb_others(const struct cpumask *cpumask,
+void flush_tlb_others(const volatile struct cpumask *cpumask,
const struct flush_tlb_info *info);
#ifdef CONFIG_PARAVIRT
@@ -796,7 +796,7 @@ static bool tlb_is_not_lazy(int cpu, void *data)
return !per_cpu(cpu_tlbstate.is_lazy, cpu);
}
-STATIC_NOPV void native_flush_tlb_others(const struct cpumask *cpumask,
+STATIC_NOPV void native_flush_tlb_others(const volatile struct cpumask *cpumask,
const struct flush_tlb_info *info)
{
count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
@@ -824,7 +824,7 @@ STATIC_NOPV void native_flush_tlb_others(const struct cpumask *cpumask,
(void *)info, 1, cpumask);
}
-void flush_tlb_others(const struct cpumask *cpumask,
+void flush_tlb_others(const volatile struct cpumask *cpumask,
const struct flush_tlb_info *info)
{
__flush_tlb_others(cpumask, info);
@@ -1247,7 +1247,7 @@ static void xen_flush_tlb_one_user(unsigned long addr)
preempt_enable();
}
-static void xen_flush_tlb_others(const struct cpumask *cpus,
+static void xen_flush_tlb_others(const volatile struct cpumask *cpus,
const struct flush_tlb_info *info)
{
struct {
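
These four hunks — Hyper-V's hyperv_flush_tlb_others(), the pv_mmu_ops slot, native_flush_tlb_others() plus the flush_tlb_others() wrapper, and Xen's xen_flush_tlb_others() — have to change in lockstep: in C, a function whose parameter lacks a qualifier is not assignment-compatible with a function-pointer type whose corresponding parameter carries it. A compile-time sketch with invented names:

struct cpumask;
struct flush_tlb_info;

struct mmu_ops_sketch {
        void (*flush_tlb_others)(const volatile struct cpumask *,
                                 const struct flush_tlb_info *);
};

static void impl_new(const volatile struct cpumask *m,
                     const struct flush_tlb_info *i) { (void)m; (void)i; }
static void impl_old(const struct cpumask *m,
                     const struct flush_tlb_info *i) { (void)m; (void)i; }

static struct mmu_ops_sketch ops = {
        .flush_tlb_others = impl_new,   /* OK */
        /* .flush_tlb_others = impl_old,   incompatible pointer types */
};

int main(void)
{
        (void)impl_old;
        ops.flush_tlb_others(0, 0);
        return 0;
}
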
@@ -12,8 +12,8 @@
* Returns the bit number for the next set bit
* If no bits are set, returns @size.
*/
-extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
- size, unsigned long offset);
+extern unsigned long find_next_bit(const volatile unsigned long *addr,
+ unsigned long size, unsigned long offset);
#endif
#ifndef find_next_and_bit
@@ -27,8 +27,8 @@ extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
* Returns the bit number for the next set bit
* If no bits are set, returns @size.
*/
-extern unsigned long find_next_and_bit(const unsigned long *addr1,
- const unsigned long *addr2, unsigned long size,
+extern unsigned long find_next_and_bit(const volatile unsigned long *addr1,
+ const volatile unsigned long *addr2, unsigned long size,
unsigned long offset);
#endif
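
The declarations above are the generic ones from include/asm-generic/bitops/find.h. A reduced model of the scan they declare — next_bit_sketch is an invented name, and the 'invert' and little-endian handling of the real _find_next_bit() (its hunk appears near the end of this patch) are deliberately omitted:

#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(long))

/* Reduced model of a find_next_bit-style scan: just the word walk,
 * with each word freshly (volatile-)loaded. */
static unsigned long next_bit_sketch(const volatile unsigned long *addr,
                                     unsigned long nbits, unsigned long start)
{
        unsigned long tmp;

        if (start >= nbits)
                return nbits;

        tmp = addr[start / BITS_PER_LONG];       /* fresh load */
        tmp &= ~0UL << (start % BITS_PER_LONG);  /* drop bits below start */

        while (!tmp) {
                start = (start / BITS_PER_LONG + 1) * BITS_PER_LONG;
                if (start >= nbits)
                        return nbits;
                tmp = addr[start / BITS_PER_LONG];
        }
        start = start / BITS_PER_LONG * BITS_PER_LONG + __builtin_ctzl(tmp);
        return start < nbits ? start : nbits;
}

int main(void)
{
        unsigned long map[2] = { 0, 1UL << 5 };

        /* First set bit at or after bit 1: bit BITS_PER_LONG + 5. */
        printf("%lu\n", next_bit_sketch(map, 2 * BITS_PER_LONG, 1));
        return 0;
}
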
@@ -141,8 +141,8 @@ extern void __bitmap_shift_left(unsigned long *dst, const unsigned long *src,
extern void bitmap_cut(unsigned long *dst, const unsigned long *src,
unsigned int first, unsigned int cut,
unsigned int nbits);
-extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int nbits);
+extern int __bitmap_and(unsigned long *dst, const volatile unsigned long *bitmap1,
+ const volatile unsigned long *bitmap2, unsigned int nbits);
extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
@@ -152,8 +152,8 @@ extern int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
extern void __bitmap_replace(unsigned long *dst,
const unsigned long *old, const unsigned long *new,
const unsigned long *mask, unsigned int nbits);
-extern int __bitmap_intersects(const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int nbits);
+extern int __bitmap_intersects(const volatile unsigned long *bitmap1,
+ const volatile unsigned long *bitmap2, unsigned int nbits);
extern int __bitmap_subset(const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
extern int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits);
@@ -278,8 +278,8 @@ extern void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap,
(const unsigned long *) (bitmap), (nbits))
#endif
-static inline int bitmap_and(unsigned long *dst, const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static inline int bitmap_and(unsigned long *dst, const volatile unsigned long *src1,
+ const volatile unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return (*dst = *src1 & *src2 & BITMAP_LAST_WORD_MASK(nbits)) != 0;
@@ -359,8 +359,8 @@ static inline bool bitmap_or_equal(const unsigned long *src1,
return !(((*src1 | *src2) ^ *src3) & BITMAP_LAST_WORD_MASK(nbits));
}
-static inline int bitmap_intersects(const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static inline int bitmap_intersects(const volatile unsigned long *src1,
+ const volatile unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
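
In include/linux/bitmap.h only the two operations the flush path actually applies to a live mask — bitmap_and() and bitmap_intersects() — are converted, in both their small_const_nbits() single-word forms (above) and their out-of-line __bitmap_*() forms. A sketch of the single-word fast path, with invented names; the point is that both dereferences are now performed loads rather than values the optimizer may reuse:

static inline int one_word_and(unsigned long *dst,
                               const volatile unsigned long *src1,
                               const volatile unsigned long *src2,
                               unsigned long last_word_mask)
{
        return (*dst = *src1 & *src2 & last_word_mask) != 0;
}

int main(void)
{
        unsigned long d, a = 0x6, b = 0x3;

        return !one_word_and(&d, &a, &b, ~0UL); /* d == 0x2, exits 0 */
}
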
@@ -158,7 +158,7 @@ static inline unsigned int cpumask_last(const struct cpumask *srcp)
}
/* Valid inputs for n are -1 and 0. */
-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
+static inline unsigned int cpumask_next(int n, const volatile struct cpumask *srcp)
{
return n+1;
}
@@ -169,8 +169,8 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
}
static inline unsigned int cpumask_next_and(int n,
- const struct cpumask *srcp,
- const struct cpumask *andp)
+ const volatile struct cpumask *srcp,
+ const volatile struct cpumask *andp)
{
return n+1;
}
@@ -183,7 +183,7 @@ static inline unsigned int cpumask_next_wrap(int n, const struct cpumask *mask,
}
/* cpu must be a valid cpu, ie 0, so there's no other choice. */
-static inline unsigned int cpumask_any_but(const struct cpumask *mask,
+static inline unsigned int cpumask_any_but(const volatile struct cpumask *mask,
unsigned int cpu)
{
return 1;
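
The stubs above are the NR_CPUS==1 variants: they never read the mask, but their prototypes must still track the SMP versions so callers type-check identically under both configurations. In the same spirit (sketch, invented names):

#define NR_CPUS_SKETCH 1

#if NR_CPUS_SKETCH == 1
/* The UP stub ignores the mask but keeps the SMP prototype, so every
 * caller compiles the same way under both configs. */
static inline unsigned int next_cpu_sketch(int n,
                                           const volatile unsigned long *mask)
{
        (void)mask;
        return n + 1;
}
#endif

int main(void)
{
        return next_cpu_sketch(-1, 0);  /* 0: "first cpu" */
}
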
@@ -235,7 +235,7 @@ static inline unsigned int cpumask_last(const struct cpumask *srcp)
return find_last_bit(cpumask_bits(srcp), nr_cpumask_bits);
}
-unsigned int cpumask_next(int n, const struct cpumask *srcp);
+unsigned int cpumask_next(int n, const volatile struct cpumask *srcp);
/**
* cpumask_next_zero - get the next unset cpu in a cpumask
@@ -252,8 +252,8 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
}
-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
-int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
+int cpumask_next_and(int n, const volatile struct cpumask *, const volatile struct cpumask *);
+int cpumask_any_but(const volatile struct cpumask *mask, unsigned int cpu);
unsigned int cpumask_local_spread(unsigned int i, int node);
int cpumask_any_and_distribute(const struct cpumask *src1p,
const struct cpumask *src2p);
@@ -335,7 +335,7 @@ extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool
* @cpu: cpu number (< nr_cpu_ids)
* @dstp: the cpumask pointer
*/
-static inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
+static inline void cpumask_set_cpu(unsigned int cpu, volatile struct cpumask *dstp)
{
set_bit(cpumask_check(cpu), cpumask_bits(dstp));
}
@@ -351,7 +351,7 @@ static inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
* @cpu: cpu number (< nr_cpu_ids)
* @dstp: the cpumask pointer
*/
-static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
+static inline void cpumask_clear_cpu(int cpu, volatile struct cpumask *dstp)
{
clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
}
@@ -368,7 +368,7 @@ static inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
*
* Returns 1 if @cpu is set in @cpumask, else returns 0
*/
-static inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
+static inline int cpumask_test_cpu(int cpu, const volatile struct cpumask *cpumask)
{
return test_bit(cpumask_check(cpu), cpumask_bits((cpumask)));
}
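
Note that cpumask_set_cpu(), cpumask_clear_cpu() and cpumask_test_cpu() change signature only: the underlying set_bit()/clear_bit()/test_bit() already take volatile unsigned long * addresses in the kernel, so the one-line bodies stay as they are. The generic test_bit() has roughly this shape (sketch; test_bit_sketch is an invented name):

#include <limits.h>
#define BITS_PER_LONG (CHAR_BIT * sizeof(long))

/* Shape of the generic test_bit: the address parameter is already
 * volatile-qualified upstream, which is what lets the cpumask
 * wrappers above change prototype without touching their bodies. */
static inline int test_bit_sketch(unsigned int nr,
                                  const volatile unsigned long *addr)
{
        return 1UL & (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG));
}

int main(void)
{
        unsigned long word = 1UL << 7;

        return !test_bit_sketch(7, &word);      /* exits 0 */
}
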
@@ -428,8 +428,8 @@ static inline void cpumask_clear(struct cpumask *dstp)
* If *@dstp is empty, returns 0, else returns 1
*/
static inline int cpumask_and(struct cpumask *dstp,
- const struct cpumask *src1p,
- const struct cpumask *src2p)
+ const volatile struct cpumask *src1p,
+ const volatile struct cpumask *src2p)
{
return bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p),
cpumask_bits(src2p), nr_cpumask_bits);
@@ -521,8 +521,8 @@ static inline bool cpumask_or_equal(const struct cpumask *src1p,
* @src1p: the first input
* @src2p: the second input
*/
-static inline bool cpumask_intersects(const struct cpumask *src1p,
- const struct cpumask *src2p)
+static inline bool cpumask_intersects(const volatile struct cpumask *src1p,
+ const volatile struct cpumask *src2p)
{
return bitmap_intersects(cpumask_bits(src1p), cpumask_bits(src2p),
nr_cpumask_bits);
@@ -611,9 +611,9 @@ static inline void mm_init_cpumask(struct mm_struct *mm)
}
/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
-static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
+static inline volatile cpumask_t *mm_cpumask(struct mm_struct *mm)
{
- return (struct cpumask *)&mm->cpu_bitmap;
+ return (volatile struct cpumask *)&mm->cpu_bitmap;
}
struct mmu_gather;
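
This hunk is where the qualifier originates: mm->cpu_bitmap is written by other CPUs, which set and clear their own bits as they switch to and away from the mm. Once mm_cpumask() returns a volatile-qualified pointer, C's qualifier rules propagate it through every helper converted above. A sketch of that propagation with invented names:

struct mm_sketch { unsigned long cpu_bitmap[1]; };

static volatile unsigned long *mm_mask_sketch(struct mm_sketch *mm)
{
        return mm->cpu_bitmap;
}

static unsigned long reads_volatile(const volatile unsigned long *p)
{
        return *p;
}

static unsigned long reads_plain(const unsigned long *p)
{
        return *p;
}

int main(void)
{
        struct mm_sketch mm = { { 1 } };

        (void)reads_plain;
        (void)reads_volatile(mm_mask_sketch(&mm));      /* OK */
        /* reads_plain(mm_mask_sketch(&mm)) would discard the volatile
         * qualifier and draw a -Wdiscarded-qualifiers warning. */
        return 0;
}
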
@@ -59,7 +59,7 @@ void on_each_cpu(smp_call_func_t func, void *info, int wait);
* Call a function on processors specified by mask, which might include
* the local one.
*/
-void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
+void on_each_cpu_mask(const volatile struct cpumask *mask, smp_call_func_t func,
void *info, bool wait);
/*
@@ -71,7 +71,7 @@ void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
void *info, bool wait);
void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
- void *info, bool wait, const struct cpumask *mask);
+ void *info, bool wait, const volatile struct cpumask *mask);
int smp_call_function_single_async(int cpu, call_single_data_t *csd);
@@ -118,7 +118,7 @@ extern void smp_cpus_done(unsigned int max_cpus);
* Call a function on all other processors
*/
void smp_call_function(smp_call_func_t func, void *info, int wait);
-void smp_call_function_many(const struct cpumask *mask,
+void smp_call_function_many(const volatile struct cpumask *mask,
smp_call_func_t func, void *info, bool wait);
int smp_call_function_any(const struct cpumask *mask,
@@ -604,7 +604,7 @@ int smp_call_function_any(const struct cpumask *mask,
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
-static void smp_call_function_many_cond(const struct cpumask *mask,
+static void smp_call_function_many_cond(const volatile struct cpumask *mask,
smp_call_func_t func, void *info,
bool wait, smp_cond_func_t cond_func)
{
@@ -705,7 +705,7 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
* hardware interrupt handler or from a bottom half handler. Preemption
* must be disabled when calling this function.
*/
-void smp_call_function_many(const struct cpumask *mask,
+void smp_call_function_many(const volatile struct cpumask *mask,
smp_call_func_t func, void *info, bool wait)
{
smp_call_function_many_cond(mask, func, info, wait, NULL);
@@ -853,7 +853,7 @@ EXPORT_SYMBOL(on_each_cpu);
* exception is that it may be used during early boot while
* early_boot_irqs_disabled is set.
*/
-void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
+void on_each_cpu_mask(const volatile struct cpumask *mask, smp_call_func_t func,
void *info, bool wait)
{
int cpu = get_cpu();
@@ -892,7 +892,7 @@ EXPORT_SYMBOL(on_each_cpu_mask);
* from a hardware interrupt handler or from a bottom half handler.
*/
void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
- void *info, bool wait, const struct cpumask *mask)
+ void *info, bool wait, const volatile struct cpumask *mask)
{
int cpu = get_cpu();
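
The kernel/smp.c hunks mirror the include/linux/smp.h declarations: the dispatchers walk the caller's mask, and a caller may now hand them a live mask such as mm_cpumask(mm) without casting the qualifier away. A reduced model of such a walk (invented names):

#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(long))

/* Reduced model of a cross-call dispatcher: each word of the caller's
 * (possibly live) mask is freshly loaded as the walk progresses. */
static void call_on_mask_sketch(const volatile unsigned long *mask,
                                unsigned int nr_cpus,
                                void (*func)(unsigned int cpu))
{
        unsigned int cpu;

        for (cpu = 0; cpu < nr_cpus; cpu++)
                if (mask[cpu / BITS_PER_LONG] & (1UL << (cpu % BITS_PER_LONG)))
                        func(cpu);
}

static void say(unsigned int cpu)
{
        printf("calling cpu %u\n", cpu);
}

int main(void)
{
        unsigned long mask = (1UL << 0) | (1UL << 2);

        call_on_mask_sketch(&mask, 8, say);     /* cpus 0 and 2 */
        return 0;
}
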
@@ -235,8 +235,8 @@ void bitmap_cut(unsigned long *dst, const unsigned long *src,
}
EXPORT_SYMBOL(bitmap_cut);
-int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int bits)
+int __bitmap_and(unsigned long *dst, const volatile unsigned long *bitmap1,
+ const volatile unsigned long *bitmap2, unsigned int bits)
{
unsigned int k;
unsigned int lim = bits/BITS_PER_LONG;
@@ -301,8 +301,8 @@ void __bitmap_replace(unsigned long *dst,
}
EXPORT_SYMBOL(__bitmap_replace);
-int __bitmap_intersects(const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int bits)
+int __bitmap_intersects(const volatile unsigned long *bitmap1,
+ const volatile unsigned long *bitmap2, unsigned int bits)
{
unsigned int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
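
lib/bitmap.c supplies the out-of-line counterparts of the two inline operations converted earlier; the loop bodies are untouched, only the source loads become volatile. __bitmap_intersects() is short enough to model almost exactly (sketch; BITMAP_LAST_WORD_MASK() is open-coded):

#include <limits.h>
#define BITS_PER_LONG (CHAR_BIT * sizeof(long))

static int intersects_sketch(const volatile unsigned long *b1,
                             const volatile unsigned long *b2,
                             unsigned int bits)
{
        unsigned int k, lim = bits / BITS_PER_LONG;

        for (k = 0; k < lim; ++k)
                if (b1[k] & b2[k])      /* two volatile loads per word */
                        return 1;

        if (bits % BITS_PER_LONG)       /* partial last word */
                if ((b1[k] & b2[k]) & (~0UL >> (-bits & (BITS_PER_LONG - 1))))
                        return 1;
        return 0;
}

int main(void)
{
        unsigned long a = 0x5, b = 0x2;

        return intersects_sketch(&a, &b, 4);    /* 0x5 & 0x2 == 0: exits 0 */
}
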
@@ -15,7 +15,7 @@
*
* Returns >= nr_cpu_ids if no further cpus set.
*/
-unsigned int cpumask_next(int n, const struct cpumask *srcp)
+unsigned int cpumask_next(int n, const volatile struct cpumask *srcp)
{
/* -1 is a legal arg here. */
if (n != -1)
@@ -32,8 +32,8 @@ EXPORT_SYMBOL(cpumask_next);
*
* Returns >= nr_cpu_ids if no further cpus set in both.
*/
-int cpumask_next_and(int n, const struct cpumask *src1p,
- const struct cpumask *src2p)
+int cpumask_next_and(int n, const volatile struct cpumask *src1p,
+ const volatile struct cpumask *src2p)
{
/* -1 is a legal arg here. */
if (n != -1)
@@ -51,7 +51,7 @@ EXPORT_SYMBOL(cpumask_next_and);
* Often used to find any cpu but smp_processor_id() in a mask.
* Returns >= nr_cpu_ids if no cpus set.
*/
-int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
+int cpumask_any_but(const volatile struct cpumask *mask, unsigned int cpu)
{
unsigned int i;
@@ -29,8 +29,8 @@
* searching it for one bits.
* - The optional "addr2", which is anded with "addr1" if present.
*/
-static unsigned long _find_next_bit(const unsigned long *addr1,
- const unsigned long *addr2, unsigned long nbits,
+static unsigned long _find_next_bit(const volatile unsigned long *addr1,
+ const volatile unsigned long *addr2, unsigned long nbits,
unsigned long start, unsigned long invert, unsigned long le)
{
unsigned long tmp, mask;
@@ -74,7 +74,7 @@ static unsigned long _find_next_bit(const unsigned long *addr1,
/*
* Find the next set bit in a memory region.
*/
-unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
+unsigned long find_next_bit(const volatile unsigned long *addr, unsigned long size,
unsigned long offset)
{
return _find_next_bit(addr, NULL, size, offset, 0UL, 0);
@@ -92,8 +92,8 @@ EXPORT_SYMBOL(find_next_zero_bit);
#endif
#if !defined(find_next_and_bit)
-unsigned long find_next_and_bit(const unsigned long *addr1,
- const unsigned long *addr2, unsigned long size,
+unsigned long find_next_and_bit(const volatile unsigned long *addr1,
+ const volatile unsigned long *addr2, unsigned long size,
unsigned long offset)
{
return _find_next_bit(addr1, addr2, size, offset, 0UL, 0);
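
Finally, lib/find_bit.c. For reference, the 'invert' argument that _find_next_bit() keeps threading through: each fetched word is XORed with it, so find_next_bit() passes 0UL (visible in the hunk above) and find_next_zero_bit() passes ~0UL, letting one loop body serve both searches. In miniature:

#include <stdio.h>

int main(void)
{
        unsigned long word = 0xF0UL;    /* bits 4..7 set */

        /* invert == 0: search for set bits; invert == ~0UL: the same
         * expression finds clear bits instead. */
        printf("next set:  %d\n", __builtin_ctzl(word ^ 0UL));   /* 4 */
        printf("next zero: %d\n", __builtin_ctzl(word ^ ~0UL));  /* 0 */
        return 0;
}
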