@@ -83,8 +83,6 @@ static inline void count_vm_events(enum vm_event_item item, long delta)
extern void all_vm_events(unsigned long *);
-extern void vm_events_fold_cpu(int cpu);
-
#else
/* Disable counters */
@@ -103,9 +101,6 @@ static inline void __count_vm_events(enum vm_event_item item, long delta)
static inline void all_vm_events(unsigned long *ret)
{
}
-static inline void vm_events_fold_cpu(int cpu)
-{
-}
#endif /* CONFIG_VM_EVENT_COUNTERS */
@@ -5832,14 +5832,6 @@ static int page_alloc_cpu_dead(unsigned int cpu)
mlock_drain_remote(cpu);
drain_pages(cpu);
- /*
- * Spill the event counters of the dead processor
- * into the current processors event counters.
- * This artificially elevates the count of the current
- * processor.
- */
- vm_events_fold_cpu(cpu);
-
/*
* Zero the differential counters of the dead processor
* so that the vm statistics are consistent.
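Note: with sum_vm_events() switched to for_each_possible_cpu() in the next
hunk, a dead CPU's event counts remain visible in its per-CPU area, so the
hotplug callback no longer has to spill them into the current CPU; the
sketch after that hunk works through the arithmetic.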
@@ -114,7 +114,7 @@ static void sum_vm_events(unsigned long *ret)
memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
- for_each_online_cpu(cpu) {
+ for_each_possible_cpu(cpu) {
struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
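The hunk above carries the reasoning behind all of the removals:
sum_vm_events() now walks every possible CPU rather than only the online
ones, so the counters an offlined CPU leaves behind in its (never-freed)
per-CPU area are summed directly, and vm_events_fold_cpu() is left with no
caller. A minimal userspace sketch of the idea, not kernel code; the array
sizes, sample values, and helper names are invented for illustration:

#include <stdio.h>

#define NR_POSSIBLE_CPUS 4
#define NR_ITEMS 2

/* Per-CPU event counters; slot 3 belongs to a CPU that went offline
 * and still holds its residual counts. */
static unsigned long event_state[NR_POSSIBLE_CPUS][NR_ITEMS] = {
	{ 10, 1 }, { 20, 2 }, { 30, 3 }, { 5, 7 }
};
static const int cpu_online_mask[NR_POSSIBLE_CPUS] = { 1, 1, 1, 0 };

/* Old scheme: sum online CPUs only. Without vm_events_fold_cpu() at
 * offline time, the dead CPU's { 5, 7 } would simply be lost. */
static void sum_online(unsigned long *ret)
{
	for (int i = 0; i < NR_ITEMS; i++)
		ret[i] = 0;
	for (int cpu = 0; cpu < NR_POSSIBLE_CPUS; cpu++)
		if (cpu_online_mask[cpu])
			for (int i = 0; i < NR_ITEMS; i++)
				ret[i] += event_state[cpu][i];
}

/* New scheme: sum all possible CPUs. Residual counts of offline CPUs
 * are picked up directly; no hotplug-time folding is needed. */
static void sum_possible(unsigned long *ret)
{
	for (int i = 0; i < NR_ITEMS; i++)
		ret[i] = 0;
	for (int cpu = 0; cpu < NR_POSSIBLE_CPUS; cpu++)
		for (int i = 0; i < NR_ITEMS; i++)
			ret[i] += event_state[cpu][i];
}

int main(void)
{
	unsigned long on[NR_ITEMS], poss[NR_ITEMS];

	sum_online(on);
	sum_possible(poss);
	printf("online-only:  %lu %lu (dead CPU's events missing)\n",
	       on[0], on[1]);
	printf("all possible: %lu %lu (complete without folding)\n",
	       poss[0], poss[1]);
	return 0;
}

Both schemes agree while every CPU is online; they diverge only after an
offline event, which is exactly the window vm_events_fold_cpu() used to
patch up.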
@@ -135,23 +135,6 @@ void all_vm_events(unsigned long *ret)
}
EXPORT_SYMBOL_GPL(all_vm_events);
-/*
- * Fold the foreign cpu events into our own.
- *
- * This is adding to the events on one processor
- * but keeps the global counts constant.
- */
-void vm_events_fold_cpu(int cpu)
-{
- struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
- int i;
-
- for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
- count_vm_events(i, fold_state->event[i]);
- fold_state->event[i] = 0;
- }
-}
-
#endif /* CONFIG_VM_EVENT_COUNTERS */
/*
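A note on the trade-off: readers now touch the per-CPU data of every
possible CPU, which means more iterations on systems where the possible
mask is much larger than the online mask. In exchange, the CPU-hotplug
path gets simpler, and reading an offline CPU's counters is safe because
per-CPU areas of possible CPUs are allocated at boot and never freed.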