@@ -365,12 +365,8 @@ DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw);
 #define __KERNEL_TSS_LIMIT \
 	(IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1)
 
-#ifdef CONFIG_X86_32
-DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
-#else
 /* The RO copy can't be accessed with this_cpu_xyz(), so use the RW copy. */
 #define cpu_current_top_of_stack cpu_tss_rw.x86_tss.sp1
-#endif
 
 /*
  * Save the original ist values for checking stack pointers during debugging
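
With the #ifdef gone, 32-bit and 64-bit both resolve cpu_current_top_of_stack to the sp1 field of the read-write TSS copy, so C code keeps using the normal this_cpu_*() accessors unchanged. For reference, the generic reader in the same header looks roughly like this (a sketch of the mainline helper of this era; the comment wording may differ in this tree):

static inline unsigned long current_top_of_stack(void)
{
	/* With the #define above, this now reads cpu_tss_rw.x86_tss.sp1. */
	return this_cpu_read_stable(cpu_current_top_of_stack);
}
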
@@ -207,9 +207,7 @@ static inline int arch_within_stack_frames(const void * const stack,
 
 #else /* !__ASSEMBLY__ */
 
-#ifdef CONFIG_X86_64
 # define cpu_current_top_of_stack (cpu_tss_rw + TSS_sp1)
-#endif
 
 #endif
 
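
On the __ASSEMBLY__ side the same name now unconditionally expands to (cpu_tss_rw + TSS_sp1): a per-CPU symbol plus a byte offset generated by asm-offsets from struct tss_struct, so entry code can address it as PER_CPU_VAR(cpu_current_top_of_stack). This assumes the series also generates TSS_sp1 for 32-bit builds (historically it was only emitted for 64-bit, matching the removed #ifdef); a minimal sketch of that asm-offsets entry:

/* asm-offsets entry (sketch; assumed to be made visible to 32-bit builds
 * as well): exposes x86_tss.sp1 to assembly as TSS_sp1. */
OFFSET(TSS_sp1, tss_struct, x86_tss.sp1);
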
@@ -1598,10 +1598,6 @@ EXPORT_PER_CPU_SYMBOL(__preempt_count);
  * the top of the kernel stack. Use an extra percpu variable to track the
  * top of the kernel stack directly.
  */
-DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) =
-	(unsigned long)&init_thread_union + THREAD_SIZE;
-EXPORT_PER_CPU_SYMBOL(cpu_current_top_of_stack);
-
 #ifdef CONFIG_STACKPROTECTOR
 DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
 #endif
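
Dropping the dedicated per-CPU variable also drops its boot-time initial value. That value presumably has to come from the static cpu_tss_rw initializer instead, the way 64-bit already seeds .sp1; a sketch of the shape of such an initializer (hypothetical here, not a hunk from this series):

DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw) = {
	.x86_tss = {
		/*
		 * Sketch, not from this series: .sp1 is now
		 * cpu_current_top_of_stack, so it must be valid before the
		 * first context switch; other fields are omitted here.
		 */
		.sp1 = TOP_OF_INIT_STACK,
	},
};

Note that the removed initializer was the bare top of init_thread_union, while TOP_OF_INIT_STACK subtracts TOP_OF_KERNEL_STACK_PADDING on 32-bit, so the two values are not byte-identical there.
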
@@ -288,12 +288,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	update_sp0(next_p);
 	refresh_sysenter_cs(next);
 	this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));
-	/*
-	 * TODO: Find a way to let cpu_current_top_of_stack point to
-	 * cpu_tss_rw.x86_tss.sp1. Doing so now results in stack corruption with
-	 * iret exceptions.
-	 */
-	this_cpu_write(cpu_tss_rw.x86_tss.sp1, next_p->thread.sp0);
 
 	/*
 	 * Restore %gs if needed (which is common)
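
With cpu_current_top_of_stack #defined to cpu_tss_rw.x86_tss.sp1 (first hunk above), the one remaining this_cpu_write() already lands in sp1, so the explicit sp1 write and its TODO go away; sp1 now tracks task_top_of_stack(next_p) rather than next_p->thread.sp0, matching what the 64-bit __switch_to() keeps there. For reference, task_top_of_stack() is roughly this in <asm/processor.h> (sketch):

/*
 * Sketch of the mainline macro: the byte just above the task's pt_regs,
 * which on 32-bit is the stack base + THREAD_SIZE minus
 * TOP_OF_KERNEL_STACK_PADDING.
 */
#define task_top_of_stack(task) ((unsigned long)(task_pt_regs(task) + 1))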