--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -46,14 +46,6 @@
.code64
.section .entry.text, "ax"
-#ifdef CONFIG_PARAVIRT_XXL
-SYM_CODE_START(native_usergs_sysret64)
- UNWIND_HINT_EMPTY
- swapgs
- sysretq
-SYM_CODE_END(native_usergs_sysret64)
-#endif /* CONFIG_PARAVIRT_XXL */
-
/*
* 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
*
@@ -128,7 +120,12 @@ SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL)
* Try to use SYSRET instead of IRET if we're returning to
* a completely clean 64-bit userspace context. If we're not,
* go to the slow exit path.
+ * In the Xen PV case we must use IRET anyway.
*/
+
+ ALTERNATIVE "", "jmp swapgs_restore_regs_and_return_to_usermode", \
+ X86_FEATURE_XENPV
+
movq RCX(%rsp), %rcx
movq RIP(%rsp), %r11
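
The ALTERNATIVE above is resolved at boot by apply_alternatives(): on bare
metal the site stays empty and execution falls through to the SYSRET checks,
while on Xen PV the jmp to the IRET path is patched in. A minimal C model of
the resulting control flow (helper names are illustrative, not kernel API):

	#include <stdbool.h>

	static bool cpu_has_xenpv;	/* stands in for X86_FEATURE_XENPV */

	static void return_via_iret(void) { /* the IRET slow path */ }
	static void try_sysret_fast_path(void) { /* RCX == RIP etc. checks */ }

	static void syscall_exit_model(void)
	{
		if (cpu_has_xenpv) {	/* the patched-in jmp on Xen PV */
			return_via_iret();
			return;
		}
		try_sysret_fast_path();	/* bare metal: fall through */
	}
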
@@ -220,7 +217,9 @@ syscall_return_via_sysret:
popq %rdi
popq %rsp
- USERGS_SYSRET64
+ swapgs
+ CLEAR_CPU_BUFFERS
+ sysretq
SYM_CODE_END(entry_SYSCALL_64)
/*
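
The open-coded replacement preserves the ordering the removed macro had:
swapgs first to restore the user GS base, CLEAR_CPU_BUFFERS next so the
VERW-based buffer clearing is the last kernel action, then sysretq, after
which no kernel instruction runs. A C-shaped sketch of that ordering
(function names are stand-ins for the asm, illustration only):

	static void swapgs(void) { /* back to the user GS base */ }
	static void clear_cpu_buffers(void) { /* VERW buffer flush */ }
	static void sysretq_to_user(void) { /* drop to user mode */ }

	static void sysret_exit_order(void)
	{
		swapgs();		/* 1: user GS in place */
		clear_cpu_buffers();	/* 2: last kernel action */
		sysretq_to_user();	/* 3: nothing may follow */
	}
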
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -132,13 +132,6 @@ static __always_inline unsigned long arch_local_irq_save(void)
#endif
#define INTERRUPT_RETURN jmp native_iret
-#define USERGS_SYSRET64 \
- swapgs; \
- CLEAR_CPU_BUFFERS; \
- sysretq;
-#define USERGS_SYSRET32 \
- swapgs; \
- sysretl
#else
#define INTERRUPT_RETURN iret
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -776,11 +776,6 @@ extern void default_banner(void);
#ifdef CONFIG_X86_64
#ifdef CONFIG_PARAVIRT_XXL
-#define USERGS_SYSRET64 \
- PARA_SITE(PARA_PATCH(PV_CPU_usergs_sysret64), \
- ANNOTATE_RETPOLINE_SAFE; \
- jmp PARA_INDIRECT(pv_ops+PV_CPU_usergs_sysret64);)
-
#ifdef CONFIG_DEBUG_ENTRY
#define SAVE_FLAGS(clobbers) \
PARA_SITE(PARA_PATCH(PV_IRQ_save_fl), \
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -157,14 +157,6 @@ struct pv_cpu_ops {
u64 (*read_pmc)(int counter);
- /*
- * Switch to usermode gs and return to 64-bit usermode using
- * sysret. Only used in 64-bit kernels to return to 64-bit
- * processes. Usermode register state, including %rsp, must
- * already be restored.
- */
- void (*usergs_sysret64)(void);
-
/* Normal iret. Jump to this with the standard iret stack
frame set up. */
void (*iret)(void);
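
The PARA_SITE user in paravirt.h and this struct member go away together:
until patched, the paravirt site was in effect an indirect jump through the
pv_ops table, selecting the native or Xen implementation at runtime. A rough
C model (a sketch with made-up names, not the real macro expansion):

	struct pv_cpu_ops_model {
		void (*usergs_sysret64)(void);	/* native or Xen backend */
	};

	struct pv_ops_model {
		struct pv_cpu_ops_model cpu;
	};

	extern struct pv_ops_model pv_ops_model;

	static void usergs_sysret64_site(void)
	{
		/* dispatch through the table until boot patching rewrites it */
		pv_ops_model.cpu.usergs_sysret64();
	}
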
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -13,8 +13,6 @@ int main(void)
{
#ifdef CONFIG_PARAVIRT
#ifdef CONFIG_PARAVIRT_XXL
- OFFSET(PV_CPU_usergs_sysret64, paravirt_patch_template,
- cpu.usergs_sysret64);
#ifdef CONFIG_DEBUG_ENTRY
OFFSET(PV_IRQ_save_fl, paravirt_patch_template, irq.save_fl);
#endif
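
The asm-offsets entry is dropped with the struct member because PARA_PATCH
and PARA_INDIRECT need the member's byte offset from assembly. OFFSET()
generates that constant roughly as follows (simplified from
include/linux/kbuild.h):

	#include <stddef.h>

	/* Emits a marker string the build scrapes into an assembler-visible
	 * constant, e.g. PV_CPU_usergs_sysret64, in asm-offsets.h.
	 */
	#define DEFINE(sym, val) \
		asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))
	#define OFFSET(sym, str, mem) \
		DEFINE(sym, offsetof(struct str, mem))
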
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -124,8 +124,7 @@ unsigned paravirt_patch_default(u8 type, void *insn_buff,
else if (opfunc == _paravirt_ident_64)
ret = paravirt_patch_ident_64(insn_buff, len);
- else if (type == PARAVIRT_PATCH(cpu.iret) ||
- type == PARAVIRT_PATCH(cpu.usergs_sysret64))
+ else if (type == PARAVIRT_PATCH(cpu.iret))
/* If operation requires a jmp, then jmp */
ret = paravirt_patch_jmp(insn_buff, opfunc, addr, len);
#endif
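
paravirt_patch_jmp(), still used here for cpu.iret, overwrites the site with
a direct rel32 jmp when it fits. A sketch of the idea (the in-kernel version
writes through a packed struct rather than raw pointer casts):

	/* Emit "jmp rel32" (0xe9 + 4-byte delta) if the site is >= 5 bytes;
	 * otherwise leave the default code in place.
	 */
	static unsigned patch_jmp_sketch(unsigned char *insn_buff,
					 unsigned long target,
					 unsigned long addr, unsigned len)
	{
		if (len < 5)
			return len;	/* doesn't fit: keep the site as-is */

		insn_buff[0] = 0xe9;	/* jmp rel32 opcode */
		*(int *)(insn_buff + 1) = (int)(target - (addr + 5));
		return 5;		/* bytes written */
	}
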
@@ -159,7 +158,6 @@ static u64 native_steal_clock(int cpu)
/* These are in entry.S */
extern void native_iret(void);
-extern void native_usergs_sysret64(void);
static struct resource reserve_ioports = {
.start = 0,
@@ -299,7 +297,6 @@ struct paravirt_patch_template pv_ops = {
.cpu.load_sp0 = native_load_sp0,
- .cpu.usergs_sysret64 = native_usergs_sysret64,
.cpu.iret = native_iret,
#ifdef CONFIG_X86_IOPL_IOPERM
--- a/arch/x86/kernel/paravirt_patch.c
+++ b/arch/x86/kernel/paravirt_patch.c
@@ -27,7 +27,6 @@ struct patch_xxl {
const unsigned char mmu_write_cr3[3];
const unsigned char irq_restore_fl[2];
const unsigned char cpu_wbinvd[2];
- const unsigned char cpu_usergs_sysret64[6];
const unsigned char mov64[3];
};
@@ -40,8 +39,6 @@ static const struct patch_xxl patch_data_xxl = {
.mmu_write_cr3 = { 0x0f, 0x22, 0xdf }, // mov %rdi, %cr3
.irq_restore_fl = { 0x57, 0x9d }, // push %rdi; popfq
.cpu_wbinvd = { 0x0f, 0x09 }, // wbinvd
- .cpu_usergs_sysret64 = { 0x0f, 0x01, 0xf8,
- 0x48, 0x0f, 0x07 }, // swapgs; sysretq
.mov64 = { 0x48, 0x89, 0xf8 }, // mov %rdi, %rax
};
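
For reference, the dropped 6-byte template decodes to exactly the two native
instructions the open-coded exit path now uses:

	/* The removed patch template, annotated byte by byte: */
	static const unsigned char usergs_sysret64_insns[6] = {
		0x0f, 0x01, 0xf8,	/* swapgs */
		0x48, 0x0f, 0x07,	/* sysretq (REX.W + sysret) */
	};
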
@@ -83,7 +80,6 @@ unsigned int native_patch(u8 type, void *insn_buff, unsigned long addr,
PATCH_CASE(mmu, read_cr3, xxl, insn_buff, len);
PATCH_CASE(mmu, write_cr3, xxl, insn_buff, len);
- PATCH_CASE(cpu, usergs_sysret64, xxl, insn_buff, len);
PATCH_CASE(cpu, wbinvd, xxl, insn_buff, len);
#endif
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -1059,7 +1059,6 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
.read_pmc = xen_read_pmc,
.iret = xen_iret,
- .usergs_sysret64 = xen_sysret64,
.load_tr_desc = paravirt_nop,
.set_ldt = xen_set_ldt,
--- a/arch/x86/xen/xen-asm.S
+++ b/arch/x86/xen/xen-asm.S
@@ -220,27 +220,6 @@ SYM_CODE_START(xen_iret)
jmp hypercall_iret
SYM_CODE_END(xen_iret)
-SYM_CODE_START(xen_sysret64)
- UNWIND_HINT_EMPTY
- /*
- * We're already on the usermode stack at this point, but
- * still with the kernel gs, so we can easily switch back.
- *
- * tss.sp2 is scratch space.
- */
- movq %rsp, PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
- movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
-
- pushq $__USER_DS
- pushq PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
- pushq %r11
- pushq $__USER_CS
- pushq %rcx
-
- pushq $VGCF_in_syscall
- jmp hypercall_iret
-SYM_CODE_END(xen_sysret64)
-
/*
* XEN pv doesn't use trampoline stack, PER_CPU_VAR(cpu_tss_rw + TSS_sp0) is
* also the kernel stack. Reusing swapgs_restore_regs_and_return_to_usermode()
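
Background on the removed xen_sysret64: a Xen PV kernel does not run in ring
0, so it cannot execute SYSRET directly and every return to user mode goes
through the iret hypercall instead. The deleted code switched to the per-CPU
kernel stack and pushed the frame hypercall_iret consumes; its layout, lowest
address (last push) first, modelled as a C struct (struct and field names are
illustrative):

	struct xen_iret_frame_sketch {
		unsigned long flags;	/* VGCF_in_syscall */
		unsigned long rip;	/* %rcx: user return address */
		unsigned long cs;	/* __USER_CS */
		unsigned long rflags;	/* %r11: user RFLAGS */
		unsigned long rsp;	/* user stack pointer (via tss.sp2) */
		unsigned long ss;	/* __USER_DS */
	};
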
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -138,8 +138,6 @@ __visible unsigned long xen_read_cr2_direct(void);
/* These are not functions, and cannot be called normally */
__visible void xen_iret(void);
-__visible void xen_sysret32(void);
-__visible void xen_sysret64(void);
extern int xen_panic_handler_init(void);