x86/paravirt, 64-bit: don't restore user rsp within sysret

There's no need to fold the restore of the user rsp into the sysret
pvop, so split it out.  This makes the pvop's semantics closer to those
of the underlying machine instruction.
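In other words (condensed from the hunks below), the native 64-bit
syscall return path goes from a single pvop that both restores the user
stack pointer and returns, to an explicit rsp restore at the call site
followed by a pvop that is nothing but swapgs/sysretq:

	/* before: the pvop folds the user-rsp restore into the return */
	ENTRY(native_usersp_sysret)
		movq	%gs:pda_oldrsp, %rsp
		swapgs
		sysretq

	/* after: the caller restores rsp, the pvop maps to the bare instructions */
		movq	%gs:pda_oldrsp, %rsp
		USERGS_SYSRET		/* native: swapgs; sysretq */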

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Cc: xen-devel <xen-devel@lists.xensource.com>
Cc: Stephen Tweedie <sct@redhat.com>
Cc: Eduardo Habkost <ehabkost@redhat.com>
Cc: Mark McLoughlin <markmc@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Authored by Jeremy Fitzhardinge on 2008-06-25 00:19:27 -04:00; committed by Ingo Molnar
parent d75cd22fdd
commit c7245da6ae
6 changed files with 14 additions and 15 deletions


@@ -62,7 +62,7 @@ int main(void)
 	OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable);
 	OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable);
 	OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
-	OFFSET(PV_CPU_usersp_sysret, pv_cpu_ops, usersp_sysret);
+	OFFSET(PV_CPU_usergs_sysret, pv_cpu_ops, usergs_sysret);
 	OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
 	OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
 #endif


@@ -59,8 +59,7 @@
 #endif

 #ifdef CONFIG_PARAVIRT
-ENTRY(native_usersp_sysret)
-	movq	%gs:pda_oldrsp,%rsp
+ENTRY(native_usergs_sysret)
 	swapgs
 	sysretq
 #endif /* CONFIG_PARAVIRT */
@@ -275,7 +274,8 @@ sysret_check:
 	CFI_REGISTER	rip,rcx
 	RESTORE_ARGS 0,-ARG_SKIP,1
 	/*CFI_REGISTER	rflags,r11*/
-	USERSP_SYSRET
+	movq	%gs:pda_oldrsp, %rsp
+	USERGS_SYSRET

 	CFI_RESTORE_STATE
 	/* Handle reschedules */


@@ -141,7 +141,7 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
 		ret = paravirt_patch_nop();
 	else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
 		 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
-		 type == PARAVIRT_PATCH(pv_cpu_ops.usersp_sysret))
+		 type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret))
 		/* If operation requires a jmp, then jmp */
 		ret = paravirt_patch_jmp(insnbuf, opfunc, addr, len);
 	else
@@ -193,7 +193,7 @@ static void native_flush_tlb_single(unsigned long addr)
 /* These are in entry.S */
 extern void native_iret(void);
 extern void native_irq_enable_sysexit(void);
-extern void native_usersp_sysret(void);
+extern void native_usergs_sysret(void);

 static int __init print_banner(void)
 {
@@ -332,7 +332,7 @@ struct pv_cpu_ops pv_cpu_ops = {
 #ifdef CONFIG_X86_32
 	.irq_enable_sysexit = native_irq_enable_sysexit,
 #else
-	.usersp_sysret = native_usersp_sysret,
+	.usergs_sysret = native_usergs_sysret,
 #endif
 	.iret = native_iret,
 	.swapgs = native_swapgs,


@@ -15,7 +15,7 @@ DEF_NATIVE(pv_cpu_ops, clts, "clts");
 DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");

 /* the three commands give us more control to how to return from a syscall */
-DEF_NATIVE(pv_cpu_ops, usersp_sysret, "movq %gs:" __stringify(pda_oldrsp) ", %rsp; swapgs; sysretq;");
+DEF_NATIVE(pv_cpu_ops, usergs_sysret, "swapgs; sysretq;");
 DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs");

 unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
@@ -35,7 +35,7 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
 		PATCH_SITE(pv_irq_ops, irq_enable);
 		PATCH_SITE(pv_irq_ops, irq_disable);
 		PATCH_SITE(pv_cpu_ops, iret);
-		PATCH_SITE(pv_cpu_ops, usersp_sysret);
+		PATCH_SITE(pv_cpu_ops, usergs_sysret);
 		PATCH_SITE(pv_cpu_ops, swapgs);
 		PATCH_SITE(pv_mmu_ops, read_cr2);
 		PATCH_SITE(pv_mmu_ops, read_cr3);


@@ -112,8 +112,7 @@ static inline unsigned long __raw_local_irq_save(void)

 #ifdef CONFIG_X86_64
 #define INTERRUPT_RETURN	iretq
-#define USERSP_SYSRET				\
-			movq	%gs:pda_oldrsp, %rsp;	\
+#define USERGS_SYSRET				\
 			swapgs;			\
 			sysretq;
 #else


@@ -143,7 +143,7 @@ struct pv_cpu_ops {

 	/* These three are jmp to, not actually called. */
 	void (*irq_enable_sysexit)(void);
-	void (*usersp_sysret)(void);
+	void (*usergs_sysret)(void);
 	void (*iret)(void);

 	void (*swapgs)(void);
@@ -1505,10 +1505,10 @@ static inline unsigned long __raw_local_irq_save(void)
 	movq %rax, %rcx;				\
 	xorq %rax, %rax;

-#define USERSP_SYSRET					\
-	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usersp_sysret), \
+#define USERGS_SYSRET					\
+	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret), \
 		  CLBR_NONE,					\
-		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usersp_sysret))
+		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret))
 #endif

 #endif /* __ASSEMBLY__ */