x86/paravirt: Add new features for paravirt patching
To be able to switch paravirt patching from special-cased custom
code sequences to ALTERNATIVE handling, some new X86_FEATURE_*
flags are needed. This makes it possible to keep the standard
indirect pv call as the default code and to patch in the non-Xen
custom code sequence via ALTERNATIVE patching later.

Make sure paravirt patching is performed before alternatives patching.

Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20210311142319.4723-9-jgross@suse.com
jgross1 authored and suryasaimadhu committed Mar 11, 2021
1 parent 2fe2a2c commit 4e62921
Showing 4 changed files with 49 additions and 2 deletions.
2 changes: 2 additions & 0 deletions arch/x86/include/asm/cpufeatures.h
@@ -236,6 +236,8 @@
 #define X86_FEATURE_EPT_AD		( 8*32+17) /* Intel Extended Page Table access-dirty bit */
 #define X86_FEATURE_VMCALL		( 8*32+18) /* "" Hypervisor supports the VMCALL instruction */
 #define X86_FEATURE_VMW_VMMCALL	( 8*32+19) /* "" VMware prefers VMMCALL hypercall instruction */
+#define X86_FEATURE_PVUNLOCK		( 8*32+20) /* "" PV unlock function */
+#define X86_FEATURE_VCPUPREEMPT	( 8*32+21) /* "" PV vcpu_is_preempted function */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */
 #define X86_FEATURE_FSGSBASE		( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
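Editor's note: the "" prefix in the comments keeps both bits out of /proc/cpuinfo; they are synthetic caps forced by software rather than read from CPUID. A hedged sketch of how such a bit can steer code at a call site (hypothetical example, not from this series; pv_vcpu_is_preempted_helper() is an invented name):

	#include <asm/cpufeature.h>
	#include <asm/cpufeatures.h>

	bool pv_vcpu_is_preempted_helper(long cpu);	/* hypothetical PV helper */

	/*
	 * static_cpu_has() compiles to an alternative, so the branch taken
	 * here is fixed at patch time according to whether
	 * X86_FEATURE_VCPUPREEMPT was forced on during boot.
	 */
	static __always_inline bool demo_vcpu_is_preempted(long cpu)
	{
		if (static_cpu_has(X86_FEATURE_VCPUPREEMPT))
			return pv_vcpu_is_preempted_helper(cpu);
		return false;	/* native: a vCPU is never preempted */
	}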
10 changes: 10 additions & 0 deletions arch/x86/include/asm/paravirt.h
@@ -45,6 +45,10 @@ static inline u64 paravirt_steal_clock(int cpu)
 	return static_call(pv_steal_clock)(cpu);
 }
 
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+void __init paravirt_set_cap(void);
+#endif
+
 /* The paravirtualized I/O functions */
 static inline void slow_down_io(void)
 {
@@ -809,5 +813,11 @@ static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
 {
 }
 #endif
+
+#ifndef CONFIG_PARAVIRT_SPINLOCKS
+static inline void paravirt_set_cap(void)
+{
+}
+#endif
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_X86_PARAVIRT_H */
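Editor's note: the !CONFIG_PARAVIRT_SPINLOCKS branch adds an empty inline stub, the usual kernel pattern for optional hooks, so that the call site in alternative_instructions() below can invoke paravirt_set_cap() unconditionally. A standalone sketch of the pattern, with invented names:

	#ifdef CONFIG_FEATURE_X
	void __init feature_x_init(void);		/* real version defined elsewhere */
	#else
	static inline void feature_x_init(void) { }	/* stub: compiles to nothing */
	#endif

	void __init common_setup(void)
	{
		feature_x_init();	/* no #ifdef needed at the call site */
	}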
30 changes: 28 additions & 2 deletions arch/x86/kernel/alternative.c
@@ -28,6 +28,7 @@
 #include <asm/insn.h>
 #include <asm/io.h>
 #include <asm/fixmap.h>
+#include <asm/paravirt.h>
 
 int __read_mostly alternatives_patched;
 
@@ -733,6 +734,33 @@ void __init alternative_instructions(void)
 	 * patching.
 	 */
 
+	/*
+	 * Paravirt patching and alternative patching can be combined to
+	 * replace a function call with a short direct code sequence (e.g.
+	 * by setting a constant return value instead of doing that in an
+	 * external function).
+	 * In order to make this work the following sequence is required:
+	 * 1. set (artificial) features depending on used paravirt
+	 *    functions which can later influence alternative patching
+	 * 2. apply paravirt patching (generally replacing an indirect
+	 *    function call with a direct one)
+	 * 3. apply alternative patching (e.g. replacing a direct function
+	 *    call with a custom code sequence)
+	 * Doing paravirt patching after alternative patching would clobber
+	 * the optimization of the custom code with a function call again.
+	 */
+	paravirt_set_cap();
+
+	/*
+	 * First patch paravirt functions, such that we overwrite the indirect
+	 * call with the direct call.
+	 */
+	apply_paravirt(__parainstructions, __parainstructions_end);
+
+	/*
+	 * Then patch alternatives, such that those paravirt calls that are in
+	 * alternatives can be overwritten by their immediate fragments.
+	 */
 	apply_alternatives(__alt_instructions, __alt_instructions_end);
 
 #ifdef CONFIG_SMP
@@ -751,8 +779,6 @@ void __init alternative_instructions(void)
 	}
 #endif
 
-	apply_paravirt(__parainstructions, __parainstructions_end);
-
 	restart_nmi();
 	alternatives_patched = 1;
 }
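Editor's note: to make the required ordering concrete, here is a hedged sketch of what a single pv call site goes through under the new sequence (the instruction sequences are illustrative, not lifted from a generated kernel):

	/*
	 * Illustrative life of one pv unlock call site (hypothetical
	 * encodings), following the three steps in the comment above:
	 *
	 *   as built:       call *pv_ops.lock.queued_spin_unlock    (indirect)
	 *   after step 2:   call <native unlock thunk>              (direct)
	 *   after step 3:   movb $0, (%rdi)                         (inlined)
	 *
	 * Running step 2 after step 3 would overwrite the short movb
	 * sequence installed by the alternative with a function call
	 * again, losing the optimization.
	 */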
9 changes: 9 additions & 0 deletions arch/x86/kernel/paravirt-spinlocks.c
@@ -32,3 +32,12 @@ bool pv_is_native_vcpu_is_preempted(void)
 	return pv_ops.lock.vcpu_is_preempted.func ==
 		__raw_callee_save___native_vcpu_is_preempted;
 }
+
+void __init paravirt_set_cap(void)
+{
+	if (!pv_is_native_spin_unlock())
+		setup_force_cpu_cap(X86_FEATURE_PVUNLOCK);
+
+	if (!pv_is_native_vcpu_is_preempted())
+		setup_force_cpu_cap(X86_FEATURE_VCPUPREEMPT);
+}
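Editor's note: setup_force_cpu_cap() pins a bit in the CPU capability mask regardless of CPUID, so once paravirt_set_cap() has run the synthetic caps can be queried like any other feature bit. A hedged usage sketch (not part of this commit; pvcap_report() is an invented diagnostic):

	#include <linux/init.h>
	#include <linux/printk.h>
	#include <asm/cpufeature.h>

	/* Hypothetical boot-time diagnostic, for illustration only. */
	static int __init pvcap_report(void)
	{
		if (boot_cpu_has(X86_FEATURE_PVUNLOCK))
			pr_info("paravirt: PV spin_unlock in use\n");
		if (boot_cpu_has(X86_FEATURE_VCPUPREEMPT))
			pr_info("paravirt: PV vcpu_is_preempted in use\n");
		return 0;
	}
	late_initcall(pvcap_report);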
