Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[pull] master from torvalds:master #83

Merged
merged 39 commits into from
Jun 28, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
39 commits
Select commit Hold shift + click to select a range
2c18bd5
x86/resctrl: Fix memory bandwidth counter width for AMD
babumoger Jun 4, 2020
8e742aa
syscalls: Fix offset type of ksys_ftruncate()
Jun 10, 2020
e825873
x86, kcsan: Remove __no_kcsan_or_inline usage
Jun 2, 2020
e79302a
kcsan: Remove __no_kcsan_or_inline
Jun 2, 2020
5ddbc40
x86, kcsan: Add __no_kcsan to noinstr
Jun 2, 2020
7b861a5
kasan: Bump required compiler version
melver Jun 4, 2020
5144f8a
compiler_types.h: Add __no_sanitize_{address,undefined} to noinstr
melver Jun 4, 2020
14d3b37
x86/entry, cpumask: Provide non-instrumented variant of cpu_is_offline()
Jun 3, 2020
6b643a0
x86/entry, ubsan, objtool: Whitelist __ubsan_handle_*()
Jun 3, 2020
8e8bb06
x86/entry, bug: Comment the instrumentation_begin() usage for WARN()
Jun 4, 2020
5d51035
x86/cpu: Reinitialize IA32_FEAT_CTL MSR on BSP during wakeup
Jun 8, 2020
33aea07
compiler_attributes.h: Support no_sanitize_undefined check with GCC 4
melver Jun 15, 2020
cc5277f
x86/resctrl: Fix a NULL vs IS_ERR() static checker warning in rdt_cdp…
Jun 2, 2020
a13b9d0
x86/cpu: Use pinning mask for CR4 bits needing to be 0
kees Jun 9, 2020
2b10be2
objtool: Clean up elf_write() condition
Apr 17, 2020
fdabdd0
objtool: Provide elf_write_{insn,reloc}()
Jun 12, 2020
0f1441b
objtool: Fix noinstr vs KCOV
Jun 12, 2020
bb5570a
x86/asm/64: Align start of __clear_user() loop to 16-bytes
mfleming Jun 18, 2020
acf7b0b
kasan: Fix required compiler version
melver Jun 23, 2020
734d099
objtool: Don't consider vmlinux a C-file
Jun 17, 2020
e3a9e68
x86/entry: Fixup bad_iret vs noinstr
Jun 17, 2020
c7aadc0
x86/entry: Increase entry_stack size to a full page
Jun 17, 2020
145a773
x86/entry: Fix #UD vs WARN more
Jun 16, 2020
5faafd5
locking/atomics: Provide the arch_atomic_ interface to generic code
Jun 25, 2020
b58e733
rcu: Fixup noinstr warnings
Jun 15, 2020
2c92d78
Merge branch 'linus' into x86/entry, to resolve conflicts
Jun 26, 2020
4f311af
sched/core: Fix CONFIG_GCC_PLUGIN_RANDSTRUCT build fail
Jun 10, 2020
fd844ba
sched/core: Check cpus_mask, not cpus_ptr in __set_cpus_allowed_ptr()…
crwood-rh Jun 17, 2020
ce9bc3b
sched/deadline: Initialize ->dl_boosted
jlelli Jun 17, 2020
740797c
sched/core: Fix PI boosting between RT and DEADLINE tasks
jlelli Nov 19, 2018
b6e13e8
sched/core: Fix ttwu() race
Jun 22, 2020
739f70b
sched/core: s/WF_ON_RQ/WQ_ON_CPU/
Jun 22, 2020
8c4890d
smp, irq_work: Continue smp_call_function*() and irq_work*() integration
Jun 22, 2020
e21cf43
sched/cfs: change initial value of runnable_avg
vingu-linaro Jun 24, 2020
a358505
Merge tag 'x86_entry_for_5.8' of git://git.kernel.org/pub/scm/linux/k…
torvalds Jun 28, 2020
7ecb59a
Merge tag 'objtool_urgent_for_5.8_rc3' of git://git.kernel.org/pub/sc…
torvalds Jun 28, 2020
c141b30
Merge tag 'rcu_urgent_for_5.8_rc3' of git://git.kernel.org/pub/scm/li…
torvalds Jun 28, 2020
098c793
Merge tag 'x86_urgent_for_5.8_rc3' of git://git.kernel.org/pub/scm/li…
torvalds Jun 28, 2020
91a9a90
Merge tag 'sched_urgent_for_5.8_rc3' of git://git.kernel.org/pub/scm/…
torvalds Jun 28, 2020
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 0 additions & 6 deletions Documentation/dev-tools/kcsan.rst
Original file line number Diff line number Diff line change
Expand Up @@ -114,12 +114,6 @@ the below options are available:
To dynamically limit for which functions to generate reports, see the
`DebugFS interface`_ blacklist/whitelist feature.

For ``__always_inline`` functions, replace ``__always_inline`` with
``__no_kcsan_or_inline`` (which implies ``__always_inline``)::

static __no_kcsan_or_inline void foo(void) {
...

* To disable data race detection for a particular compilation unit, add to the
``Makefile``::

Expand Down
2 changes: 1 addition & 1 deletion arch/x86/Kconfig
Original file line number Diff line number Diff line change
Expand Up @@ -67,7 +67,7 @@ config X86
select ARCH_HAS_FILTER_PGPROT
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_KCOV if X86_64
select ARCH_HAS_KCOV if X86_64 && STACK_VALIDATION
select ARCH_HAS_MEM_ENCRYPT
select ARCH_HAS_MEMBARRIER_SYNC_CORE
select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
Expand Down
6 changes: 1 addition & 5 deletions arch/x86/include/asm/bitops.h
Original file line number Diff line number Diff line change
Expand Up @@ -201,12 +201,8 @@ arch_test_and_change_bit(long nr, volatile unsigned long *addr)
return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), *addr, c, "Ir", nr);
}

static __no_kcsan_or_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
{
/*
* Because this is a plain access, we need to disable KCSAN here to
* avoid double instrumentation via instrumented bitops.
*/
return ((1UL << (nr & (BITS_PER_LONG-1))) &
(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
}
Expand Down
6 changes: 6 additions & 0 deletions arch/x86/include/asm/bug.h
Original file line number Diff line number Diff line change
Expand Up @@ -75,6 +75,12 @@ do { \
unreachable(); \
} while (0)

/*
* This instrumentation_begin() is strictly speaking incorrect; but it
* suppresses the complaints from WARN()s in noinstr code. If such a WARN()
* were to trigger, we'd rather wreck the machine in an attempt to get the
* message out than not know about it.
*/
#define __WARN_FLAGS(flags) \
do { \
instrumentation_begin(); \
Expand Down
5 changes: 5 additions & 0 deletions arch/x86/include/asm/cpu.h
Original file line number Diff line number Diff line change
Expand Up @@ -58,4 +58,9 @@ static inline bool handle_guest_split_lock(unsigned long ip)
return false;
}
#endif
#ifdef CONFIG_IA32_FEAT_CTL
void init_ia32_feat_ctl(struct cpuinfo_x86 *c);
#else
/* Empty stub so callers need no #ifdef when IA32_FEAT_CTL support is out. */
static inline void init_ia32_feat_ctl(struct cpuinfo_x86 *c) {}
#endif
#endif /* _ASM_X86_CPU_H */
18 changes: 18 additions & 0 deletions arch/x86/include/asm/cpumask.h
Original file line number Diff line number Diff line change
Expand Up @@ -11,5 +11,23 @@ extern cpumask_var_t cpu_sibling_setup_mask;

extern void setup_cpu_local_masks(void);

/*
* NMI and MCE exceptions need cpu_is_offline() _really_ early,
* provide an arch_ special for them to avoid instrumentation.
*/
#if NR_CPUS > 1
/* SMP: read the CPU's bit in cpu_online_mask via the arch_ (non-instrumented) bitop. */
static __always_inline bool arch_cpu_online(int cpu)
{
return arch_test_bit(cpu, cpumask_bits(cpu_online_mask));
}
#else
/* UP: the only CPU, 0, is always online. */
static __always_inline bool arch_cpu_online(int cpu)
{
return cpu == 0;
}
#endif

#define arch_cpu_is_offline(cpu) unlikely(!arch_cpu_online(cpu))

#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_CPUMASK_H */
2 changes: 1 addition & 1 deletion arch/x86/include/asm/processor.h
Original file line number Diff line number Diff line change
Expand Up @@ -370,7 +370,7 @@ struct x86_hw_tss {
#define IO_BITMAP_OFFSET_INVALID (__KERNEL_TSS_LIMIT + 1)

struct entry_stack {
unsigned long words[64];
char stack[PAGE_SIZE];
};

struct entry_stack_page {
Expand Down
1 change: 1 addition & 0 deletions arch/x86/kernel/cpu/centaur.c
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
#include <linux/sched.h>
#include <linux/sched/clock.h>

#include <asm/cpu.h>
#include <asm/cpufeature.h>
#include <asm/e820/api.h>
#include <asm/mtrr.h>
Expand Down
24 changes: 12 additions & 12 deletions arch/x86/kernel/cpu/common.c
Original file line number Diff line number Diff line change
Expand Up @@ -347,6 +347,9 @@ static __always_inline void setup_umip(struct cpuinfo_x86 *c)
cr4_clear_bits(X86_CR4_UMIP);
}

/* These bits should not change their value after CPU init is finished. */
static const unsigned long cr4_pinned_mask =
X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP | X86_CR4_FSGSBASE;
static DEFINE_STATIC_KEY_FALSE_RO(cr_pinning);
static unsigned long cr4_pinned_bits __ro_after_init;

Expand All @@ -371,20 +374,20 @@ EXPORT_SYMBOL(native_write_cr0);

void native_write_cr4(unsigned long val)
{
unsigned long bits_missing = 0;
unsigned long bits_changed = 0;

set_register:
asm volatile("mov %0,%%cr4": "+r" (val), "+m" (cr4_pinned_bits));

if (static_branch_likely(&cr_pinning)) {
if (unlikely((val & cr4_pinned_bits) != cr4_pinned_bits)) {
bits_missing = ~val & cr4_pinned_bits;
val |= bits_missing;
if (unlikely((val & cr4_pinned_mask) != cr4_pinned_bits)) {
bits_changed = (val & cr4_pinned_mask) ^ cr4_pinned_bits;
val = (val & ~cr4_pinned_mask) | cr4_pinned_bits;
goto set_register;
}
/* Warn after we've set the missing bits. */
WARN_ONCE(bits_missing, "CR4 bits went missing: %lx!?\n",
bits_missing);
/* Warn after we've corrected the changed bits. */
WARN_ONCE(bits_changed, "pinned CR4 bits changed: 0x%lx!?\n",
bits_changed);
}
}
#if IS_MODULE(CONFIG_LKDTM)
Expand Down Expand Up @@ -419,7 +422,7 @@ void cr4_init(void)
if (boot_cpu_has(X86_FEATURE_PCID))
cr4 |= X86_CR4_PCIDE;
if (static_branch_likely(&cr_pinning))
cr4 |= cr4_pinned_bits;
cr4 = (cr4 & ~cr4_pinned_mask) | cr4_pinned_bits;

__write_cr4(cr4);

Expand All @@ -434,10 +437,7 @@ void cr4_init(void)
*/
static void __init setup_cr_pinning(void)
{
unsigned long mask;

mask = (X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP);
cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & mask;
cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & cr4_pinned_mask;
static_key_enable(&cr_pinning.key);
}

Expand Down
4 changes: 0 additions & 4 deletions arch/x86/kernel/cpu/cpu.h
Original file line number Diff line number Diff line change
Expand Up @@ -81,8 +81,4 @@ extern void update_srbds_msr(void);

extern u64 x86_read_arch_cap_msr(void);

#ifdef CONFIG_IA32_FEAT_CTL
void init_ia32_feat_ctl(struct cpuinfo_x86 *c);
#endif

#endif /* ARCH_X86_CPU_H */
2 changes: 1 addition & 1 deletion arch/x86/kernel/cpu/mce/core.c
Original file line number Diff line number Diff line change
Expand Up @@ -1083,7 +1083,7 @@ static noinstr bool mce_check_crashing_cpu(void)
{
unsigned int cpu = smp_processor_id();

if (cpu_is_offline(cpu) ||
if (arch_cpu_is_offline(cpu) ||
(crashing_cpu != -1 && crashing_cpu != cpu)) {
u64 mcgstatus;

Expand Down
8 changes: 4 additions & 4 deletions arch/x86/kernel/cpu/resctrl/core.c
Original file line number Diff line number Diff line change
Expand Up @@ -981,10 +981,10 @@ void resctrl_cpu_detect(struct cpuinfo_x86 *c)

c->x86_cache_max_rmid = ecx;
c->x86_cache_occ_scale = ebx;
if (c->x86_vendor == X86_VENDOR_INTEL)
c->x86_cache_mbm_width_offset = eax & 0xff;
else
c->x86_cache_mbm_width_offset = -1;
c->x86_cache_mbm_width_offset = eax & 0xff;

if (c->x86_vendor == X86_VENDOR_AMD && !c->x86_cache_mbm_width_offset)
c->x86_cache_mbm_width_offset = MBM_CNTR_WIDTH_OFFSET_AMD;
}
}

Expand Down
1 change: 1 addition & 0 deletions arch/x86/kernel/cpu/resctrl/internal.h
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,7 @@
#define MBA_IS_LINEAR 0x4
#define MBA_MAX_MBPS U32_MAX
#define MAX_MBA_BW_AMD 0x800
#define MBM_CNTR_WIDTH_OFFSET_AMD 20

#define RMID_VAL_ERROR BIT_ULL(63)
#define RMID_VAL_UNAVAIL BIT_ULL(62)
Expand Down
1 change: 1 addition & 0 deletions arch/x86/kernel/cpu/resctrl/rdtgroup.c
Original file line number Diff line number Diff line change
Expand Up @@ -1117,6 +1117,7 @@ static int rdt_cdp_peer_get(struct rdt_resource *r, struct rdt_domain *d,
_d_cdp = rdt_find_domain(_r_cdp, d->id, NULL);
if (WARN_ON(IS_ERR_OR_NULL(_d_cdp))) {
_r_cdp = NULL;
_d_cdp = NULL;
ret = -EINVAL;
}

Expand Down
1 change: 1 addition & 0 deletions arch/x86/kernel/cpu/zhaoxin.c
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
#include <linux/sched.h>
#include <linux/sched/clock.h>

#include <asm/cpu.h>
#include <asm/cpufeature.h>

#include "cpu.h"
Expand Down
2 changes: 1 addition & 1 deletion arch/x86/kernel/nmi.c
Original file line number Diff line number Diff line change
Expand Up @@ -478,7 +478,7 @@ static DEFINE_PER_CPU(unsigned long, nmi_dr7);

DEFINE_IDTENTRY_RAW(exc_nmi)
{
if (IS_ENABLED(CONFIG_SMP) && cpu_is_offline(smp_processor_id()))
if (IS_ENABLED(CONFIG_SMP) && arch_cpu_is_offline(smp_processor_id()))
return;

if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {
Expand Down
78 changes: 41 additions & 37 deletions arch/x86/kernel/traps.c
Original file line number Diff line number Diff line change
Expand Up @@ -84,17 +84,16 @@ static inline void cond_local_irq_disable(struct pt_regs *regs)
local_irq_disable();
}

int is_valid_bugaddr(unsigned long addr)
__always_inline int is_valid_bugaddr(unsigned long addr)
{
unsigned short ud;

if (addr < TASK_SIZE_MAX)
return 0;

if (get_kernel_nofault(ud, (unsigned short *)addr))
return 0;

return ud == INSN_UD0 || ud == INSN_UD2;
/*
* We got #UD, if the text isn't readable we'd have gotten
* a different exception.
*/
return *(unsigned short *)addr == INSN_UD2;
}

static nokprobe_inline int
Expand Down Expand Up @@ -216,40 +215,45 @@ static inline void handle_invalid_op(struct pt_regs *regs)
ILL_ILLOPN, error_get_trap_addr(regs));
}

DEFINE_IDTENTRY_RAW(exc_invalid_op)
static noinstr bool handle_bug(struct pt_regs *regs)
{
bool rcu_exit;
bool handled = false;

if (!is_valid_bugaddr(regs->ip))
return handled;

/*
* Handle BUG/WARN like NMIs instead of like normal idtentries:
* if we bugged/warned in a bad RCU context, for example, the last
* thing we want is to BUG/WARN again in the idtentry code, ad
* infinitum.
* All lies, just get the WARN/BUG out.
*/
if (!user_mode(regs) && is_valid_bugaddr(regs->ip)) {
enum bug_trap_type type;
instrumentation_begin();
/*
* Since we're emulating a CALL with exceptions, restore the interrupt
* state to what it was at the exception site.
*/
if (regs->flags & X86_EFLAGS_IF)
raw_local_irq_enable();
if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN) {
regs->ip += LEN_UD2;
handled = true;
}
if (regs->flags & X86_EFLAGS_IF)
raw_local_irq_disable();
instrumentation_end();

nmi_enter();
instrumentation_begin();
trace_hardirqs_off_finish();
type = report_bug(regs->ip, regs);
if (regs->flags & X86_EFLAGS_IF)
trace_hardirqs_on_prepare();
instrumentation_end();
nmi_exit();
return handled;
}

if (type == BUG_TRAP_TYPE_WARN) {
/* Skip the ud2. */
regs->ip += LEN_UD2;
return;
}
DEFINE_IDTENTRY_RAW(exc_invalid_op)
{
bool rcu_exit;

/*
* Else, if this was a BUG and report_bug returns or if this
* was just a normal #UD, we want to continue onward and
* crash.
*/
}
/*
* We use UD2 as a short encoding for 'CALL __WARN', as such
* handle it before exception entry to avoid recursive WARN
* in case exception entry is the one triggering WARNs.
*/
if (!user_mode(regs) && handle_bug(regs))
return;

rcu_exit = idtentry_enter_cond_rcu(regs);
instrumentation_begin();
Expand Down Expand Up @@ -691,13 +695,13 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
(struct bad_iret_stack *)__this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;

/* Copy the IRET target to the temporary storage. */
memcpy(&tmp.regs.ip, (void *)s->regs.sp, 5*8);
__memcpy(&tmp.regs.ip, (void *)s->regs.sp, 5*8);

/* Copy the remainder of the stack from the current stack. */
memcpy(&tmp, s, offsetof(struct bad_iret_stack, regs.ip));
__memcpy(&tmp, s, offsetof(struct bad_iret_stack, regs.ip));

/* Update the entry stack */
memcpy(new_stack, &tmp, sizeof(tmp));
__memcpy(new_stack, &tmp, sizeof(tmp));

BUG_ON(!user_mode(&new_stack->regs));
return new_stack;
Expand Down
4 changes: 4 additions & 0 deletions arch/x86/lib/memcpy_64.S
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,8 @@
#include <asm/alternative-asm.h>
#include <asm/export.h>

.pushsection .noinstr.text, "ax"

/*
* We build a jump to memcpy_orig by default which gets NOPped out on
* the majority of x86 CPUs which set REP_GOOD. In addition, CPUs which
Expand Down Expand Up @@ -184,6 +186,8 @@ SYM_FUNC_START_LOCAL(memcpy_orig)
retq
SYM_FUNC_END(memcpy_orig)

.popsection

#ifndef CONFIG_UML

MCSAFE_TEST_CTL
Expand Down
1 change: 1 addition & 0 deletions arch/x86/lib/usercopy_64.c
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
asm volatile(
" testq %[size8],%[size8]\n"
" jz 4f\n"
" .align 16\n"
"0: movq $0,(%[dst])\n"
" addq $8,%[dst]\n"
" decl %%ecx ; jnz 0b\n"
Expand Down
6 changes: 6 additions & 0 deletions arch/x86/power/cpu.c
Original file line number Diff line number Diff line change
Expand Up @@ -193,6 +193,8 @@ static void fix_processor_context(void)
*/
static void notrace __restore_processor_state(struct saved_context *ctxt)
{
struct cpuinfo_x86 *c;

if (ctxt->misc_enable_saved)
wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
/*
Expand Down Expand Up @@ -263,6 +265,10 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
mtrr_bp_restore();
perf_restore_debug_store();
msr_restore_context(ctxt);

c = &cpu_data(smp_processor_id());
if (cpu_has(c, X86_FEATURE_MSR_IA32_FEAT_CTL))
init_ia32_feat_ctl(c);
}

/* Needed by apm.c */
Expand Down
Loading