
Commit

Merge pull request #66 from zandrey/5.4-1.0.0-imx
Update 5.4-1.0.0-imx to v5.4.38 from stable
otavio authored May 3, 2020
2 parents 559dd20 + fd90ac1 commit e96e260
Showing 274 changed files with 2,736 additions and 1,243 deletions.
3 changes: 1 addition & 2 deletions Documentation/admin-guide/kernel-parameters.txt
@@ -5005,8 +5005,7 @@

usbcore.old_scheme_first=
[USB] Start with the old device initialization
scheme, applies only to low and full-speed devices
(default 0 = off).
scheme (default 0 = off).

usbcore.usbfs_memory_mb=
[USB] Memory limit (in MB) for buffers allocated by
2 changes: 2 additions & 0 deletions Documentation/arm64/silicon-errata.rst
@@ -88,6 +88,8 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-N1 | #1349291 | N/A |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-N1 | #1542419 | ARM64_ERRATUM_1542419 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | MMU-500 | #841119,826419 | N/A |
+----------------+-----------------+-----------------+-----------------------------+
+----------------+-----------------+-----------------+-----------------------------+
2 changes: 1 addition & 1 deletion Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 35
SUBLEVEL = 38
EXTRAVERSION =
NAME = Kleptomaniac Octopus

1 change: 1 addition & 0 deletions arch/arm/boot/dts/bcm283x.dtsi
@@ -488,6 +488,7 @@
"dsi0_ddr2",
"dsi0_ddr";

status = "disabled";
};

thermal: thermal@7e212000 {
2 changes: 2 additions & 0 deletions arch/arm/mach-imx/Makefile
@@ -118,8 +118,10 @@ obj-$(CONFIG_SOC_IMX6) += suspend-imx6.o
obj-$(CONFIG_SOC_IMX53) += suspend-imx53.o
obj-$(CONFIG_SOC_IMX7ULP) += suspend-imx7ulp.o
endif
ifeq ($(CONFIG_ARM_CPU_SUSPEND),y)
AFLAGS_resume-imx6.o :=-Wa,-march=armv7-a
obj-$(CONFIG_SOC_IMX6) += resume-imx6.o
endif
obj-$(CONFIG_SOC_IMX6) += pm-imx6.o

obj-$(CONFIG_SOC_IMX1) += mach-imx1.o
16 changes: 16 additions & 0 deletions arch/arm64/Kconfig
@@ -559,6 +559,22 @@ config ARM64_ERRATUM_1463225

If unsure, say Y.

config ARM64_ERRATUM_1542419
bool "Neoverse-N1: workaround mis-ordering of instruction fetches"
default y
help
This option adds a workaround for ARM Neoverse-N1 erratum
1542419.

Affected Neoverse-N1 cores could execute a stale instruction when
modified by another CPU. The workaround depends on a firmware
counterpart.

Workaround the issue by hiding the DIC feature from EL0. This
forces user-space to perform cache maintenance.

If unsure, say Y.

config CAVIUM_ERRATUM_22375
bool "Cavium erratum 22375, 24313"
default y
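The help text above captures the shape of the workaround: with the DIC bit hidden from EL0, user space that rewrites instructions must issue its own cache maintenance, which the kernel can then trap and complete safely. Purely as an illustration (not part of this commit; the helper name is made up), the user-space side looks roughly like this:

#include <string.h>

/* Hypothetical sketch: copy freshly generated instructions into an
 * already-executable buffer. With DIC hidden from EL0, the explicit
 * maintenance below is exactly what the workaround relies on trapping. */
static void install_code(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
	/* GCC/Clang builtin emitting the required I-cache/D-cache maintenance. */
	__builtin___clear_cache((char *)dst, (char *)dst + len);
}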
3 changes: 2 additions & 1 deletion arch/arm64/include/asm/cache.h
@@ -11,14 +11,15 @@
#define CTR_L1IP_MASK 3
#define CTR_DMINLINE_SHIFT 16
#define CTR_IMINLINE_SHIFT 0
#define CTR_IMINLINE_MASK 0xf
#define CTR_ERG_SHIFT 20
#define CTR_CWG_SHIFT 24
#define CTR_CWG_MASK 15
#define CTR_IDC_SHIFT 28
#define CTR_DIC_SHIFT 29

#define CTR_CACHE_MINLINE_MASK \
(0xf << CTR_DMINLINE_SHIFT | 0xf << CTR_IMINLINE_SHIFT)
(0xf << CTR_DMINLINE_SHIFT | CTR_IMINLINE_MASK << CTR_IMINLINE_SHIFT)

#define CTR_L1IP(ctr) (((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK)

5 changes: 3 additions & 2 deletions arch/arm64/include/asm/compat.h
@@ -4,6 +4,9 @@
*/
#ifndef __ASM_COMPAT_H
#define __ASM_COMPAT_H

#include <asm-generic/compat.h>

#ifdef CONFIG_COMPAT

/*
@@ -13,8 +16,6 @@
#include <linux/sched.h>
#include <linux/sched/task_stack.h>

#include <asm-generic/compat.h>

#define COMPAT_USER_HZ 100
#ifdef __AARCH64EB__
#define COMPAT_UTS_MACHINE "armv8b\0\0"
3 changes: 2 additions & 1 deletion arch/arm64/include/asm/cpucaps.h
@@ -54,7 +54,8 @@
#define ARM64_WORKAROUND_1463225 44
#define ARM64_WORKAROUND_CAVIUM_TX2_219_TVM 45
#define ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM 46
#define ARM64_WORKAROUND_1542419 47

#define ARM64_NCAPS 47
#define ARM64_NCAPS 48

#endif /* __ASM_CPUCAPS_H */
4 changes: 3 additions & 1 deletion arch/arm64/include/asm/sysreg.h
@@ -49,7 +49,9 @@
#ifndef CONFIG_BROKEN_GAS_INST

#ifdef __ASSEMBLY__
#define __emit_inst(x) .inst (x)
// The space separator is omitted so that __emit_inst(x) can be parsed as
// either an assembler directive or an assembler macro argument.
#define __emit_inst(x) .inst(x)
#else
#define __emit_inst(x) ".inst " __stringify((x)) "\n\t"
#endif
32 changes: 31 additions & 1 deletion arch/arm64/kernel/cpu_errata.c
@@ -88,13 +88,21 @@ has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
}

static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
{
u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
bool enable_uct_trap = false;

/* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
if ((read_cpuid_cachetype() & mask) !=
(arm64_ftr_reg_ctrel0.sys_val & mask))
enable_uct_trap = true;

/* ... or if the system is affected by an erratum */
if (cap->capability == ARM64_WORKAROUND_1542419)
enable_uct_trap = true;

if (enable_uct_trap)
sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}

@@ -651,6 +659,18 @@ needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
return false;
}

static bool __maybe_unused
has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
int scope)
{
u32 midr = read_cpuid_id();
bool has_dic = read_cpuid_cachetype() & BIT(CTR_DIC_SHIFT);
const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);

WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
return is_midr_in_range(midr, &range) && has_dic;
}

#ifdef CONFIG_HARDEN_EL2_VECTORS

static const struct midr_range arm64_harden_el2_vectors[] = {
@@ -927,6 +947,16 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1542419
{
/* we depend on the firmware portion for correctness */
.desc = "ARM erratum 1542419 (kernel portion)",
.capability = ARM64_WORKAROUND_1542419,
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
.matches = has_neoverse_n1_erratum_1542419,
.cpu_enable = cpu_enable_trap_ctr_access,
},
#endif
{
}
11 changes: 11 additions & 0 deletions arch/arm64/kernel/sys_compat.c
@@ -8,6 +8,7 @@
*/

#include <linux/compat.h>
#include <linux/cpufeature.h>
#include <linux/personality.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
@@ -17,6 +18,7 @@

#include <asm/cacheflush.h>
#include <asm/system_misc.h>
#include <asm/tlbflush.h>
#include <asm/unistd.h>

static long
@@ -30,6 +32,15 @@ __do_compat_cache_op(unsigned long start, unsigned long end)
if (fatal_signal_pending(current))
return 0;

if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) {
/*
* The workaround requires an inner-shareable tlbi.
* We pick the reserved-ASID to minimise the impact.
*/
__tlbi(aside1is, __TLBI_VADDR(0, 0));
dsb(ish);
}

ret = __flush_cache_user_range(start, start + chunk);
if (ret)
return ret;
9 changes: 9 additions & 0 deletions arch/arm64/kernel/traps.c
@@ -501,6 +501,15 @@ static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
int rt = ESR_ELx_SYS64_ISS_RT(esr);
unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0);

if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) {
/* Hide DIC so that we can trap the unnecessary maintenance...*/
val &= ~BIT(CTR_DIC_SHIFT);

/* ... and fake IminLine to reduce the number of traps. */
val &= ~CTR_IMINLINE_MASK;
val |= (PAGE_SHIFT - 2) & CTR_IMINLINE_MASK;
}

pt_regs_write_reg(regs, rt, val);

arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
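As a side note (an illustration, not part of the diff): CTR_EL0.IminLine is the log2 of the smallest instruction-cache line size in 4-byte words, so advertising PAGE_SHIFT - 2 makes EL0 derive a line size of exactly one page, which caps the extra traps at roughly one per page of the flushed range.

/* Sketch only: with 4 KiB pages, the faked IminLine of PAGE_SHIFT - 2 = 10
 * yields a line size of 4 << 10 = 4096 bytes, i.e. one operation per page. */
_Static_assert((4u << (12 - 2)) == (1u << 12),
	       "faked IminLine gives a line size equal to the page size");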
2 changes: 1 addition & 1 deletion arch/powerpc/kernel/entry_32.S
@@ -705,7 +705,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPE)
stw r10,_CCR(r1)
stw r1,KSP(r3) /* Set old stack pointer */

kuap_check r2, r4
kuap_check r2, r0
#ifdef CONFIG_SMP
/* We need a sync somewhere here to make sure that if the
* previous task gets rescheduled on another CPU, it sees all
2 changes: 2 additions & 0 deletions arch/powerpc/kernel/setup_64.c
@@ -541,6 +541,8 @@ static bool __init parse_cache_info(struct device_node *np,
lsizep = of_get_property(np, propnames[3], NULL);
if (bsizep == NULL)
bsizep = lsizep;
if (lsizep == NULL)
lsizep = bsizep;
if (lsizep != NULL)
lsize = be32_to_cpu(*lsizep);
if (bsizep != NULL)
44 changes: 13 additions & 31 deletions arch/powerpc/kernel/time.c
@@ -522,35 +522,6 @@ static inline void clear_irq_work_pending(void)
"i" (offsetof(struct paca_struct, irq_work_pending)));
}

void arch_irq_work_raise(void)
{
preempt_disable();
set_irq_work_pending_flag();
/*
* Non-nmi code running with interrupts disabled will replay
* irq_happened before it re-enables interrupts, so set the
* decrementer there instead of causing a hardware exception
* which would immediately hit the masked interrupt handler
* and have the net effect of setting the decrementer in
* irq_happened.
*
* NMI interrupts can not check this when they return, so the
* decrementer hardware exception is raised, which will fire
* when interrupts are next enabled.
*
* BookE does not support this yet, it must audit all NMI
* interrupt handlers to ensure they call nmi_enter() so this
* check would be correct.
*/
if (IS_ENABLED(CONFIG_BOOKE) || !irqs_disabled() || in_nmi()) {
set_dec(1);
} else {
hard_irq_disable();
local_paca->irq_happened |= PACA_IRQ_DEC;
}
preempt_enable();
}

#else /* 32-bit */

DEFINE_PER_CPU(u8, irq_work_pending);
@@ -559,16 +530,27 @@ DEFINE_PER_CPU(u8, irq_work_pending);
#define test_irq_work_pending() __this_cpu_read(irq_work_pending)
#define clear_irq_work_pending() __this_cpu_write(irq_work_pending, 0)

#endif /* 32 vs 64 bit */

void arch_irq_work_raise(void)
{
/*
* 64-bit code that uses irq soft-mask can just cause an immediate
* interrupt here that gets soft masked, if this is called under
* local_irq_disable(). It might be possible to prevent that happening
* by noticing interrupts are disabled and setting decrementer pending
* to be replayed when irqs are enabled. The problem there is that
* tracing can call irq_work_raise, including in code that does low
* level manipulations of irq soft-mask state (e.g., trace_hardirqs_on)
* which could get tangled up if we're messing with the same state
* here.
*/
preempt_disable();
set_irq_work_pending_flag();
set_dec(1);
preempt_enable();
}

#endif /* 32 vs 64 bit */

#else /* CONFIG_IRQ_WORK */

#define test_irq_work_pending() 0
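For context (an illustrative sketch, not from this commit; names are made up): arch_irq_work_raise() above is the architecture hook behind irq_work_queue(), which callers use to defer work out of contexts that cannot run it directly.

#include <linux/irq_work.h>
#include <linux/printk.h>

static void my_deferred_fn(struct irq_work *work)
{
	/* Runs from the interrupt that arch_irq_work_raise() triggers. */
	pr_info("deferred irq_work ran\n");
}

static struct irq_work my_deferred_work;

static void my_queue_from_nmi(void)
{
	init_irq_work(&my_deferred_work, my_deferred_fn);
	/* Safe from NMI or hard-irq context; the callback runs later in
	 * normal interrupt context once the decrementer fires. */
	irq_work_queue(&my_deferred_work);
}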
2 changes: 1 addition & 1 deletion arch/powerpc/platforms/Kconfig.cputype
@@ -389,7 +389,7 @@ config PPC_KUAP

config PPC_KUAP_DEBUG
bool "Extra debugging for Kernel Userspace Access Protection"
depends on PPC_HAVE_KUAP && (PPC_RADIX_MMU || PPC_32)
depends on PPC_KUAP && (PPC_RADIX_MMU || PPC32)
help
Add extra debugging for Kernel Userspace Access Protection (KUAP)
If you're unsure, say N.
11 changes: 11 additions & 0 deletions arch/powerpc/platforms/pseries/ras.c
@@ -683,6 +683,17 @@ static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
#endif

out:
/*
* Enable translation as we will be accessing per-cpu variables
* in save_mce_event(), which may fall outside the RMO region; also
* leave it enabled because we will subsequently queue work to
* workqueues where per-cpu variables are accessed again. Besides,
* fwnmi_release_errinfo() crashes when called in realmode on
* pseries.
* Note: All the realmode handling like flushing SLB entries for
* SLB multihit is done by now.
*/
mtmsr(mfmsr() | MSR_IR | MSR_DR);
save_mce_event(regs, disposition == RTAS_DISP_FULLY_RECOVERED,
&mce_err, regs->nip, eaddr, paddr);

2 changes: 1 addition & 1 deletion arch/s390/kernel/diag.c
@@ -133,7 +133,7 @@ void diag_stat_inc(enum diag_stat_enum nr)
}
EXPORT_SYMBOL(diag_stat_inc);

void diag_stat_inc_norecursion(enum diag_stat_enum nr)
void notrace diag_stat_inc_norecursion(enum diag_stat_enum nr)
{
this_cpu_inc(diag_stat.counter[nr]);
trace_s390_diagnose_norecursion(diag_map[nr].code);
4 changes: 2 additions & 2 deletions arch/s390/kernel/smp.c
@@ -403,7 +403,7 @@ int smp_find_processor_id(u16 address)
return -1;
}

bool arch_vcpu_is_preempted(int cpu)
bool notrace arch_vcpu_is_preempted(int cpu)
{
if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
return false;
@@ -413,7 +413,7 @@ bool arch_vcpu_is_preempted(int cpu)
}
EXPORT_SYMBOL(arch_vcpu_is_preempted);

void smp_yield_cpu(int cpu)
void notrace smp_yield_cpu(int cpu)
{
if (MACHINE_HAS_DIAG9C) {
diag_stat_inc_norecursion(DIAG_STAT_X09C);
2 changes: 1 addition & 1 deletion arch/s390/kernel/trace.c
@@ -14,7 +14,7 @@ EXPORT_TRACEPOINT_SYMBOL(s390_diagnose);

static DEFINE_PER_CPU(unsigned int, diagnose_trace_depth);

void trace_s390_diagnose_norecursion(int diag_nr)
void notrace trace_s390_diagnose_norecursion(int diag_nr)
{
unsigned long flags;
unsigned int *depth;
3 changes: 3 additions & 0 deletions arch/s390/kvm/kvm-s390.c
@@ -1932,6 +1932,9 @@ static int gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn)
start = slot + 1;
}

if (start >= slots->used_slots)
return slots->used_slots - 1;

if (gfn >= memslots[start].base_gfn &&
gfn < memslots[start].base_gfn + memslots[start].npages) {
atomic_set(&slots->lru_slot, start);