Merge tag 'v6.6.63' into 6.6-main
This is the 6.6.63 stable release

# -----BEGIN PGP SIGNATURE-----
#
# iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmdAl4gACgkQONu9yGCS
# aT6iCBAAqTvp0BGFFl+7pVb9SpCxaxleK3tzs1X3q6dZpk1p6bI34GCHMlWGQZRR
# Iy1DzLcmrJd22frPOZ3PZyWOpAvz7VcU9I/2PhNfG4Ge3aPw0Q9Cs6I+9E51msJA
# njRIDUcozBTX1dsQV/g56Pzefvq+W0X1Z6XlipAYqcZRrJHkZoL6WL4vgmpEFdEf
# Nk3u3QT67vALCnIxbtm7h2NZSwWFpRbW1VXDe0R7bxWb8q0ZRIsgoJiUroZatx4Y
# YejggnVNjRHqytSuNqHF8BtJzGHRD/0n+IMUih3oVSLLkzZSLriaWXGuXzQGpzDG
# FTDHyu77I12f5HSv5ftc2dMupb6RL9ICR1uuc/WppjWgP5xLd2ULSlwlaKQsk6IF
# v2wdzlOol15TDKqi+D4D1p+eUeSqzMUDYalAUAlX3/gOr69/2EYhgqwsND5NdXFJ
# NqKadL29R3a4DT/NOLqklLcUQlRBTwkQnV4k167+YKjwC6EYOrHFzr6xqKeqD+Dr
# OpNg5rKM7HB0VGobvw++uwHHSMYkdLeuSUNdFzEU2tL0N1YqlNxKs0YtrbOT0l1q
# l7VCOSzHHkkYmyRU+zfo2k5LtUSROSGM5qqkHFQvpA/ikFWahIMyjiAlxjkO6pgm
# vCGQnYSV0lO4Vsa5F131fkwGA0LTBFFRymUGN+QMIvj0jtUFADA=
# =EXuo
# -----END PGP SIGNATURE-----
# gpg: Signature made Fri Nov 22 15:39:04 2024 CET
# gpg:                using RSA key 647F28654894E3BD457199BE38DBBDC86092693E
# gpg: Can't check signature: No public key
frank-w committed Dec 6, 2024
2 parents fa30e3f + bff3e13 commit 4e6db84
Showing 83 changed files with 819 additions and 424 deletions.
2 changes: 1 addition & 1 deletion Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 6
-SUBLEVEL = 62
+SUBLEVEL = 63
EXTRAVERSION =
NAME = Pinguïn Aangedreven

8 changes: 6 additions & 2 deletions arch/arm/kernel/head.S
@@ -252,27 +252,31 @@ __create_page_tables:
*/
add r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
ldr r6, =(_end - 1)
+
+/* For XIP, kernel_sec_start/kernel_sec_end are currently in RO memory */
+#ifndef CONFIG_XIP_KERNEL
adr_l r5, kernel_sec_start @ _pa(kernel_sec_start)
#if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32
str r8, [r5, #4] @ Save physical start of kernel (BE)
#else
str r8, [r5] @ Save physical start of kernel (LE)
#endif
+#endif
orr r3, r8, r7 @ Add the MMU flags
add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ENTRY_ORDER)
1: str r3, [r0], #1 << PMD_ENTRY_ORDER
add r3, r3, #1 << SECTION_SHIFT
cmp r0, r6
bls 1b
+#ifndef CONFIG_XIP_KERNEL
eor r3, r3, r7 @ Remove the MMU flags
adr_l r5, kernel_sec_end @ _pa(kernel_sec_end)
#if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32
str r3, [r5, #4] @ Save physical end of kernel (BE)
#else
str r3, [r5] @ Save physical end of kernel (LE)
#endif
-
-#ifdef CONFIG_XIP_KERNEL
+#else
/*
* Map the kernel image separately as it is not located in RAM.
*/
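For context on the BE8/BE32 branches above: kernel_sec_start and kernel_sec_end are 64-bit slots, while the assembly only has a 32-bit physical address in a register, so the store must land in the low word of the u64, which is offset 0 on little-endian and offset 4 on big-endian. A standalone C illustration of that offset rule, with a made-up address (not kernel code):

```c
/*
 * Why the BE path stores at [r5, #4]: a 32-bit value written into a
 * 64-bit slot must go where the low half of the u64 lives, which is
 * offset 0 on little-endian and offset 4 on big-endian. Illustrative
 * values only.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint64_t slot = 0;
	uint32_t pa = 0x60000000;	/* hypothetical physical load address */
	int big_endian = (*(const uint8_t *)&(uint16_t){ 1 } == 0);

	/* Store the 32-bit word where the low half of the u64 lives. */
	memcpy((uint8_t *)&slot + (big_endian ? 4 : 0), &pa, sizeof(pa));

	printf("slot = %#llx\n", (unsigned long long)slot);
	return 0;
}
```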
34 changes: 21 additions & 13 deletions arch/arm/mm/mmu.c
@@ -1402,18 +1402,6 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
create_mapping(&map);
}

-/*
- * Map the kernel if it is XIP.
- * It is always first in the modulearea.
- */
-#ifdef CONFIG_XIP_KERNEL
-map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
-map.virtual = MODULES_VADDR;
-map.length = ((unsigned long)_exiprom - map.virtual + ~SECTION_MASK) & SECTION_MASK;
-map.type = MT_ROM;
-create_mapping(&map);
-#endif
-
/*
* Map the cache flushing regions.
*/
@@ -1603,12 +1591,27 @@ static void __init map_kernel(void)
* This will only persist until we turn on proper memory management later on
* and we remap the whole kernel with page granularity.
*/
+#ifdef CONFIG_XIP_KERNEL
+phys_addr_t kernel_nx_start = kernel_sec_start;
+#else
phys_addr_t kernel_x_start = kernel_sec_start;
phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
phys_addr_t kernel_nx_start = kernel_x_end;
+#endif
phys_addr_t kernel_nx_end = kernel_sec_end;
struct map_desc map;

+/*
+ * Map the kernel if it is XIP.
+ * It is always first in the modulearea.
+ */
+#ifdef CONFIG_XIP_KERNEL
+map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
+map.virtual = MODULES_VADDR;
+map.length = ((unsigned long)_exiprom - map.virtual + ~SECTION_MASK) & SECTION_MASK;
+map.type = MT_ROM;
+create_mapping(&map);
+#else
map.pfn = __phys_to_pfn(kernel_x_start);
map.virtual = __phys_to_virt(kernel_x_start);
map.length = kernel_x_end - kernel_x_start;
@@ -1618,7 +1621,7 @@ static void __init map_kernel(void)
/* If the nx part is small it may end up covered by the tail of the RWX section */
if (kernel_x_end == kernel_nx_end)
return;
-
+#endif
map.pfn = __phys_to_pfn(kernel_nx_start);
map.virtual = __phys_to_virt(kernel_nx_start);
map.length = kernel_nx_end - kernel_nx_start;
@@ -1763,6 +1766,11 @@ void __init paging_init(const struct machine_desc *mdesc)
{
void *zero_page;

+#ifdef CONFIG_XIP_KERNEL
+/* Store the kernel RW RAM region start/end in these variables */
+kernel_sec_start = CONFIG_PHYS_OFFSET & SECTION_MASK;
+kernel_sec_end = round_up(__pa(_end), SECTION_SIZE);
+#endif
pr_debug("physical kernel sections: 0x%08llx-0x%08llx\n",
kernel_sec_start, kernel_sec_end);

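The new XIP defaults in paging_init() are plain section-granule rounding of the kernel's RAM region. A quick standalone check of that arithmetic with made-up values (1 MiB sections; the CONFIG_PHYS_OFFSET and __pa(_end) numbers are hypothetical, not from any real board):

```c
/*
 * Illustrative arithmetic for the XIP defaults above. The round_up()
 * macro mirrors the kernel's power-of-two version; all addresses are
 * hypothetical.
 */
#include <stdio.h>

#define SECTION_SHIFT	20
#define SECTION_SIZE	(1UL << SECTION_SHIFT)
#define SECTION_MASK	(~(SECTION_SIZE - 1))
#define round_up(x, y)	((((x) - 1) | ((y) - 1)) + 1)

int main(void)
{
	unsigned long phys_offset = 0x60100000UL;	/* hypothetical CONFIG_PHYS_OFFSET */
	unsigned long pa_end = 0x60531234UL;		/* hypothetical __pa(_end) */

	unsigned long sec_start = phys_offset & SECTION_MASK;	/* 0x60000000 */
	unsigned long sec_end = round_up(pa_end, SECTION_SIZE);	/* 0x60600000 */

	printf("kernel_sec_start = %#lx\n", sec_start);
	printf("kernel_sec_end   = %#lx\n", sec_end);
	return 0;
}
```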
10 changes: 7 additions & 3 deletions arch/arm64/include/asm/mman.h
@@ -3,6 +3,8 @@
#define __ASM_MMAN_H__

#include <linux/compiler.h>
+#include <linux/fs.h>
+#include <linux/shmem_fs.h>
#include <linux/types.h>
#include <uapi/asm/mman.h>

@@ -21,19 +23,21 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
}
#define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)

-static inline unsigned long arch_calc_vm_flag_bits(unsigned long flags)
+static inline unsigned long arch_calc_vm_flag_bits(struct file *file,
+unsigned long flags)
{
/*
* Only allow MTE on anonymous mappings as these are guaranteed to be
* backed by tags-capable memory. The vm_flags may be overridden by a
* filesystem supporting MTE (RAM-based).
*/
-if (system_supports_mte() && (flags & MAP_ANONYMOUS))
+if (system_supports_mte() &&
+((flags & MAP_ANONYMOUS) || shmem_file(file)))
return VM_MTE_ALLOWED;

return 0;
}
-#define arch_calc_vm_flag_bits(flags) arch_calc_vm_flag_bits(flags)
+#define arch_calc_vm_flag_bits(file, flags) arch_calc_vm_flag_bits(file, flags)

static inline bool arch_validate_prot(unsigned long prot,
unsigned long addr __always_unused)
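What the shmem_file() check enables, sketched from userspace: PROT_MTE on a shmem-backed mapping such as a memfd. Previously only MAP_ANONYMOUS mappings received VM_MTE_ALLOWED here (a RAM-based filesystem could still set it itself), so an mmap like the one below was refused. The sketch assumes an MTE-capable arm64 CPU and kernel; the PROT_MTE fallback define mirrors the arm64 uapi value:

```c
/*
 * Minimal sketch, assuming arm64 with MTE: map a memfd (shmem-backed)
 * with PROT_MTE. With this fix the kernel sets VM_MTE_ALLOWED for the
 * VMA, so arch_validate_flags() accepts PROT_MTE.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef PROT_MTE
#define PROT_MTE 0x20	/* arm64 uapi value; fallback for illustration */
#endif

int main(void)
{
	int fd = memfd_create("mte-demo", 0);	/* shmem-backed file */
	if (fd < 0 || ftruncate(fd, 4096) != 0) {
		perror("memfd");
		return 1;
	}

	/* shmem_file() is now true for this mapping, hence MTE is allowed. */
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_MTE,
		       MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	munmap(p, 4096);
	close(fd);
	return 0;
}
```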
2 changes: 1 addition & 1 deletion arch/loongarch/include/asm/kasan.h
@@ -51,7 +51,7 @@
/* KAsan shadow memory start right after vmalloc. */
#define KASAN_SHADOW_START round_up(KFENCE_AREA_END, PGDIR_SIZE)
#define KASAN_SHADOW_SIZE (XKVRANGE_VC_SHADOW_END - XKPRANGE_CC_KASAN_OFFSET)
-#define KASAN_SHADOW_END round_up(KASAN_SHADOW_START + KASAN_SHADOW_SIZE, PGDIR_SIZE)
+#define KASAN_SHADOW_END (round_up(KASAN_SHADOW_START + KASAN_SHADOW_SIZE, PGDIR_SIZE) - 1)

#define XKPRANGE_CC_SHADOW_OFFSET (KASAN_SHADOW_START + XKPRANGE_CC_KASAN_OFFSET)
#define XKPRANGE_UC_SHADOW_OFFSET (KASAN_SHADOW_START + XKPRANGE_UC_KASAN_OFFSET)
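The extra "- 1" makes KASAN_SHADOW_END an inclusive bound. The point is wrap-around: if the exclusive end of the shadow range lands exactly on 2^64, it wraps to 0 and compares like a userspace address, while end minus one stays representable. A toy illustration with hypothetical constants (not the real LoongArch layout):

```c
/*
 * Illustrative only: why an inclusive end avoids wrap-around. The
 * start/size values below are hypothetical, chosen so that the
 * exclusive end is exactly 2^64.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t start = 0xffff800000000000ull;	/* hypothetical shadow start */
	uint64_t size  = 0x0000800000000000ull;	/* hypothetical shadow size  */

	uint64_t end_exclusive = start + size;		/* wraps to 0 */
	uint64_t end_inclusive = start + size - 1;	/* 0xffffffffffffffff */

	printf("exclusive end: %#llx\n", (unsigned long long)end_exclusive);
	printf("inclusive end: %#llx\n", (unsigned long long)end_inclusive);
	return 0;
}
```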
2 changes: 1 addition & 1 deletion arch/loongarch/kernel/smp.c
@@ -272,7 +272,7 @@ static void __init fdt_smp_setup(void)
__cpu_number_map[cpuid] = cpu;
__cpu_logical_map[cpu] = cpuid;

-early_numa_add_cpu(cpu, 0);
+early_numa_add_cpu(cpuid, 0);
set_cpuid_to_node(cpuid, 0);
}

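For readers of the one-line fix above: cpu is the logical index assigned while scanning the FDT, cpuid is the physical id read from the devicetree, and the two maps translate between them. The neighboring set_cpuid_to_node(cpuid, 0) already keys by physical id, and the fix evidently makes early_numa_add_cpu() consistent with it. A toy sketch of the two id spaces (all values hypothetical):

```c
/*
 * Illustrative only: logical vs. physical CPU ids as in the loop
 * above. A hypothetical board whose second core has reg = 4.
 */
#include <stdio.h>

int cpu_number_map_demo[64];	/* physical cpuid -> logical cpu   */
int cpu_logical_map_demo[64];	/* logical cpu    -> physical cpuid */

int main(void)
{
	int cpu = 1;	/* logical index assigned during the FDT scan */
	int cpuid = 4;	/* physical id from the DT "reg" property     */

	cpu_number_map_demo[cpuid] = cpu;
	cpu_logical_map_demo[cpu] = cpuid;

	/* The NUMA bookkeeping is keyed like set_cpuid_to_node(): by the
	 * physical id, hence early_numa_add_cpu(cpuid, 0), not (cpu, 0). */
	printf("logical %d <-> physical %d\n", cpu, cpuid);
	return 0;
}
```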
41 changes: 36 additions & 5 deletions arch/loongarch/mm/kasan_init.c
@@ -13,6 +13,13 @@

static pgd_t kasan_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);

+#ifdef __PAGETABLE_P4D_FOLDED
+#define __pgd_none(early, pgd) (0)
+#else
+#define __pgd_none(early, pgd) (early ? (pgd_val(pgd) == 0) : \
+(__pa(pgd_val(pgd)) == (unsigned long)__pa(kasan_early_shadow_p4d)))
+#endif
+
#ifdef __PAGETABLE_PUD_FOLDED
#define __p4d_none(early, p4d) (0)
#else
@@ -142,6 +149,19 @@ static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node,
return pud_offset(p4dp, addr);
}

+static p4d_t *__init kasan_p4d_offset(pgd_t *pgdp, unsigned long addr, int node, bool early)
+{
+if (__pgd_none(early, pgdp_get(pgdp))) {
+phys_addr_t p4d_phys = early ?
+__pa_symbol(kasan_early_shadow_p4d) : kasan_alloc_zeroed_page(node);
+if (!early)
+memcpy(__va(p4d_phys), kasan_early_shadow_p4d, sizeof(kasan_early_shadow_p4d));
+pgd_populate(&init_mm, pgdp, (p4d_t *)__va(p4d_phys));
+}
+
+return p4d_offset(pgdp, addr);
+}
+
static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
unsigned long end, int node, bool early)
{
@@ -178,19 +198,19 @@ static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr,
do {
next = pud_addr_end(addr, end);
kasan_pmd_populate(pudp, addr, next, node, early);
-} while (pudp++, addr = next, addr != end);
+} while (pudp++, addr = next, addr != end && __pud_none(early, READ_ONCE(*pudp)));
}

static void __init kasan_p4d_populate(pgd_t *pgdp, unsigned long addr,
unsigned long end, int node, bool early)
{
unsigned long next;
-p4d_t *p4dp = p4d_offset(pgdp, addr);
+p4d_t *p4dp = kasan_p4d_offset(pgdp, addr, node, early);

do {
next = p4d_addr_end(addr, end);
kasan_pud_populate(p4dp, addr, next, node, early);
-} while (p4dp++, addr = next, addr != end);
+} while (p4dp++, addr = next, addr != end && __p4d_none(early, READ_ONCE(*p4dp)));
}

static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
@@ -218,7 +238,7 @@ static void __init kasan_map_populate(unsigned long start, unsigned long end,
asmlinkage void __init kasan_early_init(void)
{
BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
-BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
+BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END + 1, PGDIR_SIZE));
}

static inline void kasan_set_pgd(pgd_t *pgdp, pgd_t pgdval)
@@ -233,7 +253,7 @@ static inline void kasan_set_pgd(pgd_t *pgdp, pgd_t pgdval)
* swapper_pg_dir. pgd_clear() can't be used
* here because it's nop on 2,3-level pagetable setups
*/
-for (; start < end; start += PGDIR_SIZE)
+for (; start < end; start = pgd_addr_end(start, end))
kasan_set_pgd((pgd_t *)pgd_offset_k(start), __pgd(0));
}

@@ -242,6 +262,17 @@ void __init kasan_init(void)
u64 i;
phys_addr_t pa_start, pa_end;

+/*
+ * If PGDIR_SIZE is too large for cpu_vabits, KASAN_SHADOW_END will
+ * overflow UINTPTR_MAX and then looks like a user space address.
+ * For example, PGDIR_SIZE of CONFIG_4KB_4LEVEL is 2^39, which is too
+ * large for Loongson-2K series whose cpu_vabits = 39.
+ */
+if (KASAN_SHADOW_END < vm_map_base) {
+pr_warn("PGDIR_SIZE too large for cpu_vabits, KernelAddressSanitizer disabled.\n");
+return;
+}
+
/*
* PGD was populated as invalid_pmd_table or invalid_pud_table
* in pagetable_init() which depends on how many levels of page
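The clear_pgds() change from a fixed PGDIR_SIZE stride to pgd_addr_end() matters once KASAN_SHADOW_END is an inclusive bound near the top of the address space: a fixed stride can wrap past end and never terminate, while pgd_addr_end() clamps the step at end. A toy model of that clamping (32-bit values chosen to force the wrap; the comparison mirrors the kernel macro, but this is not the kernel's code):

```c
/*
 * Toy model of pgd_addr_end() clamping. With a naive `a += SIZE`,
 * 0xC0000000 + 0x40000000 wraps to 0 and the loop never reaches end.
 */
#include <stdint.h>
#include <stdio.h>

#define TOY_PGDIR_SIZE 0x40000000u

static uint32_t toy_pgd_addr_end(uint32_t addr, uint32_t end)
{
	uint32_t next = (addr + TOY_PGDIR_SIZE) & ~(TOY_PGDIR_SIZE - 1u);

	/* Same trick as the kernel: the -1 makes a wrapped `next` of 0
	 * compare as "past end", so the step is clamped at `end`. */
	return (next - 1u < end - 1u) ? next : end;
}

int main(void)
{
	uint32_t start = 0xC0000000u, end = 0xFFFFFFFFu;

	for (uint32_t a = start; a < end; a = toy_pgd_addr_end(a, end))
		printf("clear pgd at %#x\n", (unsigned)a);	/* prints once */
	return 0;
}
```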
5 changes: 3 additions & 2 deletions arch/parisc/include/asm/mman.h
@@ -2,6 +2,7 @@
#ifndef __ASM_MMAN_H__
#define __ASM_MMAN_H__

+#include <linux/fs.h>
#include <uapi/asm/mman.h>

/* PARISC cannot allow mdwe as it needs writable stacks */
@@ -11,7 +12,7 @@ static inline bool arch_memory_deny_write_exec_supported(void)
}
#define arch_memory_deny_write_exec_supported arch_memory_deny_write_exec_supported

-static inline unsigned long arch_calc_vm_flag_bits(unsigned long flags)
+static inline unsigned long arch_calc_vm_flag_bits(struct file *file, unsigned long flags)
{
/*
* The stack on parisc grows upwards, so if userspace requests memory
@@ -23,6 +24,6 @@ static inline unsigned long arch_calc_vm_flag_bits(struct file *file, unsigned long flags)

return 0;
}
-#define arch_calc_vm_flag_bits(flags) arch_calc_vm_flag_bits(flags)
+#define arch_calc_vm_flag_bits(file, flags) arch_calc_vm_flag_bits(file, flags)

#endif /* __ASM_MMAN_H__ */
29 changes: 18 additions & 11 deletions arch/x86/kvm/lapic.c
@@ -2603,19 +2603,26 @@ void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;

-if (apic->apicv_active) {
-/* irr_pending is always true when apicv is activated. */
-apic->irr_pending = true;
+/*
+ * When APICv is enabled, KVM must always search the IRR for a pending
+ * IRQ, as other vCPUs and devices can set IRR bits even if the vCPU
+ * isn't running. If APICv is disabled, KVM _should_ search the IRR
+ * for a pending IRQ. But KVM currently doesn't ensure *all* hardware,
+ * e.g. CPUs and IOMMUs, has seen the change in state, i.e. searching
+ * the IRR at this time could race with IRQ delivery from hardware that
+ * still sees APICv as being enabled.
+ *
+ * FIXME: Ensure other vCPUs and devices observe the change in APICv
+ * state prior to updating KVM's metadata caches, so that KVM
+ * can safely search the IRR and set irr_pending accordingly.
+ */
+apic->irr_pending = true;
+
+if (apic->apicv_active)
apic->isr_count = 1;
-} else {
-/*
- * Don't clear irr_pending, searching the IRR can race with
- * updates from the CPU as APICv is still active from hardware's
- * perspective. The flag will be cleared as appropriate when
- * KVM injects the interrupt.
- */
+else
apic->isr_count = count_vectors(apic->regs + APIC_ISR);
-}

apic->highest_isr_cache = -1;
}

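For reference on the else branch above: count_vectors() tallies the bits set in the 256-bit ISR, which the APIC splits across eight 32-bit registers spaced 16 bytes apart. A rough standalone equivalent (the layout constants follow the xAPIC register map; treat the helper itself as an approximation, not the kernel's exact code):

```c
/*
 * Sketch of what count_vectors() computes over the ISR: population
 * count of a 256-bit bitmap stored as eight u32 registers at a
 * 16-byte stride.
 */
#include <stdint.h>

static int count_isr_vectors(const uint8_t *apic_regs, unsigned int isr_base)
{
	int count = 0;

	for (int vec = 0; vec < 256; vec += 32) {
		const uint32_t *reg =
			(const uint32_t *)(apic_regs + isr_base + (vec / 32) * 16);
		count += __builtin_popcount(*reg);	/* GCC/Clang builtin */
	}
	return count;
}
```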
30 changes: 25 additions & 5 deletions arch/x86/kvm/vmx/nested.c
@@ -1150,11 +1150,14 @@ static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu,
kvm_make_request(KVM_REQ_HV_TLB_FLUSH, vcpu);

/*
- * If vmcs12 doesn't use VPID, L1 expects linear and combined mappings
- * for *all* contexts to be flushed on VM-Enter/VM-Exit, i.e. it's a
- * full TLB flush from the guest's perspective. This is required even
- * if VPID is disabled in the host as KVM may need to synchronize the
- * MMU in response to the guest TLB flush.
+ * If VPID is disabled, then guest TLB accesses use VPID=0, i.e. the
+ * same VPID as the host, and so architecturally, linear and combined
+ * mappings for VPID=0 must be flushed at VM-Enter and VM-Exit. KVM
+ * emulates L2 sharing L1's VPID=0 by using vpid01 while running L2,
+ * and so KVM must also emulate TLB flush of VPID=0, i.e. vpid01. This
+ * is required if VPID is disabled in KVM, as a TLB flush (there are no
+ * VPIDs) still occurs from L1's perspective, and KVM may need to
+ * synchronize the MMU in response to the guest TLB flush.
*
* Note, using TLB_FLUSH_GUEST is correct even if nested EPT is in use.
* EPT is a special snowflake, as guest-physical mappings aren't
@@ -2229,6 +2232,17 @@ static void prepare_vmcs02_early_rare(struct vcpu_vmx *vmx,

vmcs_write64(VMCS_LINK_POINTER, INVALID_GPA);

+/*
+ * If VPID is disabled, then guest TLB accesses use VPID=0, i.e. the
+ * same VPID as the host. Emulate this behavior by using vpid01 for L2
+ * if VPID is disabled in vmcs12. Note, if VPID is disabled, VM-Enter
+ * and VM-Exit are architecturally required to flush VPID=0, but *only*
+ * VPID=0. I.e. using vpid02 would be ok (so long as KVM emulates the
+ * required flushes), but doing so would cause KVM to over-flush. E.g.
+ * if L1 runs L2 X with VPID12=1, then runs L2 Y with VPID12 disabled,
+ * and then runs L2 X again, then KVM can and should retain TLB entries
+ * for VPID12=1.
+ */
if (enable_vpid) {
if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
@@ -5827,6 +5841,12 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
return nested_vmx_fail(vcpu,
VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);

+/*
+ * Always flush the effective vpid02, i.e. never flush the current VPID
+ * and never explicitly flush vpid01. INVVPID targets a VPID, not a
+ * VMCS, and so whether or not the current vmcs12 has VPID enabled is
+ * irrelevant (and there may not be a loaded vmcs12).
+ */
vpid02 = nested_get_vpid02(vcpu);
switch (type) {
case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
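The helper both hunks lean on, quoted for reference as it reads in this series in vmx/nested.h (worth verifying against the tree): vpid02 falls back to vpid01 when L1 never gave L2 its own VPID.

```c
/* From arch/x86/kvm/vmx/nested.h (quoted for reference): the
 * "effective vpid02" used by handle_invvpid() above.
 */
static inline u16 nested_get_vpid02(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	return vmx->nested.vpid02 ? vmx->nested.vpid02 : vmx->vpid;
}
```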
6 changes: 4 additions & 2 deletions arch/x86/kvm/vmx/vmx.c
@@ -212,9 +212,11 @@ module_param(ple_window_shrink, uint, 0444);
static unsigned int ple_window_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
module_param(ple_window_max, uint, 0444);

-/* Default is SYSTEM mode, 1 for host-guest mode */
+/* Default is SYSTEM mode, 1 for host-guest mode (which is BROKEN) */
int __read_mostly pt_mode = PT_MODE_SYSTEM;
+#ifdef CONFIG_BROKEN
module_param(pt_mode, int, S_IRUGO);
+#endif

static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond);
@@ -3193,7 +3195,7 @@ static void vmx_flush_tlb_all(struct kvm_vcpu *vcpu)

static inline int vmx_get_current_vpid(struct kvm_vcpu *vcpu)
{
-if (is_guest_mode(vcpu))
+if (is_guest_mode(vcpu) && nested_cpu_has_vpid(get_vmcs12(vcpu)))
return nested_get_vpid02(vcpu);
return to_vmx(vcpu)->vpid;
}
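Net behavior of vmx_get_current_vpid() after this fix, sketched as plain logic (a paraphrase with illustrative names, not the kernel's code): L2 only gets the dedicated vpid02 tag when vmcs12 actually enables VPID; otherwise it shares vpid01 with L1, matching the VPID=0 emulation described in the nested.c comments above.

```c
/* Paraphrase of vmx_get_current_vpid() after the fix; names here are
 * illustrative only.
 */
static int effective_vpid(int in_guest_mode, int vmcs12_has_vpid,
			  int vpid01, int vpid02)
{
	if (in_guest_mode && vmcs12_has_vpid)
		return vpid02;	/* L2 with VPID enabled gets its own tag  */
	return vpid01;		/* L1, or L2 sharing L1's VPID=0 context  */
}
```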