Merge tag 'x86_urgent_for_v6.12_rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Borislav Petkov:

 - Prevent a certain range of pages which get marked as hypervisor-only
   from being allocated to a CoCo (SNP) guest which cannot use them and
   thus fails to boot

 - Fix the microcode loader on AMD to pay attention to the stepping of a
   patch and to handle the case where a BIOS config option splits the
   machine into logical NUMA nodes per L3 cache slice

 - Disable building LAM by default due to security concerns

* tag 'x86_urgent_for_v6.12_rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/sev: Ensure that RMP table fixups are reserved
  x86/microcode/AMD: Split load_microcode_amd()
  x86/microcode/AMD: Pay attention to the stepping dynamically
  x86/lam: Disable ADDRESS_MASKING in most cases
torvalds committed Oct 27, 2024
2 parents f69a1ac + 88a921a commit ea1fda8
Showing 3 changed files with 38 additions and 16 deletions.
1 change: 1 addition & 0 deletions arch/x86/Kconfig

@@ -2257,6 +2257,7 @@ config RANDOMIZE_MEMORY_PHYSICAL_PADDING
 config ADDRESS_MASKING
         bool "Linear Address Masking support"
         depends on X86_64
+        depends on COMPILE_TEST || !CPU_MITIGATIONS # wait for LASS
         help
           Linear Address Masking (LAM) modifies the checking that is applied
           to 64-bit linear addresses, allowing software to use the
           untranslated address bits for metadata.
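For context on what this option gates: with LAM active in its LAM_U57 form, the CPU ignores bits 62:57 of a user pointer when checking canonicality, so software can carry a small tag in those bits (userspace opts in at runtime; ADDRESS_MASKING builds the kernel side). A minimal, self-contained sketch of the tagging arithmetic follows; the macro and function names are illustrative, not from the kernel:

#include <stdint.h>

/* Illustrative only: LAM_U57 makes hardware ignore bits 62:57 of a
 * user pointer, leaving 6 bits of payload for software metadata
 * (e.g. sanitizer tags). */
#define LAM_U57_SHIFT 57
#define LAM_U57_MASK  (0x3FULL << LAM_U57_SHIFT)

static inline void *tag_ptr(void *p, uint64_t tag)
{
        /* Store a 6-bit tag in the ignored bits. */
        return (void *)(((uint64_t)p & ~LAM_U57_MASK) |
                        ((tag << LAM_U57_SHIFT) & LAM_U57_MASK));
}

static inline void *untag_ptr(void *p)
{
        /* Strip the tag wherever the raw address is required. */
        return (void *)((uint64_t)p & ~LAM_U57_MASK);
}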
51 changes: 35 additions & 16 deletions arch/x86/kernel/cpu/microcode/amd.c

@@ -584,7 +584,7 @@ void __init load_ucode_amd_bsp(struct early_load_data *ed, unsigned int cpuid_1_
         native_rdmsr(MSR_AMD64_PATCH_LEVEL, ed->new_rev, dummy);
 }
 
-static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size);
+static enum ucode_state _load_microcode_amd(u8 family, const u8 *data, size_t size);
 
 static int __init save_microcode_in_initrd(void)
 {
@@ -605,24 +605,27 @@ static int __init save_microcode_in_initrd(void)
         if (!desc.mc)
                 return -EINVAL;
 
-        ret = load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size);
+        ret = _load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size);
         if (ret > UCODE_UPDATED)
                 return -EINVAL;
 
         return 0;
 }
 early_initcall(save_microcode_in_initrd);
 
-static inline bool patch_cpus_equivalent(struct ucode_patch *p, struct ucode_patch *n)
+static inline bool patch_cpus_equivalent(struct ucode_patch *p,
+                                         struct ucode_patch *n,
+                                         bool ignore_stepping)
 {
         /* Zen and newer hardcode the f/m/s in the patch ID */
         if (x86_family(bsp_cpuid_1_eax) >= 0x17) {
                 union cpuid_1_eax p_cid = ucode_rev_to_cpuid(p->patch_id);
                 union cpuid_1_eax n_cid = ucode_rev_to_cpuid(n->patch_id);
 
-                /* Zap stepping */
-                p_cid.stepping = 0;
-                n_cid.stepping = 0;
+                if (ignore_stepping) {
+                        p_cid.stepping = 0;
+                        n_cid.stepping = 0;
+                }
 
                 return p_cid.full == n_cid.full;
         } else {
@@ -644,13 +647,13 @@ static struct ucode_patch *cache_find_patch(struct ucode_cpu_info *uci, u16 equi
         WARN_ON_ONCE(!n.patch_id);
 
         list_for_each_entry(p, &microcode_cache, plist)
-                if (patch_cpus_equivalent(p, &n))
+                if (patch_cpus_equivalent(p, &n, false))
                         return p;
 
         return NULL;
 }
 
-static inline bool patch_newer(struct ucode_patch *p, struct ucode_patch *n)
+static inline int patch_newer(struct ucode_patch *p, struct ucode_patch *n)
 {
         /* Zen and newer hardcode the f/m/s in the patch ID */
         if (x86_family(bsp_cpuid_1_eax) >= 0x17) {
@@ -659,6 +662,9 @@ static inline bool patch_newer(struct ucode_patch *p, struct ucode_patch *n)
                 zp.ucode_rev = p->patch_id;
                 zn.ucode_rev = n->patch_id;
 
+                if (zn.stepping != zp.stepping)
+                        return -1;
+
                 return zn.rev > zp.rev;
         } else {
                 return n->patch_id > p->patch_id;
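The bool-to-int change above is the heart of the stepping fix: a patch for a different stepping now reports -1, "not applicable", which is distinct from 0, "same stepping but not newer". A self-contained sketch of that contract; the bitfield layout mirrors the kernel's zen_patch_rev union, but everything below is an illustration, not the kernel code:

#include <stdint.h>
#include <stdio.h>

/* Zen+ patch IDs encode family/model/stepping; this layout mirrors the
 * kernel's zen_patch_rev union (assumed here for the demo). */
union zen_patch_rev {
        struct {
                uint32_t rev       : 8,
                         stepping  : 4,
                         model     : 4,
                         rsvd      : 4,
                         ext_model : 4,
                         ext_fam   : 8;
        };
        uint32_t ucode_rev;
};

/* New contract: -1 = different stepping (patch not applicable),
 *                0 = same stepping but not newer, 1 = strictly newer. */
static int patch_newer(uint32_t p_id, uint32_t n_id)
{
        union zen_patch_rev zp = { .ucode_rev = p_id };
        union zen_patch_rev zn = { .ucode_rev = n_id };

        if (zn.stepping != zp.stepping)
                return -1;

        return zn.rev > zp.rev;
}

int main(void)
{
        printf("%d\n", patch_newer(0x0a001234, 0x0a001235)); /*  1: newer rev, same stepping */
        printf("%d\n", patch_newer(0x0a001234, 0x0a001334)); /* -1: different stepping */
        printf("%d\n", patch_newer(0x0a001235, 0x0a001234)); /*  0: not newer */
        return 0;
}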
@@ -668,10 +674,14 @@ static inline bool patch_newer(struct ucode_patch *p, struct ucode_patch *n)
 static void update_cache(struct ucode_patch *new_patch)
 {
         struct ucode_patch *p;
+        int ret;
 
         list_for_each_entry(p, &microcode_cache, plist) {
-                if (patch_cpus_equivalent(p, new_patch)) {
-                        if (!patch_newer(p, new_patch)) {
+                if (patch_cpus_equivalent(p, new_patch, true)) {
+                        ret = patch_newer(p, new_patch);
+                        if (ret < 0)
+                                continue;
+                        else if (!ret) {
                                 /* we already have the latest patch */
                                 kfree(new_patch->data);
                                 kfree(new_patch);
@@ -944,21 +954,30 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
         return UCODE_OK;
 }
 
-static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size)
+static enum ucode_state _load_microcode_amd(u8 family, const u8 *data, size_t size)
 {
-        struct cpuinfo_x86 *c;
-        unsigned int nid, cpu;
-        struct ucode_patch *p;
         enum ucode_state ret;
 
         /* free old equiv table */
         free_equiv_cpu_table();
 
         ret = __load_microcode_amd(family, data, size);
-        if (ret != UCODE_OK) {
+        if (ret != UCODE_OK)
                 cleanup();
+
+        return ret;
+}
+
+static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size)
+{
+        struct cpuinfo_x86 *c;
+        unsigned int nid, cpu;
+        struct ucode_patch *p;
+        enum ucode_state ret;
+
+        ret = _load_microcode_amd(family, data, size);
+        if (ret != UCODE_OK)
                 return ret;
-        }
 
         for_each_node(nid) {
                 cpu = cpumask_first(cpumask_of_node(nid));
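The tail of load_microcode_amd() is truncated by the view above. Reconstructed roughly (a sketch for context, not the verbatim file), it is the per-NUMA-node walk that the split keeps out of the early save_microcode_in_initrd() path, and the part that matters once a BIOS option carves one logical node per L3 slice whose CPUs may differ in stepping:

        /* Sketch of the function's remainder: check the first CPU of
         * each node against its own (f/m/s-aware) cached patch. */
        for_each_node(nid) {
                cpu = cpumask_first(cpumask_of_node(nid));
                c = &cpu_data(cpu);

                p = find_patch(cpu);            /* per-CPU patch lookup */
                if (!p)
                        continue;

                if (c->microcode >= p->patch_id)
                        continue;

                ret = UCODE_NEW;                /* some node wants the update */
        }

        return ret;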
2 changes: 2 additions & 0 deletions arch/x86/virt/svm/sev.c

@@ -173,6 +173,8 @@ static void __init __snp_fixup_e820_tables(u64 pa)
                 e820__range_update(pa, PMD_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED);
                 e820__range_update_table(e820_table_kexec, pa, PMD_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED);
                 e820__range_update_table(e820_table_firmware, pa, PMD_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED);
+                if (!memblock_is_region_reserved(pa, PMD_SIZE))
+                        memblock_reserve(pa, PMD_SIZE);
         }
 }
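Why the two added lines are needed: the e820 updates only rewrite the maps handed to kexec and to the firmware tables, while the running kernel's memblock allocator was populated from e820 long before this fixup runs. Without an explicit memblock reservation, the PMD-sized page overlapping the RMP table could still be handed out, e.g. to back a CoCo (SNP) guest that cannot use it. A sketch of the guard pattern, with a hypothetical helper name:

#include <linux/memblock.h>
#include <linux/pgtable.h>

/* Sketch of the fix's pattern: keep a PMD-sized physical range away
 * from the allocator. memblock_reserve() tolerates overlapping
 * reservations; the check merely skips redundant work.
 * reserve_rmp_fixup_page() is an illustrative name, not from the tree. */
static void __init reserve_rmp_fixup_page(u64 pa)
{
        if (!memblock_is_region_reserved(pa, PMD_SIZE))
                memblock_reserve(pa, PMD_SIZE);
}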
