[sig cloud 8] 4.18.0-553.33.1.el8_10 Update Customer Patches #37

Merged

Documentation/atomic_bitops.txt: 2 changes (1 addition, 1 deletion)
@@ -59,7 +59,7 @@ Like with atomic_t, the rule of thumb is:
 - RMW operations that have a return value are fully ordered.
 
 - RMW operations that are conditional are unordered on FAILURE,
-  otherwise the above rules apply. In the case of test_and_{}_bit() operations,
+  otherwise the above rules apply. In the case of test_and_set_bit_lock(),
   if the bit in memory is unchanged by the operation then it is deemed to have
   failed.
 
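For context: test_and_set_bit_lock() and clear_bit_unlock() are the acquire/release pair used for bit locks, and the operation is "deemed to have failed" when the bit was already set, i.e. the lock was already held; only that failure path may be unordered. A minimal kernel-style sketch of the pattern, not part of this PR; the flag word, bit number and function names are hypothetical.

/*
 * Illustrative sketch only -- not part of this PR.
 */
#include <linux/bitops.h>
#include <linux/types.h>

static unsigned long lock_word;		/* hypothetical flag word */
#define MY_LOCK_BIT	0		/* hypothetical bit number */

static bool my_try_lock(void)
{
	/* Non-zero return: the bit was already set, the lock is held elsewhere. */
	if (test_and_set_bit_lock(MY_LOCK_BIT, &lock_word))
		return false;	/* bit unchanged => operation deemed to have failed */
	return true;		/* bit was clear and is now set: ACQUIRE ordering */
}

static void my_unlock(void)
{
	clear_bit_unlock(MY_LOCK_BIT, &lock_word);	/* RELEASE ordering */
}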

arch/x86/kernel/cpu/common.c: 41 changes (24 additions, 17 deletions)
@@ -1021,17 +1021,34 @@ static void get_cpu_address_sizes(struct cpuinfo_x86 *c)
 {
 	u32 eax, ebx, ecx, edx;
 
-	if (c->extended_cpuid_level >= 0x80000008) {
+	if (!cpu_has(c, X86_FEATURE_CPUID) ||
+	    (c->extended_cpuid_level < 0x80000008)) {
+		if (IS_ENABLED(CONFIG_X86_64)) {
+			c->x86_clflush_size = 64;
+			c->x86_phys_bits = 36;
+			c->x86_virt_bits = 48;
+		} else {
+			c->x86_clflush_size = 32;
+			c->x86_virt_bits = 32;
+			c->x86_phys_bits = 32;
+
+			if (cpu_has(c, X86_FEATURE_PAE) ||
+			    cpu_has(c, X86_FEATURE_PSE36))
+				c->x86_phys_bits = 36;
+		}
+	} else {
 		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
 
 		c->x86_virt_bits = (eax >> 8) & 0xff;
 		c->x86_phys_bits = eax & 0xff;
+
+		/* Provide a sane default if not enumerated: */
+		if (!c->x86_clflush_size)
+			c->x86_clflush_size = 32;
 	}
-#ifdef CONFIG_X86_32
-	else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
-		c->x86_phys_bits = 36;
-#endif
+
 	c->x86_cache_bits = c->x86_phys_bits;
+	c->x86_cache_alignment = c->x86_clflush_size;
 }
 
 static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
@@ -1468,17 +1485,6 @@ static void __init cpu_parse_early_param(void)
  */
 static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 {
-#ifdef CONFIG_X86_64
-	c->x86_clflush_size = 64;
-	c->x86_phys_bits = 36;
-	c->x86_virt_bits = 48;
-#else
-	c->x86_clflush_size = 32;
-	c->x86_phys_bits = 32;
-	c->x86_virt_bits = 32;
-#endif
-	c->x86_cache_alignment = c->x86_clflush_size;
-
 	memset(&c->x86_capability, 0, sizeof(c->x86_capability));
 	c->extended_cpuid_level = 0;

@@ -1488,8 +1494,8 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 	get_cpu_vendor(c);
 	get_cpu_cap(c);
 	get_model_name(c); /* RHEL8: get model name for unsupported check */
-	get_cpu_address_sizes(c);
 	setup_force_cpu_cap(X86_FEATURE_CPUID);
+	get_cpu_address_sizes(c);
 	cpu_parse_early_param();
 
 	if (this_cpu->c_early_init)
@@ -1503,6 +1509,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 	} else {
 		identify_cpu_without_cpuid(c);
 		setup_clear_cpu_cap(X86_FEATURE_CPUID);
+		get_cpu_address_sizes(c);
 	}
 
 	setup_force_cpu_cap(X86_FEATURE_ALWAYS);
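For reference, the rewritten get_cpu_address_sizes() takes its values from CPUID leaf 0x80000008 whenever the CPU enumerates it: EAX[7:0] is the physical address width and EAX[15:8] the virtual address width, and the hard-coded 36/48/32-bit values above are now only the fallback for CPUs without that leaf (or without CPUID at all). A small user-space sketch, not part of the patch, that decodes the same leaf with the compiler's <cpuid.h>:

/*
 * Illustrative sketch only -- not part of the patch. User-space decode of
 * CPUID leaf 0x80000008, matching what get_cpu_address_sizes() reads.
 */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx)) {
		/* Leaf not enumerated: the kernel falls back to fixed defaults. */
		puts("CPUID leaf 0x80000008 not available");
		return 1;
	}

	printf("physical address bits: %u\n", eax & 0xff);		/* EAX[7:0]  */
	printf("virtual address bits:  %u\n", (eax >> 8) & 0xff);	/* EAX[15:8] */
	return 0;
}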

include/asm-generic/bitops/atomic.h: 6 changes (0 additions, 6 deletions)
@@ -35,9 +35,6 @@ static inline int test_and_set_bit(unsigned int nr, volatile unsigned long *p)
 	unsigned long mask = BIT_MASK(nr);
 
 	p += BIT_WORD(nr);
-	if (READ_ONCE(*p) & mask)
-		return 1;
-
 	old = atomic_long_fetch_or(mask, (atomic_long_t *)p);
 	return !!(old & mask);
 }
@@ -48,9 +45,6 @@ static inline int test_and_clear_bit(unsigned int nr, volatile unsigned long *p)
 	unsigned long mask = BIT_MASK(nr);
 
 	p += BIT_WORD(nr);
-	if (!(READ_ONCE(*p) & mask))
-		return 0;
-
 	old = atomic_long_fetch_andnot(mask, (atomic_long_t *)p);
 	return !!(old & mask);
 }
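The deleted lines were a fast path that returned early, via a plain READ_ONCE() with no barrier, whenever the bit already had the requested value, so a "failed" test_and_set_bit() or test_and_clear_bit() gave callers no ordering. With the fast path removed, both outcomes go through atomic_long_fetch_or()/atomic_long_fetch_andnot(), which are fully ordered. A user-space sketch, not part of the patch, that mirrors the resulting test_and_set_bit() with C11 atomics:

/*
 * Illustrative sketch only -- not part of the patch. A C11 user-space mirror
 * of the generic test_and_set_bit() after this change: success and failure
 * both go through one fully ordered read-modify-write.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)

static bool my_test_and_set_bit(unsigned int nr, _Atomic unsigned long *p)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long old;

	p += BIT_WORD(nr);
	/* Seq-cst RMW: provides ordering whether or not the bit was already set. */
	old = atomic_fetch_or(p, mask);
	return !!(old & mask);
}

int main(void)
{
	static _Atomic unsigned long word;

	printf("%d\n", my_test_and_set_bit(3, &word));	/* 0: bit was clear, now set */
	printf("%d\n", my_test_and_set_bit(3, &word));	/* 1: bit was already set */
	return 0;
}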