Skip to content

Commit d310ec0

Browse files
committed
Merge tag 'perf-core-2021-02-17' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull performance event updates from Ingo Molnar: - Add CPU-PMU support for Intel Sapphire Rapids CPUs - Extend the perf ABI with PERF_SAMPLE_WEIGHT_STRUCT, to offer two-parameter sampling event feedback. Not used yet, but is intended for Golden Cove CPU-PMU, which can provide both the instruction latency and the cache latency information for memory profiling events. - Remove experimental, default-disabled perfmon-v4 counter_freezing support that could only be enabled via a boot option. The hardware is hopelessly broken, we'd like to make sure nobody starts relying on this, as it would only end in tears. - Fix energy/power events on Intel SPR platforms - Simplify the uprobes resume_execution() logic - Misc smaller fixes. * tag 'perf-core-2021-02-17' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: perf/x86/rapl: Fix psys-energy event on Intel SPR platform perf/x86/rapl: Only check lower 32bits for RAPL energy counters perf/x86/rapl: Add msr mask support perf/x86/kvm: Add Cascade Lake Xeon steppings to isolation_ucodes[] perf/x86/intel: Support CPUID 10.ECX to disable fixed counters perf/x86/intel: Add perf core PMU support for Sapphire Rapids perf/x86/intel: Filter unsupported Topdown metrics event perf/x86/intel: Factor out intel_update_topdown_event() perf/core: Add PERF_SAMPLE_WEIGHT_STRUCT perf/intel: Remove Perfmon-v4 counter_freezing support x86/perf: Use static_call for x86_pmu.guest_get_msrs perf/x86/intel/uncore: With > 8 nodes, get pci bus die id from NUMA info perf/x86/intel/uncore: Store the logical die id instead of the physical die id. x86/kprobes: Do not decode opcode in resume_execution()
2 parents 657bd90 + 8bcfdd7 commit d310ec0

File tree

19 files changed

+801
-433
lines changed

19 files changed

+801
-433
lines changed

Documentation/admin-guide/kernel-parameters.txt

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -945,12 +945,6 @@
 			causing system reset or hang due to sending
 			INIT from AP to BSP.

-	perf_v4_pmi=	[X86,INTEL]
-			Format: <bool>
-			Disable Intel PMU counter freezing feature.
-			The feature only exists starting from
-			Arch Perfmon v4 (Skylake and newer).
-
 	disable_ddw	[PPC/PSERIES]
 			Disable Dynamic DMA Window support. Use this
 			to workaround buggy firmware.

arch/powerpc/perf/core-book3s.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2195,7 +2195,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,

 	if (event->attr.sample_type & PERF_SAMPLE_WEIGHT &&
 	    ppmu->get_mem_weight)
-		ppmu->get_mem_weight(&data.weight);
+		ppmu->get_mem_weight(&data.weight.full);

 	if (perf_event_overflow(event, &data, regs))
 		power_pmu_stop(event, 0);

arch/x86/events/core.c

Lines changed: 27 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -81,6 +81,8 @@ DEFINE_STATIC_CALL_NULL(x86_pmu_swap_task_ctx, *x86_pmu.swap_task_ctx);
 DEFINE_STATIC_CALL_NULL(x86_pmu_drain_pebs,   *x86_pmu.drain_pebs);
 DEFINE_STATIC_CALL_NULL(x86_pmu_pebs_aliases, *x86_pmu.pebs_aliases);

+DEFINE_STATIC_CALL_NULL(x86_pmu_guest_get_msrs, *x86_pmu.guest_get_msrs);
+
 u64 __read_mostly hw_cache_event_ids
 				[PERF_COUNT_HW_CACHE_MAX]
 				[PERF_COUNT_HW_CACHE_OP_MAX]
@@ -253,6 +255,8 @@ static bool check_hw_exists(void)
 	if (ret)
 		goto msr_fail;
 	for (i = 0; i < x86_pmu.num_counters_fixed; i++) {
+		if (fixed_counter_disabled(i))
+			continue;
 		if (val & (0x03 << i*4)) {
 			bios_fail = 1;
 			val_fail = val;
@@ -665,6 +669,12 @@ void x86_pmu_disable_all(void)
 	}
 }

+struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
+{
+	return static_call(x86_pmu_guest_get_msrs)(nr);
+}
+EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
+
 /*
  * There may be PMI landing after enabled=0. The PMI hitting could be before or
  * after disable_all.
@@ -1523,6 +1533,8 @@ void perf_event_print_debug(void)
 			cpu, idx, prev_left);
 	}
 	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
+		if (fixed_counter_disabled(idx))
+			continue;
 		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

 		pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
@@ -1923,13 +1935,22 @@ static void x86_pmu_static_call_update(void)

 	static_call_update(x86_pmu_drain_pebs, x86_pmu.drain_pebs);
 	static_call_update(x86_pmu_pebs_aliases, x86_pmu.pebs_aliases);
+
+	static_call_update(x86_pmu_guest_get_msrs, x86_pmu.guest_get_msrs);
 }

 static void _x86_pmu_read(struct perf_event *event)
 {
 	x86_perf_event_update(event);
 }

+static inline struct perf_guest_switch_msr *
+perf_guest_get_msrs_nop(int *nr)
+{
+	*nr = 0;
+	return NULL;
+}
+
 static int __init init_hw_perf_events(void)
 {
 	struct x86_pmu_quirk *quirk;
@@ -1995,12 +2016,17 @@ static int __init init_hw_perf_events(void)
 	pr_info("... generic registers:      %d\n",     x86_pmu.num_counters);
 	pr_info("... value mask:             %016Lx\n", x86_pmu.cntval_mask);
 	pr_info("... max period:             %016Lx\n", x86_pmu.max_period);
-	pr_info("... fixed-purpose events:   %d\n",     x86_pmu.num_counters_fixed);
+	pr_info("... fixed-purpose events:   %lu\n",
+			hweight64((((1ULL << x86_pmu.num_counters_fixed) - 1)
+					<< INTEL_PMC_IDX_FIXED) & x86_pmu.intel_ctrl));
 	pr_info("... event mask:             %016Lx\n", x86_pmu.intel_ctrl);

 	if (!x86_pmu.read)
 		x86_pmu.read = _x86_pmu_read;

+	if (!x86_pmu.guest_get_msrs)
+		x86_pmu.guest_get_msrs = perf_guest_get_msrs_nop;
+
 	x86_pmu_static_call_update();

 	/*

0 commit comments

Comments (0)