x86: Move calibrate_cpu to tsc.c
Move the code to where its only user is. We also need to look at whether
this hardwired hackery might interfere with perfcounters.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
KAGA-KOKO committed Aug 31, 2009
1 parent 454ede7 commit 08047c4
Showing 4 changed files with 55 additions and 56 deletions.
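
Note on the mechanism being moved: calibrate_cpu() programs a spare AMD
K7-family performance counter to count core cycles (event 0x76, "CPU clocks
not halted") while spinning until TICK_COUNT = 10^8 TSC ticks have elapsed.
Since the TSC rate tsc_khz is already known at that point, the core frequency
falls out of the ratio of the two counts. A minimal sketch of the arithmetic,
with illustrative variable names (pmc_delta and tsc_delta are not kernel
identifiers):

        /*
         * pmc_delta: core cycles counted by the perfctr (event 0x76)
         * tsc_delta: TSC ticks elapsed over the same busy-wait window
         * tsc_khz:   known TSC rate in kHz
         *
         * elapsed time = tsc_delta / tsc_khz, therefore:
         * cpu_khz = pmc_delta / time = pmc_delta * tsc_khz / tsc_delta
         */
        unsigned long cpu_khz = pmc_delta * tsc_khz / tsc_delta;

This matches the return statement of the function in the diff below.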
2 changes: 0 additions & 2 deletions arch/x86/include/asm/time.h
@@ -57,6 +57,4 @@ extern void time_init(void);
 
 #endif /* CONFIG_PARAVIRT */
 
-extern unsigned long __init calibrate_cpu(void);
-
 #endif /* _ASM_X86_TIME_H */
1 change: 0 additions & 1 deletion arch/x86/kernel/time_32.c
@@ -21,7 +21,6 @@
 #include <asm/timer.h>
 #include <asm/hpet.h>
 #include <asm/time.h>
-#include <asm/nmi.h>
 
 #if defined(CONFIG_X86_32) && defined(CONFIG_X86_IO_APIC)
 int timer_ack;
51 changes: 0 additions & 51 deletions arch/x86/kernel/time_64.c
@@ -21,7 +21,6 @@
 #include <asm/timer.h>
 #include <asm/hpet.h>
 #include <asm/time.h>
-#include <asm/nmi.h>
 
 #if defined(CONFIG_X86_32) && defined(CONFIG_X86_IO_APIC)
 int timer_ack;
@@ -84,56 +83,6 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-/*
- * calibrate_cpu is used on systems with fixed rate TSCs to determine
- * processor frequency
- */
-#define TICK_COUNT 100000000
-unsigned long __init calibrate_cpu(void)
-{
-       int tsc_start, tsc_now;
-       int i, no_ctr_free;
-       unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0;
-       unsigned long flags;
-
-       for (i = 0; i < 4; i++)
-               if (avail_to_resrv_perfctr_nmi_bit(i))
-                       break;
-       no_ctr_free = (i == 4);
-       if (no_ctr_free) {
-               WARN(1, KERN_WARNING "Warning: AMD perfctrs busy ... "
-                    "cpu_khz value may be incorrect.\n");
-               i = 3;
-               rdmsrl(MSR_K7_EVNTSEL3, evntsel3);
-               wrmsrl(MSR_K7_EVNTSEL3, 0);
-               rdmsrl(MSR_K7_PERFCTR3, pmc3);
-       } else {
-               reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i);
-               reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
-       }
-       local_irq_save(flags);
-       /* start measuring cycles, incrementing from 0 */
-       wrmsrl(MSR_K7_PERFCTR0 + i, 0);
-       wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76);
-       rdtscl(tsc_start);
-       do {
-               rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
-               tsc_now = get_cycles();
-       } while ((tsc_now - tsc_start) < TICK_COUNT);
-
-       local_irq_restore(flags);
-       if (no_ctr_free) {
-               wrmsrl(MSR_K7_EVNTSEL3, 0);
-               wrmsrl(MSR_K7_PERFCTR3, pmc3);
-               wrmsrl(MSR_K7_EVNTSEL3, evntsel3);
-       } else {
-               release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
-               release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
-       }
-
-       return pmc_now * tsc_khz / (tsc_now - tsc_start);
-}
-
 static struct irqaction irq0 = {
        .handler = timer_interrupt,
        .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL | IRQF_TIMER,
57 changes: 55 additions & 2 deletions arch/x86/kernel/tsc.c
@@ -17,6 +17,7 @@
 #include <asm/time.h>
 #include <asm/delay.h>
 #include <asm/hypervisor.h>
+#include <asm/nmi.h>
 
 unsigned int __read_mostly cpu_khz;    /* TSC clocks / usec, not used here */
 EXPORT_SYMBOL(cpu_khz);
@@ -852,6 +853,60 @@ static void __init init_tsc_clocksource(void)
        clocksource_register(&clocksource_tsc);
 }
 
+#ifdef CONFIG_X86_64
+/*
+ * calibrate_cpu is used on systems with fixed rate TSCs to determine
+ * processor frequency
+ */
+#define TICK_COUNT 100000000
+static unsigned long __init calibrate_cpu(void)
+{
+       int tsc_start, tsc_now;
+       int i, no_ctr_free;
+       unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0;
+       unsigned long flags;
+
+       for (i = 0; i < 4; i++)
+               if (avail_to_resrv_perfctr_nmi_bit(i))
+                       break;
+       no_ctr_free = (i == 4);
+       if (no_ctr_free) {
+               WARN(1, KERN_WARNING "Warning: AMD perfctrs busy ... "
+                    "cpu_khz value may be incorrect.\n");
+               i = 3;
+               rdmsrl(MSR_K7_EVNTSEL3, evntsel3);
+               wrmsrl(MSR_K7_EVNTSEL3, 0);
+               rdmsrl(MSR_K7_PERFCTR3, pmc3);
+       } else {
+               reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i);
+               reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
+       }
+       local_irq_save(flags);
+       /* start measuring cycles, incrementing from 0 */
+       wrmsrl(MSR_K7_PERFCTR0 + i, 0);
+       wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76);
+       rdtscl(tsc_start);
+       do {
+               rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
+               tsc_now = get_cycles();
+       } while ((tsc_now - tsc_start) < TICK_COUNT);
+
+       local_irq_restore(flags);
+       if (no_ctr_free) {
+               wrmsrl(MSR_K7_EVNTSEL3, 0);
+               wrmsrl(MSR_K7_PERFCTR3, pmc3);
+               wrmsrl(MSR_K7_EVNTSEL3, evntsel3);
+       } else {
+               release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
+               release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
+       }
+
+       return pmc_now * tsc_khz / (tsc_now - tsc_start);
+}
+#else
+static inline unsigned long calibrate_cpu(void) { return cpu_khz; }
+#endif
+
 void __init tsc_init(void)
 {
        u64 lpj;
@@ -870,11 +925,9 @@ void __init tsc_init(void)
                return;
        }
 
-#ifdef CONFIG_X86_64
        if (cpu_has(&boot_cpu_data, X86_FEATURE_CONSTANT_TSC) &&
            (boot_cpu_data.x86_vendor == X86_VENDOR_AMD))
                cpu_khz = calibrate_cpu();
-#endif
 
        printk("Detected %lu.%03lu MHz processor.\n",
                (unsigned long)cpu_khz / 1000,
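
For readers decoding the event-select value 1 << 22 | 3 << 16 | 0x76 written
above: on AMD K7-family EVNTSEL MSRs, bit 22 is the counter enable bit, bits
17:16 enable counting in kernel and user mode, and the low byte selects the
event; 0x76 is the "CPU clocks not halted" event. A sketch with made-up
constant names (the kernel writes the literal expression, not these macros):

        #define K7_EVNTSEL_EN        (1UL << 22)  /* enable the counter */
        #define K7_EVNTSEL_OS        (1UL << 17)  /* count in kernel mode */
        #define K7_EVNTSEL_USR       (1UL << 16)  /* count in user mode */
        #define K7_EVENT_CPU_CLOCKS  0x76         /* CPU clocks not halted */

        unsigned long evntsel = K7_EVNTSEL_EN | K7_EVNTSEL_OS |
                                K7_EVNTSEL_USR | K7_EVENT_CPU_CLOCKS;

The no_ctr_free fallback is the "hardwired hackery" the changelog worries
about: when no counter is free it saves and temporarily reuses
MSR_K7_EVNTSEL3/MSR_K7_PERFCTR3 without going through the reservation
framework, which could clash with a perfcounter user that owns counter 3.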
