From af8b79d089a90cae0efa9d81aa37c894de9ad94b Mon Sep 17 00:00:00 2001 From: Andy Ross Date: Wed, 27 Feb 2019 14:29:30 -0800 Subject: [PATCH] tests/kernel/sched/schedule_api: Restore spinning for timer alignment Commit 0cc362f87371 ("tests/kernel: Simplify timer spinning") was added to work around a qemu bug with dropped interrupts on x86_64. But it turns out that the tick alignment that the original implementation provided (fundamentally, it spins waiting on the timer driver to report tick changes) was needed for correct operation on nRF52. This effectively reverts that commit (and refactors all the spinning into a single utility) and replaces it with a workaround targeted to qemu on x86_64 only. Fixes #11721 Signed-off-by: Andy Ross --- tests/kernel/sched/schedule_api/src/main.c | 22 +++++++++++++++++++ .../sched/schedule_api/src/test_sched.h | 2 ++ .../src/test_sched_timeslice_reset.c | 2 +- .../schedule_api/src/test_slice_scheduling.c | 4 ++-- 4 files changed, 27 insertions(+), 3 deletions(-) diff --git a/tests/kernel/sched/schedule_api/src/main.c b/tests/kernel/sched/schedule_api/src/main.c index 353294b1376e..5fbb6966b8f0 100644 --- a/tests/kernel/sched/schedule_api/src/main.c +++ b/tests/kernel/sched/schedule_api/src/main.c @@ -10,6 +10,28 @@ K_THREAD_STACK_DEFINE(tstack, STACK_SIZE); K_THREAD_STACK_ARRAY_DEFINE(tstacks, MAX_NUM_THREAD, STACK_SIZE); +void spin_for_ms(int ms) +{ +#if defined(CONFIG_X86_64) && defined(CONFIG_QEMU_TARGET) + /* qemu-system-x86_64 has a known bug with the hpet device + * where it will drop interrupts if you try to spin on the + * counter.
+ */ + k_busy_wait(ms * 1000); +#else + u32_t t32 = k_uptime_get_32(); + + while (k_uptime_get_32() - t32 < ms) { + /* In the posix arch, a busy loop takes no time, so + * let's make it take some + */ + if (IS_ENABLED(CONFIG_ARCH_POSIX)) { + k_busy_wait(50); + } + } +#endif +} + /** * @brief Test scheduling * diff --git a/tests/kernel/sched/schedule_api/src/test_sched.h b/tests/kernel/sched/schedule_api/src/test_sched.h index 6339bf6ae160..330809ef918f 100644 --- a/tests/kernel/sched/schedule_api/src/test_sched.h +++ b/tests/kernel/sched/schedule_api/src/test_sched.h @@ -23,6 +23,8 @@ struct thread_data { int executed; }; +void spin_for_ms(int ms); + void test_priority_cooperative(void); void test_priority_preemptible(void); void test_yield_cooperative(void); diff --git a/tests/kernel/sched/schedule_api/src/test_sched_timeslice_reset.c b/tests/kernel/sched/schedule_api/src/test_sched_timeslice_reset.c index 273e66c8ec44..e8ebc326f416 100644 --- a/tests/kernel/sched/schedule_api/src/test_sched_timeslice_reset.c +++ b/tests/kernel/sched/schedule_api/src/test_sched_timeslice_reset.c @@ -51,7 +51,7 @@ static void thread_tslice(void *p1, void *p2, void *p3) /* Keep the current thread busy for more than one slice, even though, * when timeslice used up the next thread should be scheduled in. */ - k_busy_wait(1000 * BUSY_MS); + spin_for_ms(BUSY_MS); k_sem_give(&sema); } diff --git a/tests/kernel/sched/schedule_api/src/test_slice_scheduling.c b/tests/kernel/sched/schedule_api/src/test_slice_scheduling.c index 9d6ebeac1ef1..abc9b333f3c6 100644 --- a/tests/kernel/sched/schedule_api/src/test_slice_scheduling.c +++ b/tests/kernel/sched/schedule_api/src/test_slice_scheduling.c @@ -55,7 +55,7 @@ static void thread_tslice(void *p1, void *p2, void *p3) * even though, when timeslice used up the next thread * should be scheduled in.
*/ - k_busy_wait(1000 * BUSY_MS); + spin_for_ms(BUSY_MS); k_sem_give(&sema1); } @@ -102,7 +102,7 @@ void test_slice_scheduling(void) * even though, when timeslice used up the next thread * should be scheduled in. */ - k_busy_wait(1000 * BUSY_MS); + spin_for_ms(BUSY_MS); /* relinquish CPU and wait for each thread to complete*/ for (int i = 0; i < NUM_THREAD; i++) {