49 | 49 | #include <linux/tracepoint-defs.h> |
50 | 50 | #include <linux/unwind_deferred_types.h> |
51 | 51 | #include <asm/kmap_size.h> |
| 52 | +#ifndef COMPILE_OFFSETS |
| 53 | +#include <generated/rq-offsets.h> |
| 54 | +#endif |
52 | 55 |
53 | 56 | /* task_struct member predeclarations (sorted alphabetically): */ |
54 | 57 | struct audit_context; |
@@ -2317,4 +2320,114 @@ static __always_inline void alloc_tag_restore(struct alloc_tag *tag, struct allo |
2317 | 2320 | #define alloc_tag_restore(_tag, _old) do {} while (0) |
2318 | 2321 | #endif |
2319 | 2322 |
| 2323 | +#ifndef MODULE |
| 2324 | +#ifndef COMPILE_OFFSETS |
| 2325 | + |
| 2326 | +extern void ___migrate_enable(void); |
| 2327 | + |
| 2328 | +struct rq; |
| 2329 | +DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); |
| 2330 | + |
| 2331 | +/* |
| 2332 | + * The "struct rq" is not available here, so we can't access the |
| 2333 | + * "runqueues" with this_cpu_ptr(), as the compilation will fail in |
| 2334 | + * this_cpu_ptr() -> raw_cpu_ptr() -> __verify_pcpu_ptr(): |
| 2335 | + * typeof((ptr) + 0) |
| 2336 | + * |
| 2337 | + * So use arch_raw_cpu_ptr()/PERCPU_PTR() directly here. |
| 2338 | + */ |
| 2339 | +#ifdef CONFIG_SMP |
| 2340 | +#define this_rq_raw() arch_raw_cpu_ptr(&runqueues) |
| 2341 | +#else |
| 2342 | +#define this_rq_raw() PERCPU_PTR(&runqueues) |
| 2343 | +#endif |
| 2344 | +#define this_rq_pinned() (*(unsigned int *)((void *)this_rq_raw() + RQ_nr_pinned)) |
| 2345 | + |
| 2346 | +static inline void __migrate_enable(void) |
| 2347 | +{ |
| 2348 | + struct task_struct *p = current; |
| 2349 | + |
| 2350 | +#ifdef CONFIG_DEBUG_PREEMPT |
| 2351 | + /* |
| 2352 | + * Check both overflow from migrate_disable() and superfluous |
| 2353 | + * migrate_enable(). |
| 2354 | + */ |
| 2355 | + if (WARN_ON_ONCE((s16)p->migration_disabled <= 0)) |
| 2356 | + return; |
| 2357 | +#endif |
| 2358 | + |
| 2359 | + if (p->migration_disabled > 1) { |
| 2360 | + p->migration_disabled--; |
| 2361 | + return; |
| 2362 | + } |
| 2363 | + |
| 2364 | + /* |
| 2365 | + * Ensure stop_task runs either before or after this, and that |
| 2366 | + * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule(). |
| 2367 | + */ |
| 2368 | + guard(preempt)(); |
| 2369 | + if (unlikely(p->cpus_ptr != &p->cpus_mask)) |
| 2370 | + ___migrate_enable(); |
| 2371 | + /* |
| 2372 | + * Mustn't clear migration_disabled() until cpus_ptr points back at the |
| 2373 | + * regular cpus_mask, otherwise things that race (eg. |
| 2374 | + * select_fallback_rq) get confused. |
| 2375 | + */ |
| 2376 | + barrier(); |
| 2377 | + p->migration_disabled = 0; |
| 2378 | + this_rq_pinned()--; |
| 2379 | +} |
| 2380 | + |
| 2381 | +static inline void __migrate_disable(void) |
| 2382 | +{ |
| 2383 | + struct task_struct *p = current; |
| 2384 | + |
| 2385 | + if (p->migration_disabled) { |
| 2386 | +#ifdef CONFIG_DEBUG_PREEMPT |
| 2387 | + /* |
| 2388 | + * Warn about overflow half-way through the range. |
| 2389 | + */ |
| 2390 | + WARN_ON_ONCE((s16)p->migration_disabled < 0); |
| 2391 | +#endif |
| 2392 | + p->migration_disabled++; |
| 2393 | + return; |
| 2394 | + } |
| 2395 | + |
| 2396 | + guard(preempt)(); |
| 2397 | + this_rq_pinned()++; |
| 2398 | + p->migration_disabled = 1; |
| 2399 | +} |
| 2400 | +#else /* !COMPILE_OFFSETS */ |
| 2401 | +static inline void __migrate_disable(void) { } |
| 2402 | +static inline void __migrate_enable(void) { } |
| 2403 | +#endif /* !COMPILE_OFFSETS */ |
| 2404 | + |
| 2405 | +/* |
| 2406 | + * So that it is possible to not export the runqueues variable, define and |
| 2407 | + * export migrate_enable/migrate_disable in kernel/sched/core.c too, and use |
| 2408 | + * them for the modules. The macro "INSTANTIATE_EXPORTED_MIGRATE_DISABLE" will |
| 2409 | + * be defined in kernel/sched/core.c. |
| 2410 | + */ |
| 2411 | +#ifndef INSTANTIATE_EXPORTED_MIGRATE_DISABLE |
| 2412 | +static inline void migrate_disable(void) |
| 2413 | +{ |
| 2414 | + __migrate_disable(); |
| 2415 | +} |
| 2416 | + |
| 2417 | +static inline void migrate_enable(void) |
| 2418 | +{ |
| 2419 | + __migrate_enable(); |
| 2420 | +} |
| 2421 | +#else /* INSTANTIATE_EXPORTED_MIGRATE_DISABLE */ |
| 2422 | +extern void migrate_disable(void); |
| 2423 | +extern void migrate_enable(void); |
| 2424 | +#endif /* INSTANTIATE_EXPORTED_MIGRATE_DISABLE */ |
| 2425 | + |
| 2426 | +#else /* MODULE */ |
| 2427 | +extern void migrate_disable(void); |
| 2428 | +extern void migrate_enable(void); |
| 2429 | +#endif /* MODULE */ |
| 2430 | + |
| 2431 | +DEFINE_LOCK_GUARD_0(migrate, migrate_disable(), migrate_enable()) |
| 2432 | + |
2320 | 2433 | #endif |
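
Note that the header above never sees the layout of struct rq: this_rq_pinned() reaches nr_pinned purely through the byte offset RQ_nr_pinned supplied by the generated <generated/rq-offsets.h>. Below is a minimal userspace sketch of that access pattern; the struct layout and main() are invented for illustration, and only the offset-through-opaque-pointer idiom mirrors the patch (the kernel does the arithmetic on void *, a GNU C extension, so plain C uses char * here):

	#include <stddef.h>
	#include <stdio.h>

	struct rq {				/* stand-in for the real, much larger struct rq */
		unsigned long	flags;
		unsigned int	nr_pinned;
	};

	/* What the generated header supplies: a plain integer byte offset. */
	#define RQ_nr_pinned	offsetof(struct rq, nr_pinned)

	/* Field access through an opaque pointer, as this_rq_pinned() does. */
	static unsigned int *rq_pinned(void *rq)
	{
		return (unsigned int *)((char *)rq + RQ_nr_pinned);
	}

	int main(void)
	{
		struct rq rq = { 0 };

		(*rq_pinned(&rq))++;		/* mirrors this_rq_pinned()++ */
		printf("nr_pinned = %u\n", *rq_pinned(&rq));
		return 0;
	}

The point of the indirection is that users of sched.h never need the complete struct rq type, only an integer known at build time.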
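The generated header itself is not part of this hunk. Presumably it comes from the usual kbuild offsets technique: a small file compiled with COMPILE_OFFSETS defined (which is exactly why the #ifndef COMPILE_OFFSETS guards above exist, so sched.h does not demand the header while it is being generated), whose DEFINE() markers a build script turns into #define lines. A sketch of such a generator, along the lines of the arch asm-offsets.c files:

	/* rq-offsets.c -- sketch only; the real generator is not shown in this diff */
	#define COMPILE_OFFSETS

	#include <linux/kbuild.h>	/* DEFINE() */
	#include <linux/types.h>
	#include "sched.h"		/* kernel/sched/sched.h: the real struct rq */

	int main(void)
	{
		/* Emits a marker the build turns into: #define RQ_nr_pinned <offset> */
		DEFINE(RQ_nr_pinned, offsetof(struct rq, nr_pinned));

		return 0;
	}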
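Likewise, the INSTANTIATE_EXPORTED_MIGRATE_DISABLE comment implies a counterpart in kernel/sched/core.c that is not visible in this hunk. Going by that comment, it would look roughly like the following, giving modules real exported symbols while keeping runqueues itself unexported (a sketch, not the actual core.c hunk):

	/* In kernel/sched/core.c, ahead of the sched.h includes: */
	#define INSTANTIATE_EXPORTED_MIGRATE_DISABLE

	void migrate_disable(void)
	{
		__migrate_disable();
	}
	EXPORT_SYMBOL_GPL(migrate_disable);

	void migrate_enable(void)
	{
		__migrate_enable();
	}
	EXPORT_SYMBOL_GPL(migrate_enable);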
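Finally, the DEFINE_LOCK_GUARD_0() line makes the pair available as a scope-based guard from <linux/cleanup.h>. Typical usage, with an invented helper to keep the example short: migration is disabled for the scope, but unlike preempt_disable() the task stays preemptible and may even sleep:

	static void run_on_current_cpu(void)
	{
		guard(migrate)();	/* migrate_disable() now, migrate_enable() at scope exit */

		/*
		 * The task may be preempted or sleep here, but it cannot
		 * change CPUs, so smp_processor_id() stays accurate for
		 * the whole scope.
		 */
		do_something_on(smp_processor_id());	/* hypothetical helper */
	}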