@@ -1392,17 +1392,6 @@ static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
         int i;
         struct rcu_data *rdp = __this_cpu_ptr(rsp->rda);
 
-        /*
-         * If there is an rcu_barrier() operation in progress, then
-         * only the task doing that operation is permitted to adopt
-         * callbacks.  To do otherwise breaks rcu_barrier() and friends
-         * by causing them to fail to wait for the callbacks in the
-         * orphanage.
-         */
-        if (rsp->rcu_barrier_in_progress &&
-            rsp->rcu_barrier_in_progress != current)
-                return;
-
         /* Do the accounting first. */
         rdp->qlen_lazy += rsp->qlen_lazy;
         rdp->qlen += rsp->qlen;
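
What remains of rcu_adopt_orphan_cbs() splices the orphaned callbacks onto the adopting CPU's list and folds the orphanage's counters into the adopter's, as the qlen_lazy/qlen lines above show. The stand-alone sketch below models that splice-and-account step; the types and names (struct cb, struct cb_list, adopt_orphans) are invented for illustration and do not match the kernel's rcu_head/rcu_data layout.

#include <stddef.h>

/*
 * Simplified model of "adopting" an orphaned callback list: splice the
 * orphan list onto the adopter's list, fold the counters, and empty
 * the orphanage.  Illustration only; not kernel code.
 */
struct cb {
        struct cb *next;
        void (*func)(struct cb *);
};

struct cb_list {
        struct cb *head;
        struct cb **tail;       /* points at the final NULL link */
        long qlen;
        long qlen_lazy;
};

static void adopt_orphans(struct cb_list *mine, struct cb_list *orphans)
{
        if (!orphans->head)
                return;

        /* Splice the orphaned callbacks onto the end of our list. */
        *mine->tail = orphans->head;
        mine->tail = orphans->tail;

        /* Fold the counts, mirroring rdp->qlen += rsp->qlen above. */
        mine->qlen += orphans->qlen;
        mine->qlen_lazy += orphans->qlen_lazy;

        /* Empty the orphanage. */
        orphans->head = NULL;
        orphans->tail = &orphans->head;
        orphans->qlen = 0;
        orphans->qlen_lazy = 0;
}
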
@@ -1457,9 +1446,8 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
  * The CPU has been completely removed, and some other CPU is reporting
  * this fact from process context.  Do the remainder of the cleanup,
  * including orphaning the outgoing CPU's RCU callbacks, and also
- * adopting them, if there is no _rcu_barrier() instance running.
- * There can only be one CPU hotplug operation at a time, so no other
- * CPU can be attempting to update rcu_cpu_kthread_task.
+ * adopting them.  There can only be one CPU hotplug operation at a time,
+ * so no other CPU can be attempting to update rcu_cpu_kthread_task.
  */
 static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
 {
@@ -1521,10 +1509,6 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
 
 #else /* #ifdef CONFIG_HOTPLUG_CPU */
 
-static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
-{
-}
-
 static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
 {
 }
@@ -2328,13 +2312,10 @@ static void rcu_barrier_func(void *type)
 static void _rcu_barrier(struct rcu_state *rsp)
 {
         int cpu;
-        unsigned long flags;
         struct rcu_data *rdp;
-        struct rcu_data rd;
         unsigned long snap = ACCESS_ONCE(rsp->n_barrier_done);
         unsigned long snap_done;
 
-        init_rcu_head_on_stack(&rd.barrier_head);
         _rcu_barrier_trace(rsp, "Begin", -1, snap);
 
         /* Take mutex to serialize concurrent rcu_barrier() requests. */
@@ -2374,70 +2355,30 @@ static void _rcu_barrier(struct rcu_state *rsp)
         /*
          * Initialize the count to one rather than to zero in order to
          * avoid a too-soon return to zero in case of a short grace period
-         * (or preemption of this task).  Also flag this task as doing
-         * an rcu_barrier().  This will prevent anyone else from adopting
-         * orphaned callbacks, which could cause otherwise failure if a
-         * CPU went offline and quickly came back online.  To see this,
-         * consider the following sequence of events:
-         *
-         * 1.   We cause CPU 0 to post an rcu_barrier_callback() callback.
-         * 2.   CPU 1 goes offline, orphaning its callbacks.
-         * 3.   CPU 0 adopts CPU 1's orphaned callbacks.
-         * 4.   CPU 1 comes back online.
-         * 5.   We cause CPU 1 to post an rcu_barrier_callback() callback.
-         * 6.   Both rcu_barrier_callback() callbacks are invoked, awakening
-         *      us -- but before CPU 1's orphaned callbacks are invoked!!!
+         * (or preemption of this task).  Exclude CPU-hotplug operations
+         * to ensure that no offline CPU has callbacks queued.
          */
         init_completion(&rsp->barrier_completion);
         atomic_set(&rsp->barrier_cpu_count, 1);
-        raw_spin_lock_irqsave(&rsp->onofflock, flags);
-        rsp->rcu_barrier_in_progress = current;
-        raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
+        get_online_cpus();
 
         /*
-         * Force every CPU with callbacks to register a new callback
-         * that will tell us when all the preceding callbacks have
-         * been invoked.  If an offline CPU has callbacks, wait for
-         * it to either come back online or to finish orphaning those
-         * callbacks.
+         * Force each CPU with callbacks to register a new callback.
+         * When that callback is invoked, we will know that all of the
+         * corresponding CPU's preceding callbacks have been invoked.
          */
-        for_each_possible_cpu(cpu) {
-                preempt_disable();
+        for_each_online_cpu(cpu) {
                 rdp = per_cpu_ptr(rsp->rda, cpu);
-                if (cpu_is_offline(cpu)) {
-                        _rcu_barrier_trace(rsp, "Offline", cpu,
-                                           rsp->n_barrier_done);
-                        preempt_enable();
-                        while (cpu_is_offline(cpu) && ACCESS_ONCE(rdp->qlen))
-                                schedule_timeout_interruptible(1);
-                } else if (ACCESS_ONCE(rdp->qlen)) {
+                if (ACCESS_ONCE(rdp->qlen)) {
                         _rcu_barrier_trace(rsp, "OnlineQ", cpu,
                                            rsp->n_barrier_done);
                         smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
-                        preempt_enable();
                 } else {
                         _rcu_barrier_trace(rsp, "OnlineNQ", cpu,
                                            rsp->n_barrier_done);
-                        preempt_enable();
                 }
         }
-
-        /*
-         * Now that all online CPUs have rcu_barrier_callback() callbacks
-         * posted, we can adopt all of the orphaned callbacks and place
-         * an rcu_barrier_callback() callback after them.  When that is done,
-         * we are guaranteed to have an rcu_barrier_callback() callback
-         * following every callback that could possibly have been
-         * registered before _rcu_barrier() was called.
-         */
-        raw_spin_lock_irqsave(&rsp->onofflock, flags);
-        rcu_adopt_orphan_cbs(rsp);
-        rsp->rcu_barrier_in_progress = NULL;
-        raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
-        atomic_inc(&rsp->barrier_cpu_count);
-        smp_mb__after_atomic_inc(); /* Ensure atomic_inc() before callback. */
-        rd.rsp = rsp;
-        rsp->call(&rd.barrier_head, rcu_barrier_callback);
+        put_online_cpus();
 
         /*
          * Now that we have an rcu_barrier_callback() callback on each
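
The comment replaced above describes a general completion-counting pattern: barrier_cpu_count starts at one so that the initiating task holds an extra reference, and the count cannot reach zero before every per-CPU callback has been enlisted. The user-space model below illustrates the same idea with C11 atomics and pthreads instead of the kernel's atomic_t/completion machinery; all names (pending, complete_one, worker, NWORKERS) are invented for the example, and complete_one() plays the role of both rcu_barrier_callback() and the initiator's final decrement.

/* Build with: cc -std=c11 -pthread model.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NWORKERS 4

static atomic_int pending;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done_cv = PTHREAD_COND_INITIALIZER;

/* Drop one reference; the holder of the last reference signals completion. */
static void complete_one(void)
{
        if (atomic_fetch_sub(&pending, 1) == 1) {
                pthread_mutex_lock(&lock);
                pthread_cond_broadcast(&done_cv);
                pthread_mutex_unlock(&lock);
        }
}

static void *worker(void *arg)
{
        (void)arg;
        /* ... the per-CPU callback's real work would go here ... */
        complete_one();
        return NULL;
}

int main(void)
{
        pthread_t tid[NWORKERS];
        int i;

        /*
         * Start at 1, not 0: the extra count belongs to main(), so a
         * worker that finishes before all workers have been launched
         * cannot drive the counter to zero too soon.
         */
        atomic_store(&pending, 1);
        for (i = 0; i < NWORKERS; i++) {
                atomic_fetch_add(&pending, 1);
                pthread_create(&tid[i], NULL, worker, NULL);
        }
        complete_one();                 /* drop main()'s initial count */

        pthread_mutex_lock(&lock);
        while (atomic_load(&pending) != 0)
                pthread_cond_wait(&done_cv, &lock);
        pthread_mutex_unlock(&lock);

        for (i = 0; i < NWORKERS; i++)
                pthread_join(tid[i], NULL);
        printf("all workers completed\n");
        return 0;
}
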
@@ -2458,8 +2399,6 @@ static void _rcu_barrier(struct rcu_state *rsp)
 
         /* Other rcu_barrier() invocations can now safely proceed. */
         mutex_unlock(&rsp->barrier_mutex);
-
-        destroy_rcu_head_on_stack(&rd.barrier_head);
 }
 
 /**
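
With the rcu_barrier_in_progress flag and the orphan-adoption fallback gone, the patch instead relies on get_online_cpus()/put_online_cpus(): while the hotplug read-side lock is held, no CPU can go offline (or come online), so the for_each_online_cpu() scan and the smp_call_function_single() calls cannot race with a CPU orphaning its callbacks. The fragment below sketches that exclusion pattern in kernel context; visit_online_cpus() is a made-up helper rather than a kernel interface, and it assumes a kernel of this era, where get_online_cpus() is the CPU-hotplug read-side lock (later renamed cpus_read_lock()).

#include <linux/cpu.h>          /* get_online_cpus(), put_online_cpus() */
#include <linux/cpumask.h>      /* for_each_online_cpu() */
#include <linux/smp.h>          /* smp_call_function_single() */

/*
 * Illustrative only: run @func on every currently online CPU while
 * holding off CPU hotplug, so the set of online CPUs cannot change
 * underneath the loop.  This mirrors the structure _rcu_barrier()
 * uses after this patch.
 */
static void visit_online_cpus(void (*func)(void *info), void *info)
{
        int cpu;

        get_online_cpus();              /* block CPU-hotplug operations */
        for_each_online_cpu(cpu)
                smp_call_function_single(cpu, func, info, 1);
        put_online_cpus();              /* allow hotplug to resume */
}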