Lines matching refs:rdp (identifier cross-reference; each hit shows the source line number, the matching line, the enclosing function, and whether rdp is declared there as a local or an argument)

151 static void rcu_report_exp_rdp(struct rcu_data *rdp);
153 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
154 static bool rcu_rdp_is_offloaded(struct rcu_data *rdp);
240 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_get_n_cbs_cpu() local
242 if (rcu_segcblist_is_enabled(&rdp->cblist)) in rcu_get_n_cbs_cpu()
243 return rcu_segcblist_n_cbs(&rdp->cblist); in rcu_get_n_cbs_cpu()
316 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_dynticks_eqs_online() local
318 if (atomic_read(&rdp->dynticks) & 0x1) in rcu_dynticks_eqs_online()
337 static int rcu_dynticks_snap(struct rcu_data *rdp) in rcu_dynticks_snap() argument
340 return atomic_read_acquire(&rdp->dynticks); in rcu_dynticks_snap()
355 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_is_idle_cpu() local
357 return rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp)); in rcu_is_idle_cpu()
365 static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap) in rcu_dynticks_in_eqs_since() argument
367 return snap != rcu_dynticks_snap(rdp); in rcu_dynticks_in_eqs_since()
376 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_dynticks_zero_in_eqs() local
380 snap = atomic_read(&rdp->dynticks) & ~0x1; in rcu_dynticks_zero_in_eqs()
388 return snap == atomic_read(&rdp->dynticks); in rcu_dynticks_zero_in_eqs()
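
Note: the dynticks helpers above all rely on one convention visible in these hits: the low bit of rdp->dynticks is set while the CPU is outside an extended quiescent state (EQS), so an even sample means "idle from RCU's point of view", and any later change in the counter means the CPU has passed through an EQS since the sample was taken. A minimal sketch of that snapshot-and-compare idiom, under a hypothetical helper name (the real consumers are dyntick_save_progress_counter() and rcu_implicit_dynticks_qs() further down):

    /* Illustrative helper only; not a function in this listing. */
    static bool cpu_has_been_quiescent(struct rcu_data *rdp, int snap)
    {
            if (rcu_dynticks_in_eqs(snap))          /* even: in EQS when sampled */
                    return true;
            return rcu_dynticks_in_eqs_since(rdp, snap); /* counter moved since snap */
    }

The grace-period kthread takes the snapshot on its first forcing-of-quiescent-states pass and applies the comparison on later passes; see the force_qs_rnp() hits below.
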
552 static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
611 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_eqs_enter() local
613 WARN_ON_ONCE(rdp->dynticks_nmi_nesting != DYNTICK_IRQ_NONIDLE); in rcu_eqs_enter()
614 WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); in rcu_eqs_enter()
616 rdp->dynticks_nesting == 0); in rcu_eqs_enter()
617 if (rdp->dynticks_nesting != 1) { in rcu_eqs_enter()
619 rdp->dynticks_nesting--; in rcu_eqs_enter()
625 trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, atomic_read(&rdp->dynticks)); in rcu_eqs_enter()
631 instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks)); in rcu_eqs_enter()
634 WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */ in rcu_eqs_enter()
685 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_irq_work_resched() local
694 if (do_nocb_deferred_wakeup(rdp) && need_resched()) { in rcu_irq_work_resched()
743 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_nmi_exit() local
751 WARN_ON_ONCE(rdp->dynticks_nmi_nesting <= 0); in rcu_nmi_exit()
758 if (rdp->dynticks_nmi_nesting != 1) { in rcu_nmi_exit()
759 trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2, in rcu_nmi_exit()
760 atomic_read(&rdp->dynticks)); in rcu_nmi_exit()
761 WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */ in rcu_nmi_exit()
762 rdp->dynticks_nmi_nesting - 2); in rcu_nmi_exit()
768 trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks)); in rcu_nmi_exit()
769 WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */ in rcu_nmi_exit()
775 instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks)); in rcu_nmi_exit()
854 struct rcu_data *rdp; in rcu_eqs_exit() local
858 rdp = this_cpu_ptr(&rcu_data); in rcu_eqs_exit()
859 oldval = rdp->dynticks_nesting; in rcu_eqs_exit()
863 rdp->dynticks_nesting++; in rcu_eqs_exit()
873 instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks)); in rcu_eqs_exit()
876 trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, atomic_read(&rdp->dynticks)); in rcu_eqs_exit()
878 WRITE_ONCE(rdp->dynticks_nesting, 1); in rcu_eqs_exit()
879 WARN_ON_ONCE(rdp->dynticks_nmi_nesting); in rcu_eqs_exit()
880 WRITE_ONCE(rdp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE); in rcu_eqs_exit()
946 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in __rcu_irq_enter_check_tick() local
955 if (!tick_nohz_full_cpu(rdp->cpu) || in __rcu_irq_enter_check_tick()
956 !READ_ONCE(rdp->rcu_urgent_qs) || in __rcu_irq_enter_check_tick()
957 READ_ONCE(rdp->rcu_forced_tick)) { in __rcu_irq_enter_check_tick()
969 raw_spin_lock_rcu_node(rdp->mynode); in __rcu_irq_enter_check_tick()
970 if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) { in __rcu_irq_enter_check_tick()
973 WRITE_ONCE(rdp->rcu_forced_tick, true); in __rcu_irq_enter_check_tick()
974 tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU); in __rcu_irq_enter_check_tick()
976 raw_spin_unlock_rcu_node(rdp->mynode); in __rcu_irq_enter_check_tick()
995 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_nmi_enter() local
998 WARN_ON_ONCE(rdp->dynticks_nmi_nesting < 0); in rcu_nmi_enter()
1025 instrument_atomic_read(&rdp->dynticks, sizeof(rdp->dynticks)); in rcu_nmi_enter()
1027 instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks)); in rcu_nmi_enter()
1038 rdp->dynticks_nmi_nesting, in rcu_nmi_enter()
1039 rdp->dynticks_nmi_nesting + incby, atomic_read(&rdp->dynticks)); in rcu_nmi_enter()
1041 WRITE_ONCE(rdp->dynticks_nmi_nesting, /* Prevent store tearing. */ in rcu_nmi_enter()
1042 rdp->dynticks_nmi_nesting + incby); in rcu_nmi_enter()
1094 static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp) in rcu_disable_urgency_upon_qs() argument
1096 raw_lockdep_assert_held_rcu_node(rdp->mynode); in rcu_disable_urgency_upon_qs()
1097 WRITE_ONCE(rdp->rcu_urgent_qs, false); in rcu_disable_urgency_upon_qs()
1098 WRITE_ONCE(rdp->rcu_need_heavy_qs, false); in rcu_disable_urgency_upon_qs()
1099 if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) { in rcu_disable_urgency_upon_qs()
1100 tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU); in rcu_disable_urgency_upon_qs()
1101 WRITE_ONCE(rdp->rcu_forced_tick, false); in rcu_disable_urgency_upon_qs()
1162 struct rcu_data *rdp; in rcu_lockdep_current_cpu_online() local
1169 rdp = this_cpu_ptr(&rcu_data); in rcu_lockdep_current_cpu_online()
1170 rnp = rdp->mynode; in rcu_lockdep_current_cpu_online()
1171 if (rdp->grpmask & rcu_rnp_online_cpus(rnp) || READ_ONCE(rnp->ofl_seq) & 0x1) in rcu_lockdep_current_cpu_online()
1187 static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_gpnum_ovf() argument
1190 if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4, in rcu_gpnum_ovf()
1192 WRITE_ONCE(rdp->gpwrap, true); in rcu_gpnum_ovf()
1193 if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq)) in rcu_gpnum_ovf()
1194 rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4; in rcu_gpnum_ovf()
1202 static int dyntick_save_progress_counter(struct rcu_data *rdp) in dyntick_save_progress_counter() argument
1204 rdp->dynticks_snap = rcu_dynticks_snap(rdp); in dyntick_save_progress_counter()
1205 if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) { in dyntick_save_progress_counter()
1206 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti")); in dyntick_save_progress_counter()
1207 rcu_gpnum_ovf(rdp->mynode, rdp); in dyntick_save_progress_counter()
1219 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) in rcu_implicit_dynticks_qs() argument
1222 struct rcu_node *rnp = rdp->mynode; in rcu_implicit_dynticks_qs()
1232 if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) { in rcu_implicit_dynticks_qs()
1233 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti")); in rcu_implicit_dynticks_qs()
1234 rcu_gpnum_ovf(rnp, rdp); in rcu_implicit_dynticks_qs()
1256 if (WARN_ON_ONCE(!(rdp->grpmask & rcu_rnp_online_cpus(rnp)))) { in rcu_implicit_dynticks_qs()
1266 onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp)); in rcu_implicit_dynticks_qs()
1268 __func__, rdp->cpu, ".o"[onl], in rcu_implicit_dynticks_qs()
1269 (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags, in rcu_implicit_dynticks_qs()
1270 (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags); in rcu_implicit_dynticks_qs()
1286 if (!READ_ONCE(rdp->rcu_need_heavy_qs) && in rcu_implicit_dynticks_qs()
1290 WRITE_ONCE(rdp->rcu_need_heavy_qs, true); in rcu_implicit_dynticks_qs()
1292 smp_store_release(&rdp->rcu_urgent_qs, true); in rcu_implicit_dynticks_qs()
1294 WRITE_ONCE(rdp->rcu_urgent_qs, true); in rcu_implicit_dynticks_qs()
1305 if (tick_nohz_full_cpu(rdp->cpu) && in rcu_implicit_dynticks_qs()
1306 (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) || in rcu_implicit_dynticks_qs()
1308 WRITE_ONCE(rdp->rcu_urgent_qs, true); in rcu_implicit_dynticks_qs()
1309 resched_cpu(rdp->cpu); in rcu_implicit_dynticks_qs()
1310 WRITE_ONCE(rdp->last_fqs_resched, jiffies); in rcu_implicit_dynticks_qs()
1322 READ_ONCE(rdp->last_fqs_resched) + jtsq)) { in rcu_implicit_dynticks_qs()
1323 resched_cpu(rdp->cpu); in rcu_implicit_dynticks_qs()
1324 WRITE_ONCE(rdp->last_fqs_resched, jiffies); in rcu_implicit_dynticks_qs()
1327 !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq && in rcu_implicit_dynticks_qs()
1328 (rnp->ffmask & rdp->grpmask)) { in rcu_implicit_dynticks_qs()
1329 rdp->rcu_iw_pending = true; in rcu_implicit_dynticks_qs()
1330 rdp->rcu_iw_gp_seq = rnp->gp_seq; in rcu_implicit_dynticks_qs()
1331 irq_work_queue_on(&rdp->rcu_iw, rdp->cpu); in rcu_implicit_dynticks_qs()
1339 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp, in trace_rcu_this_gp() argument
1363 static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp, in rcu_start_this_gp() argument
1379 trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf")); in rcu_start_this_gp()
1387 trace_rcu_this_gp(rnp, rdp, gp_seq_req, in rcu_start_this_gp()
1399 trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, in rcu_start_this_gp()
1411 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot")); in rcu_start_this_gp()
1414 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot")); in rcu_start_this_gp()
1418 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread")); in rcu_start_this_gp()
1427 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed); in rcu_start_this_gp()
1441 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_future_gp_cleanup() local
1446 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq, in rcu_future_gp_cleanup()
1490 static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_accelerate_cbs() argument
1495 rcu_lockdep_assert_cblist_protected(rdp); in rcu_accelerate_cbs()
1499 if (!rcu_segcblist_pend_cbs(&rdp->cblist)) in rcu_accelerate_cbs()
1502 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPreAcc")); in rcu_accelerate_cbs()
1515 if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req)) in rcu_accelerate_cbs()
1516 ret = rcu_start_this_gp(rnp, rdp, gp_seq_req); in rcu_accelerate_cbs()
1519 if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL)) in rcu_accelerate_cbs()
1524 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPostAcc")); in rcu_accelerate_cbs()
1537 struct rcu_data *rdp) in rcu_accelerate_cbs_unlocked() argument
1542 rcu_lockdep_assert_cblist_protected(rdp); in rcu_accelerate_cbs_unlocked()
1544 if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) { in rcu_accelerate_cbs_unlocked()
1546 (void)rcu_segcblist_accelerate(&rdp->cblist, c); in rcu_accelerate_cbs_unlocked()
1550 needwake = rcu_accelerate_cbs(rnp, rdp); in rcu_accelerate_cbs_unlocked()
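
Note: rcu_accelerate_cbs() must be called with the leaf rcu_node lock held and returns true when the grace-period kthread needs a wakeup. A caller-side sketch of the pattern this implies, mirroring what rcu_accelerate_cbs_unlocked() does (the lock/unlock wrappers appear elsewhere in these hits; rcu_gp_kthread_wake() is assumed to be the usual wakeup helper in this file):

    /* Caller-side sketch; rnp/rdp assumed valid and interrupts disabled. */
    static void accelerate_and_wake_sketch(struct rcu_node *rnp, struct rcu_data *rdp)
    {
            bool needwake;

            raw_spin_lock_rcu_node(rnp);             /* protect ->cblist/gp_seq state */
            needwake = rcu_accelerate_cbs(rnp, rdp); /* assign a gp_seq to new CBs */
            raw_spin_unlock_rcu_node(rnp);
            if (needwake)
                    rcu_gp_kthread_wake();           /* a new grace period is needed */
    }
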
1566 static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_advance_cbs() argument
1568 rcu_lockdep_assert_cblist_protected(rdp); in rcu_advance_cbs()
1572 if (!rcu_segcblist_pend_cbs(&rdp->cblist)) in rcu_advance_cbs()
1579 rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq); in rcu_advance_cbs()
1582 return rcu_accelerate_cbs(rnp, rdp); in rcu_advance_cbs()
1590 struct rcu_data *rdp) in rcu_advance_cbs_nowake() argument
1592 rcu_lockdep_assert_cblist_protected(rdp); in rcu_advance_cbs_nowake()
1596 WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp)); in rcu_advance_cbs_nowake()
1619 static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp) in __note_gp_changes() argument
1623 const bool offloaded = rcu_rdp_is_offloaded(rdp); in __note_gp_changes()
1627 if (rdp->gp_seq == rnp->gp_seq) in __note_gp_changes()
1631 if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) || in __note_gp_changes()
1632 unlikely(READ_ONCE(rdp->gpwrap))) { in __note_gp_changes()
1634 ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */ in __note_gp_changes()
1635 rdp->core_needs_qs = false; in __note_gp_changes()
1636 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend")); in __note_gp_changes()
1639 ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */ in __note_gp_changes()
1640 if (rdp->core_needs_qs) in __note_gp_changes()
1641 rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask); in __note_gp_changes()
1645 if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) || in __note_gp_changes()
1646 unlikely(READ_ONCE(rdp->gpwrap))) { in __note_gp_changes()
1653 need_qs = !!(rnp->qsmask & rdp->grpmask); in __note_gp_changes()
1654 rdp->cpu_no_qs.b.norm = need_qs; in __note_gp_changes()
1655 rdp->core_needs_qs = need_qs; in __note_gp_changes()
1656 zero_cpu_stall_ticks(rdp); in __note_gp_changes()
1658 rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */ in __note_gp_changes()
1659 if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap) in __note_gp_changes()
1660 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed); in __note_gp_changes()
1661 WRITE_ONCE(rdp->gpwrap, false); in __note_gp_changes()
1662 rcu_gpnum_ovf(rnp, rdp); in __note_gp_changes()
1666 static void note_gp_changes(struct rcu_data *rdp) in note_gp_changes() argument
1673 rnp = rdp->mynode; in note_gp_changes()
1674 if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) && in note_gp_changes()
1675 !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */ in note_gp_changes()
1680 needwake = __note_gp_changes(rnp, rdp); in note_gp_changes()
1738 struct rcu_data *rdp; in rcu_gp_init() local
1848 rdp = this_cpu_ptr(&rcu_data); in rcu_gp_init()
1852 if (rnp == rdp->mynode) in rcu_gp_init()
1853 (void)__note_gp_changes(rnp, rdp); in rcu_gp_init()
2009 struct rcu_data *rdp; in rcu_gp_cleanup() local
2047 rdp = this_cpu_ptr(&rcu_data); in rcu_gp_cleanup()
2048 if (rnp == rdp->mynode) in rcu_gp_cleanup()
2049 needgp = __note_gp_changes(rnp, rdp) || needgp; in rcu_gp_cleanup()
2055 rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_gp_cleanup()
2056 check_cb_ovld_locked(rdp, rnp); in rcu_gp_cleanup()
2074 rdp = this_cpu_ptr(&rcu_data); in rcu_gp_cleanup()
2076 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed, in rcu_gp_cleanup()
2081 offloaded = rcu_rdp_is_offloaded(rdp); in rcu_gp_cleanup()
2082 if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) { in rcu_gp_cleanup()
2275 rcu_report_qs_rdp(struct rcu_data *rdp) in rcu_report_qs_rdp() argument
2280 const bool offloaded = rcu_rdp_is_offloaded(rdp); in rcu_report_qs_rdp()
2283 WARN_ON_ONCE(rdp->cpu != smp_processor_id()); in rcu_report_qs_rdp()
2284 rnp = rdp->mynode; in rcu_report_qs_rdp()
2286 if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq || in rcu_report_qs_rdp()
2287 rdp->gpwrap) { in rcu_report_qs_rdp()
2295 rdp->cpu_no_qs.b.norm = true; /* need qs for new gp. */ in rcu_report_qs_rdp()
2299 mask = rdp->grpmask; in rcu_report_qs_rdp()
2300 rdp->core_needs_qs = false; in rcu_report_qs_rdp()
2309 needwake = rcu_accelerate_cbs(rnp, rdp); in rcu_report_qs_rdp()
2311 rcu_disable_urgency_upon_qs(rdp); in rcu_report_qs_rdp()
2326 rcu_check_quiescent_state(struct rcu_data *rdp) in rcu_check_quiescent_state() argument
2329 note_gp_changes(rdp); in rcu_check_quiescent_state()
2335 if (!rdp->core_needs_qs) in rcu_check_quiescent_state()
2342 if (rdp->cpu_no_qs.b.norm) in rcu_check_quiescent_state()
2349 rcu_report_qs_rdp(rdp); in rcu_check_quiescent_state()
2359 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_dying_cpu() local
2360 struct rcu_node *rnp = rdp->mynode; in rcutree_dying_cpu()
2365 blkd = !!(rnp->qsmask & rdp->grpmask); in rcutree_dying_cpu()
2424 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_dead_cpu() local
2425 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ in rcutree_dead_cpu()
2442 static void rcu_do_batch(struct rcu_data *rdp) in rcu_do_batch() argument
2447 const bool offloaded = rcu_rdp_is_offloaded(rdp); in rcu_do_batch()
2454 if (!rcu_segcblist_ready_cbs(&rdp->cblist)) { in rcu_do_batch()
2456 rcu_segcblist_n_cbs(&rdp->cblist), 0); in rcu_do_batch()
2458 !rcu_segcblist_empty(&rdp->cblist), in rcu_do_batch()
2470 rcu_nocb_lock(rdp); in rcu_do_batch()
2472 pending = rcu_segcblist_n_cbs(&rdp->cblist); in rcu_do_batch()
2475 bl = max(rdp->blimit, pending >> div); in rcu_do_batch()
2483 rcu_segcblist_n_cbs(&rdp->cblist), bl); in rcu_do_batch()
2484 rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl); in rcu_do_batch()
2486 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist); in rcu_do_batch()
2488 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbDequeued")); in rcu_do_batch()
2489 rcu_nocb_unlock_irqrestore(rdp, flags); in rcu_do_batch()
2534 rcu_nocb_lock(rdp); in rcu_do_batch()
2535 rdp->n_cbs_invoked += count; in rcu_do_batch()
2540 rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl); in rcu_do_batch()
2541 rcu_segcblist_add_len(&rdp->cblist, -count); in rcu_do_batch()
2544 count = rcu_segcblist_n_cbs(&rdp->cblist); in rcu_do_batch()
2545 if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark) in rcu_do_batch()
2546 rdp->blimit = blimit; in rcu_do_batch()
2549 if (count == 0 && rdp->qlen_last_fqs_check != 0) { in rcu_do_batch()
2550 rdp->qlen_last_fqs_check = 0; in rcu_do_batch()
2551 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs); in rcu_do_batch()
2552 } else if (count < rdp->qlen_last_fqs_check - qhimark) in rcu_do_batch()
2553 rdp->qlen_last_fqs_check = count; in rcu_do_batch()
2559 empty = rcu_segcblist_empty(&rdp->cblist); in rcu_do_batch()
2563 WARN_ON_ONCE(count == 0 && rcu_segcblist_n_segment_cbs(&rdp->cblist) != 0); in rcu_do_batch()
2564 WARN_ON_ONCE(!empty && rcu_segcblist_n_segment_cbs(&rdp->cblist) == 0); in rcu_do_batch()
2566 rcu_nocb_unlock_irqrestore(rdp, flags); in rcu_do_batch()
2569 if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist)) in rcu_do_batch()
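
Note: the rcu_do_batch() hits above follow an extract/invoke/reinsert shape: ready callbacks are pulled onto an on-stack rcu_cblist, invoked with the (possibly no-CBs) lock dropped, and any leftovers are spliced back with the length adjusted. A condensed sketch of that shape (rcu_do_batch_sketch is an illustrative name; the real function also tracks timing, resched pressure, and offloaded-CPU limits):

    static void rcu_do_batch_sketch(struct rcu_data *rdp, long bl)
    {
            struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
            struct rcu_head *rhp;
            unsigned long flags;
            long count = 0;

            rcu_nocb_lock_irqsave(rdp, flags);
            rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl); /* grab ready CBs */
            rcu_nocb_unlock_irqrestore(rdp, flags);

            while ((rhp = rcu_cblist_dequeue(&rcl))) {
                    rhp->func(rhp);                  /* invoke one RCU callback */
                    if (++count >= bl)               /* honor the batch limit */
                            break;
            }

            rcu_nocb_lock_irqsave(rdp, flags);
            rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl); /* requeue leftovers */
            rcu_segcblist_add_len(&rdp->cblist, -count);       /* account invoked CBs */
            rcu_nocb_unlock_irqrestore(rdp, flags);
    }
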
2611 static void force_qs_rnp(int (*f)(struct rcu_data *rdp)) in force_qs_rnp() argument
2616 struct rcu_data *rdp; in force_qs_rnp() local
2641 rdp = per_cpu_ptr(&rcu_data, cpu); in force_qs_rnp()
2642 if (f(rdp)) { in force_qs_rnp()
2643 mask |= rdp->grpmask; in force_qs_rnp()
2644 rcu_disable_urgency_upon_qs(rdp); in force_qs_rnp()
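
Note: force_qs_rnp() applies the per-CPU predicate f() to every CPU that has not yet reported a quiescent state and reports one on its behalf when f() says the CPU is (or has been) idle. The expected pairing with the two predicates above is roughly the following; the rcu_gp_fqs() call sites themselves are outside this listing, so treat this as a reconstruction:

    /* Reconstruction of the expected caller, not one of the hits above. */
    static void rcu_gp_fqs_sketch(bool first_time)
    {
            if (first_time)
                    force_qs_rnp(dyntick_save_progress_counter); /* snapshot pass */
            else
                    force_qs_rnp(rcu_implicit_dynticks_qs);      /* compare pass */
    }
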
2707 struct rcu_data *rdp = raw_cpu_ptr(&rcu_data); in rcu_core() local
2708 struct rcu_node *rnp = rdp->mynode; in rcu_core()
2709 const bool do_batch = !rcu_segcblist_completely_offloaded(&rdp->cblist); in rcu_core()
2714 WARN_ON_ONCE(!rdp->beenonline); in rcu_core()
2725 rcu_check_quiescent_state(rdp); in rcu_core()
2729 rcu_segcblist_is_enabled(&rdp->cblist) && do_batch) { in rcu_core()
2730 rcu_nocb_lock_irqsave(rdp, flags); in rcu_core()
2731 if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) in rcu_core()
2732 rcu_accelerate_cbs_unlocked(rnp, rdp); in rcu_core()
2733 rcu_nocb_unlock_irqrestore(rdp, flags); in rcu_core()
2736 rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check()); in rcu_core()
2739 if (do_batch && rcu_segcblist_ready_cbs(&rdp->cblist) && in rcu_core()
2741 rcu_do_batch(rdp); in rcu_core()
2744 do_nocb_deferred_wakeup(rdp); in rcu_core()
2749 queue_work_on(rdp->cpu, rcu_gp_wq, &rdp->strict_work); in rcu_core()
2866 static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head, in __call_rcu_core() argument
2887 if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) > in __call_rcu_core()
2888 rdp->qlen_last_fqs_check + qhimark)) { in __call_rcu_core()
2891 note_gp_changes(rdp); in __call_rcu_core()
2895 rcu_accelerate_cbs_unlocked(rdp->mynode, rdp); in __call_rcu_core()
2898 rdp->blimit = DEFAULT_MAX_RCU_BLIMIT; in __call_rcu_core()
2899 if (READ_ONCE(rcu_state.n_force_qs) == rdp->n_force_qs_snap && in __call_rcu_core()
2900 rcu_segcblist_first_pend_cb(&rdp->cblist) != head) in __call_rcu_core()
2902 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs); in __call_rcu_core()
2903 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist); in __call_rcu_core()
2921 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp) in check_cb_ovld_locked() argument
2926 if (rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) in check_cb_ovld_locked()
2927 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask); in check_cb_ovld_locked()
2929 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask); in check_cb_ovld_locked()
2944 static void check_cb_ovld(struct rcu_data *rdp) in check_cb_ovld() argument
2946 struct rcu_node *const rnp = rdp->mynode; in check_cb_ovld()
2949 ((rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) == in check_cb_ovld()
2950 !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask))) in check_cb_ovld()
2953 check_cb_ovld_locked(rdp, rnp); in check_cb_ovld()
2963 struct rcu_data *rdp; in __call_rcu() local
2986 rdp = this_cpu_ptr(&rcu_data); in __call_rcu()
2989 if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) { in __call_rcu()
2995 if (rcu_segcblist_empty(&rdp->cblist)) in __call_rcu()
2996 rcu_segcblist_init(&rdp->cblist); in __call_rcu()
2999 check_cb_ovld(rdp); in __call_rcu()
3000 if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags)) in __call_rcu()
3003 rcu_segcblist_enqueue(&rdp->cblist, head); in __call_rcu()
3007 rcu_segcblist_n_cbs(&rdp->cblist)); in __call_rcu()
3010 rcu_segcblist_n_cbs(&rdp->cblist)); in __call_rcu()
3012 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued")); in __call_rcu()
3015 if (unlikely(rcu_rdp_is_offloaded(rdp))) { in __call_rcu()
3016 __call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */ in __call_rcu()
3018 __call_rcu_core(rdp, head, flags); in __call_rcu()
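
Note: __call_rcu() is the backend of the public call_rcu() API: it enqueues the callback on this CPU's rdp->cblist, or hands it to the no-CBs machinery when the CPU is offloaded. A generic caller-side example, not taken from this file (struct foo and its helpers are illustrative names):

    struct foo {
            int data;
            struct rcu_head rh;
    };

    static void foo_reclaim(struct rcu_head *rhp)
    {
            kfree(container_of(rhp, struct foo, rh)); /* runs after a grace period */
    }

    static void foo_release(struct foo *fp)
    {
            call_rcu(&fp->rh, foo_reclaim);  /* ends up in __call_rcu() above */
    }
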
3785 struct rcu_data *rdp; in start_poll_synchronize_rcu() local
3790 rdp = this_cpu_ptr(&rcu_data); in start_poll_synchronize_rcu()
3791 rnp = rdp->mynode; in start_poll_synchronize_rcu()
3793 needwake = rcu_start_this_gp(rnp, rdp, gp_seq); in start_poll_synchronize_rcu()
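
Note: start_poll_synchronize_rcu() uses rcu_start_this_gp() to make sure a grace period covering the returned cookie is underway. An illustrative polling pattern built on it (foo_poll_example is a made-up name; poll_state_synchronize_rcu() is the matching public query):

    static void foo_poll_example(void)
    {
            unsigned long cookie;

            cookie = start_poll_synchronize_rcu(); /* start a GP if none covers us */
            /* ... do other useful work while the grace period runs ... */
            if (!poll_state_synchronize_rcu(cookie))
                    synchronize_rcu();             /* not done yet: wait for it */
    }
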
3872 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_pending() local
3873 struct rcu_node *rnp = rdp->mynode; in rcu_pending()
3878 check_cpu_stall(rdp); in rcu_pending()
3881 if (rcu_nocb_need_deferred_wakeup(rdp, RCU_NOCB_WAKE)) in rcu_pending()
3890 if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress) in rcu_pending()
3894 if (!rcu_rdp_is_offloaded(rdp) && in rcu_pending()
3895 rcu_segcblist_ready_cbs(&rdp->cblist)) in rcu_pending()
3899 if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) && in rcu_pending()
3900 !rcu_rdp_is_offloaded(rdp) && in rcu_pending()
3901 !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) in rcu_pending()
3905 if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq || in rcu_pending()
3906 unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */ in rcu_pending()
3951 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_barrier_func() local
3954 rdp->barrier_head.func = rcu_barrier_callback; in rcu_barrier_func()
3955 debug_rcu_head_queue(&rdp->barrier_head); in rcu_barrier_func()
3956 rcu_nocb_lock(rdp); in rcu_barrier_func()
3957 WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies)); in rcu_barrier_func()
3958 if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) { in rcu_barrier_func()
3961 debug_rcu_head_unqueue(&rdp->barrier_head); in rcu_barrier_func()
3965 rcu_nocb_unlock(rdp); in rcu_barrier_func()
3979 struct rcu_data *rdp; in rcu_barrier() local
4017 rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_barrier()
4019 !rcu_rdp_is_offloaded(rdp)) in rcu_barrier()
4021 if (rcu_segcblist_n_cbs(&rdp->cblist) && cpu_online(cpu)) { in rcu_barrier()
4025 } else if (rcu_segcblist_n_cbs(&rdp->cblist) && in rcu_barrier()
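
Note: rcu_barrier() entrains rdp->barrier_head on every CPU that still has callbacks queued and waits for all of them, which is what makes it safe to unload code that previously called call_rcu(). A typical module-exit usage (illustrative, not from this file):

    static void __exit foo_exit(void)
    {
            /* Ensure no new call_rcu() invocations can be started first. */
            rcu_barrier();  /* wait for all outstanding foo_reclaim() callbacks */
            /* Now the callback functions and their data may safely go away. */
    }
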
4095 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_boot_init_percpu_data() local
4098 rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu); in rcu_boot_init_percpu_data()
4099 INIT_WORK(&rdp->strict_work, strict_work_handler); in rcu_boot_init_percpu_data()
4100 WARN_ON_ONCE(rdp->dynticks_nesting != 1); in rcu_boot_init_percpu_data()
4101 WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp))); in rcu_boot_init_percpu_data()
4102 rdp->rcu_ofl_gp_seq = rcu_state.gp_seq; in rcu_boot_init_percpu_data()
4103 rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED; in rcu_boot_init_percpu_data()
4104 rdp->rcu_onl_gp_seq = rcu_state.gp_seq; in rcu_boot_init_percpu_data()
4105 rdp->rcu_onl_gp_flags = RCU_GP_CLEANED; in rcu_boot_init_percpu_data()
4106 rdp->cpu = cpu; in rcu_boot_init_percpu_data()
4107 rcu_boot_init_nocb_percpu_data(rdp); in rcu_boot_init_percpu_data()
4123 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_prepare_cpu() local
4128 rdp->qlen_last_fqs_check = 0; in rcutree_prepare_cpu()
4129 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs); in rcutree_prepare_cpu()
4130 rdp->blimit = blimit; in rcutree_prepare_cpu()
4131 rdp->dynticks_nesting = 1; /* CPU not up, no tearing. */ in rcutree_prepare_cpu()
4138 if (!rcu_segcblist_is_enabled(&rdp->cblist)) in rcutree_prepare_cpu()
4139 rcu_segcblist_init(&rdp->cblist); /* Re-enable callbacks. */ in rcutree_prepare_cpu()
4146 rnp = rdp->mynode; in rcutree_prepare_cpu()
4148 rdp->beenonline = true; /* We have now been online. */ in rcutree_prepare_cpu()
4149 rdp->gp_seq = READ_ONCE(rnp->gp_seq); in rcutree_prepare_cpu()
4150 rdp->gp_seq_needed = rdp->gp_seq; in rcutree_prepare_cpu()
4151 rdp->cpu_no_qs.b.norm = true; in rcutree_prepare_cpu()
4152 rdp->core_needs_qs = false; in rcutree_prepare_cpu()
4153 rdp->rcu_iw_pending = false; in rcutree_prepare_cpu()
4154 rdp->rcu_iw = IRQ_WORK_INIT_HARD(rcu_iw_handler); in rcutree_prepare_cpu()
4155 rdp->rcu_iw_gp_seq = rdp->gp_seq - 1; in rcutree_prepare_cpu()
4156 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl")); in rcutree_prepare_cpu()
4170 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_affinity_setting() local
4172 rcu_boost_kthread_setaffinity(rdp->mynode, outgoing); in rcutree_affinity_setting()
4182 struct rcu_data *rdp; in rcutree_online_cpu() local
4185 rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_online_cpu()
4186 rnp = rdp->mynode; in rcutree_online_cpu()
4188 rnp->ffmask |= rdp->grpmask; in rcutree_online_cpu()
4207 struct rcu_data *rdp; in rcutree_offline_cpu() local
4210 rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_offline_cpu()
4211 rnp = rdp->mynode; in rcutree_offline_cpu()
4213 rnp->ffmask &= ~rdp->grpmask; in rcutree_offline_cpu()
4238 struct rcu_data *rdp; in rcu_cpu_starting() local
4242 rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_cpu_starting()
4243 if (rdp->cpu_started) in rcu_cpu_starting()
4245 rdp->cpu_started = true; in rcu_cpu_starting()
4247 rnp = rdp->mynode; in rcu_cpu_starting()
4248 mask = rdp->grpmask; in rcu_cpu_starting()
4260 rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */ in rcu_cpu_starting()
4261 rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq); in rcu_cpu_starting()
4262 rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags); in rcu_cpu_starting()
4266 rcu_disable_urgency_upon_qs(rdp); in rcu_cpu_starting()
4290 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_report_dead() local
4291 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ in rcu_report_dead()
4294 do_nocb_deferred_wakeup(rdp); in rcu_report_dead()
4297 rcu_report_exp_rdp(rdp); in rcu_report_dead()
4301 mask = rdp->grpmask; in rcu_report_dead()
4307 rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq); in rcu_report_dead()
4308 rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags); in rcu_report_dead()
4321 rdp->cpu_started = false; in rcu_report_dead()
4335 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_migrate_callbacks() local
4338 if (rcu_rdp_is_offloaded(rdp) || in rcutree_migrate_callbacks()
4339 rcu_segcblist_empty(&rdp->cblist)) in rcutree_migrate_callbacks()
4349 needwake = rcu_advance_cbs(my_rnp, rdp) || in rcutree_migrate_callbacks()
4351 rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist); in rcutree_migrate_callbacks()
4353 rcu_segcblist_disable(&rdp->cblist); in rcutree_migrate_callbacks()
4366 WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 || in rcutree_migrate_callbacks()
4367 !rcu_segcblist_empty(&rdp->cblist), in rcutree_migrate_callbacks()
4369 cpu, rcu_segcblist_n_cbs(&rdp->cblist), in rcutree_migrate_callbacks()
4370 rcu_segcblist_first_cb(&rdp->cblist)); in rcutree_migrate_callbacks()