4 #include "per_cpu_data.h"
12 * \brief Encapsulation of RCU batch number.
16 friend class Jdb_rcupdate;
18 /// create uninitialized batch.
20 /// create a batch initialized with \a b.
21 Rcu_batch(long b) : _b(b) {}
23 /// less than comparison.
24 bool operator < (Rcu_batch const &o) const { return (_b - o._b) < 0; }
25 /// greater than comparison.
26 bool operator > (Rcu_batch const &o) const { return (_b - o._b) > 0; }
27 /// greater-than-or-equal comparison.
28 bool operator >= (Rcu_batch const &o) const { return (_b - o._b) >= 0; }
30 bool operator == (Rcu_batch const &o) const { return _b == o._b; }
32 bool operator != (Rcu_batch const &o) const { return _b != o._b; }
34 Rcu_batch &operator ++ () { ++_b; return *this; }
35 /// increase batch with \a r.
36 Rcu_batch operator + (long r) { return Rcu_batch(_b + r); }
44 * \brief Item that can be queued for the next grace period.
46 * An RCU item is basically a pointer to a callback which is called
47 * after one grace period.
49 class Rcu_item : public cxx::S_list_item
51 friend class Rcu_data;
53 friend class Jdb_rcupdate;
56 bool (*_call_back)(Rcu_item *);
61 * \brief List of Rcu_items.
63 * RCU lists are used in many places in the RCU implementation and are
64 * implemented as singly-linked lists with FIFO semantics.
66 * \note Concurrent access to the list is not synchronized.
68 class Rcu_list : public cxx::S_list_tail<Rcu_item>
71 typedef cxx::S_list_tail<Rcu_item> Base;
74 Rcu_list(Rcu_list &&o) : Base(static_cast<Base &&>(o)) {}
75 Rcu_list &operator = (Rcu_list &&o)
77 Base::operator = (static_cast<Base &&>(o));
82 friend class Jdb_rcupdate;
86 * \brief CPU local data structure for RCU.
90 friend class Jdb_rcupdate;
93 Rcu_batch _q_batch; ///< batch nr. for grace period
94 bool _q_passed; ///< quiescent state passed?
95 bool _pending; ///< wait for quiescent state
108 * \brief Global RCU data structure.
112 friend class Rcu_data;
114 friend class Jdb_rcupdate;
117 Rcu_batch _current; ///< current batch
118 Rcu_batch _completed; ///< last completed batch
119 bool _next_pending; ///< next batch already pending?
123 Cpu_mask _active_cpus;
128 * \brief encapsulation of RCU implementation.
130 * This class aggregates per CPU data structures as well as the global
131 * data structure for RCU and provides a common RCU interface.
135 friend class Rcu_data;
136 friend class Jdb_rcupdate;
139 /// The lock to prevent a quiescent state.
140 typedef Cpu_lock Lock;
141 enum { Period = 3000 /* 10ms */ };
142 static Rcu_glbl *rcu() { return &_rcu; }
144 static Rcu_glbl _rcu;
145 static Per_cpu<Rcu_data> _rcu_data;
148 // ------------------------------------------------------------------------
151 #include "tb_entry.h"
156 struct Log_rcu : public Tb_entry
162 unsigned print(int max, char *buf) const;
163 } __attribute__((packed));
173 // --------------------------------------------------------------------------
174 IMPLEMENTATION [debug]:
180 Rcu::Log_rcu::print(int max, char *buf) const
182 char const *events[] = { "call", "process"};
183 return snprintf(buf, max, "rcu-%s (cpu=%u) item=%p", events[event], cpu, item);
187 //--------------------------------------------------------------------------
191 #include "cpu_lock.h"
194 #include "lock_guard.h"
196 #include "static_init.h"
200 // XXX: includes for debugging
201 // #include "logdefs.h"
204 class Rcu_timeout : public Timeout
209 * Timeout expiration callback function
210 * @return true if reschedule is necessary, false otherwise
214 Rcu_timeout::expired()
215 { return Rcu::process_callbacks(); }
218 Rcu_glbl Rcu::_rcu INIT_PRIORITY(EARLY_INIT_PRIO);
219 DEFINE_PER_CPU Per_cpu<Rcu_data> Rcu::_rcu_data(true);
220 DEFINE_PER_CPU static Per_cpu<Rcu_timeout> _rcu_timeout;
229 Rcu_data::Rcu_data(unsigned cpu)
236 * \brief Enqueue Rcu_item into the list (at the tail).
237 * \prarm i the RCU item to enqueue.
239 PUBLIC inline void Rcu_list::enqueue(Rcu_item *i){ push_back(i); }
242 * \pre must run under cpu lock
246 Rcu_data::enqueue(Rcu_item *i)
252 PRIVATE inline NOEXPORT NEEDS["cpu_lock.h", "lock_guard.h"]
257 bool need_resched = false;
258 for (Rcu_list::Const_iterator l = _d.begin(); l != _d.end();)
263 need_resched |= i->_call_back(i);
267 // XXX: I do not know why this and the former stuff is w/o cpu lock
268 // but the counting needs it ?
271 // XXX: we use clear, we seemingly worked through the whole list
275 auto guard = lock_guard(cpu_lock);
281 Timeout *t = &_rcu_timeout.cpu(_cpu);
282 t->set(t->get_timeout(0) + Rcu::Period, _cpu);
288 PRIVATE inline NOEXPORT
290 Rcu_glbl::start_batch()
292 if (_next_pending && _completed == _current)
298 _cpus = _active_cpus;
304 Rcu::enter_idle(unsigned cpu)
306 Rcu_data *rdp = &_rcu_data.cpu(cpu);
307 if (EXPECT_TRUE(!rdp->_idle))
310 auto guard = lock_guard(rcu()->_lock);
311 rcu()->_active_cpus.clear(cpu);
317 Rcu::leave_idle(unsigned cpu)
319 Rcu_data *rdp = &_rcu_data.cpu(cpu);
320 if (EXPECT_FALSE(rdp->_idle))
323 auto guard = lock_guard(rcu()->_lock);
324 rcu()->_active_cpus.set(cpu);
325 rdp->_q_batch = Rcu::rcu()->_current;
330 PRIVATE inline NOEXPORT
332 Rcu_glbl::cpu_quiet(unsigned cpu)
337 _completed = _current;
344 Rcu_data::check_quiescent_state(Rcu_glbl *rgp)
346 if (_q_batch != rgp->_current)
348 // start new grace period
351 _q_batch = rgp->_current;
355 // Is the grace period already completed for this cpu?
356 // use _pending, not bitmap, to avoid cache thrashing
360 // Was there a quiescent state since the beginning of the grace period?
366 auto guard = lock_guard(rgp->_lock);
368 if (EXPECT_TRUE(_q_batch == rgp->_current))
369 rgp->cpu_quiet(_cpu);
373 PUBLIC static //inline NEEDS["cpu_lock.h", "globals.h", "lock_guard.h", "logdefs.h"]
375 Rcu::call(Rcu_item *i, bool (*cb)(Rcu_item *))
378 LOG_TRACE("Rcu call", "rcu", ::current(), Log_rcu,
379 l->cpu = current_cpu();
384 auto guard = lock_guard(cpu_lock);
386 Rcu_data *rdp = &_rcu_data.current();
392 Rcu_data::move_batch(Rcu_list &l)
394 auto guard = lock_guard(cpu_lock);
400 Rcu_data::~Rcu_data()
402 if (current_cpu() == _cpu)
405 Rcu_data *current_rdp = &Rcu::_rcu_data.current();
406 Rcu_glbl *rgp = Rcu::rcu();
409 auto guard = lock_guard(rgp->_lock);
410 if (rgp->_current != rgp->_completed)
411 rgp->cpu_quiet(_cpu);
414 current_rdp->move_batch(_c);
415 current_rdp->move_batch(_n);
416 current_rdp->move_batch(_d);
420 bool FIASCO_WARN_RESULT
421 Rcu_data::process_callbacks(Rcu_glbl *rgp)
423 LOG_TRACE("Rcu callbacks", "rcu", ::current(), Rcu::Log_rcu,
426 l->event = Rcu::Rcu_process);
428 if (!_c.empty() && rgp->_completed >= _batch)
431 if (!_n.empty() && _c.empty())
434 auto guard = lock_guard(cpu_lock);
438 // start the next batch of callbacks
440 _batch = rgp->_current + 1;
443 if (!rgp->_next_pending)
445 // start the batch and schedule start if it's a new batch
446 auto guard = lock_guard(rgp->_lock);
447 rgp->_next_pending = 1;
452 check_quiescent_state(rgp);
461 Rcu_data::pending(Rcu_glbl *rgp) const
463 // The CPU has pending RCU callbacks and the grace period for them
464 // has been completed.
465 if (!_c.empty() && rgp->_completed >= _batch)
468 // The CPU has no pending RCU callbacks, however there are new callbacks
469 if (_c.empty() && !_n.empty())
472 // The CPU has callbacks to be invoked finally
476 // RCU waits for a quiescent state from the CPU
477 if ((_q_batch != rgp->_current) || _pending)
480 // OK, no RCU work to do
485 PUBLIC static inline NEEDS["globals.h"]
486 bool FIASCO_WARN_RESULT
487 Rcu::process_callbacks()
488 { return _rcu_data.current().process_callbacks(&_rcu); }
490 PUBLIC static inline NEEDS["globals.h"]
491 bool FIASCO_WARN_RESULT
492 Rcu::process_callbacks(unsigned cpu)
493 { return _rcu_data.cpu(cpu).process_callbacks(&_rcu); }
497 Rcu::pending(unsigned cpu)
499 return _rcu_data.cpu(cpu).pending(&_rcu);
504 Rcu::idle(unsigned cpu)
506 Rcu_data const *d = &_rcu_data.cpu(cpu);
507 return d->_c.empty() && !d->pending(&_rcu);
512 Rcu::inc_q_cnt(unsigned cpu)
513 { _rcu_data.cpu(cpu)._q_passed = 1; }
517 Rcu::schedule_callbacks(unsigned cpu, Unsigned64 clock)
519 Timeout *t = &_rcu_timeout.cpu(cpu);
524 PUBLIC static inline NEEDS["cpu_lock.h"]
527 { return &cpu_lock; }
532 Rcu::do_pending_work(unsigned cpu)
537 return process_callbacks(cpu);
539 Rcu::schedule_callbacks(cpu, Kip::k()->clock + Rcu::Period);