--- linux/kernel/rcupdate.c.orig
+++ linux/kernel/rcupdate.c
@@ -468,3 +468,36 @@ module_param(maxbatch, int, 0);
 EXPORT_SYMBOL(call_rcu);
 EXPORT_SYMBOL(call_rcu_bh);
 EXPORT_SYMBOL(synchronize_kernel);
+
+#ifdef CONFIG_PREEMPT_RCU
+
+/*
+ * Out-of-line, preemptible read-side critical section entry.
+ * Only the outermost lock (nesting 0 -> 1) pins an rcu_data and
+ * bumps its active-reader count; nested locks just count.
+ * NOTE(review): "¤t" in the mangled original is the HTML entity
+ * residue of "&current" -- restored here, otherwise this cannot compile.
+ */
+void rcu_read_lock(void)
+{
+	if (current->rcu_read_lock_nesting++ == 0) {
+		current->rcu_data = &get_cpu_var(rcu_data);
+		atomic_inc(&current->rcu_data->active_readers);
+		put_cpu_var(rcu_data);
+	}
+}
+EXPORT_SYMBOL(rcu_read_lock);
+
+void rcu_read_unlock(void)
+{
+	int cpu;
+
+	if (--current->rcu_read_lock_nesting == 0) {
+		atomic_dec(&current->rcu_data->active_readers);
+		/*
+		 * Check whether we have reached quiescent state.
+		 * Note! This is only for the local CPU, not for
+		 * current->rcu_data's CPU [which typically is the
+		 * current CPU, but may also be another CPU].
+		 */
+		cpu = get_cpu();
+		rcu_qsctr_inc(cpu);
+		put_cpu();
+	}
+}
+EXPORT_SYMBOL(rcu_read_unlock);
+
+#endif
--- linux/include/linux/rcupdate.h.orig
+++ linux/include/linux/rcupdate.h
@@ -99,6 +99,9 @@ struct rcu_data {
 	struct rcu_head *donelist;
 	struct rcu_head **donetail;
 	int cpu;
+#ifdef CONFIG_PREEMPT_RCU
+	atomic_t active_readers;
+#endif
 };
 
 DECLARE_PER_CPU(struct rcu_data, rcu_data);
@@ -115,12 +118,18 @@ extern struct rcu_ctrlblk rcu_bh_ctrlblk
 static inline void rcu_qsctr_inc(int cpu)
 {
 	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
-	rdp->passed_quiesc = 1;
+#ifdef CONFIG_PREEMPT_RCU
+	if (!atomic_read(&rdp->active_readers))
+#endif
+		rdp->passed_quiesc = 1;
 }
 
 static inline void rcu_bh_qsctr_inc(int cpu)
 {
 	struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
-	rdp->passed_quiesc = 1;
+#ifdef CONFIG_PREEMPT_RCU
+	if (!atomic_read(&rdp->active_readers))
+#endif
+		rdp->passed_quiesc = 1;
 }
 
 static inline int __rcu_pending(struct rcu_ctrlblk *rcp,
@@ -183,14 +192,22 @@ static inline int rcu_pending(int cpu)
  *
  * It is illegal to block while in an RCU read-side critical section.
*/ -#define rcu_read_lock() preempt_disable() +#ifdef CONFIG_PREEMPT_RCU + extern void rcu_read_lock(void); +#else +# define rcu_read_lock preempt_disable() +#endif /** * rcu_read_unlock - marks the end of an RCU read-side critical section. * * See rcu_read_lock() for more information. */ -#define rcu_read_unlock() preempt_enable() +#ifdef CONFIG_PREEMPT_RCU + extern void rcu_read_unlock(void); +#else +# define rcu_read_unlock preempt_enable() +#endif /* * So where is rcu_write_lock()? It does not exist, as there is no @@ -213,14 +230,16 @@ static inline int rcu_pending(int cpu) * can use just rcu_read_lock(). * */ -#define rcu_read_lock_bh() local_bh_disable() +//#define rcu_read_lock_bh() local_bh_disable() +#define rcu_read_lock_bh() { rcu_read_lock(); local_bh_disable(); } /* * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section * * See rcu_read_lock_bh() for more information. */ -#define rcu_read_unlock_bh() local_bh_enable() +//#define rcu_read_unlock_bh() local_bh_enable() +#define rcu_read_unlock_bh() { local_bh_enable(); rcu_read_unlock(); } /** * rcu_dereference - fetch an RCU-protected pointer in an --- linux/include/linux/sched.h.orig +++ linux/include/linux/sched.h @@ -727,6 +872,10 @@ struct task_struct { nodemask_t mems_allowed; int cpuset_mems_generation; #endif +#ifdef CONFIG_PREEMPT_RCU + int rcu_read_lock_nesting; + struct rcu_data *rcu_data; +#endif }; static inline pid_t process_group(struct task_struct *tsk)