/*
 * Per-CPU counter: a global atomic total (@count) plus one atomic
 * per-CPU delta (@counters).  Frequent small updates touch only the
 * local CPU's cacheline; deltas are folded into @count once they reach
 * the FBC_BATCH threshold, so @count is approximate between folds.
 */
struct percpu_counter {
	atomic_long_t count;		/* global (approximate) total */
	atomic_long_t *counters;	/* per-CPU deltas, allocated elsewhere */
};

#ifdef CONFIG_SMP
/*
 * percpu_counter_mod - add @amount to the counter
 * @fbc:	counter to modify
 * @amount:	signed delta to apply
 *
 * Adds @amount to this CPU's local delta.  If the local delta would
 * reach +/-FBC_BATCH, the delta is atomically drained (xchg to 0) and
 * folded into the global @count instead, keeping each per-CPU value
 * bounded.
 *
 * get_cpu()/put_cpu() pin the caller to one CPU (preemption disabled)
 * for the duration, so the per-CPU slot chosen stays ours.
 *
 * NOTE(review): the read at "new = atomic_long_read(pcount) + amount"
 * and the later xchg are not one atomic step; the xchg re-reads the
 * slot atomically, so a concurrent in-window update to the slot (e.g.
 * from interrupt context, if that is permitted for this counter —
 * can't tell from here) is still folded in, not lost.
 */
void percpu_counter_mod(struct percpu_counter *fbc, long amount)
{
	long new;
	atomic_long_t *pcount;

	pcount = per_cpu_ptr(fbc->counters, get_cpu());
	new = atomic_long_read(pcount) + amount;
	if (new >= FBC_BATCH || new <= -FBC_BATCH) {
		/* drain the local delta and fold it into the global count */
		new = atomic_long_xchg(pcount, 0) + amount;
		if (new)
			atomic_long_add(new, &fbc->count);
	} else
		atomic_long_add(amount, pcount);
	put_cpu();
}
EXPORT_SYMBOL(percpu_counter_mod);

/*
 * percpu_counter_read_accurate - fold all per-CPU deltas and return total
 * @fbc:	counter to read
 *
 * Walks every CPU's delta, atomically drains each non-zero one (xchg to
 * 0), adds the sum into the global @count, and returns the resulting
 * total.  Note this is a *mutating* read: it zeroes the per-CPU deltas
 * as a side effect.
 *
 * NOTE(review): updates racing with this walk land either in a per-CPU
 * slot (picked up by a later fold) or in @count directly, so nothing is
 * lost — but the returned value is only a snapshot, exact as of no
 * single instant.  Also assumes for_each_cpu here covers every CPU that
 * may hold a delta (possible vs. online set) — TODO confirm for this
 * kernel version.
 */
long percpu_counter_read_accurate(struct percpu_counter *fbc)
{
	long res = 0;
	int cpu;
	atomic_long_t *pcount;

	for_each_cpu(cpu) {
		pcount = per_cpu_ptr(fbc->counters, cpu);
		/* don't dirty the remote cacheline unless there is a delta */
		if (atomic_long_read(pcount))
			res += atomic_long_xchg(pcount, 0);
	}
	atomic_long_add(res, &fbc->count);
	return atomic_long_read(&fbc->count);
}
EXPORT_SYMBOL(percpu_counter_read_accurate);
#endif /* CONFIG_SMP */