On Sun, Feb 10, 2019 at 09:00:50PM -0500, Waiman Long wrote:

> +static inline int __down_read_trylock(struct rw_semaphore *sem)
> +{
> +	long tmp;
> +
> +	while ((tmp = atomic_long_read(&sem->count)) >= 0) {
> +		if (tmp == atomic_long_cmpxchg_acquire(&sem->count, tmp,
> +				tmp + RWSEM_ACTIVE_READ_BIAS)) {
> +			return 1;
> +		}
> +	}
> +	return 0;
> +}

So the original x86 implementation reads:

	static inline bool __down_read_trylock(struct rw_semaphore *sem)
	{
		long result, tmp;
		asm volatile("# beginning __down_read_trylock\n\t"
			     "  mov          %[count],%[result]\n\t"
			     "1:\n\t"
			     "  mov          %[result],%[tmp]\n\t"
			     "  add          %[inc],%[tmp]\n\t"
			     "  jle          2f\n\t"
			     LOCK_PREFIX "  cmpxchg  %[tmp],%[count]\n\t"
			     "  jnz          1b\n\t"
			     "2:\n\t"
			     "# ending __down_read_trylock\n\t"
			     : [count] "+m" (sem->count), [result] "=&a" (result),
			       [tmp] "=&r" (tmp)
			     : [inc] "i" (RWSEM_ACTIVE_READ_BIAS)
			     : "memory", "cc");
		return result >= 0;
	}

you replace that with:

	int __down_read_trylock1(unsigned long *l)
	{
		long tmp;

		while ((tmp = READ_ONCE(*l)) >= 0) {
			if (tmp == cmpxchg(l, tmp, tmp + 1))
				return 1;
		}
		return 0;
	}

which generates:

	0000000000000000 <__down_read_trylock1>:
	   0:	eb 17                	jmp    19 <__down_read_trylock1+0x19>
	   2:	66 0f 1f 44 00 00    	nopw   0x0(%rax,%rax,1)
	   8:	48 8d 4a 01          	lea    0x1(%rdx),%rcx
	   c:	48 89 d0             	mov    %rdx,%rax
	   f:	f0 48 0f b1 0f       	lock cmpxchg %rcx,(%rdi)
	  14:	48 39 c2             	cmp    %rax,%rdx
	  17:	74 0f                	je     28 <__down_read_trylock1+0x28>
	  19:	48 8b 17             	mov    (%rdi),%rdx
	  1c:	48 85 d2             	test   %rdx,%rdx
	  1f:	79 e7                	jns    8 <__down_read_trylock1+0x8>
	  21:	31 c0                	xor    %eax,%eax
	  23:	c3                   	retq
	  24:	0f 1f 40 00          	nopl   0x0(%rax)
	  28:	b8 01 00 00 00       	mov    $0x1,%eax
	  2d:	c3                   	retq

Which is clearly worse. Now we can write that as:

	int __down_read_trylock2(unsigned long *l)
	{
		long tmp = READ_ONCE(*l);

		while (tmp >= 0) {
			if (try_cmpxchg(l, &tmp, tmp + 1))
				return 1;
		}
		return 0;
	}

which generates:

	0000000000000030 <__down_read_trylock2>:
	  30:	48 8b 07             	mov    (%rdi),%rax
	  33:	48 85 c0             	test   %rax,%rax
	  36:	78 18                	js     50 <__down_read_trylock2+0x20>
	  38:	48 8d 50 01          	lea    0x1(%rax),%rdx
	  3c:	f0 48 0f b1 17       	lock cmpxchg %rdx,(%rdi)
	  41:	75 f0                	jne    33 <__down_read_trylock2+0x3>
	  43:	b8 01 00 00 00       	mov    $0x1,%eax
	  48:	c3                   	retq
	  49:	0f 1f 80 00 00 00 00 	nopl   0x0(%rax)
	  50:	31 c0                	xor    %eax,%eax
	  52:	c3                   	retq

Which is a lot better; but not quite there yet.

I've tried quite a bit, but I can't seem to get GCC to generate the:

	add $1,%rdx
	jle

required; stuff like:

	new = old + 1;
	if (new <= 0)

generates:

	lea 0x1(%rax),%rdx
	test %rdx,%rdx
	jle

Ah well, have fun :-)
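As a minimal sketch only: the try_cmpxchg() form above could carry over to the rwsem code in the quoted patch along these lines, assuming an atomic_long_try_cmpxchg_acquire() helper with the usual try_cmpxchg() semantics (returns true on success, reloads the old value into tmp on failure); untested:

	static inline int __down_read_trylock(struct rw_semaphore *sem)
	{
		long tmp = atomic_long_read(&sem->count);

		while (tmp >= 0) {
			/* On failure, tmp is refreshed with the current count. */
			if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
						tmp + RWSEM_ACTIVE_READ_BIAS))
				return 1;
		}
		return 0;
	}

The point of the shape is the same as in __down_read_trylock2: the failed cmpxchg already returns the current count in %rax, so folding the reload into the try_cmpxchg() lets the compiler reuse that value instead of issuing a separate load on every retry.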