From: Mark Rutland <mark.rutland@arm.com>
To: linux-kernel@vger.kernel.org, will@kernel.org,
	boqun.feng@gmail.com, peterz@infradead.org
Cc: aou@eecs.berkeley.edu, arnd@arndb.de, bcain@codeaurora.org,
	benh@kernel.crashing.org, chris@zankel.net, dalias@libc.org,
	davem@davemloft.net, deanbo422@gmail.com, deller@gmx.de,
	geert@linux-m68k.org, green.hu@gmail.com, guoren@kernel.org,
	ink@jurassic.park.msu.ru, James.Bottomley@HansenPartnership.com,
	jcmvbkbc@gmail.com, jonas@southpole.se, ley.foon.tan@intel.com,
	linux@armlinux.org.uk, mark.rutland@arm.com, mattst88@gmail.com,
	monstr@monstr.eu, mpe@ellerman.id.au, nickhu@andestech.com,
	palmer@dabbelt.com, paulus@samba.org, paul.walmsley@sifive.com,
	rth@twiddle.net, shorne@gmail.com,
	stefan.kristiansson@saunalahti.fi, tsbogend@alpha.franken.de,
	vgupta@synopsys.com, ysato@users.sourceforge.jp
Subject: [PATCH 13/33] locking/atomic: alpha: move to ARCH_ATOMIC
Date: Mon, 10 May 2021 10:37:33 +0100
Message-ID: <20210510093753.40683-14-mark.rutland@arm.com>
In-Reply-To: <20210510093753.40683-1-mark.rutland@arm.com>

We'd like all architectures to convert to ARCH_ATOMIC, as once all
architectures are converted it will be possible to make significant
cleanups to the atomics headers, and this will make it much easier to
generically enable atomic functionality (e.g. debug logic in the
instrumented wrappers).

As a step towards that, this patch migrates alpha to ARCH_ATOMIC. The
arch code provides arch_{atomic,atomic64,xchg,cmpxchg}*(), and common
code wraps these with optional instrumentation to provide the regular
functions.
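
For illustration only (this wrapper is generated by the common code, not
added by this patch): with ARCH_ATOMIC selected, the instrumented
wrappers take roughly the following shape around the arch_*() ops
provided here; the exact instrumentation helpers may differ:

	/* Sketch of a generated wrapper, not part of this patch. */
	static __always_inline void
	atomic_add(int i, atomic_t *v)
	{
		instrument_atomic_read_write(v, sizeof(*v));
		arch_atomic_add(i, v);
	}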

Note: xchg_local() is NOT currently part of the generic arch_atomic API;
it keeps its existing name and is not instrumented.
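
As a purely illustrative sketch (not taken from this patch), the
distinction looks like this to a caller on alpha after the conversion:

	int x = 0, old;

	/* Covered by the generic instrumented wrappers: */
	old = cmpxchg(&x, 0, 1);	/* instrumentation + arch_cmpxchg() */

	/* Not part of the generic API; name and behaviour unchanged: */
	old = xchg_local(&x, 2);	/* calls the alpha code directly */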

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Will Deacon <will@kernel.org>
---
 arch/alpha/Kconfig               |  1 +
 arch/alpha/include/asm/atomic.h  | 88 +++++++++++++++++++++-------------------
 arch/alpha/include/asm/cmpxchg.h | 12 +++---
 3 files changed, 54 insertions(+), 47 deletions(-)

diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 5998106faa60..7920fc2e2a2a 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -2,6 +2,7 @@
 config ALPHA
 	bool
 	default y
+	select ARCH_ATOMIC
 	select ARCH_32BIT_USTAT_F_TINODE
 	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select ARCH_MIGHT_HAVE_PC_SERIO
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index e41c113c6688..60a2872986a2 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -26,11 +26,11 @@
 
 #define ATOMIC64_INIT(i)	{ (i) }
 
-#define atomic_read(v)		READ_ONCE((v)->counter)
-#define atomic64_read(v)	READ_ONCE((v)->counter)
+#define arch_atomic_read(v)	READ_ONCE((v)->counter)
+#define arch_atomic64_read(v)	READ_ONCE((v)->counter)
 
-#define atomic_set(v,i)		WRITE_ONCE((v)->counter, (i))
-#define atomic64_set(v,i)	WRITE_ONCE((v)->counter, (i))
+#define arch_atomic_set(v,i)	WRITE_ONCE((v)->counter, (i))
+#define arch_atomic64_set(v,i)	WRITE_ONCE((v)->counter, (i))
 
 /*
  * To get proper branch prediction for the main line, we must branch
@@ -39,7 +39,7 @@
  */
 
 #define ATOMIC_OP(op, asm_op)						\
-static __inline__ void atomic_##op(int i, atomic_t * v)			\
+static __inline__ void arch_atomic_##op(int i, atomic_t * v)		\
 {									\
 	unsigned long temp;						\
 	__asm__ __volatile__(						\
@@ -55,7 +55,7 @@ static __inline__ void atomic_##op(int i, atomic_t * v)			\
 }									\
 
 #define ATOMIC_OP_RETURN(op, asm_op)					\
-static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)	\
+static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v)	\
 {									\
 	long temp, result;						\
 	__asm__ __volatile__(						\
@@ -74,7 +74,7 @@ static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)	\
 }
 
 #define ATOMIC_FETCH_OP(op, asm_op)					\
-static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
+static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
 {									\
 	long temp, result;						\
 	__asm__ __volatile__(						\
@@ -92,7 +92,7 @@ static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
 }
 
 #define ATOMIC64_OP(op, asm_op)						\
-static __inline__ void atomic64_##op(s64 i, atomic64_t * v)		\
+static __inline__ void arch_atomic64_##op(s64 i, atomic64_t * v)	\
 {									\
 	s64 temp;							\
 	__asm__ __volatile__(						\
@@ -108,7 +108,8 @@ static __inline__ void atomic64_##op(s64 i, atomic64_t * v)		\
 }									\
 
 #define ATOMIC64_OP_RETURN(op, asm_op)					\
-static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v)	\
+static __inline__ s64							\
+arch_atomic64_##op##_return_relaxed(s64 i, atomic64_t * v)		\
 {									\
 	s64 temp, result;						\
 	__asm__ __volatile__(						\
@@ -127,7 +128,8 @@ static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v)	\
 }
 
 #define ATOMIC64_FETCH_OP(op, asm_op)					\
-static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v)	\
+static __inline__ s64							\
+arch_atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v)		\
 {									\
 	s64 temp, result;						\
 	__asm__ __volatile__(						\
@@ -155,18 +157,18 @@ static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v)	\
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
-#define atomic_add_return_relaxed	atomic_add_return_relaxed
-#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
-#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
-#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed
+#define arch_atomic_add_return_relaxed	arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed	arch_atomic_sub_return_relaxed
+#define arch_atomic_fetch_add_relaxed	arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed	arch_atomic_fetch_sub_relaxed
 
-#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
-#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
-#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
-#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed
+#define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed
+#define arch_atomic64_fetch_add_relaxed		arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub_relaxed		arch_atomic64_fetch_sub_relaxed
 
-#define atomic_andnot atomic_andnot
-#define atomic64_andnot atomic64_andnot
+#define arch_atomic_andnot		arch_atomic_andnot
+#define arch_atomic64_andnot		arch_atomic64_andnot
 
 #undef ATOMIC_OPS
 #define ATOMIC_OPS(op, asm)						\
@@ -180,15 +182,15 @@ ATOMIC_OPS(andnot, bic)
 ATOMIC_OPS(or, bis)
 ATOMIC_OPS(xor, xor)
 
-#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
-#define atomic_fetch_andnot_relaxed	atomic_fetch_andnot_relaxed
-#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
-#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_and_relaxed		arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_andnot_relaxed	arch_atomic_fetch_andnot_relaxed
+#define arch_atomic_fetch_or_relaxed		arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed		arch_atomic_fetch_xor_relaxed
 
-#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
-#define atomic64_fetch_andnot_relaxed	atomic64_fetch_andnot_relaxed
-#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
-#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed
+#define arch_atomic64_fetch_and_relaxed		arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_andnot_relaxed	arch_atomic64_fetch_andnot_relaxed
+#define arch_atomic64_fetch_or_relaxed		arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor_relaxed		arch_atomic64_fetch_xor_relaxed
 
 #undef ATOMIC_OPS
 #undef ATOMIC64_FETCH_OP
@@ -198,14 +200,18 @@ ATOMIC_OPS(xor, xor)
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
-#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic64_cmpxchg(v, old, new) \
+	(arch_cmpxchg(&((v)->counter), old, new))
+#define arch_atomic64_xchg(v, new) \
+	(arch_xchg(&((v)->counter), new))
 
-#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic_cmpxchg(v, old, new) \
+	(arch_cmpxchg(&((v)->counter), old, new))
+#define arch_atomic_xchg(v, new) \
+	(arch_xchg(&((v)->counter), new))
 
 /**
- * atomic_fetch_add_unless - add unless the number is a given value
+ * arch_atomic_fetch_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
@@ -213,7 +219,7 @@ ATOMIC_OPS(xor, xor)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns the old value of @v.
  */
-static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int c, new, old;
 	smp_mb();
@@ -234,10 +240,10 @@ static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 	smp_mb();
 	return old;
 }
-#define atomic_fetch_add_unless atomic_fetch_add_unless
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
 
 /**
- * atomic64_fetch_add_unless - add unless the number is a given value
+ * arch_atomic64_fetch_add_unless - add unless the number is a given value
  * @v: pointer of type atomic64_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
@@ -245,7 +251,7 @@ static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns the old value of @v.
  */
-static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
 	s64 c, new, old;
 	smp_mb();
@@ -266,16 +272,16 @@ static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 	smp_mb();
 	return old;
 }
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
 
 /*
- * atomic64_dec_if_positive - decrement by 1 if old value positive
+ * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
  * @v: pointer of type atomic_t
  *
  * The function returns the old value of *v minus 1, even if
  * the atomic variable, v, was not decremented.
  */
-static inline s64 atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
 {
 	s64 old, tmp;
 	smp_mb();
@@ -295,6 +301,6 @@ static inline s64 atomic64_dec_if_positive(atomic64_t *v)
 	smp_mb();
 	return old - 1;
 }
-#define atomic64_dec_if_positive atomic64_dec_if_positive
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
 
 #endif /* _ALPHA_ATOMIC_H */
diff --git a/arch/alpha/include/asm/cmpxchg.h b/arch/alpha/include/asm/cmpxchg.h
index 6c7c39452471..6e0a850aa9d3 100644
--- a/arch/alpha/include/asm/cmpxchg.h
+++ b/arch/alpha/include/asm/cmpxchg.h
@@ -17,7 +17,7 @@
 				       sizeof(*(ptr)));			\
 })
 
-#define cmpxchg_local(ptr, o, n)					\
+#define arch_cmpxchg_local(ptr, o, n)					\
 ({									\
 	__typeof__(*(ptr)) _o_ = (o);					\
 	__typeof__(*(ptr)) _n_ = (n);					\
@@ -26,7 +26,7 @@
 					  sizeof(*(ptr)));		\
 })
 
-#define cmpxchg64_local(ptr, o, n)					\
+#define arch_cmpxchg64_local(ptr, o, n)					\
 ({									\
 	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
 	cmpxchg_local((ptr), (o), (n));					\
@@ -42,7 +42,7 @@
  * The leading and the trailing memory barriers guarantee that these
  * operations are fully ordered.
  */
-#define xchg(ptr, x)							\
+#define arch_xchg(ptr, x)						\
 ({									\
 	__typeof__(*(ptr)) __ret;					\
 	__typeof__(*(ptr)) _x_ = (x);					\
@@ -53,7 +53,7 @@
 	__ret;								\
 })
 
-#define cmpxchg(ptr, o, n)						\
+#define arch_cmpxchg(ptr, o, n)						\
 ({									\
 	__typeof__(*(ptr)) __ret;					\
 	__typeof__(*(ptr)) _o_ = (o);					\
@@ -65,10 +65,10 @@
 	__ret;								\
 })
 
-#define cmpxchg64(ptr, o, n)						\
+#define arch_cmpxchg64(ptr, o, n)					\
 ({									\
 	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
-	cmpxchg((ptr), (o), (n));					\
+	arch_cmpxchg((ptr), (o), (n));					\
 })
 
 #undef ____cmpxchg
-- 
2.11.0



Thread overview: 57+ messages
2021-05-10  9:37 [PATCH 00/33] locking/atomic: convert all architectures to ARCH_ATOMIC Mark Rutland
2021-05-10  9:37 ` [PATCH 01/33] locking/atomic: make ARCH_ATOMIC a Kconfig symbol Mark Rutland
2021-05-10  9:37 ` [PATCH 02/33] locking/atomic: net: use linux/atomic.h for xchg & cmpxchg Mark Rutland
2021-05-10  9:37 ` [PATCH 03/33] locking/atomic: h8300: use asm-generic exclusively Mark Rutland
2021-05-10  9:37 ` [PATCH 04/33] locking/atomic: microblaze: " Mark Rutland
2021-05-10  9:37 ` [PATCH 05/33] locking/atomic: openrisc: avoid asm-generic/atomic.h Mark Rutland
2021-05-13 21:44   ` Stafford Horne
2021-05-14 10:17     ` Mark Rutland
2021-05-10  9:37 ` [PATCH 06/33] locking/atomic: atomic: remove stale comments Mark Rutland
2021-05-10  9:37 ` [PATCH 07/33] locking/atomic: atomic: remove redundant include Mark Rutland
2021-05-10  9:37 ` [PATCH 08/33] locking/atomic: atomic: simplify ifdeffery Mark Rutland
2021-05-10  9:37 ` [PATCH 09/33] locking/atomic: atomic: support ARCH_ATOMIC Mark Rutland
2021-05-10  9:37 ` [PATCH 10/33] locking/atomic: atomic64: " Mark Rutland
2021-05-10  9:37 ` [PATCH 11/33] locking/atomic: cmpxchg: make `generic` a prefix Mark Rutland
2021-05-11  9:29   ` Geert Uytterhoeven
2021-05-10  9:37 ` [PATCH 12/33] locking/atomic: cmpxchg: support ARCH_ATOMIC Mark Rutland
2021-05-10  9:37 ` Mark Rutland [this message]
2021-05-10  9:37 ` [PATCH 14/33] locking/atomic: arc: move to ARCH_ATOMIC Mark Rutland
2021-05-10 14:57   ` Mark Rutland
2021-05-10 19:48     ` Vineet Gupta
2021-05-10  9:37 ` [PATCH 15/33] locking/atomic: arm: " Mark Rutland
2021-05-13 10:49   ` Mark Rutland
2021-05-10  9:37 ` [PATCH 16/33] locking/atomic: csky: " Mark Rutland
2021-05-10 13:27   ` Guo Ren
2021-05-10  9:37 ` [PATCH 17/33] locking/atomic: h8300: " Mark Rutland
2021-05-10  9:37 ` [PATCH 18/33] locking/atomic: hexagon: " Mark Rutland
2021-05-10  9:37 ` [PATCH 19/33] locking/atomic: ia64: " Mark Rutland
2021-05-10  9:37 ` [PATCH 20/33] locking/atomic: m68k: " Mark Rutland
2021-05-11  9:27   ` Geert Uytterhoeven
2021-05-11 10:00     ` Mark Rutland
2021-05-11 10:27       ` Geert Uytterhoeven
2021-05-12 14:15         ` Greg Ungerer
2021-05-12 16:33           ` Mark Rutland
2021-05-10  9:37 ` [PATCH 21/33] locking/atomic: microblaze: " Mark Rutland
2021-05-10  9:37 ` [PATCH 22/33] locking/atomic: mips: " Mark Rutland
2021-05-10  9:37 ` [PATCH 23/33] locking/atomic: nds32: " Mark Rutland
2021-05-10  9:37 ` [PATCH 24/33] locking/atomic: nios2: " Mark Rutland
2021-05-10  9:37 ` [PATCH 25/33] locking/atomic: openrisc: " Mark Rutland
2021-05-15 21:47   ` Stafford Horne
2021-05-10  9:37 ` [PATCH 26/33] locking/atomic: parisc: " Mark Rutland
2021-05-10  9:37 ` [PATCH 27/33] locking/atomic: powerpc: " Mark Rutland
2021-05-11  9:16   ` Mark Rutland
2021-05-11  9:57     ` Mark Rutland
2021-05-10  9:37 ` [PATCH 28/33] locking/atomic: riscv: " Mark Rutland
2021-05-22 19:02   ` Palmer Dabbelt
2021-05-24 12:10     ` Mark Rutland
2021-05-10  9:37 ` [PATCH 29/33] locking/atomic: sh: " Mark Rutland
2021-05-10  9:37 ` [PATCH 30/33] locking/atomic: sparc: " Mark Rutland
2021-05-10  9:37 ` [PATCH 31/33] locking/atomic: xtensa: " Mark Rutland
2021-05-10 14:19   ` Max Filippov
2021-05-10  9:37 ` [PATCH 32/33] locking/atomic: delete !ARCH_ATOMIC remnants Mark Rutland
2021-05-11  9:30   ` Geert Uytterhoeven
2021-05-10  9:37 ` [PATCH 33/33] locking/atomics: atomic-instrumented: simplify ifdeffery Mark Rutland
2021-05-10 14:56 ` [PATCH 00/33] locking/atomic: convert all architectures to ARCH_ATOMIC Arnd Bergmann
2021-05-11  9:27   ` Mark Rutland
2021-05-12  7:28     ` Peter Zijlstra
2021-05-12  7:49       ` Arnd Bergmann
