From: Matthew Brost <matthew.brost@intel.com>
To: <intel-gfx@lists.freedesktop.org>, <dri-devel@lists.freedesktop.org>
Subject: [Intel-gfx] [PATCH 09/46] drm/i915: Add GT PM unpark worker
Date: Tue,  3 Aug 2021 15:29:06 -0700
Message-ID: <20210803222943.27686-10-matthew.brost@intel.com>
In-Reply-To: <20210803222943.27686-1-matthew.brost@intel.com>

Sometimes it is desirable to queue up work when a GT PM reference is not
held and have that work run on the next GT PM unpark.

Implement this with a per-GT list of pending work items, a helper that
adds a work item to the list (or queues it immediately if the GT is
already awake), and a wakeref post_get callback that drains the list on
unpark and queues each pending work item.

First user of this is deregistration of GuC contexts.
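
For reference, a minimal usage sketch of the new API, assuming a
hypothetical caller that embeds an intel_gt_pm_unpark_work (the names
my_unpark_worker and foo->unpark_work are illustrative, not part of
this patch):

	static void my_unpark_worker(struct work_struct *w)
	{
		struct intel_gt_pm_unpark_work *unpark_work =
			container_of(w, struct intel_gt_pm_unpark_work, worker);

		/* Do work that requires the GT to be unparked. */
	}

	/* One-time setup, e.g. at init time. */
	intel_gt_pm_unpark_work_init(&foo->unpark_work, my_unpark_worker);

	/*
	 * Queue immediately if the GT is awake, otherwise defer until the
	 * next unpark. Only a spinlock is taken, so this is safe to call
	 * from atomic context.
	 */
	intel_gt_pm_unpark_work_add(gt, &foo->unpark_work);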

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
 drivers/gpu/drm/i915/gt/intel_gt.c            |  3 ++
 drivers/gpu/drm/i915/gt/intel_gt_pm.c         |  8 +++++
 .../gpu/drm/i915/gt/intel_gt_pm_unpark_work.c | 35 +++++++++++++++++++
 .../gpu/drm/i915/gt/intel_gt_pm_unpark_work.h | 32 +++++++++++++++++
 drivers/gpu/drm/i915/gt/intel_gt_types.h      |  3 ++
 drivers/gpu/drm/i915/gt/uc/intel_guc.h        |  3 +-
 .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 14 +++++---
 drivers/gpu/drm/i915/intel_wakeref.c          |  5 +++
 drivers/gpu/drm/i915/intel_wakeref.h          |  1 +
 9 files changed, 99 insertions(+), 5 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/gt/intel_gt_pm_unpark_work.c
 create mode 100644 drivers/gpu/drm/i915/gt/intel_gt_pm_unpark_work.h

diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index a64aa43f7cd9..405558c08d6c 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -29,6 +29,9 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
 
 	spin_lock_init(&gt->irq_lock);
 
+	spin_lock_init(&gt->pm_unpark_work_lock);
+	INIT_LIST_HEAD(&gt->pm_unpark_work_list);
+
 	INIT_LIST_HEAD(&gt->closed_vma);
 	spin_lock_init(&gt->closed_lock);
 
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index dea8e2479897..564c11a3748b 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -90,6 +90,13 @@ static int __gt_unpark(struct intel_wakeref *wf)
 	return 0;
 }
 
+static void __gt_unpark_work_queue(struct intel_wakeref *wf)
+{
+	struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
+
+	intel_gt_pm_unpark_work_queue(gt);
+}
+
 static int __gt_park(struct intel_wakeref *wf)
 {
 	struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
@@ -118,6 +125,7 @@ static int __gt_park(struct intel_wakeref *wf)
 
 static const struct intel_wakeref_ops wf_ops = {
 	.get = __gt_unpark,
+	.post_get = __gt_unpark_work_queue,
 	.put = __gt_park,
 };
 
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_unpark_work.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_unpark_work.c
new file mode 100644
index 000000000000..23162dbd0c35
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_unpark_work.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_runtime_pm.h"
+#include "intel_gt_pm.h"
+
+void intel_gt_pm_unpark_work_queue(struct intel_gt *gt)
+{
+	struct intel_gt_pm_unpark_work *work, *next;
+	unsigned long flags;
+
+	spin_lock_irqsave(&gt->pm_unpark_work_lock, flags);
+	list_for_each_entry_safe(work, next,
+				 &gt->pm_unpark_work_list, link) {
+		list_del_init(&work->link);
+		queue_work(system_unbound_wq, &work->worker);
+	}
+	spin_unlock_irqrestore(&gt->pm_unpark_work_lock, flags);
+}
+
+void intel_gt_pm_unpark_work_add(struct intel_gt *gt,
+				 struct intel_gt_pm_unpark_work *work)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&gt->pm_unpark_work_lock, flags);
+	if (intel_gt_pm_is_awake(gt))
+		queue_work(system_unbound_wq, &work->worker);
+	else if (list_empty(&work->link))
+		list_add_tail(&work->link, &gt->pm_unpark_work_list);
+	spin_unlock_irqrestore(&gt->pm_unpark_work_lock, flags);
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_unpark_work.h b/drivers/gpu/drm/i915/gt/intel_gt_pm_unpark_work.h
new file mode 100644
index 000000000000..08e9011be023
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_unpark_work.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef INTEL_GT_PM_UNPARK_WORK_H
+#define INTEL_GT_PM_UNPARK_WORK_H
+
+#include <linux/list.h>
+#include <linux/workqueue.h>
+
+struct intel_gt;
+
+struct intel_gt_pm_unpark_work {
+	struct list_head link;
+	struct work_struct worker;
+};
+
+void intel_gt_pm_unpark_work_queue(struct intel_gt *gt);
+
+void intel_gt_pm_unpark_work_add(struct intel_gt *gt,
+				 struct intel_gt_pm_unpark_work *work);
+
+static inline void
+intel_gt_pm_unpark_work_init(struct intel_gt_pm_unpark_work *work,
+			     work_func_t fn)
+{
+	INIT_LIST_HEAD(&work->link);
+	INIT_WORK(&work->worker, fn);
+}
+
+#endif /* INTEL_GT_PM_UNPARK_WORK_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index 97a5075288d2..8d8a946561fa 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -91,6 +91,9 @@ struct intel_gt {
 	struct intel_wakeref wakeref;
 	atomic_t user_wakeref;
 
+	struct list_head pm_unpark_work_list;
+	spinlock_t pm_unpark_work_lock;	/* protect list */
+
 	struct list_head closed_vma;
 	spinlock_t closed_lock; /* guards the list of closed_vma */
 
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 72fdfa1f6ccd..aedd5a4281b8 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -18,6 +18,7 @@
 #include "intel_uc_fw.h"
 #include "i915_utils.h"
 #include "i915_vma.h"
+#include "gt/intel_gt_pm_unpark_work.h"
 
 struct __guc_ads_blob;
 
@@ -63,7 +64,7 @@ struct intel_guc {
 
 	spinlock_t destroy_lock;	/* protects list / worker */
 	struct list_head destroyed_contexts;
-	struct work_struct destroy_worker;
+	struct intel_gt_pm_unpark_work destroy_worker;
 
 	bool submission_supported;
 	bool submission_selected;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 262fa77b56e2..7fe4d1559a81 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1406,8 +1406,9 @@ int intel_guc_submission_init(struct intel_guc *guc)
 	ida_init(&guc->guc_ids);
 
 	spin_lock_init(&guc->destroy_lock);
+
 	INIT_LIST_HEAD(&guc->destroyed_contexts);
-	INIT_WORK(&guc->destroy_worker, destroy_worker_func);
+	intel_gt_pm_unpark_work_init(&guc->destroy_worker, destroy_worker_func);
 
 	return 0;
 }
@@ -2446,13 +2447,18 @@ static void deregister_destroyed_contexts(struct intel_guc *guc)
 
 static void destroy_worker_func(struct work_struct *w)
 {
+	struct intel_gt_pm_unpark_work *destroy_worker =
+		container_of(w, struct intel_gt_pm_unpark_work, worker);
 	struct intel_guc *guc =
-		container_of(w, struct intel_guc, destroy_worker);
+		container_of(destroy_worker, struct intel_guc, destroy_worker);
 	struct intel_gt *gt = guc_to_gt(guc);
 	int tmp;
 
-	with_intel_gt_pm(gt, tmp)
+	with_intel_gt_pm_if_awake(gt, tmp)
 		deregister_destroyed_contexts(guc);
+
+	if (!list_empty(&guc->destroyed_contexts))
+		intel_gt_pm_unpark_work_add(gt, destroy_worker);
 }
 
 static void guc_context_destroy(struct kref *kref)
@@ -2515,7 +2521,7 @@ static void guc_context_destroy(struct kref *kref)
 	 * take the GT PM for the first time which isn't allowed from an atomic
 	 * context.
 	 */
-	queue_work(system_unbound_wq, &guc->destroy_worker);
+	intel_gt_pm_unpark_work_add(guc_to_gt(guc), &guc->destroy_worker);
 }
 
 static int guc_context_alloc(struct intel_context *ce)
diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c
index dfd87d082218..282fc4f312e3 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.c
+++ b/drivers/gpu/drm/i915/intel_wakeref.c
@@ -24,6 +24,8 @@ static void rpm_put(struct intel_wakeref *wf)
 
 int __intel_wakeref_get_first(struct intel_wakeref *wf)
 {
+	bool do_post = false;
+
 	/*
 	 * Treat get/put as different subclasses, as we may need to run
 	 * the put callback from under the shrinker and do not want to
@@ -44,8 +46,11 @@ int __intel_wakeref_get_first(struct intel_wakeref *wf)
 		}
 
 		smp_mb__before_atomic(); /* release wf->count */
+		do_post = true;
 	}
 	atomic_inc(&wf->count);
+	if (do_post && wf->ops->post_get)
+		wf->ops->post_get(wf);
 	mutex_unlock(&wf->mutex);
 
 	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h
index 545c8f277c46..ef7e6a698e8a 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.h
+++ b/drivers/gpu/drm/i915/intel_wakeref.h
@@ -30,6 +30,7 @@ typedef depot_stack_handle_t intel_wakeref_t;
 
 struct intel_wakeref_ops {
 	int (*get)(struct intel_wakeref *wf);
+	void (*post_get)(struct intel_wakeref *wf);
 	int (*put)(struct intel_wakeref *wf);
 };
 
-- 
2.28.0

