kvm.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH] KVM: s390: filter and count invalid yields
@ 2019-10-10 10:21 Christian Borntraeger
  2019-10-10 10:25 ` David Hildenbrand
  0 siblings, 1 reply; 5+ messages in thread
From: Christian Borntraeger @ 2019-10-10 10:21 UTC (permalink / raw)
  To: Janosch Frank
  Cc: KVM, Cornelia Huck, Christian Borntraeger, David Hildenbrand,
	linux-s390, Thomas Huth

To analyze some performance issues with lock contention and scheduling
it is nice to know when diag9c did not result in any action.
At the same time avoid calling the scheduler (which can be expensive)
if we know that it does not make sense.

Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
---
 arch/s390/include/asm/kvm_host.h |  2 ++
 arch/s390/kvm/diag.c             | 23 +++++++++++++++++++----
 arch/s390/kvm/kvm-s390.c         |  2 ++
 3 files changed, 23 insertions(+), 4 deletions(-)

diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index abe60268335d..743cd5a63b37 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -392,6 +392,8 @@ struct kvm_vcpu_stat {
 	u64 diagnose_10;
 	u64 diagnose_44;
 	u64 diagnose_9c;
+	u64 diagnose_9c_success;
+	u64 diagnose_9c_ignored;
 	u64 diagnose_258;
 	u64 diagnose_308;
 	u64 diagnose_500;
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 45634b3d2e0a..2c729f020585 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -158,14 +158,29 @@ static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
 
 	tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
 	vcpu->stat.diagnose_9c++;
-	VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d", tid);
 
+	/* yield to self */
 	if (tid == vcpu->vcpu_id)
-		return 0;
+		goto no_yield;
 
+	/* yield to invalid */
 	tcpu = kvm_get_vcpu_by_id(vcpu->kvm, tid);
-	if (tcpu)
-		kvm_vcpu_yield_to(tcpu);
+	if (!tcpu)
+		goto no_yield;
+
+	/* target already running */
+	if (tcpu->cpu >= 0)
+		goto no_yield;
+
+	if (kvm_vcpu_yield_to(tcpu) <= 0)
+		goto no_yield;
+
+	VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d: done", tid);
+	vcpu->stat.diagnose_9c_success++;
+	return 0;
+no_yield:
+	VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d: ignored", tid);
+	vcpu->stat.diagnose_9c_ignored++;
 	return 0;
 }
 
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 4a3bc40ca1a4..b368e835f2f7 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -155,6 +155,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "instruction_diag_10", VCPU_STAT(diagnose_10) },
 	{ "instruction_diag_44", VCPU_STAT(diagnose_44) },
 	{ "instruction_diag_9c", VCPU_STAT(diagnose_9c) },
+	{ "diag_9c_success", VCPU_STAT(diagnose_9c_success) },
+	{ "diag_9c_ignored", VCPU_STAT(diagnose_9c_ignored) },
 	{ "instruction_diag_258", VCPU_STAT(diagnose_258) },
 	{ "instruction_diag_308", VCPU_STAT(diagnose_308) },
 	{ "instruction_diag_500", VCPU_STAT(diagnose_500) },
-- 
2.21.0


^ permalink raw reply related	[flat|nested] 5+ messages in thread

* Re: [PATCH] KVM: s390: filter and count invalid yields
  2019-10-10 10:21 [PATCH] KVM: s390: filter and count invalid yields Christian Borntraeger
@ 2019-10-10 10:25 ` David Hildenbrand
  2019-10-10 10:28   ` Christian Borntraeger
  0 siblings, 1 reply; 5+ messages in thread
From: David Hildenbrand @ 2019-10-10 10:25 UTC (permalink / raw)
  To: Christian Borntraeger, Janosch Frank
  Cc: KVM, Cornelia Huck, linux-s390, Thomas Huth

On 10.10.19 12:21, Christian Borntraeger wrote:
> To analyze some performance issues with lock contention and scheduling
> it is nice to know when diag9c did not result in any action.
> At the same time avoid calling the scheduler (which can be expensive)
> if we know that it does not make sense.
> 
> Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
> ---
>  arch/s390/include/asm/kvm_host.h |  2 ++
>  arch/s390/kvm/diag.c             | 23 +++++++++++++++++++----
>  arch/s390/kvm/kvm-s390.c         |  2 ++
>  3 files changed, 23 insertions(+), 4 deletions(-)
> 
> diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
> index abe60268335d..743cd5a63b37 100644
> --- a/arch/s390/include/asm/kvm_host.h
> +++ b/arch/s390/include/asm/kvm_host.h
> @@ -392,6 +392,8 @@ struct kvm_vcpu_stat {
>  	u64 diagnose_10;
>  	u64 diagnose_44;
>  	u64 diagnose_9c;
> +	u64 diagnose_9c_success;
> +	u64 diagnose_9c_ignored;

Can't you derive the one from the other with diagnose_9c? Just sayin,
one would be sufficient.

>  	u64 diagnose_258;
>  	u64 diagnose_308;
>  	u64 diagnose_500;
> diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
> index 45634b3d2e0a..2c729f020585 100644
> --- a/arch/s390/kvm/diag.c
> +++ b/arch/s390/kvm/diag.c
> @@ -158,14 +158,29 @@ static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
>  
>  	tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
>  	vcpu->stat.diagnose_9c++;
> -	VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d", tid);
>  
> +	/* yield to self */
>  	if (tid == vcpu->vcpu_id)
> -		return 0;
> +		goto no_yield;
>  
> +	/* yield to invalid */
>  	tcpu = kvm_get_vcpu_by_id(vcpu->kvm, tid);
> -	if (tcpu)
> -		kvm_vcpu_yield_to(tcpu);
> +	if (!tcpu)
> +		goto no_yield;
> +
> +	/* target already running */
> +	if (tcpu->cpu >= 0)
> +		goto no_yield;

Wonder if it's worth moving this optimization to a separate patch.

> +
> +	if (kvm_vcpu_yield_to(tcpu) <= 0)
> +		goto no_yield;
> +
> +	VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d: done", tid);
> +	vcpu->stat.diagnose_9c_success++;
> +	return 0;
> +no_yield:
> +	VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d: ignored", tid);
> +	vcpu->stat.diagnose_9c_ignored++;
>  	return 0;
>  }
>  
> diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
> index 4a3bc40ca1a4..b368e835f2f7 100644
> --- a/arch/s390/kvm/kvm-s390.c
> +++ b/arch/s390/kvm/kvm-s390.c
> @@ -155,6 +155,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
>  	{ "instruction_diag_10", VCPU_STAT(diagnose_10) },
>  	{ "instruction_diag_44", VCPU_STAT(diagnose_44) },
>  	{ "instruction_diag_9c", VCPU_STAT(diagnose_9c) },
> +	{ "diag_9c_success", VCPU_STAT(diagnose_9c_success) },
> +	{ "diag_9c_ignored", VCPU_STAT(diagnose_9c_ignored) },
>  	{ "instruction_diag_258", VCPU_STAT(diagnose_258) },
>  	{ "instruction_diag_308", VCPU_STAT(diagnose_308) },
>  	{ "instruction_diag_500", VCPU_STAT(diagnose_500) },
> 


-- 

Thanks,

David / dhildenb

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH] KVM: s390: filter and count invalid yields
  2019-10-10 10:25 ` David Hildenbrand
@ 2019-10-10 10:28   ` Christian Borntraeger
  2019-10-10 10:31     ` David Hildenbrand
  2019-10-10 11:04     ` Cornelia Huck
  0 siblings, 2 replies; 5+ messages in thread
From: Christian Borntraeger @ 2019-10-10 10:28 UTC (permalink / raw)
  To: David Hildenbrand, Janosch Frank
  Cc: KVM, Cornelia Huck, linux-s390, Thomas Huth



On 10.10.19 12:25, David Hildenbrand wrote:
> On 10.10.19 12:21, Christian Borntraeger wrote:
>> To analyze some performance issues with lock contention and scheduling
>> it is nice to know when diag9c did not result in any action.
>> At the same time avoid calling the scheduler (which can be expensive)
>> if we know that it does not make sense.
>>
>> Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
>> ---
>>  arch/s390/include/asm/kvm_host.h |  2 ++
>>  arch/s390/kvm/diag.c             | 23 +++++++++++++++++++----
>>  arch/s390/kvm/kvm-s390.c         |  2 ++
>>  3 files changed, 23 insertions(+), 4 deletions(-)
>>
>> diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
>> index abe60268335d..743cd5a63b37 100644
>> --- a/arch/s390/include/asm/kvm_host.h
>> +++ b/arch/s390/include/asm/kvm_host.h
>> @@ -392,6 +392,8 @@ struct kvm_vcpu_stat {
>>  	u64 diagnose_10;
>>  	u64 diagnose_44;
>>  	u64 diagnose_9c;
>> +	u64 diagnose_9c_success;
>> +	u64 diagnose_9c_ignored;
> 
> Can't you derive the one from the other with diagnose_9c? Just sayin,
> one would be sufficient.

You mean diagnose_9c = diagnose_9c_success + diagnose_9c_ignored anyway so this is redundant?
Could just do diagnose_9c  and diagnose_9c_ignored if you prefer that.


> 
>>  	u64 diagnose_258;
>>  	u64 diagnose_308;
>>  	u64 diagnose_500;
>> diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
>> index 45634b3d2e0a..2c729f020585 100644
>> --- a/arch/s390/kvm/diag.c
>> +++ b/arch/s390/kvm/diag.c
>> @@ -158,14 +158,29 @@ static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
>>  
>>  	tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
>>  	vcpu->stat.diagnose_9c++;
>> -	VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d", tid);
>>  
>> +	/* yield to self */
>>  	if (tid == vcpu->vcpu_id)
>> -		return 0;
>> +		goto no_yield;
>>  
>> +	/* yield to invalid */
>>  	tcpu = kvm_get_vcpu_by_id(vcpu->kvm, tid);
>> -	if (tcpu)
>> -		kvm_vcpu_yield_to(tcpu);
>> +	if (!tcpu)
>> +		goto no_yield;
>> +
>> +	/* target already running */
>> +	if (tcpu->cpu >= 0)
>> +		goto no_yield;
> 
> Wonder if it's worth moving this optimization to a separate patch.

Could do if you prefer that. 


> 
>> +
>> +	if (kvm_vcpu_yield_to(tcpu) <= 0)
>> +		goto no_yield;
>> +
>> +	VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d: done", tid);
>> +	vcpu->stat.diagnose_9c_success++;
>> +	return 0;
>> +no_yield:
>> +	VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d: ignored", tid);
>> +	vcpu->stat.diagnose_9c_ignored++;
>>  	return 0;
>>  }
>>  
>> diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
>> index 4a3bc40ca1a4..b368e835f2f7 100644
>> --- a/arch/s390/kvm/kvm-s390.c
>> +++ b/arch/s390/kvm/kvm-s390.c
>> @@ -155,6 +155,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
>>  	{ "instruction_diag_10", VCPU_STAT(diagnose_10) },
>>  	{ "instruction_diag_44", VCPU_STAT(diagnose_44) },
>>  	{ "instruction_diag_9c", VCPU_STAT(diagnose_9c) },
>> +	{ "diag_9c_success", VCPU_STAT(diagnose_9c_success) },
>> +	{ "diag_9c_ignored", VCPU_STAT(diagnose_9c_ignored) },
>>  	{ "instruction_diag_258", VCPU_STAT(diagnose_258) },
>>  	{ "instruction_diag_308", VCPU_STAT(diagnose_308) },
>>  	{ "instruction_diag_500", VCPU_STAT(diagnose_500) },
>>
> 
> 


^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH] KVM: s390: filter and count invalid yields
  2019-10-10 10:28   ` Christian Borntraeger
@ 2019-10-10 10:31     ` David Hildenbrand
  2019-10-10 11:04     ` Cornelia Huck
  1 sibling, 0 replies; 5+ messages in thread
From: David Hildenbrand @ 2019-10-10 10:31 UTC (permalink / raw)
  To: Christian Borntraeger, Janosch Frank
  Cc: KVM, Cornelia Huck, linux-s390, Thomas Huth

On 10.10.19 12:28, Christian Borntraeger wrote:
> 
> 
> On 10.10.19 12:25, David Hildenbrand wrote:
>> On 10.10.19 12:21, Christian Borntraeger wrote:
>>> To analyze some performance issues with lock contention and scheduling
>>> it is nice to know when diag9c did not result in any action.
>>> At the same time avoid calling the scheduler (which can be expensive)
>>> if we know that it does not make sense.
>>>
>>> Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
>>> ---
>>>  arch/s390/include/asm/kvm_host.h |  2 ++
>>>  arch/s390/kvm/diag.c             | 23 +++++++++++++++++++----
>>>  arch/s390/kvm/kvm-s390.c         |  2 ++
>>>  3 files changed, 23 insertions(+), 4 deletions(-)
>>>
>>> diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
>>> index abe60268335d..743cd5a63b37 100644
>>> --- a/arch/s390/include/asm/kvm_host.h
>>> +++ b/arch/s390/include/asm/kvm_host.h
>>> @@ -392,6 +392,8 @@ struct kvm_vcpu_stat {
>>>  	u64 diagnose_10;
>>>  	u64 diagnose_44;
>>>  	u64 diagnose_9c;
>>> +	u64 diagnose_9c_success;
>>> +	u64 diagnose_9c_ignored;
>>
>> Can't you derive the one from the other with diagnose_9c? Just sayin,
>> one would be sufficient.
> 
> You mean diagnose_9c = diagnose_9c_success + diagnose_9c_ignored anyway so this is redundant?
> Could just do diagnose_9c  and diagnose_9c_ignored if you prefer that.
> 

I think that would make sense, but however you prefer.

> 
>>
>>>  	u64 diagnose_258;
>>>  	u64 diagnose_308;
>>>  	u64 diagnose_500;
>>> diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
>>> index 45634b3d2e0a..2c729f020585 100644
>>> --- a/arch/s390/kvm/diag.c
>>> +++ b/arch/s390/kvm/diag.c
>>> @@ -158,14 +158,29 @@ static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
>>>  
>>>  	tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
>>>  	vcpu->stat.diagnose_9c++;
>>> -	VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d", tid);
>>>  
>>> +	/* yield to self */
>>>  	if (tid == vcpu->vcpu_id)
>>> -		return 0;
>>> +		goto no_yield;
>>>  
>>> +	/* yield to invalid */
>>>  	tcpu = kvm_get_vcpu_by_id(vcpu->kvm, tid);
>>> -	if (tcpu)
>>> -		kvm_vcpu_yield_to(tcpu);
>>> +	if (!tcpu)
>>> +		goto no_yield;
>>> +
>>> +	/* target already running */
>>> +	if (tcpu->cpu >= 0)
>>> +		goto no_yield;
>>
>> Wonder if it's worth moving this optimization to a separate patch.
> 
> Could do if you prefer that. 
> 

Whatever you prefer, I would have put it into a separate patch :)

Take my

Reviewed-by: David Hildenbrand <david@redhat.com>

for either change.

-- 

Thanks,

David / dhildenb

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH] KVM: s390: filter and count invalid yields
  2019-10-10 10:28   ` Christian Borntraeger
  2019-10-10 10:31     ` David Hildenbrand
@ 2019-10-10 11:04     ` Cornelia Huck
  1 sibling, 0 replies; 5+ messages in thread
From: Cornelia Huck @ 2019-10-10 11:04 UTC (permalink / raw)
  To: Christian Borntraeger
  Cc: David Hildenbrand, Janosch Frank, KVM, linux-s390, Thomas Huth

On Thu, 10 Oct 2019 12:28:50 +0200
Christian Borntraeger <borntraeger@de.ibm.com> wrote:

> On 10.10.19 12:25, David Hildenbrand wrote:
> > On 10.10.19 12:21, Christian Borntraeger wrote:  
> >> To analyze some performance issues with lock contention and scheduling
> >> it is nice to know when diag9c did not result in any action.
> >> At the same time avoid calling the scheduler (which can be expensive)
> >> if we know that it does not make sense.
> >>
> >> Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
> >> ---
> >>  arch/s390/include/asm/kvm_host.h |  2 ++
> >>  arch/s390/kvm/diag.c             | 23 +++++++++++++++++++----
> >>  arch/s390/kvm/kvm-s390.c         |  2 ++
> >>  3 files changed, 23 insertions(+), 4 deletions(-)
> >>
> >> diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
> >> index abe60268335d..743cd5a63b37 100644
> >> --- a/arch/s390/include/asm/kvm_host.h
> >> +++ b/arch/s390/include/asm/kvm_host.h
> >> @@ -392,6 +392,8 @@ struct kvm_vcpu_stat {
> >>  	u64 diagnose_10;
> >>  	u64 diagnose_44;
> >>  	u64 diagnose_9c;
> >> +	u64 diagnose_9c_success;
> >> +	u64 diagnose_9c_ignored;  
> > 
> > Can't you derive the one from the other with diagnose_9c? Just sayin,
> > one would be sufficient.  
> 
> You mean diagnose_9c = diagnose_9c_success + diagnose_9c_ignored anyway so this is redundant?
> Could just do diagnose_9c  and diagnose_9c_ignored if you prefer that.

Voting for that :)

> 
> 
> >   
> >>  	u64 diagnose_258;
> >>  	u64 diagnose_308;
> >>  	u64 diagnose_500;
> >> diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
> >> index 45634b3d2e0a..2c729f020585 100644
> >> --- a/arch/s390/kvm/diag.c
> >> +++ b/arch/s390/kvm/diag.c
> >> @@ -158,14 +158,29 @@ static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
> >>  
> >>  	tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
> >>  	vcpu->stat.diagnose_9c++;
> >> -	VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d", tid);
> >>  
> >> +	/* yield to self */
> >>  	if (tid == vcpu->vcpu_id)
> >> -		return 0;
> >> +		goto no_yield;
> >>  
> >> +	/* yield to invalid */
> >>  	tcpu = kvm_get_vcpu_by_id(vcpu->kvm, tid);
> >> -	if (tcpu)
> >> -		kvm_vcpu_yield_to(tcpu);
> >> +	if (!tcpu)
> >> +		goto no_yield;
> >> +
> >> +	/* target already running */
> >> +	if (tcpu->cpu >= 0)
> >> +		goto no_yield;  
> > 
> > Wonder if it's worth moving this optimization to a separate patch.  
> 
> Could do if you prefer that. 

Voting for that as well :)

> 
> 
> >   
> >> +
> >> +	if (kvm_vcpu_yield_to(tcpu) <= 0)
> >> +		goto no_yield;
> >> +
> >> +	VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d: done", tid);
> >> +	vcpu->stat.diagnose_9c_success++;
> >> +	return 0;
> >> +no_yield:
> >> +	VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d: ignored", tid);
> >> +	vcpu->stat.diagnose_9c_ignored++;
> >>  	return 0;
> >>  }
> >>  
> >> diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
> >> index 4a3bc40ca1a4..b368e835f2f7 100644
> >> --- a/arch/s390/kvm/kvm-s390.c
> >> +++ b/arch/s390/kvm/kvm-s390.c
> >> @@ -155,6 +155,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
> >>  	{ "instruction_diag_10", VCPU_STAT(diagnose_10) },
> >>  	{ "instruction_diag_44", VCPU_STAT(diagnose_44) },
> >>  	{ "instruction_diag_9c", VCPU_STAT(diagnose_9c) },
> >> +	{ "diag_9c_success", VCPU_STAT(diagnose_9c_success) },
> >> +	{ "diag_9c_ignored", VCPU_STAT(diagnose_9c_ignored) },
> >>  	{ "instruction_diag_258", VCPU_STAT(diagnose_258) },
> >>  	{ "instruction_diag_308", VCPU_STAT(diagnose_308) },
> >>  	{ "instruction_diag_500", VCPU_STAT(diagnose_500) },
> >>  
> > 
> >   
> 


^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2019-10-10 11:04 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-10-10 10:21 [PATCH] KVM: s390: filter and count invalid yields Christian Borntraeger
2019-10-10 10:25 ` David Hildenbrand
2019-10-10 10:28   ` Christian Borntraeger
2019-10-10 10:31     ` David Hildenbrand
2019-10-10 11:04     ` Cornelia Huck

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).