From: Sean Christopherson <seanjc@google.com>
To: Paolo Bonzini <pbonzini@redhat.com>
Cc: Sean Christopherson <seanjc@google.com>,
Vitaly Kuznetsov <vkuznets@redhat.com>,
Wanpeng Li <wanpengli@tencent.com>,
Jim Mattson <jmattson@google.com>, Joerg Roedel <joro@8bytes.org>,
kvm@vger.kernel.org, linux-kernel@vger.kernel.org,
Yu Zhang <yu.c.zhang@linux.intel.com>,
Maxim Levitsky <mlevitsk@redhat.com>
Subject: [PATCH 23/54] KVM: x86/mmu: Use MMU's role_regs, not vCPU state, to compute mmu_role
Date: Tue, 22 Jun 2021 10:57:08 -0700
Message-ID: <20210622175739.3610207-24-seanjc@google.com>
In-Reply-To: <20210622175739.3610207-1-seanjc@google.com>
Use the provided role_regs to calculate the mmu_role instead of pulling
bits from current vCPU state. For some flows, e.g. nested TDP, the vCPU
state may not be correct (or relevant).
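As context for the mechanical conversions below: the role_regs snapshot
and its accessors were introduced earlier in this series (patch 20/54).
What follows is a minimal, simplified sketch of the pattern, with
hand-written types and accessors standing in for the kernel's
macro-generated ____is_*() helpers; only the architectural bit
positions are taken as given:

  #include <stdbool.h>

  #define X86_CR0_PG   (1UL << 31)    /* CR0.PG: paging enable */
  #define X86_CR4_PAE  (1UL << 5)     /* CR4.PAE: physical address extension */
  #define EFER_NX      (1ULL << 11)   /* EFER.NX: no-execute enable */

  /* Snapshot of the register state the MMU role is derived from. */
  struct kvm_mmu_role_regs {
          unsigned long cr0;
          unsigned long cr4;
          unsigned long long efer;
  };

  /* Accessors read only the snapshot, never live vCPU state. */
  static bool ____is_cr0_pg(const struct kvm_mmu_role_regs *regs)
  {
          return !!(regs->cr0 & X86_CR0_PG);
  }

  static bool ____is_efer_nx(const struct kvm_mmu_role_regs *regs)
  {
          return !!(regs->efer & EFER_NX);
  }

The caller chooses what goes into the snapshot, e.g. the nested NPT
path below fills it from the CR0/CR4/EFER values it is handed rather
than from the vCPU, so every role bit is computed from one consistent
source.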
Cc: Maxim Levitsky <mlevitsk@redhat.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
---
arch/x86/kvm/mmu/mmu.c | 92 ++++++++++++++++++++++++------------------
1 file changed, 52 insertions(+), 40 deletions(-)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 84a40488eba7..896e92eac28b 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4542,17 +4542,18 @@ static void paging32E_init_context(struct kvm_vcpu *vcpu,
paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL);
}
-static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu)
+static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu,
+ struct kvm_mmu_role_regs *regs)
{
union kvm_mmu_extended_role ext = {0};
- ext.cr0_pg = !!is_paging(vcpu);
- ext.cr4_pae = !!is_pae(vcpu);
- ext.cr4_smep = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
- ext.cr4_smap = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
- ext.cr4_pse = !!is_pse(vcpu);
- ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE);
- ext.cr4_la57 = !!kvm_read_cr4_bits(vcpu, X86_CR4_LA57);
+ ext.cr0_pg = ____is_cr0_pg(regs);
+ ext.cr4_pae = ____is_cr4_pae(regs);
+ ext.cr4_smep = ____is_cr4_smep(regs);
+ ext.cr4_smap = ____is_cr4_smap(regs);
+ ext.cr4_pse = ____is_cr4_pse(regs);
+ ext.cr4_pke = ____is_cr4_pke(regs);
+ ext.cr4_la57 = ____is_cr4_la57(regs);
ext.valid = 1;
@@ -4560,20 +4561,21 @@ static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu)
}
static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu,
+ struct kvm_mmu_role_regs *regs,
bool base_only)
{
union kvm_mmu_role role = {0};
role.base.access = ACC_ALL;
- role.base.nxe = !!is_nx(vcpu);
- role.base.cr0_wp = is_write_protection(vcpu);
+ role.base.nxe = ____is_efer_nx(regs);
+ role.base.cr0_wp = ____is_cr0_wp(regs);
role.base.smm = is_smm(vcpu);
role.base.guest_mode = is_guest_mode(vcpu);
if (base_only)
return role;
- role.ext = kvm_calc_mmu_role_ext(vcpu);
+ role.ext = kvm_calc_mmu_role_ext(vcpu, regs);
return role;
}
@@ -4588,9 +4590,10 @@ static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
}
static union kvm_mmu_role
-kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
+kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu,
+ struct kvm_mmu_role_regs *regs, bool base_only)
{
- union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);
+ union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs, base_only);
role.base.ad_disabled = (shadow_accessed_mask == 0);
role.base.level = kvm_mmu_get_tdp_level(vcpu);
@@ -4603,8 +4606,9 @@ kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
{
struct kvm_mmu *context = &vcpu->arch.root_mmu;
+ struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
union kvm_mmu_role new_role =
- kvm_calc_tdp_mmu_root_page_role(vcpu, false);
+ kvm_calc_tdp_mmu_root_page_role(vcpu, &regs, false);
if (new_role.as_u64 == context->mmu_role.as_u64)
return;
@@ -4648,30 +4652,30 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
}
static union kvm_mmu_role
-kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu, bool base_only)
+kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu,
+ struct kvm_mmu_role_regs *regs, bool base_only)
{
- union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);
+ union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs, base_only);
- role.base.smep_andnot_wp = role.ext.cr4_smep &&
- !is_write_protection(vcpu);
- role.base.smap_andnot_wp = role.ext.cr4_smap &&
- !is_write_protection(vcpu);
- role.base.gpte_is_8_bytes = !!is_pae(vcpu);
+ role.base.smep_andnot_wp = role.ext.cr4_smep && !____is_cr0_wp(regs);
+ role.base.smap_andnot_wp = role.ext.cr4_smap && !____is_cr0_wp(regs);
+ role.base.gpte_is_8_bytes = ____is_cr4_pae(regs);
return role;
}
static union kvm_mmu_role
-kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
+kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu,
+ struct kvm_mmu_role_regs *regs, bool base_only)
{
union kvm_mmu_role role =
- kvm_calc_shadow_root_page_role_common(vcpu, base_only);
+ kvm_calc_shadow_root_page_role_common(vcpu, regs, base_only);
- role.base.direct = !is_paging(vcpu);
+ role.base.direct = !____is_cr0_pg(regs);
- if (!is_long_mode(vcpu))
+ if (!____is_efer_lma(regs))
role.base.level = PT32E_ROOT_LEVEL;
- else if (is_la57_mode(vcpu))
+ else if (____is_cr4_la57(regs))
role.base.level = PT64_ROOT_5LEVEL;
else
role.base.level = PT64_ROOT_4LEVEL;
@@ -4709,17 +4713,18 @@ static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu,
{
struct kvm_mmu *context = &vcpu->arch.root_mmu;
union kvm_mmu_role new_role =
- kvm_calc_shadow_mmu_root_page_role(vcpu, false);
+ kvm_calc_shadow_mmu_root_page_role(vcpu, regs, false);
if (new_role.as_u64 != context->mmu_role.as_u64)
shadow_mmu_init_context(vcpu, context, regs, new_role);
}
static union kvm_mmu_role
-kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu)
+kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu,
+ struct kvm_mmu_role_regs *regs)
{
union kvm_mmu_role role =
- kvm_calc_shadow_root_page_role_common(vcpu, false);
+ kvm_calc_shadow_root_page_role_common(vcpu, regs, false);
role.base.direct = false;
role.base.level = kvm_mmu_get_tdp_level(vcpu);
@@ -4736,7 +4741,9 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
.cr4 = cr4,
.efer = efer,
};
- union kvm_mmu_role new_role = kvm_calc_shadow_npt_root_page_role(vcpu);
+ union kvm_mmu_role new_role;
+
+ new_role = kvm_calc_shadow_npt_root_page_role(vcpu, &regs);
__kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base);
@@ -4821,9 +4828,12 @@ static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
context->inject_page_fault = kvm_inject_page_fault;
}
-static union kvm_mmu_role kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu)
+static union kvm_mmu_role
+kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu, struct kvm_mmu_role_regs *regs)
{
- union kvm_mmu_role role = kvm_calc_shadow_root_page_role_common(vcpu, false);
+ union kvm_mmu_role role;
+
+ role = kvm_calc_shadow_root_page_role_common(vcpu, regs, false);
/*
* Nested MMUs are used only for walking L2's gva->gpa, they never have
@@ -4832,12 +4842,12 @@ static union kvm_mmu_role kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu)
*/
role.base.direct = true;
- if (!is_paging(vcpu))
+ if (!____is_cr0_pg(regs))
role.base.level = 0;
- else if (is_long_mode(vcpu))
- role.base.level = is_la57_mode(vcpu) ? PT64_ROOT_5LEVEL :
- PT64_ROOT_4LEVEL;
- else if (is_pae(vcpu))
+ else if (____is_efer_lma(regs))
+ role.base.level = ____is_cr4_la57(regs) ? PT64_ROOT_5LEVEL :
+ PT64_ROOT_4LEVEL;
+ else if (____is_cr4_pae(regs))
role.base.level = PT32E_ROOT_LEVEL;
else
role.base.level = PT32_ROOT_LEVEL;
@@ -4847,7 +4857,8 @@ static union kvm_mmu_role kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu)
static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
{
- union kvm_mmu_role new_role = kvm_calc_nested_mmu_role(vcpu);
+ struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
+ union kvm_mmu_role new_role = kvm_calc_nested_mmu_role(vcpu, &regs);
struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
if (new_role.as_u64 == g_context->mmu_role.as_u64)
@@ -4913,12 +4924,13 @@ EXPORT_SYMBOL_GPL(kvm_init_mmu);
static union kvm_mmu_page_role
kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu)
{
+ struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
union kvm_mmu_role role;
if (tdp_enabled)
- role = kvm_calc_tdp_mmu_root_page_role(vcpu, true);
+ role = kvm_calc_tdp_mmu_root_page_role(vcpu, &regs, true);
else
- role = kvm_calc_shadow_mmu_root_page_role(vcpu, true);
+ role = kvm_calc_shadow_mmu_root_page_role(vcpu, &regs, true);
return role.base;
}
--
2.32.0.288.g62a8d224e6-goog
Thread overview: 103+ messages
2021-06-22 17:56 [PATCH 00/54] KVM: x86/mmu: Bug fixes and summer cleaning Sean Christopherson
2021-06-22 17:56 ` [PATCH 01/54] KVM: x86/mmu: Remove broken WARN that fires on 32-bit KVM w/ nested EPT Sean Christopherson
2021-06-22 17:56 ` [PATCH 02/54] KVM: x86/mmu: Treat NX as used (not reserved) for all !TDP shadow MMUs Sean Christopherson
2021-06-22 17:56 ` [PATCH 03/54] KVM: x86: Properly reset MMU context at vCPU RESET/INIT Sean Christopherson
2021-06-23 13:59 ` Paolo Bonzini
2021-06-23 14:01 ` Paolo Bonzini
2021-06-23 14:50 ` Sean Christopherson
2021-06-22 17:56 ` [PATCH 04/54] KVM: x86/mmu: Use MMU's role to detect CR4.SMEP value in nested NPT walk Sean Christopherson
2021-06-22 17:56 ` [PATCH 05/54] Revert "KVM: x86/mmu: Drop kvm_mmu_extended_role.cr4_la57 hack" Sean Christopherson
2021-06-25 8:47 ` Yu Zhang
2021-06-25 8:57 ` Paolo Bonzini
2021-06-25 9:29 ` Yu Zhang
2021-06-25 10:25 ` Paolo Bonzini
2021-06-25 11:23 ` Yu Zhang
2021-06-22 17:56 ` [PATCH 06/54] KVM: x86: Force all MMUs to reinitialize if guest CPUID is modified Sean Christopherson
2021-06-22 17:56 ` [PATCH 07/54] KVM: x86: Alert userspace that KVM_SET_CPUID{,2} after KVM_RUN is broken Sean Christopherson
2021-06-23 14:16 ` Paolo Bonzini
2021-06-23 17:00 ` Jim Mattson
2021-06-23 17:11 ` Paolo Bonzini
2021-06-23 18:11 ` Jim Mattson
2021-06-23 18:49 ` Paolo Bonzini
2021-06-23 19:02 ` Jim Mattson
2021-06-23 19:53 ` Paolo Bonzini
2021-06-22 17:56 ` [PATCH 08/54] Revert "KVM: MMU: record maximum physical address width in kvm_mmu_extended_role" Sean Christopherson
2021-06-25 8:52 ` Yu Zhang
2021-06-22 17:56 ` [PATCH 09/54] KVM: x86/mmu: Unconditionally zap unsync SPs when creating >4k SP at GFN Sean Christopherson
2021-06-23 14:36 ` Paolo Bonzini
2021-06-23 15:08 ` Sean Christopherson
2021-06-23 16:38 ` Paolo Bonzini
2021-06-23 22:04 ` Sean Christopherson
2021-06-25 9:51 ` Yu Zhang
2021-06-25 10:26 ` Paolo Bonzini
2021-06-25 13:08 ` Yu Zhang
2021-06-22 17:56 ` [PATCH 10/54] KVM: x86/mmu: Replace EPT shadow page shenanigans with simpler check Sean Christopherson
2021-06-23 15:49 ` Paolo Bonzini
2021-06-23 16:17 ` Sean Christopherson
2021-06-23 16:41 ` Paolo Bonzini
2021-06-23 16:54 ` Sean Christopherson
2021-06-22 17:56 ` [PATCH 11/54] KVM: x86/mmu: WARN and zap SP when sync'ing if MMU role mismatches Sean Christopherson
2021-06-22 17:56 ` [PATCH 12/54] KVM: x86/mmu: Drop the intermediate "transient" __kvm_sync_page() Sean Christopherson
2021-06-23 16:54 ` Paolo Bonzini
2021-06-22 17:56 ` [PATCH 13/54] KVM: x86/mmu: Rename unsync helper and update related comments Sean Christopherson
2021-06-22 17:56 ` [PATCH 14/54] KVM: x86: Fix sizes used to pass around CR0, CR4, and EFER Sean Christopherson
2021-06-22 17:57 ` [PATCH 15/54] KVM: nSVM: Add a comment to document why nNPT uses vmcb01, not vCPU state Sean Christopherson
2021-06-23 17:06 ` Paolo Bonzini
2021-06-23 20:49 ` Sean Christopherson
2021-06-22 17:57 ` [PATCH 16/54] KVM: x86/mmu: Drop smep_andnot_wp check from "uses NX" for shadow MMUs Sean Christopherson
2021-06-23 17:11 ` Paolo Bonzini
2021-06-23 19:36 ` Sean Christopherson
2021-06-22 17:57 ` [PATCH 17/54] KVM: x86: Read and pass all CR0/CR4 role bits to shadow MMU helper Sean Christopherson
2021-06-22 17:57 ` [PATCH 18/54] KVM: x86/mmu: Move nested NPT reserved bit calculation into MMU proper Sean Christopherson
2021-06-23 17:13 ` Paolo Bonzini
2021-06-22 17:57 ` [PATCH 19/54] KVM: x86/mmu: Grab shadow root level from mmu_role for shadow MMUs Sean Christopherson
2021-06-22 17:57 ` [PATCH 20/54] KVM: x86/mmu: Add struct and helpers to retrieve MMU role bits from regs Sean Christopherson
2021-06-23 1:58 ` kernel test robot
2021-06-23 17:18 ` Paolo Bonzini
2021-06-22 17:57 ` [PATCH 21/54] KVM: x86/mmu: Consolidate misc updates into shadow_mmu_init_context() Sean Christopherson
2021-06-22 17:57 ` [PATCH 22/54] KVM: x86/mmu: Ignore CR0 and CR4 bits in nested EPT MMU role Sean Christopherson
2021-06-22 17:57 ` Sean Christopherson [this message]
2021-06-22 17:57 ` [PATCH 24/54] KVM: x86/mmu: Rename "nxe" role bit to "efer_nx" for macro shenanigans Sean Christopherson
2021-06-22 17:57 ` [PATCH 25/54] KVM: x86/mmu: Add helpers to query mmu_role bits Sean Christopherson
2021-06-23 20:02 ` Paolo Bonzini
2021-06-23 20:47 ` Sean Christopherson
2021-06-23 20:53 ` Paolo Bonzini
2021-06-22 17:57 ` [PATCH 26/54] KVM: x86/mmu: Do not set paging-related bits in MMU role if CR0.PG=0 Sean Christopherson
2021-06-22 17:57 ` [PATCH 27/54] KVM: x86/mmu: Set CR4.PKE/LA57 in MMU role iff long mode is active Sean Christopherson
2021-06-22 17:57 ` [PATCH 28/54] KVM: x86/mmu: Always Set new mmu_role immediately after checking old role Sean Christopherson
2021-06-22 17:57 ` [PATCH 29/54] KVM: x86/mmu: Don't grab CR4.PSE for calculating shadow reserved bits Sean Christopherson
2021-06-22 17:57 ` [PATCH 30/54] KVM: x86/mmu: Use MMU's role to get CR4.PSE for computing rsvd bits Sean Christopherson
2021-06-22 17:57 ` [PATCH 31/54] KVM: x86/mmu: Drop vCPU param from reserved bits calculator Sean Christopherson
2021-06-22 17:57 ` [PATCH 32/54] KVM: x86/mmu: Use MMU's role to compute permission bitmask Sean Christopherson
2021-06-22 17:57 ` [PATCH 33/54] KVM: x86/mmu: Use MMU's role to compute PKRU bitmask Sean Christopherson
2021-06-22 17:57 ` [PATCH 34/54] KVM: x86/mmu: Use MMU's roles to compute last non-leaf level Sean Christopherson
2021-06-22 17:57 ` [PATCH 35/54] KVM: x86/mmu: Use MMU's role to detect EFER.NX in guest page walk Sean Christopherson
2021-06-22 17:57 ` [PATCH 36/54] KVM: x86/mmu: Use MMU's role/role_regs to compute context's metadata Sean Christopherson
2021-06-22 17:57 ` [PATCH 37/54] KVM: x86/mmu: Use MMU's role to get EFER.NX during MMU configuration Sean Christopherson
2021-06-22 17:57 ` [PATCH 38/54] KVM: x86/mmu: Drop "nx" from MMU context now that there are no readers Sean Christopherson
2021-06-22 17:57 ` [PATCH 39/54] KVM: x86/mmu: Get nested MMU's root level from the MMU's role Sean Christopherson
2021-06-22 17:57 ` [PATCH 40/54] KVM: x86/mmu: Use MMU role_regs to get LA57, and drop vCPU LA57 helper Sean Christopherson
2021-06-22 17:57 ` [PATCH 41/54] KVM: x86/mmu: Consolidate reset_rsvds_bits_mask() calls Sean Christopherson
2021-06-23 20:07 ` Paolo Bonzini
2021-06-23 20:53 ` Sean Christopherson
2021-06-22 17:57 ` [PATCH 42/54] KVM: x86/mmu: Don't update nested guest's paging bitmasks if CR0.PG=0 Sean Christopherson
2021-06-22 17:57 ` [PATCH 43/54] KVM: x86/mmu: Add helper to update paging metadata Sean Christopherson
2021-06-22 17:57 ` [PATCH 44/54] KVM: x86/mmu: Add a helper to calculate root from role_regs Sean Christopherson
2021-06-22 17:57 ` [PATCH 45/54] KVM: x86/mmu: Collapse 32-bit PAE and 64-bit statements for helpers Sean Christopherson
2021-06-22 17:57 ` [PATCH 46/54] KVM: x86/mmu: Use MMU's role to determine PTTYPE Sean Christopherson
2021-06-22 17:57 ` [PATCH 47/54] KVM: x86/mmu: Add helpers to do full reserved SPTE checks w/ generic MMU Sean Christopherson
2021-06-23 20:13 ` Paolo Bonzini
2021-06-22 17:57 ` [PATCH 48/54] KVM: x86/mmu: WARN on any reserved SPTE value when making a valid SPTE Sean Christopherson
2021-06-22 17:57 ` [PATCH 49/54] KVM: x86: Enhance comments for MMU roles and nested transition trickiness Sean Christopherson
2021-06-22 17:57 ` [PATCH 50/54] KVM: x86/mmu: Optimize and clean up so called "last nonleaf level" logic Sean Christopherson
2021-06-23 20:22 ` Paolo Bonzini
2021-06-23 20:58 ` Sean Christopherson
2021-06-22 17:57 ` [PATCH 51/54] KVM: x86/mmu: Drop redundant rsvd bits reset for nested NPT Sean Christopherson
2021-06-22 17:57 ` [PATCH 52/54] KVM: x86/mmu: Get CR0.WP from MMU, not vCPU, in shadow page fault Sean Christopherson
2021-06-22 17:57 ` [PATCH 53/54] KVM: x86/mmu: Get CR4.SMEP " Sean Christopherson
2021-06-22 17:57 ` [PATCH 54/54] KVM: x86/mmu: Let guest use GBPAGES if supported in hardware and TDP is on Sean Christopherson
2021-06-23 20:29 ` [PATCH 00/54] KVM: x86/mmu: Bug fixes and summer cleaning Paolo Bonzini
2021-06-23 21:06 ` Sean Christopherson
2021-06-23 21:33 ` Paolo Bonzini
2021-06-23 22:08 ` Sean Christopherson
2021-06-23 22:12 ` Paolo Bonzini