From: Robert Foley <robert.foley@linaro.org>
To: qemu-devel@nongnu.org
Cc: robert.foley@linaro.org, richard.henderson@linaro.org,
	"Emilio G. Cota" <cota@braap.org>, qemu-ppc@nongnu.org,
	peter.puhov@linaro.org, alex.bennee@linaro.org,
	David Gibson <david@gibson.dropbear.id.au>
Subject: [PATCH v8 19/74] ppc: convert to cpu_halted
Date: Thu, 26 Mar 2020 15:31:01 -0400
Message-ID: <20200326193156.4322-20-robert.foley@linaro.org>
In-Reply-To: <20200326193156.4322-1-robert.foley@linaro.org>

From: "Emilio G. Cota" <cota@braap.org>

In ppce500_spin.c, acquire the lock just once to update both
cpu->halted and cpu->stopped.

In hw/ppc/spapr_hcall.c, acquire the lock just once to update
cpu->halted and call cpu_has_work, since later in the series
we'll acquire the BQL (if not already held) from cpu_has_work.

Cc: David Gibson <david@gibson.dropbear.id.au>
Cc: qemu-ppc@nongnu.org
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Acked-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Emilio G. Cota <cota@braap.org>
[RF: hw/ppc/spapr_hcall.c, spapr_rtas.c more conversions]
Signed-off-by: Robert Foley <robert.foley@linaro.org>
---
 hw/ppc/e500.c                   |  4 ++--
 hw/ppc/ppc.c                    | 10 +++++-----
 hw/ppc/ppce500_spin.c           |  6 ++++--
 hw/ppc/spapr_cpu_core.c         |  4 ++--
 hw/ppc/spapr_hcall.c            | 14 ++++++++------
 hw/ppc/spapr_rtas.c             |  8 ++++----
 target/ppc/excp_helper.c        |  4 ++--
 target/ppc/helper_regs.h        |  2 +-
 target/ppc/kvm.c                |  4 ++--
 target/ppc/translate_init.inc.c |  8 ++++----
 10 files changed, 34 insertions(+), 30 deletions(-)

diff --git a/hw/ppc/e500.c b/hw/ppc/e500.c
index 854cd3ac46..77cc1d245b 100644
--- a/hw/ppc/e500.c
+++ b/hw/ppc/e500.c
@@ -706,7 +706,7 @@ static void ppce500_cpu_reset_sec(void *opaque)
 
     /* Secondary CPU starts in halted state for now. Needs to change when
        implementing non-kernel boot. */
-    cs->halted = 1;
+    cpu_halted_set(cs, 1);
     cs->exception_index = EXCP_HLT;
 }
 
@@ -720,7 +720,7 @@ static void ppce500_cpu_reset(void *opaque)
     cpu_reset(cs);
 
     /* Set initial guest state. */
-    cs->halted = 0;
+    cpu_halted_set(cs, 0);
     env->gpr[1] = (16 * MiB) - 8;
     env->gpr[3] = bi->dt_base;
     env->gpr[4] = 0;
diff --git a/hw/ppc/ppc.c b/hw/ppc/ppc.c
index 4a11fb1640..0e7386ff88 100644
--- a/hw/ppc/ppc.c
+++ b/hw/ppc/ppc.c
@@ -149,7 +149,7 @@ static void ppc6xx_set_irq(void *opaque, int pin, int level)
             /* XXX: Note that the only way to restart the CPU is to reset it */
             if (level) {
                 LOG_IRQ("%s: stop the CPU\n", __func__);
-                cs->halted = 1;
+                cpu_halted_set(cs, 1);
             }
             break;
         case PPC6xx_INPUT_HRESET:
@@ -228,10 +228,10 @@ static void ppc970_set_irq(void *opaque, int pin, int level)
             /* XXX: TODO: relay the signal to CKSTP_OUT pin */
             if (level) {
                 LOG_IRQ("%s: stop the CPU\n", __func__);
-                cs->halted = 1;
+                cpu_halted_set(cs, 1);
             } else {
                 LOG_IRQ("%s: restart the CPU\n", __func__);
-                cs->halted = 0;
+                cpu_halted_set(cs, 0);
                 qemu_cpu_kick(cs);
             }
             break;
@@ -445,10 +445,10 @@ static void ppc40x_set_irq(void *opaque, int pin, int level)
             /* Level sensitive - active low */
             if (level) {
                 LOG_IRQ("%s: stop the CPU\n", __func__);
-                cs->halted = 1;
+                cpu_halted_set(cs, 1);
             } else {
                 LOG_IRQ("%s: restart the CPU\n", __func__);
-                cs->halted = 0;
+                cpu_halted_set(cs, 0);
                 qemu_cpu_kick(cs);
             }
             break;
diff --git a/hw/ppc/ppce500_spin.c b/hw/ppc/ppce500_spin.c
index 66c1065db2..79313944cf 100644
--- a/hw/ppc/ppce500_spin.c
+++ b/hw/ppc/ppce500_spin.c
@@ -107,9 +107,11 @@ static void spin_kick(CPUState *cs, run_on_cpu_data data)
     map_start = ldq_p(&curspin->addr) & ~(map_size - 1);
     mmubooke_create_initial_mapping(env, 0, map_start, map_size);
 
-    cs->halted = 0;
-    cs->exception_index = -1;
+    cpu_mutex_lock(cs);
+    cpu_halted_set(cs, 0);
     cs->stopped = false;
+    cpu_mutex_unlock(cs);
+    cs->exception_index = -1;
     qemu_cpu_kick(cs);
 }
 
diff --git a/hw/ppc/spapr_cpu_core.c b/hw/ppc/spapr_cpu_core.c
index ac1c109427..d655ce588f 100644
--- a/hw/ppc/spapr_cpu_core.c
+++ b/hw/ppc/spapr_cpu_core.c
@@ -39,7 +39,7 @@ static void spapr_reset_vcpu(PowerPCCPU *cpu)
     /* All CPUs start halted. CPU0 is unhalted from the machine level
      * reset code and the rest are explicitly started up by the guest
      * using an RTAS call */
-    cs->halted = 1;
+    cpu_halted_set(cs, 1);
 
     env->spr[SPR_HIOR] = 0;
 
@@ -88,7 +88,7 @@ void spapr_cpu_set_entry_state(PowerPCCPU *cpu, target_ulong nip,
     env->gpr[3] = r3;
     env->gpr[4] = r4;
     kvmppc_set_reg_ppc_online(cpu, 1);
-    CPU(cpu)->halted = 0;
+    cpu_halted_set(CPU(cpu), 0);
     /* Enable Power-saving mode Exit Cause exceptions */
     ppc_store_lpcr(cpu, env->spr[SPR_LPCR] | pcc->lpcr_pm);
 }
diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c
index 0d50fc9117..a9485000e4 100644
--- a/hw/ppc/spapr_hcall.c
+++ b/hw/ppc/spapr_hcall.c
@@ -1058,17 +1058,19 @@ static target_ulong h_cede(PowerPCCPU *cpu, SpaprMachineState *spapr,
     env->msr |= (1ULL << MSR_EE);
     hreg_compute_hflags(env);
 
+    cpu_mutex_lock(cs);
     if (spapr_cpu->prod) {
         spapr_cpu->prod = false;
+        cpu_mutex_unlock(cs);
         return H_SUCCESS;
     }
 
     if (!cpu_has_work(cs)) {
-        cs->halted = 1;
+        cpu_halted_set(cs, 1);
         cs->exception_index = EXCP_HLT;
         cs->exit_request = 1;
     }
-
+    cpu_mutex_unlock(cs);
     return H_SUCCESS;
 }
 
@@ -1085,7 +1087,7 @@ static target_ulong h_confer_self(PowerPCCPU *cpu)
         spapr_cpu->prod = false;
         return H_SUCCESS;
     }
-    cs->halted = 1;
+    cpu_halted_set(cs, 1);
     cs->exception_index = EXCP_HALTED;
     cs->exit_request = 1;
 
@@ -1116,7 +1118,7 @@ static target_ulong h_join(PowerPCCPU *cpu, SpaprMachineState *spapr,
         }
 
         /* Don't have a way to indicate joined, so use halted && MSR[EE]=0 */
-        if (!cs->halted || (e->msr & (1ULL << MSR_EE))) {
+        if (!cpu_halted(cs) || (e->msr & (1ULL << MSR_EE))) {
             last_unjoined = false;
             break;
         }
@@ -1199,7 +1201,7 @@ static target_ulong h_prod(PowerPCCPU *cpu, SpaprMachineState *spapr,
 
     spapr_cpu = spapr_cpu_state(tcpu);
     spapr_cpu->prod = true;
-    cs->halted = 0;
+    cpu_halted_set(cs, 0);
     qemu_cpu_kick(cs);
 
     return H_SUCCESS;
@@ -1688,7 +1690,7 @@ static target_ulong h_client_architecture_support(PowerPCCPU *cpu,
         if (cs == CPU(cpu)) {
             continue;
         }
-        if (!cs->halted) {
+        if (!cpu_halted(cs)) {
             warn_report("guest has multiple active vCPUs at CAS, which is not allowed");
             return H_MULTI_THREADS_ACTIVE;
         }
diff --git a/hw/ppc/spapr_rtas.c b/hw/ppc/spapr_rtas.c
index 9fb8c8632a..84c26edb60 100644
--- a/hw/ppc/spapr_rtas.c
+++ b/hw/ppc/spapr_rtas.c
@@ -111,7 +111,7 @@ static void rtas_query_cpu_stopped_state(PowerPCCPU *cpu_,
     id = rtas_ld(args, 0);
     cpu = spapr_find_cpu(id);
     if (cpu != NULL) {
-        if (CPU(cpu)->halted) {
+        if (cpu_halted(CPU(cpu))) {
             rtas_st(rets, 1, 0);
         } else {
             rtas_st(rets, 1, 2);
@@ -155,7 +155,7 @@ static void rtas_start_cpu(PowerPCCPU *callcpu, SpaprMachineState *spapr,
     env = &newcpu->env;
     pcc = POWERPC_CPU_GET_CLASS(newcpu);
 
-    if (!CPU(newcpu)->halted) {
+    if (!cpu_halted(CPU(newcpu))) {
         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
         return;
     }
@@ -213,7 +213,7 @@ static void rtas_stop_self(PowerPCCPU *cpu, SpaprMachineState *spapr,
      */
     ppc_store_lpcr(cpu, env->spr[SPR_LPCR] & ~pcc->lpcr_pm);
     env->spr[SPR_PSSCR] |= PSSCR_EC;
-    cs->halted = 1;
+    cpu_halted_set(cs, 1);
     kvmppc_set_reg_ppc_online(cpu, 0);
     qemu_cpu_kick(cs);
 }
@@ -238,7 +238,7 @@ static void rtas_ibm_suspend_me(PowerPCCPU *cpu, SpaprMachineState *spapr,
         }
 
         /* See h_join */
-        if (!cs->halted || (e->msr & (1ULL << MSR_EE))) {
+        if (!cpu_halted(cs) || (e->msr & (1ULL << MSR_EE))) {
             rtas_st(rets, 0, H_MULTI_THREADS_ACTIVE);
             return;
         }
diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c
index 08bc885ca6..e686eda0f4 100644
--- a/target/ppc/excp_helper.c
+++ b/target/ppc/excp_helper.c
@@ -276,7 +276,7 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
             qemu_log("Machine check while not allowed. "
                      "Entering checkstop state\n");
         }
-        cs->halted = 1;
+        cpu_halted_set(cs, 1);
         cpu_interrupt_exittb(cs);
     }
     if (env->msr_mask & MSR_HVB) {
@@ -1075,7 +1075,7 @@ void helper_pminsn(CPUPPCState *env, powerpc_pm_insn_t insn)
     CPUState *cs;
 
     cs = env_cpu(env);
-    cs->halted = 1;
+    cpu_halted_set(cs, 1);
 
     /*
      * The architecture specifies that HDEC interrupts are discarded
diff --git a/target/ppc/helper_regs.h b/target/ppc/helper_regs.h
index d78c2af63e..f84438f639 100644
--- a/target/ppc/helper_regs.h
+++ b/target/ppc/helper_regs.h
@@ -168,7 +168,7 @@ static inline int hreg_store_msr(CPUPPCState *env, target_ulong value,
 #if !defined(CONFIG_USER_ONLY)
     if (unlikely(msr_pow == 1)) {
         if (!env->pending_interrupts && (*env->check_pow)(env)) {
-            cs->halted = 1;
+            cpu_halted_set(cs, 1);
             excp = EXCP_HALTED;
         }
     }
diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c
index 597f72be1b..13c6626ca7 100644
--- a/target/ppc/kvm.c
+++ b/target/ppc/kvm.c
@@ -1340,7 +1340,7 @@ MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
 
 int kvm_arch_process_async_events(CPUState *cs)
 {
-    return cs->halted;
+    return cpu_halted(cs);
 }
 
 static int kvmppc_handle_halt(PowerPCCPU *cpu)
@@ -1349,7 +1349,7 @@ static int kvmppc_handle_halt(PowerPCCPU *cpu)
     CPUPPCState *env = &cpu->env;
 
     if (!(cs->interrupt_request & CPU_INTERRUPT_HARD) && (msr_ee)) {
-        cs->halted = 1;
+        cpu_halted_set(cs, 1);
         cs->exception_index = EXCP_HLT;
     }
 
diff --git a/target/ppc/translate_init.inc.c b/target/ppc/translate_init.inc.c
index e853164a86..5c2f8ffa4a 100644
--- a/target/ppc/translate_init.inc.c
+++ b/target/ppc/translate_init.inc.c
@@ -8539,7 +8539,7 @@ static bool cpu_has_work_POWER7(CPUState *cs)
     PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
 
-    if (cs->halted) {
+    if (cpu_halted(cs)) {
         if (!(cs->interrupt_request & CPU_INTERRUPT_HARD)) {
             return false;
         }
@@ -8701,7 +8701,7 @@ static bool cpu_has_work_POWER8(CPUState *cs)
     PowerPCCPU *cpu = POWERPC_CPU(cs);
     CPUPPCState *env = &cpu->env;
 
-    if (cs->halted) {
+    if (cpu_halted(cs)) {
         if (!(cs->interrupt_request & CPU_INTERRUPT_HARD)) {
             return false;
         }
@@ -8901,7 +8901,7 @@ static bool cpu_has_work_POWER9(CPUState *cs)
     PowerPCCPU *cpu = POWERPC_CPU(cs);
     CPUPPCState *env = &cpu->env;
 
-    if (cs->halted) {
+    if (cpu_halted(cs)) {
         uint64_t psscr = env->spr[SPR_PSSCR];
 
         if (!(cs->interrupt_request & CPU_INTERRUPT_HARD)) {
@@ -9117,7 +9117,7 @@ static bool cpu_has_work_POWER10(CPUState *cs)
     PowerPCCPU *cpu = POWERPC_CPU(cs);
     CPUPPCState *env = &cpu->env;
 
-    if (cs->halted) {
+    if (cpu_halted(cs)) {
        uint64_t psscr = env->spr[SPR_PSSCR];
 
         if (!(cs->interrupt_request & CPU_INTERRUPT_HARD)) {
-- 
2.17.1
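
For context when reading the conversions above: cpu_halted() and
cpu_halted_set() are the accessors introduced earlier in this series by
"cpu: define cpu_halted helpers" (patch 15/74), and cpu_mutex_lock() /
cpu_mutex_unlock() come from "cpu: introduce cpu_mutex_lock/unlock"
(patch 03/74). A rough, illustrative sketch of the pattern those
accessors follow (protecting cpu->halted with the per-CPU lock unless
the caller already holds it) might look like the code below; the
cpu_mutex_locked() fast-path predicate is an assumption here, and the
authoritative definitions live in the patches named above.

    /* Illustrative sketch only; see patch 15/74 for the real helpers. */
    static inline uint32_t cpu_halted(CPUState *cpu)
    {
        uint32_t ret;

        if (cpu_mutex_locked(cpu)) {
            /* Caller already holds cpu->lock, e.g. spin_kick() above. */
            return cpu->halted;
        }
        cpu_mutex_lock(cpu);
        ret = cpu->halted;
        cpu_mutex_unlock(cpu);
        return ret;
    }

    static inline void cpu_halted_set(CPUState *cpu, uint32_t val)
    {
        if (cpu_mutex_locked(cpu)) {
            cpu->halted = val;
            return;
        }
        cpu_mutex_lock(cpu);
        cpu->halted = val;
        cpu_mutex_unlock(cpu);
    }

This is also why spin_kick() and h_cede() above take cpu_mutex_lock()
explicitly: holding cpu->lock once around several accesses (cpu->halted
plus cpu->stopped, or cpu->halted plus cpu_has_work()) lets each
accessor take its already-locked path instead of locking and unlocking
for every field, which is what the commit message describes.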