From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from mail.linutronix.de (146.0.238.70:993) by crypto-ml.lab.linutronix.de with IMAP4-SSL for ; 20 Feb 2019 14:37:40 -0000 Received: from bombadil.infradead.org ([2607:7c80:54:e::133]) by Galois.linutronix.de with esmtps (TLS1.2:RSA_AES_256_CBC_SHA256:256) (Exim 4.80) (envelope-from ) id 1gwT0F-0004tp-7k for speck@linutronix.de; Wed, 20 Feb 2019 15:37:39 +0100 Received: from j217100.upc-j.chello.nl ([24.132.217.100] helo=hirez.programming.kicks-ass.net) by bombadil.infradead.org with esmtpsa (Exim 4.90_1 #2 (Red Hat Linux)) id 1gwT0D-0003pq-Ki for speck@linutronix.de; Wed, 20 Feb 2019 14:37:37 +0000 Date: Wed, 20 Feb 2019 15:37:34 +0100 From: Peter Zijlstra Subject: [MODERATED] Re: Message-ID: <20190220143734.GL32494@hirez.programming.kicks-ass.net> References: <20190219155807.878855982@infradead.org> <20190219155859.834625575@infradead.org> MIME-Version: 1.0 In-Reply-To: Content-Type: text/plain; charset="utf-8" Content-Transfer-Encoding: 7bit To: speck@linutronix.de List-ID: On Tue, Feb 19, 2019 at 10:49:41AM -0800, speck for Linus Torvalds wrote: > On Tue, Feb 19, 2019 at 8:03 AM speck for Peter Zijlstra > wrote: > > > > When TFA is allowed (default); the MSR gets set when PMC3 gets > > scheduled and cleared when, after scheduling, PMC3 is unused. > > > > When TFA is not allowed; clear PMC3 from all constraints such that it > > will not get used. > > I wonder if somebody wants a "abort always" mode for testing? > > IOW, have a mode where even if PCM3 is not used, set the TSX_FORCE_ABORT bit. > > That way people can actually verify that their code works even if it > sees TSX enabled and uses it (but it always aborts). Basically > verifying their fallback code. Something like the below would make that happen. Not sure it's worth it to backport this to everything though. 
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2000,6 +2000,8 @@ static void intel_pmu_nhm_enable_all(int
 	intel_pmu_enable_all(added);
 }
 
+static int allow_tsx_force_abort = 1;
+
 static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on)
 {
 	u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0;
@@ -2027,7 +2029,7 @@ static void intel_skl_pmu_enable_all(int
 	 * If we find PMC3 is no longer used when we enable the PMU, we can
 	 * clear TFA.
 	 */
-	if (!test_bit(3, cpuc->active_mask))
+	if (!test_bit(3, cpuc->active_mask) && allow_tsx_force_abort < 2)
 		intel_set_tfa(cpuc, false);
 
 	intel_pmu_enable_all(added);
@@ -3399,8 +3401,6 @@ glp_get_event_constraints(struct cpu_hw_
 	return c;
 }
 
-static bool allow_tsx_force_abort = true;
-
 static struct event_constraint *
 skl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
 			  struct perf_event *event)
@@ -4115,7 +4115,7 @@ static ssize_t freeze_on_smi_show(struct
 	return sprintf(buf, "%lu\n", x86_pmu.attr_freeze_on_smi);
 }
 
-static DEFINE_MUTEX(freeze_on_smi_mutex);
+static DEFINE_MUTEX(sysfs_mutex);
 
 static ssize_t freeze_on_smi_store(struct device *cdev,
 				   struct device_attribute *attr,
@@ -4131,7 +4131,7 @@ static ssize_t freeze_on_smi_store(struc
 	if (val > 1)
 		return -EINVAL;
 
-	mutex_lock(&freeze_on_smi_mutex);
+	mutex_lock(&sysfs_mutex);
 
 	if (x86_pmu.attr_freeze_on_smi == val)
 		goto done;
@@ -4142,7 +4142,7 @@ static ssize_t freeze_on_smi_store(struc
 	on_each_cpu(flip_smm_bit, &val, 1);
 	put_online_cpus();
 done:
-	mutex_unlock(&freeze_on_smi_mutex);
+	mutex_unlock(&sysfs_mutex);
 
 	return count;
 }
@@ -4179,11 +4179,59 @@ static struct attribute *intel_pmu_caps_
 	NULL
 };
 
-DEVICE_BOOL_ATTR(allow_tsx_force_abort, 0644, allow_tsx_force_abort);
+static ssize_t allow_tsx_force_abort_show(struct device *cdev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	return sprintf(buf, "%d\n", allow_tsx_force_abort);
+}
+
+/*
+ * Re-evaluate the TFA bit on this CPU after a mode change:
+ *   2 - force-abort always (testing mode), set TFA unconditionally;
+ *   otherwise TFA tracks whether PMC3 is currently in use.
+ */
+static void flip_tfa_bit(void *data)
+{
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+	int val = *(int *)data;
+	bool set;
+
+	lockdep_assert_irqs_disabled();
+
+	if (val == 2)
+		set = true;
+	else
+		set = test_bit(3, cpuc->active_mask);
+
+	intel_set_tfa(cpuc, set);
+}
+
+static ssize_t allow_tsx_force_abort_store(struct device *cdev,
+					   struct device_attribute *attr,
+					   const char *buf, size_t count)
+{
+	ssize_t ret;
+	int val;
+
+	ret = kstrtoint(buf, 0, &val);
+	if (ret)
+		return ret;
+
+	if ((unsigned int)val > 2)
+		return -EINVAL;
+
+	mutex_lock(&sysfs_mutex);
+	if (allow_tsx_force_abort != val) {
+		allow_tsx_force_abort = val;
+		cpus_read_lock();
+		on_each_cpu(flip_tfa_bit, &val, 1);
+		cpus_read_unlock();
+	}
+	mutex_unlock(&sysfs_mutex);
+	return count;
+}
+
+static DEVICE_ATTR_RW(allow_tsx_force_abort);
 
 static struct attribute *intel_pmu_attrs[] = {
 	&dev_attr_freeze_on_smi.attr,
-	NULL, /* &dev_attr_allow_tsx_force_abort.attr.attr */
+	NULL, /* &dev_attr_allow_tsx_force_abort.attr */
 	NULL,
 };
 
@@ -4688,7 +4736,7 @@ __init int intel_pmu_init(void)
 		x86_pmu.get_event_constraints = skl_get_event_constraints;
 		x86_pmu.enable_all = intel_skl_pmu_enable_all;
 		x86_pmu.commit_scheduling = intel_skl_commit_scheduling;
-		intel_pmu_attrs[1] = &dev_attr_allow_tsx_force_abort.attr.attr;
+		intel_pmu_attrs[1] = &dev_attr_allow_tsx_force_abort.attr;
 	}
 
 	pr_cont("Skylake events, ");