* [PATCH] powerpc/perf: Enable PMU counters post partition migration if PMU is active
From: Athira Rajeev @ 2021-07-11 7:06 UTC
To: mpe; +Cc: kjain, maddy, linuxppc-dev, rnsastry
During Live Partition Migration (LPM), it is observed that after
migration completes, perf counter values report 0 in the case of
system/CPU-wide monitoring. However, 'perf stat' with a workload
continues to show counts post migration, since the PMU gets
disabled/enabled across sched switches.
Example:
  ./perf stat -e r1001e -I 1000
            time             counts unit events
     1.001010437         22,137,414      r1001e
     2.002495447         15,455,821      r1001e
  <<>>
As seen in the logs below, the counter values read zero once the
migration has completed:
  <<>>
    86.142535370    129,392,333,440      r1001e
    87.144714617                  0      r1001e
    88.146526636                  0      r1001e
    89.148085029                  0      r1001e
Here the PMU is enabled at the start of the perf session and the
counter values are read at intervals; the counters are disabled only
at the end of the session. The powerpc mobility code presently does
not disable and re-enable the PMU counters around a partition
migration. Also, since the PMU register values are not saved/restored
during migration, registers like Monitor Mode Control Register 0
(MMCR0) and Monitor Mode Control Register 1 (MMCR1) will no longer
contain the values they were programmed with. Hence the PMU counters
will not be enabled correctly post migration.
Fix this in the mobility code by disabling and re-enabling the PMU
on all CPUs before and after migration. The patch introduces two
functions, 'mobility_pmu_disable' and 'mobility_pmu_enable'.
mobility_pmu_disable() is called before the processor threads go to
suspend state, to disable the PMU counters; the disable is done only
if there are active events running on that CPU. mobility_pmu_enable()
is called after the processor threads are back online, to re-enable
the PMU counters.
Since the Performance Monitor Counters (PMCs) are not saved/restored
during LPM, the PMC value reads back as zero after migration while
'event->hw.prev_count' still holds a non-zero value. This breaks the
update of event->count, since we always accumulate
(PMC value - event->hw.prev_count) into event->count: if
event->hw.prev_count is greater than the PMC value, event->count goes
negative. Fix this by also re-initialising 'prev_count' for all
events while enabling them back. A new field 'migrate' is introduced
in 'struct cpu_hw_events' to flag the LPM case to power_pmu_enable.
Use the 'migrate' value to clear the PMC index (stored in
event->hw.idx) for all events, so that the event count settings get
re-initialised correctly.
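For illustration, a minimal user-space sketch (not the kernel code
itself) of the accumulation described above; with the unsigned 32-bit
wrap-around the bogus delta shows up as a huge jump rather than a
literal negative value:

  #include <stdio.h>
  #include <stdint.h>

  /* The PMCs are 32-bit up-counters; the accumulated event count
   * is the running sum of (current PMC value - prev_count). */
  static uint64_t count;        /* models event->count          */
  static uint32_t prev_count;   /* models event->hw.prev_count  */

  static void update(uint32_t pmc)
  {
          count += (uint32_t)(pmc - prev_count);
          prev_count = pmc;
  }

  int main(void)
  {
          update(22137414);     /* normal interval reads */
          update(37593235);
          printf("count before LPM: %llu\n", (unsigned long long)count);

          /* Post migration the PMC reads 0 while prev_count is still
           * 37593235, so the computed delta is wildly wrong.  The
           * patch avoids this by clearing event->hw.idx so that
           * power_pmu_enable() re-initialises prev_count. */
          update(0);
          printf("count after LPM:  %llu\n", (unsigned long long)count);
          return 0;
  }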
Signed-off-by: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
---
arch/powerpc/include/asm/rtas.h | 4 +++
arch/powerpc/perf/core-book3s.c | 43 ++++++++++++++++++++++++++++---
arch/powerpc/platforms/pseries/mobility.c | 4 +++
3 files changed, 48 insertions(+), 3 deletions(-)
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 9dc97d2..3fc478a 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -376,8 +376,12 @@ static inline void rtas_initialize(void) { }
#ifdef CONFIG_HV_PERF_CTRS
void read_24x7_sys_info(void);
+void mobility_pmu_disable(void);
+void mobility_pmu_enable(void);
#else
static inline void read_24x7_sys_info(void) { }
+static inline void mobility_pmu_disable(void) { }
+static inline void mobility_pmu_enable(void) { }
#endif
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index bb0ee71..e86df45 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -58,6 +58,7 @@ struct cpu_hw_events {
/* Store the PMC values */
unsigned long pmcs[MAX_HWEVENTS];
+ int migrate;
};
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
@@ -1335,6 +1336,22 @@ static void power_pmu_disable(struct pmu *pmu)
}
/*
+ * Called from powerpc mobility code
+ * before migration to disable counters
+ * if the PMU is active.
+ */
+void mobility_pmu_disable(void)
+{
+ struct cpu_hw_events *cpuhw;
+
+ cpuhw = this_cpu_ptr(&cpu_hw_events);
+ if (cpuhw->n_events != 0) {
+ power_pmu_disable(NULL);
+ cpuhw->migrate = 1;
+ }
+}
+
+/*
* Re-enable all events if disable == 0.
* If we were previously disabled and events were added, then
* put the new config on the PMU.
@@ -1379,8 +1396,10 @@ static void power_pmu_enable(struct pmu *pmu)
* no need to recalculate MMCR* settings and reset the PMCs.
* Just reenable the PMU with the current MMCR* settings
* (possibly updated for removal of events).
+ * While reenabling PMU during partition migration, continue
+ * with normal flow.
*/
- if (!cpuhw->n_added) {
+ if (!cpuhw->n_added && !cpuhw->migrate) {
mtspr(SPRN_MMCRA, cpuhw->mmcr.mmcra & ~MMCRA_SAMPLE_ENABLE);
mtspr(SPRN_MMCR1, cpuhw->mmcr.mmcr1);
if (ppmu->flags & PPMU_ARCH_31)
@@ -1434,11 +1453,15 @@ static void power_pmu_enable(struct pmu *pmu)
/*
* Read off any pre-existing events that need to move
* to another PMC.
+ * While enabling PMU during partition migration,
+ * skip power_pmu_read since all event count settings
+ * needs to be re-initialised after migration.
*/
for (i = 0; i < cpuhw->n_events; ++i) {
event = cpuhw->event[i];
- if (event->hw.idx && event->hw.idx != hwc_index[i] + 1) {
- power_pmu_read(event);
+ if ((event->hw.idx && event->hw.idx != hwc_index[i] + 1) || (cpuhw->migrate)) {
+ if (!cpuhw->migrate)
+ power_pmu_read(event);
write_pmc(event->hw.idx, 0);
event->hw.idx = 0;
}
@@ -1506,6 +1529,20 @@ static void power_pmu_enable(struct pmu *pmu)
local_irq_restore(flags);
}
+/*
+ * Called from powerpc mobility code
+ * during migration completion to
+ * enable back PMU counters.
+ */
+void mobility_pmu_enable(void)
+{
+ struct cpu_hw_events *cpuhw;
+
+ cpuhw = this_cpu_ptr(&cpu_hw_events);
+ power_pmu_enable(NULL);
+ cpuhw->migrate = 0;
+}
+
static int collect_events(struct perf_event *group, int max_count,
struct perf_event *ctrs[], u64 *events,
unsigned int *flags)
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index e83e089..ff7a77c 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -476,6 +476,8 @@ static int do_join(void *arg)
retry:
/* Must ensure MSR.EE off for H_JOIN. */
hard_irq_disable();
+ /* Disable PMU before suspend */
+ mobility_pmu_disable();
hvrc = plpar_hcall_norets(H_JOIN);
switch (hvrc) {
@@ -530,6 +532,8 @@ static int do_join(void *arg)
* reset the watchdog.
*/
touch_nmi_watchdog();
+ /* Enable PMU after resuming */
+ mobility_pmu_enable();
return ret;
}
--
1.8.3.1
* Re: [PATCH] powerpc/perf: Enable PMU counters post partition migration if PMU is active
From: kernel test robot @ 2021-07-11 9:05 UTC
To: Athira Rajeev, mpe; +Cc: kjain, maddy, kbuild-all, linuxppc-dev, rnsastry
Hi Athira,
Thank you for the patch! Perhaps something to improve:
[auto build test WARNING on powerpc/next]
[also build test WARNING on v5.13 next-20210709]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch]
url: https://github.com/0day-ci/linux/commits/Athira-Rajeev/powerpc-perf-Enable-PMU-counters-post-partition-migration-if-PMU-is-active/20210711-150741
base: https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git next
config: powerpc-allyesconfig (attached as .config)
compiler: powerpc64-linux-gcc (GCC) 9.3.0
reproduce (this is a W=1 build):
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# https://github.com/0day-ci/linux/commit/2050c82afb3abd9eaa57fee45e71e7fccabfb81f
git remote add linux-review https://github.com/0day-ci/linux
git fetch --no-tags linux-review Athira-Rajeev/powerpc-perf-Enable-PMU-counters-post-partition-migration-if-PMU-is-active/20210711-150741
git checkout 2050c82afb3abd9eaa57fee45e71e7fccabfb81f
# save the attached .config to linux build tree
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross ARCH=powerpc
If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>
All warnings (new ones prefixed by >>):
>> arch/powerpc/perf/core-book3s.c:1343:6: warning: no previous prototype for 'mobility_pmu_disable' [-Wmissing-prototypes]
1343 | void mobility_pmu_disable(void)
| ^~~~~~~~~~~~~~~~~~~~
>> arch/powerpc/perf/core-book3s.c:1537:6: warning: no previous prototype for 'mobility_pmu_enable' [-Wmissing-prototypes]
1537 | void mobility_pmu_enable(void)
| ^~~~~~~~~~~~~~~~~~~
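The warnings indicate that the new definitions in core-book3s.c have
no prototype in scope at their point of definition. A minimal sketch
of one possible fix, assuming the declarations stay in asm/rtas.h as
in this patch, is to include that header in core-book3s.c (note also
that the declarations sit under CONFIG_HV_PERF_CTRS while
core-book3s.c is built for CONFIG_PPC_PERF_CTRS, so the guard may
need revisiting):

  /* arch/powerpc/perf/core-book3s.c */
  #include <asm/rtas.h>	/* mobility_pmu_disable()/mobility_pmu_enable() */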
vim +/mobility_pmu_disable +1343 arch/powerpc/perf/core-book3s.c
1337
1338 /*
1339 * Called from powerpc mobility code
1340 * before migration to disable counters
1341 * if the PMU is active.
1342 */
> 1343 void mobility_pmu_disable(void)
1344 {
1345 struct cpu_hw_events *cpuhw;
1346
1347 cpuhw = this_cpu_ptr(&cpu_hw_events);
1348 if (cpuhw->n_events != 0) {
1349 power_pmu_disable(NULL);
1350 cpuhw->migrate = 1;
1351 }
1352 }
1353
1354 /*
1355 * Re-enable all events if disable == 0.
1356 * If we were previously disabled and events were added, then
1357 * put the new config on the PMU.
1358 */
1359 static void power_pmu_enable(struct pmu *pmu)
1360 {
1361 struct perf_event *event;
1362 struct cpu_hw_events *cpuhw;
1363 unsigned long flags;
1364 long i;
1365 unsigned long val, mmcr0;
1366 s64 left;
1367 unsigned int hwc_index[MAX_HWEVENTS];
1368 int n_lim;
1369 int idx;
1370 bool ebb;
1371
1372 if (!ppmu)
1373 return;
1374 local_irq_save(flags);
1375
1376 cpuhw = this_cpu_ptr(&cpu_hw_events);
1377 if (!cpuhw->disabled)
1378 goto out;
1379
1380 if (cpuhw->n_events == 0) {
1381 ppc_set_pmu_inuse(0);
1382 goto out;
1383 }
1384
1385 cpuhw->disabled = 0;
1386
1387 /*
1388 * EBB requires an exclusive group and all events must have the EBB
1389 * flag set, or not set, so we can just check a single event. Also we
1390 * know we have at least one event.
1391 */
1392 ebb = is_ebb_event(cpuhw->event[0]);
1393
1394 /*
1395 * If we didn't change anything, or only removed events,
1396 * no need to recalculate MMCR* settings and reset the PMCs.
1397 * Just reenable the PMU with the current MMCR* settings
1398 * (possibly updated for removal of events).
1399 * While reenabling PMU during partition migration, continue
1400 * with normal flow.
1401 */
1402 if (!cpuhw->n_added && !cpuhw->migrate) {
1403 mtspr(SPRN_MMCRA, cpuhw->mmcr.mmcra & ~MMCRA_SAMPLE_ENABLE);
1404 mtspr(SPRN_MMCR1, cpuhw->mmcr.mmcr1);
1405 if (ppmu->flags & PPMU_ARCH_31)
1406 mtspr(SPRN_MMCR3, cpuhw->mmcr.mmcr3);
1407 goto out_enable;
1408 }
1409
1410 /*
1411 * Clear all MMCR settings and recompute them for the new set of events.
1412 */
1413 memset(&cpuhw->mmcr, 0, sizeof(cpuhw->mmcr));
1414
1415 if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index,
1416 &cpuhw->mmcr, cpuhw->event, ppmu->flags)) {
1417 /* shouldn't ever get here */
1418 printk(KERN_ERR "oops compute_mmcr failed\n");
1419 goto out;
1420 }
1421
1422 if (!(ppmu->flags & PPMU_ARCH_207S)) {
1423 /*
1424 * Add in MMCR0 freeze bits corresponding to the attr.exclude_*
1425 * bits for the first event. We have already checked that all
1426 * events have the same value for these bits as the first event.
1427 */
1428 event = cpuhw->event[0];
1429 if (event->attr.exclude_user)
1430 cpuhw->mmcr.mmcr0 |= MMCR0_FCP;
1431 if (event->attr.exclude_kernel)
1432 cpuhw->mmcr.mmcr0 |= freeze_events_kernel;
1433 if (event->attr.exclude_hv)
1434 cpuhw->mmcr.mmcr0 |= MMCR0_FCHV;
1435 }
1436
1437 /*
1438 * Write the new configuration to MMCR* with the freeze
1439 * bit set and set the hardware events to their initial values.
1440 * Then unfreeze the events.
1441 */
1442 ppc_set_pmu_inuse(1);
1443 mtspr(SPRN_MMCRA, cpuhw->mmcr.mmcra & ~MMCRA_SAMPLE_ENABLE);
1444 mtspr(SPRN_MMCR1, cpuhw->mmcr.mmcr1);
1445 mtspr(SPRN_MMCR0, (cpuhw->mmcr.mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
1446 | MMCR0_FC);
1447 if (ppmu->flags & PPMU_ARCH_207S)
1448 mtspr(SPRN_MMCR2, cpuhw->mmcr.mmcr2);
1449
1450 if (ppmu->flags & PPMU_ARCH_31)
1451 mtspr(SPRN_MMCR3, cpuhw->mmcr.mmcr3);
1452
1453 /*
1454 * Read off any pre-existing events that need to move
1455 * to another PMC.
1456 * While enabling PMU during partition migration,
1457 * skip power_pmu_read since all event count settings
1458 * needs to be re-initialised after migration.
1459 */
1460 for (i = 0; i < cpuhw->n_events; ++i) {
1461 event = cpuhw->event[i];
1462 if ((event->hw.idx && event->hw.idx != hwc_index[i] + 1) || (cpuhw->migrate)) {
1463 if (!cpuhw->migrate)
1464 power_pmu_read(event);
1465 write_pmc(event->hw.idx, 0);
1466 event->hw.idx = 0;
1467 }
1468 }
1469
1470 /*
1471 * Initialize the PMCs for all the new and moved events.
1472 */
1473 cpuhw->n_limited = n_lim = 0;
1474 for (i = 0; i < cpuhw->n_events; ++i) {
1475 event = cpuhw->event[i];
1476 if (event->hw.idx)
1477 continue;
1478 idx = hwc_index[i] + 1;
1479 if (is_limited_pmc(idx)) {
1480 cpuhw->limited_counter[n_lim] = event;
1481 cpuhw->limited_hwidx[n_lim] = idx;
1482 ++n_lim;
1483 continue;
1484 }
1485
1486 if (ebb)
1487 val = local64_read(&event->hw.prev_count);
1488 else {
1489 val = 0;
1490 if (event->hw.sample_period) {
1491 left = local64_read(&event->hw.period_left);
1492 if (left < 0x80000000L)
1493 val = 0x80000000L - left;
1494 }
1495 local64_set(&event->hw.prev_count, val);
1496 }
1497
1498 event->hw.idx = idx;
1499 if (event->hw.state & PERF_HES_STOPPED)
1500 val = 0;
1501 write_pmc(idx, val);
1502
1503 perf_event_update_userpage(event);
1504 }
1505 cpuhw->n_limited = n_lim;
1506 cpuhw->mmcr.mmcr0 |= MMCR0_PMXE | MMCR0_FCECE;
1507
1508 out_enable:
1509 pmao_restore_workaround(ebb);
1510
1511 mmcr0 = ebb_switch_in(ebb, cpuhw);
1512
1513 mb();
1514 if (cpuhw->bhrb_users)
1515 ppmu->config_bhrb(cpuhw->bhrb_filter);
1516
1517 write_mmcr0(cpuhw, mmcr0);
1518
1519 /*
1520 * Enable instruction sampling if necessary
1521 */
1522 if (cpuhw->mmcr.mmcra & MMCRA_SAMPLE_ENABLE) {
1523 mb();
1524 mtspr(SPRN_MMCRA, cpuhw->mmcr.mmcra);
1525 }
1526
1527 out:
1528
1529 local_irq_restore(flags);
1530 }
1531
1532 /*
1533 * Called from powerpc mobility code
1534 * during migration completion to
1535 * enable back PMU counters.
1536 */
> 1537 void mobility_pmu_enable(void)
1538 {
1539 struct cpu_hw_events *cpuhw;
1540
1541 cpuhw = this_cpu_ptr(&cpu_hw_events);
1542 power_pmu_enable(NULL);
1543 cpuhw->migrate = 0;
1544 }
1545
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org