From: "Moger, Babu" <Babu.Moger@amd.com>
To: ssg.sos.staff <ssg.sos.staff@amd.com>,
	"ehabkost@redhat.com" <ehabkost@redhat.com>,
	"marcel.apfelbaum@gmail.com" <marcel.apfelbaum@gmail.com>,
	"mst@redhat.com" <mst@redhat.com>,
	"pbonzini@redhat.com" <pbonzini@redhat.com>,
	"rth@twiddle.net" <rth@twiddle.net>,
	"eblake@redhat.com" <eblake@redhat.com>,
	"armbru@redhat.com" <armbru@redhat.com>,
	"imammedo@redhat.com" <imammedo@redhat.com>
Cc: "qemu-devel@nongnu.org" <qemu-devel@nongnu.org>
Subject: [Qemu-devel] [RFC 2 PATCH 08/16] i386: Cleanup and use the new epyc mode topology functions
Date: Fri, 6 Sep 2019 19:12:33 +0000
Message-ID: <156779715031.21957.17374671669134234845.stgit@localhost.localdomain>
In-Reply-To: <156779689013.21957.1631551572950676212.stgit@localhost.localdomain>

Use the new EPYC mode topology functions introduced earlier in this series and
delete the now-unused local helpers (nodes_in_socket, cores_in_core_complex,
build_core_topology) from target/i386/cpu.c.
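
For reviewers, here is a minimal standalone sketch of the arithmetic being
removed; the cores_in_ccx() and nodes_in_pkg() helpers added earlier in this
series are assumed to derive the same values from an X86CPUTopoInfo rather
than a bare core count (DIV_ROUND_UP as in include/qemu/osdep.h):

    /* EPYC limits assumed by the deleted helpers. */
    #define MAX_CCX           2   /* core complexes per node */
    #define MAX_CORES_IN_CCX  4   /* cores per core complex  */
    #define MAX_CORES_IN_NODE 8   /* cores per node          */

    /* Nodes needed for nr_cores; hardware has no 3-node configuration. */
    static int nodes_in_socket(int nr_cores)
    {
        int nodes = DIV_ROUND_UP(nr_cores, MAX_CORES_IN_NODE);

        return (nodes == 3) ? 4 : nodes;
    }

    /* Cores sharing one L3, i.e. cores per core complex. */
    static int cores_in_core_complex(int nr_cores)
    {
        if (nr_cores <= MAX_CORES_IN_CCX) {
            return nr_cores;
        }
        return DIV_ROUND_UP(nr_cores, nodes_in_socket(nr_cores) * MAX_CCX);
    }

For example, a 12-core socket gives nodes_in_socket() == 2 and
cores_in_core_complex() == 3, so CPUID 0x8000001D ends up reporting
3 * nr_threads logical processors sharing each L3 (encoded minus one and
shifted into EAX by 14, as in the code below).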

Signed-off-by: Babu Moger <babu.moger@amd.com>
---
 target/i386/cpu.c |  171 +++++++++++++++--------------------------------------
 1 file changed, 48 insertions(+), 123 deletions(-)

diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index ca02bc21ec..f25491a029 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -28,6 +28,7 @@
 #include "sysemu/kvm.h"
 #include "sysemu/hvf.h"
 #include "sysemu/cpus.h"
+#include "sysemu/numa.h"
 #include "kvm_i386.h"
 #include "sev_i386.h"
 
@@ -338,67 +339,19 @@ static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
     }
 }
 
-/*
- * Definitions used for building CPUID Leaf 0x8000001D and 0x8000001E
- * Please refer to the AMD64 Architecture Programmer’s Manual Volume 3.
- * Define the constants to build the cpu topology. Right now, TOPOEXT
- * feature is enabled only on EPYC. So, these constants are based on
- * EPYC supported configurations. We may need to handle the cases if
- * these values change in future.
- */
-/* Maximum core complexes in a node */
-#define MAX_CCX 2
-/* Maximum cores in a core complex */
-#define MAX_CORES_IN_CCX 4
-/* Maximum cores in a node */
-#define MAX_CORES_IN_NODE 8
-/* Maximum nodes in a socket */
-#define MAX_NODES_PER_SOCKET 4
-
-/*
- * Figure out the number of nodes required to build this config.
- * Max cores in a node is 8
- */
-static int nodes_in_socket(int nr_cores)
-{
-    int nodes;
-
-    nodes = DIV_ROUND_UP(nr_cores, MAX_CORES_IN_NODE);
-
-   /* Hardware does not support config with 3 nodes, return 4 in that case */
-    return (nodes == 3) ? 4 : nodes;
-}
-
-/*
- * Decide the number of cores in a core complex with the given nr_cores using
- * following set constants MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE and
- * MAX_NODES_PER_SOCKET. Maintain symmetry as much as possible
- * L3 cache is shared across all cores in a core complex. So, this will also
- * tell us how many cores are sharing the L3 cache.
- */
-static int cores_in_core_complex(int nr_cores)
-{
-    int nodes;
-
-    /* Check if we can fit all the cores in one core complex */
-    if (nr_cores <= MAX_CORES_IN_CCX) {
-        return nr_cores;
-    }
-    /* Get the number of nodes required to build this config */
-    nodes = nodes_in_socket(nr_cores);
-
-    /*
-     * Divide the cores accros all the core complexes
-     * Return rounded up value
-     */
-    return DIV_ROUND_UP(nr_cores, nodes * MAX_CCX);
-}
-
 /* Encode cache info for CPUID[8000001D] */
-static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, CPUState *cs,
-                                uint32_t *eax, uint32_t *ebx,
-                                uint32_t *ecx, uint32_t *edx)
+static void encode_cache_cpuid8000001d(CPUCacheInfo *cache,
+                                       uint32_t *eax, uint32_t *ebx,
+                                       uint32_t *ecx, uint32_t *edx)
 {
+    MachineState *ms = MACHINE(qdev_get_machine());
+    X86CPUTopoInfo topo_info = {
+        .numa_nodes = nb_numa_nodes,
+        .nr_sockets = ms->smp.sockets,
+        .nr_cores = ms->smp.cores,
+        .nr_threads = ms->smp.threads,
+    };
+
     uint32_t l3_cores;
     assert(cache->size == cache->line_size * cache->associativity *
                           cache->partitions * cache->sets);
@@ -408,10 +361,10 @@ static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, CPUState *cs,
 
     /* L3 is shared among multiple cores */
     if (cache->level == 3) {
-        l3_cores = cores_in_core_complex(cs->nr_cores);
-        *eax |= ((l3_cores * cs->nr_threads) - 1) << 14;
+        l3_cores = cores_in_ccx(&topo_info);
+        *eax |= ((l3_cores * topo_info.nr_threads) - 1) << 14;
     } else {
-        *eax |= ((cs->nr_threads - 1) << 14);
+        *eax |= ((topo_info.nr_threads - 1) << 14);
     }
 
     assert(cache->line_size > 0);
@@ -431,56 +384,28 @@ static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, CPUState *cs,
            (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
 }
 
-/* Data structure to hold the configuration info for a given core index */
-struct core_topology {
-    /* core complex id of the current core index */
-    int ccx_id;
-    /*
-     * Adjusted core index for this core in the topology
-     * This can be 0,1,2,3 with max 4 cores in a core complex
-     */
-    int core_id;
-    /* Node id for this core index */
-    int node_id;
-    /* Number of nodes in this config */
-    int num_nodes;
-};
-
-/*
- * Build the configuration closely match the EPYC hardware. Using the EPYC
- * hardware configuration values (MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE)
- * right now. This could change in future.
- * nr_cores : Total number of cores in the config
- * core_id  : Core index of the current CPU
- * topo     : Data structure to hold all the config info for this core index
- */
-static void build_core_topology(int nr_cores, int core_id,
-                                struct core_topology *topo)
-{
-    int nodes, cores_in_ccx;
-
-    /* First get the number of nodes required */
-    nodes = nodes_in_socket(nr_cores);
-
-    cores_in_ccx = cores_in_core_complex(nr_cores);
-
-    topo->node_id = core_id / (cores_in_ccx * MAX_CCX);
-    topo->ccx_id = (core_id % (cores_in_ccx * MAX_CCX)) / cores_in_ccx;
-    topo->core_id = core_id % cores_in_ccx;
-    topo->num_nodes = nodes;
-}
-
 /* Encode cache info for CPUID[8000001E] */
-static void encode_topo_cpuid8000001e(CPUState *cs, X86CPU *cpu,
-                                       uint32_t *eax, uint32_t *ebx,
-                                       uint32_t *ecx, uint32_t *edx)
+static void encode_topo_cpuid8000001e(CPUX86State *env,
+                                      uint32_t *eax, uint32_t *ebx,
+                                      uint32_t *ecx, uint32_t *edx)
 {
-    struct core_topology topo = {0};
-    unsigned long nodes;
-    int shift;
+    X86CPUTopoIDs topo_ids = { 0 };
+    unsigned long nodes, shift;
+    X86CPU *cpu = env_archcpu(env);
+    CPUState *cs = env_cpu(env);
+    MachineState *ms = MACHINE(qdev_get_machine());
+    X86CPUTopoInfo topo_info = {
+        .numa_nodes = nb_numa_nodes,
+        .nr_sockets = ms->smp.sockets,
+        .nr_cores = ms->smp.cores,
+        .nr_threads = ms->smp.threads,
+    };
+
+    nodes = nodes_in_pkg(&topo_info);
+    x86_topo_ids_from_idx_epyc(&topo_info, cs->cpu_index, &topo_ids);
 
-    build_core_topology(cs->nr_cores, cpu->core_id, &topo);
     *eax = cpu->apic_id;
+
     /*
      * CPUID_Fn8000001E_EBX
      * 31:16 Reserved
@@ -496,11 +421,12 @@ static void encode_topo_cpuid8000001e(CPUState *cs, X86CPU *cpu,
      *             3 Core complex id
      *           1:0 Core id
      */
-    if (cs->nr_threads - 1) {
-        *ebx = ((cs->nr_threads - 1) << 8) | (topo.node_id << 3) |
-                (topo.ccx_id << 2) | topo.core_id;
+    if (topo_info.nr_threads - 1) {
+        *ebx = ((topo_info.nr_threads - 1) << 8) | (topo_ids.node_id << 3) |
+                (topo_ids.ccx_id << 2) | topo_ids.core_id;
     } else {
-        *ebx = (topo.node_id << 4) | (topo.ccx_id << 3) | topo.core_id;
+        *ebx = (topo_ids.node_id << 4) | (topo_ids.ccx_id << 3) |
+                topo_ids.core_id;
     }
     /*
      * CPUID_Fn8000001E_ECX
@@ -510,9 +436,8 @@ static void encode_topo_cpuid8000001e(CPUState *cs, X86CPU *cpu,
      *         2  Socket id
      *       1:0  Node id
      */
-    if (topo.num_nodes <= 4) {
-        *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << 2) |
-                topo.node_id;
+    if (nodes <= 4) {
+        *ecx = ((nodes - 1) << 8) | (topo_ids.pkg_id << 2) | topo_ids.node_id;
     } else {
         /*
          * Node id fix up. Actual hardware supports up to 4 nodes. But with
@@ -527,12 +452,12 @@ static void encode_topo_cpuid8000001e(CPUState *cs, X86CPU *cpu,
          * number of nodes. find_last_bit returns last set bit(0 based). Left
          * shift(+1) the socket id to represent all the nodes.
          */
-        nodes = topo.num_nodes - 1;
+        nodes = nodes - 1;
         shift = find_last_bit(&nodes, 8);
-        *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << (shift + 1)) |
-                topo.node_id;
+        *ecx = (nodes << 8) | (topo_ids.pkg_id << (shift + 1)) | topo_ids.node_id;
     }
     *edx = 0;
+
 }
 
 /*
@@ -4580,19 +4505,19 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
         }
         switch (count) {
         case 0: /* L1 dcache info */
-            encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, cs,
+            encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache,
                                        eax, ebx, ecx, edx);
             break;
         case 1: /* L1 icache info */
-            encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, cs,
+            encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache,
                                        eax, ebx, ecx, edx);
             break;
         case 2: /* L2 cache info */
-            encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, cs,
+            encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache,
                                        eax, ebx, ecx, edx);
             break;
         case 3: /* L3 cache info */
-            encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, cs,
+            encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache,
                                        eax, ebx, ecx, edx);
             break;
         default: /* end of info */
@@ -4602,7 +4527,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
         break;
     case 0x8000001E:
         assert(cpu->core_id <= 255);
-        encode_topo_cpuid8000001e(cs, cpu,
+        encode_topo_cpuid8000001e(env,
                                   eax, ebx, ecx, edx);
         break;
     case 0xC0000000:
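
As a quick sanity check of the CPUID_Fn8000001E encoding above, take some
purely hypothetical IDs: node_id = 1, ccx_id = 1, core_id = 2, pkg_id = 0,
with nr_threads = 2 and a 2-node package. The SMT branch then produces:

    /* EBX: threads-1 at bits 15:8, node id at bit 3, ccx id at bit 2,
     * core id in bits 1:0 (SMT case above) */
    ebx = ((2 - 1) << 8) | (1 << 3) | (1 << 2) | 2;   /* 0x10e */
    /* ECX: nodes-1 shifted to bit 8, socket id at bit 2, node id in
     * bits 1:0 (<= 4 nodes case above) */
    ecx = ((2 - 1) << 8) | (0 << 2) | 1;              /* 0x101 */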


Thread overview: 43+ messages
2019-09-06 19:11 [Qemu-devel] [RFC 2 PATCH 00/16] APIC ID fixes for AMD EPYC CPU models Moger, Babu
2019-09-06 19:11 ` [Qemu-devel] [RFC 2 PATCH 01/16] numa: Split the numa functionality Moger, Babu
2019-10-10  3:25   ` Eduardo Habkost
2019-12-02 20:18     ` Babu Moger
2019-09-06 19:11 ` [Qemu-devel] [RFC 2 PATCH 02/16] hw/i386: Rename X86CPUTopoInfo structure to X86CPUTopoIDs Moger, Babu
2019-10-10  3:26   ` Eduardo Habkost
2019-09-06 19:11 ` [Qemu-devel] [RFC 2 PATCH 03/16] hw/i386: Introduce X86CPUTopoInfo to contain topology info Moger, Babu
2019-10-11  2:29   ` Eduardo Habkost
2019-12-02 20:23     ` Babu Moger
2019-10-11  3:54   ` Eduardo Habkost
2019-12-02 20:25     ` Babu Moger
2019-09-06 19:12 ` [Qemu-devel] [RFC 2 PATCH 04/16] machine: Add SMP Sockets in CpuTopology Moger, Babu
2019-10-11  2:31   ` Eduardo Habkost
2019-09-06 19:12 ` [Qemu-devel] [RFC 2 PATCH 05/16] hw/i386: Simplify topology Offset/width Calculation Moger, Babu
2019-10-11  2:32   ` Eduardo Habkost
2019-12-02 20:29     ` Babu Moger
2019-09-06 19:12 ` [Qemu-devel] [RFC 2 PATCH 06/16] hw/core: Add core complex id in X86CPU topology Moger, Babu
2019-09-06 19:20   ` Eric Blake
2019-09-06 19:34     ` Moger, Babu
2019-09-22 12:48   ` Michael S. Tsirkin
2019-09-23 14:38     ` Moger, Babu
2019-09-06 19:12 ` [Qemu-devel] [RFC 2 PATCH 07/16] hw/386: Add new epyc mode topology decoding functions Moger, Babu
2019-10-11  3:29   ` Eduardo Habkost
2019-12-02 20:38     ` Babu Moger
2019-09-06 19:12 ` Moger, Babu [this message]
2019-10-11  3:51   ` [Qemu-devel] [RFC 2 PATCH 08/16] i386: Cleanup and use the new epyc mode topology functions Eduardo Habkost
2019-09-06 19:12 ` [Qemu-devel] [RFC 2 PATCH 09/16] hw/i386: Introduce initialize_topo_info function Moger, Babu
2019-10-11  3:54   ` Eduardo Habkost
2019-09-06 19:12 ` [Qemu-devel] [RFC 2 PATCH 10/16] hw/i386: Introduce apicid_from_cpu_idx in PCMachineState Moger, Babu
2019-09-06 19:12 ` [Qemu-devel] [RFC 2 PATCH 11/16] Introduce-topo_ids_from_apicid-handler Moger, Babu
2019-09-06 19:13 ` [Qemu-devel] [RFC 2 PATCH 12/16] hw/i386: Introduce apic_id_from_topo_ids handler in PCMachineState Moger, Babu
2019-09-06 19:13 ` [Qemu-devel] [RFC 2 PATCH 13/16] machine: Add new epyc property " Moger, Babu
2019-10-11  3:59   ` Eduardo Habkost
2019-10-11 16:23     ` Moger, Babu
2019-10-11 16:59     ` Moger, Babu
2019-10-11 19:03       ` Eduardo Habkost
2019-09-06 19:13 ` [Qemu-devel] [RFC 2 PATCH 14/16] hw/i386: Introduce epyc mode function handlers Moger, Babu
2019-09-06 19:13 ` [Qemu-devel] [RFC 2 PATCH 15/16] i386: Fix pkg_id offset for epyc mode Moger, Babu
2019-10-11  4:03   ` Eduardo Habkost
2019-09-06 19:13 ` [Qemu-devel] [RFC 2 PATCH 16/16] hw/core: Fix up the machine_set_cpu_numa_node for epyc Moger, Babu
2019-10-11  4:10   ` Eduardo Habkost
2019-12-02 20:44     ` Babu Moger
2019-09-20 22:44 ` [RFC 2 PATCH 00/16] APIC ID fixes for AMD EPYC CPU models Moger, Babu
