From: "Moger, Babu" <Babu.Moger@amd.com>
To: "ehabkost@redhat.com" <ehabkost@redhat.com>,
	"marcel.apfelbaum@gmail.com" <marcel.apfelbaum@gmail.com>,
	"mst@redhat.com" <mst@redhat.com>,
	"pbonzini@redhat.com" <pbonzini@redhat.com>,
	"rth@twiddle.net" <rth@twiddle.net>,
	"imammedo@redhat.com" <imammedo@redhat.com>
Cc: "Moger, Babu" <Babu.Moger@amd.com>,
	"qemu-devel@nongnu.org" <qemu-devel@nongnu.org>
Subject: [Qemu-devel] [RFC PATCH 3/5] i386: Use topology functions from topology.h
Date: Wed, 31 Jul 2019 23:20:49 +0000
Message-ID: <20190731232032.51786-4-babu.moger@amd.com>
In-Reply-To: <20190731232032.51786-1-babu.moger@amd.com>

Use the EPYC topology functions now provided by topology.h and remove the
open-coded topology helpers from cpu.c. The CPUID 0x8000001D/0x8000001E
encoders take the topology from the MachineState instead of CPUState.

Signed-off-by: Babu Moger <babu.moger@amd.com>
---
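Note for reviewers, not part of the patch: a minimal, self-contained
sketch of the node/CCX decomposition the removed helpers performed,
using the EPYC constants from the old code (2 CCX per node, 8 cores per
node, a 3-node config rounded up to 4). The topology.h helpers this
patch switches to (cores_in_ccx(), dies_in_socket()) are assumed to
yield equivalent values for EPYC-like configurations; the -smp numbers
below are purely illustrative.

#include <stdio.h>

#define MAX_CCX            2   /* core complexes per node */
#define MAX_CORES_IN_NODE  8   /* cores per node          */

static int nodes_in_socket(int nr_cores)
{
    int nodes = (nr_cores + MAX_CORES_IN_NODE - 1) / MAX_CORES_IN_NODE;

    /* hardware has no 3-node configuration; round up to 4 */
    return (nodes == 3) ? 4 : nodes;
}

int main(void)
{
    int nr_cores = 24;                      /* e.g. -smp cores=24 */
    int nodes = nodes_in_socket(nr_cores);  /* 3, rounded up to 4 */
    int cores_per_ccx =
        (nr_cores + nodes * MAX_CCX - 1) / (nodes * MAX_CCX);  /* 3 */

    printf("nodes=%d, cores per CCX=%d\n", nodes, cores_per_ccx);
    return 0;
}
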
 target/i386/cpu.c | 146 +++++++++-------------------------------------
 1 file changed, 27 insertions(+), 119 deletions(-)
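
A similarly hedged sketch of the CPUID_Fn8000001D EAX "logical
processors sharing this cache" field that the L3 branch below fills in;
l3_cores stands in for whatever the new cores_in_ccx() helper reports
(one CCX worth of cores on EPYC) and the values are illustrative only:

#include <stdio.h>

int main(void)
{
    unsigned l3_cores = 4;   /* cores sharing the L3 (one CCX) */
    unsigned threads  = 2;   /* ms->smp.threads                */

    /* shifted into EAX[25:14], as in encode_cache_cpuid8000001d() */
    unsigned eax = ((l3_cores * threads) - 1) << 14;

    printf("EAX sharing field: 0x%x\n", eax);
    return 0;
}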

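Finally, a sketch of how the patched encode_topo_cpuid8000001e() packs
CPUID_Fn8000001E EBX/ECX once x86_topo_ids_from_apicid_epyc() has
filled in X86CPUTopoInfo (with the node id carried in die_id). Field
positions follow the code in the hunk; the id values are made up for
illustration:

#include <stdio.h>

int main(void)
{
    unsigned threads = 2;   /* threads per core (SMT case) */
    unsigned dies    = 4;   /* nodes (dies) in the socket  */
    unsigned pkg_id  = 0, die_id = 1, ccx_id = 1, core_id = 3;

    /* EBX: (threads - 1) << 8 | node id << 3 | ccx id << 2 | core id */
    unsigned ebx = ((threads - 1) << 8) | (die_id << 3) |
                   (ccx_id << 2) | core_id;

    /* ECX: (nodes - 1) << 8 | socket id << 2 | node id (dies <= 4) */
    unsigned ecx = ((dies - 1) << 8) | (pkg_id << 2) | die_id;

    printf("ebx=0x%x ecx=0x%x\n", ebx, ecx);
    return 0;
}
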
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 19751e37a7..be4583068c 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -28,6 +28,7 @@
 #include "sysemu/kvm.h"
 #include "sysemu/hvf.h"
 #include "sysemu/cpus.h"
+#include "sysemu/numa.h"
 #include "kvm_i386.h"
 #include "sev_i386.h"
 
@@ -338,64 +339,8 @@ static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
     }
 }
 
-/*
- * Definitions used for building CPUID Leaf 0x8000001D and 0x8000001E
- * Please refer to the AMD64 Architecture Programmer’s Manual Volume 3.
- * Define the constants to build the cpu topology. Right now, TOPOEXT
- * feature is enabled only on EPYC. So, these constants are based on
- * EPYC supported configurations. We may need to handle the cases if
- * these values change in future.
- */
-/* Maximum core complexes in a node */
-#define MAX_CCX 2
-/* Maximum cores in a core complex */
-#define MAX_CORES_IN_CCX 4
-/* Maximum cores in a node */
-#define MAX_CORES_IN_NODE 8
-/* Maximum nodes in a socket */
-#define MAX_NODES_PER_SOCKET 4
-
-/*
- * Figure out the number of nodes required to build this config.
- * Max cores in a node is 8
- */
-static int nodes_in_socket(int nr_cores)
-{
-    int nodes;
-
-    nodes = DIV_ROUND_UP(nr_cores, MAX_CORES_IN_NODE);
-
-   /* Hardware does not support config with 3 nodes, return 4 in that case */
-    return (nodes == 3) ? 4 : nodes;
-}
-
-/*
- * Decide the number of cores in a core complex with the given nr_cores using
- * following set constants MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE and
- * MAX_NODES_PER_SOCKET. Maintain symmetry as much as possible
- * L3 cache is shared across all cores in a core complex. So, this will also
- * tell us how many cores are sharing the L3 cache.
- */
-static int cores_in_core_complex(int nr_cores)
-{
-    int nodes;
-
-    /* Check if we can fit all the cores in one core complex */
-    if (nr_cores <= MAX_CORES_IN_CCX) {
-        return nr_cores;
-    }
-    /* Get the number of nodes required to build this config */
-    nodes = nodes_in_socket(nr_cores);
-
-    /*
-     * Divide the cores accros all the core complexes
-     * Return rounded up value
-     */
-    return DIV_ROUND_UP(nr_cores, nodes * MAX_CCX);
-}
-
 /* Encode cache info for CPUID[8000001D] */
-static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, CPUState *cs,
+static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, MachineState *ms,
                                 uint32_t *eax, uint32_t *ebx,
                                 uint32_t *ecx, uint32_t *edx)
 {
@@ -408,10 +353,10 @@ static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, CPUState *cs,
 
     /* L3 is shared among multiple cores */
     if (cache->level == 3) {
-        l3_cores = cores_in_core_complex(cs->nr_cores);
-        *eax |= ((l3_cores * cs->nr_threads) - 1) << 14;
+        l3_cores = cores_in_ccx(nb_numa_nodes, ms->smp.sockets, ms->smp.cores);
+        *eax |= ((l3_cores * ms->smp.threads) - 1) << 14;
     } else {
-        *eax |= ((cs->nr_threads - 1) << 14);
+        *eax |= ((ms->smp.threads - 1) << 14);
     }
 
     assert(cache->line_size > 0);
@@ -431,55 +376,19 @@ static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, CPUState *cs,
            (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
 }
 
-/* Data structure to hold the configuration info for a given core index */
-struct core_topology {
-    /* core complex id of the current core index */
-    int ccx_id;
-    /*
-     * Adjusted core index for this core in the topology
-     * This can be 0,1,2,3 with max 4 cores in a core complex
-     */
-    int core_id;
-    /* Node id for this core index */
-    int node_id;
-    /* Number of nodes in this config */
-    int num_nodes;
-};
-
-/*
- * Build the configuration closely match the EPYC hardware. Using the EPYC
- * hardware configuration values (MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE)
- * right now. This could change in future.
- * nr_cores : Total number of cores in the config
- * core_id  : Core index of the current CPU
- * topo     : Data structure to hold all the config info for this core index
- */
-static void build_core_topology(int nr_cores, int core_id,
-                                struct core_topology *topo)
-{
-    int nodes, cores_in_ccx;
-
-    /* First get the number of nodes required */
-    nodes = nodes_in_socket(nr_cores);
-
-    cores_in_ccx = cores_in_core_complex(nr_cores);
-
-    topo->node_id = core_id / (cores_in_ccx * MAX_CCX);
-    topo->ccx_id = (core_id % (cores_in_ccx * MAX_CCX)) / cores_in_ccx;
-    topo->core_id = core_id % cores_in_ccx;
-    topo->num_nodes = nodes;
-}
-
 /* Encode cache info for CPUID[8000001E] */
-static void encode_topo_cpuid8000001e(CPUState *cs, X86CPU *cpu,
+static void encode_topo_cpuid8000001e(MachineState *ms, X86CPU *cpu,
                                        uint32_t *eax, uint32_t *ebx,
                                        uint32_t *ecx, uint32_t *edx)
 {
-    struct core_topology topo = {0};
-    unsigned long nodes;
-    int shift;
+    X86CPUTopoInfo topo = {0};
+    unsigned long dies, temp_dies, shift;
+    CPUX86State *env = &cpu->env;
+
+    dies = dies_in_socket(nb_numa_nodes, ms->smp.sockets, ms->smp.cores);
+    x86_topo_ids_from_apicid_epyc(cpu->apic_id, env->nr_dies, ms->smp.cores,
+                                  ms->smp.threads, &topo);
 
-    build_core_topology(cs->nr_cores, cpu->core_id, &topo);
     *eax = cpu->apic_id;
     /*
      * CPUID_Fn8000001E_EBX
@@ -496,11 +405,11 @@ static void encode_topo_cpuid8000001e(CPUState *cs, X86CPU *cpu,
      *             3 Core complex id
      *           1:0 Core id
      */
-    if (cs->nr_threads - 1) {
-        *ebx = ((cs->nr_threads - 1) << 8) | (topo.node_id << 3) |
+    if (ms->smp.threads - 1) {
+        *ebx = ((ms->smp.threads - 1) << 8) | (topo.die_id << 3) |
                 (topo.ccx_id << 2) | topo.core_id;
     } else {
-        *ebx = (topo.node_id << 4) | (topo.ccx_id << 3) | topo.core_id;
+        *ebx = (topo.die_id << 4) | (topo.ccx_id << 3) | topo.core_id;
     }
     /*
      * CPUID_Fn8000001E_ECX
@@ -510,9 +419,8 @@ static void encode_topo_cpuid8000001e(CPUState *cs, X86CPU *cpu,
      *         2  Socket id
      *       1:0  Node id
      */
-    if (topo.num_nodes <= 4) {
-        *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << 2) |
-                topo.node_id;
+    if (dies <= 4) {
+        *ecx = ((dies - 1) << 8) | (topo.pkg_id << 2) | topo.die_id;
     } else {
         /*
          * Node id fix up. Actual hardware supports up to 4 nodes. But with
@@ -527,10 +435,9 @@ static void encode_topo_cpuid8000001e(CPUState *cs, X86CPU *cpu,
          * number of nodes. find_last_bit returns last set bit(0 based). Left
          * shift(+1) the socket id to represent all the nodes.
          */
-        nodes = topo.num_nodes - 1;
-        shift = find_last_bit(&nodes, 8);
-        *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << (shift + 1)) |
-                topo.node_id;
+        temp_dies = dies - 1;
+        shift = find_last_bit(&temp_dies, 8);
+        *ecx = ((dies - 1) << 8) | (topo.pkg_id << (shift + 1)) | topo.die_id;
     }
     *edx = 0;
 }
@@ -4169,6 +4076,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                    uint32_t *eax, uint32_t *ebx,
                    uint32_t *ecx, uint32_t *edx)
 {
+    MachineState *ms = MACHINE(qdev_get_machine());
     X86CPU *cpu = env_archcpu(env);
     CPUState *cs = env_cpu(env);
     uint32_t die_offset;
@@ -4584,19 +4492,19 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
         }
         switch (count) {
         case 0: /* L1 dcache info */
-            encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, cs,
+            encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, ms,
                                        eax, ebx, ecx, edx);
             break;
         case 1: /* L1 icache info */
-            encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, cs,
+            encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, ms,
                                        eax, ebx, ecx, edx);
             break;
         case 2: /* L2 cache info */
-            encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, cs,
+            encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, ms,
                                        eax, ebx, ecx, edx);
             break;
         case 3: /* L3 cache info */
-            encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, cs,
+            encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, ms,
                                        eax, ebx, ecx, edx);
             break;
         default: /* end of info */
@@ -4606,7 +4514,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
         break;
     case 0x8000001E:
         assert(cpu->core_id <= 255);
-        encode_topo_cpuid8000001e(cs, cpu,
+        encode_topo_cpuid8000001e(ms, cpu,
                                   eax, ebx, ecx, edx);
         break;
     case 0xC0000000:
-- 
2.20.1

