From: Babu Moger <babu.moger@amd.com>
To: mst@redhat.com, marcel.apfelbaum@gmail.com, pbonzini@redhat.com,
	rth@twiddle.net, ehabkost@redhat.com, mtosatti@redhat.com
Cc: geoff@hostfission.com, babu.moger@amd.com, kash@tripleback.net,
	qemu-devel@nongnu.org, kvm@vger.kernel.org
Subject: [PATCH v10 2/5] i386: Populate AMD Processor Cache Information for cpuid 0x8000001D
Date: Mon, 21 May 2018 20:41:12 -0400
Message-ID: <1526949675-106737-3-git-send-email-babu.moger@amd.com>
In-Reply-To: <1526949675-106737-1-git-send-email-babu.moger@amd.com>

Add support for the cpuid 0x8000001D leaf. Populate cache topology information
for each cache type (L1 Data, L1 Instruction, L2 and L3) reported by this leaf.
Please refer to the Processor Programming Reference (PPR) for AMD Family 17h
Models for more details.

Signed-off-by: Babu Moger <babu.moger@amd.com>
---
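Note (illustrative, not part of the commit): below is a minimal sketch of a
guest-side decoder for this leaf, useful for sanity-checking what the guest
sees. It assumes a GCC/clang toolchain that provides __get_cpuid_count() in
<cpuid.h>; the field positions follow the AMD64 APM Volume 3 layout that this
patch encodes.

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
    unsigned int eax, ebx, ecx, edx, subleaf;

    for (subleaf = 0; ; subleaf++) {
        if (!__get_cpuid_count(0x8000001D, subleaf, &eax, &ebx, &ecx, &edx)) {
            break;  /* leaf 0x8000001D not available */
        }
        if ((eax & 0x1f) == 0) {
            break;  /* cache type 0: no more cache levels reported */
        }
        unsigned int level      = (eax >> 5) & 0x7;          /* EAX[7:5]   */
        unsigned int sharing    = ((eax >> 14) & 0xfff) + 1; /* EAX[25:14] */
        unsigned int line_size  = (ebx & 0xfff) + 1;         /* EBX[11:0]  */
        unsigned int partitions = ((ebx >> 12) & 0x3ff) + 1; /* EBX[21:12] */
        unsigned int ways       = ((ebx >> 22) & 0x3ff) + 1; /* EBX[31:22] */
        unsigned int sets       = ecx + 1;                   /* ECX[31:0]  */

        printf("L%u: %u KiB, %u-way, shared by %u logical processors\n",
               level, (line_size * partitions * ways * sets) >> 10,
               ways, sharing);
    }
    return 0;
}

On a guest running with this series, the L3 sharing count is expected to
follow the CCX layout computed by num_sharing_l3_cache() below.
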
 target/i386/cpu.c | 103 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 target/i386/kvm.c |  29 +++++++++++++--
 2 files changed, 129 insertions(+), 3 deletions(-)

diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index d9773b6..1dd060a 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -336,6 +336,85 @@ static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
     }
 }
 
+/* Definitions used for building CPUID Leaf 0x8000001D and 0x8000001E */
+/* Please refer to the AMD64 Architecture Programmer’s Manual Volume 3 */
+#define MAX_CCX 2
+#define MAX_CORES_IN_CCX 4
+#define MAX_NODES_EPYC 4
+#define MAX_CORES_IN_NODE 8
+
+/* Encode #logical processors sharing the cache, minus 1 (EAX[25:14]) */
+#define NUM_SHARING_CACHE(threads, num_sharing)   ((threads > 1) ? \
+                         (((num_sharing - 1) * threads) + 1)  : \
+                         (num_sharing - 1))
+/*
+ * The L3 cache is shared among all the cores in a core complex (CCX).
+ * At most 4 cores can share an L3 cache.
+ */
+static int num_sharing_l3_cache(int nr_cores)
+{
+    int i, nodes = 1;
+
+    /* Check if we can fit all the cores in one CCX */
+    if (nr_cores <= MAX_CORES_IN_CCX) {
+        return nr_cores;
+    }
+    /*
+     * Figure out the number of nodes (or dies) required to build
+     * this configuration. A node holds at most 8 cores.
+     */
+    for (i = nodes; i <= MAX_NODES_EPYC; i++) {
+        /* Only 1, 2 and 4 node configurations are supported; skip 3 */
+        if (i == 3) {
+            continue;
+        }
+        if (nr_cores <= (i * MAX_CORES_IN_NODE)) {
+            nodes = i;
+            break;
+        }
+    }
+    /* Spread the cores across all CCXs and return the max cores per CCX */
+    return (nr_cores / (nodes * MAX_CCX)) +
+            ((nr_cores % (nodes * MAX_CCX)) ? 1 : 0);
+}
+
+/* Encode cache info for CPUID[8000001D] */
+static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, CPUState *cs,
+                                uint32_t *eax, uint32_t *ebx,
+                                uint32_t *ecx, uint32_t *edx)
+{
+    uint32_t num_share_l3;
+    assert(cache->size == cache->line_size * cache->associativity *
+                          cache->partitions * cache->sets);
+
+    *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) |
+               (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0);
+
+    /* L3 is shared among multiple cores */
+    if (cache->level == 3) {
+        num_share_l3 = num_sharing_l3_cache(cs->nr_cores);
+        *eax |= (NUM_SHARING_CACHE(cs->nr_threads, num_share_l3) << 14);
+    } else {
+        *eax |= ((cs->nr_threads - 1) << 14);
+    }
+
+    assert(cache->line_size > 0);
+    assert(cache->partitions > 0);
+    assert(cache->associativity > 0);
+    /* We don't implement fully-associative caches */
+    assert(cache->associativity < cache->sets);
+    *ebx = (cache->line_size - 1) |
+           ((cache->partitions - 1) << 12) |
+           ((cache->associativity - 1) << 22);
+
+    assert(cache->sets > 0);
+    *ecx = cache->sets - 1;
+
+    *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
+           (cache->inclusive ? CACHE_INCLUSIVE : 0) |
+           (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
+}
+
 /*
  * Definitions of the hardcoded cache entries we expose:
  * These are legacy cache values. If there is a need to change any
@@ -4005,6 +4084,30 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
             *edx = 0;
         }
         break;
+    case 0x8000001D:
+        *eax = 0;
+        switch (count) {
+        case 0: /* L1 dcache info */
+            encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, cs,
+                                       eax, ebx, ecx, edx);
+            break;
+        case 1: /* L1 icache info */
+            encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, cs,
+                                       eax, ebx, ecx, edx);
+            break;
+        case 2: /* L2 cache info */
+            encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, cs,
+                                       eax, ebx, ecx, edx);
+            break;
+        case 3: /* L3 cache info */
+            encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, cs,
+                                       eax, ebx, ecx, edx);
+            break;
+        default: /* end of info */
+            *eax = *ebx = *ecx = *edx = 0;
+            break;
+        }
+        break;
     case 0xC0000000:
         *eax = env->cpuid_xlevel2;
         *ebx = 0;
diff --git a/target/i386/kvm.c b/target/i386/kvm.c
index d6666a4..a8bf7eb 100644
--- a/target/i386/kvm.c
+++ b/target/i386/kvm.c
@@ -979,9 +979,32 @@ int kvm_arch_init_vcpu(CPUState *cs)
         }
         c = &cpuid_data.entries[cpuid_i++];
 
-        c->function = i;
-        c->flags = 0;
-        cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
+        switch (i) {
+        case 0x8000001d:
+            /* Query for all AMD cache information leaves */
+            for (j = 0; ; j++) {
+                c->function = i;
+                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+                c->index = j;
+                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
+
+                if (c->eax == 0) {
+                    break;
+                }
+                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
+                    fprintf(stderr, "cpuid_data is full, no space for "
+                            "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
+                    abort();
+                }
+                c = &cpuid_data.entries[cpuid_i++];
+            }
+            break;
+        default:
+            c->function = i;
+            c->flags = 0;
+            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
+            break;
+        }
     }
 
     /* Call Centaur's CPUID instructions they are supported. */
-- 
1.8.3.1
