From: Roger Pau Monne <roger.pau@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: Roger Pau Monne <roger.pau@citrix.com>, Wei Liu <wl@xen.org>,
	Anthony PERARD <anthony.perard@citrix.com>,
	Juergen Gross <jgross@suse.com>
Subject: [PATCH v3 3/6] libxl: introduce MSR data in libxl_cpuid_policy
Date: Thu, 20 Jul 2023 10:25:37 +0200	[thread overview]
Message-ID: <20230720082540.69444-4-roger.pau@citrix.com> (raw)
In-Reply-To: <20230720082540.69444-1-roger.pau@citrix.com>

Add a new array field to libxl_cpuid_policy in order to store the MSR
policies.

The MSR data is added to the libxl_cpuid_policy_list type so that
existing users can seamlessly pass MSR features as part of the CPUID
data, without requiring the introduction of a separate
domain_build_info field and a new set of handler functions.
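
For illustration, the internal container ends up carrying both arrays,
with the MSR array using a sentinel-terminated layout.  A minimal
sketch (struct xc_msr and XC_MSR_INPUT_UNUSED come from the libxc side
of this series; the helper below only illustrates the convention and is
not part of the patch):

    struct libxl__cpu_policy {
        struct xc_xend_cpuid *cpuid; /* CPUID leaves, as before */
        struct xc_msr *msr;          /* new: MSR entries */
    };

    /* Hypothetical helper: count MSR entries up to the sentinel. */
    static size_t msr_list_length(const struct xc_msr *msr)
    {
        size_t n = 0;

        if (msr)
            while (msr[n].index != XC_MSR_INPUT_UNUSED)
                n++;

        return n;
    }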

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
---
Changes since v2:
 - Unconditionally call free().
 - Implement the JSON marshaling functions.
---
It would be nice to rename the JSON output field from 'cpuid' to
'cpu_policy', so that it looks like:

"cpu_policy": {
    "cpuid": [
        {
            "leaf": 7,
            "subleaf": 0,
            "edx": "xx1xxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
        },
        {
            "leaf": 1,
            "ebx": "xxxxxxxxxxxxxxxx00010000xxxxxxxx"
        }
    ],
    "msr": [
        {
            "index": 266,
            "policy": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx1xx1x1"
        }
    ]
},

Sadly I have no idea how to do that, and it can be done in a followup
change anyway.
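
For reference, the output as generated by this patch (i.e. before any
such rename) is a plain top-level map, roughly as follows (filter
strings shortened here):

    {
        "cpuid": [
            { "leaf": 7, "subleaf": 0, "edx": "xx1x...x" },
            { "leaf": 1, "ebx": "xxxx...00010000xxxxxxxx" }
        ],
        "msr": [
            { "index": 266, "policy": "xxxx...x1xx1x1" }
        ]
    }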
---
 tools/libs/light/libxl_cpuid.c    | 142 ++++++++++++++++++++++++++----
 tools/libs/light/libxl_internal.h |   1 +
 tools/libs/light/libxl_types.idl  |   2 +-
 3 files changed, 128 insertions(+), 17 deletions(-)

diff --git a/tools/libs/light/libxl_cpuid.c b/tools/libs/light/libxl_cpuid.c
index 3c8b2a72c0b8..68b797886642 100644
--- a/tools/libs/light/libxl_cpuid.c
+++ b/tools/libs/light/libxl_cpuid.c
@@ -16,7 +16,7 @@
 
 int libxl__cpuid_policy_is_empty(libxl_cpuid_policy_list *pl)
 {
-    return !libxl_cpuid_policy_list_length(pl);
+    return !*pl || (!libxl_cpuid_policy_list_length(pl) && !(*pl)->msr);
 }
 
 void libxl_cpuid_dispose(libxl_cpuid_policy_list *pl)
@@ -40,6 +40,8 @@ void libxl_cpuid_dispose(libxl_cpuid_policy_list *pl)
         free(policy->cpuid);
     }
 
+    free(policy->msr);
+
     free(policy);
     *pl = NULL;
     return;
@@ -516,7 +518,8 @@ int libxl__cpuid_legacy(libxl_ctx *ctx, uint32_t domid, bool restore,
 
     r = xc_cpuid_apply_policy(ctx->xch, domid, restore, NULL, 0,
                               pae, itsc, nested_virt,
-                              info->cpuid ? info->cpuid->cpuid : NULL, NULL);
+                              info->cpuid ? info->cpuid->cpuid : NULL,
+                              info->cpuid ? info->cpuid->msr : NULL);
     if (r)
         LOGEVD(ERROR, -r, domid, "Failed to apply CPUID policy");
 
@@ -528,16 +531,22 @@ static const char *input_names[2] = { "leaf", "subleaf" };
 static const char *policy_names[4] = { "eax", "ebx", "ecx", "edx" };
 /*
  * Aiming for:
- * [
- *     { 'leaf':    'val-eax',
- *       'subleaf': 'val-ecx',
- *       'eax':     'filter',
- *       'ebx':     'filter',
- *       'ecx':     'filter',
- *       'edx':     'filter' },
- *     { 'leaf':    'val-eax', ..., 'eax': 'filter', ... },
- *     ... etc ...
- * ]
+ * {   'cpuid': [
+ *              { 'leaf':    'val-eax',
+ *                'subleaf': 'val-ecx',
+ *                'eax':     'filter',
+ *                'ebx':     'filter',
+ *                'ecx':     'filter',
+ *                'edx':     'filter' },
+ *              { 'leaf':    'val-eax', ..., 'eax': 'filter', ... },
+ *              ... etc ...
+ *     ],
+ *     'msr': [
+ *            { 'index': 'val-index',
+ *              'policy': 'filter', },
+ *              ... etc ...
+ *     ],
+ * }
  */
 
 yajl_gen_status libxl_cpuid_policy_list_gen_json(yajl_gen hand,
@@ -545,9 +554,16 @@ yajl_gen_status libxl_cpuid_policy_list_gen_json(yajl_gen hand,
 {
     libxl_cpuid_policy_list policy = *pl;
     struct xc_xend_cpuid *cpuid;
+    struct xc_msr *msr;
     yajl_gen_status s;
     int i, j;
 
+    s = yajl_gen_map_open(hand);
+    if (s != yajl_gen_status_ok) goto out;
+
+    s = libxl__yajl_gen_asciiz(hand, "cpuid");
+    if (s != yajl_gen_status_ok) goto out;
+
     s = yajl_gen_array_open(hand);
     if (s != yajl_gen_status_ok) goto out;
 
@@ -582,6 +598,39 @@ yajl_gen_status libxl_cpuid_policy_list_gen_json(yajl_gen hand,
 
 empty:
     s = yajl_gen_array_close(hand);
+    if (s != yajl_gen_status_ok) goto out;
+
+    s = libxl__yajl_gen_asciiz(hand, "msr");
+    if (s != yajl_gen_status_ok) goto out;
+
+    s = yajl_gen_array_open(hand);
+    if (s != yajl_gen_status_ok) goto out;
+
+    if (!policy || !policy->msr) goto done;
+    msr = policy->msr;
+
+    for (i = 0; msr[i].index != XC_MSR_INPUT_UNUSED; i++) {
+        s = yajl_gen_map_open(hand);
+        if (s != yajl_gen_status_ok) goto out;
+
+        s = libxl__yajl_gen_asciiz(hand, "index");
+        if (s != yajl_gen_status_ok) goto out;
+        s = yajl_gen_integer(hand, msr[i].index);
+        if (s != yajl_gen_status_ok) goto out;
+        s = libxl__yajl_gen_asciiz(hand, "policy");
+        if (s != yajl_gen_status_ok) goto out;
+        s = yajl_gen_string(hand,
+                            (const unsigned char *)msr[i].policy, 64);
+        if (s != yajl_gen_status_ok) goto out;
+
+        s = yajl_gen_map_close(hand);
+        if (s != yajl_gen_status_ok) goto out;
+    }
+
+done:
+    s = yajl_gen_array_close(hand);
+    if (s != yajl_gen_status_ok) goto out;
+    s = yajl_gen_map_close(hand);
 out:
     return s;
 }
@@ -592,17 +641,24 @@ int libxl__cpuid_policy_list_parse_json(libxl__gc *gc,
 {
     int i, size;
     struct xc_xend_cpuid *l;
+    struct xc_msr *msr;
+    const libxl__json_object *co;
     flexarray_t *array;
 
-    if (!libxl__json_object_is_array(o))
+    if (!libxl__json_object_is_map(o))
         return ERROR_FAIL;
 
-    array = libxl__json_object_get_array(o);
+    co = libxl__json_map_get("cpuid", o, JSON_ARRAY);
+    if (!libxl__json_object_is_array(co))
+        return ERROR_FAIL;
+
+    *p = libxl__calloc(NOGC, 1, sizeof(**p));
+
+    array = libxl__json_object_get_array(co);
     if (!array->count)
-        return 0;
+        goto cpuid_empty;
 
     size = array->count;
-    *p = libxl__calloc(NOGC, 1, sizeof(**p));
     /* need one extra slot as sentinel */
     l = (*p)->cpuid = libxl__calloc(NOGC, size + 1,
                                     sizeof(struct xc_xend_cpuid));
@@ -642,6 +698,40 @@ int libxl__cpuid_policy_list_parse_json(libxl__gc *gc,
         }
     }
 
+cpuid_empty:
+    co = libxl__json_map_get("msr", o, JSON_ARRAY);
+    if (!libxl__json_object_is_array(co))
+        return ERROR_FAIL;
+
+    array = libxl__json_object_get_array(co);
+    if (!array->count)
+        return 0;
+    size = array->count;
+    /* need one extra slot as sentinel */
+    msr = (*p)->msr = libxl__calloc(NOGC, size + 1, sizeof(struct xc_msr));
+
+    msr[size].index = XC_MSR_INPUT_UNUSED;
+
+    for (i = 0; i < size; i++) {
+        const libxl__json_object *t, *r;
+
+        if (flexarray_get(array, i, (void**)&t) != 0)
+            return ERROR_FAIL;
+
+        if (!libxl__json_object_is_map(t))
+            return ERROR_FAIL;
+
+        r = libxl__json_map_get("index", t, JSON_INTEGER);
+        if (!r) return ERROR_FAIL;
+        msr[i].index = libxl__json_object_get_integer(r);
+        r = libxl__json_map_get("policy", t, JSON_STRING);
+        if (!r) return ERROR_FAIL;
+        if (strlen(libxl__json_object_get_string(r)) !=
+            ARRAY_SIZE(msr[i].policy) - 1)
+            return ERROR_FAIL;
+        strcpy(msr[i].policy, libxl__json_object_get_string(r));
+    }
+
     return 0;
 }
 
@@ -677,6 +767,10 @@ void libxl_cpuid_policy_list_copy(libxl_ctx *ctx,
     }
 
     *pdst = libxl__calloc(NOGC, 1, sizeof(**pdst));
+
+    if (!(*psrc)->cpuid)
+        goto copy_msr;
+
     dst = &(*pdst)->cpuid;
     src = &(*psrc)->cpuid;
     len = libxl_cpuid_policy_list_length(psrc);
@@ -696,6 +790,22 @@ void libxl_cpuid_policy_list_copy(libxl_ctx *ctx,
                 (*dst)[i].policy[j] = NULL;
     }
 
+copy_msr:
+    if ((*psrc)->msr) {
+        const struct xc_msr *msr = (*psrc)->msr;
+
+        for (i = 0; msr[i].index != XC_MSR_INPUT_UNUSED; i++)
+            ;
+        len = i;
+        (*pdst)->msr = libxl__calloc(NOGC, len + 1, sizeof(struct xc_msr));
+        (*pdst)->msr[len].index = XC_MSR_INPUT_UNUSED;
+
+        for (i = 0; i < len; i++) {
+            (*pdst)->msr[i].index = msr[i].index;
+            strcpy((*pdst)->msr[i].policy, msr[i].policy);
+        }
+    }
+
 out:
     GC_FREE;
 }
diff --git a/tools/libs/light/libxl_internal.h b/tools/libs/light/libxl_internal.h
index ef882cff3912..b1a7cd9f615b 100644
--- a/tools/libs/light/libxl_internal.h
+++ b/tools/libs/light/libxl_internal.h
@@ -4871,6 +4871,7 @@ _hidden int libxl__domain_set_paging_mempool_size(
 
 struct libxl__cpu_policy {
     struct xc_xend_cpuid *cpuid;
+    struct xc_msr *msr;
 };
 
 #endif
diff --git a/tools/libs/light/libxl_types.idl b/tools/libs/light/libxl_types.idl
index 9e48bb772646..887824fdd828 100644
--- a/tools/libs/light/libxl_types.idl
+++ b/tools/libs/light/libxl_types.idl
@@ -19,7 +19,7 @@ libxl_mac = Builtin("mac", json_parse_type="JSON_STRING", passby=PASS_BY_REFEREN
 libxl_bitmap = Builtin("bitmap", json_parse_type="JSON_ARRAY", dispose_fn="libxl_bitmap_dispose", passby=PASS_BY_REFERENCE,
                        check_default_fn="libxl_bitmap_is_empty", copy_fn="libxl_bitmap_copy_alloc")
 libxl_cpuid_policy_list = Builtin("cpuid_policy_list", dispose_fn="libxl_cpuid_dispose", passby=PASS_BY_REFERENCE,
-                                  json_parse_type="JSON_ARRAY", check_default_fn="libxl__cpuid_policy_is_empty",
+                                  json_parse_type="JSON_MAP", check_default_fn="libxl__cpuid_policy_is_empty",
                                   copy_fn="libxl_cpuid_policy_list_copy")
 
 libxl_string_list = Builtin("string_list", dispose_fn="libxl_string_list_dispose", passby=PASS_BY_REFERENCE,
-- 
2.41.0



Thread overview: 9+ messages
2023-07-20  8:25 [PATCH v3 0/6] lib{xc,xl}: support for guest MSR features Roger Pau Monne
2023-07-20  8:25 ` [PATCH v3 1/6] libs/guest: introduce support for setting guest MSRs Roger Pau Monne
2023-07-20  8:25 ` [PATCH v3 2/6] libxl: change the type of libxl_cpuid_policy_list Roger Pau Monne
2023-07-20  8:25 ` Roger Pau Monne [this message]
2023-07-24 17:26   ` [PATCH v3 3/6] libxl: introduce MSR data in libxl_cpuid_policy Anthony PERARD
2023-07-25  8:55     ` Roger Pau Monné
2023-07-20  8:25 ` [PATCH v3 4/6] libxl: split logic to parse user provided CPUID features Roger Pau Monne
2023-07-20  8:25 ` [PATCH v3 5/6] libxl: use the cpuid feature names from cpufeatureset.h Roger Pau Monne
2023-07-20  8:25 ` [PATCH v3 6/6] libxl: add support for parsing MSR features Roger Pau Monne
