From: ira.weiny@intel.com
To: Michael Tsirkin <mst@redhat.com>,
	Ben Widawsky <bwidawsk@kernel.org>,
	Jonathan Cameron <Jonathan.Cameron@huawei.com>
Cc: Ira Weiny <ira.weiny@intel.com>,
	qemu-devel@nongnu.org, linux-cxl@vger.kernel.org
Subject: [RFC PATCH 4/6] hw/cxl/mailbox: Wire up get/clear event mailbox commands
Date: Mon, 10 Oct 2022 15:29:42 -0700
Message-ID: <20221010222944.3923556-5-ira.weiny@intel.com>
In-Reply-To: <20221010222944.3923556-1-ira.weiny@intel.com>

From: Ira Weiny <ira.weiny@intel.com>

Replace the stubbed-out CXL Get/Clear Event Records mailbox commands with
implementations that return the mock event information added in the
previous patch.

Get Event Records returns one record per call, setting the More Event
Records flag and mock overflow data as appropriate.  Clear Event Records
accepts exactly one handle and only clears the current (oldest) record,
matching the order in which the get side hands out records.

Signed-off-by: Ira Weiny <ira.weiny@intel.com>
---
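For reviewers, a sketch of the payload layouts the two handlers assume,
following CXL r3.0 sections 8.2.9.2.2 (Get Event Records) and 8.2.9.2.3
(Clear Event Records).  Field names below are illustrative; the
authoritative definitions are in hw/cxl/cxl_events.h from patch 3/6.
The 0x20-byte fixed portion of the get payload matches the size the old
stub zeroed, and the fixed clear payload plus a single handle is 8
bytes, which is why the command table now declares an input size of 8.

/* Illustrative layouts only; all multi-byte fields are little endian. */
struct cxl_get_event_payload {
    uint8_t flags;
    uint8_t reserved1;
    uint16_t overflow_err_count;
    uint64_t first_overflow_timestamp;
    uint64_t last_overflow_timestamp;
    uint16_t record_count;
    uint8_t reserved2[0xa];
    struct cxl_event_record_raw record; /* this mock returns one record */
} QEMU_PACKED;                          /* fixed portion: 0x20 bytes */

struct cxl_mbox_clear_event_payload {
    uint8_t event_log;   /* enum cxl_event_log_type */
    uint8_t clear_flags;
    uint8_t nr_recs;     /* this mock requires exactly 1 */
    uint8_t reserved[3];
    uint16_t handle;     /* handle of the record to clear */
} QEMU_PACKED;           /* 6-byte header + one handle = 8 bytes */
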
 hw/cxl/cxl-device-utils.c  |   1 +
 hw/cxl/cxl-mailbox-utils.c | 103 +++++++++++++++++++++++++++++++++++--
 2 files changed, 101 insertions(+), 3 deletions(-)
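
As a usage illustration, a guest drains a log by alternating the two
commands until the More Event Records flag clears.  The loop below is a
hypothetical sketch: cxl_mbox_cmd() is a stand-in for whatever issues a
mailbox command on the guest side, not an interface added by this
series; only the opcodes (0x0100/0x0101) come from the spec.

/* Hypothetical guest-side consumer; not part of this patch. */
static void drain_event_log(uint8_t log_type)
{
    struct cxl_get_event_payload get_pl;
    struct cxl_mbox_clear_event_payload clr_pl = {
        .event_log = log_type,
        .nr_recs = 1,            /* this mock clears one record per call */
    };

    do {
        cxl_mbox_cmd(0x0100 /* Get Event Records */,
                     &log_type, sizeof(log_type), &get_pl, sizeof(get_pl));
        if (le16_to_cpu(get_pl.record_count) == 0) {
            break;               /* log is empty */
        }
        /*
         * Clear exactly the record just returned; the device rejects
         * any handle other than the current one.
         */
        clr_pl.handle = get_pl.record.hdr.handle;
        cxl_mbox_cmd(0x0101 /* Clear Event Records */,
                     &clr_pl, sizeof(clr_pl), NULL, 0);
    } while (get_pl.flags & CXL_GET_EVENT_FLAG_MORE_RECORDS);
}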

diff --git a/hw/cxl/cxl-device-utils.c b/hw/cxl/cxl-device-utils.c
index 687759b3017b..4bb41101882e 100644
--- a/hw/cxl/cxl-device-utils.c
+++ b/hw/cxl/cxl-device-utils.c
@@ -262,4 +262,5 @@ void cxl_device_register_init_common(CXLDeviceState *cxl_dstate)
     memdev_reg_init_common(cxl_dstate);
 
     assert(cxl_initialize_mailbox(cxl_dstate) == 0);
+    cxl_mock_add_event_logs(cxl_dstate);
 }
diff --git a/hw/cxl/cxl-mailbox-utils.c b/hw/cxl/cxl-mailbox-utils.c
index bb66c765a538..df345f23a30c 100644
--- a/hw/cxl/cxl-mailbox-utils.c
+++ b/hw/cxl/cxl-mailbox-utils.c
@@ -9,6 +9,7 @@
 
 #include "qemu/osdep.h"
 #include "hw/cxl/cxl.h"
+#include "hw/cxl/cxl_events.h"
 #include "hw/pci/pci.h"
 #include "qemu/cutils.h"
 #include "qemu/log.h"
@@ -116,11 +117,107 @@ struct cxl_cmd {
         return CXL_MBOX_SUCCESS;                                          \
     }
 
-DEFINE_MAILBOX_HANDLER_ZEROED(events_get_records, 0x20);
-DEFINE_MAILBOX_HANDLER_NOP(events_clear_records);
 DEFINE_MAILBOX_HANDLER_ZEROED(events_get_interrupt_policy, 4);
 DEFINE_MAILBOX_HANDLER_NOP(events_set_interrupt_policy);
 
+static ret_code cmd_events_get_records(struct cxl_cmd *cmd,
+                                       CXLDeviceState *cxlds,
+                                       uint16_t *len)
+{
+    struct cxl_get_event_payload *pl;
+    struct cxl_event_log *log;
+    uint8_t log_type;
+    uint16_t nr_overflow;
+
+    if (cmd->in < sizeof(log_type)) {
+        return CXL_MBOX_INVALID_INPUT;
+    }
+
+    log_type = *((uint8_t *)cmd->payload);
+    if (log_type >= CXL_EVENT_TYPE_MAX) {
+        return CXL_MBOX_INVALID_INPUT;
+    }
+
+    pl = (struct cxl_get_event_payload *)cmd->payload;
+
+    log = find_event_log(cxlds, log_type);
+    if (!log || log_empty(log)) {
+        goto no_data;
+    }
+
+    memset(pl, 0, sizeof(*pl));
+    pl->record_count = const_le16(1);
+
+    if (log_rec_left(log) > 1) {
+        pl->flags |= CXL_GET_EVENT_FLAG_MORE_RECORDS;
+    }
+
+    nr_overflow = log_overflow(log);
+    if (nr_overflow) {
+        struct timespec ts;
+        uint64_t ns;
+
+        clock_gettime(CLOCK_REALTIME, &ts);
+
+        ns = ((uint64_t)ts.tv_sec * 1000000000) + (uint64_t)ts.tv_nsec;
+
+        pl->flags |= CXL_GET_EVENT_FLAG_OVERFLOW;
+        pl->overflow_err_count = cpu_to_le16(nr_overflow);
+        ns -= 5000000000; /* first overflow 5s ago */
+        pl->first_overflow_timestamp = cpu_to_le64(ns);
+        ns += 4000000000; /* last overflow 1s ago */
+        pl->last_overflow_timestamp = cpu_to_le64(ns);
+    }
+
+    memcpy(&pl->record, get_cur_event(log), sizeof(pl->record));
+    pl->record.hdr.handle = get_cur_event_handle(log);
+    *len = sizeof(pl->record);
+    return CXL_MBOX_SUCCESS;
+
+no_data:
+    *len = sizeof(*pl) - sizeof(pl->record);
+    memset(pl, 0, *len);
+    return CXL_MBOX_SUCCESS;
+}
+
+static ret_code cmd_events_clear_records(struct cxl_cmd *cmd,
+                                         CXLDeviceState *cxlds,
+                                         uint16_t *len)
+{
+    struct cxl_mbox_clear_event_payload *pl;
+    struct cxl_event_log *log;
+    uint8_t log_type;
+
+    pl = (struct cxl_mbox_clear_event_payload *)cmd->payload;
+    log_type = pl->event_log;
+
+    /* Don't handle more than 1 record at a time */
+    if (pl->nr_recs != 1) {
+        return CXL_MBOX_INVALID_INPUT;
+    }
+
+    if (log_type >= CXL_EVENT_TYPE_MAX) {
+        return CXL_MBOX_INVALID_INPUT;
+    }
+
+    log = find_event_log(cxlds, log_type);
+    if (!log) {
+        return CXL_MBOX_SUCCESS;
+    }
+
+    /*
+     * The current code clears events as they are read.  Test that behavior
+     * only; don't support clearing from the middle of the log.
+     */
+    if (log->cur_event != le16_to_cpu(pl->handle)) {
+        return CXL_MBOX_INVALID_INPUT;
+    }
+
+    log->cur_event++;
+    *len = 0;
+    return CXL_MBOX_SUCCESS;
+}
+
 /* 8.2.9.2.1 */
 static ret_code cmd_firmware_update_get_info(struct cxl_cmd *cmd,
                                              CXLDeviceState *cxl_dstate,
@@ -391,7 +488,7 @@ static struct cxl_cmd cxl_cmd_set[256][256] = {
     [EVENTS][GET_RECORDS] = { "EVENTS_GET_RECORDS",
         cmd_events_get_records, 1, 0 },
     [EVENTS][CLEAR_RECORDS] = { "EVENTS_CLEAR_RECORDS",
-        cmd_events_clear_records, ~0, IMMEDIATE_LOG_CHANGE },
+        cmd_events_clear_records, 8, IMMEDIATE_LOG_CHANGE },
     [EVENTS][GET_INTERRUPT_POLICY] = { "EVENTS_GET_INTERRUPT_POLICY",
         cmd_events_get_interrupt_policy, 0, 0 },
     [EVENTS][SET_INTERRUPT_POLICY] = { "EVENTS_SET_INTERRUPT_POLICY",
-- 
2.37.2


