From: Eric Auger <eric.auger@redhat.com>
To: eric.auger.pro@gmail.com, eric.auger@redhat.com,
	peter.maydell@linaro.org, edgar.iglesias@gmail.com,
	qemu-arm@nongnu.org, qemu-devel@nongnu.org,
	prem.mallappa@gmail.com
Cc: drjones@redhat.com, christoffer.dall@linaro.org,
	Radha.Chintakuntla@cavium.com, Sunil.Goutham@cavium.com
Subject: [Qemu-devel] [RFC v3 3/5] hw/arm/smmuv3: smmuv3 emulation model
Date: Thu, 30 Mar 2017 21:42:16 +0200	[thread overview]
Message-ID: <1490902938-9009-4-git-send-email-eric.auger@redhat.com> (raw)
In-Reply-To: <1490902938-9009-1-git-send-email-eric.auger@redhat.com>

From: Prem Mallappa <prem.mallappa@broadcom.com>

Introduce the SMMUv3 model, derived from the SMMU base class. It is
based on the ARM System MMUv3 specification (v17).
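
For reference, here is a rough sketch of how a board model can
instantiate and wire up the device. The base address and interrupt
numbers below are made up for illustration only; the actual virt
machine integration is done in a later patch of this series:

    static void create_smmuv3(qemu_irq *pic)
    {
        hwaddr base = 0x09050000;           /* hypothetical base address */
        DeviceState *dev = qdev_create(NULL, TYPE_SMMU_V3_DEV);
        SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
        int i;

        qdev_init_nofail(dev);
        /* single 128K (2 x 64K pages) MMIO region, see smmu_realize() */
        sysbus_mmio_map(sbd, 0, base);
        /* 4 interrupt lines: GERROR, PRIQ, EVTQ, CMD_SYNC */
        for (i = 0; i < 4; i++) {
            sysbus_connect_irq(sbd, i, pic[74 + i]); /* hypothetical SPIs */
        }
    }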

Signed-off-by: Prem Mallappa <prem.mallappa@broadcom.com>
Signed-off-by: Eric Auger <eric.auger@redhat.com>

---

v2 -> v3:
- move creation of include/hw/arm/smmuv3.h to this patch to fix a compilation issue
- enable compilation of the new files
- fix sbus allocation in smmu_init_pci_iommu
- restructure code into headers
- misc cleanups
---
 hw/arm/Makefile.objs     |    2 +-
 hw/arm/smmuv3-internal.h |  544 ++++++++++++++++++++++
 hw/arm/smmuv3.c          | 1131 ++++++++++++++++++++++++++++++++++++++++++++++
 include/hw/arm/smmuv3.h  |   88 ++++
 4 files changed, 1764 insertions(+), 1 deletion(-)
 create mode 100644 hw/arm/smmuv3-internal.h
 create mode 100644 hw/arm/smmuv3.c
 create mode 100644 include/hw/arm/smmuv3.h

diff --git a/hw/arm/Makefile.objs b/hw/arm/Makefile.objs
index 6c7d4af..02cd23f 100644
--- a/hw/arm/Makefile.objs
+++ b/hw/arm/Makefile.objs
@@ -18,4 +18,4 @@ obj-$(CONFIG_FSL_IMX25) += fsl-imx25.o imx25_pdk.o
 obj-$(CONFIG_FSL_IMX31) += fsl-imx31.o kzm.o
 obj-$(CONFIG_FSL_IMX6) += fsl-imx6.o sabrelite.o
 obj-$(CONFIG_ASPEED_SOC) += aspeed_soc.o aspeed.o
-obj-$(CONFIG_ARM_SMMUV3) += smmu-common.o
+obj-$(CONFIG_ARM_SMMUV3) += smmu-common.o smmuv3.o
diff --git a/hw/arm/smmuv3-internal.h b/hw/arm/smmuv3-internal.h
new file mode 100644
index 0000000..1675e03
--- /dev/null
+++ b/hw/arm/smmuv3-internal.h
@@ -0,0 +1,544 @@
+/*
+ * ARM SMMUv3 support - Internal API
+ *
+ * Copyright (C) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2017 Red Hat, Inc.
+ * Written by Prem Mallappa, Eric Auger
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_ARM_SMMU_V3_INTERNAL_H
+#define HW_ARM_SMMU_V3_INTERNAL_H
+
+/*****************************
+ * MMIO Register
+ *****************************/
+enum {
+    SMMU_REG_IDR0            = 0x0,
+
+#define SMMU_IDR0_S2P            (1 << 0)
+#define SMMU_IDR0_S1P            (1 << 1)
+#define SMMU_IDR0_TTF            (0x3 << 2)
+#define SMMU_IDR0_HTTU           (0x3 << 6)
+#define SMMU_IDR0_HYP            (1 << 9)
+#define SMMU_IDR0_ATS            (1 << 10)
+#define SMMU_IDR0_VMID16         (1 << 18)
+#define SMMU_IDR0_CD2L           (1 << 19)
+
+    SMMU_REG_IDR1            = 0x4,
+    SMMU_REG_IDR2            = 0x8,
+    SMMU_REG_IDR3            = 0xc,
+    SMMU_REG_IDR4            = 0x10,
+    SMMU_REG_IDR5            = 0x14,
+    SMMU_REG_IIDR            = 0x1c,
+    SMMU_REG_CR0             = 0x20,
+
+#define SMMU_CR0_SMMU_ENABLE (1 << 0)
+#define SMMU_CR0_PRIQ_ENABLE (1 << 1)
+#define SMMU_CR0_EVTQ_ENABLE (1 << 2)
+#define SMMU_CR0_CMDQ_ENABLE (1 << 3)
+#define SMMU_CR0_ATS_CHECK   (1 << 4)
+
+    SMMU_REG_CR0_ACK         = 0x24,
+    SMMU_REG_CR1             = 0x28,
+    SMMU_REG_CR2             = 0x2c,
+
+    SMMU_REG_STATUSR         = 0x40,
+
+    SMMU_REG_IRQ_CTRL        = 0x50,
+    SMMU_REG_IRQ_CTRL_ACK    = 0x54,
+
+#define SMMU_IRQ_CTRL_GERROR_EN (1 << 0)
+#define SMMU_IRQ_CTRL_EVENT_EN  (1 << 1)
+#define SMMU_IRQ_CTRL_PRI_EN    (1 << 2)
+
+    SMMU_REG_GERROR          = 0x60,
+
+#define SMMU_GERROR_CMDQ       (1 << 0)
+#define SMMU_GERROR_EVENTQ     (1 << 2)
+#define SMMU_GERROR_PRIQ       (1 << 3)
+#define SMMU_GERROR_MSI_CMDQ   (1 << 4)
+#define SMMU_GERROR_MSI_EVENTQ (1 << 5)
+#define SMMU_GERROR_MSI_PRIQ   (1 << 6)
+#define SMMU_GERROR_MSI_GERROR (1 << 7)
+#define SMMU_GERROR_SFM_ERR    (1 << 8)
+
+    SMMU_REG_GERRORN         = 0x64,
+    SMMU_REG_GERROR_IRQ_CFG0 = 0x68,
+    SMMU_REG_GERROR_IRQ_CFG1 = 0x70,
+    SMMU_REG_GERROR_IRQ_CFG2 = 0x74,
+
+    /* SMMU_BASE_RA Applies to STRTAB_BASE, CMDQ_BASE and EVTQ_BASE */
+#define SMMU_BASE_RA        (1ULL << 62)
+    SMMU_REG_STRTAB_BASE     = 0x80,
+    SMMU_REG_STRTAB_BASE_CFG = 0x88,
+
+    SMMU_REG_CMDQ_BASE       = 0x90,
+    SMMU_REG_CMDQ_PROD       = 0x98,
+    SMMU_REG_CMDQ_CONS       = 0x9c,
+    /* CMD Consumer (CONS) */
+#define SMMU_CMD_CONS_ERR_SHIFT        24
+#define SMMU_CMD_CONS_ERR_BITS         7
+
+    SMMU_REG_EVTQ_BASE       = 0xa0,
+    SMMU_REG_EVTQ_PROD       = 0xa8,
+    SMMU_REG_EVTQ_CONS       = 0xac,
+    SMMU_REG_EVTQ_IRQ_CFG0   = 0xb0,
+    SMMU_REG_EVTQ_IRQ_CFG1   = 0xb8,
+    SMMU_REG_EVTQ_IRQ_CFG2   = 0xbc,
+
+    SMMU_REG_PRIQ_BASE       = 0xc0,
+    SMMU_REG_PRIQ_PROD       = 0xc8,
+    SMMU_REG_PRIQ_CONS       = 0xcc,
+    SMMU_REG_PRIQ_IRQ_CFG0   = 0xd0,
+    SMMU_REG_PRIQ_IRQ_CFG1   = 0xd8,
+    SMMU_REG_PRIQ_IRQ_CFG2   = 0xdc,
+
+    SMMU_ID_REGS_OFFSET      = 0xfd0,
+
+    /* Secure registers are not used for now */
+    SMMU_SECURE_OFFSET       = 0x8000,
+};
+
+/**********************
+ * Data Structures
+ **********************/
+
+struct __smmu_data2 {
+    uint32_t word[2];
+};
+
+struct __smmu_data8 {
+    uint32_t word[8];
+};
+
+struct __smmu_data16 {
+    uint32_t word[16];
+};
+
+struct __smmu_data4 {
+    uint32_t word[4];
+};
+
+typedef struct __smmu_data2  STEDesc; /* STE Level 1 Descriptor */
+typedef struct __smmu_data16 Ste;     /* Stream Table Entry(STE) */
+typedef struct __smmu_data2  CDDesc;  /* CD Level 1 Descriptor */
+typedef struct __smmu_data16 Cd;      /* Context Descriptor(CD) */
+
+typedef struct __smmu_data4  Cmd; /* Command Entry */
+typedef struct __smmu_data8  Evt; /* Event Entry */
+typedef struct __smmu_data4  Pri; /* PRI entry */
+
+/*****************************
+ * STE fields
+ *****************************/
+
+#define STE_VALID(x)   extract32((x)->word[0], 0, 1) /* 0 */
+#define STE_CONFIG(x)  (extract32((x)->word[0], 1, 3) & 0x7)
+enum {
+    STE_CONFIG_NONE      = 0,
+    STE_CONFIG_BYPASS    = 4,           /* S1 Bypass, S2 Bypass */
+    STE_CONFIG_S1TR      = 1,           /* S1 Translate, S2 Bypass */
+    STE_CONFIG_S2TR      = 2,           /* S1 Bypass, S2 Translate */
+    STE_CONFIG_S1TR_S2TR = 3,           /* S1 Translate, S2 Translate */
+};
+#define STE_S1FMT(x)   extract32((x)->word[0], 4, 2)
+#define STE_S1CDMAX(x) extract32((x)->word[1], 8, 2)
+#define STE_EATS(x)    extract32((x)->word[2], 28, 2)
+#define STE_STRW(x)    extract32((x)->word[2], 30, 2)
+#define STE_S2VMID(x)  extract32((x)->word[4], 0, 16) /* 4 */
+#define STE_S2T0SZ(x)  extract32((x)->word[5], 0, 6) /* 5 */
+#define STE_S2TG(x)    extract32((x)->word[5], 14, 2)
+#define STE_S2PS(x)    extract32((x)->word[5], 16, 3)
+#define STE_S2AA64(x)  extract32((x)->word[5], 19, 1)
+#define STE_S2HD(x)    extract32((x)->word[5], 24, 1)
+#define STE_S2HA(x)    extract32((x)->word[5], 25, 1)
+#define STE_S2S(x)     extract32((x)->word[5], 26, 1)
+#define STE_CTXPTR(x)                                           \
+    ({                                                          \
+        unsigned long addr;                                     \
+        addr = (uint64_t)extract32((x)->word[1], 0, 16) << 32;  \
+        addr |= (uint64_t)((x)->word[0] & 0xffffffc0);          \
+        addr;                                                   \
+    })
+
+#define STE_S2TTB(x)                                            \
+    ({                                                          \
+        unsigned long addr;                                     \
+        addr = (uint64_t)extract32((x)->word[7], 0, 16) << 32;  \
+        addr |= (uint64_t)((x)->word[6] & 0xfffffff0);          \
+        addr;                                                   \
+    })
+
+static inline int is_ste_valid(SMMUV3State *s, Ste *ste)
+{
+    return STE_VALID(ste);
+}
+
+static inline int is_ste_bypass(SMMUV3State *s, Ste *ste)
+{
+    return STE_CONFIG(ste) == STE_CONFIG_BYPASS;
+}
+
+/*****************************
+ * CD fields
+ *****************************/
+#define CD_VALID(x)   extract32((x)->word[0], 30, 1)
+#define CD_ASID(x)    extract32((x)->word[1], 16, 16)
+#define CD_TTB(x, sel)                                      \
+    ({                                                      \
+        uint64_t hi, lo;                                    \
+        hi = extract32((x)->word[(sel) * 2 + 3], 0, 16);    \
+        hi <<= 32;                                          \
+        lo = (x)->word[(sel) * 2 + 2] & ~0xf;               \
+        hi | lo;                                            \
+    })
+
+#define CD_TSZ(x, sel)   extract32((x)->word[0], (16 * (sel)) + 0, 6)
+#define CD_TG(x, sel)    extract32((x)->word[0], (16 * (sel)) + 6, 2)
+#define CD_EPD(x, sel)   extract32((x)->word[0], (16 * (sel)) + 14, 1)
+
+#define CD_T0SZ(x)    CD_TSZ((x), 0)
+#define CD_T1SZ(x)    CD_TSZ((x), 1)
+#define CD_TG0(x)     CD_TG((x), 0)
+#define CD_TG1(x)     CD_TG((x), 1)
+#define CD_EPD0(x)    CD_EPD((x), 0)
+#define CD_EPD1(x)    CD_EPD((x), 1)
+#define CD_IPS(x)     extract32((x)->word[1], 0, 3)
+#define CD_AARCH64(x) extract32((x)->word[1], 9, 1)
+#define CD_TTB0(x)    CD_TTB((x), 0)
+#define CD_TTB1(x)    CD_TTB((x), 1)
+
+#define CDM_VALID(x)    ((x)->word[0] & 0x1)
+
+static inline int is_cd_valid(SMMUV3State *s, Ste *ste, Cd *cd)
+{
+    return CD_VALID(cd);
+}
+
+/*****************************
+ * Commands
+ *****************************/
+enum {
+    SMMU_CMD_PREFETCH_CONFIG = 0x01,
+    SMMU_CMD_PREFETCH_ADDR,
+    SMMU_CMD_CFGI_STE,
+    SMMU_CMD_CFGI_STE_RANGE,
+    SMMU_CMD_CFGI_CD,
+    SMMU_CMD_CFGI_CD_ALL,
+    SMMU_CMD_TLBI_NH_ALL     = 0x10,
+    SMMU_CMD_TLBI_NH_ASID,
+    SMMU_CMD_TLBI_NH_VA,
+    SMMU_CMD_TLBI_NH_VAA,
+    SMMU_CMD_TLBI_EL3_ALL    = 0x18,
+    SMMU_CMD_TLBI_EL3_VA     = 0x1a,
+    SMMU_CMD_TLBI_EL2_ALL    = 0x20,
+    SMMU_CMD_TLBI_EL2_ASID,
+    SMMU_CMD_TLBI_EL2_VA,
+    SMMU_CMD_TLBI_EL2_VAA,  /* 0x23 */
+    SMMU_CMD_TLBI_S12_VMALL  = 0x28,
+    SMMU_CMD_TLBI_S2_IPA     = 0x2a,
+    SMMU_CMD_TLBI_NSNH_ALL   = 0x30,
+    SMMU_CMD_ATC_INV         = 0x40,
+    SMMU_CMD_PRI_RESP,
+    SMMU_CMD_RESUME          = 0x44,
+    SMMU_CMD_STALL_TERM,
+    SMMU_CMD_SYNC,          /* 0x46 */
+};
+
+/*****************************
+ *  Register Access Primitives
+ *****************************/
+
+static void smmu_write64_reg(SMMUV3State *s, uint32_t addr, uint64_t val)
+{
+    addr >>= 2;
+    s->regs[addr] = val & 0xFFFFFFFFULL;
+    s->regs[addr + 1] = val >> 32;
+}
+
+static void smmu_write_reg(SMMUV3State *s, uint32_t addr, uint64_t val)
+{
+    s->regs[addr >> 2] = val;
+}
+
+static inline uint32_t smmu_read_reg(SMMUV3State *s, uint32_t addr)
+{
+    return s->regs[addr >> 2];
+}
+
+static inline uint64_t smmu_read64_reg(SMMUV3State *s, uint32_t addr)
+{
+    addr >>= 2;
+    return s->regs[addr] | (s->regs[addr + 1] << 32);
+}
+
+#define smmu_read32_reg smmu_read_reg
+#define smmu_write32_reg smmu_write_reg
+
+/*****************************
+ * CMDQ fields
+ *****************************/
+
+enum { /* Command Errors */
+    SMMU_CMD_ERR_NONE = 0,
+    SMMU_CMD_ERR_ILLEGAL,
+    SMMU_CMD_ERR_ABORT
+};
+
+enum { /* Command completion notification */
+    CMD_SYNC_SIG_NONE,
+    CMD_SYNC_SIG_IRQ,
+    CMD_SYNC_SIG_SEV,
+};
+
+#define CMD_TYPE(x)  extract32((x)->word[0], 0, 8)
+#define CMD_SEC(x)   extract32((x)->word[0], 9, 1)
+#define CMD_SEV(x)   extract32((x)->word[0], 10, 1)
+#define CMD_AC(x)    extract32((x)->word[0], 12, 1)
+#define CMD_AB(x)    extract32((x)->word[0], 13, 1)
+#define CMD_CS(x)    extract32((x)->word[0], 12, 2)
+#define CMD_SSID(x)  extract32((x)->word[0], 16, 16)
+#define CMD_SID(x)   ((x)->word[1])
+#define CMD_VMID(x)  extract32((x)->word[1], 0, 16)
+#define CMD_ASID(x)  extract32((x)->word[1], 16, 16)
+#define CMD_STAG(x)  extract32((x)->word[2], 0, 16)
+#define CMD_RESP(x)  extract32((x)->word[2], 11, 2)
+#define CMD_GRPID(x) extract32((x)->word[3], 0, 8)
+#define CMD_SIZE(x)  extract32((x)->word[3], 0, 16)
+#define CMD_LEAF(x)  extract32((x)->word[3], 0, 1)
+#define CMD_SPAN(x)  extract32((x)->word[3], 0, 5)
+#define CMD_ADDR(x) ({                                  \
+            uint64_t addr = (uint64_t)(x)->word[3];     \
+            addr <<= 32;                                \
+            addr |= (x)->word[2] & ~0xfffULL;           \
+            addr;                                       \
+        })
+
+/***************************
+ * Queue Handling
+ ***************************/
+
+typedef enum {
+    CMD_Q_EMPTY,
+    CMD_Q_FULL,
+    CMD_Q_INUSE,
+} SMMUQStatus;
+
+#define Q_ENTRY(q, idx)  ((q)->base + (q)->ent_size * (idx))
+#define Q_WRAP(q, pc)    ((pc) >> (q)->shift)
+#define Q_IDX(q, pc)     ((pc) & ((1 << (q)->shift) - 1))
+
+static inline SMMUQStatus
+__smmu_queue_status(SMMUV3State *s, SMMUQueue *q)
+{
+    uint32_t prod = Q_IDX(q, q->prod), cons = Q_IDX(q, q->cons);
+    if ((prod == cons) && (q->wrap.prod != q->wrap.cons)) {
+        return CMD_Q_FULL;
+    } else if ((prod == cons) && (q->wrap.prod == q->wrap.cons)) {
+        return CMD_Q_EMPTY;
+    }
+    return CMD_Q_INUSE;
+}
+#define smmu_is_q_full(s, q) (__smmu_queue_status(s, q) == CMD_Q_FULL)
+#define smmu_is_q_empty(s, q) (__smmu_queue_status(s, q) == CMD_Q_EMPTY)
+
+static int __smmu_q_enabled(SMMUV3State *s, uint32_t q)
+{
+    return smmu_read32_reg(s, SMMU_REG_CR0) & q;
+}
+#define smmu_cmd_q_enabled(s) __smmu_q_enabled(s, SMMU_CR0_CMDQ_ENABLE)
+#define smmu_evt_q_enabled(s) __smmu_q_enabled(s, SMMU_CR0_EVTQ_ENABLE)
+
+#define SMMU_CMDQ_ERR(s) ((smmu_read32_reg(s, SMMU_REG_GERROR) ^    \
+                           smmu_read32_reg(s, SMMU_REG_GERRORN)) &  \
+                          SMMU_GERROR_CMDQ)
+
+/*****************************
+ * EVTQ fields
+ *****************************/
+
+#define EVT_Q_OVERFLOW        (1 << 31)
+
+#define EVT_SET_TYPE(x, t)    deposit32((x)->word[0], 0, 8, t)
+#define EVT_SET_SID(x, s)     ((x)->word[1] =  s)
+#define EVT_SET_INPUT_ADDR(x, addr) ({                    \
+            (x)->word[5] = (uint32_t)(addr >> 32);        \
+            (x)->word[4] = (uint32_t)(addr & 0xffffffff); \
+            addr;                                         \
+        })
+
+/*****************************
+ * Events
+ *****************************/
+
+enum evt_err {
+    SMMU_EVT_F_UUT    = 0x1,
+    SMMU_EVT_C_BAD_SID,
+    SMMU_EVT_F_STE_FETCH,
+    SMMU_EVT_C_BAD_STE,
+    SMMU_EVT_F_BAD_ATS_REQ,
+    SMMU_EVT_F_STREAM_DISABLED,
+    SMMU_EVT_F_TRANS_FORBIDDEN,
+    SMMU_EVT_C_BAD_SSID,
+    SMMU_EVT_F_CD_FETCH,
+    SMMU_EVT_C_BAD_CD,
+    SMMU_EVT_F_WALK_EXT_ABRT,
+    SMMU_EVT_F_TRANS        = 0x10,
+    SMMU_EVT_F_ADDR_SZ,
+    SMMU_EVT_F_ACCESS,
+    SMMU_EVT_F_PERM,
+    SMMU_EVT_F_TLB_CONFLICT = 0x20,
+    SMMU_EVT_F_CFG_CONFLICT = 0x21,
+    SMMU_EVT_E_PAGE_REQ     = 0x24,
+};
+
+typedef enum evt_err SMMUEvtErr;
+
+/*****************************
+ * Interrupts
+ *****************************/
+
+static inline int __smmu_irq_enabled(SMMUV3State *s, uint32_t q)
+{
+    return smmu_read64_reg(s, SMMU_REG_IRQ_CTRL) & q;
+}
+#define smmu_evt_irq_enabled(s)                   \
+    __smmu_irq_enabled(s, SMMU_IRQ_CTRL_EVENT_EN)
+#define smmu_gerror_irq_enabled(s)                  \
+    __smmu_irq_enabled(s, SMMU_IRQ_CTRL_GERROR_EN)
+#define smmu_pri_irq_enabled(s)                 \
+    __smmu_irq_enabled(s, SMMU_IRQ_CTRL_PRI_EN)
+
+static inline bool
+smmu_is_irq_pending(SMMUV3State *s, int irq)
+{
+    return smmu_read32_reg(s, SMMU_REG_GERROR) ^
+        smmu_read32_reg(s, SMMU_REG_GERRORN);
+}
+
+/*****************************
+ * Hash Table
+ *****************************/
+
+static inline gboolean smmu_uint64_equal(gconstpointer v1, gconstpointer v2)
+{
+    return *((const uint64_t *)v1) == *((const uint64_t *)v2);
+}
+
+static inline guint smmu_uint64_hash(gconstpointer v)
+{
+    return (guint)*(const uint64_t *)v;
+}
+
+/*****************************
+ * Misc
+ *****************************/
+
+static int tg2granule(int bits, bool tg1)
+{
+    switch (bits) {
+    case 1:
+        return tg1 ? 14 : 16;
+    case 2:
+        return tg1 ? 14 : 12;
+    case 3:
+        return tg1 ? 16 : 12;
+    default:
+        return 12;
+    }
+}
+
+static inline int oas2bits(int oas)
+{
+    switch (oas) {
+    case 2:
+        return 40;
+    case 3:
+        return 42;
+    case 4:
+        return 44;
+    case 5:
+    default: return 48;
+    }
+}
+
+#define STM2U64(stm) ({                                 \
+            uint64_t hi, lo;                            \
+            hi = (stm)->word[1];                        \
+            lo = (stm)->word[0] & ~(uint64_t)0x1f;      \
+            hi << 32 | lo;                              \
+        })
+
+#define STMSPAN(stm) (1 << (extract32((stm)->word[0], 0, 4) - 1))
+
+/*****************************
+ * Debug
+ *****************************/
+
+#ifdef ARM_SMMU_DEBUG
+static inline void dump_ste(Ste *ste)
+{
+    int i;
+
+    for (i = 0; i < ARRAY_SIZE(ste->word); i += 2) {
+        SMMU_DPRINTF(STE, "STE[%2d]: %#010x\t STE[%2d]: %#010x\n",
+                i, ste->word[i], i + 1, ste->word[i + 1]);
+    }
+}
+
+static inline void dump_cd(Cd *cd)
+{
+    int i;
+    for (i = 0; i < ARRAY_SIZE(cd->word); i += 2) {
+        SMMU_DPRINTF(CD, "CD[%2d]: %#010x\t CD[%2d]: %#010x\n",
+                i, cd->word[i], i + 1, cd->word[i + 1]);
+    }
+}
+
+static inline void dump_evt(Evt *e)
+{}
+
+static inline void dump_cmd(Cmd *cmd)
+{
+    int i;
+    for (i = 0; i < ARRAY_SIZE(cmd->word); i += 2) {
+        SMMU_DPRINTF(CMDQ, "CMD[%2d]: %#010x\t CMD[%2d]: %#010x\n",
+                i, cmd->word[i], i + 1, cmd->word[i + 1]);
+    }
+}
+
+static void dump_smmutranscfg(SMMUTransCfg *cfg)
+{
+    int i;
+    SMMU_DPRINTF(TT_1, "TransCFG stage:%d va:%lx pa:%lx s2_needed:%d\n",
+                 cfg->stage, cfg->va, cfg->pa, cfg->s2_needed);
+    for (i = 1; i <= 2; i++) {
+        SMMU_DPRINTF(TT_1, "TransCFG i:%d oas:%x tsz:%x ttbr:%lx granule:%x"
+                     " va_size:%x gran_sz:%x\n", i, cfg->oas[i], cfg->tsz[i],
+                     cfg->ttbr[i], cfg->granule[i], cfg->va_size[i],
+                     cfg->granule_sz[i]);
+    }
+}
+
+#else
+#define dump_ste(...) do {} while (0)
+#define dump_cd(...) do {} while (0)
+#define dump_evt(...) do {} while (0)
+#define dump_cmd(...) do {} while (0)
+static void dump_smmutranscfg(SMMUTransCfg *cfg) {}
+#endif
+
+#endif
diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
new file mode 100644
index 0000000..e4c091a
--- /dev/null
+++ b/hw/arm/smmuv3.c
@@ -0,0 +1,1131 @@
+/*
+ * Copyright (C) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2017 Red Hat, Inc.
+ * Written by Prem Mallappa, Eric Auger
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/boards.h"
+#include "sysemu/sysemu.h"
+#include "hw/sysbus.h"
+#include "hw/pci/pci.h"
+#include "exec/address-spaces.h"
+
+#include "hw/arm/smmuv3.h"
+#include "smmuv3-internal.h"
+
+static inline int smmu_enabled(SMMUV3State *s)
+{
+    return smmu_read32_reg(s, SMMU_REG_CR0) & SMMU_CR0_SMMU_ENABLE;
+}
+
+/**
+ * smmu_irq_update - update the GERROR register according to
+ * the IRQ and its enable state
+ *
+ * Returns > 0 when the IRQ is supposed to be raised.
+ * Spec requirement:
+ * only raise the IRQ when it is not already active;
+ * blindly toggling bits may actually clear the error.
+ */
+static int smmu_irq_update(SMMUV3State *s, int irq, uint64_t data)
+{
+    uint32_t error = 0;
+
+    switch (irq) {
+    case SMMU_IRQ_EVTQ:
+        if (smmu_evt_irq_enabled(s)) {
+            error = SMMU_GERROR_EVENTQ;
+        }
+        break;
+    case SMMU_IRQ_CMD_SYNC:
+        if (smmu_gerror_irq_enabled(s)) {
+            uint32_t err_type = (uint32_t)data;
+
+            if (err_type) {
+                uint32_t regval = smmu_read32_reg(s, SMMU_REG_CMDQ_CONS);
+                smmu_write32_reg(s, SMMU_REG_CMDQ_CONS,
+                                 regval | err_type << SMMU_CMD_CONS_ERR_SHIFT);
+            }
+            error = SMMU_GERROR_CMDQ;
+        }
+        break;
+    case SMMU_IRQ_PRIQ:
+        if (smmu_pri_irq_enabled(s)) {
+            error = SMMU_GERROR_PRIQ;
+        }
+        break;
+    }
+    SMMU_DPRINTF(IRQ, "<< error:%x\n", error);
+
+    if (error && smmu_gerror_irq_enabled(s)) {
+        uint32_t gerror = smmu_read32_reg(s, SMMU_REG_GERROR);
+        uint32_t gerrorn = smmu_read32_reg(s, SMMU_REG_GERRORN);
+
+        SMMU_DPRINTF(IRQ, "<<<< error:%x gerror:%x gerrorn:%x\n",
+                     error, gerror, gerrorn);
+
+        if (!((gerror ^ gerrorn) & error)) {
+            smmu_write32_reg(s, SMMU_REG_GERROR, gerror ^ error);
+        }
+    }
+
+    return error;
+}
+
+static void smmu_irq_raise(SMMUV3State *s, int irq, uint64_t data)
+{
+    SMMU_DPRINTF(IRQ, "irq:%d\n", irq);
+    if (smmu_irq_update(s, irq, data)) {
+            qemu_irq_raise(s->irq[irq]);
+    }
+}
+
+static MemTxResult smmu_q_read(SMMUV3State *s, SMMUQueue *q, void *data)
+{
+    uint64_t addr = Q_ENTRY(q, Q_IDX(q, q->cons));
+
+    q->cons++;
+    if (q->cons == q->entries) {
+        q->cons = 0;
+        q->wrap.cons++;     /* this will toggle */
+    }
+
+    return smmu_read_sysmem(addr, data, q->ent_size, false);
+}
+
+static MemTxResult smmu_q_write(SMMUV3State *s, SMMUQueue *q, void *data)
+{
+    uint64_t addr = Q_ENTRY(q, Q_IDX(q, q->prod));
+
+    if (q->prod == q->entries) {
+        q->prod = 0;
+        q->wrap.prod++;     /* this will toggle */
+    }
+
+    q->prod++;
+
+    smmu_write_sysmem(addr, data, q->ent_size, false);
+
+    return MEMTX_OK;
+}
+
+static MemTxResult smmu_read_cmdq(SMMUV3State *s, Cmd *cmd)
+{
+    SMMUQueue *q = &s->cmdq;
+    MemTxResult ret = smmu_q_read(s, q, cmd);
+    uint32_t val = 0;
+
+    val |= (q->wrap.cons << q->shift) | q->cons;
+
+    /* Update consumer pointer */
+    smmu_write32_reg(s, SMMU_REG_CMDQ_CONS, val);
+
+    return ret;
+}
+
+static int smmu_cmdq_consume(SMMUV3State *s)
+{
+    uint32_t error = SMMU_CMD_ERR_NONE;
+
+    SMMU_DPRINTF(CMDQ, "CMDQ_ERR: %d\n", SMMU_CMDQ_ERR(s));
+
+    if (!smmu_cmd_q_enabled(s)) {
+        goto out;
+    }
+
+    while (!SMMU_CMDQ_ERR(s) && !smmu_is_q_empty(s, &s->cmdq)) {
+        Cmd cmd;
+#ifdef ARM_SMMU_DEBUG
+        SMMUQueue *q = &s->cmdq;
+#endif
+        if (smmu_read_cmdq(s, &cmd) != MEMTX_OK) {
+            error = SMMU_CMD_ERR_ABORT;
+            goto out;
+        }
+
+        SMMU_DPRINTF(DBG2, "CMDQ base: %lx cons:%d prod:%d val:%x wrap:%d\n",
+                     q->base, q->cons, q->prod, cmd.word[0], q->wrap.cons);
+
+        switch (CMD_TYPE(&cmd)) {
+        case SMMU_CMD_CFGI_STE:
+        case SMMU_CMD_CFGI_STE_RANGE:
+            break;
+        case SMMU_CMD_TLBI_NSNH_ALL: /* TLB not implemented */
+        case SMMU_CMD_TLBI_EL2_ALL:  /* Fallthrough */
+        case SMMU_CMD_TLBI_EL3_ALL:
+        case SMMU_CMD_TLBI_NH_ALL:
+        case SMMU_CMD_TLBI_S2_IPA:
+            break;
+        case SMMU_CMD_SYNC:
+            if (CMD_CS(&cmd) & CMD_SYNC_SIG_IRQ) {
+                smmu_irq_raise(s, SMMU_IRQ_CMD_SYNC, SMMU_CMD_ERR_NONE);
+            }
+            break;
+        case SMMU_CMD_PREFETCH_CONFIG:
+            break;
+        case SMMU_CMD_TLBI_NH_ASID:
+        case SMMU_CMD_TLBI_NH_VA:   /* too many of these are sent */
+            break;
+
+        default:
+            error = SMMU_CMD_ERR_ILLEGAL;
+            SMMU_DPRINTF(CRIT, "Unknown Command type: %x, ignoring\n",
+                         CMD_TYPE(&cmd));
+            if (IS_DBG_ENABLED(CD)) {
+                dump_cmd(&cmd);
+            }
+            break;
+        }
+
+        if (error != SMMU_CMD_ERR_NONE) {
+            SMMU_DPRINTF(INFO, "CMD Error\n");
+            goto out;
+        }
+    }
+
+out:
+    if (error) {
+        smmu_irq_raise(s, SMMU_IRQ_GERROR, error);
+    }
+
+    SMMU_DPRINTF(CMDQ, "prod_wrap:%d, prod:%x cons_wrap:%d cons:%x\n",
+                 s->cmdq.wrap.prod, s->cmdq.prod,
+                 s->cmdq.wrap.cons, s->cmdq.cons);
+
+    return 0;
+}
+
+/*
+ * GERROR is updated when raising an interrupt, GERRORN will be updated
+ * by s/w and should match GERROR before normal operation resumes.
+ */
+static void smmu_irq_clear(SMMUV3State *s, uint64_t gerrorn)
+{
+    int irq = SMMU_IRQ_GERROR;
+    uint32_t toggled;
+
+    toggled = smmu_read32_reg(s, SMMU_REG_GERRORN) ^ gerrorn;
+
+    while (toggled) {
+        irq = ctz32(toggled);
+
+        qemu_irq_lower(s->irq[irq]);
+
+        toggled &= toggled - 1;
+    }
+}
+
+static int smmu_evtq_update(SMMUV3State *s)
+{
+    if (!smmu_enabled(s)) {
+        return 0;
+    }
+
+    if (!smmu_is_q_empty(s, &s->evtq)) {
+        if (smmu_evt_irq_enabled(s)) {
+            smmu_irq_raise(s, SMMU_IRQ_EVTQ, 0);
+        }
+    }
+
+    if (smmu_is_q_empty(s, &s->evtq)) {
+        smmu_irq_clear(s, SMMU_GERROR_EVENTQ);
+    }
+
+    return 1;
+}
+
+static void smmu_create_event(SMMUV3State *s, hwaddr iova,
+                              uint32_t sid, bool is_write, int error);
+
+static void smmu_update(SMMUV3State *s)
+{
+    int error = 0;
+
+    /* SMMU starts processing commands even when not enabled */
+    if (!smmu_enabled(s)) {
+        goto check_cmdq;
+    }
+
+    /* Event queue updates take priority */
+    if ((smmu_evt_q_enabled(s)) && !smmu_is_q_empty(s, &s->evtq)) {
+        SMMU_DPRINTF(CRIT, "q empty:%d prod:%d cons:%d p.wrap:%d p.cons:%d\n",
+                     smmu_is_q_empty(s, &s->evtq), s->evtq.prod,
+                     s->evtq.cons, s->evtq.wrap.prod, s->evtq.wrap.cons);
+        error = smmu_evtq_update(s);
+    }
+
+    if (error) {
+        /* TODO: maybe create a proper event queue entry in the future */
+        /* unlike other devices, an error here is not a recoverable event */
+        SMMU_DPRINTF(CRIT, "An unfavourable condition\n");
+        smmu_create_event(s, 0, 0, 0, error);
+    }
+
+check_cmdq:
+    if (smmu_cmd_q_enabled(s) && !SMMU_CMDQ_ERR(s)) {
+        smmu_cmdq_consume(s);
+    } else {
+        SMMU_DPRINTF(INFO, "cmdq not enabled or error :%x\n", SMMU_CMDQ_ERR(s));
+    }
+
+}
+
+static void smmu_update_irq(SMMUV3State *s, uint64_t addr, uint64_t val)
+{
+    smmu_irq_clear(s, val);
+
+    smmu_write32_reg(s, SMMU_REG_GERRORN, val);
+
+    SMMU_DPRINTF(IRQ, "irq pend: %d gerror:%x gerrorn:%x\n",
+                 smmu_is_irq_pending(s, 0),
+                 smmu_read32_reg(s, SMMU_REG_GERROR),
+                 smmu_read32_reg(s, SMMU_REG_GERRORN));
+
+    /* Lower the IRQ only when no more errors are pending */
+    if (!smmu_is_irq_pending(s, 0)) {
+        qemu_irq_lower(s->irq[0]);
+    }
+}
+
+#define SMMU_ID_REG_INIT(s, reg, d) do {        \
+    s->regs[reg >> 2] = d;                      \
+    } while (0)
+
+static void smmuv3_id_reg_init(SMMUV3State *s)
+{
+    uint32_t data =
+        1 << 27 |                   /* 2 Level stream id */
+        1 << 26 |                   /* Term Model  */
+        1 << 24 |                   /* Stall model not supported */
+        1 << 18 |                   /* VMID 16 bits */
+        1 << 16 |                   /* PRI */
+        1 << 12 |                   /* ASID 16 bits */
+        1 << 10 |                   /* ATS */
+        1 << 9 |                    /* HYP */
+        2 << 6 |                    /* HTTU */
+        1 << 4 |                    /* COHACC */
+        2 << 2 |                    /* TTF=AArch64 */
+        1 << 1 |                    /* Stage 1 */
+        1 << 0;                     /* Stage 2 */
+
+    SMMU_ID_REG_INIT(s, SMMU_REG_IDR0, data);
+
+#define SMMU_SID_SIZE         16
+#define SMMU_QUEUE_SIZE_LOG2  19
+    data =
+        1 << 27 |                    /* Attr Types override */
+        SMMU_QUEUE_SIZE_LOG2 << 21 | /* Cmd Q size */
+        SMMU_QUEUE_SIZE_LOG2 << 16 | /* Event Q size */
+        SMMU_QUEUE_SIZE_LOG2 << 11 | /* PRI Q size */
+        0  << 6 |                    /* SSID not supported */
+        SMMU_SID_SIZE << 0 ;         /* SID size  */
+
+    SMMU_ID_REG_INIT(s, SMMU_REG_IDR1, data);
+
+    data =
+        1 << 6 |                    /* Granule 64K */
+        1 << 4 |                    /* Granule 4K */
+        4 << 0;                     /* OAS = 44 bits */
+
+    SMMU_ID_REG_INIT(s, SMMU_REG_IDR5, data);
+
+}
+
+static void smmuv3_init(SMMUV3State *s)
+{
+    smmuv3_id_reg_init(s);      /* Update ID regs alone */
+
+    s->sid_size = SMMU_SID_SIZE;
+
+    s->cmdq.entries = (smmu_read32_reg(s, SMMU_REG_IDR1) >> 21) & 0x1f;
+    s->cmdq.ent_size = sizeof(Cmd);
+    s->evtq.entries = (smmu_read32_reg(s, SMMU_REG_IDR1) >> 16) & 0x1f;
+    s->evtq.ent_size = sizeof(Evt);
+}
+
+/*
+ * All SMMU data structures are little endian and aligned to 8 bytes:
+ * L1STE/STE/L1CD/CD, and queue entries in CMDQ/EVTQ/PRIQ
+ */
+static inline int smmu_get_ste(SMMUV3State *s, hwaddr addr, Ste *buf)
+{
+    return dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf));
+}
+
+/*
+ * For now we only support a CD table with a single entry; 'ssid' would
+ * otherwise be used to index into the table
+ */
+static inline int smmu_get_cd(SMMUV3State *s, Ste *ste, uint32_t ssid, Cd *buf)
+{
+    hwaddr addr = STE_CTXPTR(ste);
+
+    if (STE_S1CDMAX(ste) != 0) {
+        SMMU_DPRINTF(CRIT, "Multilevel Ctx Descriptor not supported yet\n");
+    }
+
+    return dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf));
+}
+
+static int
+is_ste_consistent(SMMUV3State *s, Ste *ste)
+{
+    uint32_t _config = STE_CONFIG(ste) & 0x7,
+        idr0 = smmu_read32_reg(s, SMMU_REG_IDR0),
+        idr5 = smmu_read32_reg(s, SMMU_REG_IDR5);
+
+    uint32_t httu = extract32(idr0, 6, 2);
+    bool config[] = {_config & 0x1,
+                     _config & 0x2,
+                     _config & 0x3};
+    uint32_t granule_supported;
+
+    bool s1p = idr0 & SMMU_IDR0_S1P,
+        s2p = idr0 & SMMU_IDR0_S2P,
+        hyp = idr0 & SMMU_IDR0_HYP,
+        cd2l = idr0 & SMMU_IDR0_CD2L,
+        idr0_vmid = idr0 & SMMU_IDR0_VMID16,
+        ats = idr0 & SMMU_IDR0_ATS,
+        ttf0 = (idr0 >> 2) & 0x1,
+        ttf1 = (idr0 >> 3) & 0x1;
+
+    int ssidsz = (smmu_read32_reg(s, SMMU_REG_IDR1) >> 6) & 0x1f;
+
+    uint32_t ste_vmid = STE_S2VMID(ste),
+        ste_eats = STE_EATS(ste),
+        ste_s2s = STE_S2S(ste),
+        ste_s1fmt = STE_S1FMT(ste),
+        aa64 = STE_S2AA64(ste),
+        ste_s1cdmax = STE_S1CDMAX(ste);
+
+    uint8_t ste_strw = STE_STRW(ste);
+    uint64_t oas, max_pa;
+    bool strw_ign;
+    bool addr_out_of_range;
+
+    if (!STE_VALID(ste)) {
+        SMMU_DPRINTF(STE, "STE NOT valid\n");
+        return false;
+    }
+
+    switch (STE_S2TG(ste)) {
+    case 1:
+        granule_supported = 0x4; break;
+    case 2:
+        granule_supported = 0x2; break;
+    case 0:
+        granule_supported = 0x1; break;
+    default:
+        granule_supported = 0; break;
+    }
+    granule_supported &= (idr5 >> 4);
+
+    if (!config[2]) {
+        if ((!s1p && config[0]) ||
+            (!s2p && config[1]) ||
+            (s2p && config[1])) {
+            SMMU_DPRINTF(STE, "STE inconsistent, S2P mismatch\n");
+            return false;
+        }
+        if (!ssidsz && ste_s1cdmax && config[0] && !cd2l &&
+            (ste_s1fmt == 1 || ste_s1fmt == 2)) {
+            SMMU_DPRINTF(STE, "STE inconsistent, CD mismatch\n");
+            return false;
+        }
+        if (ats && ((_config & 0x3) == 0) &&
+            ((ste_eats == 2 && (_config != 0x7 || ste_s2s)) ||
+             (ste_eats == 1 && !ste_s2s))) {
+            SMMU_DPRINTF(STE, "STE inconsistent, EATS/S2S mismatch\n");
+            return false;
+        }
+        if (config[0] && (ssidsz && (ste_s1cdmax > ssidsz))) {
+            SMMU_DPRINTF(STE, "STE inconsistent, SSID out of range\n");
+            return false;
+        }
+    }
+
+    oas = MIN(STE_S2PS(ste), idr5 & 0x7);
+
+    if (oas == 3) {
+        max_pa = deposit64(0, 0, 42, ~0UL);
+    } else {
+        max_pa = deposit64(0, 0, (32 + (oas * 4)), ~0UL);
+    }
+
+    strw_ign = (!s1p || !hyp || (_config == 4));
+
+    addr_out_of_range = (int64_t)(max_pa - STE_S2TTB(ste)) < 0;
+
+    if (config[1] && (
+        (aa64 && !granule_supported) ||
+        (!aa64 && !ttf0) ||
+        (aa64 && !ttf1)  ||
+        ((STE_S2HA(ste) || STE_S2HD(ste)) && !aa64) ||
+        ((STE_S2HA(ste) || STE_S2HD(ste)) && !httu) ||
+        (STE_S2HD(ste) && (httu == 1)) ||
+        addr_out_of_range)) {
+        SMMU_DPRINTF(STE, "STE inconsistent\n");
+        SMMU_DPRINTF(STE, "config[1]:%d gran:%d addr:%d\n"
+                     " aa64:%d ttf0:%d ttf1:%d s2ha:%d s2hd:%d httu:%d\n",
+                     config[1], granule_supported,
+                     addr_out_of_range, aa64, ttf0, ttf1, STE_S2HA(ste),
+                     STE_S2HD(ste), httu);
+        SMMU_DPRINTF(STE, "maxpa:%lx s2ttb:%lx\n", max_pa, STE_S2TTB(ste));
+        return false;
+    }
+    if (s2p && (config[0] == 0 && config[1]) &&
+        (strw_ign || !ste_strw) && !idr0_vmid && !(ste_vmid >> 8)) {
+        SMMU_DPRINTF(STE, "STE inconsistent, VMID out of range\n");
+        return false;
+    }
+
+    return true;
+}
+
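+/*
+ * smmu_find_ste - fetch the STE corresponding to a StreamID
+ *
+ * With a linear stream table the STE is indexed directly by the SID.
+ * With a 2-level table, the upper SID bits select an L1 descriptor
+ * (STEDesc) whose pointer locates the L2 array of STEs, indexed by
+ * the lower sid_split bits.
+ */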
+static int smmu_find_ste(SMMUV3State *s, uint16_t sid, Ste *ste)
+{
+    hwaddr addr;
+
+    SMMU_DPRINTF(STE, "SID:%x\n", sid);
+    /* Check SID range */
+    if (sid >= (1 << s->sid_size)) {
+        return SMMU_EVT_C_BAD_SID;
+    }
+    SMMU_DPRINTF(STE, "features:%x\n", s->features);
+    if (s->features & SMMU_FEATURE_2LVL_STE) {
+        int span;
+        hwaddr stm_addr;
+        STEDesc stm;
+        int l1_ste_offset, l2_ste_offset;
+        SMMU_DPRINTF(STE, "no. ste: %x\n", s->sid_split);
+
+        l1_ste_offset = sid >> s->sid_split;
+        l2_ste_offset = sid & ((1 << s->sid_split) - 1);
+        SMMU_DPRINTF(STE, "l1_off:%x, l2_off:%x\n", l1_ste_offset,
+                     l2_ste_offset);
+        stm_addr = (hwaddr)(s->strtab_base + l1_ste_offset * sizeof(stm));
+        smmu_read_sysmem(stm_addr, &stm, sizeof(stm), false);
+
+        SMMU_DPRINTF(STE, "strtab_base:%lx stm_addr:%lx\n"
+                     "l1_ste_offset:%x l1(64):%#016lx\n",
+                     s->strtab_base, stm_addr, l1_ste_offset,
+                     STM2U64(&stm));
+
+        span = STMSPAN(&stm);
+        SMMU_DPRINTF(STE, "l2_ste_offset:%x ~ span:%d\n", l2_ste_offset, span);
+        if (l2_ste_offset > span) {
+            SMMU_DPRINTF(CRIT, "l2_ste_offset > span\n");
+            return SMMU_EVT_C_BAD_STE;
+        }
+        addr = STM2U64(&stm) + l2_ste_offset * sizeof(*ste);
+    } else {
+        addr = s->strtab_base + sid * sizeof(*ste);
+    }
+    SMMU_DPRINTF(STE, "ste:%lx\n", addr);
+    if (smmu_get_ste(s, addr, ste)) {
+        SMMU_DPRINTF(CRIT, "Unable to Fetch STE\n");
+        return SMMU_EVT_F_UUT;
+    }
+
+    return 0;
+}
+
+static void smmu_cfg_populate_s2(SMMUTransCfg *cfg, Ste *ste)
+{                           /* stage 2 cfg */
+    bool s2a64 = STE_S2AA64(ste);
+    const int stage = 2;
+
+    cfg->granule[stage] = STE_S2TG(ste);
+    cfg->tsz[stage] = STE_S2T0SZ(ste);
+    cfg->ttbr[stage] = STE_S2TTB(ste);
+    cfg->oas[stage] = oas2bits(STE_S2PS(ste));
+
+    if (s2a64) {
+        cfg->tsz[stage] = MIN(cfg->tsz[stage], 39);
+        cfg->tsz[stage] = MAX(cfg->tsz[stage], 16);
+    }
+    cfg->va_size[stage] = STE_S2AA64(ste) ? 64 : 32;
+    cfg->granule_sz[stage] = tg2granule(cfg->granule[stage], 0) - 3;
+}
+
+static void smmu_cfg_populate_s1(SMMUTransCfg *cfg, Cd *cd)
+{                           /* stage 1 cfg */
+    bool s1a64 = CD_AARCH64(cd);
+    const int stage = 1;
+
+    cfg->granule[stage] = (CD_EPD0(cd)) ? CD_TG1(cd) : CD_TG0(cd);
+    cfg->tsz[stage] = (CD_EPD0(cd)) ? CD_T1SZ(cd) : CD_T0SZ(cd);
+    cfg->ttbr[stage] = (CD_EPD0(cd)) ? CD_TTB1(cd) : CD_TTB0(cd);
+    cfg->oas[stage] = oas2bits(CD_IPS(cd));
+
+    if (s1a64) {
+        cfg->tsz[stage] = MIN(cfg->tsz[stage], 39);
+        cfg->tsz[stage] = MAX(cfg->tsz[stage], 16);
+    }
+    cfg->va_size[stage] = CD_AARCH64(cd) ? 64 : 32;
+    cfg->granule_sz[stage] = tg2granule(cfg->granule[stage], CD_EPD0(cd)) - 3;
+}
+
+static SMMUEvtErr smmu_walk_pgtable(SMMUV3State *s, Ste *ste, Cd *cd,
+                                    IOMMUTLBEntry *tlbe, bool is_write)
+{
+    SMMUState *sys = SMMU_SYS_DEV(s);
+    SMMUBaseClass *sbc = SMMU_DEVICE_GET_CLASS(sys);
+    SMMUTransCfg _cfg = {};
+    SMMUTransCfg *cfg = &_cfg;
+    SMMUEvtErr retval = 0;
+    uint32_t ste_cfg = STE_CONFIG(ste);
+    uint32_t page_size = 0, perm = 0;
+    hwaddr pa;                 /* Input address, output address */
+    int stage = 0;
+
+    SMMU_DPRINTF(DBG1, "ste_cfg :%x\n", ste_cfg);
+    /* Both stages bypass, we don't need to do anything */
+    if (is_ste_bypass(s, ste)) {
+        return 0;
+    }
+
+    SMMU_DPRINTF(TT_1, "Input addr: %lx ste_config:%d\n",
+                 tlbe->iova, ste_cfg);
+
+    if (ste_cfg & STE_CONFIG_S1TR) {
+        stage = cfg->stage = 1;
+        smmu_cfg_populate_s1(cfg, cd);
+
+        cfg->oas[stage] = MIN(oas2bits(smmu_read32_reg(s, SMMU_REG_IDR5) & 0xf),
+                              cfg->oas[stage]);
+        /* fix ttbr - make top bits zero */
+        cfg->ttbr[stage] = extract64(cfg->ttbr[stage], 0, cfg->oas[stage]);
+        cfg->s2_needed = (STE_CONFIG(ste) == STE_CONFIG_S1TR_S2TR) ? 1 : 0;
+
+        SMMU_DPRINTF(DBG1, "S1 populated\n ");
+    }
+
+    if (ste_cfg & STE_CONFIG_S2TR) {
+        stage = 2;
+        if (cfg->stage) {               /* S1+S2 */
+            cfg->s2_needed = true;
+        } else {                        /* Stage2 only */
+            cfg->stage = stage;
+        }
+
+        /* populate stage 2 configuration */
+        smmu_cfg_populate_s2(cfg, ste);
+
+        cfg->oas[stage] = MIN(oas2bits(smmu_read32_reg(s, SMMU_REG_IDR5) & 0xf),
+                              cfg->oas[stage]);
+        /* fix ttbr - make top bits zero */
+        cfg->ttbr[stage] = extract64(cfg->ttbr[stage], 0, cfg->oas[stage]);
+
+        SMMU_DPRINTF(DBG1, "S2 populated\n ");
+    }
+
+    cfg->va = tlbe->iova;
+
+    if ((cfg->stage == 1 && CD_AARCH64(cd)) ||
+        STE_S2AA64(ste)) {
+        SMMU_DPRINTF(DBG1, "Translate 64\n");
+        retval = sbc->translate_64(cfg, &page_size, &perm,
+                                   is_write);
+    } else {
+        SMMU_DPRINTF(DBG1, "Translate 32\n");
+        retval = sbc->translate_32(cfg, &page_size, &perm, is_write);
+    }
+
+    if (retval != 0) {
+        SMMU_DPRINTF(CRIT, "FAILED Stage1 translation\n");
+        goto exit;
+    }
+    pa = cfg->pa;
+
+    SMMU_DPRINTF(TT_1, "DONE: o/p addr:%lx mask:%x is_write:%d\n ",
+                 pa, page_size - 1, is_write);
+    tlbe->translated_addr = pa;
+    tlbe->addr_mask = page_size - 1;
+    tlbe->perm = perm;
+
+exit:
+    dump_smmutranscfg(cfg);
+    return retval;
+}
+
+static MemTxResult smmu_write_evtq(SMMUV3State *s, Evt *evt)
+{
+    SMMUQueue *q = &s->evtq;
+    int ret = smmu_q_write(s, q, evt);
+    uint32_t val = 0;
+
+    val |= (q->wrap.prod << q->shift) | q->prod;
+
+    smmu_write32_reg(s, SMMU_REG_EVTQ_PROD, val);
+
+    return ret;
+}
+
+/*
+ * Events created on the EventQ
+ */
+static void smmu_create_event(SMMUV3State *s, hwaddr iova,
+                              uint32_t sid, bool is_write, int error)
+{
+    SMMUQueue *q = &s->evtq;
+    uint64_t head;
+    Evt evt;
+
+    if (!smmu_evt_q_enabled(s)) {
+        return;
+    }
+
+    EVT_SET_TYPE(&evt, error);
+    EVT_SET_SID(&evt, sid);
+
+    switch (error) {
+    case SMMU_EVT_F_UUT:
+    case SMMU_EVT_C_BAD_STE:
+        break;
+    case SMMU_EVT_C_BAD_CD:
+    case SMMU_EVT_F_CD_FETCH:
+        break;
+    case SMMU_EVT_F_TRANS_FORBIDDEN:
+    case SMMU_EVT_F_WALK_EXT_ABRT:
+        EVT_SET_INPUT_ADDR(&evt, iova);
+    default:
+        break;
+    }
+
+    smmu_write_evtq(s, &evt);
+
+    head = Q_IDX(q, q->prod);
+
+    if (smmu_is_q_full(s, &s->evtq)) {
+        head = q->prod ^ (1 << 31);     /* Set overflow */
+    }
+
+    smmu_write32_reg(s, SMMU_REG_EVTQ_PROD, head);
+
+    smmu_irq_raise(s, SMMU_IRQ_EVTQ, (uint64_t)&evt);
+}
+
+/*
+ * TR - Translation Request
+ * TT - Translated Transaction
+ * OT - Other Transaction
+ */
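+/*
+ * smmuv3_translate - IOMMU translate callback
+ *
+ * Fetch and check the STE for the device's StreamID, fetch the CD when
+ * stage 1 translation is configured, then walk the page tables. On any
+ * error an event is reported on the event queue and the transaction is
+ * returned without permissions.
+ */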
+static IOMMUTLBEntry
+smmuv3_translate(MemoryRegion *mr, hwaddr addr, bool is_write)
+{
+    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
+    SMMUV3State *s = sdev->smmu;
+    uint16_t sid = 0, config;
+    Ste ste;
+    Cd cd;
+    SMMUEvtErr error = 0;
+
+    IOMMUTLBEntry ret = {
+        .target_as = &address_space_memory,
+        .iova = addr,
+        .translated_addr = addr,
+        .addr_mask = ~(hwaddr)0,
+        .perm = IOMMU_NONE,
+    };
+
+    /* SMMU bypass: we allow traffic through if the SMMU is disabled */
+    if (!smmu_enabled(s)) {
+        SMMU_DPRINTF(CRIT, "SMMU Not enabled.. bypassing addr:%lx\n", addr);
+        goto bypass;
+    }
+
+    sid = smmu_get_sid(sdev);
+    SMMU_DPRINTF(TT_1, "SID:%x bus:%d ste_base:%lx\n",
+                 sid, pci_bus_num(sdev->bus), s->strtab_base);
+
+    /* Fetch & Check STE */
+    error = smmu_find_ste(s, sid, &ste);
+    if (error) {
+        goto error_out;  /* F_STE_FETCH or F_CFG_CONFLICT */
+    }
+
+    if (IS_DBG_ENABLED(STE)) {
+        dump_ste(&ste);
+    }
+
+    if (is_ste_valid(s, &ste) && is_ste_bypass(s, &ste)) {
+        goto bypass;
+    }
+
+    SMMU_DPRINTF(STE, "STE is not bypass\n");
+    if (!is_ste_consistent(s, &ste)) {
+        error = SMMU_EVT_C_BAD_STE;
+        goto error_out;
+    }
+    SMMU_DPRINTF(INFO, "Valid STE Found\n");
+
+    /* Stream Bypass */
+    config = STE_CONFIG(&ste) & 0x3;
+
+    if (config & (STE_CONFIG_S1TR)) {
+        smmu_get_cd(s, &ste, 0, &cd); /* We dont have SSID yet, so 0 */
+        SMMU_DPRINTF(INFO, "GET_CD CTXPTR:%p\n", (void *)STE_CTXPTR(&ste));
+        if (IS_DBG_ENABLED(CD)) {
+            dump_cd(&cd);
+        }
+
+        if (!is_cd_valid(s, &ste, &cd)) {
+            error = SMMU_EVT_C_BAD_CD;
+            goto error_out;
+        }
+    }
+
+    /* Walk stage 1; if S2 is enabled, S2 is walked for every S1 access */
+    error = smmu_walk_pgtable(s, &ste, &cd, &ret, is_write);
+
+    SMMU_DPRINTF(INFO, "DONE walking tables\n");
+
+error_out:
+    if (error) {        /* Post the Error using Event Q */
+        SMMU_DPRINTF(CRIT, "Translation Error: %x\n", error);
+        smmu_create_event(s, ret.iova, sid, is_write, error);
+        goto out;
+    }
+
+bypass:
+    ret.perm = is_write ? IOMMU_RW : IOMMU_RO;
+
+out:
+    return ret;
+}
+
+static const MemoryRegionIOMMUOps smmu_iommu_ops = {
+    .translate = smmuv3_translate,
+};
+
+static AddressSpace *smmu_init_pci_iommu(PCIBus *bus, void *opaque, int devfn)
+{
+    SMMUV3State *s = opaque;
+    SMMUState *sys = SMMU_SYS_DEV(s);
+    uintptr_t key = (uintptr_t)bus;
+    SMMUPciBus *sbus = g_hash_table_lookup(s->smmu_as_by_busptr, &key);
+    SMMUDevice *sdev;
+
+    if (!sbus) {
+        sbus = g_malloc0(sizeof(SMMUPciBus) +
+                         sizeof(SMMUDevice *) * SMMU_PCI_DEVFN_MAX);
+        sbus->bus = bus;
+        g_hash_table_insert(s->smmu_as_by_busptr,
+                            g_memdup(&key, sizeof(key)), sbus);
+    }
+
+    sdev = sbus->pbdev[devfn];
+    if (!sdev) {
+        sdev = sbus->pbdev[devfn] = g_malloc0(sizeof(SMMUDevice));
+
+        sdev->smmu = s;
+        sdev->bus = bus;
+        sdev->devfn = devfn;
+
+        memory_region_init_iommu(&sdev->iommu, OBJECT(sys),
+                                 &smmu_iommu_ops, TYPE_SMMU_V3_DEV, UINT64_MAX);
+        address_space_init(&sdev->as, &sdev->iommu, TYPE_SMMU_V3_DEV);
+    }
+
+    return &sdev->as;
+}
+
+static inline void smmu_update_base_reg(SMMUV3State *s, uint64_t *base,
+                                        uint64_t val)
+{
+    *base = val & ~(SMMU_BASE_RA | 0x3fULL);
+}
+
+static void smmu_update_qreg(SMMUV3State *s, SMMUQueue *q, hwaddr reg,
+                             uint32_t off, uint64_t val, unsigned size)
+{
+    if (size == 8 && off == 0) {
+        smmu_write64_reg(s, reg, val);
+    } else {
+        smmu_write_reg(s, reg, val);
+    }
+
+    switch (off) {
+    case 0:                             /* BASE register */
+        val = smmu_read64_reg(s, reg);
+        q->shift = val & 0x1f;
+        q->entries = 1 << (q->shift);
+        smmu_update_base_reg(s, &q->base, val);
+        break;
+
+    case 4:                             /* CONS */
+        q->cons = Q_IDX(q, val);
+        q->wrap.cons = val >> q->shift;
+        SMMU_DPRINTF(DBG2, "cons written : %d val:%lx\n", q->cons, val);
+        break;
+
+    case 8:                             /* PROD */
+        q->prod = Q_IDX(q, val);
+        q->wrap.prod = val >> q->shift;
+        break;
+    }
+
+    switch (reg) {
+    case SMMU_REG_CMDQ_PROD:            /* should be only for CMDQ_PROD */
+    case SMMU_REG_CMDQ_CONS:            /* but we do it anyway */
+        smmu_update(s);
+        break;
+    }
+}
+
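+/*
+ * PAGE1 (offset 0x10000) aliases the EVTQ and PRIQ PROD/CONS registers
+ * of PAGE0; fold those accesses back onto the PAGE0 offsets.
+ */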
+static void smmu_write_mmio_fixup(SMMUV3State *s, hwaddr *addr)
+{
+    switch (*addr) {
+    case 0x100a8: case 0x100ac:         /* Aliasing => page0 registers */
+    case 0x100c8: case 0x100cc:
+        *addr ^= (hwaddr)0x10000;
+    }
+}
+
+static void smmu_write_mmio(void *opaque, hwaddr addr,
+                            uint64_t val, unsigned size)
+{
+    SMMUState *sys = opaque;
+    SMMUV3State *s = SMMU_V3_DEV(sys);
+    bool update = false;
+
+    smmu_write_mmio_fixup(s, &addr);
+
+    SMMU_DPRINTF(DBG2, "addr: %lx val:%lx\n", addr, val);
+
+    switch (addr) {
+    case 0xFDC ... 0xFFC:
+    case SMMU_REG_IDR0 ... SMMU_REG_IDR5:
+        SMMU_DPRINTF(CRIT, "write to RO/Unimpl reg %lx val64:%lx\n",
+                     addr, val);
+        return;
+
+    case SMMU_REG_GERRORN:
+        smmu_update_irq(s, addr, val);
+        return;
+
+    case SMMU_REG_CR0:
+        smmu_write32_reg(s, SMMU_REG_CR0_ACK, val);
+        update = true;
+        break;
+
+    case SMMU_REG_IRQ_CTRL:
+        smmu_write32_reg(s, SMMU_REG_IRQ_CTRL_ACK, val);
+        update = true;
+        break;
+
+    case SMMU_REG_STRTAB_BASE:
+        smmu_update_base_reg(s, &s->strtab_base, val);
+        return;
+
+    case SMMU_REG_STRTAB_BASE_CFG:
+        if (((val >> 16) & 0x3) == 0x1) {
+            s->sid_split = (val >> 6) & 0x1f;
+            s->features |= SMMU_FEATURE_2LVL_STE;
+        }
+        break;
+
+    case SMMU_REG_CMDQ_PROD:
+    case SMMU_REG_CMDQ_CONS:
+    case SMMU_REG_CMDQ_BASE:
+    case SMMU_REG_CMDQ_BASE + 4:
+        smmu_update_qreg(s, &s->cmdq, addr, addr - SMMU_REG_CMDQ_BASE,
+                         val, size);
+        return;
+
+    case SMMU_REG_EVTQ_CONS:            /* fallthrough */
+    {
+        SMMUQueue *evtq = &s->evtq;
+        evtq->cons = Q_IDX(evtq, val);
+        evtq->wrap.cons = Q_WRAP(evtq, val);
+
+        SMMU_DPRINTF(IRQ, "Before clearing interrupt "
+                     "prod:%x cons:%x prod.w:%d cons.w:%d\n",
+                     evtq->prod, evtq->cons, evtq->wrap.prod, evtq->wrap.cons);
+        if (smmu_is_q_empty(s, &s->evtq)) {
+            SMMU_DPRINTF(IRQ, "Clearing interrupt"
+                         " prod:%x cons:%x prod.w:%d cons.w:%d\n",
+                         evtq->prod, evtq->cons, evtq->wrap.prod,
+                         evtq->wrap.cons);
+            qemu_irq_lower(s->irq[SMMU_IRQ_EVTQ]);
+        }
+    }
+    case SMMU_REG_EVTQ_BASE:
+    case SMMU_REG_EVTQ_BASE + 4:
+    case SMMU_REG_EVTQ_PROD:
+        smmu_update_qreg(s, &s->evtq, addr, addr - SMMU_REG_EVTQ_BASE,
+                         val, size);
+        return;
+
+    case SMMU_REG_PRIQ_CONS:
+    case SMMU_REG_PRIQ_BASE:
+    case SMMU_REG_PRIQ_BASE + 4:
+    case SMMU_REG_PRIQ_PROD:
+        smmu_update_qreg(s, &s->priq, addr, addr - SMMU_REG_PRIQ_BASE,
+                         val, size);
+        return;
+    }
+
+    if (size == 8) {
+        smmu_write_reg(s, addr, val);
+    } else {
+        smmu_write32_reg(s, addr, (uint32_t)val);
+    }
+
+    if (update) {
+        smmu_update(s);
+    }
+}
+
+static uint64_t smmu_read_mmio(void *opaque, hwaddr addr, unsigned size)
+{
+    SMMUState *sys = opaque;
+    SMMUV3State *s = SMMU_V3_DEV(sys);
+    uint64_t val;
+
+    smmu_write_mmio_fixup(s, &addr);
+
+    /* Primecell/Corelink ID registers */
+    switch (addr) {
+    case 0xFF0 ... 0xFFC:
+    case 0xFDC ... 0xFE4:
+        val = 0;
+        SMMU_DPRINTF(CRIT, "***************** addr: %lx val:%lx\n", addr, val);
+        break;
+
+    default:
+        val = (uint64_t)smmu_read32_reg(s, addr);
+        break;
+
+    case SMMU_REG_STRTAB_BASE ... SMMU_REG_CMDQ_BASE:
+    case SMMU_REG_EVTQ_BASE:
+    case SMMU_REG_PRIQ_BASE ... SMMU_REG_PRIQ_IRQ_CFG1:
+        val = smmu_read64_reg(s, addr);
+        break;
+    }
+
+    SMMU_DPRINTF(DBG2, "addr: %lx val:%lx\n", addr, val);
+    SMMU_DPRINTF(DBG2, "cmdq cons:%d\n", s->cmdq.cons);
+    return val;
+}
+
+static const MemoryRegionOps smmu_mem_ops = {
+    .read = smmu_read_mmio,
+    .write = smmu_write_mmio,
+    .endianness = DEVICE_LITTLE_ENDIAN,
+    .valid = {
+        .min_access_size = 4,
+        .max_access_size = 8,
+    },
+};
+
+static void smmu_init_irq(SMMUV3State *s, SysBusDevice *dev)
+{
+    int i;
+
+    for (i = 0; i < ARRAY_SIZE(s->irq); i++) {
+        sysbus_init_irq(dev, &s->irq[i]);
+    }
+}
+
+static void smmu_init_iommu_as(SMMUV3State *sys)
+{
+    SMMUState *s = SMMU_SYS_DEV(sys);
+    PCIBus *pcibus = pci_find_primary_bus();
+
+    if (pcibus) {
+        SMMU_DPRINTF(CRIT, "Found PCI bus, setting up iommu\n");
+        pci_setup_iommu(pcibus, smmu_init_pci_iommu, s);
+    } else {
+        SMMU_DPRINTF(CRIT, "No PCI bus, SMMU is not registered\n");
+    }
+}
+
+static void smmu_reset(DeviceState *dev)
+{
+    SMMUV3State *s = SMMU_V3_DEV(dev);
+    smmuv3_init(s);
+}
+
+static int smmu_populate_internal_state(void *opaque, int version_id)
+{
+    SMMUV3State *s = opaque;
+
+    smmu_update(s);
+    return 0;
+}
+
+static void smmu_realize(DeviceState *d, Error **errp)
+{
+    SMMUState *sys = SMMU_SYS_DEV(d);
+    SMMUV3State *s = SMMU_V3_DEV(sys);
+    SysBusDevice *dev = SYS_BUS_DEVICE(d);
+
+    /* Register Access */
+    memory_region_init_io(&sys->iomem, OBJECT(s),
+                          &smmu_mem_ops, sys, TYPE_SMMU_V3_DEV, 0x20000);
+
+    s->smmu_as_by_busptr = g_hash_table_new_full(smmu_uint64_hash,
+                                                 smmu_uint64_equal,
+                                                 g_free, g_free);
+    sysbus_init_mmio(dev, &sys->iomem);
+
+    smmu_init_irq(s, dev);
+
+    smmu_init_iommu_as(s);
+}
+
+static const VMStateDescription vmstate_smmuv3 = {
+    .name = "smmuv3",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .post_load = smmu_populate_internal_state,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64_ARRAY(regs, SMMUV3State, SMMU_NREGS),
+        VMSTATE_END_OF_LIST(),
+    },
+};
+
+static void smmuv3_instance_init(Object *obj)
+{
+    /* Nothing much to do here as of now */
+}
+
+static void smmuv3_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    dc->reset   = smmu_reset;
+    dc->vmsd    = &vmstate_smmuv3;
+    dc->realize = smmu_realize;
+}
+
+static const TypeInfo smmuv3_type_info = {
+    .name          = TYPE_SMMU_V3_DEV,
+    .parent        = TYPE_SMMU_DEV_BASE,
+    .instance_size = sizeof(SMMUV3State),
+    .instance_init = smmuv3_instance_init,
+    .class_data    = NULL,
+    .class_size    = sizeof(SMMUV3Class),
+    .class_init    = smmuv3_class_init,
+};
+
+static void smmuv3_register_types(void)
+{
+    type_register(&smmuv3_type_info);
+}
+
+type_init(smmuv3_register_types)
+
diff --git a/include/hw/arm/smmuv3.h b/include/hw/arm/smmuv3.h
new file mode 100644
index 0000000..4e43f67
--- /dev/null
+++ b/include/hw/arm/smmuv3.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2017 Red Hat, Inc.
+ * Written by Prem Mallappa, Eric Auger
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_ARM_SMMUV3_H
+#define HW_ARM_SMMUV3_H
+
+#include "hw/arm/smmu-common.h"
+
+#define SMMU_NREGS            0x200
+#define SMMU_PCI_DEVFN_MAX    256
+
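+/*
+ * Circular queue state: 'prod' and 'cons' are the producer and consumer
+ * indexes; the single-bit 'wrap' flags toggle each time the corresponding
+ * index wraps, which is how the SMMUv3 spec distinguishes a full queue
+ * from an empty one (same indexes, different wrap bits).
+ */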
+typedef struct SMMUQueue {
+     hwaddr base;
+     uint32_t prod;
+     uint32_t cons;
+     union {
+          struct {
+               uint8_t prod:1;
+               uint8_t cons:1;
+          };
+          uint8_t unused;
+     } wrap;
+
+     uint16_t entries;           /* Number of entries */
+     uint8_t  ent_size;          /* Size of entry in bytes */
+     uint8_t  shift;             /* Size in log2 */
+} SMMUQueue;
+
+typedef struct SMMUV3State {
+    SMMUState     smmu_state;
+
+#define SMMU_FEATURE_2LVL_STE (1 << 0)
+    /* Local cache of most-frequently used registers */
+    uint32_t     features;
+    uint16_t     sid_size;
+    uint16_t     sid_split;
+    uint64_t     strtab_base;
+
+    uint64_t    regs[SMMU_NREGS];
+
+    qemu_irq     irq[4];
+
+    SMMUQueue    cmdq, evtq, priq;
+
+    /* IOMMU Address space */
+    MemoryRegion iommu;
+    AddressSpace iommu_as;
+    /*
+     * The bus number is not populated at init time, hence we need
+     * a mechanism to retrieve the corresponding address space for each
+     * PCI device.
+     */
+    GHashTable   *smmu_as_by_busptr;
+} SMMUV3State;
+
+typedef enum {
+    SMMU_IRQ_GERROR,
+    SMMU_IRQ_PRIQ,
+    SMMU_IRQ_EVTQ,
+    SMMU_IRQ_CMD_SYNC,
+} SMMUIrq;
+
+typedef struct {
+    SMMUBaseClass smmu_base_class;
+} SMMUV3Class;
+
+#define TYPE_SMMU_V3_DEV   "smmuv3"
+#define SMMU_V3_DEV(obj) OBJECT_CHECK(SMMUV3State, (obj), TYPE_SMMU_V3_DEV)
+#define SMMU_V3_DEVICE_GET_CLASS(obj)                              \
+    OBJECT_GET_CLASS(SMMUBaseClass, (obj), TYPE_SMMU_V3_DEV)
+
+#endif
-- 
2.5.5
