From: Taylor Simpson <tsimpson@quicinc.com>
To: qemu-devel@nongnu.org
Cc: riku.voipio@iki.fi, richard.henderson@linaro.org,
	laurent@vivier.eu, Taylor Simpson <tsimpson@quicinc.com>,
	philmd@redhat.com, aleksandar.m.mail@gmail.com
Subject: [RFC PATCH 62/66] Hexagon HVX macros referenced in instruction semantics
Date: Mon, 10 Feb 2020 18:40:40 -0600
Message-ID: <1581381644-13678-63-git-send-email-tsimpson@quicinc.com>
In-Reply-To: <1581381644-13678-1-git-send-email-tsimpson@quicinc.com>

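Add the macros referenced by the HVX instruction semantics: VTCM store
logging, predicate (Q) register access, vector alignment checks, vector
load/store (aligned, unaligned, and masked variants), scatter/gather
support, and element-wise saturating/averaging arithmetic.
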
Signed-off-by: Taylor Simpson <tsimpson@quicinc.com>
---
 target/hexagon/mmvec/macros.h | 436 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 436 insertions(+)

diff --git a/target/hexagon/mmvec/macros.h b/target/hexagon/mmvec/macros.h
index 80adb83..93d86e7 100644
--- a/target/hexagon/mmvec/macros.h
+++ b/target/hexagon/mmvec/macros.h
@@ -229,4 +229,440 @@
 #define WRITE_QREG_e(NUM, VAR, VNEW)     LOG_QREG_WRITE(NUM, VAR, VNEW)
 #define WRITE_QREG_x(NUM, VAR, VNEW)     LOG_QREG_WRITE(NUM, VAR, VNEW)
 
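+/*
+ * Log one byte of a vector store into the VTCM log; the accumulated log
+ * is committed to memory by the store-commit helper.
+ */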
+#define LOG_VTCM_BYTE(VA, MASK, VAL, IDX) \
+    do { \
+        env->vtcm_log.data.ub[IDX] = (VAL); \
+        env->vtcm_log.mask.ub[IDX] = (MASK); \
+        env->vtcm_log.va[IDX] = (VA); \
+    } while (0)
+
+/* VTCM Banks */
+#define LOG_VTCM_BANK(VAL, MASK, IDX) \
+    do { \
+        env->vtcm_log.offsets.uh[IDX] = (VAL & 0xFFF); \
+        env->vtcm_log.offsets.uh[IDX] |= ((MASK & 0xF) << 12); \
+    } while (0)
+
+#define fUSE_LOOKUP_ADDRESS_BY_REV() true
+#define fUSE_LOOKUP_ADDRESS() 1
+#define fRT8NOTE()
+#define fNOTQ(VAL) \
+    ({ \
+        mmqreg_t _ret; \
+        int _i_; \
+        for (_i_ = 0; _i_ < fVECSIZE() / 64; _i_++) { \
+            _ret.ud[_i_] = ~VAL.ud[_i_]; \
+        } \
+        _ret; \
+    })
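+/* Access individual bits within a predicate (Q) register, 1 bit per vector byte */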
+#define fGETQBITS(REG, WIDTH, MASK, BITNO) \
+    ((MASK) & (REG.w[(BITNO) >> 5] >> ((BITNO) & 0x1f)))
+#define fGETQBIT(REG, BITNO) fGETQBITS(REG, 1, 1, BITNO)
+#define fGENMASKW(QREG, IDX) \
+    (((fGETQBIT(QREG, (IDX * 4 + 0)) ? 0xFF : 0x0) << 0)  | \
+     ((fGETQBIT(QREG, (IDX * 4 + 1)) ? 0xFF : 0x0) << 8)  | \
+     ((fGETQBIT(QREG, (IDX * 4 + 2)) ? 0xFF : 0x0) << 16) | \
+     ((fGETQBIT(QREG, (IDX * 4 + 3)) ? 0xFF : 0x0) << 24))
+#define fGETNIBBLE(IDX, SRC) (fSXTN(4, 8, (SRC >> (4 * IDX)) & 0xF))
+#define fGETCRUMB(IDX, SRC) (fSXTN(2, 8, (SRC >> (2 * IDX)) & 0x3))
+#define fGETCRUMB_SYMMETRIC(IDX, SRC) \
+    ((fGETCRUMB(IDX, SRC) >= 0 ? (2 - fGETCRUMB(IDX, SRC)) \
+                               : fGETCRUMB(IDX, SRC)))
+#define fGENMASKH(QREG, IDX) \
+    (((fGETQBIT(QREG, (IDX * 2 + 0)) ? 0xFF : 0x0) << 0) | \
+     ((fGETQBIT(QREG, (IDX * 2 + 1)) ? 0xFF : 0x0) << 8))
+#define fGETMASKW(VREG, QREG, IDX) (VREG.w[IDX] & fGENMASKW((QREG), IDX))
+#define fGETMASKH(VREG, QREG, IDX) (VREG.h[IDX] & fGENMASKH((QREG), IDX))
+#define fCONDMASK8(QREG, IDX, YESVAL, NOVAL) \
+    (fGETQBIT(QREG, IDX) ? (YESVAL) : (NOVAL))
+#define fCONDMASK16(QREG, IDX, YESVAL, NOVAL) \
+    ((fGENMASKH(QREG, IDX) & (YESVAL)) | \
+     (fGENMASKH(fNOTQ(QREG), IDX) & (NOVAL)))
+#define fCONDMASK32(QREG, IDX, YESVAL, NOVAL) \
+    ((fGENMASKW(QREG, IDX) & (YESVAL)) | \
+     (fGENMASKW(fNOTQ(QREG), IDX) & (NOVAL)))
+#define fSETQBITS(REG, WIDTH, MASK, BITNO, VAL) \
+    do { \
+        size4u_t __TMP = (VAL); \
+        REG.w[(BITNO) >> 5] &= ~((MASK) << ((BITNO) & 0x1f)); \
+        REG.w[(BITNO) >> 5] |= (((__TMP) & (MASK)) << ((BITNO) & 0x1f)); \
+    } while (0)
+#define fSETQBIT(REG, BITNO, VAL) fSETQBITS(REG, 1, 1, BITNO, VAL)
+#define fVBYTES() (fVECSIZE())
+#define fVALIGN(ADDR, LOG2_ALIGNMENT) (ADDR = ADDR & ~(LOG2_ALIGNMENT - 1))
+#define fVLASTBYTE(ADDR, LOG2_ALIGNMENT) (ADDR = ADDR | (LOG2_ALIGNMENT - 1))
+#define fVELEM(WIDTH) ((fVECSIZE() * 8) / WIDTH)
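+/* HVX vector registers are 1 << 7 == 128 bytes wide in this configuration */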
+#define fVECLOGSIZE() (7)
+#define fVECSIZE() (1 << fVECLOGSIZE())
+#define fSWAPB(A, B) do { size1u_t tmp = A; A = B; B = tmp; } while (0)
+static inline mmvector_t mmvec_zero_vector(void)
+{
+    mmvector_t ret;
+    memset(&ret, 0, sizeof(ret));
+    return ret;
+}
+#define fVZERO() mmvec_zero_vector()
+#define fNEWVREG(VNUM) \
+    ((env->VRegs_updated & (((VRegMask)1) << VNUM)) ? env->future_VRegs[VNUM] \
+                                                    : mmvec_zero_vector())
+#define fV_AL_CHECK(EA, MASK) \
+    if ((EA) & (MASK)) { \
+        warn("aligning misaligned vector. EA=%08x", (EA)); \
+    }
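+/*
+ * The scatter/gather init helpers validate the VTCM region; if they flag
+ * an exception, the instruction semantics must return without logging.
+ */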
+#define fSCATTER_INIT(REGION_START, LENGTH, ELEMENT_SIZE) \
+    do { \
+        mem_vector_scatter_init(env, slot, REGION_START, LENGTH, ELEMENT_SIZE);\
+        if (EXCEPTION_DETECTED) { \
+            return; \
+        } \
+    } while (0)
+#define fGATHER_INIT(REGION_START, LENGTH, ELEMENT_SIZE) \
+    do { \
+        mem_vector_gather_init(env, slot, REGION_START, LENGTH, ELEMENT_SIZE); \
+        if (EXCEPTION_DETECTED) { \
+            return; \
+        } \
+    } while (0)
+#define fSCATTER_FINISH(OP) \
+    do { \
+        if (EXCEPTION_DETECTED) { \
+            return; \
+        } \
+        mem_vector_scatter_finish(env, slot, OP); \
+    } while (0)
+#define fGATHER_FINISH() \
+    do { \
+        if (EXCEPTION_DETECTED) { \
+            return; \
+        } \
+        mem_vector_gather_finish(env, slot); \
+    } while (0)
+#define fLOG_SCATTER_OP(SIZE) \
+    do { \
+        env->vtcm_log.op = 1; \
+        env->vtcm_log.op_size = SIZE; \
+    } while (0)
+#define fVLOG_VTCM_WORD_INCREMENT(EA, OFFSET, INC, IDX, ALIGNMENT, LEN) \
+    do { \
+        int log_byte = 0; \
+        vaddr_t va = EA; \
+        vaddr_t va_high = EA + LEN; \
+        for (int i0 = 0; i0 < 4; i0++) { \
+            log_byte = (va + i0) <= va_high; \
+            LOG_VTCM_BYTE(va + i0, log_byte, INC.ub[4 * IDX + i0], \
+                          4 * IDX + i0); \
+        } \
+    } while (0)
+#define fVLOG_VTCM_HALFWORD_INCREMENT(EA, OFFSET, INC, IDX, ALIGNMENT, LEN) \
+    do { \
+        int log_byte = 0; \
+        vaddr_t va = EA; \
+        vaddr_t va_high = EA + LEN; \
+        for (int i0 = 0; i0 < 2; i0++) { \
+            log_byte = (va + i0) <= va_high; \
+            LOG_VTCM_BYTE(va + i0, log_byte, INC.ub[2 * IDX + i0], \
+                          2 * IDX + i0); \
+        } \
+    } while (0)
+
+#define fVLOG_VTCM_HALFWORD_INCREMENT_DV(EA, OFFSET, INC, IDX, IDX2, IDX_H, \
+                                         ALIGNMENT, LEN) \
+    do { \
+        int log_byte = 0; \
+        vaddr_t va = EA; \
+        vaddr_t va_high = EA + LEN; \
+        for (int i0 = 0; i0 < 2; i0++) { \
+            log_byte = (va + i0) <= va_high; \
+            LOG_VTCM_BYTE(va + i0, log_byte, INC.ub[2 * IDX + i0], \
+                          2 * IDX + i0); \
+        } \
+    } while (0)
+
+/* NOTE - Will this always be tmp_VRegs[0]? */
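+/*
+ * Load each element byte from guest memory into tmp_VRegs[0] and log it.
+ * QVAL may reference the loop variable i0 (see the *Q gather variants).
+ */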
+#define GATHER_FUNCTION(EA, OFFSET, IDX, LEN, ELEMENT_SIZE, BANK_IDX, QVAL) \
+    do { \
+        int i0; \
+        vaddr_t va = EA; \
+        vaddr_t va_high = EA + LEN; \
+        int log_bank = 0; \
+        int log_byte = 0; \
+        for (i0 = 0; i0 < ELEMENT_SIZE; i0++) { \
+            log_byte = ((va + i0) <= va_high) && QVAL; \
+            log_bank |= (log_byte << i0); \
+            size1u_t B; \
+            get_user_u8(B, va + i0); \
+            env->tmp_VRegs[0].ub[ELEMENT_SIZE * IDX + i0] = B; \
+            LOG_VTCM_BYTE(va + i0, log_byte, B, ELEMENT_SIZE * IDX + i0); \
+        } \
+        LOG_VTCM_BANK(va, log_bank, BANK_IDX); \
+    } while (0)
+#define fVLOG_VTCM_GATHER_WORD(EA, OFFSET, IDX, LEN) \
+    do { \
+        GATHER_FUNCTION(EA, OFFSET, IDX, LEN, 4, IDX, 1); \
+    } while (0)
+#define fVLOG_VTCM_GATHER_HALFWORD(EA, OFFSET, IDX, LEN) \
+    do { \
+        GATHER_FUNCTION(EA, OFFSET, IDX, LEN, 2, IDX, 1); \
+    } while (0)
+#define fVLOG_VTCM_GATHER_HALFWORD_DV(EA, OFFSET, IDX, IDX2, IDX_H, LEN) \
+    do { \
+        GATHER_FUNCTION(EA, OFFSET, IDX, LEN, 2, (2 * IDX2 + IDX_H), 1); \
+    } while (0)
+#define fVLOG_VTCM_GATHER_WORDQ(EA, OFFSET, IDX, Q, LEN) \
+    do { \
+        GATHER_FUNCTION(EA, OFFSET, IDX, LEN, 4, IDX, \
+                        fGETQBIT(QsV, 4 * IDX + i0)); \
+    } while (0)
+#define fVLOG_VTCM_GATHER_HALFWORDQ(EA, OFFSET, IDX, Q, LEN) \
+    do { \
+        GATHER_FUNCTION(EA, OFFSET, IDX, LEN, 2, IDX, \
+                        fGETQBIT(QsV, 2 * IDX + i0)); \
+    } while (0)
+#define fVLOG_VTCM_GATHER_HALFWORDQ_DV(EA, OFFSET, IDX, IDX2, IDX_H, Q, LEN) \
+    do { \
+        GATHER_FUNCTION(EA, OFFSET, IDX, LEN, 2, (2 * IDX2 + IDX_H), \
+                        fGETQBIT(QsV, 2 * IDX + i0)); \
+    } while (0)
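+/*
+ * Scatter-accumulate commit: read each logged location back from guest
+ * memory, add the logged increment, and write the sum back.
+ */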
+#define SCATTER_OP_WRITE_TO_MEM(TYPE) \
+    do { \
+        for (int i = 0; i < env->vtcm_log.size; i += sizeof(TYPE)) { \
+            if (env->vtcm_log.mask.ub[i] != 0) { \
+                TYPE dst = 0; \
+                TYPE inc = 0; \
+                for (int j = 0; j < sizeof(TYPE); j++) { \
+                    size1u_t val; \
+                    get_user_u8(val, env->vtcm_log.va[i + j]); \
+                    dst |= val << (8 * j); \
+                    inc |= env->vtcm_log.data.ub[j + i] << (8 * j); \
+                    env->vtcm_log.mask.ub[j + i] = 0; \
+                    env->vtcm_log.data.ub[j + i] = 0; \
+                    env->vtcm_log.offsets.ub[j + i] = 0; \
+                } \
+                dst += inc; \
+                for (int j = 0; j < sizeof(TYPE); j++) { \
+                    put_user_u8((dst >> (8 * j)) & 0xFF, \
+                        env->vtcm_log.va[i + j]);  \
+                } \
+            } \
+        } \
+    } while (0)
+#define SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, ELEM_SIZE, BANK_IDX, QVAL, IN) \
+    do { \
+        int i0; \
+        target_ulong va = EA; \
+        target_ulong va_high = EA + LEN; \
+        int log_bank = 0; \
+        int log_byte = 0; \
+        for (i0 = 0; i0 < ELEM_SIZE; i0++) { \
+            log_byte = ((va + i0) <= va_high) && QVAL; \
+            log_bank |= (log_byte << i0); \
+            LOG_VTCM_BYTE(va + i0, log_byte, IN.ub[ELEM_SIZE * IDX + i0], \
+                          ELEM_SIZE * IDX + i0); \
+        } \
+        LOG_VTCM_BANK(va, log_bank, BANK_IDX); \
+    } while (0)
+#define fVLOG_VTCM_HALFWORD(EA, OFFSET, IN, IDX, LEN) \
+    do { \
+        SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, 2, IDX, 1, IN); \
+    } while (0)
+#define fVLOG_VTCM_WORD(EA, OFFSET, IN, IDX, LEN) \
+    do { \
+        SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, 4, IDX, 1, IN); \
+    } while (0)
+#define fVLOG_VTCM_HALFWORDQ(EA, OFFSET, IN, IDX, Q, LEN) \
+    do { \
+        SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, 2, IDX, \
+                         fGETQBIT(QsV, 2 * IDX + i0), IN); \
+    } while (0)
+#define fVLOG_VTCM_WORDQ(EA, OFFSET, IN, IDX, Q, LEN) \
+    do { \
+        SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, 4, IDX, \
+                         fGETQBIT(QsV, 4 * IDX + i0), IN); \
+    } while (0)
+#define fVLOG_VTCM_HALFWORD_DV(EA, OFFSET, IN, IDX, IDX2, IDX_H, LEN) \
+    do { \
+        SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, 2, \
+                         (2 * IDX2 + IDX_H), 1, IN); \
+    } while (0)
+#define fVLOG_VTCM_HALFWORDQ_DV(EA, OFFSET, IN, IDX, Q, IDX2, IDX_H, LEN) \
+    do { \
+        SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, 2, (2 * IDX2 + IDX_H), \
+                         fGETQBIT(QsV, 2 * IDX + i0), IN); \
+    } while (0)
+#define fSTORERELEASE(EA, TYPE) \
+    do { \
+        fV_AL_CHECK(EA, fVECSIZE() - 1); \
+    } while (0)
+#define fLOADMMV_AL(EA, ALIGNMENT, LEN, DST) \
+    do { \
+        fV_AL_CHECK(EA, ALIGNMENT - 1); \
+        mem_load_vector_oddva(env, EA & ~(ALIGNMENT - 1), EA, slot, LEN, \
+                              &DST.ub[0], fUSE_LOOKUP_ADDRESS_BY_REV()); \
+    } while (0)
+#define fLOADMMV(EA, DST) fLOADMMV_AL(EA, fVECSIZE(), fVECSIZE(), DST)
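+/* Unaligned vector loads are split in two at the alignment boundary */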
+#define fLOADMMVU_AL(EA, ALIGNMENT, LEN, DST) \
+    do { \
+        size4u_t size2 = (EA) & (ALIGNMENT - 1); \
+        size4u_t size1 = LEN - size2; \
+        mem_load_vector_oddva(env, EA + size1, EA + fVECSIZE(), 1, size2, \
+                              &DST.ub[size1], fUSE_LOOKUP_ADDRESS()); \
+        mem_load_vector_oddva(env, EA, EA, 0, size1, &DST.ub[0], \
+                              fUSE_LOOKUP_ADDRESS_BY_REV()); \
+    } while (0)
+#define fLOADMMVU(EA, DST) \
+    do { \
+        if ((EA & (fVECSIZE() - 1)) == 0) { \
+            fLOADMMV_AL(EA, fVECSIZE(), fVECSIZE(), DST); \
+        } else { \
+            fLOADMMVU_AL(EA, fVECSIZE(), fVECSIZE(), DST); \
+        } \
+    } while (0)
+#define fSTOREMMV_AL(EA, ALIGNMENT, LEN, SRC) \
+    do  { \
+        fV_AL_CHECK(EA, ALIGNMENT - 1); \
+        mem_store_vector_oddva(env, EA & ~(ALIGNMENT - 1), EA, slot, LEN, \
+                               &SRC.ub[0], 0, 0, \
+                               fUSE_LOOKUP_ADDRESS_BY_REV()); \
+    } while (0)
+#define fSTOREMMV(EA, SRC) fSTOREMMV_AL(EA, fVECSIZE(), fVECSIZE(), SRC)
+#define fSTOREMMVQ_AL(EA, ALIGNMENT, LEN, SRC, MASK) \
+    do { \
+        mmvector_t maskvec; \
+        int i; \
+        for (i = 0; i < fVECSIZE(); i++) { \
+            maskvec.ub[i] = fGETQBIT(MASK, i); \
+        } \
+        mem_store_vector_oddva(env, EA & ~(ALIGNMENT - 1), EA, slot, LEN, \
+                               &SRC.ub[0], &maskvec.ub[0], 0, \
+                               fUSE_LOOKUP_ADDRESS_BY_REV()); \
+    } while (0)
+#define fSTOREMMVQ(EA, SRC, MASK) \
+    fSTOREMMVQ_AL(EA, fVECSIZE(), fVECSIZE(), SRC, MASK)
+#define fSTOREMMVNQ_AL(EA, ALIGNMENT, LEN, SRC, MASK) \
+    do { \
+        mmvector_t maskvec; \
+        int i; \
+        for (i = 0; i < fVECSIZE(); i++) { \
+            maskvec.ub[i] = fGETQBIT(MASK, i); \
+        } \
+        fV_AL_CHECK(EA, ALIGNMENT - 1); \
+        mem_store_vector_oddva(env, EA & ~(ALIGNMENT - 1), EA, slot, LEN, \
+                               &SRC.ub[0], &maskvec.ub[0], 1, \
+                               fUSE_LOOKUP_ADDRESS_BY_REV()); \
+    } while (0)
+#define fSTOREMMVNQ(EA, SRC, MASK) \
+    fSTOREMMVNQ_AL(EA, fVECSIZE(), fVECSIZE(), SRC, MASK)
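+/* Unaligned vector stores are likewise split at the alignment boundary */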
+#define fSTOREMMVU_AL(EA, ALIGNMENT, LEN, SRC) \
+    do { \
+        size4u_t size1 = ALIGNMENT - ((EA) & (ALIGNMENT - 1)); \
+        size4u_t size2; \
+        if (size1 > LEN) { \
+            size1 = LEN; \
+        } \
+        size2 = LEN - size1; \
+        mem_store_vector_oddva(env, EA + size1, EA + fVECSIZE(), 1, size2, \
+                               &SRC.ub[size1], 0, 0, \
+                               fUSE_LOOKUP_ADDRESS()); \
+        mem_store_vector_oddva(env, EA, EA, 0, size1, &SRC.ub[0], 0, 0, \
+                               fUSE_LOOKUP_ADDRESS_BY_REV()); \
+    } while (0)
+#define fSTOREMMVU(EA, SRC) \
+    do { \
+        if ((EA & (fVECSIZE() - 1)) == 0) { \
+            fSTOREMMV_AL(EA, fVECSIZE(), fVECSIZE(), SRC); \
+        } else { \
+            fSTOREMMVU_AL(EA, fVECSIZE(), fVECSIZE(), SRC); \
+        } \
+    } while (0)
+#define fSTOREMMVQU_AL(EA, ALIGNMENT, LEN, SRC, MASK) \
+    do { \
+        size4u_t size1 = ALIGNMENT - ((EA) & (ALIGNMENT - 1)); \
+        size4u_t size2; \
+        mmvector_t maskvec; \
+        int i; \
+        for (i = 0; i < fVECSIZE(); i++) { \
+            maskvec.ub[i] = fGETQBIT(MASK, i); \
+        } \
+        if (size1 > LEN) { \
+            size1 = LEN; \
+        } \
+        size2 = LEN - size1; \
+        mem_store_vector_oddva(env, EA + size1, EA + fVECSIZE(), 1, size2, \
+                               &SRC.ub[size1], &maskvec.ub[size1], 0, \
+                               fUSE_LOOKUP_ADDRESS()); \
+        mem_store_vector_oddva(env, EA, EA, 0, size1, &SRC.ub[0], \
+                               &maskvec.ub[0], 0, \
+                               fUSE_LOOKUP_ADDRESS_BY_REV()); \
+    } while (0)
+#define fSTOREMMVNQU_AL(EA, ALIGNMENT, LEN, SRC, MASK) \
+    do { \
+        size4u_t size1 = ALIGNMENT - ((EA) & (ALIGNMENT - 1)); \
+        size4u_t size2; \
+        mmvector_t maskvec; \
+        int i; \
+        for (i = 0; i < fVECSIZE(); i++) { \
+            maskvec.ub[i] = fGETQBIT(MASK, i); \
+        } \
+        if (size1 > LEN) { \
+            size1 = LEN; \
+        } \
+        size2 = LEN - size1; \
+        mem_store_vector_oddva(env, EA + size1, EA + fVECSIZE(), 1, size2, \
+                               &SRC.ub[size1], &maskvec.ub[size1], 1, \
+                               fUSE_LOOKUP_ADDRESS()); \
+        mem_store_vector_oddva(env, EA, EA, 0, size1, &SRC.ub[0], \
+                               &maskvec.ub[0], 1, \
+                               fUSE_LOOKUP_ADDRESS_BY_REV()); \
+    } while (0)
+#define fVFOREACH(WIDTH, VAR) for (VAR = 0; VAR < fVELEM(WIDTH); VAR++)
+#define fVARRAY_ELEMENT_ACCESS(ARRAY, TYPE, INDEX) \
+    ARRAY.v[(INDEX) / (fVECSIZE() / (sizeof(ARRAY.TYPE[0])))].TYPE[(INDEX) % \
+    (fVECSIZE() / (sizeof(ARRAY.TYPE[0])))]
+/* Grabs the .tmp data, wherever it is, and clears the .tmp status */
+/* Used for vhist */
+static inline mmvector_t mmvec_vtmp_data(void)
+{
+    mmvector_t ret;
+    g_assert_not_reached();
+    return ret;
+}
+#define fTMPVDATA() mmvec_vtmp_data()
+#define fVSATDW(U, V) fVSATW(((((long long)U) << 32) | fZXTN(32, 64, V)))
+#define fVASL_SATHI(U, V) fVSATW(((U) << 1) | ((V) >> 31))
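+/*
+ * Element-wise saturating/averaging arithmetic: operands are widened to
+ * 2 * WIDTH bits, combined, then saturated or shifted back to WIDTH bits.
+ */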
+#define fVUADDSAT(WIDTH, U, V) \
+    fVSATUN(WIDTH, fZXTN(WIDTH, 2 * WIDTH, U) + fZXTN(WIDTH, 2 * WIDTH, V))
+#define fVSADDSAT(WIDTH, U, V) \
+    fVSATN(WIDTH, fSXTN(WIDTH, 2 * WIDTH, U) + fSXTN(WIDTH, 2 * WIDTH, V))
+#define fVUSUBSAT(WIDTH, U, V) \
+    fVSATUN(WIDTH, fZXTN(WIDTH, 2 * WIDTH, U) - fZXTN(WIDTH, 2 * WIDTH, V))
+#define fVSSUBSAT(WIDTH, U, V) \
+    fVSATN(WIDTH, fSXTN(WIDTH, 2 * WIDTH, U) - fSXTN(WIDTH, 2 * WIDTH, V))
+#define fVAVGU(WIDTH, U, V) \
+    ((fZXTN(WIDTH, 2 * WIDTH, U) + fZXTN(WIDTH, 2 * WIDTH, V)) >> 1)
+#define fVAVGURND(WIDTH, U, V) \
+    ((fZXTN(WIDTH, 2 * WIDTH, U) + fZXTN(WIDTH, 2 * WIDTH, V) + 1) >> 1)
+#define fVNAVGU(WIDTH, U, V) \
+    ((fZXTN(WIDTH, 2 * WIDTH, U) - fZXTN(WIDTH, 2 * WIDTH, V)) >> 1)
+#define fVNAVGURNDSAT(WIDTH, U, V) \
+    fVSATUN(WIDTH, ((fZXTN(WIDTH, 2 * WIDTH, U) - \
+                     fZXTN(WIDTH, 2 * WIDTH, V) + 1) >> 1))
+#define fVAVGS(WIDTH, U, V) \
+    ((fSXTN(WIDTH, 2 * WIDTH, U) + fSXTN(WIDTH, 2 * WIDTH, V)) >> 1)
+#define fVAVGSRND(WIDTH, U, V) \
+    ((fSXTN(WIDTH, 2 * WIDTH, U) + fSXTN(WIDTH, 2 * WIDTH, V) + 1) >> 1)
+#define fVNAVGS(WIDTH, U, V) \
+    ((fSXTN(WIDTH, 2 * WIDTH, U) - fSXTN(WIDTH, 2 * WIDTH, V)) >> 1)
+#define fVNAVGSRND(WIDTH, U, V) \
+    ((fSXTN(WIDTH, 2 * WIDTH, U) - fSXTN(WIDTH, 2 * WIDTH, V) + 1) >> 1)
+#define fVNAVGSRNDSAT(WIDTH, U, V) \
+    fVSATN(WIDTH, ((fSXTN(WIDTH, 2 * WIDTH, U) - \
+                    fSXTN(WIDTH, 2 * WIDTH, V) + 1) >> 1))
+#define fVNOROUND(VAL, SHAMT) VAL
+#define fVNOSAT(VAL) VAL
+#define fVROUND(VAL, SHAMT) \
+    ((VAL) + (((SHAMT) > 0) ? (1LL << ((SHAMT) - 1)) : 0))
+#define fCARRY_FROM_ADD32(A, B, C) \
+    (((fZXTN(32, 64, A) + fZXTN(32, 64, B) + C) >> 32) & 1)
+#define fUARCH_NOTE_PUMP_4X()
+#define fUARCH_NOTE_PUMP_2X()
+
 #endif
-- 
2.7.4

