From: Taylor Simpson <tsimpson@quicinc.com>
To: qemu-devel@nongnu.org
Cc: riku.voipio@iki.fi, richard.henderson@linaro.org,
	laurent@vivier.eu, Taylor Simpson <tsimpson@quicinc.com>,
	philmd@redhat.com, aleksandar.m.mail@gmail.com
Subject: [RFC PATCH v2 63/67] Hexagon HVX macros referenced in instruction semantics
Date: Fri, 28 Feb 2020 10:43:59 -0600
Message-ID: <1582908244-304-64-git-send-email-tsimpson@quicinc.com>
In-Reply-To: <1582908244-304-1-git-send-email-tsimpson@quicinc.com>

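Add the Hexagon Vector eXtensions (HVX) macro definitions that are
referenced by the imported instruction semantics.  These cover VTCM
store logging, predicate (Q) register bit manipulation, gather/scatter
staging, aligned and unaligned vector load/store, and the
widening/saturating arithmetic helpers.

As a rough (hypothetical) illustration of how the imported semantics
use these macros, an element-wise word add might be written as

    int i;
    fVFOREACH(32, i) {
        VdV.w[i] = VuV.w[i] + VvV.w[i];
    }

where fVFOREACH(32, i) iterates over the fVELEM(32) == 32 word
elements of a 128-byte HVX vector.
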
Signed-off-by: Taylor Simpson <tsimpson@quicinc.com>
---
 target/hexagon/mmvec/macros.h | 465 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 465 insertions(+)

diff --git a/target/hexagon/mmvec/macros.h b/target/hexagon/mmvec/macros.h
index be80bbd..c63a00a 100644
--- a/target/hexagon/mmvec/macros.h
+++ b/target/hexagon/mmvec/macros.h
@@ -259,4 +259,469 @@ static bool readonly_ok(insn_t *insn)
 #define WRITE_QREG_e(NUM, VAR, VNEW)     LOG_QREG_WRITE(NUM, VAR, VNEW)
 #define WRITE_QREG_x(NUM, VAR, VNEW)     LOG_QREG_WRITE(NUM, VAR, VNEW)
 
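+/*
+ * Log one byte of a VTCM store: the value, a byte-enable flag, and the
+ * virtual address, indexed by byte position within the vector.
+ */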
+#define LOG_VTCM_BYTE(VA, MASK, VAL, IDX) \
+    do { \
+        env->vtcm_log.data.ub[IDX] = (VAL); \
+        env->vtcm_log.mask.ub[IDX] = (MASK); \
+        env->vtcm_log.va[IDX] = (VA); \
+    } while (0)
+
+/* VTCM Banks */
+#define LOG_VTCM_BANK(VAL, MASK, IDX) \
+    do { \
+        env->vtcm_log.offsets.uh[IDX]  = (VAL & 0xFFF); \
+        env->vtcm_log.offsets.uh[IDX] |= ((MASK & 0xF) << 12); \
+    } while (0)
+
+#define fUSE_LOOKUP_ADDRESS_BY_REV(PROC) true
+#define fUSE_LOOKUP_ADDRESS() 1
+#define fRT8NOTE()
+#define fNOTQ(VAL) \
+    ({ \
+        mmqreg_t _ret;  \
+        int _i_;  \
+        for (_i_ = 0; _i_ < fVECSIZE() / 64; _i_++) { \
+            _ret.ud[_i_] = ~VAL.ud[_i_]; \
+        } \
+        _ret;\
+     })
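+/* Extract a bit field from a predicate (Q) register; MASK selects the width */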
+#define fGETQBITS(REG, WIDTH, MASK, BITNO) \
+    ((MASK) & (REG.w[(BITNO) >> 5] >> ((BITNO) & 0x1f)))
+#define fGETQBIT(REG, BITNO) fGETQBITS(REG, 1, 1, BITNO)
+#define fGENMASKW(QREG, IDX) \
+    (((fGETQBIT(QREG, (IDX * 4 + 0)) ? 0xFF : 0x0) << 0)  | \
+     ((fGETQBIT(QREG, (IDX * 4 + 1)) ? 0xFF : 0x0) << 8)  | \
+     ((fGETQBIT(QREG, (IDX * 4 + 2)) ? 0xFF : 0x0) << 16) | \
+     ((fGETQBIT(QREG, (IDX * 4 + 3)) ? 0xFF : 0x0) << 24))
+#define fGETNIBBLE(IDX, SRC) (fSXTN(4, 8, (SRC >> (4 * IDX)) & 0xF))
+#define fGETCRUMB(IDX, SRC) (fSXTN(2, 8, (SRC >> (2 * IDX)) & 0x3))
+#define fGETCRUMB_SYMMETRIC(IDX, SRC) \
+    ((fGETCRUMB(IDX, SRC) >= 0 ? (2 - fGETCRUMB(IDX, SRC)) \
+                               : fGETCRUMB(IDX, SRC)))
+#define fGENMASKH(QREG, IDX) \
+    (((fGETQBIT(QREG, (IDX * 2 + 0)) ? 0xFF : 0x0) << 0) | \
+     ((fGETQBIT(QREG, (IDX * 2 + 1)) ? 0xFF : 0x0) << 8))
+#define fGETMASKW(VREG, QREG, IDX) (VREG.w[IDX] & fGENMASKW((QREG), IDX))
+#define fGETMASKH(VREG, QREG, IDX) (VREG.h[IDX] & fGENMASKH((QREG), IDX))
+#define fCONDMASK8(QREG, IDX, YESVAL, NOVAL) \
+    (fGETQBIT(QREG, IDX) ? (YESVAL) : (NOVAL))
+#define fCONDMASK16(QREG, IDX, YESVAL, NOVAL) \
+    ((fGENMASKH(QREG, IDX) & (YESVAL)) | \
+     (fGENMASKH(fNOTQ(QREG), IDX) & (NOVAL)))
+#define fCONDMASK32(QREG, IDX, YESVAL, NOVAL) \
+    ((fGENMASKW(QREG, IDX) & (YESVAL)) | \
+     (fGENMASKW(fNOTQ(QREG), IDX) & (NOVAL)))
+#define fSETQBITS(REG, WIDTH, MASK, BITNO, VAL) \
+    do { \
+        size4u_t __TMP = (VAL); \
+        REG.w[(BITNO) >> 5] &= ~((MASK) << ((BITNO) & 0x1f)); \
+        REG.w[(BITNO) >> 5] |= (((__TMP) & (MASK)) << ((BITNO) & 0x1f)); \
+    } while (0)
+#define fSETQBIT(REG, BITNO, VAL) fSETQBITS(REG, 1, 1, BITNO, VAL)
+#define fVBYTES() (fVECSIZE())
+#define fVALIGN(ADDR, LOG2_ALIGNMENT) (ADDR = ADDR & ~(LOG2_ALIGNMENT - 1))
+#define fVLASTBYTE(ADDR, LOG2_ALIGNMENT) (ADDR = ADDR | (LOG2_ALIGNMENT - 1))
+#define fVELEM(WIDTH) ((fVECSIZE() * 8) / WIDTH)
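+/* HVX vectors are (1 << 7) = 128 bytes */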
+#define fVECLOGSIZE() (7)
+#define fVECSIZE() (1 << fVECLOGSIZE())
+#define fSWAPB(A, B) do { size1u_t tmp = A; A = B; B = tmp; } while (0)
+static inline mmvector_t mmvec_zero_vector(void)
+{
+    mmvector_t ret;
+    memset(&ret, 0, sizeof(ret));
+    return ret;
+}
+#define fVZERO() mmvec_zero_vector()
+#define fNEWVREG(VNUM) \
+    ((env->VRegs_updated & (((VRegMask)1) << VNUM)) ? env->future_VRegs[VNUM] \
+                                                    : mmvec_zero_vector())
+#define fV_AL_CHECK(EA, MASK) \
+    do { \
+        if ((EA) & (MASK)) { \
+            warn("aligning misaligned vector. EA=%08x", (EA)); \
+        } \
+    } while (0)
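+/*
+ * Initialize scatter/gather state; bail out of the helper if the
+ * init routine detected an exception.
+ */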
+#define fSCATTER_INIT(REGION_START, LENGTH, ELEMENT_SIZE) \
+    do { \
+        mem_vector_scatter_init(env, slot, REGION_START, LENGTH, ELEMENT_SIZE);\
+        if (EXCEPTION_DETECTED) { \
+            return; \
+        } \
+    } while (0)
+#define fGATHER_INIT(REGION_START, LENGTH, ELEMENT_SIZE) \
+    do { \
+        mem_vector_gather_init(env, slot, REGION_START, LENGTH, ELEMENT_SIZE); \
+        if (EXCEPTION_DETECTED) { \
+            return; \
+        } \
+    } while (0)
+#define fSCATTER_FINISH(OP) \
+    do { \
+        if (EXCEPTION_DETECTED) { \
+            return; \
+        } \
+        mem_vector_scatter_finish(env, slot, OP); \
+    } while (0)
+#define fGATHER_FINISH() \
+    do { \
+        if (EXCEPTION_DETECTED) { \
+            return; \
+        } \
+        mem_vector_gather_finish(env, slot); \
+    } while (0)
+#define fLOG_SCATTER_OP(SIZE) \
+    do { \
+        env->vtcm_log.op = 1; \
+        env->vtcm_log.op_size = SIZE; \
+    } while (0)
+#define fVLOG_VTCM_WORD_INCREMENT(EA, OFFSET, INC, IDX, ALIGNMENT, LEN) \
+    do { \
+        int log_byte = 0; \
+        vaddr_t va = EA; \
+        vaddr_t va_high = EA + LEN; \
+        for (int i0 = 0; i0 < 4; i0++) { \
+            log_byte = (va + i0) <= va_high; \
+            LOG_VTCM_BYTE(va + i0, log_byte, INC.ub[4 * IDX + i0], \
+                          4 * IDX + i0); \
+        } \
+    } while (0)
+#define fVLOG_VTCM_HALFWORD_INCREMENT(EA, OFFSET, INC, IDX, ALIGNMENT, LEN) \
+    do { \
+        int log_byte = 0; \
+        vaddr_t va = EA; \
+        vaddr_t va_high = EA + LEN; \
+        for (int i0 = 0; i0 < 2; i0++) { \
+            log_byte = (va + i0) <= va_high; \
+            LOG_VTCM_BYTE(va + i0, log_byte, INC.ub[2 * IDX + i0], \
+                          2 * IDX + i0); \
+        } \
+    } while (0)
+
+#define fVLOG_VTCM_HALFWORD_INCREMENT_DV(EA, OFFSET, INC, IDX, IDX2, IDX_H, \
+                                         ALIGNMENT, LEN) \
+    do { \
+        int log_byte = 0; \
+        vaddr_t va = EA; \
+        vaddr_t va_high = EA + LEN; \
+        for (int i0 = 0; i0 < 2; i0++) { \
+            log_byte = (va + i0) <= va_high; \
+            LOG_VTCM_BYTE(va + i0, log_byte, INC.ub[2 * IDX + i0], \
+                          2 * IDX + i0); \
+        } \
+    } while (0)
+
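+/*
+ * Gathered bytes are staged in tmp_VRegs[0] and recorded in the VTCM
+ * log for the store that commits the gather result.
+ */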
+/* NOTE - Will this always be tmp_VRegs[0]? */
+#define GATHER_FUNCTION(EA, OFFSET, IDX, LEN, ELEMENT_SIZE, BANK_IDX, QVAL) \
+    do { \
+        int i0; \
+        vaddr_t va = EA; \
+        vaddr_t va_high = EA + LEN; \
+        int log_bank = 0; \
+        int log_byte = 0; \
+        for (i0 = 0; i0 < ELEMENT_SIZE; i0++) { \
+            log_byte = ((va + i0) <= va_high) && QVAL; \
+            log_bank |= (log_byte << i0); \
+            size1u_t B; \
+            get_user_u8(B, EA + i0); \
+            env->tmp_VRegs[0].ub[ELEMENT_SIZE * IDX + i0] = B; \
+            LOG_VTCM_BYTE(va + i0, log_byte, B, ELEMENT_SIZE * IDX + i0); \
+        } \
+        LOG_VTCM_BANK(va, log_bank, BANK_IDX); \
+    } while (0)
+#define fVLOG_VTCM_GATHER_WORD(EA, OFFSET, IDX, LEN) \
+    do { \
+        GATHER_FUNCTION(EA, OFFSET, IDX, LEN, 4, IDX, 1); \
+    } while (0)
+#define fVLOG_VTCM_GATHER_HALFWORD(EA, OFFSET, IDX, LEN) \
+    do { \
+        GATHER_FUNCTION(EA, OFFSET, IDX, LEN, 2, IDX, 1); \
+    } while (0)
+#define fVLOG_VTCM_GATHER_HALFWORD_DV(EA, OFFSET, IDX, IDX2, IDX_H, LEN) \
+    do { \
+        GATHER_FUNCTION(EA, OFFSET, IDX, LEN, 2, (2 * IDX2 + IDX_H), 1); \
+    } while (0)
+#define fVLOG_VTCM_GATHER_WORDQ(EA, OFFSET, IDX, Q, LEN) \
+    do { \
+        GATHER_FUNCTION(EA, OFFSET, IDX, LEN, 4, IDX, \
+                        fGETQBIT(QsV, 4 * IDX + i0)); \
+    } while (0)
+#define fVLOG_VTCM_GATHER_HALFWORDQ(EA, OFFSET, IDX, Q, LEN) \
+    do { \
+        GATHER_FUNCTION(EA, OFFSET, IDX, LEN, 2, IDX, \
+                        fGETQBIT(QsV, 2 * IDX + i0)); \
+    } while (0)
+#define fVLOG_VTCM_GATHER_HALFWORDQ_DV(EA, OFFSET, IDX, IDX2, IDX_H, Q, LEN) \
+    do { \
+        GATHER_FUNCTION(EA, OFFSET, IDX, LEN, 2, (2 * IDX2 + IDX_H), \
+                        fGETQBIT(QsV, 2 * IDX + i0)); \
+    } while (0)
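+/*
+ * For accumulating scatters, read the current memory contents, add the
+ * logged increment, write the sum back, and clear the log as we go.
+ */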
+#define SCATTER_OP_WRITE_TO_MEM(TYPE) \
+    do { \
+        for (int i = 0; i < env->vtcm_log.size; i += sizeof(TYPE)) { \
+            if (env->vtcm_log.mask.ub[i] != 0) { \
+                TYPE dst = 0; \
+                TYPE inc = 0; \
+                for (int j = 0; j < sizeof(TYPE); j++) { \
+                    size1u_t val; \
+                    get_user_u8(val, env->vtcm_log.va[i + j]); \
+                    dst |= val << (8 * j); \
+                    inc |= env->vtcm_log.data.ub[j + i] << (8 * j); \
+                    env->vtcm_log.mask.ub[j + i] = 0; \
+                    env->vtcm_log.data.ub[j + i] = 0; \
+                    env->vtcm_log.offsets.ub[j + i] = 0; \
+                } \
+                dst += inc; \
+                for (int j = 0; j < sizeof(TYPE); j++) { \
+                    put_user_u8((dst >> (8 * j)) & 0xFF, \
+                        env->vtcm_log.va[i + j]);  \
+                } \
+            } \
+        } \
+    } while (0)
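+/* Log the bytes of a scatter store along with a per-bank byte-enable mask */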
+#define SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, ELEM_SIZE, BANK_IDX, QVAL, IN) \
+    do { \
+        int i0; \
+        target_ulong va = EA; \
+        target_ulong va_high = EA + LEN; \
+        int log_bank = 0; \
+        int log_byte = 0; \
+        for (i0 = 0; i0 < ELEM_SIZE; i0++) { \
+            log_byte = ((va + i0) <= va_high) && QVAL; \
+            log_bank |= (log_byte << i0); \
+            LOG_VTCM_BYTE(va + i0, log_byte, IN.ub[ELEM_SIZE * IDX + i0], \
+                          ELEM_SIZE * IDX + i0); \
+        } \
+        LOG_VTCM_BANK(va, log_bank, BANK_IDX); \
+    } while (0)
+#define fVLOG_VTCM_HALFWORD(EA, OFFSET, IN, IDX, LEN) \
+    do { \
+        SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, 2, IDX, 1, IN); \
+    } while (0)
+#define fVLOG_VTCM_WORD(EA, OFFSET, IN, IDX, LEN) \
+    do { \
+        SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, 4, IDX, 1, IN); \
+    } while (0)
+#define fVLOG_VTCM_HALFWORDQ(EA, OFFSET, IN, IDX, Q, LEN) \
+    do { \
+        SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, 2, IDX, \
+                         fGETQBIT(QsV, 2 * IDX + i0), IN); \
+    } while (0)
+#define fVLOG_VTCM_WORDQ(EA, OFFSET, IN, IDX, Q, LEN) \
+    do { \
+        SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, 4, IDX, \
+                         fGETQBIT(QsV, 4 * IDX + i0), IN); \
+    } while (0)
+#define fVLOG_VTCM_HALFWORD_DV(EA, OFFSET, IN, IDX, IDX2, IDX_H, LEN) \
+    do { \
+        SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, 2, \
+                         (2 * IDX2 + IDX_H), 1, IN); \
+    } while (0)
+#define fVLOG_VTCM_HALFWORDQ_DV(EA, OFFSET, IN, IDX, Q, IDX2, IDX_H, LEN) \
+    do { \
+        SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, 2, (2 * IDX2 + IDX_H), \
+                         fGETQBIT(QsV, 2 * IDX + i0), IN); \
+    } while (0)
+#define fSTORERELEASE(EA, TYPE) \
+    do { \
+        fV_AL_CHECK(EA, fVECSIZE() - 1); \
+    } while (0)
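+/* Aligned vector load: the EA is masked down to ALIGNMENT before the access */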
+#define fLOADMMV_AL(EA, ALIGNMENT, LEN, DST) \
+    do { \
+        fV_AL_CHECK(EA, ALIGNMENT - 1); \
+        mem_load_vector_oddva(env, EA & ~(ALIGNMENT - 1), EA, slot, LEN, \
+                              &DST.ub[0], fUSE_LOOKUP_ADDRESS_BY_REV()); \
+    } while (0)
+#define fLOADMMV(EA, DST) fLOADMMV_AL(EA, fVECSIZE(), fVECSIZE(), DST)
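+/*
+ * Unaligned vector load: split the access at the alignment boundary
+ * and load the two pieces separately.
+ */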
+#define fLOADMMVU_AL(EA, ALIGNMENT, LEN, DST) \
+    do { \
+        size4u_t size2 = (EA) & (ALIGNMENT - 1); \
+        size4u_t size1 = LEN - size2; \
+        mem_load_vector_oddva(env, EA + size1, EA + fVECSIZE(), 1, size2, \
+                              &DST.ub[size1], fUSE_LOOKUP_ADDRESS()); \
+        mem_load_vector_oddva(env, EA, EA, 0, size1, &DST.ub[0], \
+                              fUSE_LOOKUP_ADDRESS_BY_REV()); \
+    } while (0)
+#define fLOADMMVU(EA, DST) \
+    do { \
+        if ((EA & (fVECSIZE() - 1)) == 0) { \
+            fLOADMMV_AL(EA, fVECSIZE(), fVECSIZE(), DST); \
+        } else { \
+            fLOADMMVU_AL(EA, fVECSIZE(), fVECSIZE(), DST); \
+        } \
+    } while (0)
+#define fSTOREMMV_AL(EA, ALIGNMENT, LEN, SRC) \
+    do  { \
+        fV_AL_CHECK(EA, ALIGNMENT - 1); \
+        mem_store_vector_oddva(env, EA & ~(ALIGNMENT - 1), EA, slot, LEN, \
+                               &SRC.ub[0], 0, 0, \
+                               fUSE_LOOKUP_ADDRESS_BY_REV()); \
+    } while (0)
+#define fSTOREMMV(EA, SRC) fSTOREMMV_AL(EA, fVECSIZE(), fVECSIZE(), SRC)
+#define fSTOREMMVQ_AL(EA, ALIGNMENT, LEN, SRC, MASK) \
+    do { \
+        mmvector_t maskvec; \
+        int i; \
+        for (i = 0; i < fVECSIZE(); i++) { \
+            maskvec.ub[i] = fGETQBIT(MASK, i); \
+        } \
+        mem_store_vector_oddva(env, EA & ~(ALIGNMENT - 1), EA, slot, LEN, \
+                               &SRC.ub[0], &maskvec.ub[0], 0, \
+                               fUSE_LOOKUP_ADDRESS_BY_REV()); \
+    } while (0)
+#define fSTOREMMVQ(EA, SRC, MASK) \
+    fSTOREMMVQ_AL(EA, fVECSIZE(), fVECSIZE(), SRC, MASK)
+#define fSTOREMMVNQ_AL(EA, ALIGNMENT, LEN, SRC, MASK) \
+    do { \
+        mmvector_t maskvec; \
+        int i; \
+        for (i = 0; i < fVECSIZE(); i++) { \
+            maskvec.ub[i] = fGETQBIT(MASK, i); \
+        } \
+        fV_AL_CHECK(EA, ALIGNMENT - 1); \
+        mem_store_vector_oddva(env, EA & ~(ALIGNMENT - 1), EA, slot, LEN, \
+                               &SRC.ub[0], &maskvec.ub[0], 1, \
+                               fUSE_LOOKUP_ADDRESS_BY_REV()); \
+    } while (0)
+#define fSTOREMMVNQ(EA, SRC, MASK) \
+    fSTOREMMVNQ_AL(EA, fVECSIZE(), fVECSIZE(), SRC, MASK)
+#define fSTOREMMVU_AL(EA, ALIGNMENT, LEN, SRC) \
+    do { \
+        size4u_t size1 = ALIGNMENT - ((EA) & (ALIGNMENT - 1)); \
+        size4u_t size2; \
+        if (size1 > LEN) { \
+            size1 = LEN; \
+        } \
+        size2 = LEN - size1; \
+        mem_store_vector_oddva(env, EA + size1, EA + fVECSIZE(), 1, size2, \
+                               &SRC.ub[size1], 0, 0, \
+                               fUSE_LOOKUP_ADDRESS()); \
+        mem_store_vector_oddva(env, EA, EA, 0, size1, &SRC.ub[0], 0, 0, \
+                               fUSE_LOOKUP_ADDRESS_BY_REV()); \
+    } while (0)
+#define fSTOREMMVU(EA, SRC) \
+    do { \
+        if ((EA & (fVECSIZE() - 1)) == 0) { \
+            fSTOREMMV_AL(EA, fVECSIZE(), fVECSIZE(), SRC); \
+        } else { \
+            fSTOREMMVU_AL(EA, fVECSIZE(), fVECSIZE(), SRC); \
+        } \
+    } while (0)
+#define fSTOREMMVQU_AL(EA, ALIGNMENT, LEN, SRC, MASK) \
+    do { \
+        size4u_t size1 = ALIGNMENT - ((EA) & (ALIGNMENT - 1)); \
+        size4u_t size2; \
+        mmvector_t maskvec; \
+        int i; \
+        for (i = 0; i < fVECSIZE(); i++) { \
+            maskvec.ub[i] = fGETQBIT(MASK, i); \
+        } \
+        if (size1 > LEN) { \
+            size1 = LEN; \
+        } \
+        size2 = LEN - size1; \
+        mem_store_vector_oddva(env, EA + size1, EA + fVECSIZE(), 1, size2, \
+                               &SRC.ub[size1], &maskvec.ub[size1], 0, \
+                               fUSE_LOOKUP_ADDRESS()); \
+        mem_store_vector_oddva(env, EA, EA, 0, size1, &SRC.ub[0], \
+                               &maskvec.ub[0], 0, \
+                               fUSE_LOOKUP_ADDRESS_BY_REV()); \
+    } while (0)
+#define fSTOREMMVNQU_AL(EA, ALIGNMENT, LEN, SRC, MASK) \
+    do { \
+        size4u_t size1 = ALIGNMENT - ((EA) & (ALIGNMENT - 1)); \
+        size4u_t size2; \
+        mmvector_t maskvec; \
+        int i; \
+        for (i = 0; i < fVECSIZE(); i++) { \
+            maskvec.ub[i] = fGETQBIT(MASK, i); \
+        } \
+        if (size1 > LEN) { \
+            size1 = LEN; \
+        } \
+        size2 = LEN - size1; \
+        mem_store_vector_oddva(env, EA + size1, EA + fVECSIZE(), 1, size2, \
+                               &SRC.ub[size1], &maskvec.ub[size1], 1, \
+                               fUSE_LOOKUP_ADDRESS()); \
+        mem_store_vector_oddva(env, EA, EA, 0, size1, &SRC.ub[0], \
+                               &maskvec.ub[0], 1, \
+                               fUSE_LOOKUP_ADDRESS_BY_REV()); \
+    } while (0)
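+/* Iterate over the fVELEM(WIDTH) elements of a vector */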
+#define fVFOREACH(WIDTH, VAR) for (VAR = 0; VAR < fVELEM(WIDTH); VAR++)
+#define fVARRAY_ELEMENT_ACCESS(ARRAY, TYPE, INDEX) \
+    ARRAY.v[(INDEX) / (fVECSIZE() / (sizeof(ARRAY.TYPE[0])))].TYPE[(INDEX) % \
+    (fVECSIZE() / (sizeof(ARRAY.TYPE[0])))]
+/* Grabs the .tmp data, wherever it is, and clears the .tmp status */
+/* Used for vhist */
+static inline mmvector_t mmvec_vtmp_data(void)
+{
+    mmvector_t ret;
+    g_assert_not_reached();
+    return ret;
+}
+#define fTMPVDATA() mmvec_vtmp_data()
+#define fVSATDW(U, V) fVSATW(((((long long)U) << 32) | fZXTN(32, 64, V)))
+#define fVASL_SATHI(U, V) fVSATW(((U) << 1) | ((V) >> 31))
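+/* Widen to 2 * WIDTH bits, do the arithmetic, then saturate back to WIDTH */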
+#define fVUADDSAT(WIDTH, U, V) \
+    fVSATUN(WIDTH, fZXTN(WIDTH, 2 * WIDTH, U) + fZXTN(WIDTH, 2 * WIDTH, V))
+#define fVSADDSAT(WIDTH, U, V) \
+    fVSATN(WIDTH, fSXTN(WIDTH, 2 * WIDTH, U) + fSXTN(WIDTH, 2 * WIDTH, V))
+#define fVUSUBSAT(WIDTH, U, V) \
+    fVSATUN(WIDTH, fZXTN(WIDTH, 2 * WIDTH, U) - fZXTN(WIDTH, 2 * WIDTH, V))
+#define fVSSUBSAT(WIDTH, U, V) \
+    fVSATN(WIDTH, fSXTN(WIDTH, 2 * WIDTH, U) - fSXTN(WIDTH, 2 * WIDTH, V))
+#define fVAVGU(WIDTH, U, V) \
+    ((fZXTN(WIDTH, 2 * WIDTH, U) + fZXTN(WIDTH, 2 * WIDTH, V)) >> 1)
+#define fVAVGURND(WIDTH, U, V) \
+    ((fZXTN(WIDTH, 2 * WIDTH, U) + fZXTN(WIDTH, 2 * WIDTH, V) + 1) >> 1)
+#define fVNAVGU(WIDTH, U, V) \
+    ((fZXTN(WIDTH, 2 * WIDTH, U) - fZXTN(WIDTH, 2 * WIDTH, V)) >> 1)
+#define fVNAVGURNDSAT(WIDTH, U, V) \
+    fVSATUN(WIDTH, ((fZXTN(WIDTH, 2 * WIDTH, U) - \
+                     fZXTN(WIDTH, 2 * WIDTH, V) + 1) >> 1))
+#define fVAVGS(WIDTH, U, V) \
+    ((fSXTN(WIDTH, 2 * WIDTH, U) + fSXTN(WIDTH, 2 * WIDTH, V)) >> 1)
+#define fVAVGSRND(WIDTH, U, V) \
+    ((fSXTN(WIDTH, 2 * WIDTH, U) + fSXTN(WIDTH, 2 * WIDTH, V) + 1) >> 1)
+#define fVNAVGS(WIDTH, U, V) \
+    ((fSXTN(WIDTH, 2 * WIDTH, U) - fSXTN(WIDTH, 2 * WIDTH, V)) >> 1)
+#define fVNAVGSRND(WIDTH, U, V) \
+    ((fSXTN(WIDTH, 2 * WIDTH, U) - fSXTN(WIDTH, 2 * WIDTH, V) + 1) >> 1)
+#define fVNAVGSRNDSAT(WIDTH, U, V) \
+    fVSATN(WIDTH, ((fSXTN(WIDTH, 2 * WIDTH, U) - \
+                    fSXTN(WIDTH, 2 * WIDTH, V) + 1) >> 1))
+#define fVNOROUND(VAL, SHAMT) VAL
+#define fVNOSAT(VAL) VAL
+#define fVROUND(VAL, SHAMT) \
+    ((VAL) + (((SHAMT) > 0) ? (1LL << ((SHAMT) - 1)) : 0))
+#define fCARRY_FROM_ADD32(A, B, C) \
+    (((fZXTN(32, 64, A) + fZXTN(32, 64, B) + C) >> 32) & 1)
+#define fUARCH_NOTE_PUMP_4X()
+#define fUARCH_NOTE_PUMP_2X()
+
 #endif
-- 
2.7.4

