From: Taylor Simpson <tsimpson@quicinc.com>
To: qemu-devel@nongnu.org
Cc: ale@rev.ng, peter.maydell@linaro.org, bcain@quicinc.com,
	richard.henderson@linaro.org, tsimpson@quicinc.com,
	philmd@redhat.com
Subject: [PATCH 06/20] Hexagon HVX (target/hexagon) macros
Date: Mon,  5 Jul 2021 18:34:20 -0500	[thread overview]
Message-ID: <1625528074-19440-7-git-send-email-tsimpson@quicinc.com> (raw)
In-Reply-To: <1625528074-19440-1-git-send-email-tsimpson@quicinc.com>

Macros to interface with the generator
Macros referenced in instruction semantics

Signed-off-by: Taylor Simpson <tsimpson@quicinc.com>
---
 target/hexagon/macros.h       |  22 ++
 target/hexagon/mmvec/macros.h | 478 ++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 500 insertions(+)
 create mode 100644 target/hexagon/mmvec/macros.h
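
A note on the new fV* saturate macros below: as the diff shows, they mirror
the existing scalar fSATN()/fSATUN() macros but omit the fSET_OVERFLOW()
call, presumably because HVX saturation does not update the USR overflow
bit. A minimal standalone sketch of the two behaviors (sat_n and
overflow_flag are illustrative stand-ins, not the QEMU code):

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool overflow_flag;  /* stands in for the USR overflow state */

/* Saturate val to an n-bit signed range; optionally record overflow. */
static int64_t sat_n(int n, int64_t val, bool set_flag)
{
    int64_t max = (1LL << (n - 1)) - 1;
    int64_t min = -(1LL << (n - 1));
    if (val >= min && val <= max) {
        return val;                    /* representable: no saturation */
    }
    if (set_flag) {
        overflow_flag = true;          /* scalar fSATN() path */
    }
    return val < 0 ? min : max;
}

int main(void)
{
    /* 16-bit saturation of an out-of-range value */
    printf("%" PRId64 "\n", sat_n(16, 40000, false)); /* 32767, like fVSATH */
    printf("%" PRId64 "\n", sat_n(16, 40000, true));  /* 32767, like fSATH */
    printf("flag=%d\n", overflow_flag);  /* 1: only the scalar form set it */
    return 0;
}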

diff --git a/target/hexagon/macros.h b/target/hexagon/macros.h
index 094b8da..852183f 100644
--- a/target/hexagon/macros.h
+++ b/target/hexagon/macros.h
@@ -269,6 +269,10 @@ static inline void gen_pred_cancel(TCGv pred, int slot_num)
 
 #define fNEWREG_ST(VAL) (VAL)
 
+#define fVSATUVALN(N, VAL) \
+    ({ \
+        (((int)(VAL)) < 0) ? 0 : ((1LL << (N)) - 1); \
+    })
 #define fSATUVALN(N, VAL) \
     ({ \
         fSET_OVERFLOW(); \
@@ -279,10 +283,16 @@ static inline void gen_pred_cancel(TCGv pred, int slot_num)
         fSET_OVERFLOW(); \
         ((VAL) < 0) ? (-(1LL << ((N) - 1))) : ((1LL << ((N) - 1)) - 1); \
     })
+#define fVSATVALN(N, VAL) \
+    ({ \
+        ((VAL) < 0) ? (-(1LL << ((N) - 1))) : ((1LL << ((N) - 1)) - 1); \
+    })
 #define fZXTN(N, M, VAL) (((N) != 0) ? extract64((VAL), 0, (N)) : 0LL)
 #define fSXTN(N, M, VAL) (((N) != 0) ? sextract64((VAL), 0, (N)) : 0LL)
 #define fSATN(N, VAL) \
     ((fSXTN(N, 64, VAL) == (VAL)) ? (VAL) : fSATVALN(N, VAL))
+#define fVSATN(N, VAL) \
+    ((fSXTN(N, 64, VAL) == (VAL)) ? (VAL) : fVSATVALN(N, VAL))
 #define fADDSAT64(DST, A, B) \
     do { \
         uint64_t __a = fCAST8u(A); \
@@ -305,12 +315,18 @@ static inline void gen_pred_cancel(TCGv pred, int slot_num)
             DST = __sum; \
         } \
     } while (0)
+#define fVSATUN(N, VAL) \
+    ((fZXTN(N, 64, VAL) == (VAL)) ? (VAL) : fVSATUVALN(N, VAL))
 #define fSATUN(N, VAL) \
     ((fZXTN(N, 64, VAL) == (VAL)) ? (VAL) : fSATUVALN(N, VAL))
 #define fSATH(VAL) (fSATN(16, VAL))
 #define fSATUH(VAL) (fSATUN(16, VAL))
+#define fVSATH(VAL) (fVSATN(16, VAL))
+#define fVSATUH(VAL) (fVSATUN(16, VAL))
 #define fSATUB(VAL) (fSATUN(8, VAL))
 #define fSATB(VAL) (fSATN(8, VAL))
+#define fVSATUB(VAL) (fVSATUN(8, VAL))
+#define fVSATB(VAL) (fVSATN(8, VAL))
 #define fIMMEXT(IMM) (IMM = IMM)
 #define fMUST_IMMEXT(IMM) fIMMEXT(IMM)
 
@@ -417,6 +433,8 @@ static inline TCGv gen_read_ireg(TCGv result, TCGv val, int shift)
 #define fCAST4s(A) ((int32_t)(A))
 #define fCAST8u(A) ((uint64_t)(A))
 #define fCAST8s(A) ((int64_t)(A))
+#define fCAST2_2s(A) ((int16_t)(A))
+#define fCAST2_2u(A) ((uint16_t)(A))
 #define fCAST4_4s(A) ((int32_t)(A))
 #define fCAST4_4u(A) ((uint32_t)(A))
 #define fCAST4_8s(A) ((int64_t)((int32_t)(A)))
@@ -514,7 +532,9 @@ static inline TCGv gen_read_ireg(TCGv result, TCGv val, int shift)
 #define fPM_M(REG, MVAL)    do { REG = REG + (MVAL); } while (0)
 #endif
 #define fSCALE(N, A) (((int64_t)(A)) << N)
+#define fVSATW(A) fVSATN(32, ((long long)(A)))
 #define fSATW(A) fSATN(32, ((long long)A))
+#define fVSAT(A) fVSATN(32, (A))
 #define fSAT(A) fSATN(32, (A))
 #define fSAT_ORIG_SHL(A, ORIG_REG) \
     ((((int32_t)((fSAT(A)) ^ ((int32_t)(ORIG_REG)))) < 0) \
@@ -651,12 +671,14 @@ static inline TCGv gen_read_ireg(TCGv result, TCGv val, int shift)
             fSETBIT(j, DST, VAL); \
         } \
     } while (0)
+#define fCOUNTONES_2(VAL) ctpop16(VAL)
 #define fCOUNTONES_4(VAL) ctpop32(VAL)
 #define fCOUNTONES_8(VAL) ctpop64(VAL)
 #define fBREV_8(VAL) revbit64(VAL)
 #define fBREV_4(VAL) revbit32(VAL)
 #define fCL1_8(VAL) clo64(VAL)
 #define fCL1_4(VAL) clo32(VAL)
+#define fCL1_2(VAL) count_leading_ones_2(VAL)
 #define fINTERLEAVE(ODD, EVEN) interleave(ODD, EVEN)
 #define fDEINTERLEAVE(MIXED) deinterleave(MIXED)
 #define fHIDE(A) A
diff --git a/target/hexagon/mmvec/macros.h b/target/hexagon/mmvec/macros.h
new file mode 100644
index 0000000..52a5b87
--- /dev/null
+++ b/target/hexagon/mmvec/macros.h
@@ -0,0 +1,478 @@
+/*
+ *  Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HEXAGON_MMVEC_MACROS_H
+#define HEXAGON_MMVEC_MACROS_H
+
+#include "qemu/osdep.h"
+#include "qemu/host-utils.h"
+#include "arch.h"
+#include "mmvec/system_ext_mmvec.h"
+
+#ifndef QEMU_GENERATE
+#define VdV      (*(MMVector *)(VdV_void))
+#define VsV      (*(MMVector *)(VsV_void))
+#define VuV      (*(MMVector *)(VuV_void))
+#define VvV      (*(MMVector *)(VvV_void))
+#define VwV      (*(MMVector *)(VwV_void))
+#define VxV      (*(MMVector *)(VxV_void))
+#define VyV      (*(MMVector *)(VyV_void))
+
+#define VddV     (*(MMVectorPair *)(VddV_void))
+#define VuuV     (*(MMVectorPair *)(VuuV_void))
+#define VvvV     (*(MMVectorPair *)(VvvV_void))
+#define VxxV     (*(MMVectorPair *)(VxxV_void))
+
+#define QeV      (*(MMQReg *)(QeV_void))
+#define QdV      (*(MMQReg *)(QdV_void))
+#define QsV      (*(MMQReg *)(QsV_void))
+#define QtV      (*(MMQReg *)(QtV_void))
+#define QuV      (*(MMQReg *)(QuV_void))
+#define QvV      (*(MMQReg *)(QvV_void))
+#define QxV      (*(MMQReg *)(QxV_void))
+#endif
+
+#define NEW_WRITTEN(NUM) ((env->VRegs_select >> (NUM)) & 1)
+#define TMP_WRITTEN(NUM) ((env->VRegs_updated_tmp >> (NUM)) & 1)
+
+#define TYPE_VOID(X)   __builtin_types_compatible_p(typeof(X), void *)
+#define TYPE_MMVECTOR(X)   __builtin_types_compatible_p(typeof(X), MMVector)
+
+#define LOG_VREG_WRITE_FUNC(X) \
+    __builtin_choose_expr(TYPE_VOID(X), \
+        log_vreg_write, \
+        __builtin_choose_expr(TYPE_MMVECTOR(X), \
+            log_mmvector_write, (void)0))
+#define LOG_VREG_WRITE(NUM, VAR, VNEW) \
+    LOG_VREG_WRITE_FUNC(VAR)(env, NUM, VAR, VNEW)
+
+#define READ_EXT_VREG(NUM, VAR, VTMP) \
+    do { \
+        VAR = ((NEW_WRITTEN(NUM)) ? env->future_VRegs[NUM] \
+                                  : env->VRegs[NUM]); \
+        VAR = ((TMP_WRITTEN(NUM)) ? env->tmp_VRegs[NUM] : VAR); \
+        if (VTMP == EXT_TMP) { \
+            if (env->VRegs_updated & ((VRegMask)1) << (NUM)) { \
+                VAR = env->future_VRegs[NUM]; \
+                env->VRegs_updated ^= ((VRegMask)1) << (NUM); \
+            } \
+        } \
+    } while (0)
+
+
+#define WRITE_EXT_VREG(NUM, VAR, VNEW)   LOG_VREG_WRITE(NUM, VAR, VNEW)
+
+#define LOG_VTCM_BYTE(VA, MASK, VAL, IDX) \
+    do { \
+        env->vtcm_log.data.ub[IDX] = (VAL); \
+        env->vtcm_log.mask.ub[IDX] = (MASK); \
+        env->vtcm_log.va[IDX] = (VA); \
+    } while (0)
+
+#define fNOTQ(VAL) \
+    ({ \
+        MMQReg _ret;  \
+        int _i_;  \
+        for (_i_ = 0; _i_ < fVECSIZE() / 64; _i_++) { \
+            _ret.ud[_i_] = ~VAL.ud[_i_]; \
+        } \
+        _ret;\
+     })
+#define fGETQBITS(REG, WIDTH, MASK, BITNO) \
+    ((MASK) & (REG.w[(BITNO) >> 5] >> ((BITNO) & 0x1f)))
+#define fGETQBIT(REG, BITNO) fGETQBITS(REG, 1, 1, BITNO)
+#define fGENMASKW(QREG, IDX) \
+    (((fGETQBIT(QREG, (IDX * 4 + 0)) ? 0xFF : 0x0) << 0)  | \
+     ((fGETQBIT(QREG, (IDX * 4 + 1)) ? 0xFF : 0x0) << 8)  | \
+     ((fGETQBIT(QREG, (IDX * 4 + 2)) ? 0xFF : 0x0) << 16) | \
+     ((fGETQBIT(QREG, (IDX * 4 + 3)) ? 0xFF : 0x0) << 24))
+#define fGETNIBBLE(IDX, SRC) (fSXTN(4, 8, (SRC >> (4 * IDX)) & 0xF))
+#define fGETCRUMB(IDX, SRC) (fSXTN(2, 8, (SRC >> (2 * IDX)) & 0x3))
+#define fGETCRUMB_SYMMETRIC(IDX, SRC) \
+    ((fGETCRUMB(IDX, SRC) >= 0 ? (2 - fGETCRUMB(IDX, SRC)) \
+                               : fGETCRUMB(IDX, SRC)))
+#define fGENMASKH(QREG, IDX) \
+    (((fGETQBIT(QREG, (IDX * 2 + 0)) ? 0xFF : 0x0) << 0) | \
+     ((fGETQBIT(QREG, (IDX * 2 + 1)) ? 0xFF : 0x0) << 8))
+#define fGETMASKW(VREG, QREG, IDX) (VREG.w[IDX] & fGENMASKW((QREG), IDX))
+#define fGETMASKH(VREG, QREG, IDX) (VREG.h[IDX] & fGENMASKH((QREG), IDX))
+#define fCONDMASK8(QREG, IDX, YESVAL, NOVAL) \
+    (fGETQBIT(QREG, IDX) ? (YESVAL) : (NOVAL))
+#define fCONDMASK16(QREG, IDX, YESVAL, NOVAL) \
+    ((fGENMASKH(QREG, IDX) & (YESVAL)) | \
+     (fGENMASKH(fNOTQ(QREG), IDX) & (NOVAL)))
+#define fCONDMASK32(QREG, IDX, YESVAL, NOVAL) \
+    ((fGENMASKW(QREG, IDX) & (YESVAL)) | \
+     (fGENMASKW(fNOTQ(QREG), IDX) & (NOVAL)))
+#define fSETQBITS(REG, WIDTH, MASK, BITNO, VAL) \
+    do { \
+        uint32_t __TMP = (VAL); \
+        REG.w[(BITNO) >> 5] &= ~((MASK) << ((BITNO) & 0x1f)); \
+        REG.w[(BITNO) >> 5] |= (((__TMP) & (MASK)) << ((BITNO) & 0x1f)); \
+    } while (0)
+#define fSETQBIT(REG, BITNO, VAL) fSETQBITS(REG, 1, 1, BITNO, VAL)
+#define fVBYTES() (fVECSIZE())
+#define fVALIGN(ADDR, LOG2_ALIGNMENT) (ADDR = ADDR & ~(LOG2_ALIGNMENT - 1))
+#define fVLASTBYTE(ADDR, LOG2_ALIGNMENT) (ADDR = ADDR | (LOG2_ALIGNMENT - 1))
+#define fVELEM(WIDTH) ((fVECSIZE() * 8) / WIDTH)
+#define fVECLOGSIZE() (7)
+#define fVECSIZE() (1 << fVECLOGSIZE())
+#define fSWAPB(A, B) do { uint8_t tmp = A; A = B; B = tmp; } while (0)
+static inline MMVector mmvec_zero_vector(void)
+{
+    MMVector ret;
+    memset(&ret, 0, sizeof(ret));
+    return ret;
+}
+#define fVZERO() mmvec_zero_vector()
+#define fNEWVREG(VNUM) \
+    ((env->VRegs_updated & (((VRegMask)1) << VNUM)) ? env->future_VRegs[VNUM] \
+                                                    : mmvec_zero_vector())
+#define fV_AL_CHECK(EA, MASK) \
+    if ((EA) & (MASK)) { \
+        warn("aligning misaligned vector. EA=%08x", (EA)); \
+    }
+#define fSCATTER_INIT(REGION_START, LENGTH, ELEMENT_SIZE) \
+    mem_vector_scatter_init(env, slot, REGION_START, LENGTH, ELEMENT_SIZE)
+#define fGATHER_INIT(REGION_START, LENGTH, ELEMENT_SIZE) \
+    mem_vector_gather_init(env, slot, REGION_START, LENGTH, ELEMENT_SIZE)
+#define fSCATTER_FINISH(OP)
+#define fGATHER_FINISH()
+#define fLOG_SCATTER_OP(SIZE) \
+    do { \
+        env->vtcm_log.op = true; \
+        env->vtcm_log.op_size = SIZE; \
+    } while (0)
+#define fVLOG_VTCM_WORD_INCREMENT(EA, OFFSET, INC, IDX, ALIGNMENT, LEN) \
+    do { \
+        int log_byte = 0; \
+        target_ulong va = EA; \
+        target_ulong va_high = EA + LEN; \
+        for (int i0 = 0; i0 < 4; i0++) { \
+            log_byte = (va + i0) <= va_high; \
+            LOG_VTCM_BYTE(va + i0, log_byte, INC.ub[4 * IDX + i0], \
+                          4 * IDX + i0); \
+        } \
+    } while (0)
+#define fVLOG_VTCM_HALFWORD_INCREMENT(EA, OFFSET, INC, IDX, ALIGNMENT, LEN) \
+    do { \
+        int log_byte = 0; \
+        target_ulong va = EA; \
+        target_ulong va_high = EA + LEN; \
+        for (int i0 = 0; i0 < 2; i0++) { \
+            log_byte = (va + i0) <= va_high; \
+            LOG_VTCM_BYTE(va + i0, log_byte, INC.ub[2 * IDX + i0], \
+                          2 * IDX + i0); \
+        } \
+    } while (0)
+
+#define fVLOG_VTCM_HALFWORD_INCREMENT_DV(EA, OFFSET, INC, IDX, IDX2, IDX_H, \
+                                         ALIGNMENT, LEN) \
+    do { \
+        int log_byte = 0; \
+        target_ulong va = EA; \
+        target_ulong va_high = EA + LEN; \
+        for (int i0 = 0; i0 < 2; i0++) { \
+            log_byte = (va + i0) <= va_high; \
+            LOG_VTCM_BYTE(va + i0, log_byte, INC.ub[2 * IDX + i0], \
+                          2 * IDX + i0); \
+        } \
+    } while (0)
+
+/* NOTE - Will this always be tmp_VRegs[0]? */
+#define GATHER_FUNCTION(EA, OFFSET, IDX, LEN, ELEMENT_SIZE, BANK_IDX, QVAL) \
+    do { \
+        int i0; \
+        target_ulong va = EA; \
+        target_ulong va_high = EA + LEN; \
+        int log_bank = 0; \
+        int log_byte = 0; \
+        for (i0 = 0; i0 < ELEMENT_SIZE; i0++) { \
+            log_byte = ((va + i0) <= va_high) && QVAL; \
+            log_bank |= (log_byte << i0); \
+            uint8_t B; \
+            get_user_u8(B, EA + i0); \
+            env->tmp_VRegs[0].ub[ELEMENT_SIZE * IDX + i0] = B; \
+            LOG_VTCM_BYTE(va + i0, log_byte, B, ELEMENT_SIZE * IDX + i0); \
+        } \
+    } while (0)
+#define fVLOG_VTCM_GATHER_WORD(EA, OFFSET, IDX, LEN) \
+    do { \
+        GATHER_FUNCTION(EA, OFFSET, IDX, LEN, 4, IDX, 1); \
+    } while (0)
+#define fVLOG_VTCM_GATHER_HALFWORD(EA, OFFSET, IDX, LEN) \
+    do { \
+        GATHER_FUNCTION(EA, OFFSET, IDX, LEN, 2, IDX, 1); \
+    } while (0)
+#define fVLOG_VTCM_GATHER_HALFWORD_DV(EA, OFFSET, IDX, IDX2, IDX_H, LEN) \
+    do { \
+        GATHER_FUNCTION(EA, OFFSET, IDX, LEN, 2, (2 * IDX2 + IDX_H), 1); \
+    } while (0)
+#define fVLOG_VTCM_GATHER_WORDQ(EA, OFFSET, IDX, Q, LEN) \
+    do { \
+        GATHER_FUNCTION(EA, OFFSET, IDX, LEN, 4, IDX, \
+                        fGETQBIT(QsV, 4 * IDX + i0)); \
+    } while (0)
+#define fVLOG_VTCM_GATHER_HALFWORDQ(EA, OFFSET, IDX, Q, LEN) \
+    do { \
+        GATHER_FUNCTION(EA, OFFSET, IDX, LEN, 2, IDX, \
+                        fGETQBIT(QsV, 2 * IDX + i0)); \
+    } while (0)
+#define fVLOG_VTCM_GATHER_HALFWORDQ_DV(EA, OFFSET, IDX, IDX2, IDX_H, Q, LEN) \
+    do { \
+        GATHER_FUNCTION(EA, OFFSET, IDX, LEN, 2, (2 * IDX2 + IDX_H), \
+                        fGETQBIT(QsV, 2 * IDX + i0)); \
+    } while (0)
+#define SCATTER_OP_WRITE_TO_MEM(TYPE) \
+    do { \
+        for (int i = 0; i < env->vtcm_log.size; i += sizeof(TYPE)) { \
+            if (env->vtcm_log.mask.ub[i] != 0) { \
+                TYPE dst = 0; \
+                TYPE inc = 0; \
+                for (int j = 0; j < sizeof(TYPE); j++) { \
+                    uint8_t val; \
+                    get_user_u8(val, env->vtcm_log.va[i + j]); \
+                    dst |= val << (8 * j); \
+                    inc |= env->vtcm_log.data.ub[j + i] << (8 * j); \
+                    env->vtcm_log.mask.ub[j + i] = 0; \
+                    env->vtcm_log.data.ub[j + i] = 0; \
+                } \
+                dst += inc; \
+                for (int j = 0; j < sizeof(TYPE); j++) { \
+                    put_user_u8((dst >> (8 * j)) & 0xFF, \
+                        env->vtcm_log.va[i + j]);  \
+                } \
+            } \
+        } \
+    } while (0)
+#define SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, ELEM_SIZE, BANK_IDX, QVAL, IN) \
+    do { \
+        int i0; \
+        target_ulong va = EA; \
+        target_ulong va_high = EA + LEN; \
+        int log_bank = 0; \
+        int log_byte = 0; \
+        for (i0 = 0; i0 < ELEM_SIZE; i0++) { \
+            log_byte = ((va + i0) <= va_high) && QVAL; \
+            log_bank |= (log_byte << i0); \
+            LOG_VTCM_BYTE(va + i0, log_byte, IN.ub[ELEM_SIZE * IDX + i0], \
+                          ELEM_SIZE * IDX + i0); \
+        } \
+    } while (0)
+#define fVLOG_VTCM_HALFWORD(EA, OFFSET, IN, IDX, LEN) \
+    do { \
+        SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, 2, IDX, 1, IN); \
+    } while (0)
+#define fVLOG_VTCM_WORD(EA, OFFSET, IN, IDX, LEN) \
+    do { \
+        SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, 4, IDX, 1, IN); \
+    } while (0)
+#define fVLOG_VTCM_HALFWORDQ(EA, OFFSET, IN, IDX, Q, LEN) \
+    do { \
+        SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, 2, IDX, \
+                         fGETQBIT(QsV, 2 * IDX + i0), IN); \
+    } while (0)
+#define fVLOG_VTCM_WORDQ(EA, OFFSET, IN, IDX, Q, LEN) \
+    do { \
+        SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, 4, IDX, \
+                         fGETQBIT(QsV, 4 * IDX + i0), IN); \
+    } while (0)
+#define fVLOG_VTCM_HALFWORD_DV(EA, OFFSET, IN, IDX, IDX2, IDX_H, LEN) \
+    do { \
+        SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, 2, \
+                         (2 * IDX2 + IDX_H), 1, IN); \
+    } while (0)
+#define fVLOG_VTCM_HALFWORDQ_DV(EA, OFFSET, IN, IDX, Q, IDX2, IDX_H, LEN) \
+    do { \
+        SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, 2, (2 * IDX2 + IDX_H), \
+                         fGETQBIT(QsV, 2 * IDX + i0), IN); \
+    } while (0)
+#define fSTORERELEASE(EA, TYPE) \
+    do { \
+        fV_AL_CHECK(EA, fVECSIZE() - 1); \
+    } while (0)
+#define fLOADMMV_AL(EA, ALIGNMENT, LEN, DST) \
+    do { \
+        fV_AL_CHECK(EA, ALIGNMENT - 1); \
+        mem_load_vector(env, EA & ~(ALIGNMENT - 1), LEN, &DST.ub[0]); \
+    } while (0)
+#define fLOADMMV(EA, DST) fLOADMMV_AL(EA, fVECSIZE(), fVECSIZE(), DST)
+#define fLOADMMVU_AL(EA, ALIGNMENT, LEN, DST) \
+    do { \
+        uint32_t size2 = (EA) & (ALIGNMENT - 1); \
+        uint32_t size1 = LEN - size2; \
+        mem_load_vector(env, EA + size1, size2, &DST.ub[size1]); \
+        mem_load_vector(env, EA, size1, &DST.ub[0]); \
+    } while (0)
+#define fLOADMMVU(EA, DST) \
+    do { \
+        if ((EA & (fVECSIZE() - 1)) == 0) { \
+            fLOADMMV_AL(EA, fVECSIZE(), fVECSIZE(), DST); \
+        } else { \
+            fLOADMMVU_AL(EA, fVECSIZE(), fVECSIZE(), DST); \
+        } \
+    } while (0)
+#define fSTOREMMV_AL(EA, ALIGNMENT, LEN, SRC) \
+    do { \
+        fV_AL_CHECK(EA, ALIGNMENT - 1); \
+        mem_store_vector(env, EA & ~(ALIGNMENT - 1), slot, LEN, \
+                         &SRC.ub[0], NULL, false); \
+    } while (0)
+#define fSTOREMMV(EA, SRC) fSTOREMMV_AL(EA, fVECSIZE(), fVECSIZE(), SRC)
+#define fSTOREMMVQ_AL(EA, ALIGNMENT, LEN, SRC, MASK) \
+    do { \
+        MMVector maskvec; \
+        int i; \
+        for (i = 0; i < fVECSIZE(); i++) { \
+            maskvec.ub[i] = fGETQBIT(MASK, i); \
+        } \
+        mem_store_vector(env, EA & ~(ALIGNMENT - 1), slot, LEN, \
+                         &SRC.ub[0], &maskvec.ub[0], false); \
+    } while (0)
+#define fSTOREMMVQ(EA, SRC, MASK) \
+    fSTOREMMVQ_AL(EA, fVECSIZE(), fVECSIZE(), SRC, MASK)
+#define fSTOREMMVNQ_AL(EA, ALIGNMENT, LEN, SRC, MASK) \
+    do { \
+        MMVector maskvec; \
+        int i; \
+        for (i = 0; i < fVECSIZE(); i++) { \
+            maskvec.ub[i] = fGETQBIT(MASK, i); \
+        } \
+        fV_AL_CHECK(EA, ALIGNMENT - 1); \
+        mem_store_vector(env, EA & ~(ALIGNMENT - 1), slot, LEN, \
+                         &SRC.ub[0], &maskvec.ub[0], true); \
+    } while (0)
+#define fSTOREMMVNQ(EA, SRC, MASK) \
+    fSTOREMMVNQ_AL(EA, fVECSIZE(), fVECSIZE(), SRC, MASK)
+#define fSTOREMMVU_AL(EA, ALIGNMENT, LEN, SRC) \
+    do { \
+        uint32_t size1 = ALIGNMENT - ((EA) & (ALIGNMENT - 1)); \
+        uint32_t size2; \
+        if (size1 > LEN) { \
+            size1 = LEN; \
+        } \
+        size2 = LEN - size1; \
+        mem_store_vector(env, EA + size1, 1, size2, \
+                         &SRC.ub[size1], NULL, false); \
+        mem_store_vector(env, EA, 0, size1, &SRC.ub[0], NULL, false); \
+    } while (0)
+#define fSTOREMMVU(EA, SRC) \
+    do { \
+        if ((EA & (fVECSIZE() - 1)) == 0) { \
+            fSTOREMMV_AL(EA, fVECSIZE(), fVECSIZE(), SRC); \
+        } else { \
+            fSTOREMMVU_AL(EA, fVECSIZE(), fVECSIZE(), SRC); \
+        } \
+    } while (0)
+#define fSTOREMMVQU_AL(EA, ALIGNMENT, LEN, SRC, MASK) \
+    do { \
+        uint32_t size1 = ALIGNMENT - ((EA) & (ALIGNMENT - 1)); \
+        uint32_t size2; \
+        MMVector maskvec; \
+        int i; \
+        for (i = 0; i < fVECSIZE(); i++) { \
+            maskvec.ub[i] = fGETQBIT(MASK, i); \
+        } \
+        if (size1 > LEN) { \
+            size1 = LEN; \
+        } \
+        size2 = LEN - size1; \
+        mem_store_vector(env, EA + size1, 1, size2, \
+                         &SRC.ub[size1], &maskvec.ub[size1], false); \
+        mem_store_vector(env, EA, 0, size1, &SRC.ub[0], \
+                         &maskvec.ub[0], false); \
+    } while (0)
+#define fSTOREMMVNQU_AL(EA, ALIGNMENT, LEN, SRC, MASK) \
+    do { \
+        uint32_t size1 = ALIGNMENT - ((EA) & (ALIGNMENT - 1)); \
+        uint32_t size2; \
+        MMVector maskvec; \
+        int i; \
+        for (i = 0; i < fVECSIZE(); i++) { \
+            maskvec.ub[i] = fGETQBIT(MASK, i); \
+        } \
+        if (size1 > LEN) { \
+            size1 = LEN; \
+        } \
+        size2 = LEN - size1; \
+        mem_store_vector(env, EA + size1, 1, size2, \
+                         &SRC.ub[size1], &maskvec.ub[size1], true); \
+        mem_store_vector(env, EA, 0, size1, &SRC.ub[0], \
+                         &maskvec.ub[0], true); \
+    } while (0)
+#define fVFOREACH(WIDTH, VAR) for (VAR = 0; VAR < fVELEM(WIDTH); VAR++)
+#define fVARRAY_ELEMENT_ACCESS(ARRAY, TYPE, INDEX) \
+    ARRAY.v[(INDEX) / (fVECSIZE() / (sizeof(ARRAY.TYPE[0])))].TYPE[(INDEX) % \
+    (fVECSIZE() / (sizeof(ARRAY.TYPE[0])))]
+
+#ifndef QEMU_GENERATE
+/* Grabs the .tmp data, wherever it is, and clears the .tmp status */
+/* Used for vhist */
+static inline MMVector mmvec_vtmp_data(CPUHexagonState *env)
+{
+    VRegMask vsel = env->VRegs_updated_tmp;
+    MMVector ret;
+    int idx = clo32(~revbit32(vsel));
+    if (vsel == 0) {
+        printf("[UNDEFINED] no .tmp load when implicitly required...");
+    }
+    ret = env->tmp_VRegs[idx];
+    env->VRegs_updated_tmp = 0;
+    return ret;
+}
+#define fTMPVDATA() mmvec_vtmp_data(env)
+#endif
+#define fVSATDW(U, V) fVSATW(((((long long)U) << 32) | fZXTN(32, 64, V)))
+#define fVASL_SATHI(U, V) fVSATW(((U) << 1) | ((V) >> 31))
+#define fVUADDSAT(WIDTH, U, V) \
+    fVSATUN(WIDTH, fZXTN(WIDTH, 2 * WIDTH, U) + fZXTN(WIDTH, 2 * WIDTH, V))
+#define fVSADDSAT(WIDTH, U, V) \
+    fVSATN(WIDTH, fSXTN(WIDTH, 2 * WIDTH, U) + fSXTN(WIDTH, 2 * WIDTH, V))
+#define fVUSUBSAT(WIDTH, U, V) \
+    fVSATUN(WIDTH, fZXTN(WIDTH, 2 * WIDTH, U) - fZXTN(WIDTH, 2 * WIDTH, V))
+#define fVSSUBSAT(WIDTH, U, V) \
+    fVSATN(WIDTH, fSXTN(WIDTH, 2 * WIDTH, U) - fSXTN(WIDTH, 2 * WIDTH, V))
+#define fVAVGU(WIDTH, U, V) \
+    ((fZXTN(WIDTH, 2 * WIDTH, U) + fZXTN(WIDTH, 2 * WIDTH, V)) >> 1)
+#define fVAVGURND(WIDTH, U, V) \
+    ((fZXTN(WIDTH, 2 * WIDTH, U) + fZXTN(WIDTH, 2 * WIDTH, V) + 1) >> 1)
+#define fVNAVGU(WIDTH, U, V) \
+    ((fZXTN(WIDTH, 2 * WIDTH, U) - fZXTN(WIDTH, 2 * WIDTH, V)) >> 1)
+#define fVNAVGURNDSAT(WIDTH, U, V) \
+    fVSATUN(WIDTH, ((fZXTN(WIDTH, 2 * WIDTH, U) - \
+                     fZXTN(WIDTH, 2 * WIDTH, V) + 1) >> 1))
+#define fVAVGS(WIDTH, U, V) \
+    ((fSXTN(WIDTH, 2 * WIDTH, U) + fSXTN(WIDTH, 2 * WIDTH, V)) >> 1)
+#define fVAVGSRND(WIDTH, U, V) \
+    ((fSXTN(WIDTH, 2 * WIDTH, U) + fSXTN(WIDTH, 2 * WIDTH, V) + 1) >> 1)
+#define fVNAVGS(WIDTH, U, V) \
+    ((fSXTN(WIDTH, 2 * WIDTH, U) - fSXTN(WIDTH, 2 * WIDTH, V)) >> 1)
+#define fVNAVGSRND(WIDTH, U, V) \
+    ((fSXTN(WIDTH, 2 * WIDTH, U) - fSXTN(WIDTH, 2 * WIDTH, V) + 1) >> 1)
+#define fVNAVGSRNDSAT(WIDTH, U, V) \
+    fVSATN(WIDTH, ((fSXTN(WIDTH, 2 * WIDTH, U) - \
+                    fSXTN(WIDTH, 2 * WIDTH, V) + 1) >> 1))
+#define fVNOROUND(VAL, SHAMT) VAL
+#define fVNOSAT(VAL) VAL
+#define fVROUND(VAL, SHAMT) \
+    ((VAL) + (((SHAMT) > 0) ? (1LL << ((SHAMT) - 1)) : 0))
+#define fCARRY_FROM_ADD32(A, B, C) \
+    (((fZXTN(32, 64, A) + fZXTN(32, 64, B) + C) >> 32) & 1)
+#define fUARCH_NOTE_PUMP_4X()
+#define fUARCH_NOTE_PUMP_2X()
+
+#define IV1DEAD()
+#endif
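
A worked note on the unaligned store macros above: fSTOREMMVU_AL (and the
masked fSTOREMMVQU_AL/fSTOREMMVNQU_AL variants) split an unaligned vector
store into two accesses, size1 bytes up to the next alignment boundary and
size2 = LEN - size1 bytes after it; the size1 > LEN clamp covers the case
where the whole store fits before the boundary. A small standalone sketch
with made-up example values (alignment, len, and ea are illustrative, not
part of the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint32_t alignment = 128;   /* fVECSIZE() for 128-byte HVX */
    const uint32_t len = 128;
    uint32_t ea = 0x1005;             /* example unaligned address */

    /* bytes remaining before the next 128-byte boundary */
    uint32_t size1 = alignment - (ea & (alignment - 1));
    if (size1 > len) {
        size1 = len;                  /* whole store fits in one chunk */
    }
    uint32_t size2 = len - size1;

    /* first chunk: bytes [0, size1) at ea; second: bytes [size1, len) */
    printf("chunk1: %u bytes at 0x%x\n", size1, ea);          /* 123 @ 0x1005 */
    printf("chunk2: %u bytes at 0x%x\n", size2, ea + size1);  /* 5 @ 0x1080 */
    return 0;
}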
-- 
2.7.4


Thread overview: 40+ messages
2021-07-05 23:34 [PATCH 00/20] Hexagon HVX (target/hexagon) patch series Taylor Simpson
2021-07-05 23:34 ` [PATCH 01/20] Hexagon HVX (target/hexagon) README Taylor Simpson
2021-07-12  8:16   ` Rob Landley
2021-07-12 13:42     ` Brian Cain
2021-07-19  1:10       ` Rob Landley
2021-07-19 13:39         ` Brian Cain
2021-07-19 16:19           ` Sid Manning
2021-07-26  7:57             ` Rob Landley
2021-07-26  8:54               ` Rob Landley
2021-07-26 13:59                 ` Taylor Simpson
2021-07-28  8:11                   ` Rob Landley
2021-11-25  6:26                   ` Rob Landley
2021-07-05 23:34 ` [PATCH 02/20] Hexagon HVX (target/hexagon) add Hexagon Vector eXtensions (HVX) to core Taylor Simpson
2021-07-25 13:08   ` Richard Henderson
2021-07-26  4:02     ` Taylor Simpson
2021-07-27 17:21       ` Taylor Simpson
2021-07-05 23:34 ` [PATCH 03/20] Hexagon HVX (target/hexagon) register names Taylor Simpson
2021-07-25 13:10   ` Richard Henderson
2021-07-05 23:34 ` [PATCH 04/20] Hexagon HVX (target/hexagon) support in gdbstub Taylor Simpson
2021-07-05 23:34 ` [PATCH 05/20] Hexagon HVX (target/hexagon) instruction attributes Taylor Simpson
2021-07-05 23:34 ` Taylor Simpson [this message]
2021-07-25 13:13   ` [PATCH 06/20] Hexagon HVX (target/hexagon) macros Richard Henderson
2021-07-05 23:34 ` [PATCH 07/20] Hexagon HVX (target/hexagon) import macro definitions Taylor Simpson
2021-07-05 23:34 ` [PATCH 08/20] Hexagon HVX (target/hexagon) semantics generator Taylor Simpson
2021-07-05 23:34 ` [PATCH 09/20] Hexagon HVX (target/hexagon) semantics generator - part 2 Taylor Simpson
2021-07-05 23:34 ` [PATCH 10/20] Hexagon HVX (target/hexagon) C preprocessor for decode tree Taylor Simpson
2021-07-25 13:15   ` Richard Henderson
2021-07-05 23:34 ` [PATCH 11/20] Hexagon HVX (target/hexagon) instruction utility functions Taylor Simpson
2021-07-25 13:21   ` Richard Henderson
2021-07-05 23:34 ` [PATCH 12/20] Hexagon HVX (target/hexagon) helper functions Taylor Simpson
2021-07-25 13:22   ` Richard Henderson
2021-07-26  4:02     ` Taylor Simpson
2021-07-05 23:34 ` [PATCH 13/20] Hexagon HVX (target/hexagon) TCG generation Taylor Simpson
2021-07-05 23:34 ` [PATCH 14/20] Hexagon HVX (target/hexagon) import semantics Taylor Simpson
2021-07-05 23:34 ` [PATCH 15/20] Hexagon HVX (target/hexagon) instruction decoding Taylor Simpson
2021-07-05 23:34 ` [PATCH 16/20] Hexagon HVX (target/hexagon) import instruction encodings Taylor Simpson
2021-07-05 23:34 ` [PATCH 17/20] Hexagon HVX (tests/tcg/hexagon) vector_add_int test Taylor Simpson
2021-07-05 23:34 ` [PATCH 18/20] Hexagon HVX (tests/tcg/hexagon) hvx_misc test Taylor Simpson
2021-07-05 23:34 ` [PATCH 19/20] Hexagon HVX (tests/tcg/hexagon) scatter_gather test Taylor Simpson
2021-07-05 23:34 ` [PATCH 20/20] Hexagon HVX (tests/tcg/hexagon) histogram test Taylor Simpson
