From: Blue Swirl <blauwirbel@gmail.com>
To: Max Filippov <jcmvbkbc@gmail.com>
Cc: qemu-devel@nongnu.org
Subject: Re: [Qemu-devel] [PATCH v4 29/32] target-xtensa: implement memory protection options
Date: Sun, 4 Sep 2011 18:32:16 +0000	[thread overview]
Message-ID: <CAAu8pHtEMWW2-F-dhCUc8Oa-at+jvJcNsEYa-Pn8v0YnXP22WA@mail.gmail.com> (raw)
In-Reply-To: <1314909960-31738-30-git-send-email-jcmvbkbc@gmail.com>

On Thu, Sep 1, 2011 at 8:45 PM, Max Filippov <jcmvbkbc@gmail.com> wrote:
> - TLB opcode group;
> - region protection option (ISA, 4.6.3);
> - region translation option (ISA, 4.6.4);
> - MMU option (ISA, 4.6.5).
>
> Cache control attribute bits are not used by this implementation.
>
> Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
> ---
>  target-xtensa/cpu.h       |   56 ++++++++-
>  target-xtensa/helper.c    |  340 ++++++++++++++++++++++++++++++++++++++++++++-
>  target-xtensa/helpers.h   |    7 +
>  target-xtensa/op_helper.c |  301 +++++++++++++++++++++++++++++++++++++++-
>  target-xtensa/translate.c |   91 ++++++++++++-
>  5 files changed, 782 insertions(+), 13 deletions(-)
>
> diff --git a/target-xtensa/cpu.h b/target-xtensa/cpu.h
> index 93e17d1..05948f1 100644
> --- a/target-xtensa/cpu.h
> +++ b/target-xtensa/cpu.h
> @@ -114,6 +114,10 @@ enum {
>     SCOMPARE1 = 12,
>     WINDOW_BASE = 72,
>     WINDOW_START = 73,
> +    PTEVADDR = 83,
> +    RASID = 90,
> +    ITLBCFG = 91,
> +    DTLBCFG = 92,
>     EPC1 = 177,
>     DEPC = 192,
>     EPS2 = 194,
> @@ -154,6 +158,9 @@ enum {
>  #define MAX_NLEVEL 6
>  #define MAX_NNMI 1
>  #define MAX_NCCOMPARE 3
> +#define MAX_TLB_WAY_SIZE 8
> +
> +#define REGION_PAGE_MASK 0xe0000000
>
>  enum {
>     /* Static vectors */
> @@ -214,6 +221,21 @@ typedef enum {
>     INTTYPE_MAX
>  } interrupt_type;
>
> +typedef struct xtensa_tlb_entry {
> +    uint32_t vaddr;
> +    uint32_t paddr;
> +    uint8_t asid;
> +    uint8_t attr;
> +    bool variable;
> +} xtensa_tlb_entry;
> +
> +typedef struct xtensa_tlb {
> +    unsigned nways;
> +    const unsigned way_size[10];
> +    bool varway56;
> +    unsigned nrefillentries;
> +} xtensa_tlb;
> +
>  typedef struct XtensaGdbReg {
>     int targno;
>     int type;
> @@ -248,6 +270,9 @@ typedef struct XtensaConfig {
>     unsigned nccompare;
>     uint32_t timerint[MAX_NCCOMPARE];
>     uint32_t clock_freq_khz;
> +
> +    xtensa_tlb itlb;
> +    xtensa_tlb dtlb;
>  } XtensaConfig;
>
>  typedef struct CPUXtensaState {
> @@ -258,6 +283,10 @@ typedef struct CPUXtensaState {
>     uint32_t uregs[256];
>     uint32_t phys_regs[MAX_NAREG];
>
> +    xtensa_tlb_entry itlb[7][MAX_TLB_WAY_SIZE];
> +    xtensa_tlb_entry dtlb[10][MAX_TLB_WAY_SIZE];
> +    unsigned autorefill_idx;
> +
>     int pending_irq_level; /* level of last raised IRQ */
>     void **irq_inputs;
>     QEMUTimer *ccompare_timer;
> @@ -287,12 +316,29 @@ int cpu_xtensa_signal_handler(int host_signum, void *pinfo, void *puc);
>  void xtensa_cpu_list(FILE *f, fprintf_function cpu_fprintf);
>  void xtensa_sync_window_from_phys(CPUState *env);
>  void xtensa_sync_phys_from_window(CPUState *env);
> +uint32_t xtensa_tlb_get_addr_mask(const CPUState *env, bool dtlb, uint32_t way);
> +void split_tlb_entry_spec_way(const CPUState *env, uint32_t v, bool dtlb,
> +        uint32_t *vpn, uint32_t wi, uint32_t *ei);
> +int xtensa_tlb_lookup(const CPUState *env, uint32_t addr, bool dtlb,
> +        uint32_t *_wi, uint32_t *_ei, uint8_t *_ring);
> +void xtensa_tlb_set_entry(CPUState *env, bool dtlb,
> +        unsigned wi, unsigned ei, uint32_t vpn, uint32_t pte);
> +int xtensa_get_physical_addr(CPUState *env,
> +        uint32_t vaddr, int is_write, int mmu_idx,
> +        uint32_t *paddr, uint32_t *page_size, unsigned *access);
> +
>
>  #define XTENSA_OPTION_BIT(opt) (((uint64_t)1) << (opt))
>
> +static inline bool xtensa_option_bits_enabled(const XtensaConfig *config,
> +        uint64_t opt)
> +{
> +    return (config->options & opt) != 0;
> +}
> +
>  static inline bool xtensa_option_enabled(const XtensaConfig *config, int opt)
>  {
> -    return (config->options & XTENSA_OPTION_BIT(opt)) != 0;
> +    return xtensa_option_bits_enabled(config, XTENSA_OPTION_BIT(opt));
>  }
>
>  static inline int xtensa_get_cintlevel(const CPUState *env)
> @@ -323,6 +369,14 @@ static inline int xtensa_get_cring(const CPUState *env)
>     }
>  }
>
> +static inline xtensa_tlb_entry *xtensa_tlb_get_entry(CPUState *env,
> +        bool dtlb, unsigned wi, unsigned ei)
> +{
> +    return dtlb ?
> +        env->dtlb[wi] + ei :
> +        env->itlb[wi] + ei;
> +}
> +
>  /* MMU modes definitions */
>  #define MMU_MODE0_SUFFIX _ring0
>  #define MMU_MODE1_SUFFIX _ring1
> diff --git a/target-xtensa/helper.c b/target-xtensa/helper.c
> index 487847c..00571e8 100644
> --- a/target-xtensa/helper.c
> +++ b/target-xtensa/helper.c
> @@ -38,6 +38,8 @@
>         a1, a2, a3, a4, a5, a6) \
>     { .targno = (_targno), .type = (typ), .group = (_group) },
>
> +static void reset_mmu(CPUState *env);
> +
>  void cpu_reset(CPUXtensaState *env)
>  {
>     env->exception_taken = 0;
> @@ -48,6 +50,7 @@ void cpu_reset(CPUXtensaState *env)
>     env->sregs[VECBASE] = env->config->vecbase;
>
>     env->pending_irq_level = 0;
> +    reset_mmu(env);
>  }
>
>  static const XtensaConfig core_config[] = {
> @@ -150,7 +153,19 @@ void xtensa_cpu_list(FILE *f, fprintf_function cpu_fprintf)
>
>  target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
>  {
> -    return addr;
> +    uint32_t paddr;
> +    uint32_t page_size;
> +    unsigned access;
> +
> +    if (0 == xtensa_get_physical_addr(env, addr, 0, 0,
> +                &paddr, &page_size, &access)) {

This operand order in the comparison looks alien; the more usual style is
xtensa_get_physical_addr(...) == 0 rather than 0 == xtensa_get_physical_addr(...).
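For example (untested sketch, same calls and logic as in the patch):

    uint32_t paddr;
    uint32_t page_size;
    unsigned access;

    /* try a data access first, then an instruction fetch */
    if (xtensa_get_physical_addr(env, addr, 0, 0,
                &paddr, &page_size, &access) == 0) {
        return paddr;
    }
    if (xtensa_get_physical_addr(env, addr, 2, 0,
                &paddr, &page_size, &access) == 0) {
        return paddr;
    }
    return ~0;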

> +        return paddr;
> +    }
> +    if (0 == xtensa_get_physical_addr(env, addr, 2, 0,
> +                &paddr, &page_size, &access)) {
> +        return paddr;
> +    }
> +    return ~0;
>  }
>
>  static uint32_t relocated_vector(CPUState *env, uint32_t vector)
> @@ -255,3 +270,326 @@ void do_interrupt(CPUState *env)
>     }
>     check_interrupts(env);
>  }
> +
> +static void reset_tlb_mmu_all_ways(CPUState *env,
> +        const xtensa_tlb *tlb, xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
> +{
> +    unsigned wi, ei;
> +
> +    for (wi = 0; wi < tlb->nways; ++wi) {
> +        for (ei = 0; ei < tlb->way_size[wi]; ++ei) {
> +            entry[wi][ei].asid = 0;
> +            entry[wi][ei].variable = true;
> +        }
> +    }
> +}
> +
> +static void reset_tlb_mmu_ways56(CPUState *env,
> +        const xtensa_tlb *tlb, xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
> +{
> +    if (!tlb->varway56) {
> +        static const xtensa_tlb_entry way5[] = {
> +            {
> +                .vaddr = 0xd0000000,
> +                .paddr = 0,
> +                .asid = 1,
> +                .attr = 7,
> +                .variable = false,
> +            }, {
> +                .vaddr = 0xd8000000,
> +                .paddr = 0,
> +                .asid = 1,
> +                .attr = 3,
> +                .variable = false,
> +            }
> +        };
> +        static const xtensa_tlb_entry way6[] = {
> +            {
> +                .vaddr = 0xe0000000,
> +                .paddr = 0xf0000000,
> +                .asid = 1,
> +                .attr = 7,
> +                .variable = false,
> +            }, {
> +                .vaddr = 0xf0000000,
> +                .paddr = 0xf0000000,
> +                .asid = 1,
> +                .attr = 3,
> +                .variable = false,
> +            }
> +        };
> +        memcpy(entry[5], way5, sizeof(way5));
> +        memcpy(entry[6], way6, sizeof(way6));
> +    } else {
> +        uint32_t ei;
> +        for (ei = 0; ei < 8; ++ei) {
> +            entry[6][ei].vaddr = ei << 29;
> +            entry[6][ei].paddr = ei << 29;
> +            entry[6][ei].asid = 1;
> +            entry[6][ei].attr = 2;
> +        }
> +    }
> +}
> +
> +static void reset_tlb_region_way0(CPUState *env,
> +        xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
> +{
> +    unsigned ei;
> +
> +    for (ei = 0; ei < 8; ++ei) {
> +        entry[0][ei].vaddr = ei << 29;
> +        entry[0][ei].paddr = ei << 29;
> +        entry[0][ei].asid = 1;
> +        entry[0][ei].attr = 2;
> +        entry[0][ei].variable = true;
> +    }
> +}
> +
> +static void reset_mmu(CPUState *env)
> +{
> +    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
> +        env->sregs[RASID] = 0x04030201;
> +        env->sregs[ITLBCFG] = 0;
> +        env->sregs[DTLBCFG] = 0;
> +        env->autorefill_idx = 0;
> +        reset_tlb_mmu_all_ways(env, &env->config->itlb, env->itlb);
> +        reset_tlb_mmu_all_ways(env, &env->config->dtlb, env->dtlb);
> +        reset_tlb_mmu_ways56(env, &env->config->itlb, env->itlb);
> +        reset_tlb_mmu_ways56(env, &env->config->dtlb, env->dtlb);
> +    } else {
> +        reset_tlb_region_way0(env, env->itlb);
> +        reset_tlb_region_way0(env, env->dtlb);
> +    }
> +}
> +
> +static unsigned get_ring(const CPUState *env, uint8_t asid)
> +{
> +    unsigned i;
> +    for (i = 0; i < 4; ++i) {
> +        if (((env->sregs[RASID] >> i * 8) & 0xff) == asid) {
> +            return i;
> +        }
> +    }
> +    return 0xff;
> +}
> +
> +/*!
> + * Lookup xtensa TLB for the given virtual address.
> + * See ISA, 4.6.2.2
> + *
> + * \param _wi: [out] way index
> + * \param _ei: [out] entry index
> + * \param _ring: [out] access ring
> + * \return 0 if ok, exception cause code otherwise
> + */
> +int xtensa_tlb_lookup(const CPUState *env, uint32_t addr, bool dtlb,
> +        uint32_t *_wi, uint32_t *_ei, uint8_t *_ring)

Underscores: please avoid the leading underscores in _wi, _ei and _ring;
plain names (or a prefix for the out parameters) would be cleaner.
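E.g. (just a naming sketch, pwi/pei/pring are only suggestions):

    int xtensa_tlb_lookup(const CPUState *env, uint32_t addr, bool dtlb,
            uint32_t *pwi, uint32_t *pei, uint8_t *pring);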

> +{
> +    const xtensa_tlb *tlb = dtlb ?
> +        &env->config->dtlb : &env->config->itlb;
> +    const xtensa_tlb_entry (*entry)[MAX_TLB_WAY_SIZE] = dtlb ?
> +        env->dtlb : env->itlb;
> +
> +    int nhits = 0;
> +    unsigned wi;
> +
> +    for (wi = 0; wi < tlb->nways; ++wi) {
> +        uint32_t vpn;
> +        uint32_t ei;
> +        split_tlb_entry_spec_way(env, addr, dtlb, &vpn, wi, &ei);
> +        if (entry[wi][ei].vaddr == vpn && entry[wi][ei].asid) {
> +            unsigned ring = get_ring(env, entry[wi][ei].asid);
> +            if (ring < 4) {
> +                if (++nhits > 1) {
> +                    return dtlb ?
> +                        LOAD_STORE_TLB_MULTI_HIT_CAUSE :
> +                        INST_TLB_MULTI_HIT_CAUSE;
> +                }
> +                *_wi = wi;
> +                *_ei = ei;
> +                *_ring = ring;
> +            }
> +        }
> +    }
> +    return nhits ? 0 :
> +        (dtlb ? LOAD_STORE_TLB_MISS_CAUSE : INST_TLB_MISS_CAUSE);
> +}
> +
> +/*!
> + * Convert MMU ATTR to PAGE_{READ,WRITE,EXEC} mask.
> + * See ISA, 4.6.5.10
> + */
> +static unsigned mmu_attr_to_access(uint32_t attr)
> +{
> +    unsigned access = 0;
> +    if (attr < 12) {
> +        access |= PAGE_READ;
> +        if (attr & 0x1) {
> +            access |= PAGE_EXEC;
> +        }
> +        if (attr & 0x2) {
> +            access |= PAGE_WRITE;
> +        }
> +    } else if (attr == 13) {
> +        access |= PAGE_READ | PAGE_WRITE;
> +    }
> +    return access;
> +}
> +
> +/*!
> + * Convert region protection ATTR to PAGE_{READ,WRITE,EXEC} mask.
> + * See ISA, 4.6.3.3
> + */
> +static unsigned region_attr_to_access(uint32_t attr)
> +{
> +    unsigned access = 0;
> +    if ((attr < 6 && attr != 3) || attr == 14) {
> +        access |= PAGE_READ | PAGE_WRITE;
> +    }
> +    if (attr > 0 && attr < 6) {
> +        access |= PAGE_EXEC;
> +    }
> +    return access;
> +}
> +
> +static bool is_access_granted(unsigned access, int is_write)
> +{
> +    switch (is_write) {
> +    case 0:
> +        return access & PAGE_READ;
> +
> +    case 1:
> +        return access & PAGE_WRITE;
> +
> +    case 2:
> +        return access & PAGE_EXEC;
> +
> +    default:
> +        return 0;
> +    }
> +}
> +
> +static int autorefill_mmu(CPUState *env, uint32_t vaddr, bool dtlb,
> +        uint32_t *wi, uint32_t *ei, uint8_t *ring);
> +
> +static int get_physical_addr_mmu(CPUState *env,
> +        uint32_t vaddr, int is_write, int mmu_idx,
> +        uint32_t *paddr, uint32_t *page_size, unsigned *access)
> +{
> +    bool dtlb = is_write != 2;
> +    uint32_t wi;
> +    uint32_t ei;
> +    uint8_t ring;
> +    int ret = xtensa_tlb_lookup(env, vaddr, dtlb, &wi, &ei, &ring);
> +
> +    if ((ret == INST_TLB_MISS_CAUSE || ret == LOAD_STORE_TLB_MISS_CAUSE) &&
> +            (mmu_idx != 0 || ((vaddr ^ env->sregs[PTEVADDR]) & 0xffc00000)) &&
> +            autorefill_mmu(env, vaddr, dtlb, &wi, &ei, &ring) == 0) {
> +        ret = 0;
> +    }
> +    if (ret != 0) {
> +        return ret;
> +    }
> +
> +    const xtensa_tlb_entry *entry =
> +        xtensa_tlb_get_entry(env, dtlb, wi, ei);
> +
> +    if (ring < mmu_idx) {
> +        return dtlb ?
> +            LOAD_STORE_PRIVILEGE_CAUSE :
> +            INST_FETCH_PRIVILEGE_CAUSE;
> +    }
> +
> +    *access = mmu_attr_to_access(entry->attr);
> +    if (!is_access_granted(*access, is_write)) {
> +        return dtlb ?
> +            (is_write ?
> +             STORE_PROHIBITED_CAUSE :
> +             LOAD_PROHIBITED_CAUSE) :
> +            INST_FETCH_PROHIBITED_CAUSE;
> +    }
> +
> +    *paddr = entry->paddr | (vaddr & ~xtensa_tlb_get_addr_mask(env, dtlb, wi));
> +    *page_size = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;
> +
> +    return 0;
> +}
> +
> +static int autorefill_mmu(CPUState *env, uint32_t vaddr, bool dtlb,
> +        uint32_t *wi, uint32_t *ei, uint8_t *ring)
> +{
> +    uint32_t paddr;
> +    uint32_t page_size;
> +    unsigned access;
> +    uint32_t pt_vaddr =
> +        (env->sregs[PTEVADDR] | (vaddr >> 10)) & 0xfffffffc;
> +    int ret = get_physical_addr_mmu(env, pt_vaddr, 0, 0,
> +            &paddr, &page_size, &access);
> +
> +    qemu_log("%s: trying autorefill(%08x) -> %08x\n", __func__,
> +            vaddr, ret ? ~0 : paddr);
> +
> +    if (ret == 0) {
> +        uint32_t vpn;
> +        uint32_t pte = ldl_phys(paddr);
> +
> +        *ring = (pte >> 4) & 0x3;
> +        *wi = (++env->autorefill_idx) & 0x3;
> +        split_tlb_entry_spec_way(env, vaddr, dtlb, &vpn, *wi, ei);
> +        xtensa_tlb_set_entry(env, dtlb, *wi, *ei, vpn, pte);
> +        qemu_log("%s: autorefill(%08x): %08x -> %08x\n",
> +                __func__, vaddr, vpn, pte);
> +    }
> +    return ret;
> +}
> +
> +static int get_physical_addr_region(CPUState *env,
> +        uint32_t vaddr, int is_write, int mmu_idx,
> +        uint32_t *paddr, uint32_t *page_size, unsigned *access)
> +{
> +    bool dtlb = is_write != 2;
> +    uint32_t wi = 0;
> +    uint32_t ei = (vaddr >> 29) & 0x7;
> +    const xtensa_tlb_entry *entry =
> +        xtensa_tlb_get_entry(env, dtlb, wi, ei);
> +
> +    *access = region_attr_to_access(entry->attr);
> +    if (!is_access_granted(*access, is_write)) {
> +        return dtlb ?
> +            (is_write ?
> +             STORE_PROHIBITED_CAUSE :
> +             LOAD_PROHIBITED_CAUSE) :
> +            INST_FETCH_PROHIBITED_CAUSE;
> +    }
> +
> +    *paddr = entry->paddr | (vaddr & ~REGION_PAGE_MASK);
> +    *page_size = ~REGION_PAGE_MASK + 1;
> +
> +    return 0;
> +}
> +
> +/*!
> + * Convert virtual address to physical addr.
> + * MMU may issue pagewalk and change xtensa autorefill TLB way entry.
> + *
> + * \return 0 if ok, exception cause code otherwise
> + */
> +int xtensa_get_physical_addr(CPUState *env,
> +        uint32_t vaddr, int is_write, int mmu_idx,
> +        uint32_t *paddr, uint32_t *page_size, unsigned *access)
> +{
> +    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
> +        return get_physical_addr_mmu(env, vaddr, is_write, mmu_idx,
> +                paddr, page_size, access);
> +    } else if (xtensa_option_bits_enabled(env->config,
> +                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
> +                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION))) {
> +        return get_physical_addr_region(env, vaddr, is_write, mmu_idx,
> +                paddr, page_size, access);
> +    } else {
> +        *paddr = vaddr;
> +        *page_size = TARGET_PAGE_SIZE;
> +        *access = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
> +        return 0;
> +    }
> +}
> diff --git a/target-xtensa/helpers.h b/target-xtensa/helpers.h
> index 28689c3..09ab332 100644
> --- a/target-xtensa/helpers.h
> +++ b/target-xtensa/helpers.h
> @@ -22,4 +22,11 @@ DEF_HELPER_2(timer_irq, void, i32, i32)
>  DEF_HELPER_1(advance_ccount, void, i32)
>  DEF_HELPER_1(check_interrupts, void, env)
>
> +DEF_HELPER_1(wsr_rasid, void, i32)
> +DEF_HELPER_2(rtlb0, i32, i32, i32)
> +DEF_HELPER_2(rtlb1, i32, i32, i32)
> +DEF_HELPER_2(itlb, void, i32, i32)
> +DEF_HELPER_2(ptlb, i32, i32, i32)
> +DEF_HELPER_3(wtlb, void, i32, i32, i32)
> +
>  #include "def-helper.h"
> diff --git a/target-xtensa/op_helper.c b/target-xtensa/op_helper.c
> index fcec506..6a9c886 100644
> --- a/target-xtensa/op_helper.c
> +++ b/target-xtensa/op_helper.c
> @@ -70,13 +70,32 @@ static void do_unaligned_access(target_ulong addr, int is_write, int is_user,
>     }
>  }
>
> -void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
> +void tlb_fill(target_ulong vaddr, int is_write, int mmu_idx, void *retaddr)
>  {
> -    tlb_set_page(cpu_single_env,
> -            addr & ~(TARGET_PAGE_SIZE - 1),
> -            addr & ~(TARGET_PAGE_SIZE - 1),
> -            PAGE_READ | PAGE_WRITE | PAGE_EXEC,
> -            mmu_idx, TARGET_PAGE_SIZE);
> +    CPUState *saved_env = env;
> +
> +    env = cpu_single_env;
> +    {
> +        uint32_t paddr;
> +        uint32_t page_size;
> +        unsigned access;
> +        int ret = xtensa_get_physical_addr(env, vaddr, is_write, mmu_idx,
> +                &paddr, &page_size, &access);
> +
> +        qemu_log("%s(%08x, %d, %d) -> %08x, ret = %d\n", __func__,
> +                vaddr, is_write, mmu_idx, paddr, ret);
> +
> +        if (ret == 0) {
> +            tlb_set_page(env,
> +                    vaddr & TARGET_PAGE_MASK,
> +                    paddr & TARGET_PAGE_MASK,
> +                    access, mmu_idx, page_size);
> +        } else {
> +            do_restore_state(retaddr);
> +            HELPER(exception_cause_vaddr)(env->pc, ret, vaddr);
> +        }
> +    }
> +    env = saved_env;
>  }
>
>  void HELPER(exception)(uint32_t excp)
> @@ -377,3 +396,273 @@ void HELPER(check_interrupts)(CPUState *env)
>  {
>     check_interrupts(env);
>  }
> +
> +void HELPER(wsr_rasid)(uint32_t v)
> +{
> +    v = (v & 0xffffff00) | 0x1;
> +    if (v != env->sregs[RASID]) {
> +        env->sregs[RASID] = v;
> +        tlb_flush(env, 1);
> +    }
> +}
> +
> +static uint32_t get_page_size(const CPUState *env, bool dtlb, uint32_t way)
> +{
> +    uint32_t tlbcfg = env->sregs[dtlb ? DTLBCFG : ITLBCFG];
> +
> +    switch (way) {
> +    case 4:
> +        return (tlbcfg >> 16) & 0x3;
> +
> +    case 5:
> +        return (tlbcfg >> 20) & 0x1;
> +
> +    case 6:
> +        return (tlbcfg >> 24) & 0x1;
> +
> +    default:
> +        return 0;
> +    }
> +}
> +
> +/*!
> + * Get bit mask for the virtual address bits translated by the TLB way
> + */
> +uint32_t xtensa_tlb_get_addr_mask(const CPUState *env, bool dtlb, uint32_t way)
> +{
> +    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
> +        bool varway56 = dtlb ?
> +            env->config->dtlb.varway56 :
> +            env->config->itlb.varway56;
> +
> +        switch (way) {
> +        case 4:
> +            return 0xfff00000 << get_page_size(env, dtlb, way) * 2;
> +
> +        case 5:
> +            if (varway56) {
> +                return 0xf8000000 << get_page_size(env, dtlb, way);
> +            } else {
> +                return 0xf8000000;
> +            }
> +
> +        case 6:
> +            if (varway56) {
> +                return 0xf0000000 << (1 - get_page_size(env, dtlb, way));
> +            } else {
> +                return 0xf0000000;
> +            }
> +
> +        default:
> +            return 0xfffff000;
> +        }
> +    } else {
> +        return REGION_PAGE_MASK;
> +    }
> +}
> +
> +/*!
> + * Get bit mask for the 'VPN without index' field.
> + * See ISA, 4.6.5.6, data format for RxTLB0
> + */
> +static uint32_t get_vpn_mask(const CPUState *env, bool dtlb, uint32_t way)
> +{
> +    if (way < 4) {
> +        bool is32 = (dtlb ?
> +                env->config->dtlb.nrefillentries :
> +                env->config->itlb.nrefillentries) == 32;
> +        return is32 ? 0xffff8000 : 0xffffc000;
> +    } else if (way == 4) {
> +        return xtensa_tlb_get_addr_mask(env, dtlb, way) << 2;
> +    } else if (way <= 6) {
> +        uint32_t mask = xtensa_tlb_get_addr_mask(env, dtlb, way);
> +        bool varway56 = dtlb ?
> +            env->config->dtlb.varway56 :
> +            env->config->itlb.varway56;
> +
> +        if (varway56) {
> +            return mask << (way == 5 ? 2 : 3);
> +        } else {
> +            return mask << 1;
> +        }
> +    } else {
> +        return 0xfffff000;
> +    }
> +}
> +
> +/*!
> + * Split virtual address into VPN (with index) and entry index
> + * for the given TLB way
> + */
> +void split_tlb_entry_spec_way(const CPUState *env, uint32_t v, bool dtlb,
> +        uint32_t *vpn, uint32_t wi, uint32_t *ei)
> +{
> +    bool varway56 = dtlb ?
> +        env->config->dtlb.varway56 :
> +        env->config->itlb.varway56;
> +
> +    if (!dtlb) {
> +        wi &= 7;
> +    }
> +
> +    if (wi < 4) {
> +        bool is32 = (dtlb ?
> +                env->config->dtlb.nrefillentries :
> +                env->config->itlb.nrefillentries) == 32;
> +        *ei = (v >> 12) & (is32 ? 0x7 : 0x3);
> +    } else {
> +        switch (wi) {
> +        case 4:
> +            {
> +                uint32_t eibase = 20 + get_page_size(env, dtlb, wi) * 2;
> +                *ei = (v >> eibase) & 0x3;
> +            }
> +            break;
> +
> +        case 5:
> +            if (varway56) {
> +                uint32_t eibase = 27 + get_page_size(env, dtlb, wi);
> +                *ei = (v >> eibase) & 0x3;
> +            } else {
> +                *ei = (v >> 27) & 0x1;
> +            }
> +            break;
> +
> +        case 6:
> +            if (varway56) {
> +                uint32_t eibase = 29 - get_page_size(env, dtlb, wi);
> +                *ei = (v >> eibase) & 0x7;
> +            } else {
> +                *ei = (v >> 28) & 0x1;
> +            }
> +            break;
> +
> +        default:
> +            *ei = 0;
> +            break;
> +        }
> +    }
> +    *vpn = v & xtensa_tlb_get_addr_mask(env, dtlb, wi);
> +}
> +
> +/*!
> + * Split TLB address into TLB way, entry index and VPN (with index).
> + * See ISA, 4.6.5.5 - 4.6.5.8 for the TLB addressing format
> + */
> +static void split_tlb_entry_spec(uint32_t v, bool dtlb,
> +        uint32_t *vpn, uint32_t *wi, uint32_t *ei)
> +{
> +    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
> +        *wi = v & (dtlb ? 0xf : 0x7);
> +        split_tlb_entry_spec_way(env, v, dtlb, vpn, *wi, ei);
> +    } else {
> +        *vpn = v & REGION_PAGE_MASK;
> +        *wi = 0;
> +        *ei = (v >> 29) & 0x7;
> +    }
> +}
> +
> +static xtensa_tlb_entry *get_tlb_entry(uint32_t v, bool dtlb, uint32_t *_wi)
> +{
> +    uint32_t vpn;
> +    uint32_t wi;
> +    uint32_t ei;
> +
> +    split_tlb_entry_spec(v, dtlb, &vpn, &wi, &ei);
> +    if (_wi) {
> +        *_wi = wi;
> +    }
> +    return xtensa_tlb_get_entry(env, dtlb, wi, ei);
> +}
> +
> +uint32_t HELPER(rtlb0)(uint32_t v, uint32_t dtlb)
> +{
> +    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
> +        uint32_t wi;
> +        const xtensa_tlb_entry *entry = get_tlb_entry(v, dtlb, &wi);
> +        return (entry->vaddr & get_vpn_mask(env, dtlb, wi)) | entry->asid;
> +    } else {
> +        return v & REGION_PAGE_MASK;
> +    }
> +}
> +
> +uint32_t HELPER(rtlb1)(uint32_t v, uint32_t dtlb)
> +{
> +    const xtensa_tlb_entry *entry = get_tlb_entry(v, dtlb, NULL);
> +    return entry->paddr | entry->attr;
> +}
> +
> +void HELPER(itlb)(uint32_t v, uint32_t dtlb)
> +{
> +    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
> +        uint32_t wi;
> +        xtensa_tlb_entry *entry = get_tlb_entry(v, dtlb, &wi);
> +        if (entry->variable && entry->asid) {
> +            tlb_flush_page(env, entry->vaddr);
> +            entry->asid = 0;
> +        }
> +    }
> +}
> +
> +uint32_t HELPER(ptlb)(uint32_t v, uint32_t dtlb)
> +{
> +    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
> +        uint32_t wi;
> +        uint32_t ei;
> +        uint8_t ring;
> +        int res = xtensa_tlb_lookup(env, v, dtlb, &wi, &ei, &ring);
> +
> +        switch (res) {
> +        case 0:
> +            if (ring >= xtensa_get_ring(env)) {
> +                return (v & 0xfffff000) | wi | (dtlb ? 0x10 : 0x8);
> +            }
> +            break;
> +
> +        case INST_TLB_MULTI_HIT_CAUSE:
> +        case LOAD_STORE_TLB_MULTI_HIT_CAUSE:
> +            HELPER(exception_cause_vaddr)(env->pc, res, v);
> +            break;
> +        }
> +        return 0;
> +    } else {
> +        return (v & REGION_PAGE_MASK) | 0x1;
> +    }
> +}
> +
> +void xtensa_tlb_set_entry(CPUState *env, bool dtlb,
> +        unsigned wi, unsigned ei, uint32_t vpn, uint32_t pte)
> +{
> +    xtensa_tlb_entry *entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);
> +
> +    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
> +        if (entry->variable) {
> +            if (entry->asid) {
> +                tlb_flush_page(env, entry->vaddr);
> +            }
> +            entry->vaddr = vpn;
> +            entry->paddr = pte & xtensa_tlb_get_addr_mask(env, dtlb, wi);
> +            entry->asid = (env->sregs[RASID] >> ((pte >> 1) & 0x18)) & 0xff;
> +            entry->attr = pte & 0xf;
> +        } else {
> +            qemu_log("%s %d, %d, %d trying to set immutable entry\n",
> +                    __func__, dtlb, wi, ei);
> +        }
> +    } else {
> +        tlb_flush_page(env, entry->vaddr);
> +        if (xtensa_option_enabled(env->config,
> +                    XTENSA_OPTION_REGION_TRANSLATION)) {
> +            entry->paddr = pte & REGION_PAGE_MASK;
> +        }
> +        entry->attr = pte & 0xf;
> +    }
> +}
> +
> +void HELPER(wtlb)(uint32_t p, uint32_t v, uint32_t dtlb)
> +{
> +    uint32_t vpn;
> +    uint32_t wi;
> +    uint32_t ei;
> +    split_tlb_entry_spec(v, dtlb, &vpn, &wi, &ei);
> +    xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, p);
> +}
> diff --git a/target-xtensa/translate.c b/target-xtensa/translate.c
> index 3b21bc4..731f04f 100644
> --- a/target-xtensa/translate.c
> +++ b/target-xtensa/translate.c
> @@ -80,6 +80,10 @@ static const char * const sregnames[256] = {
>     [SCOMPARE1] = "SCOMPARE1",
>     [WINDOW_BASE] = "WINDOW_BASE",
>     [WINDOW_START] = "WINDOW_START",
> +    [PTEVADDR] = "PTEVADDR",
> +    [RASID] = "RASID",
> +    [ITLBCFG] = "ITLBCFG",
> +    [DTLBCFG] = "DTLBCFG",
>     [EPC1] = "EPC1",
>     [EPC1 + 1] = "EPC2",
>     [EPC1 + 2] = "EPC3",
> @@ -161,6 +165,11 @@ void xtensa_translate_init(void)
>  #include "helpers.h"
>  }
>
> +static inline bool option_bits_enabled(DisasContext *dc, uint64_t opt)
> +{
> +    return xtensa_option_bits_enabled(dc->config, opt);
> +}
> +
>  static inline bool option_enabled(DisasContext *dc, int opt)
>  {
>     return xtensa_option_enabled(dc->config, opt);
> @@ -379,11 +388,19 @@ static void gen_rsr_ccount(DisasContext *dc, TCGv_i32 d, uint32_t sr)
>     tcg_gen_mov_i32(d, cpu_SR[sr]);
>  }
>
> +static void gen_rsr_ptevaddr(DisasContext *dc, TCGv_i32 d, uint32_t sr)
> +{
> +    tcg_gen_shri_i32(d, cpu_SR[EXCVADDR], 10);
> +    tcg_gen_or_i32(d, d, cpu_SR[sr]);
> +    tcg_gen_andi_i32(d, d, 0xfffffffc);
> +}
> +
>  static void gen_rsr(DisasContext *dc, TCGv_i32 d, uint32_t sr)
>  {
>     static void (* const rsr_handler[256])(DisasContext *dc,
>             TCGv_i32 d, uint32_t sr) = {
>         [CCOUNT] = gen_rsr_ccount,
> +        [PTEVADDR] = gen_rsr_ptevaddr,
>     };
>
>     if (sregnames[sr]) {
> @@ -436,6 +453,23 @@ static void gen_wsr_windowstart(DisasContext *dc, uint32_t sr, TCGv_i32 v)
>     reset_used_window(dc);
>  }
>
> +static void gen_wsr_ptevaddr(DisasContext *dc, uint32_t sr, TCGv_i32 v)
> +{
> +    tcg_gen_andi_i32(cpu_SR[sr], v, 0xffc00000);
> +}
> +
> +static void gen_wsr_rasid(DisasContext *dc, uint32_t sr, TCGv_i32 v)
> +{
> +    gen_helper_wsr_rasid(v);
> +    /* This can change tb->flags, so exit tb */
> +    gen_jumpi_check_loop_end(dc, -1);
> +}
> +
> +static void gen_wsr_tlbcfg(DisasContext *dc, uint32_t sr, TCGv_i32 v)
> +{
> +    tcg_gen_andi_i32(cpu_SR[sr], v, 0x01130000);
> +}
> +
>  static void gen_wsr_intset(DisasContext *dc, uint32_t sr, TCGv_i32 v)
>  {
>     tcg_gen_andi_i32(cpu_SR[sr], v,
> @@ -505,6 +539,10 @@ static void gen_wsr(DisasContext *dc, uint32_t sr, TCGv_i32 s)
>         [LITBASE] = gen_wsr_litbase,
>         [WINDOW_BASE] = gen_wsr_windowbase,
>         [WINDOW_START] = gen_wsr_windowstart,
> +        [PTEVADDR] = gen_wsr_ptevaddr,
> +        [RASID] = gen_wsr_rasid,
> +        [ITLBCFG] = gen_wsr_tlbcfg,
> +        [DTLBCFG] = gen_wsr_tlbcfg,
>         [INTSET] = gen_wsr_intset,
>         [INTCLEAR] = gen_wsr_intclear,
>         [INTENABLE] = gen_wsr_intenable,
> @@ -585,14 +623,16 @@ static void gen_window_check3(DisasContext *dc, unsigned r1, unsigned r2,
>
>  static void disas_xtensa_insn(DisasContext *dc)
>  {
> -#define HAS_OPTION(opt) do { \
> -        if (!option_enabled(dc, opt)) { \
> -            qemu_log("Option %d is not enabled %s:%d\n", \
> -                    (opt), __FILE__, __LINE__); \
> +#define HAS_OPTION_BITS(opt) do { \
> +        if (!option_bits_enabled(dc, opt)) { \
> +            qemu_log("Option is not enabled %s:%d\n", \
> +                    __FILE__, __LINE__); \
>             goto invalid_opcode; \
>         } \
>     } while (0)
>
> +#define HAS_OPTION(opt) HAS_OPTION_BITS(XTENSA_OPTION_BIT(opt))
> +
>  #define TBD() qemu_log("TBD(pc = %08x): %s:%d\n", dc->pc, __FILE__, __LINE__)
>  #define RESERVED() do { \
>         qemu_log("RESERVED(pc = %08x, %02x%02x%02x): %s:%d\n", \
> @@ -1055,7 +1095,48 @@ static void disas_xtensa_insn(DisasContext *dc)
>                 break;
>
>             case 5: /*TLB*/
> -                TBD();
> +                HAS_OPTION_BITS(
> +                        XTENSA_OPTION_BIT(XTENSA_OPTION_MMU) |
> +                        XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
> +                        XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION));
> +                gen_check_privilege(dc);
> +                gen_window_check2(dc, RRR_S, RRR_T);
> +                {
> +                    TCGv_i32 dtlb = tcg_const_i32((RRR_R & 8) != 0);
> +
> +                    switch (RRR_R & 7) {
> +                    case 3: /*RITLB0*/ /*RDTLB0*/
> +                        gen_helper_rtlb0(cpu_R[RRR_T], cpu_R[RRR_S], dtlb);
> +                        break;
> +
> +                    case 4: /*IITLB*/ /*IDTLB*/
> +                        gen_helper_itlb(cpu_R[RRR_S], dtlb);
> +                        /* This could change memory mapping, so exit tb */
> +                        gen_jumpi_check_loop_end(dc, -1);
> +                        break;
> +
> +                    case 5: /*PITLB*/ /*PDTLB*/
> +                        tcg_gen_movi_i32(cpu_pc, dc->pc);
> +                        gen_helper_ptlb(cpu_R[RRR_T], cpu_R[RRR_S], dtlb);
> +                        break;
> +
> +                    case 6: /*WITLB*/ /*WDTLB*/
> +                        gen_helper_wtlb(cpu_R[RRR_T], cpu_R[RRR_S], dtlb);
> +                        /* This could change memory mapping, so exit tb */
> +                        gen_jumpi_check_loop_end(dc, -1);
> +                        break;
> +
> +                    case 7: /*RITLB1*/ /*RDTLB1*/
> +                        gen_helper_rtlb1(cpu_R[RRR_T], cpu_R[RRR_S], dtlb);
> +                        break;
> +
> +                    default:
> +                        tcg_temp_free(dtlb);
> +                        RESERVED();
> +                        break;
> +                    }
> +                    tcg_temp_free(dtlb);
> +                }
>                 break;
>
>             case 6: /*RT0*/
> --
> 1.7.6
>
>
>

Thread overview: 47+ messages
2011-09-01 20:45 [Qemu-devel] [PATCH v4 00/32] target-xtensa: new target architecture Max Filippov
2011-09-01 20:45 ` [Qemu-devel] [PATCH v4 01/32] target-xtensa: add target stubs Max Filippov
2011-09-04 18:14   ` Blue Swirl
2011-09-01 20:45 ` [Qemu-devel] [PATCH v4 02/32] target-xtensa: add target to the configure script Max Filippov
2011-09-01 20:45 ` [Qemu-devel] [PATCH v4 03/32] target-xtensa: implement disas_xtensa_insn Max Filippov
2011-09-01 20:45 ` [Qemu-devel] [PATCH v4 04/32] target-xtensa: implement narrow instructions Max Filippov
2011-09-01 20:45 ` [Qemu-devel] [PATCH v4 05/32] target-xtensa: implement RT0 group Max Filippov
2011-09-01 20:45 ` [Qemu-devel] [PATCH v4 06/32] target-xtensa: add sample board Max Filippov
2011-09-04 18:17   ` Blue Swirl
2011-09-01 20:45 ` [Qemu-devel] [PATCH v4 07/32] target-xtensa: implement conditional jumps Max Filippov
2011-09-01 20:45 ` [Qemu-devel] [PATCH v4 08/32] target-xtensa: implement JX/RET0/CALLX Max Filippov
2011-09-01 20:45 ` [Qemu-devel] [PATCH v4 09/32] target-xtensa: add special and user registers Max Filippov
2011-09-04 18:18   ` Blue Swirl
2011-09-01 20:45 ` [Qemu-devel] [PATCH v4 10/32] target-xtensa: implement RST3 group Max Filippov
2011-09-01 20:45 ` [Qemu-devel] [PATCH v4 11/32] target-xtensa: implement shifts (ST1 and RST1 groups) Max Filippov
2011-09-01 20:45 ` [Qemu-devel] [PATCH v4 12/32] target-xtensa: implement LSAI group Max Filippov
2011-09-01 20:45 ` [Qemu-devel] [PATCH v4 13/32] target-xtensa: mark reserved and TBD opcodes Max Filippov
2011-09-01 20:45 ` [Qemu-devel] [PATCH v4 14/32] target-xtensa: implement SYNC group Max Filippov
2011-09-01 20:45 ` [Qemu-devel] [PATCH v4 15/32] target-xtensa: implement CACHE group Max Filippov
2011-09-01 20:45 ` [Qemu-devel] [PATCH v4 16/32] target-xtensa: add PS register and access control Max Filippov
2011-09-01 20:45 ` [Qemu-devel] [PATCH v4 17/32] target-xtensa: implement exceptions Max Filippov
2011-09-04 18:22   ` Blue Swirl
2011-09-01 20:45 ` [Qemu-devel] [PATCH v4 18/32] target-xtensa: implement RST2 group (32 bit mul/div/rem) Max Filippov
2011-09-01 20:45 ` [Qemu-devel] [PATCH v4 19/32] target-xtensa: implement windowed registers Max Filippov
2011-09-04 18:27   ` Blue Swirl
2011-09-01 20:45 ` [Qemu-devel] [PATCH v4 20/32] target-xtensa: implement loop option Max Filippov
2011-09-01 20:45 ` [Qemu-devel] [PATCH v4 21/32] target-xtensa: implement extended L32R Max Filippov
2011-09-01 20:45 ` [Qemu-devel] [PATCH v4 22/32] target-xtensa: implement unaligned exception option Max Filippov
2011-09-04 18:28   ` Blue Swirl
2011-09-01 20:45 ` [Qemu-devel] [PATCH v4 23/32] target-xtensa: implement SIMCALL Max Filippov
2011-09-01 20:45 ` [Qemu-devel] [PATCH v4 24/32] target-xtensa: implement interrupt option Max Filippov
2011-09-01 20:45 ` [Qemu-devel] [PATCH v4 25/32] target-xtensa: implement accurate window check Max Filippov
2011-09-01 20:45 ` [Qemu-devel] [PATCH v4 26/32] target-xtensa: implement CPENABLE and PRID SRs Max Filippov
2011-09-01 20:45 ` [Qemu-devel] [PATCH v4 27/32] target-xtensa: implement relocatable vectors Max Filippov
2011-09-01 20:45 ` [Qemu-devel] [PATCH v4 28/32] target-xtensa: add gdb support Max Filippov
2011-09-01 20:45 ` [Qemu-devel] [PATCH v4 29/32] target-xtensa: implement memory protection options Max Filippov
2011-09-04 18:32   ` Blue Swirl [this message]
2011-09-01 20:45 ` [Qemu-devel] [PATCH v4 30/32] target-xtensa: add dc232b core and board Max Filippov
2011-09-04 18:33   ` Blue Swirl
2011-09-01 20:45 ` [Qemu-devel] [PATCH v4 31/32] MAINTAINERS: add xtensa maintainer Max Filippov
2011-09-01 20:46 ` [Qemu-devel] [PATCH v4 32/32] target-xtensa: add regression testsuite Max Filippov
2011-09-04 18:35 ` [Qemu-devel] [PATCH v4 00/32] target-xtensa: new target architecture Blue Swirl
2011-09-05 10:55   ` Edgar E. Iglesias
2011-09-05 12:35     ` Max Filippov
2011-09-14 21:18       ` Edgar E. Iglesias
2011-09-14 22:24         ` Max Filippov
2011-09-14 22:28           ` Edgar E. Iglesias
