diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index 61b2a3e1ae1..605c204ca97 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -414,6 +414,7 @@ int riscv_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
 #if !defined(CONFIG_USER_ONLY)
     hwaddr pa = 0;
     int prot;
+    target_ulong tlb_size = TARGET_PAGE_SIZE;
 #endif
     int ret = TRANSLATE_FAIL;
 
@@ -423,17 +424,20 @@ int riscv_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
 
 #if !defined(CONFIG_USER_ONLY)
     ret = get_physical_address(env, &pa, &prot, address, rw, mmu_idx);
+
     qemu_log_mask(CPU_LOG_MMU,
             "%s address=%" VADDR_PRIx " ret %d physical " TARGET_FMT_plx
             " prot %d\n", __func__, address, ret, pa, prot);
-    if (riscv_feature(env, RISCV_FEATURE_PMP) &&
-        !pmp_hart_has_privs(env, pa, TARGET_PAGE_SIZE, 1 << rw)) {
-        ret = TRANSLATE_FAIL;
+
+    if (ret == TRANSLATE_SUCCESS && riscv_feature(env, RISCV_FEATURE_PMP)) {
+        ret = pmp_has_access(env, pa, size, rw, &tlb_size) ?
+              TRANSLATE_SUCCESS : TRANSLATE_FAIL;
     }
+
     if (ret == TRANSLATE_SUCCESS) {
         tlb_set_page(cs, address & TARGET_PAGE_MASK, pa & TARGET_PAGE_MASK,
-                     prot, mmu_idx, TARGET_PAGE_SIZE);
-    } else if (ret == TRANSLATE_FAIL) {
+                     prot, mmu_idx, tlb_size);
+    } else {
         raise_mmu_exception(env, address, rw);
     }
 #else
diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
index 3f950c99dc8..fc374c63f1b 100644
--- a/target/riscv/pmp.c
+++ b/target/riscv/pmp.c
@@ -1,8 +1,9 @@
 /*
  * QEMU RISC-V PMP (Physical Memory Protection)
  *
- * Author: Daire McNamara, daire.mcnamara@emdalo.com
- *         Ivan Griffin, ivan.griffin@emdalo.com
+ * Authors: Daire McNamara
+ *          Ivan Griffin
+ *          Michael Clark
  *
  * This provides a RISC-V Physical Memory Protection implementation
  *
@@ -28,98 +29,78 @@
 #include "qapi/error.h"
 #include "cpu.h"
 #include "qemu-common.h"
+#include "trace.h"
 
 #ifndef CONFIG_USER_ONLY
 
-#define RISCV_DEBUG_PMP 0
-#define PMP_DEBUG(fmt, ...)                                                    \
-    do {                                                                       \
-        if (RISCV_DEBUG_PMP) {                                                 \
-            qemu_log_mask(LOG_TRACE, "%s: " fmt "\n", __func__, ##__VA_ARGS__);\
-        }                                                                      \
-    } while (0)
-
-static void pmp_write_cfg(CPURISCVState *env, uint32_t addr_index,
-    uint8_t val);
-static uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t addr_index);
-static void pmp_update_rule(CPURISCVState *env, uint32_t pmp_index);
+/*
+ * Round up to the next highest power of two
+ *
+ * Source: https://graphics.stanford.edu/~seander/bithacks.html
+ */
+static inline uint64_t roundpow2(uint64_t val)
+{
+    val--;
+    val |= val >> 1;
+    val |= val >> 2;
+    val |= val >> 4;
+    val |= val >> 8;
+    val |= val >> 16;
+    val |= val >> 32;
+    val++;
+    return val;
+}
 
 /*
  * Accessor method to extract address matching type 'a field' from cfg reg
  */
 static inline uint8_t pmp_get_a_field(uint8_t cfg)
 {
-    uint8_t a = cfg >> 3;
-    return a & 0x3;
+    return (cfg >> 3) & 0x3;
 }
 
 /*
  * Check whether a PMP is locked or not.
 */
-static inline int pmp_is_locked(CPURISCVState *env, uint32_t pmp_index)
+static inline int pmp_is_locked(CPURISCVState *env, size_t i)
 {
+    uint8_t cfg_reg = env->pmp_state.pmp[i].cfg_reg;
 
-    if (env->pmp_state.pmp[pmp_index].cfg_reg & PMP_LOCK) {
+    if (cfg_reg & PMP_LOCK) {
         return 1;
     }
 
-    /* Top PMP has no 'next' to check */
-    if ((pmp_index + 1u) >= MAX_RISCV_PMPS) {
-        return 0;
-    }
-
-    /* In TOR mode, need to check the lock bit of the next pmp
-     * (if there is a next)
-     */
-    const uint8_t a_field =
-        pmp_get_a_field(env->pmp_state.pmp[pmp_index + 1].cfg_reg);
-    if ((env->pmp_state.pmp[pmp_index + 1u].cfg_reg & PMP_LOCK) &&
-        (PMP_AMATCH_TOR == a_field)) {
-        return 1;
+    /* In TOR mode, need to check the lock bit of the next entry */
+    if (i + 1 < MAX_RISCV_PMPS) {
+        cfg_reg = env->pmp_state.pmp[i + 1].cfg_reg;
+        if (pmp_get_a_field(cfg_reg) == PMP_AMATCH_TOR) {
+            if (cfg_reg & PMP_LOCK) {
+                return 1;
+            }
+        }
     }
 
     return 0;
 }
 
 /*
- * Count the number of active rules.
+ * Convert QEMU access type to PMP privileges
  */
-static inline uint32_t pmp_get_num_rules(CPURISCVState *env)
+static inline int pmp_access_priv(int access_type)
 {
-    return env->pmp_state.num_rules;
-}
-
-/*
- * Accessor to get the cfg reg for a specific PMP/HART
- */
-static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
-{
-    if (pmp_index < MAX_RISCV_PMPS) {
-        return env->pmp_state.pmp[pmp_index].cfg_reg;
+    switch (access_type) {
+    case MMU_INST_FETCH:
+        return PMP_EXEC;
+    case MMU_DATA_LOAD:
+        return PMP_READ;
+    case MMU_DATA_STORE:
+        return PMP_WRITE;
+    default:
+        g_assert_not_reached();
     }
-
-    return 0;
 }
 
-/*
- * Accessor to set the cfg reg for a specific PMP/HART
- *   Bounds checks and relevant lock bit.
- */
-static void pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
-{
-    if (pmp_index < MAX_RISCV_PMPS) {
-        if (!pmp_is_locked(env, pmp_index)) {
-            env->pmp_state.pmp[pmp_index].cfg_reg = val;
-            pmp_update_rule(env, pmp_index);
-        } else {
-            PMP_DEBUG("ignoring write - locked");
-        }
-    } else {
-        PMP_DEBUG("ignoring write - out of bounds");
-    }
-}
-
 static void pmp_decode_napot(target_ulong a, target_ulong *sa, target_ulong *ea)
 {
     /*
@@ -145,81 +126,20 @@ static void pmp_decode_napot(target_ulong a, target_ulong *sa, target_ulong *ea)
     }
 }
 
-
-/* Convert cfg/addr reg values here into simple 'sa' --> start address and 'ea'
- *   end address values.
- *   This function is called relatively infrequently whereas the check that
- *   an address is within a pmp rule is called often, so optimise that one
+/*
+ * Count active rules when config changes
  */
-static void pmp_update_rule(CPURISCVState *env, uint32_t pmp_index)
+static inline void pmp_count_active_rules(CPURISCVState *env)
 {
-    int i;
-
-    env->pmp_state.num_rules = 0;
-
-    uint8_t this_cfg = env->pmp_state.pmp[pmp_index].cfg_reg;
-    target_ulong this_addr = env->pmp_state.pmp[pmp_index].addr_reg;
-    target_ulong prev_addr = 0u;
-    target_ulong sa = 0u;
-    target_ulong ea = 0u;
-
-    if (pmp_index >= 1u) {
-        prev_addr = env->pmp_state.pmp[pmp_index - 1].addr_reg;
-    }
-
-    switch (pmp_get_a_field(this_cfg)) {
-    case PMP_AMATCH_OFF:
-        sa = 0u;
-        ea = -1;
-        break;
-
-    case PMP_AMATCH_TOR:
-        sa = prev_addr << 2; /* shift up from [xx:0] to [xx+2:2] */
-        ea = (this_addr << 2) - 1u;
-        break;
-
-    case PMP_AMATCH_NA4:
-        sa = this_addr << 2; /* shift up from [xx:0] to [xx+2:2] */
-        ea = (this_addr + 4u) - 1u;
-        break;
-
-    case PMP_AMATCH_NAPOT:
-        pmp_decode_napot(this_addr, &sa, &ea);
-        break;
-
-    default:
-        sa = 0u;
-        ea = 0u;
-        break;
-    }
-
-    env->pmp_state.addr[pmp_index].sa = sa;
-    env->pmp_state.addr[pmp_index].ea = ea;
-
-    for (i = 0; i < MAX_RISCV_PMPS; i++) {
-        const uint8_t a_field =
-            pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);
-        if (PMP_AMATCH_OFF != a_field) {
-            env->pmp_state.num_rules++;
+    /* find last rule that is not PMP_AMATCH_OFF */
+    env->pmp_state.active_rules = 0;
+    for (size_t i = 0; i < MAX_RISCV_PMPS; i++) {
+        if (pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg) != PMP_AMATCH_OFF) {
+            env->pmp_state.active_rules = i + 1;
         }
     }
 }
 
-static int pmp_is_in_range(CPURISCVState *env, int pmp_index, target_ulong addr)
-{
-    int result = 0;
-
-    if ((addr >= env->pmp_state.addr[pmp_index].sa)
-        && (addr <= env->pmp_state.addr[pmp_index].ea)) {
-        result = 1;
-    } else {
-        result = 0;
-    }
-
-    return result;
-}
-
-
 /*
  * Public Interface
  */
@@ -227,154 +147,153 @@ static int pmp_is_in_range(CPURISCVState *env, int pmp_index, target_ulong addr)
 /*
  * Check if the address has required RWX privs to complete desired operation
  */
-bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
-    target_ulong size, pmp_priv_t privs)
+bool pmp_has_access(CPURISCVState *env, target_ulong addr, int size, int rw,
+    target_ulong *tlb_size)
 {
-    int i = 0;
-    int ret = -1;
-    target_ulong s = 0;
-    target_ulong e = 0;
-    pmp_priv_t allowed_privs = 0;
-
-    /* Short cut if no rules */
-    if (0 == pmp_get_num_rules(env)) {
-        return true;
-    }
-
-    /* 1.10 draft priv spec states there is an implicit order
-         from low to high */
-    for (i = 0; i < MAX_RISCV_PMPS; i++) {
-        s = pmp_is_in_range(env, i, addr);
-        e = pmp_is_in_range(env, i, addr + size - 1);
+    int access = pmp_access_priv(rw);
+    bool result;
+
+    /* check physical memory protection rules */
+    for (size_t i = 0; i < env->pmp_state.active_rules; i++)
+    {
+        uint8_t cfg_reg = env->pmp_state.pmp[i].cfg_reg;
+        target_ulong addr_reg = env->pmp_state.pmp[i].addr_reg;
+        target_ulong sa, ea;
+
+        switch (pmp_get_a_field(cfg_reg)) {
+        case PMP_AMATCH_TOR:
+            sa = (i >= 1 ? env->pmp_state.pmp[i - 1].addr_reg : 0) << 2;
+            ea = (addr_reg << 2) - 1;
+            break;
 
-        /* partially inside */
-        if ((s + e) == 1) {
-            PMP_DEBUG("pmp violation - access is partially inside");
-            ret = 0;
+        case PMP_AMATCH_NA4:
+            /* The current spec wording for PMP granularity requires a
+             * terminal bit for granule size spans; generalize NA4 -> NAG */
+            sa = addr_reg << 2;
+            ea = (addr_reg << 2) + (1 << (env->pmp_state.granularity + 2)) - 1;
             break;
-        }
 
-        /* fully inside */
-        const uint8_t a_field =
-            pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);
-        if ((s + e) == 2) {
-            if (PMP_AMATCH_OFF == a_field) {
-                return 1;
-            }
+        case PMP_AMATCH_NAPOT:
+            pmp_decode_napot(addr_reg, &sa, &ea);
+            break;
 
-            allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
-            if ((env->priv != PRV_M) || pmp_is_locked(env, i)) {
-                allowed_privs &= env->pmp_state.pmp[i].cfg_reg;
-            }
+        case PMP_AMATCH_OFF:
+        default:
+            continue;
+        }
 
-            if ((privs & allowed_privs) == privs) {
-                ret = 1;
-                break;
-            } else {
-                ret = 0;
-                break;
+        /* check address and privs, bypass for unlocked rule if PRV_M */
+        result = (env->priv == PRV_M && !pmp_is_locked(env, i)) ||
+                 (addr >= sa && addr + size - 1 <= ea && (cfg_reg & access));
+        trace_pmp_rule_match(env->mhartid, addr, size, access, sa, ea,
+                             cfg_reg, result);
+        if (result) {
+            /* return smaller page size if match has finer granularity */
+            if ((sa & (*tlb_size - 1) || (ea + 1) & (*tlb_size - 1))) {
+                /* breaks iff other covering entries (invalid config). Punt */
+                *tlb_size = roundpow2(ea - sa);
            }
+            goto match;
        }
     }
 
-    /* No rule matched */
-    if (ret == -1) {
-        if (env->priv == PRV_M) {
-            ret = 1; /* Privileged spec v1.10 states if no PMP entry matches an
-                      * M-Mode access, the access succeeds */
-        } else {
-            ret = 0; /* Other modes are not allowed to succeed if they don't
-                      * match a rule, but there are rules.  We've checked for
-                      * no rule earlier in this function. */
-        }
-    }
+    /* only allow M mode if no rules are present */
+    result = env->priv == PRV_M;
 
-    return ret == 1 ? true : false;
+match:
+    trace_pmp_has_access(env->mhartid, addr, size, access, result);
+
+    return result;
 }
 
 /*
- * Handle a write to a pmpcfg CSP
+ * Handle a write to a pmpcfg CSR
  */
-void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index,
-    target_ulong val)
+void pmpcfg_csr_write(CPURISCVState *env, size_t cfg, target_ulong val)
 {
-    int i;
-    uint8_t cfg_val;
-
-    PMP_DEBUG("hart " TARGET_FMT_ld ": reg%d, val: 0x" TARGET_FMT_lx,
-        env->mhartid, reg_index, val);
-
-    if ((reg_index & 1) && (sizeof(target_ulong) == 8)) {
-        PMP_DEBUG("ignoring write - incorrect address");
-        return;
+    if (TARGET_LONG_BITS == 64) {
+        if (cfg & 1) {
+            return;
+        }
+        cfg >>= 1;
     }
 
-    for (i = 0; i < sizeof(target_ulong); i++) {
-        cfg_val = (val >> 8 * i) & 0xff;
-        pmp_write_cfg(env, (reg_index * sizeof(target_ulong)) + i,
-            cfg_val);
+    for (size_t b = 0; b < sizeof(target_ulong); b++) {
+        size_t i = cfg * sizeof(target_ulong) + b;
+        if (!pmp_is_locked(env, i)) {
+            env->pmp_state.pmp[i].cfg_reg = (val >> 8 * b) & 0xff;
+            pmp_count_active_rules(env);
+        }
     }
+
+    trace_pmpcfg_csr_write(env->mhartid, cfg, val);
 }
 
 /*
- * Handle a read from a pmpcfg CSP
+ * Handle a read from a pmpcfg CSR
  */
-target_ulong pmpcfg_csr_read(CPURISCVState *env, uint32_t reg_index)
+target_ulong pmpcfg_csr_read(CPURISCVState *env, size_t cfg)
 {
-    int i;
     target_ulong cfg_val = 0;
-    target_ulong val = 0;
 
-    for (i = 0; i < sizeof(target_ulong); i++) {
-        val = pmp_read_cfg(env, (reg_index * sizeof(target_ulong)) + i);
-        cfg_val |= (val << (i * 8));
+    if (TARGET_LONG_BITS == 64) {
+        if (cfg & 1) {
+            return 0;
+        }
+        cfg >>= 1;
+    }
+
+    for (size_t b = 0; b < sizeof(target_ulong); b++) {
+        size_t i = cfg * sizeof(target_ulong) + b;
+        target_ulong val = env->pmp_state.pmp[i].cfg_reg;
+        cfg_val |= (val << (b * 8));
     }
 
-    PMP_DEBUG("hart " TARGET_FMT_ld ": reg%d, val: 0x" TARGET_FMT_lx,
-        env->mhartid, reg_index, cfg_val);
+    trace_pmpcfg_csr_read(env->mhartid, cfg, cfg_val);
 
     return cfg_val;
 }
 
 /*
- * Handle a write to a pmpaddr CSP
+ * Handle a write to a pmpaddr CSR
  */
-void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
-    target_ulong val)
+void pmpaddr_csr_write(CPURISCVState *env, size_t addr, target_ulong val)
 {
-    PMP_DEBUG("hart " TARGET_FMT_ld ": addr%d, val: 0x" TARGET_FMT_lx,
-        env->mhartid, addr_index, val);
-
-    if (addr_index < MAX_RISCV_PMPS) {
-        if (!pmp_is_locked(env, addr_index)) {
-            env->pmp_state.pmp[addr_index].addr_reg = val;
-            pmp_update_rule(env, addr_index);
-        } else {
-            PMP_DEBUG("ignoring write - locked");
-        }
-    } else {
-        PMP_DEBUG("ignoring write - out of bounds");
+    if (addr >= MAX_RISCV_PMPS) {
+        return;
+    }
+
+    if (env->pmp_state.granularity) {
+        val &= ~(((target_ulong)1 << env->pmp_state.granularity) - 1);
+    }
+
+    trace_pmpaddr_csr_write(env->mhartid, addr, val);
+
+    if (!pmp_is_locked(env, addr)) {
+        env->pmp_state.pmp[addr].addr_reg = val;
     }
 }
 
 /*
- * Handle a read from a pmpaddr CSP
+ * Handle a read from a pmpaddr CSR
  */
-target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index)
+target_ulong pmpaddr_csr_read(CPURISCVState *env, size_t addr)
 {
-    PMP_DEBUG("hart " TARGET_FMT_ld ": addr%d, val: 0x" TARGET_FMT_lx,
-        env->mhartid, addr_index,
-        env->pmp_state.pmp[addr_index].addr_reg);
-    if (addr_index < MAX_RISCV_PMPS) {
-        return env->pmp_state.pmp[addr_index].addr_reg;
-    } else {
-        PMP_DEBUG("ignoring read - out of bounds");
+    target_ulong val;
+
+    if (addr >= MAX_RISCV_PMPS) {
        return 0;
    }
+
+    val = env->pmp_state.pmp[addr].addr_reg;
+
+    trace_pmpaddr_csr_read(env->mhartid, addr, val);
+
+    return val;
 }
 #endif
diff --git a/target/riscv/pmp.h b/target/riscv/pmp.h
index e3953c885f6..64512e4111c 100644
--- a/target/riscv/pmp.h
+++ b/target/riscv/pmp.h
@@ -30,10 +30,10 @@ typedef enum {
 } pmp_priv_t;
 
 typedef enum {
-    PMP_AMATCH_OFF,  /* Null (off)                            */
-    PMP_AMATCH_TOR,  /* Top of Range                          */
-    PMP_AMATCH_NA4,  /* Naturally aligned four-byte region    */
-    PMP_AMATCH_NAPOT /* Naturally aligned power-of-two region */
+    PMP_AMATCH_OFF = 0,  /* Null (off)                            */
+    PMP_AMATCH_TOR = 1,  /* Top of Range                          */
+    PMP_AMATCH_NA4 = 2,  /* Naturally aligned four-byte region    */
+    PMP_AMATCH_NAPOT = 3 /* Naturally aligned power-of-two region */
 } pmp_am_t;
 
 typedef struct {
@@ -41,24 +41,17 @@ typedef struct {
     uint8_t cfg_reg;
 } pmp_entry_t;
 
-typedef struct {
-    target_ulong sa;
-    target_ulong ea;
-} pmp_addr_t;
-
 typedef struct {
     pmp_entry_t pmp[MAX_RISCV_PMPS];
-    pmp_addr_t addr[MAX_RISCV_PMPS];
-    uint32_t num_rules;
+    size_t active_rules;
+    size_t granularity;
 } pmp_table_t;
 
-void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index,
-    target_ulong val);
-target_ulong pmpcfg_csr_read(CPURISCVState *env, uint32_t reg_index);
-void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
-    target_ulong val);
-target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index);
-bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
-    target_ulong size, pmp_priv_t priv);
+void pmpcfg_csr_write(CPURISCVState *env, size_t cfg, target_ulong val);
+target_ulong pmpcfg_csr_read(CPURISCVState *env, size_t cfg);
+void pmpaddr_csr_write(CPURISCVState *env, size_t addr, target_ulong val);
+target_ulong pmpaddr_csr_read(CPURISCVState *env, size_t addr);
+bool pmp_has_access(CPURISCVState *env, target_ulong addr, int size, int rw,
+    target_ulong *tlb_entry_size);
 
 #endif
diff --git a/target/riscv/trace-events b/target/riscv/trace-events
index 48af0373df6..ee17a35f82e 100644
--- a/target/riscv/trace-events
+++ b/target/riscv/trace-events
@@ -1,2 +1,8 @@
 # target/riscv/cpu_helper.c
-riscv_trap(uint64_t hartid, bool async, uint64_t cause, uint64_t epc, uint64_t tval, const char *desc) "hart:%"PRId64", async:%d, cause:%"PRId64", epc:0x%"PRIx64", tval:0x%"PRIx64", desc=%s"
+riscv_trap(uint64_t hartid, bool async, uint64_t cause, uint64_t epc, uint64_t tval, const char *desc) "hart:%"PRId64", async:%d, cause:0x%"PRIx64", epc:0x%"PRIx64", tval:0x%"PRIx64", desc=%s"
+pmpcfg_csr_read(uint64_t hartid, int cfg, uint64_t value) "hart:%"PRId64", cfg:%d, value:0x%"PRIx64
+pmpcfg_csr_write(uint64_t hartid, int cfg, uint64_t value) "hart:%"PRId64", cfg:%d, value:0x%"PRIx64
+pmpaddr_csr_read(uint64_t hartid, int addr, uint64_t value) "hart:%"PRId64", addr:%d, value:0x%"PRIx64
+pmpaddr_csr_write(uint64_t hartid, int addr, uint64_t value) "hart:%"PRId64", addr:%d, value:0x%"PRIx64
+pmp_has_access(uint64_t hartid, uint64_t addr, int size, int access, int result) "hart:%"PRId64", addr:0x%"PRIx64", size:%d, access:%d, result:%d"
+pmp_rule_match(uint64_t hartid, uint64_t addr, int size, int access, uint64_t sa, uint64_t ea, int cfg, int result) "hart:%"PRId64", addr:0x%"PRIx64", size:%d, access:%d, sa:0x%"PRIx64", ea:0x%"PRIx64", cfg:%d, result:%d"
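
Reviewer note, not part of the patch: a minimal standalone sketch of the NAPOT decode that pmp_has_access relies on, assuming the standard pmpaddr encoding (physical address >> 2, with the count of trailing 1 bits selecting a 2^(k+3)-byte region). The helper name napot_decode and the example region are illustrative only; QEMU's actual implementation is the unchanged pmp_decode_napot shown as context above.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Decode a NAPOT pmpaddr value into inclusive start/end byte addresses. */
    static void napot_decode(uint64_t pmpaddr, uint64_t *sa, uint64_t *ea)
    {
        /* GCC/Clang builtin; pmpaddr != ~0 (the all-ones encoding) assumed */
        unsigned k = __builtin_ctzll(~pmpaddr);        /* number of trailing 1s */
        uint64_t base = (pmpaddr & ~((1ULL << k) - 1)) << 2;
        uint64_t size = 1ULL << (k + 3);               /* 2^(k+3) bytes */
        *sa = base;
        *ea = base + size - 1;
    }

    int main(void)
    {
        /* a 1 KiB NAPOT region at 0x20000000: pmpaddr = (base >> 2) | (size / 8 - 1) */
        uint64_t sa, ea;
        napot_decode((0x20000000u >> 2) | (0x400 / 8 - 1), &sa, &ea);
        printf("sa=0x%" PRIx64 " ea=0x%" PRIx64 "\n", sa, ea);  /* 0x20000000..0x200003ff */
        return 0;
    }

For a region like this one, finer than TARGET_PAGE_SIZE, pmp_has_access reports roundpow2(ea - sa) (0x400 here) back through *tlb_size, so riscv_cpu_handle_mmu_fault passes the smaller size to tlb_set_page instead of caching the PMP decision for the whole 4 KiB page.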