From: Richard Henderson
Date: Fri, 13 Oct 2023 00:46:55 +0000 (-0700)
Subject: target/hppa: Make HPPA_BTLB_ENTRIES variable
X-Git-Url: http://git.osdn.net/view?a=commitdiff_plain;h=9cf2112be4fe84d41083435e44fa146d13d3f8d7;p=qmiga%2Fqemu.git

target/hppa: Make HPPA_BTLB_ENTRIES variable

Depend on hppa_is_pa20.

Signed-off-by: Richard Henderson
---

diff --git a/hw/hppa/machine.c b/hw/hppa/machine.c
index 67d4d1b5e0..85682e6bab 100644
--- a/hw/hppa/machine.c
+++ b/hw/hppa/machine.c
@@ -185,6 +185,7 @@ static FWCfgState *create_fw_cfg(MachineState *ms, PCIBus *pci_bus)
     uint64_t val;
     const char qemu_version[] = QEMU_VERSION;
     MachineClass *mc = MACHINE_GET_CLASS(ms);
+    int btlb_entries = HPPA_BTLB_ENTRIES(&cpu[0]->env);
     int len;
 
     fw_cfg = fw_cfg_init_mem(FW_CFG_IO_BASE, FW_CFG_IO_BASE + 4);
@@ -196,11 +197,11 @@
     fw_cfg_add_file(fw_cfg, "/etc/firmware-min-version",
                     g_memdup(&val, sizeof(val)), sizeof(val));
 
-    val = cpu_to_le64(HPPA_TLB_ENTRIES - HPPA_BTLB_ENTRIES);
+    val = cpu_to_le64(HPPA_TLB_ENTRIES - btlb_entries);
     fw_cfg_add_file(fw_cfg, "/etc/cpu/tlb_entries",
                     g_memdup(&val, sizeof(val)), sizeof(val));
 
-    val = cpu_to_le64(HPPA_BTLB_ENTRIES);
+    val = cpu_to_le64(btlb_entries);
     fw_cfg_add_file(fw_cfg, "/etc/cpu/btlb_entries",
                     g_memdup(&val, sizeof(val)), sizeof(val));
 
@@ -608,10 +609,6 @@ static void hppa_machine_reset(MachineState *ms, ShutdownCause reason)
 
         cs->exception_index = -1;
         cs->halted = 0;
-
-        /* clear any existing TLB and BTLB entries */
-        memset(cpu[i]->env.tlb, 0, sizeof(cpu[i]->env.tlb));
-        cpu[i]->env.tlb_last = HPPA_BTLB_ENTRIES;
     }
 
     /* already initialized by machine_hppa_init()? */
diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h
index 0ac307e0e9..48ddcffb8a 100644
--- a/target/hppa/cpu.h
+++ b/target/hppa/cpu.h
@@ -227,15 +227,13 @@ typedef struct CPUArchState {
     target_ureg cr_back[2]; /* back of cr17/cr18 */
     target_ureg shadow[7];  /* shadow registers */
 
-    /* ??? The number of entries isn't specified by the architecture. */
-#ifdef TARGET_HPPA64
-#define HPPA_BTLB_FIXED 0 /* BTLBs are not supported in 64-bit machines */
-#else
-#define HPPA_BTLB_FIXED 16
-#endif
-#define HPPA_BTLB_VARIABLE 0
+    /*
+     * ??? The number of entries isn't specified by the architecture.
+     * BTLBs are not supported in 64-bit machines.
+     */
+#define PA10_BTLB_FIXED 16
+#define PA10_BTLB_VARIABLE 0
 #define HPPA_TLB_ENTRIES 256
-#define HPPA_BTLB_ENTRIES (HPPA_BTLB_FIXED + HPPA_BTLB_VARIABLE)
 
     /* Index for round-robin tlb eviction. */
     uint32_t tlb_last;
@@ -277,6 +275,11 @@ static inline bool hppa_is_pa20(CPUHPPAState *env)
     return object_dynamic_cast(OBJECT(env_cpu(env)), TYPE_HPPA64_CPU) != NULL;
 }
 
+static inline int HPPA_BTLB_ENTRIES(CPUHPPAState *env)
+{
+    return hppa_is_pa20(env) ? 0 : PA10_BTLB_FIXED + PA10_BTLB_VARIABLE;
+}
+
 static inline int cpu_mmu_index(CPUHPPAState *env, bool ifetch)
 {
 #ifdef CONFIG_USER_ONLY
diff --git a/target/hppa/machine.c b/target/hppa/machine.c
index 61ae942ff1..473305ffea 100644
--- a/target/hppa/machine.c
+++ b/target/hppa/machine.c
@@ -139,6 +139,7 @@ static int tlb_pre_load(void *opaque)
 static int tlb_post_load(void *opaque, int version_id)
 {
     CPUHPPAState *env = opaque;
+    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
     HPPATLBEntry **unused = &env->tlb_unused;
     HPPATLBEntry *partial = NULL;
 
@@ -152,7 +153,7 @@ static int tlb_post_load(void *opaque, int version_id)
 
         if (e->entry_valid) {
             interval_tree_insert(&e->itree, &env->tlb_root);
-        } else if (i < HPPA_BTLB_ENTRIES) {
+        } else if (i < btlb_entries) {
             /* btlb not in unused list */
         } else if (partial == NULL && e->itree.start < e->itree.last) {
             partial = e;
diff --git a/target/hppa/mem_helper.c b/target/hppa/mem_helper.c
index b1773ece61..327fb20c17 100644
--- a/target/hppa/mem_helper.c
+++ b/target/hppa/mem_helper.c
@@ -57,7 +57,7 @@ static void hppa_flush_tlb_ent(CPUHPPAState *env, HPPATLBEntry *ent,
                               HPPA_MMU_FLUSH_MASK, TARGET_LONG_BITS);
 
     /* Never clear BTLBs, unless forced to do so. */
-    is_btlb = ent < &env->tlb[HPPA_BTLB_ENTRIES];
+    is_btlb = ent < &env->tlb[HPPA_BTLB_ENTRIES(env)];
     if (is_btlb && !force_flush_btlb) {
         return;
     }
@@ -93,10 +93,11 @@ static HPPATLBEntry *hppa_alloc_tlb_ent(CPUHPPAState *env)
     HPPATLBEntry *ent = env->tlb_unused;
 
     if (ent == NULL) {
+        uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
         uint32_t i = env->tlb_last;
 
-        if (i < HPPA_BTLB_ENTRIES || i >= ARRAY_SIZE(env->tlb)) {
-            i = HPPA_BTLB_ENTRIES;
+        if (i < btlb_entries || i >= ARRAY_SIZE(env->tlb)) {
+            i = btlb_entries;
         }
         env->tlb_last = i + 1;
 
@@ -385,23 +386,24 @@ void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
 
 void hppa_ptlbe(CPUHPPAState *env)
 {
+    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
     uint32_t i;
 
     /* Zap the (non-btlb) tlb entries themselves. */
-    memset(&env->tlb[HPPA_BTLB_ENTRIES], 0,
-           sizeof(env->tlb) - HPPA_BTLB_ENTRIES * sizeof(env->tlb[0]));
-    env->tlb_last = HPPA_BTLB_ENTRIES;
+    memset(&env->tlb[btlb_entries], 0,
+           sizeof(env->tlb) - btlb_entries * sizeof(env->tlb[0]));
+    env->tlb_last = btlb_entries;
     env->tlb_partial = NULL;
 
     /* Put them all onto the unused list. */
-    env->tlb_unused = &env->tlb[HPPA_BTLB_ENTRIES];
-    for (i = HPPA_BTLB_ENTRIES; i < ARRAY_SIZE(env->tlb) - 1; ++i) {
+    env->tlb_unused = &env->tlb[btlb_entries];
+    for (i = btlb_entries; i < ARRAY_SIZE(env->tlb) - 1; ++i) {
         env->tlb[i].unused_next = &env->tlb[i + 1];
     }
 
     /* Re-initialize the interval tree with only the btlb entries. */
     memset(&env->tlb_root, 0, sizeof(env->tlb_root));
-    for (i = 0; i < HPPA_BTLB_ENTRIES; ++i) {
+    for (i = 0; i < btlb_entries; ++i) {
         if (env->tlb[i].entry_valid) {
             interval_tree_insert(&env->tlb[i].itree, &env->tlb_root);
         }
@@ -473,12 +475,14 @@ void HELPER(diag_btlb)(CPUHPPAState *env)
     HPPATLBEntry *btlb;
     uint64_t virt_page;
     uint32_t *vaddr;
+    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
 
-#ifdef TARGET_HPPA64
     /* BTLBs are not supported on 64-bit CPUs */
-    env->gr[28] = -1; /* nonexistent procedure */
-    return;
-#endif
+    if (btlb_entries == 0) {
+        env->gr[28] = -1; /* nonexistent procedure */
+        return;
+    }
+
     env->gr[28] = 0; /* PDC_OK */
 
     switch (env->gr[25]) {
@@ -492,8 +496,8 @@
         } else {
             vaddr[0] = cpu_to_be32(1);
             vaddr[1] = cpu_to_be32(16 * 1024);
-            vaddr[2] = cpu_to_be32(HPPA_BTLB_FIXED);
-            vaddr[3] = cpu_to_be32(HPPA_BTLB_VARIABLE);
+            vaddr[2] = cpu_to_be32(PA10_BTLB_FIXED);
+            vaddr[3] = cpu_to_be32(PA10_BTLB_VARIABLE);
         }
         break;
     case 1:
@@ -510,7 +514,7 @@
                     (long long) virt_page << TARGET_PAGE_BITS,
                     (long long) (virt_page + len) << TARGET_PAGE_BITS,
                     (long long) virt_page, phys_page, len, slot);
-        if (slot < HPPA_BTLB_ENTRIES) {
+        if (slot < btlb_entries) {
             btlb = &env->tlb[slot];
 
             /* Force flush of possibly existing BTLB entry. */
@@ -532,7 +536,7 @@
         slot = env->gr[22];
         qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE slot %d\n",
                       slot);
-        if (slot < HPPA_BTLB_ENTRIES) {
+        if (slot < btlb_entries) {
             btlb = &env->tlb[slot];
             hppa_flush_tlb_ent(env, btlb, true);
         } else {
@@ -542,7 +546,7 @@
     case 3:
         /* Purge all BTLB entries */
         qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE_ALL\n");
-        for (slot = 0; slot < HPPA_BTLB_ENTRIES; slot++) {
+        for (slot = 0; slot < btlb_entries; slot++) {
             btlb = &env->tlb[slot];
             hppa_flush_tlb_ent(env, btlb, true);
         }
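
For readers skimming the patch, here is a small self-contained sketch of the semantics the change introduces. It mirrors the hunks above but is not QEMU code: FakeCPUState and main() are invented for illustration, and the struct stands in for CPUHPPAState with only the one bit the example needs. On a PA2.0 (64-bit) CPU the BTLB entry count becomes 0, while PA1.x CPUs keep PA10_BTLB_FIXED + PA10_BTLB_VARIABLE = 16 entries; the TLB/BTLB counts exported through fw_cfg follow from that.

#include <stdbool.h>
#include <stdio.h>

#define PA10_BTLB_FIXED     16
#define PA10_BTLB_VARIABLE  0
#define HPPA_TLB_ENTRIES    256

/* Stand-in for CPUHPPAState; illustration only. */
typedef struct {
    bool is_pa20;               /* true for a 64-bit (PA2.0) CPU */
} FakeCPUState;

/* Was: #define HPPA_BTLB_ENTRIES (HPPA_BTLB_FIXED + HPPA_BTLB_VARIABLE)
   Now: a per-CPU query that yields 0 where BTLBs are unsupported. */
static int HPPA_BTLB_ENTRIES(const FakeCPUState *env)
{
    return env->is_pa20 ? 0 : PA10_BTLB_FIXED + PA10_BTLB_VARIABLE;
}

int main(void)
{
    FakeCPUState pa11 = { .is_pa20 = false };
    FakeCPUState pa20 = { .is_pa20 = true };

    /* Same arithmetic as the fw_cfg files in hw/hppa/machine.c above. */
    printf("PA1.1: btlb_entries=%d tlb_entries=%d\n",
           HPPA_BTLB_ENTRIES(&pa11),
           HPPA_TLB_ENTRIES - HPPA_BTLB_ENTRIES(&pa11));
    printf("PA2.0: btlb_entries=%d tlb_entries=%d\n",
           HPPA_BTLB_ENTRIES(&pa20),
           HPPA_TLB_ENTRIES - HPPA_BTLB_ENTRIES(&pa20));
    return 0;
}

Built with any C99 compiler, this prints 16/240 for PA1.1 and 0/256 for PA2.0, which are the values the firmware would now read from /etc/cpu/btlb_entries and /etc/cpu/tlb_entries.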