/*
 * HPPA memory access helper routines
 *
 * Copyright (c) 2017 Helge Deller
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "hw/core/cpu.h"
#include "trace.h"
hwaddr hppa_abs_to_phys_pa2_w1(vaddr addr)
{
    if (likely(extract64(addr, 58, 4) != 0xf)) {
        /* Memory address space */
        return addr & MAKE_64BIT_MASK(0, 62);
    }
    if (extract64(addr, 54, 4) != 0) {
        /* I/O address space */
        return addr | MAKE_64BIT_MASK(62, 2);
    }
    /* PDC address space */
    return (addr & MAKE_64BIT_MASK(0, 54)) | MAKE_64BIT_MASK(60, 4);
}
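/*
 * Illustrative decode of the wide (W=1) absolute address map, derived
 * from the extract64/MAKE_64BIT_MASK logic above: an address whose
 * bits [61:58] are not 0xf is plain memory and is truncated to 62 bits;
 * with bits [61:58] == 0xf, a non-zero nibble at [57:54] selects the
 * I/O space (returned with the top two bits set), while an all-zero
 * nibble selects the PDC space (folded to 54 bits and tagged 0xf in
 * bits [63:60]).
 */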
hwaddr hppa_abs_to_phys_pa2_w0(vaddr addr)
{
    if (likely(extract32(addr, 28, 4) != 0xf)) {
        /* Memory address space */
        return addr & MAKE_64BIT_MASK(0, 32);
    }
    if (extract32(addr, 24, 4) != 0) {
        /* I/O address space */
        return addr | MAKE_64BIT_MASK(32, 32);
    }
    /* PDC address space */
    return (addr & MAKE_64BIT_MASK(0, 24)) | MAKE_64BIT_MASK(60, 4);
}
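/*
 * The narrow (W=0) decode mirrors the wide one at 32 bits.  E.g.
 * (values illustrative, derived from the checks above) 0xf1000000 has
 * a non-zero nibble at bits [27:24], so it is routed to I/O and
 * returned with the upper 32 bits set, whereas a 0xf0xxxxxx address
 * folds into the PDC space.
 */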
static hwaddr hppa_abs_to_phys(CPUHPPAState *env, vaddr addr)
{
    if (!hppa_is_pa20(env)) {
        return addr;
    } else if (env->psw & PSW_W) {
        return hppa_abs_to_phys_pa2_w1(addr);
    } else {
        return hppa_abs_to_phys_pa2_w0(addr);
    }
}
static HPPATLBEntry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
{
    IntervalTreeNode *i = interval_tree_iter_first(&env->tlb_root, addr, addr);

    if (i) {
        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);
        trace_hppa_tlb_find_entry(env, ent, ent->entry_valid,
                                  ent->itree.start, ent->itree.last, ent->pa);
        return ent;
    }
    trace_hppa_tlb_find_entry_not_found(env, addr);
    return NULL;
}
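/*
 * Note: querying the interval tree with the degenerate range
 * [addr, addr] returns any entry whose [start, last] span covers addr,
 * so the superpage entries inserted by the PA2.0 helpers below are
 * found by any address inside them, not just the base address.
 */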
static void hppa_flush_tlb_ent(CPUHPPAState *env, HPPATLBEntry *ent,
                               bool force_flush_btlb)
{
    CPUState *cs = env_cpu(env);
    bool is_btlb;

    if (!ent->entry_valid) {
        return;
    }

    trace_hppa_tlb_flush_ent(env, ent, ent->itree.start,
                             ent->itree.last, ent->pa);

    tlb_flush_range_by_mmuidx(cs, ent->itree.start,
                              ent->itree.last - ent->itree.start + 1,
                              HPPA_MMU_FLUSH_MASK, TARGET_LONG_BITS);

    /* Never clear BTLBs, unless forced to do so. */
    is_btlb = ent < &env->tlb[HPPA_BTLB_ENTRIES(env)];
    if (is_btlb && !force_flush_btlb) {
        return;
    }

    interval_tree_remove(&ent->itree, &env->tlb_root);
    memset(ent, 0, sizeof(*ent));

    if (!is_btlb) {
        ent->unused_next = env->tlb_unused;
        env->tlb_unused = ent;
    }
}
static void hppa_flush_tlb_range(CPUHPPAState *env, vaddr va_b, vaddr va_e)
{
    IntervalTreeNode *i, *n;

    i = interval_tree_iter_first(&env->tlb_root, va_b, va_e);
    for (; i ; i = n) {
        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);

        /*
         * Find the next entry now: In the normal case the current entry
         * will be removed, but in the BTLB case it will remain.
         */
        n = interval_tree_iter_next(i, va_b, va_e);
        hppa_flush_tlb_ent(env, ent, false);
    }
}
static HPPATLBEntry *hppa_alloc_tlb_ent(CPUHPPAState *env)
{
    HPPATLBEntry *ent = env->tlb_unused;

    if (ent == NULL) {
        uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
        uint32_t i = env->tlb_last;

        if (i < btlb_entries || i >= ARRAY_SIZE(env->tlb)) {
            i = btlb_entries;
        }
        env->tlb_last = i + 1;

        ent = &env->tlb[i];
        hppa_flush_tlb_ent(env, ent, false);
    }

    env->tlb_unused = ent->unused_next;
    return ent;
}
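/*
 * When the free list is empty, the allocator above evicts entries in
 * round-robin order: tlb_last walks the non-BTLB portion of env->tlb
 * and wraps back to the first slot past the BTLB range, so the oldest
 * insertions are recycled first.
 */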
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                              int type, hwaddr *pphys, int *pprot,
                              HPPATLBEntry **tlb_entry)
{
    hwaddr phys;
    int prot, r_prot, w_prot, x_prot, priv;
    HPPATLBEntry *ent;
    int ret = -1;

    if (tlb_entry) {
        *tlb_entry = NULL;
    }

    /* Virtual translation disabled.  Direct map virtual to physical.  */
    if (mmu_idx == MMU_PHYS_IDX) {
        phys = addr;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        goto egress;
    }

    /* Find a valid tlb entry that matches the virtual address.  */
    ent = hppa_find_tlb(env, addr);
    if (ent == NULL) {
        phys = 0;
        prot = 0;
        ret = (type == PAGE_EXEC) ? EXCP_ITLB_MISS : EXCP_DTLB_MISS;
        goto egress;
    }

    if (tlb_entry) {
        *tlb_entry = ent;
    }

    /* We now know the physical address.  */
    phys = ent->pa + (addr - ent->itree.start);

    /* Map TLB access_rights field to QEMU protection.  */
    priv = MMU_IDX_TO_PRIV(mmu_idx);
    r_prot = (priv <= ent->ar_pl1) * PAGE_READ;
    w_prot = (priv <= ent->ar_pl2) * PAGE_WRITE;
    x_prot = (ent->ar_pl2 <= priv && priv <= ent->ar_pl1) * PAGE_EXEC;
    switch (ent->ar_type) {
    case 0: /* read-only: data page */
        prot = r_prot;
        break;
    case 1: /* read/write: dynamic data page */
        prot = r_prot | w_prot;
        break;
    case 2: /* read/execute: normal code page */
        prot = r_prot | x_prot;
        break;
    case 3: /* read/write/execute: dynamic code page */
        prot = r_prot | w_prot | x_prot;
        break;
    default: /* execute: promote to privilege level type & 3 */
        prot = x_prot;
        break;
    }
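    /*
     * Worked example (illustrative values): a dynamic data page
     * (ar_type 1) with ar_pl1 = 3 and ar_pl2 = 0 is readable at any
     * privilege level (priv <= 3) but writable only at the
     * most-privileged level 0 (priv <= 0), matching the usual
     * kernel-writable/user-readable setup.
     */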
    /* access_id == 0 means public page and no check is performed */
    if (ent->access_id && MMU_IDX_TO_P(mmu_idx)) {
        /* If bits [31:1] match, and bit 0 is set, suppress write.  */
        int match = ent->access_id * 2 + 1;

        if (match == env->cr[CR_PID1] || match == env->cr[CR_PID2] ||
            match == env->cr[CR_PID3] || match == env->cr[CR_PID4]) {
            prot &= PAGE_READ | PAGE_EXEC;
            if (type == PAGE_WRITE) {
                ret = EXCP_DMPI;
                goto egress;
            }
        }
    }
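    /*
     * E.g. (illustrative values): access_id 0x123 yields match 0x247,
     * i.e. the access_id shifted left by one with the write-disable
     * bit 0 set; a PID register holding exactly 0x247 therefore grants
     * read/execute but suppresses write, per the comparison above.
     */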
    /* No guest access type indicates a non-architectural access from
       within QEMU.  Bypass checks for access, D, B and T bits.  */
    if (type == 0) {
        goto egress;
    }

    if (unlikely(!(prot & type))) {
        /* The access isn't allowed -- Inst/Data Memory Protection Fault.  */
        ret = (type & PAGE_EXEC) ? EXCP_IMP : EXCP_DMAR;
        goto egress;
    }
    /* In reverse priority order, check for conditions which raise faults.
       As we go, remove PROT bits that cover the condition we want to check.
       In this way, the resulting PROT will force a re-check of the
       architectural TLB entry for the next access.  */
    if (unlikely(!ent->d)) {
        if (type & PAGE_WRITE) {
            /* The D bit is not set -- TLB Dirty Bit Fault.  */
            ret = EXCP_TLB_DIRTY;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->b)) {
        if (type & PAGE_WRITE) {
            /* The B bit is set -- Data Memory Break Fault.  */
            ret = EXCP_DMB;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->t)) {
        if (!(type & PAGE_EXEC)) {
            /* The T bit is set -- Page Reference Fault.  */
            ret = EXCP_PAGE_REF;
        }
        prot &= PAGE_EXEC;
    }
 egress:
    *pphys = phys = hppa_abs_to_phys(env, phys);
    *pprot = prot;
    trace_hppa_tlb_get_physical_address(env, ret, prot, addr, phys);
    return ret;
}
hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    hwaddr phys;
    int prot, excp;

    /* If the (data) mmu is disabled, bypass translation.  */
    /* ??? We really ought to know if the code mmu is disabled too,
       in order to get the correct debugging dumps.  */
    if (!(cpu->env.psw & PSW_D)) {
        return hppa_abs_to_phys(&cpu->env, addr);
    }

    excp = hppa_get_physical_address(&cpu->env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot, NULL);

    /* Since we're translating for debugging, the only error that is a
       hard error is no translation at all.  Otherwise, while a real cpu
       access might not have permission, the debugger does.  */
    return excp == EXCP_DTLB_MISS ? -1 : phys;
}
bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                       MMUAccessType type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    CPUHPPAState *env = &cpu->env;
    HPPATLBEntry *ent;
    int prot, excp, a_prot;
    hwaddr phys;

    switch (type) {
    case MMU_INST_FETCH:
        a_prot = PAGE_EXEC;
        break;
    case MMU_DATA_STORE:
        a_prot = PAGE_WRITE;
        break;
    default:
        a_prot = PAGE_READ;
        break;
    }

    excp = hppa_get_physical_address(env, addr, mmu_idx,
                                     a_prot, &phys, &prot, &ent);
    if (unlikely(excp >= 0)) {
        if (probe) {
            return false;
        }
        trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);
        /* Failure.  Raise the indicated exception.  */
        cs->exception_index = excp;
        if (cpu->env.psw & PSW_Q) {
            /* ??? Needs tweaking for hppa64.  */
            cpu->env.cr[CR_IOR] = addr;
            cpu->env.cr[CR_ISR] = addr >> 32;
        }
        cpu_loop_exit_restore(cs, retaddr);
    }

    trace_hppa_tlb_fill_success(env, addr & TARGET_PAGE_MASK,
                                phys & TARGET_PAGE_MASK, size, type, mmu_idx);

    /*
     * Success!  Store the translation into the QEMU TLB.
     * Note that we always install a single-page entry, because that
     * is what works best with softmmu -- anything else will trigger
     * the large page protection mask.  We do not require this,
     * because we record the large page here in the hppa tlb.
     */
    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
    return true;
}
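/*
 * Design note: because a PA2.0 superpage is recorded whole in the hppa
 * TLB but installed into the QEMU softmmu TLB one page at a time,
 * accesses to other pages of the same superpage simply refault into
 * this function and are refilled from the same entry.
 */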
/* Insert (Insn/Data) TLB Address.  Note this is PA 1.1 only.  */
void HELPER(itlba_pa11)(CPUHPPAState *env, target_ulong addr, target_ulong reg)
{
    HPPATLBEntry *ent;

    /* Zap any old entries covering ADDR. */
    addr &= TARGET_PAGE_MASK;
    hppa_flush_tlb_range(env, addr, addr + TARGET_PAGE_SIZE - 1);

    ent = env->tlb_partial;
    if (ent == NULL) {
        ent = hppa_alloc_tlb_ent(env);
        env->tlb_partial = ent;
    }

    /* Note that ent->entry_valid == 0 already.  */
    ent->itree.start = addr;
    ent->itree.last = addr + TARGET_PAGE_SIZE - 1;
    ent->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
    trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
}
static void set_access_bits_pa11(CPUHPPAState *env, HPPATLBEntry *ent,
                                 target_ulong reg)
{
    ent->access_id = extract32(reg, 1, 18);
    ent->u = extract32(reg, 19, 1);
    ent->ar_pl2 = extract32(reg, 20, 2);
    ent->ar_pl1 = extract32(reg, 22, 2);
    ent->ar_type = extract32(reg, 24, 3);
    ent->b = extract32(reg, 27, 1);
    ent->d = extract32(reg, 28, 1);
    ent->t = extract32(reg, 29, 1);
    ent->entry_valid = 1;

    interval_tree_insert(&ent->itree, &env->tlb_root);
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u, ent->ar_pl2,
                         ent->ar_pl1, ent->ar_type, ent->b, ent->d, ent->t);
}
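/*
 * Layout of the PA1.1 protection word as decoded above (bit 0 is the
 * least significant bit):
 *
 *   [29] T   [28] D   [27] B   [26:24] ar_type
 *   [23:22] ar_pl1    [21:20] ar_pl2   [19] U
 *   [18:1]  access_id
 */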
/* Insert (Insn/Data) TLB Protection.  Note this is PA 1.1 only.  */
void HELPER(itlbp_pa11)(CPUHPPAState *env, target_ulong addr, target_ulong reg)
{
    HPPATLBEntry *ent = env->tlb_partial;

    if (ent) {
        env->tlb_partial = NULL;
        if (ent->itree.start <= addr && addr <= ent->itree.last) {
            set_access_bits_pa11(env, ent, reg);
            return;
        }
    }
    qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
}
static void itlbt_pa20(CPUHPPAState *env, target_ulong r1,
                       target_ulong r2, vaddr va_b)
{
    HPPATLBEntry *ent;
    vaddr va_e;
    uint64_t va_size;
    int mask_shift;

    mask_shift = 2 * (r1 & 0xf);
    va_size = TARGET_PAGE_SIZE << mask_shift;
    va_b &= -va_size;
    va_e = va_b + va_size - 1;

    hppa_flush_tlb_range(env, va_b, va_e);
    ent = hppa_alloc_tlb_ent(env);

    ent->itree.start = va_b;
    ent->itree.last = va_e;
    ent->pa = (r1 << 7) & (TARGET_PAGE_MASK << mask_shift);
    ent->t = extract64(r2, 61, 1);
    ent->d = extract64(r2, 60, 1);
    ent->b = extract64(r2, 59, 1);
    ent->ar_type = extract64(r2, 56, 3);
    ent->ar_pl1 = extract64(r2, 54, 2);
    ent->ar_pl2 = extract64(r2, 52, 2);
    ent->u = extract64(r2, 51, 1);
    /* o = bit 50 */
    /* p = bit 49 */
    ent->access_id = extract64(r2, 1, 31);
    ent->entry_valid = 1;

    interval_tree_insert(&ent->itree, &env->tlb_root);
    trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u,
                         ent->ar_pl2, ent->ar_pl1, ent->ar_type,
                         ent->b, ent->d, ent->t);
}
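/*
 * Encoding note, derived from the arithmetic above: the low four bits
 * of r1 give the superpage size as a power of four pages, so
 * r1 & 0xf == 0 inserts a single page, 1 inserts 4 pages, 2 inserts
 * 16 pages, and so on; va_b is aligned down to that size before
 * insertion.
 */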
void HELPER(idtlbt_pa20)(CPUHPPAState *env, target_ulong r1, target_ulong r2)
{
    vaddr va_b = deposit64(env->cr[CR_IOR], 32, 32, env->cr[CR_ISR]);
    itlbt_pa20(env, r1, r2, va_b);
}

void HELPER(iitlbt_pa20)(CPUHPPAState *env, target_ulong r1, target_ulong r2)
{
    vaddr va_b = deposit64(env->cr[CR_IIAOQ], 32, 32, env->cr[CR_IIASQ]);
    itlbt_pa20(env, r1, r2, va_b);
}
/* Purge (Insn/Data) TLB. */
static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUHPPAState *env = cpu_env(cpu);
    vaddr start = data.target_ptr;
    vaddr end;

    /*
     * PA2.0 allows a range of pages encoded into GR[b], which we have
     * copied into the bottom bits of the otherwise page-aligned address.
     * PA1.x will always provide zero here, for a single page flush.
     */
    end = start & 0xf;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_SIZE << (2 * end);
    end = start + end - 1;

    hppa_flush_tlb_range(env, start, end);
}
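/*
 * E.g. an encoded low nibble of 2 expands to TARGET_PAGE_SIZE << 4,
 * i.e. a 16-page flush range starting at the page-aligned address
 * (example derived from the shift arithmetic above).
 */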
/* This is local to the current cpu. */
void HELPER(ptlb_l)(CPUHPPAState *env, target_ulong addr)
{
    trace_hppa_tlb_ptlb_local(env);
    ptlb_work(env_cpu(env), RUN_ON_CPU_TARGET_PTR(addr));
}

/* This is synchronous across all processors.  */
void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
{
    CPUState *src = env_cpu(env);
    CPUState *cpu;
    bool wait = false;

    trace_hppa_tlb_ptlb(env);
    run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, ptlb_work, data);
            wait = true;
        }
    }
    if (wait) {
        async_safe_run_on_cpu(src, ptlb_work, data);
    } else {
        ptlb_work(src, data);
    }
}
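/*
 * Design note: remote CPUs are flushed asynchronously; when any exist,
 * the initiating CPU queues its own flush with async_safe_run_on_cpu(),
 * which runs only once all vCPUs have left their execution loops, so
 * the purge appears synchronous to the guest.
 */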
void hppa_ptlbe(CPUHPPAState *env)
{
    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
    uint32_t i;

    /* Zap the (non-btlb) tlb entries themselves. */
    memset(&env->tlb[btlb_entries], 0,
           sizeof(env->tlb) - btlb_entries * sizeof(env->tlb[0]));
    env->tlb_last = btlb_entries;
    env->tlb_partial = NULL;

    /* Put them all onto the unused list.  */
    env->tlb_unused = &env->tlb[btlb_entries];
    for (i = btlb_entries; i < ARRAY_SIZE(env->tlb) - 1; ++i) {
        env->tlb[i].unused_next = &env->tlb[i + 1];
    }

    /* Re-initialize the interval tree with only the btlb entries. */
    memset(&env->tlb_root, 0, sizeof(env->tlb_root));
    for (i = 0; i < btlb_entries; ++i) {
        if (env->tlb[i].entry_valid) {
            interval_tree_insert(&env->tlb[i].itree, &env->tlb_root);
        }
    }

    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK);
}
/* Purge (Insn/Data) TLB entry.  This affects an implementation-defined
   number of pages/entries (we choose all), and is local to the cpu.  */
void HELPER(ptlbe)(CPUHPPAState *env)
{
    trace_hppa_tlb_ptlbe(env);
    qemu_log_mask(CPU_LOG_MMU, "FLUSH ALL TLB ENTRIES\n");
    hppa_ptlbe(env);
}

void cpu_hppa_change_prot_id(CPUHPPAState *env)
{
    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_P_MASK);
}

void HELPER(change_prot_id)(CPUHPPAState *env)
{
    cpu_hppa_change_prot_id(env);
}
target_ulong HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
{
    hwaddr phys;
    int prot, excp;

    excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot, NULL);
    if (excp >= 0) {
        if (env->psw & PSW_Q) {
            /* ??? Needs tweaking for hppa64.  */
            env->cr[CR_IOR] = addr;
            env->cr[CR_ISR] = addr >> 32;
        }
        if (excp == EXCP_DTLB_MISS) {
            excp = EXCP_NA_DTLB_MISS;
        }
        trace_hppa_tlb_lpa_failed(env, addr);
        hppa_dynamic_excp(env, excp, GETPC());
    }
    trace_hppa_tlb_lpa_success(env, addr, phys);
    return phys;
}
/* Return the ar_type of the TLB at VADDR, or -1.  */
int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr)
{
    HPPATLBEntry *ent = hppa_find_tlb(env, vaddr);
    return ent ? ent->ar_type : -1;
}
/*
 * diag_btlb() emulates the PDC PDC_BLOCK_TLB firmware call to
 * allow operating systems to modify the Block TLB (BTLB) entries.
 * For implementation details see page 1-13 in
 * https://parisc.wiki.kernel.org/images-parisc/e/ef/Pdc11-v0.96-Ch1-procs.pdf
 */
void HELPER(diag_btlb)(CPUHPPAState *env)
{
    unsigned int phys_page, len, slot;
    int mmu_idx = cpu_mmu_index(env, 0);
    uintptr_t ra = GETPC();
    HPPATLBEntry *btlb;
    uint64_t virt_page;
    uint32_t *vaddr;
    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);

    /* BTLBs are not supported on 64-bit CPUs */
    if (btlb_entries == 0) {
        env->gr[28] = -1; /* nonexistent procedure */
        return;
    }

    env->gr[28] = 0; /* PDC_OK */

    switch (env->gr[25]) {
    case 0:
        /* return BTLB parameters */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INFO\n");
        vaddr = probe_access(env, env->gr[24], 4 * sizeof(target_ulong),
                             MMU_DATA_STORE, mmu_idx, ra);
        if (vaddr == NULL) {
            env->gr[28] = -10; /* invalid argument */
        } else {
            vaddr[0] = cpu_to_be32(1);
            vaddr[1] = cpu_to_be32(16 * 1024);
            vaddr[2] = cpu_to_be32(PA10_BTLB_FIXED);
            vaddr[3] = cpu_to_be32(PA10_BTLB_VARIABLE);
        }
        break;
    case 1:
        /* insert BTLB entry */
        virt_page = env->gr[24];        /* upper 32 bits */
        virt_page <<= 32;
        virt_page |= env->gr[23];       /* lower 32 bits */
        phys_page = env->gr[22];
        len = env->gr[21];
        slot = env->gr[19];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INSERT "
                    "0x%08llx-0x%08llx: vpage 0x%llx for phys page 0x%04x len %d "
                    "into slot %d\n",
                    (long long) virt_page << TARGET_PAGE_BITS,
                    (long long) (virt_page + len) << TARGET_PAGE_BITS,
                    (long long) virt_page, phys_page, len, slot);
        if (slot < btlb_entries) {
            btlb = &env->tlb[slot];

            /* Force flush of possibly existing BTLB entry. */
            hppa_flush_tlb_ent(env, btlb, true);

            /* Create new BTLB entry */
            btlb->itree.start = virt_page << TARGET_PAGE_BITS;
            btlb->itree.last = btlb->itree.start + len * TARGET_PAGE_SIZE - 1;
            btlb->pa = phys_page << TARGET_PAGE_BITS;
            set_access_bits_pa11(env, btlb, env->gr[20]);
            btlb->t = 0;
            btlb->d = 1;
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 2:
        /* Purge BTLB entry */
        slot = env->gr[22];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE slot %d\n",
                                    slot);
        if (slot < btlb_entries) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 3:
        /* Purge all BTLB entries */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE_ALL\n");
        for (slot = 0; slot < btlb_entries; slot++) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        }
        break;
    default:
        env->gr[28] = -2; /* nonexistent option */
        break;
    }
}