};
struct obio_intrhand *obio_intrhand[OBIO_NINTS];
-#define INTPRI_CIU (INTPRI_CLOCK + 1)
+#define INTPRI_CIU_0 (INTPRI_CLOCK + 1)
+#define INTPRI_CIU_1 (INTPRI_CIU_0 + 1)
+#define INTPRI_CIU_2 (INTPRI_CIU_1 + 1)
+#define INTPRI_CIU_3 (INTPRI_CIU_2 + 1)
-uint64_t obio_intem;
-uint64_t obio_imask[NIPLS];
+uint64_t obio_intem[MAXCPUS];
+uint64_t obio_imask[MAXCPUS][NIPLS];
/*
* List of obio child devices.
bus_space_write_8(&obio_tag, obio_h, CIU_INT0_EN4_1, 0);
bus_space_write_8(&obio_tag, obio_h, CIU_INT1_EN4_1, 0);
- set_intr(INTPRI_CIU, CR_INT_0, obio_iointr);
+ set_intr(INTPRI_CIU_0, CR_INT_0, obio_iointr);
+ set_intr(INTPRI_CIU_1, CR_INT_1, obio_iointr);
+ set_intr(INTPRI_CIU_2, CR_INT_2, obio_iointr);
+ set_intr(INTPRI_CIU_3, CR_INT_3, obio_iointr);
register_splx_handler(obio_splx);
/*
obio_intr_establish(int irq, int level,
int (*ih_fun)(void *), void *ih_arg, const char *ih_what)
{
+ int cpuid = cpu_number();
struct obio_intrhand **p, *q, *ih;
int s;
;
*p = ih;
- obio_intem |= 1UL << irq;
+ obio_intem[cpuid] |= 1UL << irq;
obio_intr_makemasks();
splx(s); /* causes hw mask update */
void
obio_intr_makemasks()
{
+ int cpuid = cpu_number();
int irq, level;
struct intrhand *q;
uint intrlevel[OBIO_NINTS];
+ printf("%lu %s:%s:%d\n", cpu_number(), __FILE__, __func__, __LINE__);
/* First, figure out which levels each IRQ uses. */
for (irq = 0; irq < OBIO_NINTS; irq++) {
uint levels = 0;
+ printf("%lu %s:%s:%d\n", cpu_number(), __FILE__, __func__, __LINE__);
for (q = (struct intrhand *)obio_intrhand[irq]; q != NULL;
q = q->ih_next)
levels |= 1 << q->ih_level;
+ printf("%lu %s:%s:%d\n", cpu_number(), __FILE__, __func__, __LINE__);
intrlevel[irq] = levels;
}
-
+ printf("%lu %s:%s:%d\n", cpu_number(), __FILE__, __func__, __LINE__);
/*
* Then figure out which IRQs use each level.
* Note that we make sure never to overwrite imask[IPL_HIGH], in
*/
for (level = IPL_NONE; level < NIPLS; level++) {
uint64_t irqs = 0;
+ printf("%lu %s:%s:%d\n", cpu_number(), __FILE__, __func__, __LINE__);
for (irq = 0; irq < OBIO_NINTS; irq++)
if (intrlevel[irq] & (1 << level))
irqs |= 1UL << irq;
- obio_imask[level] = irqs;
+ printf("%lu %s:%s:%d\n", cpu_number(), __FILE__, __func__, __LINE__);
+ obio_imask[cpuid][level] = irqs;
+ printf("%lu %s:%s:%d\n", cpu_number(), __FILE__, __func__, __LINE__);
}
-
+ printf("%lu %s:%s:%d\n", cpu_number(), __FILE__, __func__, __LINE__);
/*
* There are tty, network and disk drivers that use free() at interrupt
* time, so vm > (tty | net | bio).
* Enforce a hierarchy that gives slow devices a better chance at not
* dropping data.
*/
- obio_imask[IPL_NET] |= obio_imask[IPL_BIO];
- obio_imask[IPL_TTY] |= obio_imask[IPL_NET];
- obio_imask[IPL_VM] |= obio_imask[IPL_TTY];
- obio_imask[IPL_CLOCK] |= obio_imask[IPL_VM];
- obio_imask[IPL_HIGH] |= obio_imask[IPL_CLOCK];
- obio_imask[IPL_IPI] |= obio_imask[IPL_HIGH];
-
+ obio_imask[cpuid][IPL_NET] |= obio_imask[cpuid][IPL_BIO];
+ obio_imask[cpuid][IPL_TTY] |= obio_imask[cpuid][IPL_NET];
+ obio_imask[cpuid][IPL_VM] |= obio_imask[cpuid][IPL_TTY];
+ obio_imask[cpuid][IPL_CLOCK] |= obio_imask[cpuid][IPL_VM];
+ obio_imask[cpuid][IPL_HIGH] |= obio_imask[cpuid][IPL_CLOCK];
+ obio_imask[cpuid][IPL_IPI] |= obio_imask[cpuid][IPL_HIGH];
+ printf("%lu %s:%s:%d\n", cpu_number(), __FILE__, __func__, __LINE__);
/*
* These are pseudo-levels.
*/
- obio_imask[IPL_NONE] = 0;
+ obio_imask[cpuid][IPL_NONE] = 0;
+ printf("%lu %s:%s:%d\n", cpu_number(), __FILE__, __func__, __LINE__);
}
/*
obio_iointr(uint32_t hwpend, struct trap_frame *frame)
{
struct cpu_info *ci = curcpu();
+ int cpuid = cpu_number();
uint64_t imr, isr, mask;
int ipl;
int bit;
struct intrhand *ih;
int rc;
+ uint64_t sum0 = CIU_INT_SUM0(cpuid * 2);
+ uint64_t en0 = CIU_INT_EN0(cpuid * 2);
- isr = bus_space_read_8(&obio_tag, obio_h, CIU_INT0_SUM0);
- imr = bus_space_read_8(&obio_tag, obio_h, CIU_INT0_EN0);
+ isr = bus_space_read_8(&obio_tag, obio_h, sum0);
+ imr = bus_space_read_8(&obio_tag, obio_h, en0);
bit = 63;
isr &= imr;
/*
* Mask all pending interrupts.
*/
- bus_space_write_8(&obio_tag, obio_h, CIU_INT0_EN0, imr & ~isr);
+ bus_space_write_8(&obio_tag, obio_h, en0, imr & ~isr);
/*
* If interrupts are spl-masked, mask them and wait for splx()
* to reenable them when necessary.
*/
- if ((mask = isr & obio_imask[frame->ipl]) != 0) {
+ if ((mask = isr & obio_imask[cpuid][frame->ipl]) != 0) {
isr &= ~mask;
imr &= ~mask;
}
/* Service higher level interrupts first */
for (lvl = NIPLS - 1; lvl != IPL_NONE; lvl--) {
- tmpisr = isr & (obio_imask[lvl] ^ obio_imask[lvl - 1]);
+ tmpisr = isr & (obio_imask[cpuid][lvl] ^ obio_imask[cpuid][lvl - 1]);
if (tmpisr == 0)
continue;
for (bitno = bit, mask = 1UL << bitno; mask != 0;
/*
* Reenable interrupts which have been serviced.
*/
- bus_space_write_8(&obio_tag, obio_h, CIU_INT0_EN0, imr);
+ bus_space_write_8(&obio_tag, obio_h, en0, imr);
}
return hwpend;
void
obio_setintrmask(int level)
{
+ int cpuid = cpu_number();
+ uint64_t en0 = CIU_INT_EN0(cpuid * 2);
+
*(volatile uint64_t *)(PHYS_TO_XKPHYS(OCTEON_CIU_BASE, CCA_NC) +
- CIU_INT0_EN0) = obio_intem & ~obio_imask[level];
+ en0) = obio_intem[cpuid] & ~obio_imask[cpuid][level];
}
#include <machine/octeon_pcmap_regs.h>
+#include <octeon/dev/obiovar.h>
+#include <octeon/dev/octeonreg.h>
+
#define btoc(x) (((vm_offset_t)(x)+PAGE_MASK)>>PAGE_SHIFT)
#define MIPS_PHYS_MASK (0x1fffffff)
#define MIPS_KSEG0_TO_PHYS(x) ((__uintptr_t)(x) & MIPS_PHYS_MASK)
if ((octeon_board_real()) &&
(realmem_bytes > OCTEON_DRAM_FIRST_256_END)) {
/* take out the upper non-cached 1/2 */
+#if 0
realmem_bytes -= OCTEON_DRAM_FIRST_256_END;
realmem_bytes &= ~(PAGE_SIZE - 1);
+#endif
/* Now map the rest of the memory */
- phys_avail[2] = 0x20000000;
- phys_avail[3] = ((uint32_t) 0x20000000 + realmem_bytes);
+ phys_avail[2] = 0x10000000;
+ phys_avail[3] = ((uint32_t) 0x10000000 + realmem_bytes);
physmem += btoc(phys_avail[3] - phys_avail[2]);
mem_layout[1].mem_first_page = atop(phys_avail[2]);
mem_layout[1].mem_last_page = atop(phys_avail[3]-1);
if (m == NULL)
return ENOMEM;
- printf("mem_layout:%p m:%p\n", mem_layout, m);
m->mem_first_page = startpfn;
m->mem_last_page = endpfn;
m->mem_freelist = freelist;
- printf("first_page:%x last_page:%x freelist:%p\n", startpfn, endpfn, freelist);
return 0;
}
bootcpu_hwinfo.type = (prid >> 8) & 0xff;
/* FPU reports itself as type 5, version 0.1... */
bootcpu_hwinfo.c1prid = bootcpu_hwinfo.c0prid;
- bootcpu_hwinfo.tlbsize = 32;
+ bootcpu_hwinfo.tlbsize = 64;
bcopy(&bootcpu_hwinfo, &curcpu()->ci_hw, sizeof(struct cpu_hwinfo));
/*
}
#ifdef MULTIPROCESSOR
+unsigned octeon_ap_boot = ~0;
+struct cpu_info *cpu_info_boot_secondary = NULL;
+
void
hw_cpu_boot_secondary(struct cpu_info *ci)
{
panic("unable to allocate idle stack\n");
ci->ci_curprocpaddr = (void *)kstack;
+ cpu_info_boot_secondary = ci;
+
+ printf("spinup ci:%p cpuid:%lu\n", ci, ci->ci_cpuid);
+
+ octeon_ap_boot = ci->ci_cpuid;
+
while (!cpuset_isset(&cpus_running, ci))
;
}
hw_cpu_hatch(struct cpu_info *ci)
{
int s;
-
+
/*
* Set curcpu address on this processor.
*/
cpu_startclock(ci);
+ printf("ci:%p cpu_number:%lu\n", ci, cpu_number());
+
ncpus++;
cpuset_add(&cpus_running, ci);
mips64_ipi_init();
-// xheart_setintrmask(0);
+ obio_setintrmask(0);
spl0();
(void)updateimask(0);
int
hw_ipi_intr_establish(int (*func)(void *), u_long cpuid)
{
+ printf("hw_ipi_intr_establish(%lu)\n", cpuid);
+ obio_intr_establish(CIU_INT_MBOX(cpuid), IPL_IPI, func,
+ (void *)cpuid, NULL);
return 0;
};
void
hw_ipi_intr_set(u_long cpuid)
{
+ printf("hw_ipi_intr_set(%lu)\n", cpuid);
+ *(uint64_t *)OCTEON_CIU_MBOX_SETX(cpuid) = 1;
}
void
hw_ipi_intr_clear(u_long cpuid)
{
+ printf("hw_ipi_intr_clear(%lu)\n", cpuid);
+ *(uint64_t *)OCTEON_CIU_MBOX_CLRX(cpuid) = 1;
}
#endif