uint64_t startpfn, endpfn;
uint32_t realmem;
extern char end[];
-
+ int i;
startpfn = atop(CKSEG0_TO_PHYS((vaddr_t)&end) + PAGE_SIZE);
endpfn = atop(96 << 20);
mem_layout[0].mem_first_page = startpfn;
realmem_bytes = (96 << 20);
}
/* phys_avail regions are in bytes */
- phys_avail[0] = (MIPS_KSEG0_TO_PHYS((vm_offset_t)&end) + PAGE_SIZE) & ~(PAGE_SIZE - 1);
+ memset((void*)phys_avail,0,sizeof(phys_avail));
+ phys_avail[0] = (MIPS_KSEG0_TO_PHYS((vm_offset_t)&end) +
+ PAGE_SIZE ) & ~(PAGE_SIZE - 1);
if (octeon_board_real()) {
if (realmem_bytes > OCTEON_DRAM_FIRST_256_END)
phys_avail[1] = OCTEON_DRAM_FIRST_256_END;
/*-
* Octeon Memory looks as follows:
* PA
- * 0000 0000 to 0x0 0000 0000 0000
- * 0FFF FFFF First 256 MB memory Maps to 0x0 0000 0FFF FFFF
- *
- * 1000 0000 to 0x1 0000 1000 0000
- * 1FFF FFFF Uncached Bu I/O space.converted to 0x1 0000 1FFF FFFF
- *
- * 2FFF FFFF to Cached 0x0 0000 2000 0000
- * FFFF FFFF all dram mem above the first 512M 0x3 FFFF FFFF FFFF
+ * First 256 MB DR0
+ * 0000 0000 0000 0000 to 0000 0000 0000 0000
+ * 0000 0000 0FFF FFFF to 0000 0000 0FFF FFFF
+ * Second 256 MB DR1
+ * 0000 0004 1000 0000 to 0000 0004 1000 0000
+ * 0000 0004 1FFF FFFF to 0000 0004 1FFF FFFF
+ * Memory above 512 MB   DR2 (up to 15.5 GB)
+ * 0000 0000 2000 0000 to 0000 0000 2000 0000
+ * 0000 0003 FFFF FFFF to 0000 0003 FFFF FFFF
*
*/
physmem = btoc(phys_avail[1] - phys_avail[0]);
- if ((octeon_board_real()) &&
- (realmem_bytes > OCTEON_DRAM_FIRST_256_END)) {
- /* take out the upper non-cached 1/2 */
-#if 0
- realmem_bytes -= OCTEON_DRAM_FIRST_256_END;
- realmem_bytes &= ~(PAGE_SIZE - 1);
-#endif
- /* Now map the rest of the memory */
- phys_avail[2] = 0x10000000;
- phys_avail[3] = ((uint32_t) 0x10000000 + realmem_bytes);
- physmem += btoc(phys_avail[3] - phys_avail[2]);
- mem_layout[1].mem_first_page = atop(phys_avail[2]);
- mem_layout[1].mem_last_page = atop(phys_avail[3]-1);
- mem_layout[1].mem_freelist = VM_FREELIST_DEFAULT;
+ if (octeon_board_real()){
+ if(realmem_bytes > OCTEON_DRAM_FIRST_256_END){
+ /* map the second 256 MB (DR1) at 0x410000000 */
+ phys_avail[2] = 0x410000000ULL;
+ phys_avail[3] = (0x410000000ULL
+ + OCTEON_DRAM_FIRST_256_END);
+ physmem += btoc(phys_avail[3] - phys_avail[2]);
+ mem_layout[1].mem_first_page = atop(phys_avail[2]);
+ mem_layout[1].mem_last_page = atop(phys_avail[3]-1);
+ mem_layout[1].mem_freelist = VM_FREELIST_DEFAULT;
+ realmem_bytes -= OCTEON_DRAM_FIRST_256_END;
+ /* Now map the rest of the memory */
+ phys_avail[4] = 0x20000000ULL;
+ phys_avail[5] = (0x20000000ULL + realmem_bytes);
+ physmem += btoc(phys_avail[5] - phys_avail[4]);
+ mem_layout[2].mem_first_page = atop(phys_avail[4]);
+ mem_layout[2].mem_last_page = atop(phys_avail[5]-1);
+ mem_layout[2].mem_freelist = VM_FREELIST_DEFAULT;
+ realmem_bytes=0;
+ }else{
+ /* Now map the rest of the memory */
+ phys_avail[2] = 0x410000000ULL;
+ phys_avail[3] = (0x410000000ULL + realmem_bytes);
+ physmem += btoc(phys_avail[3] - phys_avail[2]);
+ mem_layout[1].mem_first_page = atop(phys_avail[2]);
+ mem_layout[1].mem_last_page = atop(phys_avail[3]-1);
+ mem_layout[1].mem_freelist = VM_FREELIST_DEFAULT;
+ realmem_bytes=0;
+ }
+ }
+ realmem = physmem;
+ printf("Total DRAM Size 0x%016X\n", (uint32_t) octeon_dram);
+ for(i=0;phys_avail[i];i+=2){
+ printf("Bank %d = 0x%016lX -> 0x%016lX\n",i>>1,
+ (long)phys_avail[i], (long)phys_avail[i+1]);
+ }
+ for( i=0;mem_layout[i].mem_last_page;i++){
+ printf("mem_layout[%d] page 0x%016lX -> 0x%016lX\n",i,
+ mem_layout[i].mem_first_page,
+ mem_layout[i].mem_last_page);
}
- realmem = physmem;
- printf("Total DRAM Size %#X\n", (uint32_t) octeon_dram);
- printf("Bank 0 = %08lX -> %08lX\n", (long)phys_avail[0], (long)phys_avail[1]);
- printf("Bank 1 = %08lX -> %08lX\n", (long)phys_avail[2], (long)phys_avail[3]);
-
}
/*
firstkernpa = CKSEG0_TO_PHYS((vaddr_t)start);
lastkernpa = CKSEG0_TO_PHYS((vaddr_t)ekern);
- firstkernpage = atop(trunc_page(firstkernpa)) +
- mem_layout[0].mem_first_page - 1;
- lastkernpage = atop(round_page(lastkernpa)) +
- mem_layout[0].mem_first_page - 1;
+ firstkernpage = atop(trunc_page(firstkernpa));
+ lastkernpage = atop(round_page(lastkernpa));
fp = mem_layout[i].mem_first_page;
lp = mem_layout[i].mem_last_page;
#ifdef MULTIPROCESSOR
unsigned octeon_ap_boot = ~0;
struct cpu_info *cpu_info_boot_secondary = NULL;
-
+char _kstack[USPACE];
void
hw_cpu_boot_secondary(struct cpu_info *ci)
{
vaddr_t kstack;
+ kstack = (vaddr_t)_kstack;
+#if 0
kstack = alloc_contiguous_pages(USPACE);
if (kstack == NULL)
panic("unable to allocate idle stack\n");
+#endif
ci->ci_curprocpaddr = (void *)kstack;
cpu_info_boot_secondary = ci;
cpu_startclock(ci);
- printf("ci:%p cpu_number:%d\n", ci, cpu_number());
+ printf("%d %s:%s:%d\n", cpu_number(), __FILE__, __func__, __LINE__);
ncpus++;
cpuset_add(&cpus_running, ci);
+ printf("%d %s:%s:%d\n", cpu_number(), __FILE__, __func__, __LINE__);
mips64_ipi_init();
obio_setintrmask(0);
+ printf("%d %s:%s:%d\n", cpu_number(), __FILE__, __func__, __LINE__);
spl0();
(void)updateimask(0);
+ printf("%d %s:%s:%d\n", cpu_number(), __FILE__, __func__, __LINE__);
SCHED_LOCK(s);
cpu_switchto(NULL, sched_chooseproc());
+ printf("%d %s:%s:%d\n", cpu_number(), __FILE__, __func__, __LINE__);
}
int
#include <uvm/uvm.h>
+#ifdef CPU_OCTEON
+#include <machine/cpu.h>
+#endif
/*
* for object trees
*/
RB_GENERATE(uvm_objtree, vm_page, objt, uvm_pagecmp);
+/*
+ * PTOA(): convert a page frame number into an address the CPU can
+ * dereference.  On Octeon, physical pages may lie beyond the 32-bit
+ * KSEG0 window, so the physical address is mapped through the cached
+ * XKPHYS segment instead of using bare ptoa().
+ */
+#ifdef CPU_OCTEON
+#define PTOA(p) (PHYS_TO_XKPHYS(ptoa(p), CCA_CACHED))
+#else
+#define PTOA(p) (ptoa(p))
+#endif
+
int
uvm_pagecmp(struct vm_page *a, struct vm_page *b)
{
vm_physmem[lcv].lastpg = vm_physmem[lcv].pgs + (n - 1);
/* init and free vm_pages (we've already zeroed them) */
- paddr = ptoa(vm_physmem[lcv].start);
+ paddr = PTOA(vm_physmem[lcv].start);
for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
vm_physmem[lcv].pgs[i].phys_addr = paddr;
#ifdef __HAVE_VM_PAGE_MD
/* try from front */
if (vm_physmem[lcv].avail_start == vm_physmem[lcv].start &&
vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
- *paddrp = ptoa(vm_physmem[lcv].avail_start);
+ *paddrp = PTOA(vm_physmem[lcv].avail_start);
vm_physmem[lcv].avail_start++;
vm_physmem[lcv].start++;
/* nothing left? nuke it */
/* try from rear */
if (vm_physmem[lcv].avail_end == vm_physmem[lcv].end &&
vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
- *paddrp = ptoa(vm_physmem[lcv].avail_end - 1);
+ *paddrp = PTOA(vm_physmem[lcv].avail_end - 1);
vm_physmem[lcv].avail_end--;
vm_physmem[lcv].end--;
/* nothing left? nuke it */
if (vm_physmem[lcv].avail_start >= vm_physmem[lcv].avail_end)
continue; /* nope */
- *paddrp = ptoa(vm_physmem[lcv].avail_start);
+ *paddrp = PTOA(vm_physmem[lcv].avail_start);
vm_physmem[lcv].avail_start++;
/* truncate! */
vm_physmem[lcv].start = vm_physmem[lcv].avail_start;
return;
}
/* init phys_addr and free pages, XXX uvmexp.npages */
- for (lcv = 0, paddr = ptoa(start); lcv < npages;
+ for (lcv = 0, paddr = PTOA(start); lcv < npages;
lcv++, paddr += PAGE_SIZE) {
pgs[lcv].phys_addr = paddr;
#ifdef __HAVE_VM_PAGE_MD