#include "aplos.h" #define MAX_REGIONS 1024 #define INVALID_PTR UINT64_MAX #define PADDR_MASK 0x000FFFFFFFFFF000 struct region { uint64_t start; uint64_t size; uint64_t next; int available : 1; }; struct region_map { uint64_t used; /* Chain of used indices in the regions table */ uint64_t unused; /* Chain of unused indices in the regions table */ struct region regions[MAX_REGIONS]; }; static struct region_map physical; static struct region_map virtual; static uint64_t physbase; static void init_map(struct region_map *, bool); static void mark_region(struct region_map *, uint64_t, uint64_t, bool); static uint64_t lookup(struct region_map *, uint64_t); static void delete_node(struct region_map *, uint64_t); static uint64_t add_node(struct region_map *, uint64_t, uint64_t, bool); static void debug_region_map(struct region_map *); static void print_size(uint64_t); static uint64_t allocate_virtual(uint64_t); static void update_pagetables(uint64_t, uint64_t, uint64_t, bool); static uint64_t *table_ptr(uint64_t *, uint16_t); static uint64_t encode_entry(uint64_t, bool); static uint64_t get_physical_page(void); void setup_paging(struct boot_info *info) { print(u8"Setting up paging. All usable physical memory is mapped starting at %p\n", info->physbase); physbase = info->physbase; init_map(&physical, false); init_map(&virtual, true); for(uint64_t i = 0; i < info->memmap_count; i++) mark_region(&physical, info->memmaps[i].start, info->memmaps[i].size, true); debug_region_map(&physical); /* TODO fill in the used regions of virtual memory, by inspection the page tables that was setup for us * by the bootloader. */ mark_region(&virtual, 0, PAGE_SIZE, false); /* don't use the first page of virtual memory */ } bool page_fault_handler(uint32_t code) { int write = code&0x2; uint64_t addr = get_cr2(); print(u8"Page fault trying to %s %p\n", write ? u8"write to" : u8"read from", addr); return false; /* We didn't actually handle it */ } uint64_t map_page(uint64_t paddr, uint64_t pagecount, bool cacheable) { /* Find a region of virtual memory which isn't in use yet, and which is large enough */ uint64_t offset = paddr % PAGE_SIZE; if(offset != 0){ pagecount++; paddr -= offset; } uint64_t vaddr = allocate_virtual(pagecount); update_pagetables(paddr, vaddr, pagecount, cacheable); return vaddr + offset; } static void init_map(struct region_map *map, bool available) { map->used = INVALID_PTR; map->unused = 0; for(uint64_t p = 0; p < MAX_REGIONS-1; p++) map->regions[p].next = p+1; map->regions[MAX_REGIONS-1].next = INVALID_PTR; add_node(map, 0, UINT64_MAX, available); } static void mark_region(struct region_map *map, uint64_t addr, uint64_t size, bool available) { uint64_t p = lookup(map, addr); struct region *r = &map->regions[p]; if((r->start <= addr) && (r->start+r->size >= addr+size) && (r->available != available)){ /* Entirely within a current region. Split the current one up. 
		uint64_t oldstart = r->start;
		uint64_t oldend = r->start + r->size;
		delete_node(map, p);
		/* r is stale from here on; work from the saved bounds and skip empty pieces */
		if(addr > oldstart)
			add_node(map, oldstart, addr - oldstart, !available);
		add_node(map, addr, size, available);
		if(oldend > addr + size)
			add_node(map, addr + size, oldend - (addr + size), !available);
	}else
		panic();	/* I don't want to think about this right now */
}

static uint64_t
lookup(struct region_map *map, uint64_t addr)
{
	uint64_t p;
	struct region *r;

	for(p = map->used; p != INVALID_PTR; p = r->next){
		r = &map->regions[p];
		if((r->start <= addr) && (r->start+r->size > addr))
			return p;
	}
	panic();
}

static void
delete_node(struct region_map *map, uint64_t p)
{
	uint64_t x;
	uint64_t prev = INVALID_PTR;

	for(x = map->used; x != INVALID_PTR; prev = x, x = map->regions[x].next){
		if(x == p){
			/* Unlink from the used chain... */
			if(prev != INVALID_PTR)
				map->regions[prev].next = map->regions[x].next;
			else
				map->used = map->regions[x].next;
			/* ...and push onto the unused chain */
			map->regions[x].next = map->unused;
			map->unused = x;
			return;
		}
	}
	panic();
}

static uint64_t
add_node(struct region_map *map, uint64_t start, uint64_t size, bool available)
{
	struct region *r;
	uint64_t p;
	uint64_t prev = INVALID_PTR;

	/* Keep the used chain sorted by start address */
	for(p = map->used; p != INVALID_PTR; prev = p, p = map->regions[p].next){
		if(map->regions[p].start > start)
			break;
	}
	if(map->unused == INVALID_PTR)
		panic();	/* regions table is full */
	uint64_t new = map->unused;
	if(prev == INVALID_PTR){
		/* Insert at the head of the used chain */
		r = &map->regions[map->unused];
		uint64_t tmp = map->used;
		map->used = map->unused;
		map->unused = r->next;
		r->next = tmp;
	}else{
		/* Insert after prev */
		r = &map->regions[map->unused];
		map->unused = r->next;
		r->next = map->regions[prev].next;
		map->regions[prev].next = new;
	}
	r->start = start;
	r->size = size;
	r->available = available;
	return new;
}

static void
debug_region_map(struct region_map *map)
{
	uint64_t available = 0;

	for(uint64_t p = map->used; p != INVALID_PTR; p = map->regions[p].next){
		struct region *r = &map->regions[p];
		if(r->available)
			available += r->size;
	}
	print(u8"Total available memory: ");
	print_size(available);
	print(u8"\n");
	for(uint64_t p = map->used; p != INVALID_PTR; p = map->regions[p].next){
		struct region *r = &map->regions[p];
		if(r->available){
			print(u8"[%p - %p] (", r->start, r->start+r->size);
			print_size(r->size);
			print(u8")\n");
		}
	}
}

static void
print_size(uint64_t size)
{
	uint64_t gb, mb, kb, b;

	gb = size >> 30;
	size -= gb << 30;
	mb = size >> 20;
	size -= mb << 20;
	kb = size >> 10;
	size -= kb << 10;
	b = size;

	int printed = 0;
#define P(what, spec, force) \
	if(what || (force && !printed)){ \
		if(printed) \
			print(u8" + "); \
		printed = 1; \
		print(u8"%u64 " spec, what); \
	}
	P(gb, "GB", 0);
	P(mb, "MB", 0);
	P(kb, "KB", 0);
	P(b, "B", 1);
#undef P
}

static uint64_t
allocate_virtual(uint64_t pagecount)
{
	uint64_t size = PAGE_SIZE * pagecount;

	/* First-fit search for an available virtual region that is large enough */
	for(uint64_t p = virtual.used; p != INVALID_PTR; p = virtual.regions[p].next){
		struct region *r = &virtual.regions[p];
		if(r->available && r->size >= size){
			uint64_t vaddr = r->start;
			mark_region(&virtual, vaddr, size, false);
			return vaddr;
		}
	}
	panic();
}

static void
update_pagetables(uint64_t paddr, uint64_t vaddr, uint64_t pagecount, bool cacheable)
{
	uint64_t cr3 = get_cr3();
	/* Treat CR3 as a one-entry table so table_ptr can translate it for us */
	uint64_t *pml4 = table_ptr(&cr3, 0);

	for(uint64_t i = 0; i < pagecount; i++){
		/* Each of the four page-table levels is indexed by 9 bits of the virtual address */
		uint16_t pml4o = (vaddr >> 39) & 0x1FF;
		uint16_t pdpo = (vaddr >> 30) & 0x1FF;
		uint16_t pdeo = (vaddr >> 21) & 0x1FF;
		uint16_t pteo = (vaddr >> 12) & 0x1FF;
		uint64_t *pdp = table_ptr(pml4, pml4o);
		uint64_t *pde = table_ptr(pdp, pdpo);
		uint64_t *pte = table_ptr(pde, pdeo);
		pte[pteo] = encode_entry(paddr, cacheable);
		paddr += PAGE_SIZE;
		vaddr += PAGE_SIZE;
	}
}

static uint64_t *
table_ptr(uint64_t *table, uint16_t index)
{
	uint64_t v = table[index];
	if(v == 0){
		/* Empty entry: allocate and zero the next-level table, then point the entry at it */
		v = get_physical_page();
		memset((void*)(v + physbase), 0, PAGE_SIZE);
		table[index] = encode_entry(v, true);
	}else
		v &= PADDR_MASK;	/* strip the flag bits, keep only the physical address */
	return (uint64_t *)(v + physbase);
}

static uint64_t
encode_entry(uint64_t paddr, bool cacheable)
{
	uint64_t v = 0;
	v |= (paddr & PADDR_MASK);
	v |= 0x3;	/* Present + RW */
	if(!cacheable)
		v |= 0x10;	/* Cache disable */
	return v;
}

static uint64_t
get_physical_page(void)
{
	/* TODO: almost identical to allocate_virtual; merge them. */
	for(uint64_t p = physical.used; p != INVALID_PTR; p = physical.regions[p].next){
		struct region *r = &physical.regions[p];
		if(r->available && r->size >= PAGE_SIZE){
			uint64_t paddr = r->start;
			mark_region(&physical, paddr, PAGE_SIZE, false);
			return paddr;
		}
	}
	panic();
}
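
/*
 * Usage sketch (illustrative only, not part of this file's build): a driver
 * that needs an uncacheable view of a device's MMIO window could map it with
 * map_page() and poke registers through the returned virtual address. The
 * physical address below is made up for the example.
 *
 *	uint64_t base = map_page(0xFEBC0000, 1, false);
 *	volatile uint32_t *regs = (volatile uint32_t *)base;
 *	regs[0] = 1;
 */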