Diffstat (limited to 'src/paging.c')
-rw-r--r--  src/paging.c  108
1 file changed, 105 insertions, 3 deletions
diff --git a/src/paging.c b/src/paging.c
index ecbd647..9fdf6ea 100644
--- a/src/paging.c
+++ b/src/paging.c
@@ -2,6 +2,7 @@
#define MAX_REGIONS 1024
#define INVALID_PTR UINT64_MAX
+#define PADDR_MASK 0x000FFFFFFFFFF000ULL /* bits 12-51 of an entry hold the physical address */
struct region
{
@@ -21,6 +22,8 @@ struct region_map
static struct region_map physical;
static struct region_map virtual;
+static uint64_t physbase;
+
static void init_map(struct region_map *, bool);
static void mark_region(struct region_map *, uint64_t, uint64_t, bool);
static uint64_t lookup(struct region_map *, uint64_t);
@@ -28,11 +31,17 @@ static void delete_node(struct region_map *, uint64_t);
static uint64_t add_node(struct region_map *, uint64_t, uint64_t, bool);
static void debug_region_map(struct region_map *);
static void print_size(uint64_t);
+static uint64_t allocate_virtual(uint64_t);
+static void update_pagetables(uint64_t, uint64_t, uint64_t, bool);
+static uint64_t *table_ptr(uint64_t *, uint16_t);
+static uint64_t encode_entry(uint64_t, bool);
+static uint64_t get_physical_page(void);
void
setup_paging(struct boot_info *info)
{
print(u8"Setting up paging. All usable physical memory is mapped starting at %p\n", info->physbase);
+ physbase = info->physbase;
init_map(&physical, false);
init_map(&virtual, true);
@@ -44,9 +53,7 @@ setup_paging(struct boot_info *info)
/* TODO fill in the used regions of virtual memory by inspecting the page tables
 * that were set up for us by the bootloader.
 */
-
- uint64_t cr3 = get_cr3();
- print(u8"CR3: %p\n", cr3);
+ mark_region(&virtual, 0, PAGE_SIZE, false); /* keep the first page unmapped so null-pointer dereferences fault */
}
bool
@@ -60,6 +67,21 @@ page_fault_handler(uint32_t code)
return false; /* We didn't actually handle it */
}
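+
+/*
+ * Map pagecount pages of physical memory starting at paddr into fresh
+ * virtual address space and return the virtual address corresponding to
+ * paddr. For example, a hypothetical 3-page uncached MMIO mapping:
+ *
+ *   uint64_t regs = map_page(0xFEBD0000, 3, false);
+ */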
+uint64_t
+map_page(uint64_t paddr, uint64_t pagecount, bool cacheable)
+{
+ /* Find a region of virtual memory which isn't in use yet, and which is large enough */
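+ /* An unaligned paddr is rounded down to its page boundary; one extra page then covers the tail. */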
+ uint64_t offset = paddr % PAGE_SIZE;
+ if(offset != 0){
+ pagecount++;
+ paddr -= offset;
+ }
+
+ uint64_t vaddr = allocate_virtual(pagecount);
+ update_pagetables(paddr, vaddr, pagecount, cacheable);
+ return vaddr + offset;
+}
+
static void
init_map(struct region_map *map, bool available)
{
@@ -203,3 +225,83 @@ print_size(uint64_t size)
P(kb, "KB", 0);
P(b, "B", 1);
}
+
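+/* First-fit scan of the virtual region list; panics if no available region is large enough. */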
+static uint64_t
+allocate_virtual(uint64_t pagecount)
+{
+ uint64_t size = PAGE_SIZE * pagecount;
+
+ for(uint64_t p = virtual.used; p != INVALID_PTR; p = virtual.regions[p].next){
+ struct region *r = &virtual.regions[p];
+ if(r->available && r->size >= size){
+ uint64_t vaddr = r->start;
+ mark_region(&virtual, vaddr, size, false);
+ return vaddr;
+ }
+ }
+ panic();
+}
+
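+/*
+ * Walk the 4-level page tables for each page in the mapping, growing
+ * intermediate tables on demand and installing one 4 KiB PTE per page.
+ */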
+static void
+update_pagetables(uint64_t paddr, uint64_t vaddr, uint64_t pagecount, bool cacheable)
+{
+ uint64_t cr3 = get_cr3();
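+ /* Treat CR3 as a one-entry table: table_ptr masks off its flag bits and returns a virtual pointer to the PML4. */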
+ uint64_t *pml4 = table_ptr(&cr3, 0);
+
+ for(uint64_t i = 0; i < pagecount; i++){
+ /* Each level of the 4-level page table is indexed by 9 bits of the virtual address. */
+ uint16_t pml4o = (vaddr >> 39) & 0x1FF;
+ uint16_t pdpo = (vaddr >> 30) & 0x1FF;
+ uint16_t pdeo = (vaddr >> 21) & 0x1FF;
+ uint16_t pteo = (vaddr >> 12) & 0x1FF;
+
+ uint64_t *pdp = table_ptr(pml4, pml4o);
+ uint64_t *pde = table_ptr(pdp, pdpo);
+ uint64_t *pte = table_ptr(pde, pdeo);
+
+ pte[pteo] = encode_entry(paddr, cacheable);
+
+ paddr += PAGE_SIZE;
+ vaddr += PAGE_SIZE;
+ }
+}
+
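+/*
+ * Return a pointer to the page table referenced by table[index], allocating
+ * and zeroing a fresh physical page if the entry is empty. Physical memory
+ * is accessed through the linear mapping at physbase.
+ */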
+static uint64_t *
+table_ptr(uint64_t *table, uint16_t index)
+{
+ uint64_t v = table[index];
+ if(v == 0){
+ v = get_physical_page();
+ memset((void*)(v + physbase), 0, PAGE_SIZE);
+ table[index] = encode_entry(v, true);
+ }else
+ v &= PADDR_MASK;
+
+ return (uint64_t *)(v + physbase);
+}
+
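+/* Build a page-table entry: the masked physical address plus Present and Writable, with PCD set for uncacheable mappings. */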
+static uint64_t
+encode_entry(uint64_t paddr, bool cacheable)
+{
+ uint64_t v = 0;
+ v |= (paddr & PADDR_MASK);
+ v |= 0x3; /* Present + RW */
+ if(!cacheable)
+ v |= 0x10; /* Cache disable */
+ return v;
+}
+
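+/* Take the first free page from the physical region list; panics when physical memory is exhausted. */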
+static uint64_t
+get_physical_page(void)
+{
+ /* TODO: almost identical to allocate_virtual; merge the two. */
+
+ for(uint64_t p = physical.used; p != INVALID_PTR; p = physical.regions[p].next){
+ struct region *r = &physical.regions[p];
+ if(r->available && r->size >= PAGE_SIZE){
+ uint64_t paddr = r->start;
+ mark_region(&physical, paddr, PAGE_SIZE, false);
+ return paddr;
+ }
+ }
+ panic();
+}