summaryrefslogtreecommitdiff
path: root/src/paging.c
diff options
context:
space:
mode:
authorPeter Mikkelsen <petermikkelsen10@gmail.com>2025-07-29 21:40:45 +0200
committerPeter Mikkelsen <petermikkelsen10@gmail.com>2025-07-29 21:40:45 +0200
commit0a817a5a74c328229f8a732fc3ec22d8fd7dc20d (patch)
treea5f906687a28420a2ccb29c788e6779f221e7a75 /src/paging.c
parentca84afb315e813f08d725082320d40969b9f93e4 (diff)
Some work on keeping track of memory, and providing more info in panics
Diffstat (limited to 'src/paging.c')
-rw-r--r--src/paging.c185
1 files changed, 185 insertions, 0 deletions
diff --git a/src/paging.c b/src/paging.c
index 0728178..ecbd647 100644
--- a/src/paging.c
+++ b/src/paging.c
@@ -1,9 +1,50 @@
#include "aplos.h"
+#define MAX_REGIONS 1024
+#define INVALID_PTR UINT64_MAX
+
+/* One contiguous range of addresses tracked in a region_map. Nodes live in
+ * a fixed table and are chained through 'next' by table index (INVALID_PTR
+ * terminates a chain), so no dynamic allocation is needed.
+ */
+struct region
+{
+ uint64_t start;
+ uint64_t size;
+ uint64_t next; /* index of the next region in the chain, or INVALID_PTR */
+ bool available; /* was 'int available : 1': a signed one-bit field stores
+ * true as -1 (implementation-defined), so comparing it to
+ * a bool with ==/!= gives the wrong answer; plain bool
+ * behaves as intended. */
+};
+
+/* Bookkeeping for one address space: a fixed pool of region nodes split
+ * into a chain of in-use nodes (sorted by start address by add_node) and
+ * a free chain. Chains are linked by table index, INVALID_PTR-terminated.
+ */
+struct region_map
+{
+ uint64_t used; /* Chain of used indices in the regions table */
+ uint64_t unused; /* Chain of unused indices in the regions table */
+ struct region regions[MAX_REGIONS];
+};
+
+/* One map for physical memory, one for the kernel's virtual address space. */
+static struct region_map physical;
+static struct region_map virtual;
+
+/* Internal helpers for building, editing, and inspecting a region_map. */
+static void init_map(struct region_map *, bool);
+static void mark_region(struct region_map *, uint64_t, uint64_t, bool);
+static uint64_t lookup(struct region_map *, uint64_t);
+static void delete_node(struct region_map *, uint64_t);
+static uint64_t add_node(struct region_map *, uint64_t, uint64_t, bool);
+static void debug_region_map(struct region_map *);
+static void print_size(uint64_t);
+
void
setup_paging(struct boot_info *info)
{
print(u8"Setting up paging. All usable physical memory is mapped starting at %p\n", info->physbase);
+
+ /* Physical memory starts out fully unavailable and the boot memory map
+ * marks the usable ranges; virtual memory starts out fully available
+ * until the bootloader's existing mappings are accounted for. */
+ init_map(&physical, false);
+ init_map(&virtual, true);
+
+ for(uint64_t i = 0; i < info->memmap_count; i++)
+ mark_region(&physical, info->memmaps[i].start, info->memmaps[i].size, true);
+ debug_region_map(&physical);
+
+ /* TODO fill in the used regions of virtual memory, by inspecting the
+ * page tables that were set up for us by the bootloader.
+ */
+
uint64_t cr3 = get_cr3();
print(u8"CR3: %p\n", cr3);
}
@@ -18,3 +59,147 @@ page_fault_handler(uint32_t code)
return false; /* We didn't actually handle it */
}
+
+static void
+init_map(struct region_map *map, bool available)
+{
+ /* Empty used chain; every table slot starts on the free chain. */
+ map->used = INVALID_PTR;
+ map->unused = 0;
+
+ /* Link slot i to slot i+1; the final slot terminates the chain. */
+ uint64_t i = 0;
+ while(i+1 < MAX_REGIONS){
+ map->regions[i].next = i+1;
+ i++;
+ }
+ map->regions[i].next = INVALID_PTR;
+
+ /* A single region covering the whole 64-bit space, in the given state. */
+ add_node(map, 0, UINT64_MAX, available);
+}
+
+static void
+mark_region(struct region_map *map, uint64_t addr, uint64_t size, bool available)
+{
+ /* Set [addr, addr+size) to the given availability. Only the case where
+ * the range lies entirely within one existing region is handled. */
+ uint64_t p = lookup(map, addr);
+ struct region *r = &map->regions[p];
+
+ if((r->start <= addr) && (r->start+r->size >= addr+size)){
+ if(r->available == available)
+ return; /* Already in the requested state; nothing to split. */
+ /* Save the bounds first: delete_node() puts slot p at the head of
+ * the free list, so the very next add_node() recycles and
+ * overwrites it — r must not be read after this point. */
+ uint64_t oldstart = r->start;
+ uint64_t oldend = r->start+r->size;
+ delete_node(map, p);
+ /* Split into up to three pieces, skipping empty edge pieces so no
+ * table slots are wasted on zero-size regions. */
+ if(addr > oldstart)
+ add_node(map, oldstart, addr - oldstart, !available);
+ add_node(map, addr, size, available);
+ if(oldend > addr+size)
+ add_node(map, addr+size, oldend-(addr+size), !available);
+ }else
+ panic(); /* Ranges spanning multiple regions are not handled yet */
+}
+
+static uint64_t
+lookup(struct region_map *map, uint64_t addr)
+{
+ /* Walk the used chain until the region containing addr is found and
+ * return its index; panic if no region covers addr. */
+ uint64_t idx = map->used;
+ while(idx != INVALID_PTR){
+ struct region *r = &map->regions[idx];
+ if(addr >= r->start && addr < r->start+r->size)
+ return idx;
+ idx = r->next;
+ }
+ panic();
+}
+
+static void
+delete_node(struct region_map *map, uint64_t p)
+{
+ /* Unlink node p from the used chain and push it onto the free chain.
+ * Panics if p is not on the used chain. */
+ uint64_t x;
+ uint64_t prev = INVALID_PTR;
+ for(x = map->used; x != INVALID_PTR; prev = x, x = map->regions[x].next){
+ if(x == p){
+ if(prev != INVALID_PTR)
+ map->regions[prev].next = map->regions[x].next;
+ else
+ /* BUG FIX: deleting the head node used to set map->used
+ * to INVALID_PTR, silently dropping every node after the
+ * head. Keep the rest of the chain instead. */
+ map->used = map->regions[x].next;
+ map->regions[x].next = map->unused;
+ map->unused = x;
+ return;
+ }
+ }
+ panic();
+}
+
+static uint64_t
+add_node(struct region_map *map, uint64_t start, uint64_t size, bool available)
+{
+ /* Grab a node from the free chain, fill it in, and insert it into the
+ * used chain keeping it sorted by start address. Returns the new index. */
+ if(map->unused == INVALID_PTR)
+ panic(); /* table exhausted; previously this indexed regions[UINT64_MAX] */
+
+ /* Find the insertion point: prev ends up as the last node whose start
+ * is <= the new start (INVALID_PTR means insert at the head). */
+ uint64_t p;
+ uint64_t prev = INVALID_PTR;
+ for(p = map->used; p != INVALID_PTR; prev = p, p = map->regions[p].next){
+ if(map->regions[p].start > start)
+ break;
+ }
+
+ /* Pop the free-chain head and splice it in after prev. */
+ uint64_t new = map->unused;
+ struct region *r = &map->regions[new];
+ map->unused = r->next;
+ if(prev == INVALID_PTR){
+ r->next = map->used;
+ map->used = new;
+ }else{
+ r->next = map->regions[prev].next;
+ map->regions[prev].next = new;
+ }
+
+ r->start = start;
+ r->size = size;
+ r->available = available;
+ return new;
+}
+
+static void
+debug_region_map(struct region_map *map)
+{
+ /* First pass: sum the bytes in all available regions. */
+ uint64_t total = 0;
+ for(uint64_t idx = map->used; idx != INVALID_PTR; idx = map->regions[idx].next){
+ if(map->regions[idx].available)
+ total += map->regions[idx].size;
+ }
+ print(u8"Total available memory: ");
+ print_size(total);
+ print(u8"\n");
+
+ /* Second pass: list each available region with its extent and size. */
+ for(uint64_t idx = map->used; idx != INVALID_PTR; idx = map->regions[idx].next){
+ struct region *r = &map->regions[idx];
+ if(!r->available)
+ continue;
+ print(u8"[%p - %p] (", r->start, r->start+r->size);
+ print_size(r->size);
+ print(u8")\n");
+ }
+}
+
+static void
+print_size(uint64_t size)
+{
+ /* Decompose the byte count into GB + MB + KB + B components. */
+ uint64_t gb = size >> 30;
+ uint64_t mb = (size >> 20) & 0x3ff;
+ uint64_t kb = (size >> 10) & 0x3ff;
+ uint64_t b = size & 0x3ff;
+
+ /* Print the non-zero components joined by " + "; when every component
+ * is zero, still print "0 B". */
+ int printed = 0;
+ if(gb){
+ print(u8"%u64 GB", gb);
+ printed = 1;
+ }
+ if(mb){
+ if(printed)
+ print(u8" + ");
+ print(u8"%u64 MB", mb);
+ printed = 1;
+ }
+ if(kb){
+ if(printed)
+ print(u8" + ");
+ print(u8"%u64 KB", kb);
+ printed = 1;
+ }
+ if(b || !printed){
+ if(printed)
+ print(u8" + ");
+ print(u8"%u64 B", b);
+ }
+}