path: root/src/paging.c
#include "aplos.h"

#define MAX_REGIONS 1024               /* fixed pool of region nodes per map */
#define INVALID_PTR UINT64_MAX         /* index-chain terminator */
#define PADDR_MASK 0x000FFFFFFFFFF000  /* bits 12-51 of a page-table entry: the physical address */

struct region
{
	uint64_t start;
	uint64_t size;
	uint64_t next;
	bool available : 1; /* bool, not int: a signed 1-bit field holds 0/-1 and breaks equality checks */
};

struct region_map
{
	uint64_t used;   /* Chain of used indices in the regions table */
	uint64_t unused; /* Chain of unused indices in the regions table */
	struct region regions[MAX_REGIONS];
};

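/*
 * The two maps below are static pools: nodes are never heap-allocated, only
 * threaded onto one of two index chains ("used", kept sorted by start
 * address, and "unused", a free list), with INVALID_PTR as the terminator.
 * An illustrative snapshot of the invariant after setup:
 *
 *   used:   [0x0, 0x1000) unavailable -> [0x1000, ~2^64) available -> INVALID_PTR
 *   unused: the remaining free nodes
 */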
static struct region_map physical;
static struct region_map virtual;

static uint64_t physbase;

static void init_map(struct region_map *, bool);
static void mark_region(struct region_map *, uint64_t, uint64_t, bool);
static uint64_t lookup(struct region_map *, uint64_t);
static void delete_node(struct region_map *, uint64_t);
static uint64_t add_node(struct region_map *, uint64_t, uint64_t, bool);
static void debug_region_map(struct region_map *);
static void print_size(uint64_t);
static uint64_t allocate_virtual(uint64_t);
static void update_pagetables(uint64_t, uint64_t, uint64_t, bool);
static uint64_t *table_ptr(uint64_t *, uint16_t);
static uint64_t encode_entry(uint64_t, bool);
static uint64_t get_physical_page(void);

void
setup_paging(struct boot_info *info)
{
	print(u8"Setting up paging. All usable physical memory is mapped starting at %p\n", info->physbase);
	physbase = info->physbase;

	init_map(&physical, false);
	init_map(&virtual, true);

	for(uint64_t i = 0; i < info->memmap_count; i++)
		mark_region(&physical, info->memmaps[i].start, info->memmaps[i].size, true);
	debug_region_map(&physical);

	/* TODO fill in the used regions of virtual memory by inspecting the page
	 * tables that were set up for us by the bootloader.
	 */
	mark_region(&virtual, 0, PAGE_SIZE, false); /* don't use the first page of virtual memory */
}

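/*
 * x86 page-fault error code bits: bit 0 = protection violation (vs. a
 * non-present page), bit 1 = the faulting access was a write. Only bit 1
 * is inspected here.
 */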
bool
page_fault_handler(uint32_t code)
{
	int write = code&0x2;

	uint64_t addr = get_cr2();
	print(u8"Page fault trying to %s %p\n", write ? u8"write to" : u8"read from", addr);

	return false; /* We didn't actually handle it */
}

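/*
 * Map `pagecount` pages of physical memory starting at `paddr` into virtual
 * memory, preserving the offset within the first page. Worked example:
 * map_page(0x12345678, 2, true) rounds paddr down to 0x12345000, bumps the
 * count to 3 pages so the tail is covered, and returns vaddr + 0x678.
 */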
uint64_t
map_page(uint64_t paddr, uint64_t pagecount, bool cacheable)
{
	/* Find a region of virtual memory which isn't in use yet, and which is large enough */
	uint64_t offset = paddr % PAGE_SIZE;
	if(offset != 0){
		pagecount++;
		paddr -= offset;
	}

	uint64_t vaddr = allocate_virtual(pagecount);
	update_pagetables(paddr, vaddr, pagecount, cacheable);
	return vaddr + offset;
}

static void
init_map(struct region_map *map, bool available)
{
	map->used = INVALID_PTR;
	map->unused = 0;

	for(uint64_t p = 0; p < MAX_REGIONS-1; p++)
		map->regions[p].next = p+1;
	map->regions[MAX_REGIONS-1].next = INVALID_PTR;

	add_node(map, 0, UINT64_MAX, available);
}

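/*
 * Flip the availability of [addr, addr+size). Only the simple case is
 * handled: the range lies entirely inside one region of the opposite state,
 * which is then split into up to three regions. Example: marking
 * [0x2000, 0x3000) unavailable inside an available [0x0, 0x10000) leaves
 * [0x0, 0x2000) and [0x3000, 0x10000) available.
 */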
static void
mark_region(struct region_map *map, uint64_t addr, uint64_t size, bool available)
{
	uint64_t p = lookup(map, addr);
	struct region *r = &map->regions[p];

	if((r->start <= addr) && (r->start+r->size >= addr+size) && (r->available != available)){
		/* Entirely within a current region of the opposite state. Split it up.
		 * delete_node recycles the node, so capture its bounds first. */
		uint64_t oldstart = r->start;
		uint64_t oldend = r->start+r->size;
		delete_node(map, p);
		if(addr > oldstart)
			add_node(map, oldstart, addr - oldstart, !available);
		add_node(map, addr, size, available);
		if(oldend > addr+size)
			add_node(map, addr+size, oldend-(addr+size), !available);
	}else
		panic(); /* I don't want to think about this right now */
}

static uint64_t
lookup(struct region_map *map, uint64_t addr)
{
	uint64_t p;
	struct region *r;

	for(p = map->used; p != INVALID_PTR; p = r->next){
		r = &map->regions[p];
		if((r->start <= addr) && (r->start+r->size > addr))
			return p;
	}
	panic();
}

static void
delete_node(struct region_map *map, uint64_t p)
{
	uint64_t x;
	uint64_t prev = INVALID_PTR;
	for(x = map->used; x != INVALID_PTR; prev = x, x = map->regions[x].next){
		if(x == p){
			if(prev != INVALID_PTR)
				map->regions[prev].next = map->regions[x].next;
			else
				map->used = map->regions[x].next; /* unlink the head */
			map->regions[x].next = map->unused;
			map->unused = x;
			return;
		}
	}
	panic();
}

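/*
 * Pop a node off the free list and insert it into the used chain, keeping
 * that chain sorted by start address; the first branch handles insertion at
 * the head, the second insertion after `prev`.
 */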
static uint64_t
add_node(struct region_map *map, uint64_t start, uint64_t size, bool available)
{
	struct region *r;
	uint64_t p;
	uint64_t prev = INVALID_PTR;
	for(p = map->used; p != INVALID_PTR; prev = p, p = map->regions[p].next){
		if(map->regions[p].start > start)
			break;
	}

	uint64_t new = map->unused;
	if(new == INVALID_PTR)
		panic(); /* region pool exhausted */

	if(prev == INVALID_PTR){
		r = &map->regions[map->unused];
		uint64_t tmp = map->used;
		map->used = map->unused;
		map->unused = r->next;
		r->next = tmp;
	}else{
		r = &map->regions[map->unused];
		map->unused = r->next;
		r->next = map->regions[prev].next;
		map->regions[prev].next = new;
	}

	r->start = start;
	r->size = size;
	r->available = available;
	return new;
}

static void
debug_region_map(struct region_map *map)
{
	uint64_t available = 0;
	for(uint64_t p = map->used; p != INVALID_PTR; p = map->regions[p].next){
		struct region *r = &map->regions[p];
		if(r->available)
			available += r->size;
	}
	print(u8"Total available memory: ");
	print_size(available);
	print(u8"\n");

	for(uint64_t p = map->used; p != INVALID_PTR; p = map->regions[p].next){
		struct region *r = &map->regions[p];
		if(r->available){
			print(u8"[%p - %p] (", r->start, r->start+r->size);
			print_size(r->size);
			print(u8")\n");
		}
	}
}

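/*
 * Print a byte count as a sum of binary units, e.g.
 * print_size((3ULL << 30) + (5 << 10)) emits "3 GB + 5 KB"; a zero size
 * still emits "0 B" via the `force` flag on the last unit.
 */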
static void
print_size(uint64_t size)
{
	uint64_t gb, mb, kb, b;
	gb = size >> 30; size -= gb << 30;
	mb = size >> 20; size -= mb << 20;
	kb = size >> 10; size -= kb << 10;
	b  = size;

	int printed = 0;

#define P(what, spec, force)			\
	if(what || (force && !printed)){	\
		if(printed)			\
			print(u8" + ");		\
		printed = 1;			\
		print(u8"%u64 " spec, what);	\
	}

	P(gb, "GB", 0);
	P(mb, "MB", 0);
	P(kb, "KB", 0);
	P(b, "B", 1);

#undef P
}

static uint64_t
allocate_virtual(uint64_t pagecount)
{
	uint64_t size = PAGE_SIZE * pagecount;

	for(uint64_t p = virtual.used; p != INVALID_PTR; p = virtual.regions[p].next){
		struct region *r = &virtual.regions[p];
		if(r->available && r->size >= size){
			uint64_t vaddr = r->start;
			mark_region(&virtual, vaddr, size, false);
			return vaddr;
		}
	}
	panic();
}

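/*
 * Walk the 4-level page tables (PML4 -> PDPT -> PD -> PT), creating
 * intermediate tables on demand, and point each PTE at the next physical
 * page. Each level is indexed by 9 bits of the virtual address; for example
 * vaddr 0x40201000 decodes to PML4 index 0, PDPT 1, PD 1, PT 1. Fresh
 * mappings (the only kind map_page produces) need no invlpg, since the TLB
 * does not cache non-present entries.
 */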
static void
update_pagetables(uint64_t paddr, uint64_t vaddr, uint64_t pagecount, bool cacheable)
{
	uint64_t cr3 = get_cr3();
	uint64_t *pml4 = table_ptr(&cr3, 0); /* treat CR3 as a one-entry table to reuse the mask+physbase logic */

	for(uint64_t i = 0; i < pagecount; i++){
		/* 9-bit index per level, so mask with 0x1FF */
		uint16_t pml4o = (vaddr >> 39) & 0x1FF;
		uint16_t pdpo = (vaddr >> 30) & 0x1FF;
		uint16_t pdeo = (vaddr >> 21) & 0x1FF;
		uint16_t pteo = (vaddr >> 12) & 0x1FF;

		uint64_t *pdp = table_ptr(pml4, pml4o);
		uint64_t *pde = table_ptr(pdp, pdpo);
		uint64_t *pte = table_ptr(pde, pdeo);

		pte[pteo] = encode_entry(paddr, cacheable);

		paddr += PAGE_SIZE;
		vaddr += PAGE_SIZE;
	}
}

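/*
 * Return a usable virtual pointer to the page table referenced by
 * table[index], allocating and zeroing a new one if the entry is not
 * present. Relies on the bootloader's linear mapping of all physical
 * memory at `physbase` to make the table addressable.
 */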
static uint64_t *
table_ptr(uint64_t *table, uint16_t index)
{
	uint64_t v = table[index];
	if((v & 0x1) == 0){
		/* Not present: allocate and zero a fresh table, then install it */
		v = get_physical_page();
		memset((void*)(v + physbase), 0, PAGE_SIZE);
		table[index] = encode_entry(v, true);
	}else
		v &= PADDR_MASK;

	return (uint64_t *)(v + physbase);
}

static uint64_t
encode_entry(uint64_t paddr, bool cacheable)
{
	uint64_t v = 0;
	v |= (paddr & PADDR_MASK);
	v |= 0x3; /* Present + RW */
	if(!cacheable)
		v |= 0x10; /* Cache disable */
	return v;
}

static uint64_t
get_physical_page(void)
{
	/* TODO: almost identical to allocate_virtual; merge them. */

	for(uint64_t p = physical.used; p != INVALID_PTR; p = physical.regions[p].next){
		struct region *r = &physical.regions[p];
		if(r->available && r->size >= PAGE_SIZE){
			uint64_t paddr = r->start;
			mark_region(&physical, paddr, PAGE_SIZE, false);
			return paddr;
		}
	}
	panic();
}