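// 32-bit x86 paging: the kernel's identity map plus per-task page directories.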
#include <stdbool.h>
#include <stdint.h>
#include "pmap.h"
#include "paging.h"
#include "boot.h"
#include "panic.h"
#include "task.h"
// Page-directory/page-table entry fields (PE_ = either kind, PD_ = directory
// entries only, PT_ = table entries only).
enum {
    PE_ADDR_MASK     = 0xfffff000, // bits 31-12: physical frame address
    PT_GLOBAL        = 0x100, // PTE: survives CR3 reloads (needs CR4.PGE)
    PD_LARGE         = 0x080, // PDE: maps a 4 MiB page (needs CR4.PSE)
    PT_DIRTY         = 0x040, // PTE: page has been written
    PE_ACCESSED      = 0x020, // entry was used in a translation
    PE_NO_CACHE      = 0x010, // disable caching for this page
    PE_WRITE_THROUGH = 0x008, // use write-through caching
    PE_USER          = 0x004, // accessible from ring 3
    PE_WRITABLE      = 0x002, // writable
    PE_PRESENT       = 0x001  // entry is valid
};
// TODO: try_elf_run needs to call these new functions.
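// Intended sequence (a sketch only; try_elf_run's loader details may differ):
// create the directory with new_task_pd(), call pd_user_allocate() for each
// loadable segment, store the directory in the task for switch_to_task_cr3(),
// and release everything with free_task_pd() when the task exits.
// Allocate a page directory with all 1024 entries cleared (nothing mapped yet).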
void *new_pd(void) {
    uint32_t *pd = allocate_kernel_pages(1);
    for (uint16_t i = 0; i < 1024; ++i)
        pd[i] = 0;
    return pd;
}
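// Free a page directory and every page table it references (the mapped frames
// themselves are not freed).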
void free_pd(void *pd) {
    uint32_t *pd_32 = pd;
    for (uint16_t i = 0; i < 1024; ++i)
        if (pd_32[i] & PE_PRESENT)
            free_pages((void *)(pd_32[i] & PE_ADDR_MASK), 1);
    free_pages(pd, 1);
}
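// Map the 4 KiB page at physical_addr to virtual_addr in pd, allocating the
// page table on demand. Both addresses must be page-aligned (the low 12 bits
// hold the flags); mappings are always user-accessible.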
void pd_map(void *pd, uint32_t physical_addr, uint32_t virtual_addr, bool writable) {
    uint32_t *ptp = (uint32_t *)pd + (virtual_addr >> 22);
    if (!(*ptp & PE_PRESENT)) {
        uint32_t *pt = allocate_kernel_pages(1);
        for (uint16_t i = 0; i < 1024; ++i)
            pt[i] = 0; // zero the fresh table so stale data can't read as present entries
        *ptp = (uint32_t)pt | PE_USER | PE_WRITABLE | PE_PRESENT;
    }
    ((uint32_t *)(*ptp & PE_ADDR_MASK))[(virtual_addr >> 12) % 1024] =
        physical_addr | PE_USER | PE_PRESENT | (writable ? PE_WRITABLE : 0);
}
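// True if vma is backed by a present page-table entry in pd.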
__attribute__ ((pure))
static bool pd_is_mapped(void *pd, uint32_t vma) {
    uint32_t pde = ((uint32_t *)pd)[vma >> 22];
    return (pde & PE_PRESENT) &&
        (((uint32_t *)(pde & PE_ADDR_MASK))[(vma >> 12) % 1024] & PE_PRESENT);
}
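// End of the kernel's identity-mapped region; user mappings live above this.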
#define KERNEL_END (0x08000000)
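// Free a task's address space. Frames mapped below KERNEL_END belong to the
// shared kernel identity map, so only frames above it are released; the page
// tables and the directory itself are per-task and always freed.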
void free_task_pd(void *pd) {
    uint32_t *pd_32 = pd;
    for (uint16_t i = 0; i < 1024; ++i)
        if (pd_32[i] & PE_PRESENT) {
            uint32_t *pt_32 = (uint32_t *)(pd_32[i] & PE_ADDR_MASK);
            if (i >= KERNEL_END >> 22)
                for (uint16_t j = 0; j < 1024; ++j)
                    if (pt_32[j] & PE_PRESENT)
                        free_pages((void *)(pt_32[j] & PE_ADDR_MASK), 1);
            free_pages(pt_32, 1);
        }
    free_pages(pd, 1);
}
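// Create a task page directory with the kernel region identity-mapped read-only.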
void *new_task_pd(void) {
    uint32_t *pd = new_pd();
    for (uint32_t addr = 0; addr < KERNEL_END; addr += 4096)
        pd_map(pd, addr, addr, false);
    return pd;
}
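// Back `pages` pages of virtual memory at vma with a fresh, physically
// contiguous user allocation; returns the physical base, or panics on OOM.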
void *pd_user_allocate(void *pd, uint32_t vma, uint32_t pages, bool writable) {
    void *pma = allocate_user_pages(pages);
    if (!pma)
        panic("Could not allocate user pages.");
    for (uint32_t i = 0; i < pages; ++i)
        pd_map(pd, (uint32_t)pma + (i << 12), vma + (i << 12), writable);
    return pma;
}
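// Find `pages` consecutive unmapped virtual pages above the kernel, back them
// with user frames, and return the chosen virtual address (0 if no gap fits).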
void *pd_user_allocate_anywhere_writable(void *pd, uint32_t pages) {
    uint32_t run = 0;
    // Scan with a uint32_t so the wrap to 0 past the last 4 GiB page is
    // well-defined and ends the loop.
    for (uint32_t vma = KERNEL_END; vma; vma += 4096) {
        if (pd_is_mapped(pd, vma))
            run = 0;
        else if (++run == pages) {
            vma -= (pages - 1) * 4096;
            for (uint32_t i = 0; i < pages; ++i)
                pd_map(pd, (uint32_t)allocate_user_pages(1), vma + 4096 * i, true);
            return (void *)vma;
        }
    }
    return 0;
}
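// Fixed physical addresses of the kernel page directory and the 4 MiB block
// holding its 1024 page tables.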
#define KPAGE_DIR ((uint32_t *)0x00005000)
#define KPAGE_TABLE_0 ((uint32_t *)0x00400000)
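// Identity-map the full 4 GiB address space with 4 KiB pages, load CR3 with
// the kernel page directory, and set CR0.PG to turn paging on.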
void init_paging(void) {
    //TODO: use PAE if possible
    for (uint32_t i = 0; i < 1048576; ++i)
        KPAGE_TABLE_0[i] = (i * 4096) | PE_WRITABLE | PE_PRESENT;
    for (uint16_t i = 0; i < 1024; ++i)
        KPAGE_DIR[i] = (uint32_t)(KPAGE_TABLE_0 + i * 1024) | PE_WRITABLE | PE_PRESENT;
    asm volatile (
        "mov $0x00005000, %%eax\n"
        "mov %%eax, %%cr3\n"
        "mov %%cr0, %%eax\n"
        "or $0x80000000, %%eax\n"
        "mov %%eax, %%cr0"
        : : : "eax");
}
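// Reload CR3 with the kernel's identity-mapping page directory.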
void switch_to_kernel_cr3(void) {
    asm volatile (
        "mov $0x00005000, %%eax\n"
        "mov %%eax, %%cr3"
        : : : "eax");
}
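// Reload CR3 with the active task's page directory.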
void switch_to_task_cr3(void) {
    asm volatile (
        "mov %0, %%cr3"
        : : "a" (active_task->page_directory));
}