Diffstat (limited to 'src/kernel/paging.c')
-rw-r--r--  src/kernel/paging.c  53
1 file changed, 23 insertions(+), 30 deletions(-)
diff --git a/src/kernel/paging.c b/src/kernel/paging.c
index a0f7c3d..fbc33de 100644
--- a/src/kernel/paging.c
+++ b/src/kernel/paging.c
@@ -20,28 +20,16 @@ enum {
PE_PRESENT = 0x001
};
-//TODO:
-// try_elf_run needs to call these new functions.
-
-void *new_pd() {
- uint32_t *pd = allocate_kernel_pages(1);
- for (uint16_t i = 0; i < 1024; ++i)
- pd[i] = 0;
- return pd;
-}
-
-void free_pd(void *pd) {
- uint32_t *pd_32 = pd;
- for (uint16_t i = 0; i < 1024; ++i)
- if (pd_32[i] & PE_PRESENT)
- free_pages((void *)(pd_32[i] & PE_ADDR_MASK), 1);
- free_pages(pd, 1);
-}
+#define KERNEL_END (0x08000000)
-void pd_map(void *pd, uint32_t physical_addr, uint32_t virtual_addr, bool writable) {
+static void pd_map(void *pd, uint32_t physical_addr, uint32_t virtual_addr, bool writable) {
uint32_t *ptp = (uint32_t *)pd + (virtual_addr >> 22);
- if (!(*ptp & PE_PRESENT))
- *ptp = (uint32_t)allocate_kernel_pages(1) | PE_USER | PE_WRITABLE | PE_PRESENT;
+ if (!(*ptp & PE_PRESENT)) {
+ uint32_t *new_pt = allocate_kernel_pages(1);
+ for (uint16_t i = 0; i < 1024; ++i)
+ new_pt[i] = 0;
+ *ptp = (uint32_t)new_pt | PE_USER | PE_WRITABLE | PE_PRESENT;
+ }
((uint32_t *)(*ptp & PE_ADDR_MASK))[(virtual_addr >> 12) % 1024] = physical_addr | PE_USER | PE_PRESENT | (PE_WRITABLE * writable);
}
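A minimal sketch of the two-level address split that pd_map() relies on, assuming standard 32-bit x86 paging with 4 KiB pages (the shifts match those in the hunk above; the standalone program and the example address are hypothetical):

#include <stdint.h>
#include <stdio.h>

int main(void) {
    uint32_t virtual_addr = 0x08048123;               /* hypothetical example address */
    uint32_t pde_index = virtual_addr >> 22;          /* top 10 bits select the page-directory entry */
    uint32_t pte_index = (virtual_addr >> 12) % 1024; /* next 10 bits select the page-table entry */
    uint32_t offset    = virtual_addr & 0xFFF;        /* low 12 bits are the byte offset in the page */
    printf("pde=%u pte=%u offset=0x%03x\n",
           (unsigned)pde_index, (unsigned)pte_index, (unsigned)offset);
    return 0;
}

Zeroing new_pt before installing it keeps stale data in the freshly allocated page from being read as present mappings, assuming allocate_kernel_pages() does not guarantee zeroed pages.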
@@ -51,26 +39,28 @@ static bool pd_is_mapped(void *pd, uint32_t vma) {
return (pde & PE_PRESENT) && (((uint32_t *)(pde & PE_ADDR_MASK))[(vma >> 12) % 1024] & PE_PRESENT);
}
-#define KERNEL_END (0x08000000)
-
void free_task_pd(void *pd) {
uint32_t *pd_32 = pd;
- for (uint16_t i = 0; i < 1024; ++i)
+ for (uint16_t i = KERNEL_END / 4096 / 1024; i < 1024; ++i)
if (pd_32[i] & PE_PRESENT) {
uint32_t *pt_32 = (uint32_t *)(pd_32[i] & PE_ADDR_MASK);
- if (i >= KERNEL_END >> 22)
- for (uint16_t j = 0; j < 1024; ++j)
- if (pt_32[j] & PE_PRESENT)
- free_pages((void *)(pt_32[j] & PE_ADDR_MASK), 1);
+ for (uint16_t j = 0; j < 1024; ++j)
+ if (pt_32[j] & PE_PRESENT)
+ free_pages((void *)(pt_32[j] & PE_ADDR_MASK), 1);
free_pages(pt_32, 1);
}
free_pages(pd, 1);
}
+__attribute__ ((aligned (4096)))
+static uint32_t kmap[KERNEL_END / 4096];
+
void *new_task_pd() {
- uint32_t *pd = new_pd();
- for (uint32_t addr = 0; addr < KERNEL_END; addr += 4096)
- pd_map(pd, addr, addr, false);
+ uint32_t *pd = allocate_kernel_pages(1);
+ for (uint8_t i = 0; i < KERNEL_END / 4096 / 1024; ++i)
+ pd[i] = (uint32_t)(kmap + i * 1024) | PE_USER | PE_PRESENT;
+ for (uint16_t i = KERNEL_END / 4096 / 1024; i < 1024; ++i)
+ pd[i] = 0;
return pd;
}
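The size arithmetic behind the shared kmap[] array, worked out as a compile-time sketch (only KERNEL_END and the 4 KiB page size come from the diff; the standalone checks are illustrative):

#include <stdint.h>

#define KERNEL_END 0x08000000u   /* 128 MiB identity-mapped kernel region, as above */

/* One page table covers 1024 * 4096 B = 4 MiB, so the kernel region needs
 * KERNEL_END / 4096 / 1024 = 32 page-directory entries, all of which can
 * point into the shared, page-aligned kmap[] array. */
_Static_assert(KERNEL_END / 4096 / 1024 == 32, "kernel region spans 32 PDEs");
_Static_assert(sizeof(uint32_t[KERNEL_END / 4096]) == 32 * 4096,
               "kmap[] holds exactly 32 page-table pages");

Sharing kmap[] is also why free_task_pd() now starts its loop at KERNEL_END / 4096 / 1024: the first 32 page tables belong to the kernel, not to the task, and must not be freed.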
@@ -129,6 +119,9 @@ void init_paging() {
for (uint16_t i = 0; i < 1024; ++i)
KPAGE_DIR[i] = (uint32_t)(KPAGE_TABLE_0 + i * 1024) | PE_WRITABLE | PE_PRESENT;
+ for (uint16_t i = 0; i < KERNEL_END / 4096; ++i)
+ kmap[i] = (i * 4096) | PE_USER | PE_PRESENT;
+
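A standalone sketch of the identity-map invariant that the kmap[] fill loop above establishes; PE_USER's value and the 0xFFFFF000 frame mask (PE_ADDR_MASK in this file) are assumed to be the standard x86 bits, and kmap_model[] is a hypothetical stand-in for the real array:

#include <assert.h>
#include <stdint.h>

#define KERNEL_END  0x08000000u
#define PE_USER     0x004   /* assumed: standard x86 user/supervisor bit */
#define PE_PRESENT  0x001   /* matches the enum at the top of the first hunk */

/* Stand-in for kmap[], filled the same way the init_paging() loop fills it. */
static uint32_t kmap_model[KERNEL_END / 4096];

int main(void) {
    for (uint32_t i = 0; i < KERNEL_END / 4096; ++i)
        kmap_model[i] = (i * 4096) | PE_USER | PE_PRESENT;
    /* Identity map: the frame address stored in entry i equals the
     * page-aligned virtual address i * 4096 that the entry serves. */
    for (uint32_t i = 0; i < KERNEL_END / 4096; ++i)
        assert((kmap_model[i] & 0xFFFFF000u) == i * 4096);
    return 0;
}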
asm volatile (
"mov $0x00005000, %%eax\n"
"mov %%eax, %%cr3\n"