summaryrefslogtreecommitdiff
path: root/src/kernel/paging.c
diff options
context:
space:
mode:
Diffstat (limited to 'src/kernel/paging.c')
-rw-r--r--src/kernel/paging.c130
1 file changed, 130 insertions, 0 deletions
diff --git a/src/kernel/paging.c b/src/kernel/paging.c
new file mode 100644
index 0000000..b92e037
--- /dev/null
+++ b/src/kernel/paging.c
@@ -0,0 +1,130 @@
+#include <stdbool.h>
+#include <stdint.h>
+#include "pmap.h"
+#include "paging.h"
+#include "boot.h"
+#include "panic.h"
+#include "task.h"
+
// Flag bits of a 32-bit x86 page-directory / page-table entry.
// PE_*  applies to both PDEs and PTEs, PD_* to directory entries only,
// PT_* to table entries only.
enum {
	PE_ADDR_MASK = 0xfffff000, // top 20 bits: 4 KiB-aligned frame address

	PT_GLOBAL = 0x100,   // not flushed from TLB on CR3 reload
	PD_LARGE = 0x080,    // PDE maps a 4 MiB page instead of a page table
	PT_DIRTY = 0x040,    // set by CPU on first write
	PE_ACCESSED = 0x020, // set by CPU on first access
	PE_NO_CACHE = 0x010, // disable caching for this page
	PE_WRTHCH = 0x008,   // presumably write-through caching (PWT) — TODO confirm
	PE_USER = 0x004,     // accessible from ring 3
	PE_WRITABLE = 0x002, // writes allowed
	PE_PRESENT = 0x001   // entry is valid
};
+
+//TODO:
+// try_elf_run needs to call these new functions.
+
// Allocate one kernel page and return it as an empty (all non-present)
// page directory. Caller owns the page; release it with free_pd.
void *new_pd() {
	uint32_t *dir = allocate_kernel_pages(1);
	uint16_t slot = 1024;
	while (slot--)
		dir[slot] = 0; // allocator does not zero; clear every PDE
	return dir;
}
+
+void free_pd(void *pd) {
+ uint32_t *pd_32 = pd;
+ for (uint16_t i = 0; i < 1024; ++i)
+ if (pd_32[i] & PE_PRESENT)
+ free_pages((void *)(pd_32[i] & PE_ADDR_MASK), 1);
+ free_pages(pd, 1);
+}
+
+void pd_map(void *pd, uint32_t physical_addr, uint32_t virtual_addr, bool writable) {
+ uint32_t *ptp = (uint32_t *)pd + (virtual_addr >> 22);
+ if (!(*ptp & PE_PRESENT))
+ *ptp = (uint32_t)allocate_kernel_pages(1) | PE_USER | PE_WRITABLE | PE_PRESENT;
+ ((uint32_t *)(*ptp & PE_ADDR_MASK))[(virtual_addr >> 12) % 1024] = physical_addr | PE_USER | PE_PRESENT | (PE_WRITABLE * writable);
+}
+
+bool pd_is_mapped(void *pd, uint32_t vma) {
+ uint32_t pde = ((uint32_t *)pd)[vma >> 22];
+ return (pde & PE_PRESENT) && (((uint32_t *)(pde & PE_ADDR_MASK))[(vma >> 12) % 1024] & PE_PRESENT);
+}
+
+#define KERNEL_END (0x08000000)
+
// Tear down a task page directory built by new_task_pd.
// For PDEs below KERNEL_END >> 22 (the identity-mapped kernel region) only
// the page-table page is freed -- the mapped frames are shared kernel
// memory, not owned by the task. At or above that index, the mapped frames
// are task-private user pages, so each present PTE's frame is freed too.
void free_task_pd(void *pd) {
	uint32_t *pd_32 = pd;
	for (uint16_t i = 0; i < 1024; ++i)
		if (pd_32[i] & PE_PRESENT) {
			uint32_t *pt_32 = (uint32_t *)(pd_32[i] & PE_ADDR_MASK);
			if (i >= KERNEL_END >> 22) // PDE i covers virtual [i<<22, (i+1)<<22)
				for (uint16_t j = 0; j < 1024; ++j)
					if (pt_32[j] & PE_PRESENT)
						free_pages((void *)(pt_32[j] & PE_ADDR_MASK), 1);
			free_pages(pt_32, 1);
		}
	free_pages(pd, 1);
}
+
+void *new_task_pd() {
+ uint32_t *pd = new_pd();
+ for (uint32_t addr = 0; addr < KERNEL_END; addr += 4096)
+ pd_map(pd, addr, addr, false);
+ return pd;
+}
+
// Allocate `pages` contiguous user frames and map them at virtual address
// `vma` in `pd`. Panics if the allocation fails. Returns the physical base.
void *pd_user_allocate(void *pd, uint32_t vma, uint32_t pages, bool writable) {
	void *pma = allocate_user_pages(pages);
	if (!pma)
		panic("Could not allocate user pages.");
	uint32_t base = (uint32_t)pma;
	for (uint32_t off = 0; off < pages << 12; off += 4096)
		pd_map(pd, base + off, vma + off, writable);
	return pma;
}
+
+void *pd_user_allocate_anywhere_writable(void *pd, uint32_t pages) {
+ uint32_t run = 0;
+ for (void *vma = (void *)KERNEL_END; vma; vma += 4096) {
+ if (pd_is_mapped(pd, vma))
+ run = 0;
+ else if (++run == pages) {
+ vma -= (pages - 1) * 4096;
+ for (uint32_t i = 0; i < pages; ++i)
+ pd_map(pd, (uint32_t)allocate_user_pages(1), (uint32_t)vma + 4096 * i, true);
+ return vma;
+ }
+ }
+}
+
// Fixed physical address of the kernel page directory; init_paging and
// switch_to_kernel_cr3 load this value (0x5000) into CR3.
#define KPAGE_DIR ((uint32_t *)0x00005000)
// Start of the 4 MiB block of page tables (1024 tables x 4 KiB) that
// init_paging fills to identity-map the whole 4 GiB address space.
#define KPAGE_TABLE_0 ((uint32_t *)0x00400000)
+
+void init_paging() {
+ //TODO: use PAE if possible
+
+ for (uint32_t i = 0; i < 1048576; ++i)
+ KPAGE_TABLE_0[i] = (i * 4096) | PE_WRITABLE | PE_PRESENT;
+ for (uint16_t i = 0; i < 1024; ++i)
+ KPAGE_DIR[i] = (uint32_t)(KPAGE_TABLE_0 + i * 1024) | PE_WRITABLE | PE_PRESENT;
+
+ asm volatile (
+ "mov $0x00005000, %%eax\n"
+ "mov %%eax, %%cr3\n"
+ "mov %%cr0, %%eax\n"
+ "or $0x80000000, %%eax\n"
+ "mov %%eax, %%cr0"
+ : : : "eax");
+}
+
+void switch_to_kernel_cr3() {
+ asm volatile (
+ "mov $0x00005000, %%eax\n"
+ "mov %%eax, %%cr3"
+ : : : "eax");
+}
+
+void switch_to_task_cr3() {
+ asm volatile (
+ "mov %0, %%cr3"
+ : : "a" (active_task->page_directory));
+} \ No newline at end of file