/* Calcite, src/kernel/paging.c
 * Copyright 2025 Benji Dial
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program. If not, see <https://www.gnu.org/licenses/>.
 */
#include "paging.h"
|
|
#include "panic.h"
|
|
|
|
#define MAX_PHYSICAL_GB 64ULL
|
|
|
|
//this is a simple bitmap. physical_map[i / 8] & (1 << (i % 8)) is nonzero if
//and only if the physical page with base i * 4096 is free.
static uint8_t physical_map[MAX_PHYSICAL_GB << 15];
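
//purely as an illustration of the encoding (this helper is an addition for
//exposition and is not used elsewhere in this file): the page with physical
//base b has page index b >> 12, so its bit lives in byte b >> 15 of the map,
//at bit position (b >> 12) % 8.
static inline int physical_page_is_free(uint64_t base) {
  return (physical_map[base >> 15] >> ((base >> 12) % 8)) & 1;
}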

//kernel virtual memory is the top gigabyte, i.e. one "page directory" worth
//of pages. in the future i might make it more reactive, and allow new page
//directories to be allocated as needed, but for now there is just the one
//page directory, and all of the page tables in that directory are statically
//allocated. i do not like the intel/amd names for the paging structures, so
//i will use these names throughout the rest of this file:
// p4 = "page map level 4"
// p3 = "page directory pointer table"
// p2 = "page directory"
// p1 = "page table"
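//for instance, for a kernel virtual address v at or above
//0xffffffffc0000000, the p4 index (v >> 39) & 511 and the p3 index
//(v >> 30) & 511 are both 511, and (v >> 21) & 511 and (v >> 12) & 511
//select the p2 and p1 entries. 512 p1s of 512 pages of 4096 bytes is
//exactly one gigabyte.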

//referenced from paging.asm
uint64_t kernel_p4_physical_address;

uint64_t kernel_p3_physical_address;

alignas(4096) static uint64_t kernel_p4[512];
alignas(4096) static uint64_t kernel_p3[512];
alignas(4096) static uint64_t kernel_p2[512];
alignas(4096) static uint64_t kernel_p1s[512 * 512];
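
//kernel_p1s above is 512 page tables of 512 eight-byte entries each: 2 MiB
//of statically allocated tables, enough to map the entire 1 GiB kernel
//region one 4 KiB page at a time.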

//referenced from paging.asm. at the end of init, this will either be marked
//free or reused as an interrupt / syscall stack. if the length of this is
//changed, it also needs to be changed in paging.asm. there is no guard page,
//but i don't expect init to need much stack.
alignas(4096) uint64_t init_stack[16384];

void init_paging(uint64_t kernel_physical_base, void *kernel_virtual_base) {

  uint64_t kernel_load_offset =
    (uint64_t)kernel_virtual_base - kernel_physical_base;
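
  //the kernel is linked at kernel_virtual_base but loaded at
  //kernel_physical_base, so subtracting kernel_load_offset below converts a
  //kernel virtual address into the physical address it is loaded at.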
  kernel_p4_physical_address = (uint64_t)kernel_p4 - kernel_load_offset;
  kernel_p3_physical_address = (uint64_t)kernel_p3 - kernel_load_offset;
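
  //0x3 in the low bits of an entry means present | writable. only entry 511
  //of the p4 and of the p3 is filled in, since the kernel region sits at the
  //very top of the address space; every other entry is left unmapped.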
  kernel_p4[511] = ((uint64_t)kernel_p3 - kernel_load_offset) | 0x3;
  for (int i = 0; i < 511; ++i)
    kernel_p4[i] = 0;

  kernel_p3[511] = ((uint64_t)kernel_p2 - kernel_load_offset) | 0x3;
  for (int i = 0; i < 511; ++i)
    kernel_p3[i] = 0;

  for (int i = 0; i < 512; ++i)
    kernel_p2[i] =
      ((uint64_t)kernel_p1s - kernel_load_offset + i * 4096) | 0x03;

  for (int i = 0; i < 512 * 512; ++i)
    kernel_p1s[i] = 0;
}

void mark_physical_memory_free(uint64_t base, uint64_t length) {

  //clamp to the part of the bitmap that exists. without the first check, a
  //region starting past the end of the bitmap would make the subtraction
  //below wrap around.
  if (base >= MAX_PHYSICAL_GB << 30)
    return;
  if (base + length > MAX_PHYSICAL_GB << 30)
    length = (MAX_PHYSICAL_GB << 30) - base;
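
  //free pages one at a time until base reaches an 8-page (one bitmap byte)
  //boundary, then free a whole bitmap byte at a time, then finish off the
  //remainder page by page.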
  while (length >= 4096 && (base & (0x7 << 12))) {
    physical_map[base >> 15] |= 1 << ((base >> 12) % 8);
    base += 4096;
    length -= 4096;
  }

  while (length >= 4096 * 8) {
    physical_map[base >> 15] = 0xff;
    base += 4096 * 8;
    length -= 4096 * 8;
  }

  //comparing against 4096 rather than 0 keeps length from wrapping around
  //when it is not a multiple of the page size; any partial trailing page is
  //simply not marked free.
  while (length >= 4096) {
    physical_map[base >> 15] |= 1 << ((base >> 12) % 8);
    base += 4096;
    length -= 4096;
  }
}

//defined in paging.asm
void invlpg(void *address);
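
//maps one 4 KiB page into the kernel region. in a p1 entry, bit 0 is
//"present", bit 1 is "writable", and bit 63 (0x8000000000000000) is the
//execute-disable bit, so it is set exactly when the page should not be
//executable.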
void map_in_kernel_page_table(
  uint64_t physical_base, void *virtual_base,
  int writable, int executable) {

  uint64_t virtual_base_u64 = (uint64_t)virtual_base;
  assert(virtual_base_u64 >= 0xffffffffc0000000);

  uint64_t p1s_index = (virtual_base_u64 - 0xffffffffc0000000) >> 12;
  assert(kernel_p1s[p1s_index] == 0);

  kernel_p1s[p1s_index] =
    physical_base | (writable ? 0x3 : 0x1) |
    (executable ? 0 : 0x8000000000000000);

  invlpg(virtual_base);
}

void unmap_kernel_page(void *virtual_base) {

  uint64_t virtual_base_u64 = (uint64_t)virtual_base;
  assert(virtual_base_u64 >= 0xffffffffc0000000);

  uint64_t p1s_index = (virtual_base_u64 - 0xffffffffc0000000) >> 12;
  assert(kernel_p1s[p1s_index] != 0);

  kernel_p1s[p1s_index] = 0;

  invlpg(virtual_base);
}

void unmap_and_free_kernel_page(void *virtual_base) {

  uint64_t virtual_base_u64 = (uint64_t)virtual_base;
  assert(virtual_base_u64 >= 0xffffffffc0000000);

  uint64_t p1s_index = (virtual_base_u64 - 0xffffffffc0000000) >> 12;
  assert(kernel_p1s[p1s_index] != 0);

  //mask off the flag bits (the low 12 bits and the execute-disable bit) to
  //recover the physical address stored in the entry.
  uint64_t pma = kernel_p1s[p1s_index] & 0x7ffffffffffff000;
  mark_physical_memory_free(pma, 4096);

  kernel_p1s[p1s_index] = 0;

  invlpg(virtual_base);
}
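
//first-fit linear scan over the kernel p1 entries for a run of unmapped
//pages long enough to hold 'length' bytes. this only finds the region;
//nothing is mapped until the caller maps it page by page.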
void *find_free_kernel_region(uint64_t length) {

  //convert to a page count, rounding up in case length is not a multiple of
  //the page size.
  length = (length + 4095) / 4096;

  uint64_t next = 0;

  uint64_t run_start = 0;
  uint64_t run_length = 0;

  while (1) {
    if (run_length == length)
      return (void *)(run_start * 4096 + 0xffffffffc0000000);
    if (next == 512 * 512)
      panic("out of kernel virtual memory");
    if (kernel_p1s[next] == 0)
      ++run_length;
    else {
      run_length = 0;
      run_start = next + 1;
    }
    ++next;
  }
}
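
//linear scan for the first set bit in the bitmap; clears that bit and
//returns the base of the page it stands for. for example, bit 3 of
//physical_map[2] is page index 2 * 8 + 3 = 19, i.e. physical base 0x13000.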
uint64_t take_free_physical_page() {
  for (uint64_t i = 0; i < (MAX_PHYSICAL_GB << 15); ++i)
    for (int j = 0; j < 8; ++j)
      if (physical_map[i] & (1 << j)) {
        physical_map[i] &= ~(1 << j);
        return (i << 15) + (j << 12);
      }
  panic("out of physical memory");
}
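
//as an illustrative sketch only (this helper is an addition for exposition
//and is not part of the interface above): allocating and mapping a fresh
//writable, non-executable kernel page combines the pieces like this.
static inline void *example_alloc_kernel_page(void) {
  void *va = find_free_kernel_region(4096);
  map_in_kernel_page_table(take_free_physical_page(), va, 1, 0);
  return va;
}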