start on paging system; make our own page tables and switch to them

Benji Dial 2025-07-05 13:05:06 -04:00
parent c7c65593d3
commit 746218052e
7 changed files with 349 additions and 10 deletions


@@ -1,2 +1,5 @@
-std=c23
-I
dependencies/limine
-I
kernel/include

39
kernel/include/paging.h Normal file

@@ -0,0 +1,39 @@
/* Calcite, kernel/include/paging.h
* Copyright 2025 Benji Dial
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <https://www.gnu.org/licenses/>.
*/
#pragma once
#include <stdint.h>
//kernel physical and virtual bases are passed so that we can compute the
//physical addresses of the kernel's statically allocated paging structures.
void init_paging(uint64_t kernel_physical_base, void *kernel_virtual_base);
//base and length should be page-aligned
void mark_physical_memory_free(uint64_t base, uint64_t length);
//maps one page. physical and virtual bases should be page-aligned.
//virtual address should be within kernel range.
void map_in_kernel_page_table(
uint64_t physical_base, void *virtual_base, int writable, int executable);
//returns a region of contiguous pages in kernel virtual memory where nothing
//is mapped. length should be page-aligned.
void *find_free_kernel_region(uint64_t length);
//implemented in paging.asm. the continuation should be noreturn.
[[noreturn]] void switch_to_kernel_page_tables(void (*continuation)());
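//the expected call order during early boot (see kernel/src/entry.c) is:
//init_paging, then mark_physical_memory_free for each usable region, then
//map_in_kernel_page_table and find_free_kernel_region as needed, and finally
//switch_to_kernel_page_tables.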


@@ -16,6 +16,7 @@
*/
#include <limine.h>
#include <paging.h>
LIMINE_BASE_REVISION(3)
@@ -25,24 +26,126 @@ static volatile struct limine_framebuffer_request fb_request = {
.response = 0
};
static volatile struct limine_hhdm_request hhdm_request = {
.id = LIMINE_HHDM_REQUEST,
.revision = 0,
.response = 0
};
static volatile struct limine_kernel_address_request ka_request = {
.id = LIMINE_KERNEL_ADDRESS_REQUEST,
.revision = 0,
.response = 0
};
static volatile struct limine_memmap_request memmap_request = {
.id = LIMINE_MEMMAP_REQUEST,
.revision = 0,
.response = 0
};
[[noreturn]] static void die() {
while (1)
__asm__ ("hlt");
}
//these are defined in the linker script. they are page-aligned.
extern uint8_t __kernel_rx_start;
extern uint8_t __kernel_rx_end;
extern uint8_t __kernel_ro_start;
extern uint8_t __kernel_ro_end;
extern uint8_t __kernel_rw_start;
extern uint8_t __kernel_rw_end;
static void map_kernel_region(
uint64_t physical_start, void *virtual_start,
uint64_t length, int writable, int executable) {
for (uint64_t i = 0; i < length; i += 4096)
map_in_kernel_page_table(
physical_start + i, (uint8_t *)virtual_start + i, writable, executable);
}
static uint8_t *fb_base;
static int fb_width;
static int fb_height;
static int fb_pitch;
[[noreturn]] static void with_kernel_page_tables();
[[noreturn]] void kernel_entry() {
//die if the bootloader hasn't given us something that we need.
if (fb_request.response == 0 || hhdm_request.response == 0 ||
ka_request.response == 0 || memmap_request.response == 0 ||
fb_request.response->framebuffer_count == 0)
die();
struct limine_framebuffer *fb = fb_request.response->framebuffers[0];
if (fb->memory_model != LIMINE_FRAMEBUFFER_RGB || fb->bpp != 32 ||
fb->red_mask_shift != 16 || fb->red_mask_size != 8 ||
fb->green_mask_shift != 8 || fb->green_mask_size != 8 ||
fb->blue_mask_shift != 0 || fb->blue_mask_size != 8)
die();
//set up page tables. we will mark the regions with bootloader structures as
//usable, so we need to be careful not to allocate any physical pages until
//after we are done using bootloader structures. we map the kernel into our
//page tables, and we remap the framebuffer to somewhere in kernel memory.
//we do not map the bootloader structures, so we also need to not switch
//to the new tables until we are done using them.
uint64_t kernel_physical_base = ka_request.response->physical_base;
uint8_t *kernel_virtual_base = (uint8_t *)ka_request.response->virtual_base;
init_paging(kernel_physical_base, kernel_virtual_base);
struct limine_memmap_response *mm_response = memmap_request.response;
for (uint64_t i = 0; i < mm_response->entry_count; ++i) {
struct limine_memmap_entry *entry = mm_response->entries[i];
if (entry->type == LIMINE_MEMMAP_USABLE ||
entry->type == LIMINE_MEMMAP_BOOTLOADER_RECLAIMABLE)
//limine guarantees these two types are page-aligned already.
mark_physical_memory_free(entry->base, entry->length);
}
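//the kernel image is loaded contiguously, so each section's physical base is
//its virtual address minus the virtual base, plus the physical base.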
map_kernel_region(
&__kernel_rx_start - kernel_virtual_base + kernel_physical_base,
&__kernel_rx_start, &__kernel_rx_end - &__kernel_rx_start, 0, 1);
map_kernel_region(
&__kernel_ro_start - kernel_virtual_base + kernel_physical_base,
&__kernel_ro_start, &__kernel_ro_end - &__kernel_ro_start, 0, 0);
map_kernel_region(
&__kernel_rw_start - kernel_virtual_base + kernel_physical_base,
&__kernel_rw_start, &__kernel_rw_end - &__kernel_rw_start, 1, 0);
//we round up to a multiple of a page.
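//(for example, 600 rows with a pitch of 3200 bytes is 1920000 bytes, which
//rounds up to 469 pages, i.e. 1921024 bytes.)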
uint64_t fb_length = ((fb->height * fb->pitch - 1) / 4096 + 1) * 4096;
fb_base = find_free_kernel_region(fb_length);
map_kernel_region(
(uint64_t)fb->address - hhdm_request.response->offset,
fb_base, fb_length, 1, 0);
//store rest of framebuffer information
fb_width = fb->width;
fb_height = fb->height;
fb_pitch = fb->pitch;
//switch to kernel page tables!
switch_to_kernel_page_tables(&with_kernel_page_tables);
}
[[noreturn]] static void with_kernel_page_tables() {
//display our test pattern
for (int y = 0; y < fb_height; ++y)
for (int x = 0; x < fb_width; ++x) {
fb_base[y * fb_pitch + x * 4] = y * 256 / fb_height;
fb_base[y * fb_pitch + x * 4 + 1] = x * 256 / fb_width;
fb_base[y * fb_pitch + x * 4 + 2] = 0;
}
die();
}

42
kernel/src/paging.asm Normal file

@@ -0,0 +1,42 @@
; Calcite, kernel/src/paging.asm
; Copyright 2025 Benji Dial
;
; This program is free software: you can redistribute it and/or modify
; it under the terms of the GNU General Public License as published by
; the Free Software Foundation, either version 3 of the License, or (at
; your option) any later version.
;
; This program is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
; more details.
;
; You should have received a copy of the GNU General Public License along with
; this program. If not, see <https://www.gnu.org/licenses/>.
bits 64
;both defined in paging.c
extern kernel_p4_physical_address
extern init_stack
init_stack_length equ 16384
section .text
global switch_to_kernel_page_tables
switch_to_kernel_page_tables:
;switch the page table
mov rax, qword [kernel_p4_physical_address]
mov cr3, rax
;set the stack
mov rsp, init_stack + init_stack_length
;push 0 before jumping to continuation so that if the continuation
;does return (which it shouldn't), it returns to null instead of
;popping garbage and returning there
push qword 0
jmp rdi

147
kernel/src/paging.c Normal file

@@ -0,0 +1,147 @@
/* Calcite, kernel/src/paging.c
* Copyright 2025 Benji Dial
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include <paging.h>
#define MAX_PHYSICAL_GB 64ULL
//this is a simple bitmap. physical_map[i / 8] & (1 << (i % 8)) is nonzero if
//and only if the physical page with base i * 4096 is free.
static uint8_t physical_map[MAX_PHYSICAL_GB << 15];
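//for example, the page with physical base 0x2d000 is page number 0x2d = 45,
//so its state is bit 45 % 8 = 5 of physical_map[45 / 8], i.e. of byte 5.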
//kernel virtual memory is the top gigabyte, i.e. one "page directory" worth
//of pages. in the future i might make it more reactive, and allow new page
//directories to be allocated as needed, but for now there is just the one
//page directory, and all of the page tables in that directory are statically
//allocated. i do not like the intel/amd names for the paging structures, so
//i will use these names throughout the rest of this file:
// p4 = "page map level 4"
// p3 = "page directory pointer table"
// p2 = "page directory"
// p1 = "page table"
//referenced from paging.asm
uint64_t kernel_p4_physical_address;
alignas(4096) static uint64_t kernel_p4[512];
alignas(4096) static uint64_t kernel_p3[512];
alignas(4096) static uint64_t kernel_p2[512];
alignas(4096) static uint64_t kernel_p1s[512 * 512];
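//as an illustration of the layout: the top gigabyte starts at virtual address
//0xffffffffc0000000, which breaks down into p4 index 511, p3 index 511,
//p2 index 0, p1 index 0, so it is mapped by kernel_p1s[0]. in general a
//kernel virtual address v is mapped by kernel_p1s[(v - 0xffffffffc0000000) >> 12].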
//referenced from paging.asm. at the end of init, this will either be marked
//free or reused as an interrupt / syscall stack. if the length of this is
//changed, init_stack_length in paging.asm needs to be changed to match (both
//are in bytes). there is no guard page, but i don't expect init to need much
//stack.
alignas(4096) uint8_t init_stack[16384];
void init_paging(uint64_t kernel_physical_base, void *kernel_virtual_base) {
uint64_t kernel_load_offset =
(uint64_t)kernel_virtual_base - kernel_physical_base;
kernel_p4_physical_address = (uint64_t)kernel_p4 - kernel_load_offset;
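//0x3 = present | writable. the intermediate entries are left writable so
//that write permission is decided by the final p1 entry alone.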
kernel_p4[511] = ((uint64_t)kernel_p3 - kernel_load_offset) | 0x3;
for (int i = 0; i < 511; ++i)
kernel_p4[i] = 0;
kernel_p3[511] = ((uint64_t)kernel_p2 - kernel_load_offset) | 0x3;
for (int i = 0; i < 511; ++i)
kernel_p3[i] = 0;
for (int i = 0; i < 512; ++i)
kernel_p2[i] =
((uint64_t)kernel_p1s - kernel_load_offset + i * 4096) | 0x03;
for (int i = 0; i < 512 * 512; ++i)
kernel_p1s[i] = 0;
}
void mark_physical_memory_free(uint64_t base, uint64_t length) {
//clamp the region to the part of it that the bitmap covers.
if (base >= MAX_PHYSICAL_GB << 30)
return;
if (base + length > MAX_PHYSICAL_GB << 30)
length = (MAX_PHYSICAL_GB << 30) - base;
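//free pages one at a time until the page index is a multiple of eight, so
//that the middle loop can work on whole bitmap bytes.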
while (length && (base & (0x7 << 12))) {
physical_map[base >> 15] |= 1 << ((base >> 12) % 8);
base += 4096;
length -= 4096;
}
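//now free eight pages at a time by setting whole bitmap bytes.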
while (length >= 4096 * 8) {
physical_map[base >> 15] = 0xff;
base += 4096 * 8;
length -= 4096 * 8;
}
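//free any remaining pages one at a time.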
while (length) {
physical_map[base >> 15] |= 1 << ((base >> 12) % 8);
base += 4096;
length -= 4096;
}
}
void map_in_kernel_page_table(
uint64_t physical_base, void *virtual_base,
int writable, int executable) {
uint64_t virtual_base_u64 = (uint64_t)virtual_base;
//should probably die in this case
if (virtual_base_u64 < 0xffffffffc0000000)
return;
uint64_t p1s_index = (virtual_base_u64 - 0xffffffffc0000000) >> 12;
//should probably die in this case too
if (kernel_p1s[p1s_index] != 0)
return;
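//bit 0 marks the entry present, bit 1 makes it writable, and bit 63 is the
//execute-disable bit.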
kernel_p1s[p1s_index] =
physical_base | (writable ? 0x3 : 0x1) |
(executable ? 0 : 0x8000000000000000);
}
void *find_free_kernel_region(uint64_t length) {
length /= 4096;
uint64_t next = 0;
uint64_t run_start = 0;
uint64_t run_length = 0;
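//first-fit linear scan over the kernel p1 entries: run_start and run_length
//track the current run of unmapped pages, and next is the next entry to check.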
while (1) {
if (run_length == length)
return (void *)(run_start * 4096 + 0xffffffffc0000000);
if (next == 512 * 512)
//die. TODO: handle this nicer.
while (1)
__asm__ ("hlt");
if (kernel_p1s[next] == 0)
++run_length;
else {
run_length = 0;
run_start = next + 1;
}
++next;
}
}


@@ -14,11 +14,15 @@ clean:
# kernel
KERNEL_SOURCES = entry.c paging.asm paging.c
build/kernel/%.asm.o: kernel/src/%.asm
@mkdir -p ${@D}
nasm -f elf64 $^ -o $@
build/kernel/%.c.o: kernel/src/%.c
@mkdir -p ${@D}
cc -c ${CC_FLAGS} -I dependencies/limine -I kernel/include $^ -o $@
build/kernel/kernel.elf: ${KERNEL_SOURCES:%=build/kernel/%.o}
ld -T kernel/link.ld $^ -o $@


@@ -17,9 +17,10 @@ Calcite requires some dependencies before it can be built:
* GNU Binutils (Specifically, "ld" is used to link the kernel.)
* GNU Make (I don't think I have used any GNU extensions.)
* GNU xorriso
* NASM
On Debian, it is sufficient to run this command:
apt install binutils curl gcc make nasm xorriso
To build Calcite, first run "sh get-dependencies.sh", then run "make".
This will build a disk image at "build/disk.iso" that can be booted