From bce944d1498eaa3b6940ee234c863b3548a66b37 Mon Sep 17 00:00:00 2001
From: Benji Dial
Date: Sun, 24 Jan 2021 12:00:11 -0500
Subject: graphics!

---
 src/kernel/task.c | 203 +++++++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 184 insertions(+), 19 deletions(-)

(limited to 'src/kernel/task.c')

diff --git a/src/kernel/task.c b/src/kernel/task.c
index 942d1c4..ab08e6b 100644
--- a/src/kernel/task.c
+++ b/src/kernel/task.c
@@ -1,6 +1,8 @@
+#include "paging.h"
 #include "panic.h"
+#include "pmap.h"
 #include "task.h"
-#include "paging.h"
+#include "util.h"
 #include "log.h"
 
 struct tss {
@@ -42,7 +44,7 @@ struct tss {
 
 #define MAX_TASKS 64
 
-static struct task_state tasks[MAX_TASKS];
+struct task_state tasks[MAX_TASKS];
 struct task_state *active_task;
 
 void init_tasks() {
@@ -68,31 +70,68 @@ uint32_t new_task(struct task_state state) {
   for (uint8_t n = 0; n < MAX_TASKS; ++n)
     if (!tasks[n].page_directory) {
       tasks[n] = state;
+      tasks[n].waiting = false;
+      for (uint8_t i = 0; i < MAX_WAITS; ++i)
+        tasks[n].waits[i].mode = NONE;
       return n + 1;
     }
   PANIC("Maximum number of tasks reached.");
 }
 
+static void tmp_halt() {
+//logf(LOG_INFO, "scheduler halting");
+  TSS->esp0 = 0x0003c000;
+  asm("sti\n"
+      "hlt\n"
+      "cli");
+  TSS->esp0 = 0x00040000;
+//logf(LOG_INFO, "scheduler resumed");
+}
+
 void advance_active_task() {
-  do
+//logf(LOG_INFO, "entered scheduler from \"%s\"", active_task->name);
+  struct task_state *old_task = active_task;
+  while (1) {
     if (++active_task == tasks + MAX_TASKS)
       active_task = tasks;
-  while (!active_task->page_directory || active_task->wait_mode);
+    if (active_task->page_directory && !active_task->waiting) {
+      //logf(LOG_INFO, "exiting scheduler to \"%s\"", active_task->name);
+      return;
+    }
+    if (active_task == old_task)
+      tmp_halt();
+  }
 }
 
 void make_sure_tasks() {
   for (uint8_t n = 0; n < MAX_TASKS; ++n)
     if (tasks[n].page_directory)
-      while (1) {
-        for (uint8_t n = 0; n < MAX_TASKS; ++n)
-          if (tasks[n].page_directory && !tasks[n].wait_mode)
-            return;
-        asm ("hlt");
-      }
-  set_log_mode(LOG_SYSTEM);
-  logsz("No tasks, halting.");
+      return;
+  logf(LOG_INFO, "No tasks, halting.");
   while (1)
-    asm ("hlt");
+    asm("hlt");
+}
+
+//IPC stuff isn't fully implemented, or tested in this version.
+//i'm planning to finish and make use of it in the next version,
+//making the terminal its own application instead of a library.
+
+#define MAX_IPC_PIPES 1024
+#define IPC_BUFFER_PAGES 1
+
+struct ipc_pipe {
+  void *buffer;
+  void *buffer_next_send;
+  const void *buffer_next_read;
+
+  uint32_t sender_handle;
+  uint32_t reader_handle;
+  bool delete_when_empty;
+} ipc_pipes[MAX_IPC_PIPES];
+
+void delete_pipe(struct ipc_pipe *pipe) {
+  free_pages(pipe->buffer, IPC_BUFFER_PAGES);
+  pipe->buffer = 0;
 }
 
 void delete_task(struct task_state *state) {
@@ -101,10 +140,136 @@ void delete_task(struct task_state *state) {
   switch_to_task_cr3();
 
   state->page_directory = 0;
-  uint32_t handle = state - tasks + 1;
-  for (uint8_t n = 0; n < MAX_TASKS; ++n)
-    if (tasks[n].page_directory &&
-        (tasks[n].wait_mode == PROCESS_END) &&
-        (tasks[n].wait_arg == handle))
-      tasks[n].wait_mode = NONE;
+  unwait_any((struct wait){.mode = PROCESS_END, .task = state});
+  unwait_any((struct wait){.mode = IPC_RECEIVE, .task = state});
+  unwait_any((struct wait){.mode = IPC_SEND, .task = state});
+
+  const uint32_t handle = active_task - tasks + 1;
+  for (struct ipc_pipe *pipe = ipc_pipes; pipe < ipc_pipes + MAX_IPC_PIPES; ++pipe)
+    if (pipe->buffer) {
+      if (pipe->reader_handle == handle)
+        delete_pipe(pipe);
+      else if (pipe->sender_handle == handle) {
+        if (pipe->buffer_next_read == pipe->buffer_next_send)
+          delete_pipe(pipe);
+        else
+          pipe->delete_when_empty = true;
+      }
+    }
+}
+
+uint32_t find_unread_ipc() {
+  const uint32_t r_handle = active_task - tasks + 1;
+  for (struct ipc_pipe *pipe = ipc_pipes; pipe < ipc_pipes + MAX_IPC_PIPES; ++pipe)
+    if (pipe->buffer && (pipe->reader_handle == r_handle) &&
+        (pipe->buffer_next_read != pipe->buffer_next_send))
+      return pipe->sender_handle;
+  return 0;
+}
+
+struct ipc_pipe *get_existing_pipe(uint32_t sender_handle, uint32_t reader_handle) {
+  for (struct ipc_pipe *i = ipc_pipes; i < ipc_pipes + MAX_IPC_PIPES; ++i)
+    if (i->buffer && (i->sender_handle == sender_handle) &&
+        (i->reader_handle == reader_handle))
+      return i;
+  return 0;
+}
+
+uint32_t ipc_send(uint32_t reader_handle, uint32_t count, const void *buffer) {
+  if (!reader_handle || !tasks[reader_handle - 1].page_directory)
+    return -1;
+
+  const uint32_t our_handle = active_task - tasks + 1;
+  struct ipc_pipe *pipe = get_existing_pipe(our_handle, reader_handle);
+  if (!pipe) {
+    for (struct ipc_pipe *i = ipc_pipes; i < ipc_pipes + MAX_IPC_PIPES; ++i)
+      if (!i->buffer) {
+        i->buffer = allocate_kernel_pages(IPC_BUFFER_PAGES);
+        i->buffer_next_read = i->buffer;
+        i->buffer_next_send = i->buffer;
+        i->reader_handle = reader_handle;
+        i->sender_handle = our_handle;
+        i->delete_when_empty = false;
+        pipe = i;
+        break;
+      }
+    if (!pipe)
+      PANIC("out of ipc pipes");
+  }
+
+  unwait(tasks + reader_handle - 1, (struct wait){.mode = IPC_RECEIVE, .task = active_task});
+  unwait(tasks + reader_handle - 1, (struct wait){.mode = IPC_RECEIVE_ANY});
+
+  int32_t send_left = pipe->buffer_next_read - pipe->buffer_next_send - 1;
+  if (send_left < 0)
+    send_left += 4096 * IPC_BUFFER_PAGES;
+  if (count > send_left)
+    count = send_left;
+
+  if (pipe->buffer_next_send + count < pipe->buffer + 4096 * IPC_BUFFER_PAGES) {
+    memcpy(pipe->buffer_next_send, buffer, count);
+    pipe->buffer_next_send += count;
+    return count;
+  }
+
+  const uint32_t first_batch = pipe->buffer + 4096 * IPC_BUFFER_PAGES - pipe->buffer_next_send;
+  memcpy(pipe->buffer_next_send, buffer, first_batch);
+  memcpy(pipe->buffer, buffer + first_batch, count - first_batch);
+  pipe->buffer_next_send += count - 4096;
+  return count;
+}
+
+uint32_t ipc_read(uint32_t sender_handle, uint32_t count, void *buffer) {
+  if (!sender_handle || !tasks[sender_handle - 1].page_directory)
+    return -1;
+
+  const uint32_t our_handle = active_task - tasks + 1;
+  struct ipc_pipe *pipe = get_existing_pipe(sender_handle, our_handle);
+  if (!pipe)
+    return 0;
+
+  unwait(tasks + sender_handle - 1, (struct wait){.mode = IPC_SEND, .task = active_task});
+
+  //TODO
+}
+
+void add_wait(struct wait wait) {
+  for (uint8_t i = 0; i < MAX_WAITS; ++i)
+    if (!active_task->waits[i].mode) {
+      active_task->waits[i] = wait;
+      active_task->waiting = true;
+      return;
+    }
+  PANIC("Out of waits for task.");
+}
+
+void unwait_any(struct wait wait) {
+  for (uint8_t i = 0; i < MAX_TASKS; ++i)
+    if (tasks[i].page_directory)
+      unwait(tasks + i, wait);
+}
+
+void unwait(struct task_state *task, struct wait wait) {
+  if (!task->waiting)
+    return;
+  for (uint8_t i = 0; i < MAX_WAITS; ++i) {
+    if (task->waits[i].mode != wait.mode)
+      continue;
+    switch (wait.mode) {
+      case PROCESS_END:
+      case IPC_RECEIVE:
+        if (task->waits[i].task != wait.task)
+          continue;
+        break;
+      case WINDOW_ACTION:
+      case IPC_RECEIVE_ANY:
+        break;
+      default:
+        PANIC("Unwait matched with unrecognized wait mode.");
    }
+    for (uint8_t i = 0; i < MAX_WAITS; ++i)
+      task->waits[i].mode = NONE;
+    task->waiting = false;
+    return;
+  }
 }
\ No newline at end of file
--
cgit v1.2.3
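
The new IPC pipes are single-page ring buffers: buffer_next_send chases buffer_next_read around a 4096-byte region, one slot is always left unused so that "read == send" can only mean empty, and a send that runs past the end of the page is split into two memcpy calls. The following stand-alone sketch shows the same arithmetic with hypothetical names (struct ring, ring_free, ring_write) and plain byte indices instead of the kernel's pointers; it is an illustration of the technique, not code from the patch.

#include <stdint.h>
#include <string.h>

#define RING_SIZE 4096              /* one page, i.e. 4096 * IPC_BUFFER_PAGES */

struct ring {
  uint8_t data[RING_SIZE];
  uint32_t next_send;               /* index of the next byte to write */
  uint32_t next_read;               /* index of the next byte to read  */
};

/* Free space, keeping one slot unused so that next_read == next_send
   always means "empty" and never "full".  The computation has to be
   signed so the wrap-around branch can fire. */
static uint32_t ring_free(const struct ring *r) {
  int32_t left = (int32_t)r->next_read - (int32_t)r->next_send - 1;
  if (left < 0)
    left += RING_SIZE;
  return (uint32_t)left;
}

/* Copy in up to count bytes, splitting the copy when it would run past
   the physical end of the buffer, and return how many were accepted. */
static uint32_t ring_write(struct ring *r, const void *src, uint32_t count) {
  uint32_t left = ring_free(r);
  if (count > left)
    count = left;

  uint32_t first = RING_SIZE - r->next_send;   /* room before the wrap point */
  if (count <= first) {
    memcpy(r->data + r->next_send, src, count);
  } else {
    memcpy(r->data + r->next_send, src, first);
    memcpy(r->data, (const uint8_t *)src + first, count - first);
  }
  r->next_send = (r->next_send + count) % RING_SIZE;
  return count;
}

Starting from an empty ring, two ring_write calls of 10 bytes each leave next_send at 20 and ring_free() at 4075, which is the same bookkeeping ipc_send performs on buffer_next_send and buffer_next_read.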
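
The wait functions at the end of the diff replace the old single wait_mode field: add_wait lets a task queue up to MAX_WAITS conditions at once, and unwait wakes the task as soon as any one of them matches, clearing the whole list (PROCESS_END and IPC_RECEIVE waits also have to name the matching task, while WINDOW_ACTION and IPC_RECEIVE_ANY match on mode alone). A minimal user-space model of that matching rule, using hypothetical names (struct task_model, deliver) in place of the kernel structures, looks like this:

#include <stdbool.h>
#include <stdio.h>

#define MAX_WAITS 4

enum wait_mode { NONE = 0, PROCESS_END, IPC_RECEIVE, IPC_RECEIVE_ANY, WINDOW_ACTION };

struct wait {
  enum wait_mode mode;
  int task;                          /* stands in for the kernel's task pointer */
};

struct task_model {
  bool waiting;
  struct wait waits[MAX_WAITS];
};

/* Wake the task if 'event' matches any one of its pending waits. */
static void deliver(struct task_model *t, struct wait event) {
  if (!t->waiting)
    return;
  for (int i = 0; i < MAX_WAITS; ++i) {
    if (t->waits[i].mode != event.mode)
      continue;
    /* PROCESS_END and IPC_RECEIVE also have to name the same task. */
    if ((event.mode == PROCESS_END || event.mode == IPC_RECEIVE) &&
        t->waits[i].task != event.task)
      continue;
    for (int j = 0; j < MAX_WAITS; ++j)   /* one match clears every wait */
      t->waits[j].mode = NONE;
    t->waiting = false;
    return;
  }
}

int main(void) {
  struct task_model t = {.waiting = true};
  t.waits[0] = (struct wait){.mode = PROCESS_END, .task = 7};
  t.waits[1] = (struct wait){.mode = IPC_RECEIVE_ANY};

  deliver(&t, (struct wait){.mode = PROCESS_END, .task = 3});  /* wrong task: stays blocked */
  printf("still waiting: %d\n", t.waiting);                    /* prints 1 */
  deliver(&t, (struct wait){.mode = IPC_RECEIVE_ANY});         /* matches: wakes up */
  printf("still waiting: %d\n", t.waiting);                    /* prints 0 */
  return 0;
}

The clear-everything behaviour is what lets a task block on several unrelated events (a process exiting, any IPC arriving, a window action) and resume as soon as the first one happens.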