#include "paging.h"
|
|
#include "window.h"
|
|
#include "panic.h"
|
|
#include "pmap.h"
|
|
#include "task.h"
|
|
#include "util.h"
|
|
#include "log.h"
|
|
|
|
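// Layout of the 32-bit x86 task state segment. Hardware task switching isn't
// used for scheduling; only the ring-0 stack (ss0:esp0), which the CPU loads
// on interrupts from user mode, and the I/O map base are actually filled in.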
struct tss {
	struct tss *prev;

	uint32_t esp0;
	uint32_t ss0;
	uint32_t esp1;
	uint32_t ss1;
	uint32_t esp2;
	uint32_t ss2;

	uint32_t cr3;
	uint32_t eip;
	uint32_t eflags;

	uint32_t eax;
	uint32_t ecx;
	uint32_t edx;
	uint32_t ebx;
	uint32_t esp;
	uint32_t ebp;
	uint32_t esi;
	uint32_t edi;

	uint32_t es;
	uint32_t cs;
	uint32_t ss;
	uint32_t ds;
	uint32_t fs;
	uint32_t gs;

	uint32_t ldt;
	uint16_t trap;
	uint16_t iomp;
} __attribute__ ((packed));

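// The single, shared TSS, kept at a fixed low-memory address.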
#define TSS ((struct tss *)0x00004f98)

struct task_state tasks[MAX_TASKS];
struct task_state *active_task;

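// Claims the first free slot in tasks[] (one whose page_directory is 0),
// stores the task's 1-based handle in its saved ecx and clears its wait list.
// Returns 0 if every slot is in use.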
struct task_state *new_task() {
	for (uint8_t n = 0; n < MAX_TASKS; ++n)
		if (!tasks[n].page_directory) {
			tasks[n].ecx = n + 1;
			tasks[n].waiting = false;
			for (uint8_t i = 0; i < MAX_WAITS; ++i)
				tasks[n].waits[i].mode = NONE;
			return tasks + n;
		}
	logf(LOG_ERROR, "Reached %d tasks, refusing to create any more.", MAX_TASKS);
	return 0;
}

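// Idles with interrupts enabled until the next interrupt fires. The TSS's
// ring-0 stack pointer is temporarily moved while halted (presumably so the
// waking interrupt doesn't reuse the scheduler's stack) and restored after.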
static void tmp_halt() {
	//logf(LOG_INFO, "scheduler halting");
	TSS->esp0 = 0x00028000;
	asm("sti\n"
		"hlt\n"
		"cli");
	TSS->esp0 = 0x0002f000;
	//logf(LOG_INFO, "scheduler resumed");
}

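// Round-robin scheduler: advances active_task to the next task that has a
// page directory and isn't waiting, halting whenever a full pass finds
// nothing runnable.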
void advance_active_task() {
	//logf(LOG_INFO, "entered scheduler from \"%s\"", active_task->name);
	struct task_state *old_task = active_task;
	while (1) {
		if (++active_task == tasks + MAX_TASKS)
			active_task = tasks;
		if (active_task->page_directory && !active_task->waiting) {
			//logf(LOG_INFO, "exiting scheduler to \"%s\"", active_task->name);
			return;
		}
		if (active_task == old_task)
			tmp_halt();
	}
}

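// Blocks, halting the CPU, until at least one task exists.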
void make_sure_tasks() {
	while (1) {
		for (uint8_t n = 0; n < MAX_TASKS; ++n)
			if (tasks[n].page_directory)
				return;
		tmp_halt();
	}
}

//IPC isn't fully implemented or tested in this version. I'm planning to
//finish it and make use of it in the next version, making the terminal its
//own application instead of a library.

#define MAX_IPC_PIPES 1024
#define IPC_BUFFER_PAGES 1

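// A one-way IPC channel backed by a circular buffer of IPC_BUFFER_PAGES
// kernel pages. buffer_next_send is where the sender writes next and
// buffer_next_read where the reader reads next; the pipe is empty when they
// are equal, so it holds at most IPC_BUFFER_PAGES * 4096 - 1 bytes. A slot
// is free when buffer is 0.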
struct ipc_pipe {
	void *buffer;
	void *buffer_next_send;
	const void *buffer_next_read;

	uint32_t sender_handle;
	uint32_t reader_handle;
	bool delete_when_empty;
} ipc_pipes[MAX_IPC_PIPES];

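// Clears the task and pipe tables, sets up the TSS (ring-0 stack, plus an
// I/O map base past the end of the structure so there is no I/O permission
// bitmap) and loads the task register.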
void init_tasks() {
	active_task = tasks;

	for (uint8_t i = 0; i < MAX_TASKS; ++i)
		tasks[i].page_directory = 0;

	for (uint16_t i = 0; i < MAX_IPC_PIPES; ++i)
		ipc_pipes[i].buffer = 0;

	TSS->ss0 = 0x18;
	TSS->esp0 = 0x0002f000;
	//TSS->cs = 0x13;
	//TSS->ds = 0x1b;
	//TSS->ss = 0x1b;
	TSS->iomp = sizeof(struct tss);

	asm volatile (
		"mov $0x08, %%ax\n"
		"ltr %%ax"
		: : : "ax");
}

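// Frees a pipe's buffer and marks the slot unused.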
void delete_pipe(struct ipc_pipe *pipe) {
	free_pages(pipe->buffer, IPC_BUFFER_PAGES);
	pipe->buffer = 0;
}

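// Tears down a task: frees its page directory and windows, wakes any tasks
// waiting on it, and cleans up its IPC pipes. Pipes it was reading are
// deleted immediately; pipes it was sending on are deleted once drained (or
// immediately if already empty).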
void delete_task(struct task_state *state) {
	//logf(LOG_INFO, "-- deleting 0x%h", state);
	switch_to_kernel_cr3();
	free_task_pd(state->page_directory);
	switch_to_task_cr3();

	delete_any_windows_from(state);
	state->page_directory = 0;

	//logf(LOG_INFO, "-- unwaiting any waiting for 0x%h", state);
	unwait_any((struct wait){.mode = PROCESS_END, .task = state});
	unwait_any((struct wait){.mode = IPC_SENT, .task = state});

	const uint32_t handle = state - tasks + 1;
	for (struct ipc_pipe *pipe = ipc_pipes; pipe < ipc_pipes + MAX_IPC_PIPES; ++pipe)
		if (pipe->buffer) {
			if (pipe->reader_handle == handle)
				delete_pipe(pipe);
			else if (pipe->sender_handle == handle) {
				if (pipe->buffer_next_read == pipe->buffer_next_send)
					delete_pipe(pipe);
				else
					pipe->delete_when_empty = true;
			}
		}
}

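// Returns the handle of a task that has sent the active task data it hasn't
// read yet, or 0 if there is none.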
__attribute__ ((pure))
uint32_t find_unread_ipc() {
	const uint32_t r_handle = active_task - tasks + 1;
	for (struct ipc_pipe *pipe = ipc_pipes; pipe < ipc_pipes + MAX_IPC_PIPES; ++pipe)
		if (pipe->buffer && (pipe->reader_handle == r_handle) &&
				(pipe->buffer_next_read != pipe->buffer_next_send)) {
			//logf(LOG_INFO, "found %d bytes of unread ipc from 0x%hb to 0x%hb", pipe->buffer_next_send - pipe->buffer_next_read + (pipe->buffer_next_read < pipe->buffer_next_send ? 0 : IPC_BUFFER_PAGES * 4096), pipe->sender_handle, pipe->reader_handle);
			return pipe->sender_handle;
		}
	return 0;
}

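// Finds the pipe from sender_handle to reader_handle, or returns 0 if no
// such pipe exists.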
__attribute__ ((pure))
struct ipc_pipe *get_existing_pipe(uint32_t sender_handle, uint32_t reader_handle) {
	for (struct ipc_pipe *i = ipc_pipes; i < ipc_pipes + MAX_IPC_PIPES; ++i)
		if (i->buffer && (i->sender_handle == sender_handle) &&
				(i->reader_handle == reader_handle))
			return i;
	return 0;
}

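// Queues up to count bytes from buffer for reader_handle, creating the pipe
// on first use, and wakes the reader if it was waiting for IPC from this
// task (or from anyone). Returns the number of bytes actually queued, which
// may be less than count if the pipe is nearly full, or -1 if the reader
// handle is invalid or dead.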
uint32_t ipc_send(uint32_t reader_handle, uint32_t count, const void *buffer) {
	if (!reader_handle || (reader_handle > MAX_TASKS) || !tasks[reader_handle - 1].page_directory)
		return -1;

	const uint32_t our_handle = active_task - tasks + 1;
	struct ipc_pipe *pipe = get_existing_pipe(our_handle, reader_handle);
	if (!pipe) {
		for (struct ipc_pipe *i = ipc_pipes; i < ipc_pipes + MAX_IPC_PIPES; ++i)
			if (!i->buffer) {
				i->buffer = allocate_kernel_pages(IPC_BUFFER_PAGES);
				i->buffer_next_read = i->buffer;
				i->buffer_next_send = i->buffer;
				i->reader_handle = reader_handle;
				i->sender_handle = our_handle;
				i->delete_when_empty = false;
				pipe = i;
				break;
			}
		if (!pipe)
			PANIC("out of ipc pipes");
	}

	unwait(tasks + reader_handle - 1, (struct wait){.mode = IPC_SENT, .task = active_task});
	unwait(tasks + reader_handle - 1, (struct wait){.mode = IPC_SENT_ANY});

	//signed, so the wrap-around check below actually works
	int32_t send_left = pipe->buffer_next_read - pipe->buffer_next_send - 1;
	if (send_left < 0)
		send_left += 4096 * IPC_BUFFER_PAGES;
	if (count > (uint32_t)send_left)
		count = send_left;

	if (pipe->buffer_next_send + count < pipe->buffer + 4096 * IPC_BUFFER_PAGES) {
		memcpy(pipe->buffer_next_send, buffer, count);
		pipe->buffer_next_send += count;
		return count;
	}

	const uint32_t first_batch = pipe->buffer + 4096 * IPC_BUFFER_PAGES - pipe->buffer_next_send;
	memcpy(pipe->buffer_next_send, buffer, first_batch);
	memcpy(pipe->buffer, buffer + first_batch, count - first_batch);
	pipe->buffer_next_send += count - 4096 * IPC_BUFFER_PAGES;
	return count;
}

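// Copies up to count bytes sent by sender_handle into buffer and wakes the
// sender if it was waiting for us to read. Returns the number of bytes
// copied, 0 if there is no pipe from a still-live sender, or -1 if the
// handle is invalid or the sender is gone without leaving data.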
uint32_t ipc_read(uint32_t sender_handle, uint32_t count, void *buffer) {
	//logf(LOG_INFO, "kernel ipc_read(0x%hb, %u, 0x%h)", sender_handle, count, buffer);
	if (!sender_handle || (sender_handle > MAX_TASKS))
		return -1;

	const uint32_t our_handle = active_task - tasks + 1;
	struct ipc_pipe *const pipe = get_existing_pipe(sender_handle, our_handle);
	if (!pipe)
		return tasks[sender_handle - 1].page_directory ? 0 : -1;

	//logf(LOG_INFO, "found pipe from 0x%hb to 0x%hb", pipe->sender_handle, pipe->reader_handle);

	unwait(tasks + sender_handle - 1, (struct wait){.mode = IPC_READ, .task = active_task});

	uint8_t *write_to = buffer;
	const uint8_t *read_from = pipe->buffer_next_read;

	//change this to memcpys like ipc_send once memcpy is more efficient
	while ((read_from != pipe->buffer_next_send) && count--) {
		*(write_to++) = *(read_from++);
		if (read_from == pipe->buffer + IPC_BUFFER_PAGES * 4096)
			read_from = pipe->buffer;
	}

	if (pipe->delete_when_empty && (read_from == pipe->buffer_next_send))
		delete_pipe(pipe);
	else
		pipe->buffer_next_read = read_from;
	return write_to - (uint8_t *)buffer;
}

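// Adds a wait condition to the active task and marks it as waiting so the
// scheduler skips it; panics if the task already has MAX_WAITS pending waits.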
void add_wait(struct wait wait) {
	for (uint8_t i = 0; i < MAX_WAITS; ++i)
		if (!active_task->waits[i].mode) {
			active_task->waits[i] = wait;
			active_task->waiting = true;
			return;
		}
	PANIC("Out of waits for task.");
}

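// Offers the wait condition to every live task (see unwait below).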
void unwait_any(struct wait wait) {
	for (uint8_t i = 0; i < MAX_TASKS; ++i)
		if (tasks[i].page_directory)
			unwait(tasks + i, wait);
}

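// Wakes task if any of its pending waits matches the given wait (same mode,
// and for the per-task modes, the same task). A single match clears all of
// the task's waits and makes it runnable again.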
void unwait(struct task_state *task, struct wait wait) {
	if (!task->waiting)
		return;
	for (uint8_t i = 0; i < MAX_WAITS; ++i) {
		if (task->waits[i].mode != wait.mode)
			continue;
		switch (wait.mode) {
			case PROCESS_END:
			case IPC_SENT:
			case IPC_READ:
				if (task->waits[i].task != wait.task)
					continue;
				break;
			case WINDOW_ACTION:
			case IPC_SENT_ANY:
				break;
			default:
				PANIC("Unwait matched with unrecognized wait mode.");
		}
		for (i = 0; i < MAX_WAITS; ++i)
			task->waits[i].mode = NONE;
		task->waiting = false;
		return;
	}
}