path: root/src/kernel/pmap.c
#include <stdint.h>
#include "panic.h"
#include "pmap.h"

#define PAGEMAP_START (0x00040000)
#define PAGEMAP_END   (0x00060000)
//one bit per 4 KiB physical page: bit n set means page n is in use. the
//128 KiB map at PAGEMAP_START covers all 1M pages of a 32-bit physical space.
#define PAGE_USED(n)  ((*(uint8_t *)(PAGEMAP_START + ((n) >> 3)) >> ((n) & 7)) & 1)
#define CLEAR_PAGE(n) (*(uint8_t *)(PAGEMAP_START + ((n) >> 3)) &= ~(1 << ((n) & 7)))
#define SET_PAGE(n)   (*(uint8_t *)(PAGEMAP_START + ((n) >> 3)) |=   1 << ((n) & 7))
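//worked example (illustrative numbers): physical address 0x08001000 is page
//n = 0x08001000 >> 12 = 0x8001, tracked by bit (0x8001 & 7) = 1 of the byte
//at PAGEMAP_START + (0x8001 >> 3) = 0x00041000, so PAGE_USED(0x8001) reads
//exactly that bit.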

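//kernel page allocations are served from [KBSS_START, USER_START); user page
//allocations are served from USER_START up to the top of the 4 GiB space.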
#define KBSS_START (0x04000000)
#define USER_START (0x08000000)
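//end of the kernel image in memory, presumably provided by the linker script.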
extern const void _kernel_bss_end;

uint32_t kernel_pages_left;
uint32_t user_pages_left;
uint32_t max_kernel_pages;
uint32_t max_user_pages;

enum {
  BMET_FREE = 1
};

enum {
  BMEF_NON_VOLATILE = 0x02
};

struct bios_mmap_entry {
  uint64_t base;
  uint64_t length;
  uint32_t type;
  uint32_t flags;
};
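
//illustrative example (made-up values): an entry with base = 0x00100000,
//length = 0x07ee0000 and type = BMET_FREE describes free RAM from 1 MiB up to
//0x07fe0000, which init_pagemap below turns into cleared bits for pages 0x100
//through 0x7fdf.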

//build the physical page bitmap from the bios memory map, then reserve the
//pages holding the kernel image and count what is left for the allocators.
void init_pagemap(void) {
  //assume every page from KBSS_START upward is in use; the bios memory map
  //below clears the bits for pages that are actually free.
  for (uint32_t *i = (uint32_t *)(PAGEMAP_START + (KBSS_START >> 15)); i < (uint32_t *)PAGEMAP_END; ++i)
    *i = 0xffffffff;

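  //the memory map itself sits at 0x00010000 and its length in bytes at
  //0x00004006, presumably placed there by the boot stage.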
  const struct bios_mmap_entry *mmap_p = (const struct bios_mmap_entry *)0x00010000;
  const struct bios_mmap_entry *mmap_e = (const struct bios_mmap_entry *)(0x00010000 + *(uint16_t *)0x00004006);

  for (; mmap_p < mmap_e; ++mmap_p) {
    if (mmap_p->type != BMET_FREE)
      continue;
    if (mmap_p->base > 0xffffffff)
      continue;

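    //round the region inward to whole pages: base rounds up and end rounds
    //down, so only fully covered pages are marked free. e.g. (made-up values)
    //base = 0x9c00 gives base_page = 0xa and end = 0x7e000 gives end_page =
    //0x7e, clearing pages 0xa through 0x7d.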
    uint32_t base_page = ((mmap_p->base - 1) >> 12) + 1;

    uint64_t end = mmap_p->base + mmap_p->length;
    if (end > 0xffffffff)
      end = 0x100000000;
    uint32_t end_page = end >> 12;

    for (uint32_t i = base_page; i < end_page; ++i)
      CLEAR_PAGE(i);
  }

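  //reserve every page from 0 up to the end of the kernel's bss (rounded up to
  //a whole page) so the kernel image itself is never handed out.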
  uint32_t kernel_pages = (((uint32_t)&_kernel_bss_end - 1) >> 12) + 1;
  for (uint32_t i = 0; i < kernel_pages; ++i)
    SET_PAGE(i);

  kernel_pages_left = 0;
  for (uint32_t i = KBSS_START >> 12; i < USER_START >> 12; ++i)
    if (!PAGE_USED(i))
      ++kernel_pages_left;
  max_kernel_pages = kernel_pages_left;

  user_pages_left = 0;
  for (uint32_t i = USER_START >> 12; i < 1048576; ++i)
    if (!PAGE_USED(i))
      ++user_pages_left;
  max_user_pages = user_pages_left;
}

//very inefficient algorithm: just returns the first hole big enough.
//a smarter algorithm might pick the smallest hole available, and scan by
//bytes (or dwords) instead of bits where possible; a sketch of that byte
//scan follows this function.
void *allocate_kernel_pages(uint32_t n) {
  uint32_t run = 0;
  
  for (uint32_t page = KBSS_START >> 12; page < USER_START >> 12; ++page) {
    if (PAGE_USED(page))
      run = 0;
    else if (++run == n) {
      uint32_t start = page - run + 1;
      for (uint32_t i = start; i <= page; ++i)
        SET_PAGE(i);
      kernel_pages_left -= n;
      return (void *)(start << 12);
    }
  }

  return 0;
}
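
//sketch of the byte-granular fast path mentioned above (illustrative only and
//not wired in; the helper name is made up): when the scan sits on a byte
//boundary and the whole bitmap byte is 0xff, all eight of its pages are used,
//so they can be skipped at once before falling back to the per-bit walk.
//the allocators could call this at the top of their loops and reset run to 0
//whenever it advances page.
#if 0
static uint32_t skip_full_bitmap_bytes(uint32_t page, uint32_t end) {
  //stay byte-aligned and hop over bitmap bytes whose eight pages are all used
  while ((page & 7) == 0 && page + 8 <= end &&
         *(const uint8_t *)(PAGEMAP_START + (page >> 3)) == 0xff)
    page += 8;
  return page;
}
#endif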

//very inefficient algorithm: just returns the first hole big enough.
//a smarter algorithm might pick the smallest hole available, and scan by
//bytes (or dwords) instead of bits where possible (see the sketch after
//allocate_kernel_pages above).
void *allocate_user_pages(uint32_t n) {
  uint32_t run = 0;
  
  for (uint32_t page = USER_START >> 12; page < 1048576; ++page) {
    if (PAGE_USED(page))
      run = 0;
    else if (++run == n) {
      uint32_t start = page - run + 1;
      for (uint32_t i = start; i <= page; ++i)
        SET_PAGE(i);
      user_pages_left -= n;
      return (void *)(start << 12);
    }
  }

  return 0;
}

//in the future, change this to go by bytes or dwords instead of bits.
void free_pages(const void *ptr, uint32_t n) {
  uint32_t page = (uint32_t)ptr >> 12;
  for (uint32_t i = page; i < page + n; ++i)
    CLEAR_PAGE(i);
  if ((uint32_t)ptr >= USER_START)
    user_pages_left += n;
  else
    kernel_pages_left += n;
}
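
//usage sketch (illustrative only; the 4-page size and the error handling are
//made up):
//
//  void *buf = allocate_kernel_pages(4);
//  if (!buf)
//    return;            //no run of 4 free pages between KBSS_START and USER_START
//  //...use the 16 KiB at buf...
//  free_pages(buf, 4);
//
//free_pages trusts the caller to pass back the original page count; the
//bitmap itself does not record allocation sizes.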