summaryrefslogtreecommitdiff
path: root/src/user/knob/heap.c
blob: c4b810ed40e47130a8c08c953d91d066706d3d57 (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
#include <pland/syscall.h>
#include <pland/pcrt.h>
#include <stdbool.h>
#include <stdint.h>

//minimum system allocation when the heap must grow (16 pages = 64 kB)
#define MIN_NEW_CHUNK_PAGES 16

//header at the start of each 4 kB descriptor page; the remainder of the
//page is an array of struct chunk_desc (see PAGE_AS_ARRAY below)
struct desc_page_head {
  void *next_page;//0 for last one
};

//bookkeeping record for one contiguous chunk of heap memory
struct chunk_desc {
  void *start;//0 for a desc not in use
  uint32_t length;//chunk size in bytes
  bool available;//true when the chunk is free to hand out
} __attribute__ ((packed));
//without packing, it is 12 bytes and we get 341 per page
//with    packing, it is  9 bytes and we get 454 per page
//maybe i should make available just the top bit of length
//then we get 511 per page, but length has a max of 2^31-1

//singly linked list of descriptor pages, threaded through next_page
static struct desc_page_head *first_desc_page = 0;
static struct desc_page_head *last_desc_page = 0;

#define DESCS_PER_PAGE ((4096 - sizeof(struct desc_page_head)) / sizeof(struct chunk_desc))
#define PAGE_AS_ARRAY(p) ((struct chunk_desc *)((void *)p + sizeof(struct desc_page_head)))

static char size_was_buf[] = "size was ......... kB";
//kb should be < 1 000 000 000
//Log "size was <kb> kB", zero-padded into the 9 dot slots (indices 9..17).
static void log_size_was(uint32_t kb) {
  //BUG FIX: the loop previously ran d = 9..0, writing 10 digits into
  //indices 9..18 and overwriting the space before "kB". The buffer has
  //exactly 9 digit slots, matching the < 10^9 contract above.
  for (int8_t d = 8; d != -1; --d) {
    size_was_buf[9 + d] = (kb % 10) + '0';
    kb /= 10;
  }
  _system_log(size_was_buf);
}

static const char *const hextab = "0123456789abcdef";
static char caller_was_buf[] = "return address is 0x........";
//Log the given address as an 8-digit lowercase hex value.
static void log_caller_was(void *addr) {
  uint32_t rest = (uint32_t)addr;
  //fill the 8 dot slots (indices 20..27) right to left, one nibble at a time
  for (uint8_t pos = 27; pos >= 20; --pos) {
    caller_was_buf[pos] = hextab[rest & 0xf];
    rest >>= 4;
  }
  _system_log(caller_was_buf);
}

static struct desc_page_head *new_desc_page() {
  struct desc_page_head *dph = _allocate_ram(1);
  if (!dph) {
    _system_log("knob heap could not allocate new descriptor page");
    log_size_was(4);
    __pcrt_quit();
  }
  dph->next_page = 0;
  if (last_desc_page)
    last_desc_page->next_page = dph;
  last_desc_page = dph;

  struct chunk_desc *const a = PAGE_AS_ARRAY(dph);
  for (uint32_t i = 0; i < DESCS_PER_PAGE; ++i)
    a[i].start = 0;

  return dph;
}

//Return a descriptor slot that is not currently in use, allocating a
//fresh descriptor page when every existing slot is occupied.
static struct chunk_desc *unused_desc() {
  for (struct desc_page_head *page = first_desc_page; page; page = page->next_page) {
    struct chunk_desc *const descs = PAGE_AS_ARRAY(page);
    for (uint32_t slot = 0; slot < DESCS_PER_PAGE; ++slot)
      if (!descs[slot].start)
        return &descs[slot];
  }

  //all pages full; the first slot of a brand-new page is free by construction
  return PAGE_AS_ARRAY(new_desc_page());
}

__attribute__ ((malloc))
void *get_block(uint32_t bytes) {
  if (!first_desc_page)
    first_desc_page = new_desc_page();

  struct chunk_desc *best_fit = 0;

  for (struct desc_page_head *i = first_desc_page; i; i = i->next_page) {
    struct chunk_desc *const a = PAGE_AS_ARRAY(i);
    for (uint32_t i = 0; i < DESCS_PER_PAGE; ++i) {
      if (!a[i].start || !a[i].available || (a[i].length < bytes))
        continue;
      if (a[i].length == bytes) {
        a[i].available = false;
        return a[i].start;
      }
      if (!best_fit || (a[i].length < best_fit->length))
        best_fit = a + i;
    }
  }

  if (!best_fit) {
    const uint32_t pages_needed = (bytes - 1) / 4096 + 1;
    const uint32_t allocating = pages_needed < MIN_NEW_CHUNK_PAGES ? MIN_NEW_CHUNK_PAGES : pages_needed;
    void *const new_chunk = _allocate_ram(allocating);
    if (!new_chunk) {
      _system_log("knob heap could not allocate new chunk");
      log_size_was(allocating * 4);
      log_caller_was(__builtin_return_address(0));
      __pcrt_quit();
    }
    best_fit = unused_desc();
    best_fit->start = new_chunk;
    best_fit->length = allocating * 4096;

    if (bytes == allocating * 4096) {
      best_fit->available = false;
      return new_chunk;
    }
  }

  void *const old_end = best_fit->start + best_fit->length;
  void *const new_end = best_fit->start + bytes;

  best_fit->length = bytes;
  best_fit->available = false;

  struct chunk_desc *const leftover = unused_desc();
  leftover->start = new_end;
  leftover->length = old_end - new_end;

  return best_fit->start;
}

void free_block(const void *block) {
  for (struct desc_page_head *i = first_desc_page; i; i = i->next_page) {
    struct chunk_desc *const a = PAGE_AS_ARRAY(i);
    for (uint32_t i = 0; i < DESCS_PER_PAGE; ++i)
      if (a[i].start == block) {
        //TODO: consolidate this with surrounding free chunks
        a[i].available = true;
        return;
      }
  }
}