//////////////////////////////////////////////////////////////////
// platform_arena.c
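//
// Chained virtual-memory arena allocator. Each block reserves a large
// range of address space up front and commits pages on demand; when a
// block's reserve runs out, the arena grows by chaining a new block
// (optionally recycling retired blocks through a free list when
// RV_ARENA_FREE_LIST is enabled). Temporary scopes (rv_temp_begin /
// rv_temp_end) and the per-thread scratch pool at the bottom of this
// file are built on the position-based push/pop core.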

#if RV_USE_DEBUG_MALLOC_ARENA

// TODO(Samdal): make a malloc chained variant

// Debug variant: every push becomes its own malloc so that tools like
// ASan can place redzones around individual allocations. Pops and
// releases are intentionally no-ops, so this variant leaks by design
// and is only meant for debugging. Note that alignments beyond what
// malloc guarantees are not honored here.

// Arena Creation/Destruction
RV_INTERNAL rv_arena* rv_arena_alloc_(rv_arena_params* params)
{
    return NULL;
}
RV_INTERNAL void rv_arena_release(rv_arena* arena)
{
}

// Arena Push/Pop/Pos Core Functions
RV_INTERNAL void* rv_arena_push(rv_arena* arena, u64 size, u64 align)
{
    return malloc(size);
}
RV_INTERNAL u64 rv_arena_pos(rv_arena* arena)
{
    return 0;
}
RV_INTERNAL void rv_arena_pop_to(rv_arena* arena, u64 pos)
{
}

// Arena Push/Pop Helpers
RV_INTERNAL void rv_arena_clear(rv_arena* arena)
{
}
RV_INTERNAL void rv_arena_pop(rv_arena* arena, u64 amt)
{
}

// Temporary Arena Scopes
RV_INTERNAL rv_temp_arena rv_temp_begin(rv_arena* arena)
{
    return (rv_temp_arena){0};
}
RV_INTERNAL void rv_temp_end(rv_temp_arena temp)
{
}

RV_GLOBAL rv_arena* rv_scratch_begin_(rv_arena **conflicts, u64 count)
{
    return NULL;
}

#else // !RV_USE_DEBUG_MALLOC_ARENA

// Arena Creation/Destruction

RV_INTERNAL rv_arena*
rv_arena_alloc_(rv_arena_params* params)
{
    // round reserve/commit sizes up to the page size
    u64 page_size = rv_mem_get_page_size(params->flags & rv_arena_flag_large_pages);
    u64 reserve_size = rv_align_pow2(params->reserve_size, page_size);
    u64 commit_size = rv_align_pow2(params->commit_size, page_size);

    // reserve address space & commit the initial pages, unless the
    // caller supplied a backing buffer; only commit if the reserve
    // actually succeeded
    void* base = params->optional_backing_buffer;
    if (base == 0) {
        if (params->flags & rv_arena_flag_large_pages) {
            base = rv_mem_reserve_large(reserve_size);
            if (base != NULL) {
                rv_mem_commit_large(base, commit_size);
            }
        } else {
            base = rv_mem_reserve(reserve_size);
            if (base != NULL) {
                rv_mem_commit(base, commit_size);
            }
        }
    }

    if (base == NULL) {
        rv_abort_msg(1, "Fatal Allocation Failure");
    }

    // the arena header lives at the base of its own block
    rv_arena* arena = (rv_arena*)base;
    arena->current = arena;
    arena->flags = params->flags;
    arena->cmt_size = params->commit_size;
    arena->res_size = params->reserve_size;
    arena->base_pos = 0;
    arena->pos = RV_ARENA_HEADER_SIZE;
    arena->cmt = commit_size;
    arena->res = reserve_size;
#if RV_ARENA_FREE_LIST
    arena->free_size = 0;
    arena->free_last = 0;
#endif
    RV_ASAN_POISON(base, commit_size);
    RV_ASAN_UNPOISON(base, RV_ARENA_HEADER_SIZE);
    return arena;
}

RV_INTERNAL void
rv_arena_release(rv_arena* arena)
{
    // release every chained block, newest first
    for (rv_arena* n = arena->current, *prev = 0; n != 0; n = prev) {
        prev = n->prev;
        rv_mem_release(n, n->res);
    }
}

// Arena Push/Pop/Pos Core Functions

RV_INTERNAL void*
rv_arena_push(rv_arena* arena, u64 size, u64 align)
{
    rv_assert(align > 0);
    rv_arena* current = arena->current;
    u64 pos_pre = rv_align_pow2(current->pos, align);
    u64 pos_pst = pos_pre + size;

    // chain a new block if the current one cannot hold the allocation
    if (current->res < pos_pst && !(arena->flags & rv_arena_flag_no_chain)) {
        rv_arena* new_block = 0;

#if RV_ARENA_FREE_LIST
        // first try to recycle a retired block from the free list; it
        // must fit `size` bytes at an aligned offset past the header
        rv_arena* prev_block;
        for (new_block = arena->free_last, prev_block = 0; new_block != 0; prev_block = new_block, new_block = new_block->prev) {
            if (new_block->res >= size + rv_align_pow2(RV_ARENA_HEADER_SIZE, align)) {
                // unlink the block from the free list
                if (prev_block) {
                    prev_block->prev = new_block->prev;
                } else {
                    arena->free_last = new_block->prev;
                }
                arena->free_size -= new_block->res_size;
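                // blocks parked on the free list keep their payload
                // poisoned for ASan (see rv_arena_pop_to); make it
                // addressable again before handing the block back out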
                RV_ASAN_UNPOISON((u8*)new_block + RV_ARENA_HEADER_SIZE, new_block->res_size - RV_ARENA_HEADER_SIZE);
                break;
            }
        }
#endif

        // otherwise allocate a fresh block, oversized when a single
        // allocation (plus its aligned header offset) exceeds the
        // default reserve size; rv_arena_alloc_ rounds both sizes up
        // to the page size
        if (new_block == 0) {
            u64 res_size = current->res_size;
            u64 cmt_size = current->cmt_size;
            u64 needed_size = size + rv_align_pow2(RV_ARENA_HEADER_SIZE, align);
            if (needed_size > res_size) {
                res_size = needed_size;
                cmt_size = needed_size;
            }
            new_block = rv_arena_alloc(.reserve_size = res_size,
                                       .commit_size = cmt_size,
                                       .flags = current->flags);
        }

        new_block->base_pos = current->base_pos + current->res;
        RV_STACK_PUSH_N(arena->current, new_block, prev);

        current = new_block;
        pos_pre = rv_align_pow2(current->pos, align);
        pos_pst = pos_pre + size;
    }

    // commit new pages if needed: round the target position up to the
    // commit granularity, clamped to the reserved range
    if (current->cmt < pos_pst) {
        u64 cmt_pst_aligned = pos_pst + current->cmt_size - 1;
        cmt_pst_aligned -= cmt_pst_aligned % current->cmt_size;
        u64 cmt_pst_clamped = rv_min(cmt_pst_aligned, current->res);
        u64 cmt_size = cmt_pst_clamped - current->cmt;
        u8* cmt_ptr = (u8*)current + current->cmt;
        if (current->flags & rv_arena_flag_large_pages) {
            rv_mem_commit_large(cmt_ptr, cmt_size);
        } else {
            rv_mem_commit(cmt_ptr, cmt_size);
        }
        current->cmt = cmt_pst_clamped;
    }

    // push onto the current block
    void* result = 0;
    if (current->cmt >= pos_pst) {
        result = (u8*)current + pos_pre;
        current->pos = pos_pst;
        RV_ASAN_UNPOISON(result, size);
    }

    if (result == 0) {
        rv_abort_msg(1, "Fatal Allocation Failure");
    }

    return result;
}

RV_INTERNAL u64
rv_arena_pos(rv_arena* arena)
{
    // the global position is the current block's offset within the
    // chain plus the position inside that block
    rv_arena* current = arena->current;
    u64 pos = current->base_pos + current->pos;
    return pos;
}

RV_INTERNAL void
rv_arena_pop_to(rv_arena* arena, u64 pos)
{
    // never pop below the first block's header
    u64 big_pos = rv_max(RV_ARENA_HEADER_SIZE, pos);
    rv_arena* current = arena->current;

#if RV_ARENA_FREE_LIST
    // park fully-popped blocks on the free list for later reuse,
    // poisoning their payload until rv_arena_push recycles them
    for (rv_arena* prev = 0; current->base_pos >= big_pos; current = prev) {
        prev = current->prev;
        current->pos = RV_ARENA_HEADER_SIZE;
        arena->free_size += current->res_size;
        RV_STACK_PUSH_N(arena->free_last, current, prev);
        RV_ASAN_POISON((u8*)current + RV_ARENA_HEADER_SIZE, current->res_size - RV_ARENA_HEADER_SIZE);
    }
#else
    // release fully-popped blocks back to the OS
    for (rv_arena* prev = 0; current->base_pos >= big_pos; current = prev) {
        prev = current->prev;
        rv_mem_release(current, current->res);
    }
#endif
    arena->current = current;
    u64 new_pos = big_pos - current->base_pos;
    rv_assert(new_pos <= current->pos);
    RV_ASAN_POISON((u8*)current + new_pos, (current->pos - new_pos));
    current->pos = new_pos;
}

// Arena Push/Pop Helpers

RV_INTERNAL void
rv_arena_clear(rv_arena* arena)
{
    rv_arena_pop_to(arena, 0);
}

RV_INTERNAL void
rv_arena_pop(rv_arena* arena, u64 amt)
{
    // clamp so popping more than was pushed cannot underflow
    u64 pos_old = rv_arena_pos(arena);
    u64 pos_new = pos_old;
    if (amt < pos_old) {
        pos_new = pos_old - amt;
    }
    rv_arena_pop_to(arena, pos_new);
}

// Temporary Arena Scopes

RV_INTERNAL rv_temp_arena
rv_temp_begin(rv_arena* arena)
{
    u64 pos = rv_arena_pos(arena);
    rv_temp_arena temp = {arena, pos};
    return temp;
}

RV_INTERNAL void
rv_temp_end(rv_temp_arena temp)
{
    rv_arena_pop_to(temp.arena, temp.pos);
}

#if !defined(_rv_scratch_count)
#define _rv_scratch_count 2
#endif

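// Per-thread pool of scratch arenas (_rv_scratch_count slots, two by
// default). Callers list the arenas they are already writing into via
// `conflicts`, so the scratch arena handed back never aliases one of them.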
RV_INTERNAL RV_THREAD_LOCAL rv_arena* _rv_thread_scratch_pool[_rv_scratch_count] = {0};

RV_GLOBAL rv_arena* rv_scratch_begin_(rv_arena **conflicts, u64 count)
{
    rv_arena *result = 0;
    rv_arena **arena_ptr = _rv_thread_scratch_pool;
    for (u64 i = 0; i < _rv_scratch_count; i += 1, arena_ptr += 1) {
        // skip any pool slot that the caller already holds
        rv_arena **conflict_ptr = conflicts;
        bool32 has_conflict = 0;
        for (u64 j = 0; j < count; j += 1, conflict_ptr += 1) {
            if (*arena_ptr == *conflict_ptr) {
                has_conflict = 1;
                break;
            }
        }
        if (!has_conflict) {
            // lazily allocate the slot on first use
            if (*arena_ptr == NULL) {
                *arena_ptr = rv_arena_alloc();
            }
            result = *arena_ptr;
            break;
        }
    }

    return result;
}

#endif // RV_USE_DEBUG_MALLOC_ARENA
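
// Usage sketch (illustrative only, not part of this file). The underscore
// entry point rv_scratch_begin_ is called directly here; the library is
// assumed to wrap it and rv_temp_end in scratch-scope macros elsewhere, and
// `persistent` is a hypothetical caller-owned arena:
//
//     rv_arena* persistent = rv_arena_alloc();
//
//     // grab a per-thread scratch arena guaranteed not to alias `persistent`
//     rv_arena* scratch = rv_scratch_begin_(&persistent, 1);
//     rv_temp_arena temp = rv_temp_begin(scratch);
//
//     // temporary working memory lives on the scratch arena...
//     u8* staging = rv_arena_push(temp.arena, 4096, 16);
//
//     // ...while anything pushed onto `persistent` survives the scope
//     rv_temp_end(temp); // pops everything pushed on `scratch` since begin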