/*
kmregion version 3.5, a region-based memory allocator
Copyright (C) 02021 Kragen Javier Sitaker
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include <assert.h>
#include <setjmp.h>
#include <stddef.h>
#include <stdlib.h>
#include "kmregion.h"
#include <string.h>
enum { block_size = 32768, initial_block_list_capacity = 512 };
/* Create a new region that allocates through `disc`.  `on_error`, which
 * may be NULL, is remembered for later allocations (km_error longjmps to
 * it); km_start itself reports out-of-memory only by returning NULL.
 * The caller owns the region and releases it with km_end. */
kmregion *
km_start(km_disc disc, jmp_buf *on_error)
{
  kmregion *r = disc.malloc(sizeof(*r), disc.userdata);
  if (!r) return NULL;

  r->disc = disc;
  r->on_error = on_error;
  r->block_capacity = initial_block_list_capacity;
  r->n_blocks = 0;

  /* The list of block pointers, zeroed so unused slots are NULL. */
  r->blocks = disc.malloc(initial_block_list_capacity * sizeof(*r->blocks),
                          disc.userdata);
  if (!r->blocks) goto fail_region;
  memset(r->blocks, 0, initial_block_list_capacity * sizeof(*r->blocks));

  /* The first bump-allocation block. */
  void *first = disc.malloc(block_size, disc.userdata);
  if (!first) goto fail_list;

  r->blocks[r->n_blocks++] = first;
  r->bp = first;
  r->n = block_size;
  return r;

fail_list:
  disc.free(r->blocks, disc.userdata);
fail_region:
  disc.free(r, disc.userdata);
  return NULL;
}
/* Destroy a region: release every block, then the block list, then the
 * region header itself, all through the region's own discipline. */
void
km_end(kmregion *p)
{
  km_disc d = p->disc; /* copy: the header is freed last, below */
  for (size_t i = 0; i < p->n_blocks; i++) {
    d.free(p->blocks[i], d.userdata);
  }
  d.free(p->blocks, d.userdata);
  d.free(p, d.userdata);
}
/* Report an allocation failure: jump to the caller-supplied recovery
 * point if one was registered, otherwise yield NULL so callers can
 * propagate the failure as a null return. */
static inline void *
km_error(jmp_buf *on_error)
{
  if (on_error != NULL) {
    longjmp(*on_error, 1);
  }
  return NULL;
}
/* Adds a new block to `p->blocks`, reallocating it if necessary;
* `km_slow_path_allocate` decides which size to call this with. It
* returns a pointer to the newly allocated block because `p->p` may
* or may not end up pointing into it — that’s up to
* `km_slow_path_allocate`.
*/
/* Adds a new block of `n` bytes to `p->blocks`, reallocating the block
 * list if necessary; `km_slow_path_allocate` decides which size to call
 * this with.  It returns a pointer to the newly allocated block because
 * `p->bp` may or may not end up pointing into it — that's up to
 * `km_slow_path_allocate`.  On allocation failure it returns NULL, or
 * longjmps via `km_error` if the region has an error handler.
 */
static void *
km_add_block(kmregion *p, size_t n)
{
  if (p->n_blocks >= p->block_capacity) {
    assert(p->block_capacity);
    assert(p->n_blocks == p->block_capacity);
    size_t old_size = p->block_capacity * sizeof(*p->blocks);
    void *new_block_list = p->disc.malloc(old_size * 2, p->disc.userdata);
    if (!new_block_list) return km_error(p->on_error);
    /* This is “amortized constant time” assuming a well-behaved
     * underlying malloc, but could be a latency problem for large
     * arenas; consider 16 gigabytes consisting entirely of 8-kilobyte
     * “large objects”, which is 2097152 block pointers, occupying 32
     * mebibytes — when you do the bump from 16 mebibytes to 32
     * mebibytes, this involves 48 mebibytes of memory traffic, which
     * might take a millisecond or more. */
    memset(new_block_list, 0, old_size * 2);
    memcpy(new_block_list, p->blocks, old_size);
    p->block_capacity *= 2;
    /* Bug fix: the old block list came from p->disc.malloc, so it must
     * go back through p->disc.free — calling the system free() on it
     * corrupts the heap for any non-libc discipline. */
    p->disc.free(p->blocks, p->disc.userdata);
    p->blocks = new_block_list;
  }
  void *block = p->disc.malloc(n, p->disc.userdata);
  if (!block) return km_error(p->on_error);
  /* Record the block only on success, so a failed allocation neither
   * wastes a list slot nor leaves a NULL entry for km_end to free. */
  p->blocks[p->n_blocks++] = block;
  return block;
}
/* Slow path for km_new, taken when the current block cannot satisfy the
 * request: allocate a fresh block (and possibly a bigger block list to
 * record it in), then retry the fast path.
 */
void *
km_slow_path_allocate(kmregion *p, size_t n)
{
  /* Oversized requests get a private block so the space still left in
   * the current bump block isn't thrown away. */
  if (n > block_size / 4) {
    return km_add_block(p, n);
  }
  void *fresh = km_add_block(p, block_size);
  if (!fresh) return NULL; /* km_add_block is responsible for the longjmp */
  p->bp = fresh;
  p->n = block_size;
  /* The new block is empty and at least n bytes, so the fast path is
   * guaranteed to succeed this time. */
  return km_new(p, n);
}
/* Adapter giving the C library's malloc the km_disc allocation
 * signature; `userdata` is ignored. */
void *
km_malloc(size_t n, void *userdata)
{
  (void)userdata;
  return malloc(n);
}
/* Adapter giving the C library's free the km_disc deallocation
 * signature; `userdata` is ignored. */
void
km_free(void *p, void *userdata)
{
  (void)userdata;
  free(p);
}
km_disc km_libc_disc = { NULL, km_malloc, km_free };