Fix block allocation size

The block allocation size needs to include the rounding done by
`mapping_search()`; otherwise we trim the block too much and cannot
re-allocate the same amount of space after the block is freed.

In the paper's version of mapping_search, this is easy to miss because
the size argument r is marked as "in out":
http://www.gii.upv.es/tlsf/files/papers/jrts2008.pdf
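
Concretely, assuming the default SL_INDEX_COUNT_LOG2 of 5 (32
second-level lists per first level): for a 1000-byte request,
tlsf_fls_sizet(1000) is 9, so round = (1 << (9 - 5)) - 1 = 15 and the
search starts at the list for sizes [1008, 1024). If the block is
nevertheless trimmed to 1000 bytes, freeing it inserts it into the
list for [992, 1008), one list below where the next 1000-byte search
begins, so that search never sees it.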

To reproduce (see the sketch after this list):
* Allocate an amount that is rounded up.
* Allocate a second block that takes up the remaining space. This
  prevents the later re-allocation from landing in a new spot.
* Free the first block.
* Allocate the first amount a second time. Without this change it
  fails even though the requested space is actually available, because
  the freed block was inserted into the segregated list for its
  un-rounded size.
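
A minimal sketch of that sequence, assuming the stock tlsf.h API
(tlsf_create_with_pool, tlsf_malloc, tlsf_free, tlsf_destroy); the
pool and request sizes are illustrative placeholders, not values taken
from this commit:

#include <assert.h>
#include <stddef.h>
#include "tlsf.h"

int main(void)
{
	/* A small pool so one extra allocation can soak up the leftovers.
	   4096 is a placeholder; any size where the first request gets
	   rounded up will do. */
	static char pool[4096];
	tlsf_t tlsf = tlsf_create_with_pool(pool, sizeof(pool));

	/* 1. An amount that mapping_search() rounds up
	   (1000 -> 1008 with the default 32 second-level lists). */
	void* a = tlsf_malloc(tlsf, 1000);
	assert(a != NULL);

	/* 2. Soak up the remaining space so step 4 cannot be satisfied
	   from a fresh spot. Probe downward for the largest request that
	   still fits; the step of 4 is arbitrary since requests are
	   rounded to ALIGN_SIZE anyway. */
	size_t n = sizeof(pool);
	void* b = NULL;
	while (n > 0 && (b = tlsf_malloc(tlsf, n)) == NULL)
	{
		n -= 4;
	}
	assert(b != NULL);

	/* 3. Free the first block. */
	tlsf_free(tlsf, a);

	/* 4. Re-allocate the first amount. Before this fix, the freed
	   block sat in the segregated list below the one the rounded-up
	   search starts from, so this returned NULL. */
	void* c = tlsf_malloc(tlsf, 1000);
	assert(c != NULL);

	tlsf_free(tlsf, c);
	tlsf_free(tlsf, b);
	tlsf_destroy(tlsf);
	return 0;
}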
Scott Shawcroft
2024-03-04 16:11:57 -08:00
parent 8c9cd0517a
commit 81d3779545

tlsf.c (23 changed lines)
@@ -280,14 +280,14 @@ static inline __attribute__((always_inline)) void mapping_insert(control_t* cont
 }
 
 /* This version rounds up to the next block size (for allocations) */
-static inline __attribute__((always_inline)) void mapping_search(control_t* control, size_t size, int* fli, int* sli)
+static inline __attribute__((always_inline)) void mapping_search(control_t* control, size_t* size, int* fli, int* sli)
 {
-	if (size >= control->small_block_size)
+	if (*size >= control->small_block_size)
 	{
-		const size_t round = (1 << (tlsf_fls_sizet(size) - control->sl_index_count_log2)) - 1;
-		size += round;
+		const size_t round = (1 << (tlsf_fls_sizet(*size) - control->sl_index_count_log2)) - 1;
+		*size = (*size + round) & ~round;
 	}
-	mapping_insert(control, size, fli, sli);
+	mapping_insert(control, *size, fli, sli);
 }
 
 static inline __attribute__((always_inline)) block_header_t* search_suitable_block(control_t* control, int* fli, int* sli)
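
The replacement line does double duty: since round + 1 is a power of
two (the second-level step for this size class, e.g. 16 for sizes in
[512, 1024) with 32 second-level lists), (*size + round) & ~round
rounds *size up to the next step boundary (1000 + 15 = 1015, masked
down to 1008), and that rounded size is written back for the caller to
trim with.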
@@ -540,7 +540,7 @@ static inline __attribute__((always_inline)) block_header_t* block_trim_free_lea
 	return remaining_block;
 }
 
-static inline __attribute__((always_inline)) block_header_t* block_locate_free(control_t* control, size_t size)
+static inline __attribute__((always_inline)) block_header_t* block_locate_free(control_t* control, size_t* size)
 {
 	int fl = 0, sl = 0;
 	block_header_t* block = 0;
@@ -563,7 +563,7 @@ static inline __attribute__((always_inline)) block_header_t* block_locate_free(c
 	if (block)
 	{
-		tlsf_assert(block_size(block) >= size);
+		tlsf_assert(block_size(block) >= *size);
 		remove_free_block(control, block, fl, sl);
 	}
@@ -1000,8 +1000,9 @@ pool_t tlsf_get_pool(tlsf_t tlsf)
 
 void* tlsf_malloc(tlsf_t tlsf, size_t size)
 {
 	control_t* control = tlsf_cast(control_t*, tlsf);
-	const size_t adjust = adjust_request_size(tlsf, size, ALIGN_SIZE);
-	block_header_t* block = block_locate_free(control, adjust);
+	size_t adjust = adjust_request_size(tlsf, size, ALIGN_SIZE);
+	// block_locate_free() may adjust our allocated size further.
+	block_header_t* block = block_locate_free(control, &adjust);
 	return block_prepare_used(control, block, adjust);
 }
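
Because adjust is now updated in place, block_prepare_used() trims the
located block to the rounded size rather than to the raw request, so
when the block is freed it lands back in the same segregated list that
a repeat search starts from.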
@@ -1056,9 +1057,9 @@ void* tlsf_memalign_offs(tlsf_t tlsf, size_t align, size_t size, size_t data_off
 	** alignment constraint. Thus, the gap is not required.
 	** If we requested 0 bytes, return null, as tlsf_malloc(0) does.
 	*/
-	const size_t aligned_size = (adjust && align > ALIGN_SIZE) ? size_with_gap : adjust;
-	block_header_t* block = block_locate_free(control, aligned_size);
+	size_t aligned_size = (adjust && align > ALIGN_SIZE) ? size_with_gap : adjust;
+	block_header_t* block = block_locate_free(control, &aligned_size);
 
 	/* This can't be a static assert. */
 	tlsf_assert(sizeof(block_header_t) == block_size_min + block_header_overhead);