freemyipod r125 - Code Review

Jump to: navigation, search
Repository: freemyipod
Revision: < r124 | r125 | r126 >
Date: 21:24, 13 August 2010
Author: theseven
Status: new
Tags:
Comment:
Add TLSF allocator
Modified paths:
  • /embios/trunk/SOURCES (modified) (history)
  • /embios/trunk/export/syscallapi.h (modified) (history)
  • /embios/trunk/export/syscallwrappers.h (modified) (history)
  • /embios/trunk/libc/include/assert.h (added) (history)
  • /embios/trunk/libc/tlsf (added) (history)
  • /embios/trunk/libc/tlsf/tlsf.c (added) (history)
  • /embios/trunk/libc/tlsf/tlsf.h (added) (history)
  • /embios/trunk/libc/tlsf/tlsfbits.h (added) (history)
  • /embios/trunk/syscallapi.c (modified) (history)

Diff [purge]

Index: embios/trunk/libc/include/assert.h
@@ -0,0 +1,32 @@
 2+//
 3+//
 4+// Copyright 2010 TheSeven
 5+//
 6+//
 7+// This file is part of emBIOS.
 8+//
 9+// emBIOS is free software: you can redistribute it and/or
 10+// modify it under the terms of the GNU General Public License as
 11+// published by the Free Software Foundation, either version 2 of the
 12+// License, or (at your option) any later version.
 13+//
 14+// emBIOS is distributed in the hope that it will be useful,
 15+// but WITHOUT ANY WARRANTY; without even the implied warranty of
 16+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 17+// See the GNU General Public License for more details.
 18+//
 19+// You should have received a copy of the GNU General Public License along
 20+// with emBIOS. If not, see <http://www.gnu.org/licenses/>.
 21+//
 22+//
 23+
 24+
 25+#ifndef __ASSERT_H__
 26+#define __ASSERT_H__
 27+
 28+#include "../../global.h"
 29+#include "../../panic.h"
 30+
 31+#define assert(expr) if (!expr) panicf(PANIC_KILLTHREAD, "%s:%d: Assertion failed: %s", __FILE__, __LINE__, #expr)
 32+
 33+#endif
Index: embios/trunk/libc/tlsf/tlsfbits.h
@@ -0,0 +1,67 @@
 2+#ifndef INCLUDED_tlsfbits
 3+#define INCLUDED_tlsfbits
 4+
 5+#include "../../global.h"
 6+
 7+#if defined(__cplusplus)
 8+#define tlsf_decl inline
 9+#else
 10+#define tlsf_decl static
 11+#endif
 12+
 13+/*
 14+** Architecture-specific bit manipulation routines.
 15+**
 16+** TLSF achieves O(1) cost for malloc and free operations by limiting
 17+** the search for a free block to a free list of guaranteed size
 18+** adequate to fulfill the request, combined with efficient free list
 19+** queries using bitmasks and architecture-specific bit-manipulation
 20+** routines.
 21+**
 22+** Most modern processors provide instructions to count leading zeroes
 23+** in a word, find the lowest and highest set bit, etc. These
 24+** specific implementations will be used when available, falling back
 25+** to a reasonably efficient generic implementation.
 26+**
 27+** NOTE: TLSF spec relies on ffs/fls returning value 0..31.
 28+** ffs/fls return 1-32 by default, returning 0 for error.
 29+*/
 30+
 31+/*
 32+** gcc 3.4 and above have builtin support, specialized for architecture.
 33+** Some compilers masquerade as gcc; patchlevel test filters them out.
 34+*/
 35+
 36+
 37+tlsf_decl int tlsf_fls_generic(unsigned int word) ICODE_ATTR;
 38+tlsf_decl int tlsf_fls_generic(unsigned int word)
 39+{
 40+ int bit = 32;
 41+
 42+ if (!word) bit -= 1;
 43+ if (!(word & 0xffff0000)) { word <<= 16; bit -= 16; }
 44+ if (!(word & 0xff000000)) { word <<= 8; bit -= 8; }
 45+ if (!(word & 0xf0000000)) { word <<= 4; bit -= 4; }
 46+ if (!(word & 0xc0000000)) { word <<= 2; bit -= 2; }
 47+ if (!(word & 0x80000000)) { word <<= 1; bit -= 1; }
 48+
 49+ return bit;
 50+}
 51+
 52+/* Implement ffs in terms of fls. */
 53+tlsf_decl int tlsf_ffs(unsigned int word) ICODE_ATTR;
 54+tlsf_decl int tlsf_ffs(unsigned int word)
 55+{
 56+ return tlsf_fls_generic(word & (~word + 1)) - 1;
 57+}
 58+
 59+tlsf_decl int tlsf_fls(unsigned int word) ICODE_ATTR;
 60+tlsf_decl int tlsf_fls(unsigned int word)
 61+{
 62+ return tlsf_fls_generic(word) - 1;
 63+}
 64+
 65+
 66+#undef tlsf_decl
 67+
 68+#endif
Index: embios/trunk/libc/tlsf/tlsf.c
@@ -0,0 +1,952 @@
 2+#include "../../global.h"
 3+#include "../include/assert.h"
 4+#include <limits.h>
 5+#include <stddef.h>
 6+#include "../include/stdio.h"
 7+#include "../include/stdlib.h"
 8+#include "../include/string.h"
 9+#include "../../console.h"
 10+
 11+#include "tlsf.h"
 12+#include "tlsfbits.h"
 13+
 14+/*
 15+** Constants.
 16+*/
 17+
/* Public constants: may be modified. */
enum tlsf_public
{
    /* log2 of number of linear subdivisions of block sizes. */
    SL_INDEX_COUNT_LOG2 = 5,
};

/* Private constants: do not modify. */
enum tlsf_private
{
    /* All allocation sizes and addresses are aligned to 4 bytes. */
    ALIGN_SIZE_LOG2 = 2,
    ALIGN_SIZE = (1 << ALIGN_SIZE_LOG2),

    /*
    ** We support allocations of sizes up to (1 << FL_INDEX_MAX) bits.
    ** Because the second-level lists subdivide linearly and the minimum
    ** granularity is 4 bytes, first-level lists below
    ** SL_INDEX_COUNT * 4 == (1 << (SL_INDEX_COUNT_LOG2 + 2)) bytes would
    ** have more slots than distinguishable sizes; all blocks below that
    ** threshold therefore share the 0th first-level list.
    */
    FL_INDEX_MAX = 30,
    SL_INDEX_COUNT = (1 << SL_INDEX_COUNT_LOG2),
    FL_INDEX_SHIFT = (SL_INDEX_COUNT_LOG2 + ALIGN_SIZE_LOG2),
    FL_INDEX_COUNT = (FL_INDEX_MAX - FL_INDEX_SHIFT + 1),

    SMALL_BLOCK_SIZE = (1 << FL_INDEX_SHIFT),
};
 49+
 50+/*
 51+** Cast and min/max macros.
 52+*/
 53+
 54+#define tlsf_cast(t, exp) ((t) (exp))
 55+#define tlsf_min(a, b) ((a) < (b) ? (a) : (b))
 56+#define tlsf_max(a, b) ((a) > (b) ? (a) : (b))
 57+
 58+/*
 59+** Set assert macro, if it has not been provided by the user.
 60+*/
 61+#if !defined (tlsf_assert)
 62+#define tlsf_assert assert
 63+#endif
 64+
 65+/*
 66+** Static assertion mechanism.
 67+*/
 68+
 69+#define _tlsf_glue2(x, y) x ## y
 70+#define _tlsf_glue(x, y) _tlsf_glue2(x, y)
 71+#define tlsf_static_assert(exp) \
 72+ typedef char _tlsf_glue(static_assert, __LINE__) [(exp) ? 1 : -1]
 73+
 74+/* FIXME: This code only currently supports 32-bit architectures. */
 75+tlsf_static_assert(sizeof(size_t) * CHAR_BIT == 32);
 76+
 77+/* SL_INDEX_COUNT must be <= number of bits in sl_bitmap's storage type. */
 78+tlsf_static_assert(sizeof(unsigned int) * CHAR_BIT >= SL_INDEX_COUNT);
 79+
 80+/* sizeof fl_bitmap must be >= FL_INDEX_COUNT. */
 81+tlsf_static_assert(sizeof(unsigned int) * CHAR_BIT >= FL_INDEX_COUNT);
 82+
 83+/* Ensure we've properly tuned our sizes. */
 84+tlsf_static_assert(ALIGN_SIZE == SMALL_BLOCK_SIZE / SL_INDEX_COUNT);
 85+
 86+/*
 87+** Data structures and associated constants.
 88+*/
 89+
 90+/*
 91+** Block header structure.
 92+**
 93+** There are several implementation subtleties involved:
 94+** - The prev_phys_block field is only valid if the previous block is free.
 95+** - The prev_phys_block field is actually stored in the last word of the
 96+** previous block. It appears at the beginning of this structure only to
 97+** simplify the implementation.
 98+** - The next_free / prev_free fields are only valid if the block is free.
 99+*/
 100+typedef struct block_header_t
 101+{
 102+ /* Points to the previous physical block. */
 103+ struct block_header_t* prev_phys_block;
 104+
 105+ /* The size of this block, excluding the block header. */
 106+ size_t size;
 107+
 108+ /* Next and previous free blocks. */
 109+ struct block_header_t* next_free;
 110+ struct block_header_t* prev_free;
 111+} block_header_t;
 112+
 113+/*
 114+** Since block sizes are always a multiple of 4, the two least significant
 115+** bits of the size field are used to store the block status:
 116+** - bit 0: whether block is busy or free
 117+** - bit 1: whether previous block is busy or free
 118+*/
 119+static const size_t block_header_free_bit = 1 << 0;
 120+static const size_t block_header_prev_free_bit = 1 << 1;
 121+
 122+/*
 123+** The size of the block header exposed to used blocks is the size field.
 124+** The prev_phys_block field is stored *inside* the previous free block.
 125+*/
 126+static const size_t block_header_overhead = sizeof(size_t);
 127+
 128+/* User data starts directly after the size field in a used block. */
 129+static const size_t block_start_offset =
 130+ offsetof(block_header_t, size) + sizeof(size_t);
 131+
 132+/*
 133+** A free block must be large enough to store its header minus the size of
 134+** the prev_phys_block field, and no larger than the number of addressable
 135+** bits for FL_INDEX.
 136+*/
 137+static const size_t block_size_min =
 138+ sizeof(block_header_t) - sizeof(block_header_t*);
 139+static const size_t block_size_max = 1 << FL_INDEX_MAX;
 140+
 141+/* Empty lists point at this block to indicate they are free. */
 142+static block_header_t block_null;
 143+
 144+/* The TLSF pool structure. */
 145+typedef struct pool_t
 146+{
 147+ /* Bitmaps for free lists. */
 148+ unsigned int fl_bitmap;
 149+ unsigned int sl_bitmap[FL_INDEX_COUNT];
 150+
 151+ /* Head of free lists. */
 152+ block_header_t* blocks[FL_INDEX_COUNT][SL_INDEX_COUNT];
 153+} pool_t;
 154+
 155+/* A type used for casting when doing pointer arithmetic. */
 156+typedef unsigned int tlsfptr_t;
 157+
 158+/*
 159+** block_header_t member functions.
 160+*/
 161+
 162+static size_t block_size(const block_header_t* block) ICODE_ATTR;
 163+static size_t block_size(const block_header_t* block)
 164+{
 165+ return block->size & ~(block_header_free_bit | block_header_prev_free_bit);
 166+}
 167+
 168+static void block_set_size(block_header_t* block, size_t size) ICODE_ATTR;
 169+static void block_set_size(block_header_t* block, size_t size)
 170+{
 171+ const size_t oldsize = block->size;
 172+ block->size = size | (oldsize & (block_header_free_bit | block_header_prev_free_bit));
 173+}
 174+
 175+static int block_is_last(const block_header_t* block) ICODE_ATTR;
 176+static int block_is_last(const block_header_t* block)
 177+{
 178+ return 0 == block_size(block);
 179+}
 180+
 181+static int block_is_free(const block_header_t* block) ICODE_ATTR;
 182+static int block_is_free(const block_header_t* block)
 183+{
 184+ return block->size & block_header_free_bit;
 185+}
 186+
 187+static void block_set_free(block_header_t* block) ICODE_ATTR;
 188+static void block_set_free(block_header_t* block)
 189+{
 190+ block->size |= block_header_free_bit;
 191+}
 192+
 193+static void block_set_used(block_header_t* block) ICODE_ATTR;
 194+static void block_set_used(block_header_t* block)
 195+{
 196+ block->size &= ~block_header_free_bit;
 197+}
 198+
 199+static int block_is_prev_free(const block_header_t* block) ICODE_ATTR;
 200+static int block_is_prev_free(const block_header_t* block)
 201+{
 202+ return block->size & block_header_prev_free_bit;
 203+}
 204+
 205+static void block_set_prev_free(block_header_t* block) ICODE_ATTR;
 206+static void block_set_prev_free(block_header_t* block)
 207+{
 208+ block->size |= block_header_prev_free_bit;
 209+}
 210+
 211+static void block_set_prev_used(block_header_t* block) ICODE_ATTR;
 212+static void block_set_prev_used(block_header_t* block)
 213+{
 214+ block->size &= ~block_header_prev_free_bit;
 215+}
 216+
 217+static block_header_t* block_from_ptr(const void* ptr) ICODE_ATTR;
 218+static block_header_t* block_from_ptr(const void* ptr)
 219+{
 220+ return tlsf_cast(block_header_t*,
 221+ tlsf_cast(unsigned char*, ptr) - block_start_offset);
 222+}
 223+
 224+static void* block_to_ptr(const block_header_t* block) ICODE_ATTR;
 225+static void* block_to_ptr(const block_header_t* block)
 226+{
 227+ return tlsf_cast(void*,
 228+ tlsf_cast(unsigned char*, block) + block_start_offset);
 229+}
 230+
 231+/* Return location of next block after block of given size. */
 232+static block_header_t* offset_to_block(const void* ptr, size_t size) ICODE_ATTR;
 233+static block_header_t* offset_to_block(const void* ptr, size_t size)
 234+{
 235+ return tlsf_cast(block_header_t*,
 236+ tlsf_cast(unsigned char*, ptr) + size);
 237+}
 238+
 239+/* Return location of previous block. */
 240+static block_header_t* block_prev(const block_header_t* block) ICODE_ATTR;
 241+static block_header_t* block_prev(const block_header_t* block)
 242+{
 243+ return block->prev_phys_block;
 244+}
 245+
 246+/* Return location of next existing block. */
 247+static block_header_t* block_next(const block_header_t* block) ICODE_ATTR;
 248+static block_header_t* block_next(const block_header_t* block)
 249+{
 250+ block_header_t* next = offset_to_block(block_to_ptr(block),
 251+ block_size(block) - block_header_overhead);
 252+ tlsf_assert(!block_is_last(block));
 253+ return next;
 254+}
 255+
 256+/* Link a new block with its physical neighbor, return the neighbor. */
 257+static block_header_t* block_link_next(block_header_t* block) ICODE_ATTR;
 258+static block_header_t* block_link_next(block_header_t* block)
 259+{
 260+ block_header_t* next = block_next(block);
 261+ next->prev_phys_block = block;
 262+ return next;
 263+}
 264+
 265+static void block_mark_as_free(block_header_t* block) ICODE_ATTR;
 266+static void block_mark_as_free(block_header_t* block)
 267+{
 268+ /* Link the block to the next block, first. */
 269+ block_header_t* next = block_link_next(block);
 270+ block_set_prev_free(next);
 271+ block_set_free(block);
 272+}
 273+
 274+static void block_mark_as_used(block_header_t* block) ICODE_ATTR;
 275+static void block_mark_as_used(block_header_t* block)
 276+{
 277+ block_header_t* next = block_next(block);
 278+ block_set_prev_used(next);
 279+ block_set_used(block);
 280+}
 281+
 282+static size_t align_up(size_t x, size_t align) ICODE_ATTR;
 283+static size_t align_up(size_t x, size_t align)
 284+{
 285+ tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two");
 286+ return (x + (align - 1)) & ~(align - 1);
 287+}
 288+
 289+static size_t align_down(size_t x, size_t align) ICODE_ATTR;
 290+static size_t align_down(size_t x, size_t align)
 291+{
 292+ tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two");
 293+ return x - (x & (align - 1));
 294+}
 295+
 296+static void* align_ptr(void* ptr, size_t align) ICODE_ATTR;
 297+static void* align_ptr(void* ptr, size_t align)
 298+{
 299+ const tlsfptr_t aligned =
 300+ (tlsf_cast(tlsfptr_t, ptr) + (align - 1)) & ~(align - 1);
 301+ tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two");
 302+ return tlsf_cast(void*, aligned);
 303+}
 304+
 305+/*
 306+** Adjust an allocation size to be aligned to word size, and no smaller
 307+** than internal minimum.
 308+*/
 309+static size_t adjust_request_size(size_t size, size_t align) ICODE_ATTR;
 310+static size_t adjust_request_size(size_t size, size_t align)
 311+{
 312+ size_t adjust = 0;
 313+ if (size && size < block_size_max)
 314+ {
 315+ const size_t aligned = align_up(size, align);
 316+ adjust = tlsf_max(aligned, block_size_min);
 317+ }
 318+ return adjust;
 319+}
 320+
 321+/*
 322+** TLSF utility functions. In most cases, these are direct translations of
 323+** the documentation found in the white paper.
 324+*/
 325+
 326+static void mapping_insert(size_t size, int* fli, int* sli) ICODE_ATTR;
 327+static void mapping_insert(size_t size, int* fli, int* sli)
 328+{
 329+ int fl, sl;
 330+ if (size < SMALL_BLOCK_SIZE)
 331+ {
 332+ /* Store small blocks in first list. */
 333+ fl = 0;
 334+ sl = size / (SMALL_BLOCK_SIZE / SL_INDEX_COUNT);
 335+ }
 336+ else
 337+ {
 338+ fl = tlsf_fls(size);
 339+ sl = (size >> (fl - SL_INDEX_COUNT_LOG2)) ^ (1 << SL_INDEX_COUNT_LOG2);
 340+ fl -= (FL_INDEX_SHIFT - 1);
 341+ }
 342+ *fli = fl;
 343+ *sli = sl;
 344+}
 345+
 346+/* This version rounds up to the next block size (for allocations) */
 347+static void mapping_search(size_t size, int* fli, int* sli) ICODE_ATTR;
 348+static void mapping_search(size_t size, int* fli, int* sli)
 349+{
 350+ if (size >= (1 << SL_INDEX_COUNT_LOG2))
 351+ {
 352+ const size_t round = (1 << (tlsf_fls(size) - SL_INDEX_COUNT_LOG2)) - 1;
 353+ size += round;
 354+ }
 355+ mapping_insert(size, fli, sli);
 356+}
 357+
 358+static block_header_t* search_suitable_block(pool_t* pool, int* fli, int* sli) ICODE_ATTR;
 359+static block_header_t* search_suitable_block(pool_t* pool, int* fli, int* sli)
 360+{
 361+ int fl = *fli;
 362+ int sl = *sli;
 363+
 364+ /*
 365+ ** First, search for a block in the list associated with the given
 366+ ** fl/sl index.
 367+ */
 368+ unsigned int sl_map = pool->sl_bitmap[fl] & (0xffffffff << sl);
 369+ if (!sl_map)
 370+ {
 371+ /* No block exists. Search in the next largest first-level list. */
 372+ const unsigned int fl_map = pool->fl_bitmap & (0xffffffff << (fl + 1));
 373+ if (!fl_map)
 374+ {
 375+ /* No free blocks available, memory has been exhausted. */
 376+ return 0;
 377+ }
 378+
 379+ fl = tlsf_ffs(fl_map);
 380+ *fli = fl;
 381+ sl_map = pool->sl_bitmap[fl];
 382+ }
 383+ tlsf_assert(sl_map && "internal error - second level bitmap is null");
 384+ sl = tlsf_ffs(sl_map);
 385+ *sli = sl;
 386+
 387+ /* Return the first block in the free list. */
 388+ return pool->blocks[fl][sl];
 389+}
 390+
 391+/* Remove a free block from the free list.*/
 392+static void remove_free_block(pool_t* pool, block_header_t* block, int fl, int sl) ICODE_ATTR;
 393+static void remove_free_block(pool_t* pool, block_header_t* block, int fl, int sl)
 394+{
 395+ block_header_t* prev = block->prev_free;
 396+ block_header_t* next = block->next_free;
 397+ tlsf_assert(prev && "prev_free field can not be null");
 398+ tlsf_assert(next && "next_free field can not be null");
 399+ next->prev_free = prev;
 400+ prev->next_free = next;
 401+
 402+ /* If this block is the head of the free list, set new head. */
 403+ if (pool->blocks[fl][sl] == block)
 404+ {
 405+ pool->blocks[fl][sl] = next;
 406+
 407+ /* If the new head is null, clear the bitmap. */
 408+ if (next == &block_null)
 409+ {
 410+ pool->sl_bitmap[fl] &= ~(1 << sl);
 411+
 412+ /* If the second bitmap is now empty, clear the fl bitmap. */
 413+ if (!pool->sl_bitmap[fl])
 414+ {
 415+ pool->fl_bitmap &= ~(1 << fl);
 416+ }
 417+ }
 418+ }
 419+}
 420+
 421+/* Insert a free block into the free block list. */
 422+static void insert_free_block(pool_t* pool, block_header_t* block, int fl, int sl) ICODE_ATTR;
 423+static void insert_free_block(pool_t* pool, block_header_t* block, int fl, int sl)
 424+{
 425+ block_header_t* current = pool->blocks[fl][sl];
 426+ tlsf_assert(current && "free list cannot have a null entry");
 427+ tlsf_assert(block && "cannot insert a null entry into the free list");
 428+ block->next_free = current;
 429+ block->prev_free = &block_null;
 430+ current->prev_free = block;
 431+
 432+ /*
 433+ ** Insert the new block at the head of the list, and mark the first-
 434+ ** and second-level bitmaps appropriately.
 435+ */
 436+ pool->blocks[fl][sl] = block;
 437+ pool->fl_bitmap |= (1 << fl);
 438+ pool->sl_bitmap[fl] |= (1 << sl);
 439+}
 440+
 441+/* Remove a given block from the free list. */
 442+static void block_remove(pool_t* pool, block_header_t* block) ICODE_ATTR;
 443+static void block_remove(pool_t* pool, block_header_t* block)
 444+{
 445+ int fl, sl;
 446+ mapping_insert(block_size(block), &fl, &sl);
 447+ remove_free_block(pool, block, fl, sl);
 448+}
 449+
 450+/* Insert a given block into the free list. */
 451+static void block_insert(pool_t* pool, block_header_t* block) ICODE_ATTR;
 452+static void block_insert(pool_t* pool, block_header_t* block)
 453+{
 454+ int fl, sl;
 455+ mapping_insert(block_size(block), &fl, &sl);
 456+ insert_free_block(pool, block, fl, sl);
 457+}
 458+
 459+static int block_can_split(block_header_t* block, size_t size) ICODE_ATTR;
 460+static int block_can_split(block_header_t* block, size_t size)
 461+{
 462+ return block_size(block) >= sizeof(block_header_t) + size;
 463+}
 464+
 465+/* Split a block into two, the second of which is free. */
 466+static block_header_t* block_split(block_header_t* block, size_t size) ICODE_ATTR;
 467+static block_header_t* block_split(block_header_t* block, size_t size)
 468+{
 469+ /* Calculate the amount of space left in the remaining block. */
 470+ block_header_t* remaining =
 471+ offset_to_block(block_to_ptr(block), size - block_header_overhead);
 472+
 473+ const size_t remain_size = block_size(block) - (size + block_header_overhead);
 474+ tlsf_assert(block_size(block) == remain_size + size + block_header_overhead);
 475+ block_set_size(remaining, remain_size);
 476+ tlsf_assert(block_size(remaining) >= block_size_min && "block split with invalid size");
 477+
 478+ block_set_size(block, size);
 479+ block_mark_as_free(remaining);
 480+
 481+ return remaining;
 482+}
 483+
 484+/* Absorb a free block's storage into an adjacent previous free block. */
 485+static block_header_t* block_absorb(block_header_t* prev, block_header_t* block) ICODE_ATTR;
 486+static block_header_t* block_absorb(block_header_t* prev, block_header_t* block)
 487+{
 488+ tlsf_assert(!block_is_last(prev) && "previous block can't be last!");
 489+ /* Note: Leaves flags untouched. */
 490+ prev->size += block_size(block) + block_header_overhead;
 491+ block_link_next(prev);
 492+ return prev;
 493+}
 494+
 495+/* Merge a just-freed block with an adjacent previous free block. */
 496+static block_header_t* block_merge_prev(pool_t* pool, block_header_t* block) ICODE_ATTR;
 497+static block_header_t* block_merge_prev(pool_t* pool, block_header_t* block)
 498+{
 499+ if (block_is_prev_free(block))
 500+ {
 501+ block_header_t* prev = block_prev(block);
 502+ tlsf_assert(prev && "prev physical block can't be null");
 503+ tlsf_assert(block_is_free(prev) && "prev block is not free though marked as such");
 504+ block_remove(pool, prev);
 505+ block = block_absorb(prev, block);
 506+ }
 507+
 508+ return block;
 509+}
 510+
 511+/* Merge a just-freed block with an adjacent free block. */
 512+static block_header_t* block_merge_next(pool_t* pool, block_header_t* block) ICODE_ATTR;
 513+static block_header_t* block_merge_next(pool_t* pool, block_header_t* block)
 514+{
 515+ block_header_t* next = block_next(block);
 516+ tlsf_assert(next && "next physical block can't be null");
 517+
 518+ if (block_is_free(next))
 519+ {
 520+ tlsf_assert(!block_is_last(block) && "previous block can't be last!");
 521+ block_remove(pool, next);
 522+ block = block_absorb(block, next);
 523+ }
 524+
 525+ return block;
 526+}
 527+
 528+/* Trim any trailing block space off the end of a block, return to pool. */
 529+static void block_trim_free(pool_t* pool, block_header_t* block, size_t size) ICODE_ATTR;
 530+static void block_trim_free(pool_t* pool, block_header_t* block, size_t size)
 531+{
 532+ tlsf_assert(block_is_free(block) && "block must be free");
 533+ if (block_can_split(block, size))
 534+ {
 535+ block_header_t* remaining_block = block_split(block, size);
 536+ block_link_next(block);
 537+ block_set_prev_free(remaining_block);
 538+ block_insert(pool, remaining_block);
 539+ }
 540+}
 541+
 542+/* Trim any trailing block space off the end of a used block, return to pool. */
 543+static void block_trim_used(pool_t* pool, block_header_t* block, size_t size) ICODE_ATTR;
 544+static void block_trim_used(pool_t* pool, block_header_t* block, size_t size)
 545+{
 546+ tlsf_assert(!block_is_free(block) && "block must be used");
 547+ if (block_can_split(block, size))
 548+ {
 549+ /* If the next block is free, we must coalesce. */
 550+ block_header_t* remaining_block = block_split(block, size);
 551+ block_set_prev_used(remaining_block);
 552+
 553+ remaining_block = block_merge_next(pool, remaining_block);
 554+ block_insert(pool, remaining_block);
 555+ }
 556+}
 557+
 558+static block_header_t* block_trim_free_leading(pool_t* pool, block_header_t* block, size_t size) ICODE_ATTR;
 559+static block_header_t* block_trim_free_leading(pool_t* pool, block_header_t* block, size_t size)
 560+{
 561+ block_header_t* remaining_block = block;
 562+ if (block_can_split(block, size))
 563+ {
 564+ /* We want the 2nd block. */
 565+ remaining_block = block_split(block, size - block_header_overhead);
 566+ block_set_prev_free(remaining_block);
 567+
 568+ block_link_next(block);
 569+ block_insert(pool, block);
 570+ }
 571+
 572+ return remaining_block;
 573+}
 574+
 575+static block_header_t* block_locate_free(pool_t* pool, size_t size) ICODE_ATTR;
 576+static block_header_t* block_locate_free(pool_t* pool, size_t size)
 577+{
 578+ int fl, sl;
 579+ block_header_t* block = 0;
 580+
 581+ if (size)
 582+ {
 583+ mapping_search(size, &fl, &sl);
 584+ block = search_suitable_block(pool, &fl, &sl);
 585+ }
 586+
 587+ if (block)
 588+ {
 589+ tlsf_assert(block_size(block) >= size);
 590+ remove_free_block(pool, block, fl, sl);
 591+ }
 592+
 593+ return block;
 594+}
 595+
 596+static void* block_prepare_used(pool_t* pool, block_header_t* block, size_t size) ICODE_ATTR;
 597+static void* block_prepare_used(pool_t* pool, block_header_t* block, size_t size)
 598+{
 599+ void* p = 0;
 600+ if (block)
 601+ {
 602+ block_trim_free(pool, block, size);
 603+ block_mark_as_used(block);
 604+ p = block_to_ptr(block);
 605+ }
 606+ return p;
 607+}
 608+
 609+/* Clear structure and point all empty lists at the null block. */
 610+static void pool_construct(pool_t* pool) ICODE_ATTR;
 611+static void pool_construct(pool_t* pool)
 612+{
 613+ int i, j;
 614+
 615+ block_null.next_free = &block_null;
 616+ block_null.prev_free = &block_null;
 617+
 618+ pool->fl_bitmap = 0;
 619+ for (i = 0; i < FL_INDEX_COUNT; ++i)
 620+ {
 621+ pool->sl_bitmap[i] = 0;
 622+ for (j = 0; j < SL_INDEX_COUNT; ++j)
 623+ {
 624+ pool->blocks[i][j] = &block_null;
 625+ }
 626+ }
 627+}
 628+
 629+/*
 630+** Debugging utilities.
 631+*/
 632+
 633+typedef struct integrity_t
 634+{
 635+ int prev_status;
 636+ int status;
 637+} integrity_t;
 638+
 639+#define tlsf_insist(x) { tlsf_assert(x); if (!(x)) { status--; } }
 640+
 641+static void integrity_walker(void* ptr, size_t size, int used, void* user)
 642+{
 643+ block_header_t* block = block_from_ptr(ptr);
 644+ integrity_t* integ = tlsf_cast(integrity_t*, user);
 645+ const int this_prev_status = block_is_prev_free(block) ? 1 : 0;
 646+ const int this_status = block_is_free(block) ? 1 : 0;
 647+ const size_t this_block_size = block_size(block);
 648+
 649+ int status = 0;
 650+ tlsf_insist(integ->prev_status == this_prev_status && "prev status incorrect");
 651+ tlsf_insist(size == this_block_size && "block size incorrect");
 652+
 653+ integ->prev_status = this_status;
 654+ integ->status += status;
 655+}
 656+
 657+int tlsf_check_heap(tlsf_pool tlsf)
 658+{
 659+ int i, j;
 660+
 661+ pool_t* pool = tlsf_cast(pool_t*, tlsf);
 662+ int status = 0;
 663+
 664+ /* Check that the blocks are physically correct. */
 665+ integrity_t integ = { 0, 0 };
 666+ tlsf_walk_heap(tlsf, integrity_walker, &integ);
 667+ status = integ.status;
 668+
 669+ /* Check that the free lists and bitmaps are accurate. */
 670+ for (i = 0; i < FL_INDEX_COUNT; ++i)
 671+ {
 672+ for (j = 0; j < SL_INDEX_COUNT; ++j)
 673+ {
 674+ const int fl_map = pool->fl_bitmap & (1 << i);
 675+ const int sl_list = pool->sl_bitmap[i];
 676+ const int sl_map = sl_list & (1 << j);
 677+ const block_header_t* block = pool->blocks[i][j];
 678+
 679+ /* Check that first- and second-level lists agree. */
 680+ if (!fl_map)
 681+ {
 682+ tlsf_insist(!sl_map && "second-level map must be null");
 683+ }
 684+
 685+ if (!sl_map)
 686+ {
 687+ tlsf_insist((block == &block_null) && "block list must be null");
 688+ continue;
 689+ }
 690+
 691+ /* Check that there is at least one free block. */
 692+ tlsf_insist(sl_list && "no free blocks in second-level map");
 693+ tlsf_insist((block != &block_null) && "block should not be null");
 694+
 695+ while (block != &block_null)
 696+ {
 697+ int fli, sli;
 698+ tlsf_insist(block_is_free(block) && "block should be free");
 699+ tlsf_insist(!block_is_prev_free(block) && "blocks should have coalesced");
 700+ tlsf_insist(!block_is_free(block_next(block)) && "blocks should have coalesced");
 701+ tlsf_insist(block_is_prev_free(block_next(block)) && "block should be free");
 702+ tlsf_insist(block_size(block) >= block_size_min && "block not minimum size");
 703+
 704+ mapping_insert(block_size(block), &fli, &sli);
 705+ tlsf_insist(fli == i && sli == j && "block size indexed in wrong list");
 706+ block = block->next_free;
 707+ }
 708+ }
 709+ }
 710+
 711+ return status;
 712+}
 713+
 714+#undef tlsf_insist
 715+
 716+static void default_walker(void* ptr, size_t size, int used, void* user)
 717+{
 718+ (void)user;
 719+ cprintf(CONSOLE_BOOT, "\t%p %s size: %x\n", ptr, used ? "used" : "free", size);
 720+}
 721+
 722+void tlsf_walk_heap(tlsf_pool pool, tlsf_walker walker, void* user)
 723+{
 724+ tlsf_walker heap_walker = walker ? walker : default_walker;
 725+ block_header_t* block =
 726+ offset_to_block(pool, sizeof(pool_t) - block_header_overhead);
 727+
 728+ while (block && !block_is_last(block))
 729+ {
 730+ heap_walker(
 731+ block_to_ptr(block),
 732+ block_size(block),
 733+ !block_is_free(block),
 734+ user);
 735+ block = block_next(block);
 736+ }
 737+}
 738+
size_t tlsf_block_size(void* ptr)
{
    /* Usable size of an allocation; 0 for a null pointer. */
    if (!ptr)
        return 0;
    return block_size(block_from_ptr(ptr));
}
 749+
 750+/*
 751+** Overhead of the TLSF structures in a given memory block passed to
 752+** tlsf_create, equal to the size of a pool_t plus overhead of the initial
 753+** free block and the sentinel block.
 754+*/
 755+size_t tlsf_overhead()
 756+{
 757+ const size_t pool_overhead = sizeof(pool_t) + 2 * block_header_overhead;
 758+ return pool_overhead;
 759+}
 760+
 761+/*
 762+** TLSF main interface. Right out of the white paper.
 763+*/
 764+
 765+tlsf_pool tlsf_create(void* mem, size_t bytes)
 766+{
 767+ block_header_t* block;
 768+ block_header_t* next;
 769+
 770+ const size_t pool_overhead = tlsf_overhead();
 771+ const size_t pool_bytes = align_down(bytes - pool_overhead, ALIGN_SIZE);
 772+ pool_t* pool = tlsf_cast(pool_t*, mem);
 773+
 774+ if (pool_bytes < block_size_min || pool_bytes > block_size_max)
 775+ {
 776+ cprintf(CONSOLE_BOOT, "tlsf_create: Pool size must be between %d and %d bytes.\n",
 777+ pool_overhead + block_size_min, pool_overhead + block_size_max);
 778+ return 0;
 779+ }
 780+
 781+ /* Construct a valid pool object. */
 782+ pool_construct(pool);
 783+
 784+ /*
 785+ ** Create the main free block. Offset the start of the block slightly
 786+ ** so that the prev_phys_block field falls inside of the pool
 787+ ** structure - it will never be used.
 788+ */
 789+ block = offset_to_block(
 790+ tlsf_cast(void*, pool), sizeof(pool_t) - block_header_overhead);
 791+ block_set_size(block, align_down(pool_bytes, ALIGN_SIZE));
 792+ block_set_free(block);
 793+ block_set_prev_used(block);
 794+ block_insert(pool, block);
 795+
 796+ /* Split the block to create a zero-size pool sentinel block. */
 797+ next = block_link_next(block);
 798+ block_set_size(next, 0);
 799+ block_set_used(next);
 800+ block_set_prev_free(next);
 801+
 802+ return tlsf_cast(tlsf_pool, pool);
 803+}
 804+
 805+void tlsf_destroy(tlsf_pool pool)
 806+{
 807+ /* Nothing to do. */
 808+ pool = pool;
 809+}
 810+
 811+void* tlsf_malloc(tlsf_pool tlsf, size_t size)
 812+{
 813+ pool_t* pool = tlsf_cast(pool_t*, tlsf);
 814+ const size_t adjust = adjust_request_size(size, ALIGN_SIZE);
 815+ block_header_t* block = block_locate_free(pool, adjust);
 816+ return block_prepare_used(pool, block, adjust);
 817+}
 818+
/*
** Allocate 'size' bytes aligned to 'align' (a power of two).
** For align <= ALIGN_SIZE this degenerates to a plain tlsf_malloc;
** otherwise an oversized block is located and a leading free block
** is trimmed off to reach the alignment boundary.
*/
void* tlsf_memalign(tlsf_pool tlsf, size_t align, size_t size)
{
    pool_t* pool = tlsf_cast(pool_t*, tlsf);
    const size_t adjust = adjust_request_size(size, ALIGN_SIZE);

    /*
    ** We must allocate an additional minimum block size bytes so that if
    ** our free block will leave an alignment gap which is smaller, we can
    ** trim a leading free block and release it back to the heap. We must
    ** do this because the previous physical block is in use, therefore
    ** the prev_phys_block field is not valid, and we can't simply adjust
    ** the size of that block.
    */
    const ptrdiff_t gap_minimum = tlsf_cast(ptrdiff_t, sizeof(block_header_t));
    const size_t size_with_gap = adjust_request_size(adjust + align + gap_minimum, align);

    /* If alignment is less than or equals base alignment, we're done. */
    const size_t aligned_size = (align <= ALIGN_SIZE) ? adjust : size_with_gap;

    block_header_t* block = block_locate_free(pool, aligned_size);

    /* This can't be a static assert. */
    tlsf_assert(sizeof(block_header_t) == block_size_min + block_header_overhead);

    if (block)
    {
        void* ptr = block_to_ptr(block);
        void* aligned = align_ptr(ptr, align);
        /* Distance from the block's payload start to the aligned address. */
        ptrdiff_t gap = tlsf_cast(ptrdiff_t,
            tlsf_cast(tlsfptr_t, aligned) - tlsf_cast(tlsfptr_t, ptr));

        /* If gap size is too small, offset to next aligned boundary. */
        if (gap && gap < gap_minimum)
        {
            const ptrdiff_t gap_remain = gap_minimum - gap;
            /* Advance by at least the shortfall, but never less than one
            ** full alignment step, then re-align. */
            const ptrdiff_t offset = tlsf_max(gap_remain, tlsf_cast(ptrdiff_t, align));
            void* next_aligned = tlsf_cast(void*,
                tlsf_cast(tlsfptr_t, aligned) + tlsf_cast(tlsfptr_t, offset));
            aligned = align_ptr(next_aligned, align);
            gap = tlsf_cast(ptrdiff_t,
                tlsf_cast(tlsfptr_t, aligned) - tlsf_cast(tlsfptr_t, ptr));
        }

        if (gap)
        {
            tlsf_assert(gap >= gap_minimum && "gap size too small");
            /* Split off the leading gap as its own free block. */
            block = block_trim_free_leading(pool, block, gap);
        }
    }

    /* block may be 0 here; block_prepare_used handles that case. */
    return block_prepare_used(pool, block, adjust);
}
 871+
 872+void tlsf_free(tlsf_pool tlsf, void* ptr)
 873+{
 874+ /* Don't attempt to free a NULL pointer. */
 875+ if (ptr)
 876+ {
 877+ pool_t* pool = tlsf_cast(pool_t*, tlsf);
 878+ block_header_t* block = block_from_ptr(ptr);
 879+ block_mark_as_free(block);
 880+ block = block_merge_prev(pool, block);
 881+ block = block_merge_next(pool, block);
 882+ block_insert(pool, block);
 883+ }
 884+}
 885+
/*
** The TLSF block information provides us with enough information to
** provide a reasonably intelligent implementation of realloc, growing or
** shrinking the currently allocated block as required.
**
** This routine handles the somewhat esoteric edge cases of realloc:
** - a non-zero size with a null pointer will behave like malloc
** - a zero size with a non-null pointer will behave like free
** - a request that cannot be satisfied will leave the original buffer
**   untouched
** - an extended buffer size will leave the newly-allocated area with
**   contents undefined
*/
void* tlsf_realloc(tlsf_pool tlsf, void* ptr, size_t size)
{
    pool_t* pool = tlsf_cast(pool_t*, tlsf);
    void* p = 0;

    /* Zero-size requests are treated as free. */
    if (ptr && size == 0)
    {
        tlsf_free(tlsf, ptr);
    }
    /* Requests with NULL pointers are treated as malloc. */
    else if (!ptr)
    {
        p = tlsf_malloc(tlsf, size);
    }
    else
    {
        block_header_t* block = block_from_ptr(ptr);
        block_header_t* next = block_next(block);

        const size_t cursize = block_size(block);
        /* Size available if the current block absorbed its successor. */
        const size_t combined = cursize + block_size(next) + block_header_overhead;
        const size_t adjust = adjust_request_size(size, ALIGN_SIZE);

        /*
        ** If the next block is used, or when combined with the current
        ** block, does not offer enough space, we must reallocate and copy.
        */
        if (adjust > cursize && (!block_is_free(next) || adjust > combined))
        {
            p = tlsf_malloc(tlsf, size);
            if (p)
            {
                /* Preserve only the bytes that fit in both buffers. */
                const size_t minsize = tlsf_min(cursize, size);
                memcpy(p, ptr, minsize);
                tlsf_free(tlsf, ptr);
            }
            /* On allocation failure p stays 0 and ptr remains valid. */
        }
        else
        {
            /* Do we need to expand to the next block? */
            if (adjust > cursize)
            {
                /* Absorb the free successor, then re-mark the grown
                ** block as used (merge leaves it flagged free). */
                block_merge_next(pool, block);
                block_mark_as_used(block);
            }

            /* Trim the resulting block and return the original pointer. */
            block_trim_used(pool, block, adjust);
            p = ptr;
        }
    }

    return p;
}
Index: embios/trunk/libc/tlsf/tlsf.h
@@ -0,0 +1,53 @@
 2+#ifndef INCLUDED_tlsf
 3+#define INCLUDED_tlsf
 4+
 5+/*
 6+** Two Level Segregated Fit memory allocator, version 1.9.
 7+** Written by Matthew Conte, and placed in the Public Domain.
 8+** http://tlsf.baisoku.org
 9+**
 10+** Based on the original documentation by Miguel Masmano:
 11+** http://rtportal.upv.es/rtmalloc/allocators/tlsf/index.shtml
 12+**
 13+** Please see the accompanying Readme.txt for implementation
 14+** notes and caveats.
 15+**
 16+** This implementation was written to the specification
 17+** of the document, therefore no GPL restrictions apply.
 18+*/
 19+
 20+#include "../../global.h"
 21+#include <stddef.h>
 22+
 23+#if defined(__cplusplus)
 24+extern "C" {
 25+#endif
 26+
 27+/* Create/destroy a memory pool. */
 28+typedef void* tlsf_pool;
 29+tlsf_pool tlsf_create(void* mem, size_t bytes);
 30+void tlsf_destroy(tlsf_pool pool);
 31+
 32+/* malloc/memalign/realloc/free replacements. */
 33+void* tlsf_malloc(tlsf_pool pool, size_t bytes) ICODE_ATTR;
 34+void* tlsf_memalign(tlsf_pool pool, size_t align, size_t bytes) ICODE_ATTR;
 35+void* tlsf_realloc(tlsf_pool pool, void* ptr, size_t size) ICODE_ATTR;
 36+void tlsf_free(tlsf_pool pool, void* ptr) ICODE_ATTR;
 37+
 38+/* Debugging. */
 39+typedef void (*tlsf_walker)(void* ptr, size_t size, int used, void* user);
 40+void tlsf_walk_heap(tlsf_pool pool, tlsf_walker walker, void* user);
 41+/* Returns nonzero if heap check fails. */
 42+int tlsf_check_heap(tlsf_pool pool);
 43+
 44+/* Returns internal block size, not original request size */
 45+size_t tlsf_block_size(void* ptr) ICODE_ATTR;
 46+
 47+/* Overhead of per-pool internal structures. */
 48+size_t tlsf_overhead() ICODE_ATTR;
 49+
 50+#if defined(__cplusplus)
 51+};
 52+#endif
 53+
 54+#endif
Index: embios/trunk/export/syscallwrappers.h
@@ -164,6 +164,16 @@
165165 #define backlight_set_fade(args...) __embios_syscall->backlight_set_fade(args)
166166 #define backlight_set_brightness(args...) __embios_syscall->backlight_set_brightness(args)
167167 #define get_platform_id(args...) __embios_syscall->get_platform_id(args)
 168+#define tlsf_create(args...) __embios_syscall->tlsf_create(args)
 169+#define tlsf_destroy(args...) __embios_syscall->tlsf_destroy(args)
 170+#define tlsf_malloc(args...) __embios_syscall->tlsf_malloc(args)
 171+#define tlsf_memalign(args...) __embios_syscall->tlsf_memalign(args)
 172+#define tlsf_realloc(args...) __embios_syscall->tlsf_realloc(args)
 173+#define tlsf_free(args...) __embios_syscall->tlsf_free(args)
 174+#define tlsf_walk_heap(args...) __embios_syscall->tlsf_walk_heap(args)
 175+#define tlsf_check_heap(args...) __embios_syscall->tlsf_check_heap(args)
 176+#define tlsf_block_size(args...) __embios_syscall->tlsf_block_size(args)
 177+#define tlsf_overhead(args...) __embios_syscall->tlsf_overhead(args)
168178
169179
170180 #endif
Index: embios/trunk/export/syscallapi.h
@@ -51,6 +51,7 @@
5252 #include "../libc/include/string.h"
5353 #include "../libc/include/stdlib.h"
5454 #include "../libc/include/stdio.h"
 55+#include "../libc/tlsf/tlsf.h"
5556
5657 /* increase this every time the api struct changes */
5758 #define EMBIOS_API_VERSION 0
@@ -198,6 +199,16 @@
199200 typeof(backlight_set_fade) *backlight_set_fade;
200201 typeof(backlight_set_brightness) *backlight_set_brightness;
201202 typeof(get_platform_id) *get_platform_id;
 203+ typeof(tlsf_create) *tlsf_create;
 204+ typeof(tlsf_destroy) *tlsf_destroy;
 205+ typeof(tlsf_malloc) *tlsf_malloc;
 206+ typeof(tlsf_memalign) *tlsf_memalign;
 207+ typeof(tlsf_realloc) *tlsf_realloc;
 208+ typeof(tlsf_free) *tlsf_free;
 209+ typeof(tlsf_walk_heap) *tlsf_walk_heap;
 210+ typeof(tlsf_check_heap) *tlsf_check_heap;
 211+ typeof(tlsf_block_size) *tlsf_block_size;
 212+ typeof(tlsf_overhead) *tlsf_overhead;
202213 };
203214
204215
Index: embios/trunk/SOURCES
@@ -87,3 +87,4 @@
8888 libc/strlen.c
8989 libc/strncmp.c
9090 libc/strrchr.c
 91+libc/tlsf/tlsf.c
Index: embios/trunk/syscallapi.c
@@ -98,6 +98,16 @@
9999 .strstr = strstr,
100100 .strtok_r = strtok_r,
101101 .get_platform_id = get_platform_id,
 102+ .tlsf_create = tlsf_create,
 103+ .tlsf_destroy = tlsf_destroy,
 104+ .tlsf_malloc = tlsf_malloc,
 105+ .tlsf_memalign = tlsf_memalign,
 106+ .tlsf_realloc = tlsf_realloc,
 107+ .tlsf_free = tlsf_free,
 108+ .tlsf_walk_heap = tlsf_walk_heap,
 109+ .tlsf_check_heap = tlsf_check_heap,
 110+ .tlsf_block_size = tlsf_block_size,
 111+ .tlsf_overhead = tlsf_overhead,
102112 #ifdef HAVE_STORAGE
103113 .opendir = opendir,
104114 .closedir = closedir,