* libcsupport/src/free.c, libmisc/stackchk/check.c,
	rtems/include/rtems/rtems/region.h, rtems/src/regioncreate.c,
	rtems/src/regionextend.c, rtems/src/regiongetinfo.c,
	rtems/src/regiongetsegment.c, rtems/src/regiongetsegmentsize.c,
	rtems/src/regionresizesegment.c, score/src/pheapallocate.c,
	score/src/pheapallocatealigned.c, score/src/pheapextend.c,
	score/src/pheapfree.c, score/src/pheapgetblocksize.c,
	score/src/pheapgetfreeinfo.c, score/src/pheapgetinfo.c,
	score/src/pheapgetsize.c, score/src/pheapinit.c,
	score/src/pheapresizeblock.c, score/src/pheapwalk.c:
	Update for heap API changes.
	* score/include/rtems/score/apimutex.h,
	score/include/rtems/score/object.h: Documentation.
	* score/include/rtems/score/heap.h,
	score/include/rtems/score/protectedheap.h,
	score/inline/rtems/score/heap.inl, score/src/heap.c,
	score/src/heapallocate.c, score/src/heapallocatealigned.c,
	score/src/heapextend.c, score/src/heapfree.c,
	score/src/heapgetfreeinfo.c, score/src/heapgetinfo.c,
	score/src/heapresizeblock.c, score/src/heapsizeofuserarea.c,
	score/src/heapwalk.c: Overall cleanup. Added boundary constraint to
	allocation function. More changes follow.
This commit is contained in:
Joel Sherrill
2009-09-06 15:24:08 +00:00
parent 6685aa0959
commit dea3eccb38
36 changed files with 1534 additions and 1006 deletions

View File

@@ -1,3 +1,28 @@
2009-09-06 Sebastian Huber <Sebastian.Huber@embedded-brains.de>
* libcsupport/src/free.c, libmisc/stackchk/check.c,
rtems/include/rtems/rtems/region.h, rtems/src/regioncreate.c,
rtems/src/regionextend.c, rtems/src/regiongetinfo.c,
rtems/src/regiongetsegment.c, rtems/src/regiongetsegmentsize.c,
rtems/src/regionresizesegment.c, score/src/pheapallocate.c,
score/src/pheapallocatealigned.c, score/src/pheapextend.c,
score/src/pheapfree.c, score/src/pheapgetblocksize.c,
score/src/pheapgetfreeinfo.c, score/src/pheapgetinfo.c,
score/src/pheapgetsize.c, score/src/pheapinit.c,
score/src/pheapresizeblock.c, score/src/pheapwalk.c:
Update for heap API changes.
* score/include/rtems/score/apimutex.h,
score/include/rtems/score/object.h: Documentation.
* score/include/rtems/score/heap.h,
score/include/rtems/score/protectedheap.h,
score/inline/rtems/score/heap.inl, score/src/heap.c,
score/src/heapallocate.c, score/src/heapallocatealigned.c,
score/src/heapextend.c, score/src/heapfree.c,
score/src/heapgetfreeinfo.c, score/src/heapgetinfo.c,
score/src/heapresizeblock.c, score/src/heapsizeofuserarea.c,
score/src/heapwalk.c: Overall cleanup. Added boundary constraint to
allocation function. More changes follow.
2009-09-04 Sebastian Huber <Sebastian.Huber@embedded-brains.de> 2009-09-04 Sebastian Huber <Sebastian.Huber@embedded-brains.de>
* rtems/src/taskmode.c, sapi/src/exshutdown.c, * rtems/src/taskmode.c, sapi/src/exshutdown.c,

View File

@@ -59,8 +59,8 @@ void free(
if ( !_Protected_heap_Free( RTEMS_Malloc_Heap, ptr ) ) { if ( !_Protected_heap_Free( RTEMS_Malloc_Heap, ptr ) ) {
printk( "Program heap: free of bad pointer %p -- range %p - %p \n", printk( "Program heap: free of bad pointer %p -- range %p - %p \n",
ptr, ptr,
RTEMS_Malloc_Heap->begin, RTEMS_Malloc_Heap->area_begin,
RTEMS_Malloc_Heap->end RTEMS_Malloc_Heap->area_end
); );
} }

View File

@@ -92,7 +92,7 @@ static inline bool Stack_check_Frame_pointer_in_range(
#else #else
#define Stack_check_Get_pattern_area( _the_stack ) \ #define Stack_check_Get_pattern_area( _the_stack ) \
((Stack_check_Control *) ((char *)(_the_stack)->area + HEAP_LAST_BLOCK_OVERHEAD)) ((Stack_check_Control *) ((char *)(_the_stack)->area + HEAP_BLOCK_HEADER_SIZE))
#define Stack_check_Calculate_used( _low, _size, _high_water) \ #define Stack_check_Calculate_used( _low, _size, _high_water) \
( ((char *)(_low) + (_size)) - (char *)(_high_water) ) ( ((char *)(_low) + (_size)) - (char *)(_high_water) )

View File

@@ -69,9 +69,9 @@ typedef struct {
Objects_Control Object; Objects_Control Object;
Thread_queue_Control Wait_queue; /* waiting threads */ Thread_queue_Control Wait_queue; /* waiting threads */
void *starting_address; /* physical start addr */ void *starting_address; /* physical start addr */
intptr_t length; /* physical length(bytes) */ uintptr_t length; /* physical length(bytes) */
uint32_t page_size; /* in bytes */ uintptr_t page_size; /* in bytes */
intptr_t maximum_segment_size; /* in bytes */ uintptr_t maximum_segment_size; /* in bytes */
rtems_attribute attribute_set; rtems_attribute attribute_set;
uint32_t number_of_used_blocks; /* blocks allocated */ uint32_t number_of_used_blocks; /* blocks allocated */
Heap_Control Memory; Heap_Control Memory;
@@ -104,9 +104,9 @@ void _Region_Manager_initialization(void);
rtems_status_code rtems_region_create( rtems_status_code rtems_region_create(
rtems_name name, rtems_name name,
void *starting_address, void *starting_address,
intptr_t length, uintptr_t length,
uint32_t page_size, uintptr_t page_size,
rtems_attribute attribute_set, rtems_attribute attribute_set,
Objects_Id *id Objects_Id *id
); );
@@ -121,7 +121,7 @@ rtems_status_code rtems_region_create(
rtems_status_code rtems_region_extend( rtems_status_code rtems_region_extend(
Objects_Id id, Objects_Id id,
void *starting_address, void *starting_address,
intptr_t length uintptr_t length
); );
/** /**
@@ -184,10 +184,10 @@ rtems_status_code rtems_region_delete(
*/ */
rtems_status_code rtems_region_get_segment( rtems_status_code rtems_region_get_segment(
Objects_Id id, Objects_Id id,
intptr_t size, uintptr_t size,
rtems_option option_set, rtems_option option_set,
rtems_interval timeout, rtems_interval timeout,
void **segment void **segment
); );
/** /**
@@ -199,7 +199,7 @@ rtems_status_code rtems_region_get_segment(
rtems_status_code rtems_region_get_segment_size( rtems_status_code rtems_region_get_segment_size(
Objects_Id id, Objects_Id id,
void *segment, void *segment,
intptr_t *size uintptr_t *size
); );
/** /**
@@ -241,8 +241,8 @@ rtems_status_code rtems_region_return_segment(
rtems_status_code rtems_region_resize_segment( rtems_status_code rtems_region_resize_segment(
Objects_Id id, Objects_Id id,
void *segment, void *segment,
intptr_t size, uintptr_t size,
intptr_t *old_size uintptr_t *old_size
); );
#ifndef __RTEMS_APPLICATION__ #ifndef __RTEMS_APPLICATION__

View File

@@ -50,8 +50,8 @@
rtems_status_code rtems_region_create( rtems_status_code rtems_region_create(
rtems_name name, rtems_name name,
void *starting_address, void *starting_address,
intptr_t length, uintptr_t length,
uint32_t page_size, uintptr_t page_size,
rtems_attribute attribute_set, rtems_attribute attribute_set,
Objects_Id *id Objects_Id *id
) )

View File

@@ -45,10 +45,10 @@
rtems_status_code rtems_region_extend( rtems_status_code rtems_region_extend(
Objects_Id id, Objects_Id id,
void *starting_address, void *starting_address,
intptr_t length uintptr_t length
) )
{ {
intptr_t amount_extended; uintptr_t amount_extended;
Heap_Extend_status heap_status; Heap_Extend_status heap_status;
Objects_Locations location; Objects_Locations location;
rtems_status_code return_status; rtems_status_code return_status;

View File

@@ -60,11 +60,8 @@ rtems_status_code rtems_region_get_information(
switch ( location ) { switch ( location ) {
case OBJECTS_LOCAL: case OBJECTS_LOCAL:
if ( _Heap_Get_information( &the_region->Memory, the_info ) != _Heap_Get_information( &the_region->Memory, the_info );
HEAP_GET_INFORMATION_SUCCESSFUL ) return_status = RTEMS_SUCCESSFUL;
return_status = RTEMS_INVALID_ADDRESS;
else
return_status = RTEMS_SUCCESSFUL;
break; break;
#if defined(RTEMS_MULTIPROCESSING) #if defined(RTEMS_MULTIPROCESSING)

View File

@@ -47,7 +47,7 @@
rtems_status_code rtems_region_get_segment( rtems_status_code rtems_region_get_segment(
Objects_Id id, Objects_Id id,
intptr_t size, uintptr_t size,
rtems_option option_set, rtems_option option_set,
rtems_interval timeout, rtems_interval timeout,
void **segment void **segment

View File

@@ -45,7 +45,7 @@
rtems_status_code rtems_region_get_segment_size( rtems_status_code rtems_region_get_segment_size(
Objects_Id id, Objects_Id id,
void *segment, void *segment,
intptr_t *size uintptr_t *size
) )
{ {
Objects_Locations location; Objects_Locations location;

View File

@@ -46,13 +46,13 @@
rtems_status_code rtems_region_resize_segment( rtems_status_code rtems_region_resize_segment(
Objects_Id id, Objects_Id id,
void *segment, void *segment,
intptr_t size, uintptr_t size,
intptr_t *old_size uintptr_t *old_size
) )
{ {
intptr_t avail_size; uintptr_t avail_size;
Objects_Locations location; Objects_Locations location;
intptr_t osize; uintptr_t osize;
rtems_status_code return_status = RTEMS_INTERNAL_ERROR; rtems_status_code return_status = RTEMS_INTERNAL_ERROR;
Heap_Resize_status status; Heap_Resize_status status;
register Region_Control *the_region; register Region_Control *the_region;

View File

@@ -1,9 +1,9 @@
/** /**
* @file rtems/score/apimutex.h * @file
* *
* This include file contains all the constants and structures associated * @ingroup ScoreAPIMutex
* with the API Mutex Handler. This handler is used by API level *
* routines to manage mutual exclusion. * @brief API Mutex Handler API.
*/ */
/* /*
@@ -20,115 +20,97 @@
#ifndef _RTEMS_SCORE_APIMUTEX_H #ifndef _RTEMS_SCORE_APIMUTEX_H
#define _RTEMS_SCORE_APIMUTEX_H #define _RTEMS_SCORE_APIMUTEX_H
/**
* @defgroup ScoreAPIMutex API Mutex Handler
*
* This handler encapsulates functionality which provides mutexes to be used
* in the implementation of API functionality.
*/
/**@{*/
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
#endif #endif
/**
* @defgroup ScoreAPIMutex API Mutex Handler
*
* @ingroup Score
*
* @brief Provides routines to ensure mutual exclusion on API level.
*
* @{
*/
#include <rtems/score/coremutex.h> #include <rtems/score/coremutex.h>
#include <rtems/score/isr.h> #include <rtems/score/isr.h>
#include <rtems/score/object.h> #include <rtems/score/object.h>
/** /**
* The following defines the control block used to manage each API mutex. * @brief Control block used to manage each API mutex.
* An API Mutex is an aggregation of an Object and a SuperCore Mutex.
*/ */
typedef struct { typedef struct {
/** This field allows each API Mutex to be a full-fledged RTEMS object. */ /**
Objects_Control Object; * @brief Allows each API Mutex to be a full-fledged RTEMS object.
/** This field contains the SuperCore mutex information. */ */
CORE_mutex_Control Mutex; Objects_Control Object;
} API_Mutex_Control;
/**
* Contains the SuperCore mutex information.
*/
CORE_mutex_Control Mutex;
} API_Mutex_Control;
/** /**
* The following variable is the information control block used to manage * @brief Information control block used to manage this class of objects.
* this class of objects.
*/ */
SCORE_EXTERN Objects_Information _API_Mutex_Information; SCORE_EXTERN Objects_Information _API_Mutex_Information;
/** /**
* This routine performs the initialization necessary for this handler. * @brief Performs the initialization necessary for this handler.
* *
* @param[in] maximum_mutexes is the maximum number of API mutexes * The value @a maximum_mutexes is the maximum number of API mutexes that may
* that may exist at any time * exist at any time.
*/ */
void _API_Mutex_Initialization( void _API_Mutex_Initialization( uint32_t maximum_mutexes );
uint32_t maximum_mutexes
);
/** /**
* This routine allocates an API mutex from the inactive set. * @brief Allocates an API mutex from the inactive set and returns it in
* * @a mutex.
* @param[out] the_mutex will contain the allocated mutex.
*/ */
void _API_Mutex_Allocate( void _API_Mutex_Allocate( API_Mutex_Control **mutex );
API_Mutex_Control **the_mutex
);
/** /**
* This routine acquires the specified API mutex. * @brief Acquires the specified API mutex @a mutex.
*
* @param[in] the_mutex is the mutex to acquire.
*/ */
void _API_Mutex_Lock( void _API_Mutex_Lock( API_Mutex_Control *mutex );
API_Mutex_Control *the_mutex
);
/** /**
* This routine releases the specified API mutex. * @brief Releases the specified API mutex @a mutex.
*
* @param[in] the_mutex is the mutex to release.
*/ */
void _API_Mutex_Unlock( void _API_Mutex_Unlock( API_Mutex_Control *mutex );
API_Mutex_Control *the_mutex
); /** @} */
/** /**
* This variable points to the API Mutex instance that is used * @defgroup ScoreAllocatorMutex RTEMS Allocator Mutex
* to protect all memory allocation and deallocation in RTEMS.
* *
* @note When the APIs all use this for allocation and deallocation * @ingroup ScoreAPIMutex
* protection, then this possibly should be renamed and moved to a *
* higher level in the hierarchy. * @brief Protection for all memory allocations and deallocations in RTEMS.
*
* When the APIs all use this for allocation and deallocation protection, then
* this possibly should be renamed and moved to a higher level in the
* hierarchy.
*
* @{
*/ */
SCORE_EXTERN API_Mutex_Control *_RTEMS_Allocator_Mutex;
/** SCORE_EXTERN API_Mutex_Control *_RTEMS_Allocator_Mutex;
* This macro locks the RTEMS Allocation Mutex.
*
* @see _RTEMS_Allocator_Mutex
*/
#define _RTEMS_Lock_allocator() \ #define _RTEMS_Lock_allocator() \
_API_Mutex_Lock( _RTEMS_Allocator_Mutex ) _API_Mutex_Lock( _RTEMS_Allocator_Mutex )
/**
* This macro unlocks the RTEMS Allocation Mutex.
*
* @see _RTEMS_Allocator_Mutex
*/
#define _RTEMS_Unlock_allocator() \ #define _RTEMS_Unlock_allocator() \
_API_Mutex_Unlock( _RTEMS_Allocator_Mutex ) _API_Mutex_Unlock( _RTEMS_Allocator_Mutex )
/* /** @} */
* There are no inlines for this handler.
*/
#ifndef __RTEMS_APPLICATION__
/* #include <rtems/score/apimutex.inl> */
#endif
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif
/*!@}*/
#endif #endif
/* end of include file */ /* end of include file */

View File

@@ -1,7 +1,9 @@
/** /**
* @file * @file
* *
* Heap Handler API. * @ingroup ScoreHeap
*
* @brief Heap Handler API.
*/ */
/* /*
@@ -25,7 +27,9 @@ extern "C" {
/** /**
* @defgroup ScoreHeap Heap Handler * @defgroup ScoreHeap Heap Handler
* *
* The Heap Handler provides a heap. * @ingroup Score
*
* @brief The Heap Handler provides a heap.
* *
* A heap is a doubly linked list of variable size blocks which are allocated * A heap is a doubly linked list of variable size blocks which are allocated
* using the first fit method. Garbage collection is performed each time a * using the first fit method. Garbage collection is performed each time a
@@ -33,17 +37,22 @@ extern "C" {
* information for both allocated and free blocks is contained in the heap * information for both allocated and free blocks is contained in the heap
* area. A heap control structure contains control information for the heap. * area. A heap control structure contains control information for the heap.
* *
* FIXME: The alignment routines could be made faster should we require only * The alignment routines could be made faster should we require only powers of
* powers of two to be supported both for 'page_size' and for 'alignment' * two to be supported both for page size, alignment and boundary arguments.
* arguments. However, both workspace and malloc heaps are initialized with * However, both workspace and malloc heaps are initialized with
* CPU_HEAP_ALIGNMENT as 'page_size', and while all the BSPs seem to use * CPU_HEAP_ALIGNMENT as page size, and while all the BSPs seem to use
* CPU_ALIGNMENT (that is power of two) as CPU_HEAP_ALIGNMENT, for whatever * CPU_ALIGNMENT (that is power of two) as CPU_HEAP_ALIGNMENT, for whatever
* reason CPU_HEAP_ALIGNMENT is only required to be multiple of CPU_ALIGNMENT * reason CPU_HEAP_ALIGNMENT is only required to be multiple of CPU_ALIGNMENT
* and explicitly not required to be a power of two. * and explicitly not required to be a power of two.
* *
* There are two kinds of blocks. One sort describes a free block from which * There are two kinds of blocks. One sort describes a free block from which
* we can allocate memory. The other blocks are used and contain allocated * we can allocate memory. The other blocks are used and provide an allocated
* memory. The free blocks are accessible via a list of free blocks. * memory area. The free blocks are accessible via a list of free blocks.
*
* Blocks or areas cover a continuous set of memory addresses. They have a
* begin and end address. The end address is not part of the set. The size of
* a block or area equals the distance between the begin and end address in
* units of bytes.
* *
* Free blocks look like: * Free blocks look like:
* <table> * <table>
@@ -83,7 +92,10 @@ extern "C" {
* <table> * <table>
* <tr><th>Label</th><th colspan=2>Content</th></tr> * <tr><th>Label</th><th colspan=2>Content</th></tr>
* <tr><td>heap->begin</td><td colspan=2>heap area begin address</td></tr> * <tr><td>heap->begin</td><td colspan=2>heap area begin address</td></tr>
* <tr><td>first_block->prev_size</td><td colspan=2>arbitrary value</td></tr> * <tr>
* <td>first_block->prev_size</td>
* <td colspan=2>page size (the value is arbitrary)</td>
* </tr>
* <tr> * <tr>
* <td>first_block->size</td> * <td>first_block->size</td>
* <td colspan=2>size available for allocation * <td colspan=2>size available for allocation
@@ -100,7 +112,7 @@ extern "C" {
* </tr> * </tr>
* <tr> * <tr>
* <td>second_block->size</td> * <td>second_block->size</td>
* <td colspan=2>arbitrary size | @c HEAP_PREV_BLOCK_FREE</td> * <td colspan=2>page size (the value is arbitrary)</td>
* </tr> * </tr>
* <tr><td>heap->end</td><td colspan=2>heap area end address</td></tr> * <tr><td>heap->end</td><td colspan=2>heap area end address</td></tr>
* </table> * </table>
@@ -108,6 +120,23 @@ extern "C" {
* @{ * @{
*/ */
/**
* @brief See also @ref Heap_Block.size_and_flag.
*/
#define HEAP_PREV_BLOCK_USED ((uintptr_t) 1)
/**
* @brief Offset from the block begin up to the block size field
* (@ref Heap_Block.size_and_flag).
*/
#define HEAP_BLOCK_SIZE_OFFSET sizeof(uintptr_t)
/**
* @brief The block header consists of the two size fields
* (@ref Heap_Block.prev_size and @ref Heap_Block.size_and_flag).
*/
#define HEAP_BLOCK_HEADER_SIZE (sizeof(uintptr_t) * 2)
/** /**
* @brief Description for free or used blocks. * @brief Description for free or used blocks.
*/ */
@@ -119,6 +148,11 @@ typedef struct Heap_Block {
* This field is only valid if the previous block is free. This case is * This field is only valid if the previous block is free. This case is
* indicated by a cleared @c HEAP_PREV_BLOCK_USED flag in the * indicated by a cleared @c HEAP_PREV_BLOCK_USED flag in the
* @a size_and_flag field of the current block. * @a size_and_flag field of the current block.
*
* In a used block only the @a size_and_flag field needs to be valid. The
* @a prev_size field of the current block is maintained by the previous
* block. The current block can use the @a prev_size field in the next block
* for allocation.
*/ */
uintptr_t prev_size; uintptr_t prev_size;
@@ -157,86 +191,119 @@ typedef struct Heap_Block {
struct Heap_Block *prev; struct Heap_Block *prev;
} Heap_Block; } Heap_Block;
#define HEAP_PREV_BLOCK_USED ((uintptr_t) 1)
#define HEAP_PREV_BLOCK_FREE ((uintptr_t) 0)
/** /**
* @brief Offset from the block begin up to the block size field. * @brief Run-time heap statistics.
*/
#define HEAP_BLOCK_SIZE_OFFSET (sizeof(uintptr_t))
/**
* @brief Offset from the block begin up to the allocated area begin.
*/
#define HEAP_BLOCK_ALLOC_AREA_OFFSET (sizeof(uintptr_t) * 2)
#define HEAP_BLOCK_USED_OVERHEAD (sizeof(uintptr_t) * 2)
#define HEAP_LAST_BLOCK_OVERHEAD HEAP_BLOCK_ALLOC_AREA_OFFSET
/**
* Run-time heap statistics.
* *
* @note (double)searches/allocs gives mean number of searches per alloc while * The value @a searches / @a allocs gives the mean number of searches per
* max_search gives maximum number of searches ever performed on a * allocation, while @a max_search gives maximum number of searches ever
* single call to alloc. * performed on a single allocation call.
*
* @note the statistics is always gathered. I believe the imposed overhead is
* rather small. Feel free to make it compile-time option if you think
* the overhead is too high for your application.
*/ */
typedef struct { typedef struct {
/** instance number of this heap */ /**
* @brief Instance number of this heap.
*/
uint32_t instance; uint32_t instance;
/** the size of the memory for heap */
/**
* @brief The size of the memory for heap.
*/
uintptr_t size; uintptr_t size;
/** current free size */
/**
* @brief Current free size.
*/
uintptr_t free_size; uintptr_t free_size;
/** minimum free size ever */
/**
* @brief Minimum free size ever.
*/
uintptr_t min_free_size; uintptr_t min_free_size;
/** current number of free blocks */
/**
* @brief Current number of free blocks.
*/
uint32_t free_blocks; uint32_t free_blocks;
/** maximum number of free blocks ever */
/**
* @brief Maximum number of free blocks ever.
*/
uint32_t max_free_blocks; uint32_t max_free_blocks;
/** current number of used blocks */
/**
* @brief Current number of used blocks.
*/
uint32_t used_blocks; uint32_t used_blocks;
/** maximum number of blocks searched ever */
/**
* @brief Maximum number of blocks searched ever.
*/
uint32_t max_search; uint32_t max_search;
/** total number of successful calls to alloc */
/**
* @brief Total number of successful calls to alloc.
*/
uint32_t allocs; uint32_t allocs;
/** total number of searches ever */
/**
* @brief Total number of searches ever.
*/
uint32_t searches; uint32_t searches;
/** total number of successful calls to free */
/**
* @brief Total number of successful calls to free.
*/
uint32_t frees; uint32_t frees;
/** total number of successful resizes */
/**
* @brief Total number of successful resizes.
*/
uint32_t resizes; uint32_t resizes;
} Heap_Statistics; } Heap_Statistics;
/** /**
* Control block used to manage each heap. * @brief Control block used to manage a heap.
*/ */
typedef struct { typedef struct {
/** head and tail of circular list of free blocks */
Heap_Block free_list; Heap_Block free_list;
/** allocation unit and alignment */
uintptr_t page_size; uintptr_t page_size;
/** minimum block size aligned on page_size */
uintptr_t min_block_size; uintptr_t min_block_size;
/** first address of memory for the heap */ uintptr_t area_begin;
uintptr_t begin; uintptr_t area_end;
/** first address past end of memory for the heap */ Heap_Block *first_block;
uintptr_t end; Heap_Block *last_block;
/** first valid block address in the heap */
Heap_Block *start;
/** last valid block address in the heap */
Heap_Block *final;
/** run-time statistics */
Heap_Statistics stats; Heap_Statistics stats;
} Heap_Control; } Heap_Control;
/** /**
* Status codes for _Heap_Extend * @brief Information about blocks.
*/
typedef struct {
/**
* @brief Number of blocks of this type.
*/
uint32_t number;
/**
* @brief Largest block of this type.
*/
uint32_t largest;
/**
* @brief Total size of the blocks of this type.
*/
uint32_t total;
} Heap_Information;
/**
* @brief Information block returned by _Heap_Get_information().
*/
typedef struct {
Heap_Information Free;
Heap_Information Used;
} Heap_Information_block;
/**
* @brief See _Heap_Extend().
*/ */
typedef enum { typedef enum {
HEAP_EXTEND_SUCCESSFUL, HEAP_EXTEND_SUCCESSFUL,
@@ -245,7 +312,7 @@ typedef enum {
} Heap_Extend_status; } Heap_Extend_status;
/** /**
* Status codes for _Heap_Resize_block * @brief See _Heap_Resize_block().
*/ */
typedef enum { typedef enum {
HEAP_RESIZE_SUCCESSFUL, HEAP_RESIZE_SUCCESSFUL,
@@ -254,40 +321,8 @@ typedef enum {
} Heap_Resize_status; } Heap_Resize_status;
/** /**
* Status codes for _Heap_Get_information * @brief Initializes the heap control block @a heap to manage the area
*/ * starting at @a area_begin of size @a area_size bytes.
typedef enum {
HEAP_GET_INFORMATION_SUCCESSFUL = 0,
HEAP_GET_INFORMATION_BLOCK_ERROR
} Heap_Get_information_status;
/**
* Information block returned by the Heap routines used to
* obtain heap information. This information is returned about
* either free or used blocks.
*/
typedef struct {
/** Number of blocks of this type. */
uint32_t number;
/** Largest blocks of this type. */
uint32_t largest;
/** Total size of the blocks of this type. */
uint32_t total;
} Heap_Information;
/**
* Information block returned by _Heap_Get_information
*/
typedef struct {
/** This field is information on the used blocks in the heap. */
Heap_Information Free;
/** This field is information on the used blocks in the heap. */
Heap_Information Used;
} Heap_Information_block;
/**
* Initializes the @a heap control block to manage the area starting at
* @a area_begin of @a area_size bytes.
* *
* Blocks of memory are allocated from the heap in multiples of @a page_size * Blocks of memory are allocated from the heap in multiples of @a page_size
* byte units. If the @a page_size is equal to zero or is not multiple of * byte units. If the @a page_size is equal to zero or is not multiple of
@@ -303,16 +338,13 @@ uintptr_t _Heap_Initialize(
); );
/** /**
* This routine grows @a heap memory area using the size bytes which * @brief Extends the memory area of the heap @a heap using the memory area
* begin at @a starting_address. * starting at @a area_begin of size @a area_size bytes.
* *
* @param[in] heap is the heap to operate upon * The extended space available for allocation will be returned in
* @param[in] starting_address is the starting address of the memory * @a amount_extended.
* to add to the heap *
* @param[in] size is the size in bytes of the memory area to add * The memory area must start at the end of the currently used memory area.
* @param[in] amount_extended points to a user area to return the
* @return a status indicating success or the reason for failure
* @return *size filled in with the amount of memory added to the heap
*/ */
Heap_Extend_status _Heap_Extend( Heap_Extend_status _Heap_Extend(
Heap_Control *heap, Heap_Control *heap,
@@ -322,139 +354,115 @@ Heap_Extend_status _Heap_Extend(
); );
/** /**
* This function attempts to allocate a block of @a size bytes from * @brief Allocates a memory area of size @a size bytes.
* @a heap. If insufficient memory is free in @a heap to allocate
* a block of the requested size, then NULL is returned.
* *
* @param[in] heap is the heap to operate upon * If the alignment parameter @a alignment is not equal to zero, the allocated
* @param[in] size is the amount of memory to allocate in bytes * memory area will begin at an address aligned by this value.
* @return NULL if unsuccessful and a pointer to the block if successful
*/
void *_Heap_Allocate( Heap_Control *heap, uintptr_t size );
/**
* This function attempts to allocate a memory block of @a size bytes from
* @a heap so that the start of the user memory is aligned on the
* @a alignment boundary. If @a alignment is 0, it is set to CPU_ALIGNMENT.
* Any other value of @a alignment is taken "as is", i.e., even odd
* alignments are possible.
* Returns pointer to the start of the memory block if success, NULL if
* failure.
* *
* @param[in] heap is the heap to operate upon * If the boundary parameter @a boundary is not equal to zero, the allocated
* @param[in] size is the amount of memory to allocate in bytes * memory area will fulfill a boundary constraint. The boundary value
* @param[in] alignment the required alignment * specifies the set of addresses which are aligned by the boundary value. The
* @return NULL if unsuccessful and a pointer to the block if successful * interior of the allocated memory area will not contain an element of this
* set. The begin or end address of the area may be a member of the set.
*
* A size value of zero will return a unique address which may be freed with
* _Heap_Free().
*
* Returns a pointer to the begin of the allocated memory area, or @c NULL if
* no memory is available or the parameters are inconsistent.
*/ */
void *_Heap_Allocate_aligned( void *_Heap_Allocate_aligned_with_boundary(
Heap_Control *heap, Heap_Control *heap,
uintptr_t size, uintptr_t size,
uintptr_t alignment uintptr_t alignment,
uintptr_t boundary
);
#define _Heap_Allocate_aligned( heap, size, alignment ) \
_Heap_Allocate_aligned_with_boundary( heap, size, alignment, 0 )
#define _Heap_Allocate( heap, size ) \
_Heap_Allocate_aligned_with_boundary( heap, size, 0, 0 )
/**
* @brief Frees the allocated memory area starting at @a addr in the heap
* @a heap.
*
* Inappropriate values for @a addr may corrupt the heap.
*
* Returns @c true in case of success, and @c false otherwise.
*/
bool _Heap_Free( Heap_Control *heap, void *addr );
/**
* @brief Walks the heap @a heap to verify its integrity.
*
* If @a dump is @c true, then diagnostic messages will be printed to standard
* output. In this case @a source is used to mark the output lines.
*
* Returns @c true if no errors occured, and @c false if the heap is corrupt.
*/
bool _Heap_Walk(
Heap_Control *heap,
int source,
bool dump
); );
/** /**
* This function sets @a size to the size of the block of allocatable area * @brief Returns information about used and free blocks for the heap @a heap
* which begins at @a starting_address. The size returned in @a *size could * in @a info.
* be greater than the size requested for allocation. */
* Returns true if the @a starting_address is in the heap, and false void _Heap_Get_information(
* otherwise. Heap_Control *heap,
Heap_Information_block *info
);
/**
* @brief Returns information about free blocks for the heap @a heap in
* @a info.
*/
void _Heap_Get_free_information(
Heap_Control *heap,
Heap_Information *info
);
/**
* @brief Returns the size of the allocatable memory area starting at @a addr
* in @a size.
* *
* @param[in] heap is the heap to operate upon * The size value may be greater than the initially requested size in
* @param[in] starting_address is the starting address of the user block * _Heap_Allocate_aligned_with_boundary().
* to obtain the size of *
* @param[in] size points to a user area to return the size in * Inappropriate values for @a addr will not corrupt the heap, but may yield
* @return true if successfully able to determine the size, false otherwise * invalid size values.
* @return *size filled in with the size of the user area for this block *
* Returns @c true if successful, and @c false otherwise.
*/ */
bool _Heap_Size_of_alloc_area( bool _Heap_Size_of_alloc_area(
Heap_Control *heap, Heap_Control *heap,
void *area_begin, void *addr,
uintptr_t *size uintptr_t *size
); );
/** /**
* This function tries to resize in place the block that is pointed to by the * @brief Resizes the block of the allocated memory area starting at @a addr.
* @a starting_address to the new @a size.
* *
* @param[in] heap is the heap to operate upon * The new memory area will have a size of at least @a size bytes. A resize
* @param[in] starting_address is the starting address of the user block * may be impossible and depends on the current heap usage.
* to be resized *
* @param[in] size is the new size * The size available for allocation in the current block before the resize
* @param[in] old_mem_size points to a user area to return the size of the * will be returned in @a old_size. The size available for allocation in
* user memory area of the block before resizing. * the resized block will be returned in @a new_size. If the resize was not
* @param[in] avail_mem_size points to a user area to return the size of * successful, then a value of zero will be returned in @a new_size.
* the user memory area of the free block that has been enlarged or *
* created due to resizing, 0 if none. * Inappropriate values for @a addr may corrupt the heap.
* @return HEAP_RESIZE_SUCCESSFUL if successfully able to resize the block,
* HEAP_RESIZE_UNSATISFIED if the block can't be resized in place,
* HEAP_RESIZE_FATAL_ERROR if failure
* @return *old_mem_size filled in with the size of the user memory area of
* the block before resizing.
* @return *avail_mem_size filled in with the size of the user memory area
* of the free block that has been enlarged or created due to
* resizing, 0 if none.
*/ */
Heap_Resize_status _Heap_Resize_block( Heap_Resize_status _Heap_Resize_block(
Heap_Control *heap, Heap_Control *heap,
void *starting_address, void *addr,
uintptr_t size, uintptr_t size,
uintptr_t *old_mem_size, uintptr_t *old_size,
uintptr_t *avail_mem_size uintptr_t *new_size
);
/**
* This routine returns the block of memory which begins
* at @a alloc_area_begin to @a heap. Any coalescing which is
* possible with the freeing of this routine is performed.
*
* @param[in] heap is the heap to operate upon
* @param[in] start_address is the starting address of the user block
* to free
* @return true if successfully freed, false otherwise
*/
bool _Heap_Free( Heap_Control *heap, void *alloc_area_begin );
/**
* This routine walks the heap to verify its integrity.
*
* @param[in] heap is the heap to operate upon
* @param[in] source is a user specified integer which may be used to
* indicate where in the application this was invoked from
* @param[in] do_dump is set to true if errors should be printed
* @return true if the test passed fine, false otherwise.
*/
bool _Heap_Walk(
Heap_Control *heap,
int source,
bool do_dump
);
/**
* This routine walks the heap and tots up the free and allocated
* sizes.
*
* @param[in] heap pointer to heap header
* @param[in] the_info pointer to a status information area
* @return *the_info is filled with status information
* @return 0=success, otherwise heap is corrupt.
*/
Heap_Get_information_status _Heap_Get_information(
Heap_Control *heap,
Heap_Information_block *the_info
);
/**
* This heap routine returns information about the free blocks
* in the specified heap.
*
* @param[in] heap pointer to heap header.
* @param[in] info pointer to the free block information.
*
* @return free block information filled in.
*/
void _Heap_Get_free_information(
Heap_Control *heap,
Heap_Information *info
); );
#if !defined(__RTEMS_APPLICATION__) #if !defined(__RTEMS_APPLICATION__)
@@ -462,36 +470,20 @@ void _Heap_Get_free_information(
#include <rtems/score/heap.inl> #include <rtems/score/heap.inl>
/** /**
* @brief Returns the minimal block size for a block which may contain an area * @brief Allocates the memory area starting at @a alloc_begin of size
* of size @a alloc_size for allocation, or zero in case of an overflow. * @a alloc_size bytes in the block @a block.
* *
* Uses the heap values @a page_size and @a min_block_size. * The block may be split up into multiple blocks.
*
* Inappropriate values for @a alloc_begin or @a alloc_size may corrupt the
* heap.
*
* Returns the block containing the allocated memory area.
*/ */
uintptr_t _Heap_Calc_block_size( Heap_Block *_Heap_Block_allocate(
uintptr_t alloc_size,
uintptr_t page_size,
uintptr_t min_block_size
);
/**
* This method allocates a block of size @a alloc_size from @a the_block
* belonging to @a heap. Split @a the_block if possible, otherwise
* allocate it entirely. When split, make the lower part used, and leave
* the upper part free.
*
* This is an internal routines used by _Heap_Allocate() and
* _Heap_Allocate_aligned(). Refer to 'heap.c' for details.
*
* @param[in] heap is the heap to operate upon
* @param[in] the_block is the block to allocates the requested size from
* @param[in] alloc_size is the requested number of bytes to take out of
* the block
*
* @return This methods returns the size of the allocated block.
*/
uintptr_t _Heap_Block_allocate(
Heap_Control *heap, Heap_Control *heap,
Heap_Block *block, Heap_Block *block,
uintptr_t alloc_begin,
uintptr_t alloc_size uintptr_t alloc_size
); );

View File

@@ -29,6 +29,12 @@
extern "C" { extern "C" {
#endif #endif
/**
* @defgroup Score SuperCore
*
* @brief Provides services for all APIs.
*/
/** /**
* The following type defines the control block used to manage * The following type defines the control block used to manage
* object names. * object names.

View File

@@ -1,9 +1,12 @@
/** /**
* @file rtems/score/protectedheap.h * @file
* *
* This include file contains the information pertaining to the * @ingroup ScoreProtHeap
* Protected Heap Handler.
* *
* @brief Protected Heap Handler API.
*/
/*
* COPYRIGHT (c) 1989-2007. * COPYRIGHT (c) 1989-2007.
* On-Line Applications Research Corporation (OAR). * On-Line Applications Research Corporation (OAR).
* *
@@ -20,210 +23,125 @@
#include <rtems/score/heap.h> #include <rtems/score/heap.h>
#include <rtems/score/apimutex.h> #include <rtems/score/apimutex.h>
/**
* @defgroup ScoreProtHeap Protected Heap Handler
*
* This handler encapsulates functionality which provides the foundation
* Protected Heap services.
*
* It is a simple wrapper for the help with the addition of the
* allocation mutex being used for protection.
*/
/**@{*/
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
#endif #endif
/** /**
* This routine initializes @a the_heap record to manage the * @defgroup ScoreProtHeap Protected Heap Handler
* contiguous heap of @a size bytes which starts at @a starting_address.
* Blocks of memory are allocated from the heap in multiples of
* @a page_size byte units. If @a page_size is 0 or is not multiple of
* CPU_ALIGNMENT, it's aligned up to the nearest CPU_ALIGNMENT boundary.
* *
* @param[in] the_heap is the heap to operate upon * @ingroup ScoreHeap
* @param[in] starting_address is the starting address of the memory for
* the heap
* @param[in] size is the size in bytes of the memory area for the heap
* @param[in] page_size is the size in bytes of the allocation unit
* *
* @return This method returns the maximum memory available. If * @brief Provides protected heap services.
* unsuccessful, 0 will be returned. *
* The @ref ScoreAllocatorMutex is used to protect the heap accesses.
*
* @{
*/ */
static inline uint32_t _Protected_heap_Initialize(
Heap_Control *the_heap, /**
void *starting_address, * @brief See _Heap_Initialize().
intptr_t size, */
uint32_t page_size RTEMS_INLINE_ROUTINE uintptr_t _Protected_heap_Initialize(
Heap_Control *heap,
void *area_begin,
uintptr_t area_size,
uintptr_t page_size
) )
{ {
return _Heap_Initialize( the_heap, starting_address, size, page_size ); return _Heap_Initialize( heap, area_begin, area_size, page_size );
} }
/** /**
* This routine grows @a the_heap memory area using the size bytes which * @brief See _Heap_Extend().
* begin at @a starting_address.
* *
* @param[in] the_heap is the heap to operate upon * Returns @a true in case of success, and @a false otherwise.
* @param[in] starting_address is the starting address of the memory
* to add to the heap
* @param[in] size is the size in bytes of the memory area to add
* @return a status indicating success or the reason for failure
*/ */
bool _Protected_heap_Extend( bool _Protected_heap_Extend(
Heap_Control *the_heap, Heap_Control *heap,
void *starting_address, void *area_begin,
intptr_t size uintptr_t area_size
); );
/** /**
* This function attempts to allocate a block of @a size bytes from * @brief See _Heap_Allocate_aligned_with_boundary().
* @a the_heap. If insufficient memory is free in @a the_heap to allocate
* a block of the requested size, then NULL is returned.
*
* @param[in] the_heap is the heap to operate upon
* @param[in] size is the amount of memory to allocate in bytes
* @return NULL if unsuccessful and a pointer to the block if successful
*/ */
void *_Protected_heap_Allocate( void *_Protected_heap_Allocate(
Heap_Control *the_heap, Heap_Control *heap,
intptr_t size uintptr_t size
); );
/** /**
* This function attempts to allocate a memory block of @a size bytes from * @brief See _Heap_Allocate_aligned_with_boundary().
* @a the_heap so that the start of the user memory is aligned on the
* @a alignment boundary. If @a alignment is 0, it is set to CPU_ALIGNMENT.
* Any other value of @a alignment is taken "as is", i.e., even odd
* alignments are possible.
* Returns pointer to the start of the memory block if success, NULL if
* failure.
*
* @param[in] the_heap is the heap to operate upon
* @param[in] size is the amount of memory to allocate in bytes
* @param[in] alignment the required alignment
* @return NULL if unsuccessful and a pointer to the block if successful
*/ */
void *_Protected_heap_Allocate_aligned( void *_Protected_heap_Allocate_aligned(
Heap_Control *the_heap, Heap_Control *heap,
intptr_t size, uintptr_t size,
uint32_t alignment uintptr_t alignment
); );
/** /**
* This function sets @a *size to the size of the block of user memory * @brief See _Heap_Size_of_alloc_area().
* which begins at @a starting_address. The size returned in @a *size could
* be greater than the size requested for allocation.
* Returns true if the @a starting_address is in the heap, and false
* otherwise.
*
* @param[in] the_heap is the heap to operate upon
* @param[in] starting_address is the starting address of the user block
* to obtain the size of
* @param[in] size points to a user area to return the size in
* @return true if successfully able to determine the size, false otherwise
* @return *size filled in with the size of the user area for this block
*/ */
bool _Protected_heap_Get_block_size( bool _Protected_heap_Get_block_size(
Heap_Control *the_heap, Heap_Control *heap,
void *starting_address, void *addr,
intptr_t *size uintptr_t *size
); );
/** /**
* This function tries to resize in place the block that is pointed to by the * @brief See _Heap_Resize_block().
* @a starting_address to the new @a size.
* *
* @param[in] the_heap is the heap to operate upon * Returns @a true in case of success, and @a false otherwise.
* @param[in] starting_address is the starting address of the user block
* to be resized
* @param[in] size is the new size
*
* @return true if successfully able to resize the block.
* false if the block can't be resized in place.
*/ */
bool _Protected_heap_Resize_block( bool _Protected_heap_Resize_block(
Heap_Control *the_heap, Heap_Control *heap,
void *starting_address, void *addr,
intptr_t size uintptr_t size
); );
/** /**
* This routine returns the block of memory which begins * @brief See _Heap_Free().
* at @a starting_address to @a the_heap. Any coalescing which is
* possible with the freeing of this routine is performed.
* *
* @param[in] the_heap is the heap to operate upon * Returns @a true in case of success, and @a false otherwise.
* @param[in] start_address is the starting address of the user block
* to free
* @return true if successfully freed, false otherwise
*/ */
bool _Protected_heap_Free( bool _Protected_heap_Free( Heap_Control *heap, void *addr );
Heap_Control *the_heap,
void *start_address
);
/** /**
* This routine walks the heap to verify its integrity. * @brief See _Heap_Walk().
*
* @param[in] the_heap is the heap to operate upon
* @param[in] source is a user specified integer which may be used to
* indicate where in the application this was invoked from
* @param[in] do_dump is set to true if errors should be printed
* @return true if the test passed fine, false otherwise.
*/ */
bool _Protected_heap_Walk( bool _Protected_heap_Walk( Heap_Control *heap, int source, bool dump );
Heap_Control *the_heap,
int source,
bool do_dump
);
/** /**
* This routine walks the heap and tots up the free and allocated * @brief See _Heap_Get_information().
* sizes.
* *
* @param[in] the_heap pointer to heap header * Returns @a true in case of success, and @a false otherwise.
* @param[in] the_info pointer to a status information area
*
* @return true if successfully able to return information
*/ */
bool _Protected_heap_Get_information( bool _Protected_heap_Get_information(
Heap_Control *the_heap, Heap_Control *heap,
Heap_Information_block *the_info Heap_Information_block *info
); );
/** /**
* This heap routine returns information about the free blocks * @brief See _Heap_Get_free_information().
* in the specified heap.
* *
* @param[in] the_heap pointer to heap header. * Returns @a true in case of success, and @a false otherwise.
* @param[in] info pointer to the free block information.
*
* @return free block information filled in.
*/ */
bool _Protected_heap_Get_free_information( bool _Protected_heap_Get_free_information(
Heap_Control *the_heap, Heap_Control *heap,
Heap_Information *info Heap_Information *info
); );
/** /**
* This function returns the maximum size of the protected heap. * @brief See _Heap_Get_size().
*
* @param[in] the_heap points to the heap being operated upon
*
* @return This method returns the total amount of memory
* allocated to the heap.
*/ */
uint32_t _Protected_heap_Get_size( uintptr_t _Protected_heap_Get_size( Heap_Control *heap );
Heap_Control *the_heap
); /** @} */
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif
/**@}*/
#endif #endif
/* end of include file */ /* end of include file */

View File

@@ -1,8 +1,9 @@
/** /**
* @file * @file
* *
* @brief Static inline implementations of the inlined routines from the heap * @ingroup ScoreHeap
* handler. *
* @brief Heap Handler API.
*/ */
/* /*
@@ -41,17 +42,17 @@ RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Free_list_tail( Heap_Control *heap )
return &heap->free_list; return &heap->free_list;
} }
RTEMS_INLINE_ROUTINE Heap_Block *_Heap_First_free_block( Heap_Control *heap ) RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Free_list_first( Heap_Control *heap )
{ {
return _Heap_Free_list_head(heap)->next; return _Heap_Free_list_head(heap)->next;
} }
RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Last_free_block( Heap_Control *heap ) RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Free_list_last( Heap_Control *heap )
{ {
return _Heap_Free_list_tail(heap)->prev; return _Heap_Free_list_tail(heap)->prev;
} }
RTEMS_INLINE_ROUTINE void _Heap_Block_remove_from_free_list( Heap_Block *block ) RTEMS_INLINE_ROUTINE void _Heap_Free_list_remove( Heap_Block *block )
{ {
Heap_Block *next = block->next; Heap_Block *next = block->next;
Heap_Block *prev = block->prev; Heap_Block *prev = block->prev;
@@ -60,7 +61,7 @@ RTEMS_INLINE_ROUTINE void _Heap_Block_remove_from_free_list( Heap_Block *block )
next->prev = prev; next->prev = prev;
} }
RTEMS_INLINE_ROUTINE void _Heap_Block_replace_in_free_list( RTEMS_INLINE_ROUTINE void _Heap_Free_list_replace(
Heap_Block *old_block, Heap_Block *old_block,
Heap_Block *new_block Heap_Block *new_block
) )
@@ -75,16 +76,16 @@ RTEMS_INLINE_ROUTINE void _Heap_Block_replace_in_free_list(
prev->next = new_block; prev->next = new_block;
} }
RTEMS_INLINE_ROUTINE void _Heap_Block_insert_after( RTEMS_INLINE_ROUTINE void _Heap_Free_list_insert_after(
Heap_Block *prev_block, Heap_Block *block_before,
Heap_Block *new_block Heap_Block *new_block
) )
{ {
Heap_Block *next = prev_block->next; Heap_Block *next = block_before->next;
new_block->next = next; new_block->next = next;
new_block->prev = prev_block; new_block->prev = block_before;
prev_block->next = new_block; block_before->next = new_block;
next->prev = new_block; next->prev = new_block;
} }
@@ -122,60 +123,64 @@ RTEMS_INLINE_ROUTINE uintptr_t _Heap_Align_down(
* @brief Returns the block which is @a offset away from @a block. * @brief Returns the block which is @a offset away from @a block.
*/ */
RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Block_at( RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Block_at(
const Heap_Block *block, Heap_Block *block,
uintptr_t offset uintptr_t offset
) )
{ {
return (Heap_Block *) ((uintptr_t) block + offset); return (Heap_Block *) ((uintptr_t) block + offset);
} }
/**
* @brief Returns the begin of the allocatable area of @a block.
*/
RTEMS_INLINE_ROUTINE uintptr_t _Heap_Alloc_area_of_block( RTEMS_INLINE_ROUTINE uintptr_t _Heap_Alloc_area_of_block(
Heap_Block *block const Heap_Block *block
) )
{ {
return (uintptr_t) block + HEAP_BLOCK_ALLOC_AREA_OFFSET; return (uintptr_t) block + HEAP_BLOCK_HEADER_SIZE;
} }
/**
* @brief Returns the block associated with the allocatable area starting at
* @a alloc_area_begin inside a heap with a page size of @a page_size.
*/
RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Block_of_alloc_area( RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Block_of_alloc_area(
uintptr_t alloc_area_begin, uintptr_t alloc_begin,
uintptr_t page_size uintptr_t page_size
) )
{ {
return (Heap_Block *) (_Heap_Align_down( alloc_area_begin, page_size ) return (Heap_Block *) (_Heap_Align_down( alloc_begin, page_size )
- HEAP_BLOCK_ALLOC_AREA_OFFSET); - HEAP_BLOCK_HEADER_SIZE);
} }
RTEMS_INLINE_ROUTINE bool _Heap_Is_prev_used( Heap_Block *block ) RTEMS_INLINE_ROUTINE bool _Heap_Is_prev_used( const Heap_Block *block )
{ {
return block->size_and_flag & HEAP_PREV_BLOCK_USED; return block->size_and_flag & HEAP_PREV_BLOCK_USED;
} }
RTEMS_INLINE_ROUTINE uintptr_t _Heap_Block_size( Heap_Block *block ) RTEMS_INLINE_ROUTINE uintptr_t _Heap_Block_size( const Heap_Block *block )
{ {
return block->size_and_flag & ~HEAP_PREV_BLOCK_USED; return block->size_and_flag & ~HEAP_PREV_BLOCK_USED;
} }
RTEMS_INLINE_ROUTINE bool _Heap_Is_block_in_heap( RTEMS_INLINE_ROUTINE bool _Heap_Is_block_in_heap(
Heap_Control *heap, const Heap_Control *heap,
Heap_Block *block const Heap_Block *block
) )
{ {
return _Addresses_Is_in_range( block, heap->start, heap->final ); return (uintptr_t) block >= (uintptr_t) heap->first_block
&& (uintptr_t) block <= (uintptr_t) heap->last_block;
} }
/** /**
* @brief Returns the maximum size of the heap. * @brief Returns the heap area size.
*/ */
RTEMS_INLINE_ROUTINE uintptr_t _Heap_Get_size( Heap_Control *heap ) RTEMS_INLINE_ROUTINE uintptr_t _Heap_Get_size( const Heap_Control *heap )
{ {
return (uintptr_t) heap->end - (uintptr_t) heap->begin; return heap->area_end - heap->area_begin;
}
RTEMS_INLINE_ROUTINE uintptr_t _Heap_Max( uintptr_t a, uintptr_t b )
{
return a > b ? a : b;
}
RTEMS_INLINE_ROUTINE uintptr_t _Heap_Min( uintptr_t a, uintptr_t b )
{
return a < b ? a : b;
} }
/** @} */ /** @} */

View File

@@ -1,9 +1,17 @@
/* /**
* Heap Handler * @file
* *
* @ingroup ScoreHeap
*
* @brief Heap Handler implementation.
*/
/*
* COPYRIGHT (c) 1989-2009. * COPYRIGHT (c) 1989-2009.
* On-Line Applications Research Corporation (OAR). * On-Line Applications Research Corporation (OAR).
* *
* Copyright (c) 2009 embedded brains GmbH.
*
* The license and distribution terms for this file may be * The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at * found in the file LICENSE in this distribution or at
* http://www.rtems.com/license/LICENSE. * http://www.rtems.com/license/LICENSE.
@@ -18,6 +26,10 @@
#include <rtems/system.h> #include <rtems/system.h>
#include <rtems/score/heap.h> #include <rtems/score/heap.h>
#if CPU_ALIGNMENT == 0 || CPU_ALIGNMENT % 4 != 0
#error "invalid CPU_ALIGNMENT value"
#endif
static uint32_t instance = 0; static uint32_t instance = 0;
/*PAGE /*PAGE
@@ -113,16 +125,19 @@ static uint32_t instance = 0;
uintptr_t _Heap_Initialize( uintptr_t _Heap_Initialize(
Heap_Control *heap, Heap_Control *heap,
void *area_begin, void *heap_area_begin_ptr,
uintptr_t area_size, uintptr_t heap_area_size,
uintptr_t page_size uintptr_t page_size
) )
{ {
Heap_Statistics * const stats = &heap->stats; Heap_Statistics *const stats = &heap->stats;
uintptr_t heap_area_begin = (uintptr_t) area_begin; uintptr_t const heap_area_begin = (uintptr_t) heap_area_begin_ptr;
uintptr_t heap_area_end = heap_area_begin + area_size; uintptr_t const heap_area_end = heap_area_begin + heap_area_size;
uintptr_t alloc_area_begin = heap_area_begin + HEAP_BLOCK_ALLOC_AREA_OFFSET; uintptr_t alloc_area_begin = heap_area_begin + HEAP_BLOCK_HEADER_SIZE;
uintptr_t alloc_area_size = 0; uintptr_t alloc_area_size = 0;
uintptr_t first_block_begin = 0;
uintptr_t first_block_size = 0;
uintptr_t min_block_size = 0;
uintptr_t overhead = 0; uintptr_t overhead = 0;
Heap_Block *first_block = NULL; Heap_Block *first_block = NULL;
Heap_Block *second_block = NULL; Heap_Block *second_block = NULL;
@@ -132,47 +147,50 @@ uintptr_t _Heap_Initialize(
} else { } else {
page_size = _Heap_Align_up( page_size, CPU_ALIGNMENT ); page_size = _Heap_Align_up( page_size, CPU_ALIGNMENT );
} }
min_block_size = _Heap_Align_up( sizeof( Heap_Block ), page_size );
heap->min_block_size = _Heap_Align_up( sizeof( Heap_Block ), page_size );
alloc_area_begin = _Heap_Align_up( alloc_area_begin, page_size ); alloc_area_begin = _Heap_Align_up( alloc_area_begin, page_size );
overhead = HEAP_LAST_BLOCK_OVERHEAD first_block_begin = alloc_area_begin - HEAP_BLOCK_HEADER_SIZE;
+ (alloc_area_begin - HEAP_BLOCK_ALLOC_AREA_OFFSET - heap_area_begin); overhead = HEAP_BLOCK_HEADER_SIZE + (first_block_begin - heap_area_begin);
alloc_area_size = _Heap_Align_down ( area_size - overhead, page_size ); first_block_size = heap_area_size - overhead;
first_block_size = _Heap_Align_down ( first_block_size, page_size );
alloc_area_size = first_block_size - HEAP_BLOCK_HEADER_SIZE;
if ( if (
heap_area_end < heap_area_begin heap_area_end < heap_area_begin
|| area_size < overhead || heap_area_size <= overhead
|| alloc_area_size == 0 || first_block_size < min_block_size
) { ) {
/* Invalid area or area too small */ /* Invalid area or area too small */
return 0; return 0;
} }
heap->page_size = page_size;
heap->begin = heap_area_begin;
heap->end = heap_area_end;
/* First block */ /* First block */
first_block = _Heap_Block_of_alloc_area( alloc_area_begin, page_size ); first_block = (Heap_Block *) first_block_begin;
first_block->prev_size = page_size; first_block->prev_size = page_size;
first_block->size_and_flag = alloc_area_size | HEAP_PREV_BLOCK_USED; first_block->size_and_flag = first_block_size | HEAP_PREV_BLOCK_USED;
first_block->next = _Heap_Free_list_tail( heap ); first_block->next = _Heap_Free_list_tail( heap );
first_block->prev = _Heap_Free_list_head( heap ); first_block->prev = _Heap_Free_list_head( heap );
_Heap_Free_list_head( heap )->next = first_block;
_Heap_Free_list_tail( heap )->prev = first_block;
heap->start = first_block;
/* Second and last block */ /* Second and last block */
second_block = _Heap_Block_at( first_block, alloc_area_size ); second_block = _Heap_Block_at( first_block, first_block_size );
second_block->prev_size = alloc_area_size; second_block->prev_size = first_block_size;
second_block->size_and_flag = page_size | HEAP_PREV_BLOCK_FREE; second_block->size_and_flag = page_size;
heap->final = second_block;
/* Heap control */
heap->page_size = page_size;
heap->min_block_size = min_block_size;
heap->area_begin = heap_area_begin;
heap->area_end = heap_area_end;
heap->first_block = first_block;
heap->last_block = second_block;
_Heap_Free_list_head( heap )->next = first_block;
_Heap_Free_list_tail( heap )->prev = first_block;
/* Statistics */ /* Statistics */
stats->size = area_size; stats->size = heap_area_size;
stats->free_size = alloc_area_size; stats->free_size = first_block_size;
stats->min_free_size = alloc_area_size; stats->min_free_size = first_block_size;
stats->free_blocks = 1; stats->free_blocks = 1;
stats->max_free_blocks = 1; stats->max_free_blocks = 1;
stats->used_blocks = 0; stats->used_blocks = 0;
@@ -183,9 +201,9 @@ uintptr_t _Heap_Initialize(
stats->resizes = 0; stats->resizes = 0;
stats->instance = instance++; stats->instance = instance++;
_HAssert( _Heap_Is_aligned( CPU_ALIGNMENT, 4 )); _HAssert( _Heap_Is_aligned( CPU_ALIGNMENT, 4 ) );
_HAssert( _Heap_Is_aligned( heap->page_size, CPU_ALIGNMENT )); _HAssert( _Heap_Is_aligned( heap->page_size, CPU_ALIGNMENT ) );
_HAssert( _Heap_Is_aligned( heap->min_block_size, page_size )); _HAssert( _Heap_Is_aligned( heap->min_block_size, page_size ) );
_HAssert( _HAssert(
_Heap_Is_aligned( _Heap_Alloc_area_of_block( first_block ), page_size ) _Heap_Is_aligned( _Heap_Alloc_area_of_block( first_block ), page_size )
); );
@@ -193,72 +211,142 @@ uintptr_t _Heap_Initialize(
_Heap_Is_aligned( _Heap_Alloc_area_of_block( second_block ), page_size ) _Heap_Is_aligned( _Heap_Alloc_area_of_block( second_block ), page_size )
); );
if ( !_Heap_Walk( heap, 0, false ) ) {
_Heap_Walk( heap, 0, true );
}
return alloc_area_size; return alloc_area_size;
} }
uintptr_t _Heap_Calc_block_size( static Heap_Block *_Heap_Block_split(
uintptr_t alloc_size,
uintptr_t page_size,
uintptr_t min_block_size)
{
uintptr_t block_size =
_Heap_Align_up( alloc_size + HEAP_BLOCK_USED_OVERHEAD, page_size );
if (block_size < min_block_size) {
block_size = min_block_size;
}
if (block_size > alloc_size) {
return block_size;
} else {
/* Integer overflow occurred */
return 0;
}
}
uintptr_t _Heap_Block_allocate(
Heap_Control *heap, Heap_Control *heap,
Heap_Block *block, Heap_Block *block,
uintptr_t alloc_size uintptr_t alloc_size
) )
{ {
Heap_Statistics * const stats = &heap->stats; uintptr_t const page_size = heap->page_size;
uintptr_t const min_block_size = heap->min_block_size;
uintptr_t const min_alloc_size = min_block_size - HEAP_BLOCK_HEADER_SIZE;
uintptr_t const block_size = _Heap_Block_size( block ); uintptr_t const block_size = _Heap_Block_size( block );
uintptr_t const unused_size = block_size - alloc_size;
Heap_Block *next_block = _Heap_Block_at( block, block_size );
_HAssert( _Heap_Is_aligned( block_size, heap->page_size )); uintptr_t const used_size =
_HAssert( _Heap_Is_aligned( alloc_size, heap->page_size )); _Heap_Max( alloc_size, min_alloc_size ) + HEAP_BLOCK_HEADER_SIZE;
_HAssert( alloc_size <= block_size ); uintptr_t const used_block_size = _Heap_Align_up( used_size, page_size );
_HAssert( _Heap_Is_prev_used( block ));
if (unused_size >= heap->min_block_size) { uintptr_t const free_size = block_size + HEAP_BLOCK_SIZE_OFFSET - used_size;
/* uintptr_t const free_size_limit = min_block_size + HEAP_BLOCK_SIZE_OFFSET;
* Split the block so that the upper part is still free, and the lower part
* becomes used. This is slightly less optimal than leaving the lower part Heap_Block *const next_block = _Heap_Block_at( block, block_size );
* free as it requires replacing block in the free blocks list, but it
* makes it possible to reuse this code in the _Heap_Resize_block(). _HAssert( used_size <= block_size + HEAP_BLOCK_SIZE_OFFSET );
*/ _HAssert( used_size + free_size == block_size + HEAP_BLOCK_SIZE_OFFSET );
Heap_Block *new_block = _Heap_Block_at( block, alloc_size );
block->size_and_flag = alloc_size | HEAP_PREV_BLOCK_USED; if ( free_size >= free_size_limit ) {
new_block->size_and_flag = unused_size | HEAP_PREV_BLOCK_USED; uintptr_t const free_block_size = block_size - used_block_size;
next_block->prev_size = unused_size; Heap_Block *const free_block = _Heap_Block_at( block, used_block_size );
_Heap_Block_replace_in_free_list( block, new_block );
_HAssert( used_block_size + free_block_size == block_size );
block->size_and_flag = used_block_size
| (block->size_and_flag & HEAP_PREV_BLOCK_USED);
free_block->size_and_flag = free_block_size | HEAP_PREV_BLOCK_USED;
next_block->prev_size = free_block_size;
return free_block;
} else { } else {
next_block->size_and_flag |= HEAP_PREV_BLOCK_USED; next_block->size_and_flag |= HEAP_PREV_BLOCK_USED;
alloc_size = block_size;
_Heap_Block_remove_from_free_list( block ); return NULL;
}
}
static Heap_Block *_Heap_Block_allocate_from_begin(
Heap_Control *heap,
Heap_Block *block,
uintptr_t alloc_size
)
{
Heap_Block *const free_block = _Heap_Block_split( heap, block, alloc_size );
if ( free_block != NULL ) {
_Heap_Free_list_replace( block, free_block );
} else {
Heap_Statistics *const stats = &heap->stats;
_Heap_Free_list_remove( block );
/* Statistics */ /* Statistics */
--stats->free_blocks; --stats->free_blocks;
} }
return block;
}
static Heap_Block *_Heap_Block_allocate_from_end(
Heap_Control *heap,
Heap_Block *block,
uintptr_t alloc_begin,
uintptr_t alloc_size
)
{
uintptr_t const block_begin = (uintptr_t) block;
uintptr_t block_size = _Heap_Block_size( block );
uintptr_t block_end = block_begin + block_size;
Heap_Block *const new_block =
_Heap_Block_of_alloc_area( alloc_begin, heap->page_size );
uintptr_t const new_block_begin = (uintptr_t) new_block;
uintptr_t const new_block_size = block_end - new_block_begin;
Heap_Block *free_block = NULL;
block_end = new_block_begin;
block_size = block_end - block_begin;
_HAssert( block_size >= heap->min_block_size );
_HAssert( new_block_size >= heap->min_block_size );
block->size_and_flag = block_size | HEAP_PREV_BLOCK_USED;
new_block->prev_size = block_size;
new_block->size_and_flag = new_block_size;
free_block = _Heap_Block_split( heap, new_block, alloc_size );
if ( free_block != NULL ) {
_Heap_Free_list_insert_after( block, free_block );
}
return new_block;
}
Heap_Block *_Heap_Block_allocate(
Heap_Control *heap,
Heap_Block *block,
uintptr_t alloc_begin,
uintptr_t alloc_size
)
{
Heap_Statistics *const stats = &heap->stats;
uintptr_t const alloc_area_begin = _Heap_Alloc_area_of_block( block );
uintptr_t const alloc_area_offset = alloc_begin - alloc_area_begin;
_HAssert( _Heap_Is_prev_used( block ) );
_HAssert( alloc_area_begin <= alloc_begin );
if ( alloc_area_offset < heap->page_size ) {
alloc_size += alloc_area_offset;
block = _Heap_Block_allocate_from_begin( heap, block, alloc_size );
} else {
block = _Heap_Block_allocate_from_end( heap, block, alloc_begin, alloc_size );
}
/* Statistics */ /* Statistics */
++stats->used_blocks; ++stats->used_blocks;
stats->free_size -= alloc_size; stats->free_size -= _Heap_Block_size( block );
if(stats->min_free_size > stats->free_size) { if ( stats->min_free_size > stats->free_size ) {
stats->min_free_size = stats->free_size; stats->min_free_size = stats->free_size;
} }
return alloc_size; return block;
} }

View File

@@ -1,9 +1,17 @@
/* /**
* Heap Handler * @file
* *
* @ingroup ScoreHeap
*
* @brief Heap Handler implementation.
*/
/*
* COPYRIGHT (c) 1989-1999. * COPYRIGHT (c) 1989-1999.
* On-Line Applications Research Corporation (OAR). * On-Line Applications Research Corporation (OAR).
* *
* Copyright (c) 2009 embedded brains GmbH.
*
* The license and distribution terms for this file may be * The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at * found in the file LICENSE in this distribution or at
* http://www.rtems.com/license/LICENSE. * http://www.rtems.com/license/LICENSE.
@@ -19,48 +27,204 @@
#include <rtems/score/sysstate.h> #include <rtems/score/sysstate.h>
#include <rtems/score/heap.h> #include <rtems/score/heap.h>
void *_Heap_Allocate( Heap_Control *heap, uintptr_t size ) #ifdef RTEMS_HEAP_DEBUG
static void _Heap_Check_allocation(
const Heap_Control *heap,
const Heap_Block *block,
uintptr_t alloc_begin,
uintptr_t alloc_size,
uintptr_t alignment,
uintptr_t boundary
)
{
uintptr_t const min_block_size = heap->min_block_size;
uintptr_t const page_size = heap->page_size;
uintptr_t const block_begin = (uintptr_t) block;
uintptr_t const block_size = _Heap_Block_size( block );
uintptr_t const block_end = block_begin + block_size;
uintptr_t const alloc_end = alloc_begin + alloc_size;
uintptr_t const alloc_area_begin = _Heap_Alloc_area_of_block( block );
uintptr_t const alloc_area_offset = alloc_begin - alloc_area_begin;
uintptr_t const alloc_area_size = alloc_area_offset + alloc_size;
_HAssert( block_size >= min_block_size );
_HAssert( block_begin < block_end );
_HAssert(
_Heap_Is_aligned( block_begin + HEAP_BLOCK_HEADER_SIZE, page_size )
);
_HAssert(
_Heap_Is_aligned( block_size, page_size )
);
_HAssert( alloc_end <= block_end + HEAP_BLOCK_SIZE_OFFSET );
_HAssert( alloc_area_begin == block_begin + HEAP_BLOCK_HEADER_SIZE);
_HAssert( alloc_area_offset < page_size );
_HAssert( _Heap_Is_aligned( alloc_area_begin, page_size ) );
if ( alignment == 0 ) {
_HAssert( alloc_begin == alloc_area_begin );
} else {
_HAssert( _Heap_Is_aligned( alloc_begin, alignment ) );
}
if ( boundary != 0 ) {
uintptr_t boundary_line = _Heap_Align_down( alloc_end, boundary );
_HAssert( alloc_size <= boundary );
_HAssert( boundary_line <= alloc_begin || alloc_end <= boundary_line );
}
}
#else
#define _Heap_Check_allocation( h, b, ab, as, ag, bd ) ((void) 0)
#endif
static uintptr_t _Heap_Check_block(
const Heap_Control *heap,
const Heap_Block *block,
uintptr_t alloc_size,
uintptr_t alignment,
uintptr_t boundary
)
{
uintptr_t const page_size = heap->page_size;
uintptr_t const min_block_size = heap->min_block_size;
uintptr_t const block_begin = (uintptr_t) block;
uintptr_t const block_size = _Heap_Block_size( block );
uintptr_t const block_end = block_begin + block_size;
uintptr_t const alloc_begin_floor = _Heap_Alloc_area_of_block( block );
uintptr_t const alloc_begin_ceiling = block_end - min_block_size
+ HEAP_BLOCK_HEADER_SIZE + page_size - 1;
uintptr_t alloc_end = block_end + HEAP_BLOCK_SIZE_OFFSET;
uintptr_t alloc_begin = alloc_end - alloc_size;
alloc_begin = _Heap_Align_down( alloc_begin, alignment );
/* Ensure that the we have a valid new block at the end */
if ( alloc_begin > alloc_begin_ceiling ) {
alloc_begin = _Heap_Align_down( alloc_begin_ceiling, alignment );
}
alloc_end = alloc_begin + alloc_size;
/* Ensure boundary constaint */
if ( boundary != 0 ) {
uintptr_t const boundary_floor = alloc_begin_floor + alloc_size;
uintptr_t boundary_line = _Heap_Align_down( alloc_end, boundary );
while ( alloc_begin < boundary_line && boundary_line < alloc_end ) {
if ( boundary_line < boundary_floor ) {
return 0;
}
alloc_begin = boundary_line - alloc_size;
alloc_begin = _Heap_Align_down( alloc_begin, alignment );
alloc_end = alloc_begin + alloc_size;
boundary_line = _Heap_Align_down( alloc_end, boundary );
}
}
/* Ensure that the we have a valid new block at the beginning */
if ( alloc_begin >= alloc_begin_floor ) {
uintptr_t const alloc_block_begin =
(uintptr_t) _Heap_Block_of_alloc_area( alloc_begin, page_size );
uintptr_t const free_size = alloc_block_begin - block_begin;
if ( free_size >= min_block_size || free_size == 0 ) {
return alloc_begin;
}
}
return 0;
}
void *_Heap_Allocate_aligned_with_boundary(
Heap_Control *heap,
uintptr_t alloc_size,
uintptr_t alignment,
uintptr_t boundary
)
{ {
Heap_Statistics *const stats = &heap->stats; Heap_Statistics *const stats = &heap->stats;
Heap_Block * const tail = _Heap_Free_list_tail( heap ); Heap_Block *const free_list_tail = _Heap_Free_list_tail( heap );
Heap_Block *block = _Heap_First_free_block( heap ); Heap_Block *block = _Heap_Free_list_first( heap );
uintptr_t const block_size_floor = alloc_size + HEAP_BLOCK_HEADER_SIZE
- HEAP_BLOCK_SIZE_OFFSET;
uintptr_t const page_size = heap->page_size;
uintptr_t alloc_begin = 0;
uint32_t search_count = 0; uint32_t search_count = 0;
void *alloc_area_begin_ptr = NULL;
size = _Heap_Calc_block_size( size, heap->page_size, heap->min_block_size ); if ( block_size_floor < alloc_size ) {
if( size == 0 ) { /* Integer overflow occured */
return NULL; return NULL;
} }
/* if ( boundary != 0 ) {
* Find large enough free block. if ( boundary < alloc_size ) {
* return NULL;
* Do not bother to mask out the HEAP_PREV_BLOCK_USED bit as it will not }
* change the result of the size comparison.
*/
while (block != tail && block->size_and_flag < size) {
_HAssert( _Heap_Is_prev_used( block ));
block = block->next; if ( alignment == 0 ) {
++search_count; alignment = page_size;
}
} }
if (block != tail) { while ( block != free_list_tail ) {
_Heap_Block_allocate( heap, block, size ); _HAssert( _Heap_Is_prev_used( block ) );
alloc_area_begin_ptr = (void *) _Heap_Alloc_area_of_block( block ); /* Statistics */
++search_count;
_HAssert( _Heap_Is_aligned( (uintptr_t) alloc_area_begin_ptr, heap->page_size ));
/*
* The HEAP_PREV_BLOCK_USED flag is always set in the block size_and_flag
* field. Thus the value is about one unit larger than the real block
* size. The greater than operator takes this into account.
*/
if ( block->size_and_flag > block_size_floor ) {
if ( alignment == 0 ) {
alloc_begin = _Heap_Alloc_area_of_block( block );
} else {
alloc_begin = _Heap_Check_block(
heap,
block,
alloc_size,
alignment,
boundary
);
}
}
if ( alloc_begin != 0 ) {
break;
}
block = block->next;
}
if ( alloc_begin != 0 ) {
block = _Heap_Block_allocate( heap, block, alloc_begin, alloc_size );
_Heap_Check_allocation(
heap,
block,
alloc_begin,
alloc_size,
alignment,
boundary
);
/* Statistics */ /* Statistics */
++stats->allocs;
stats->searches += search_count; stats->searches += search_count;
} }
/* Statistics */ /* Statistics */
if (stats->max_search < search_count) { if ( stats->max_search < search_count ) {
stats->max_search = search_count; stats->max_search = search_count;
} }
return alloc_area_begin_ptr; return (void *) alloc_begin;
} }

View File

@@ -1,3 +1,4 @@
#if 0
/* /*
* Heap Handler * Heap Handler
* *
@@ -31,10 +32,10 @@ check_result(
) )
{ {
uintptr_t const user_area = _Heap_Alloc_area_of_block(the_block); uintptr_t const user_area = _Heap_Alloc_area_of_block(the_block);
uintptr_t const block_end = the_block uintptr_t const block_end = (uintptr_t) the_block
+ _Heap_Block_size(the_block) + HEAP_BLOCK_SIZE_OFFSET; + _Heap_Block_size(the_block) + HEAP_BLOCK_SIZE_OFFSET;
uintptr_t const user_end = aligned_user_addr + size; uintptr_t const user_end = aligned_user_addr + size;
uintptr_t const heap_start = (uintptr_t) the_heap->start + HEAP_LAST_BLOCK_OVERHEAD; uintptr_t const heap_start = (uintptr_t) the_heap->start + HEAP_BLOCK_HEADER_SIZE;
uintptr_t const heap_end = (uintptr_t) the_heap->final uintptr_t const heap_end = (uintptr_t) the_heap->final
+ HEAP_BLOCK_SIZE_OFFSET; + HEAP_BLOCK_SIZE_OFFSET;
uintptr_t const page_size = the_heap->page_size; uintptr_t const page_size = the_heap->page_size;
@@ -97,7 +98,7 @@ Heap_Block *block_allocate(
/* Don't split the block as remainder is either zero or too small to be /* Don't split the block as remainder is either zero or too small to be
used as a separate free block. Change 'alloc_size' to the size of the used as a separate free block. Change 'alloc_size' to the size of the
block and remove the block from the list of free blocks. */ block and remove the block from the list of free blocks. */
_Heap_Block_remove_from_free_list(the_block); _Heap_Free_list_remove(the_block);
alloc_size = block_size; alloc_size = block_size;
stats->free_blocks -= 1; stats->free_blocks -= 1;
} }
@@ -157,7 +158,7 @@ void *_Heap_Allocate_aligned(
/* Find large enough free block that satisfies the alignment requirements. */ /* Find large enough free block that satisfies the alignment requirements. */
for (the_block = _Heap_First_free_block(the_heap), search_count = 0; for (the_block = _Heap_Free_list_first(the_heap), search_count = 0;
the_block != tail; the_block != tail;
the_block = the_block->next, ++search_count) the_block = the_block->next, ++search_count)
{ {
@@ -220,7 +221,7 @@ void *_Heap_Allocate_aligned(
/* The block is indeed acceptable: calculate the size of the block /* The block is indeed acceptable: calculate the size of the block
to be allocated and perform allocation. */ to be allocated and perform allocation. */
uintptr_t const alloc_size = uintptr_t const alloc_size =
block_end - user_addr + HEAP_BLOCK_ALLOC_AREA_OFFSET; block_end - user_addr + HEAP_BLOCK_HEADER_SIZE;
_HAssert(_Heap_Is_aligned(aligned_user_addr, alignment)); _HAssert(_Heap_Is_aligned(aligned_user_addr, alignment));
@@ -244,3 +245,4 @@ void *_Heap_Allocate_aligned(
return user_ptr; return user_ptr;
} }
#endif

View File

@@ -1,6 +1,12 @@
/* /**
* Heap Handler * @file
* *
* @ingroup ScoreHeap
*
* @brief Heap Handler implementation.
*/
/*
* COPYRIGHT (c) 1989-1999. * COPYRIGHT (c) 1989-1999.
* On-Line Applications Research Corporation (OAR). * On-Line Applications Research Corporation (OAR).
* *
@@ -28,11 +34,11 @@ Heap_Extend_status _Heap_Extend(
{ {
Heap_Statistics *const stats = &heap->stats; Heap_Statistics *const stats = &heap->stats;
uintptr_t const area_begin = (uintptr_t) area_begin_ptr; uintptr_t const area_begin = (uintptr_t) area_begin_ptr;
uintptr_t const heap_area_begin = heap->begin; uintptr_t const heap_area_begin = heap->area_begin;
uintptr_t const heap_area_end = heap->end; uintptr_t const heap_area_end = heap->area_end;
uintptr_t const new_heap_area_end = heap_area_end + area_size; uintptr_t const new_heap_area_end = heap_area_end + area_size;
uintptr_t extend_size = 0; uintptr_t extend_size = 0;
Heap_Block *const old_final = heap->final; Heap_Block *const old_final = heap->last_block;
Heap_Block *new_final = NULL; Heap_Block *new_final = NULL;
/* /*
@@ -60,10 +66,10 @@ Heap_Extend_status _Heap_Extend(
* block and free it. * block and free it.
*/ */
heap->end = new_heap_area_end; heap->area_end = new_heap_area_end;
extend_size = new_heap_area_end extend_size = new_heap_area_end
- (uintptr_t) old_final - HEAP_LAST_BLOCK_OVERHEAD; - (uintptr_t) old_final - HEAP_BLOCK_HEADER_SIZE;
extend_size = _Heap_Align_down( extend_size, heap->page_size ); extend_size = _Heap_Align_down( extend_size, heap->page_size );
*amount_extended = extend_size; *amount_extended = extend_size;
@@ -74,7 +80,7 @@ Heap_Extend_status _Heap_Extend(
new_final = _Heap_Block_at( old_final, extend_size ); new_final = _Heap_Block_at( old_final, extend_size );
new_final->size_and_flag = heap->page_size | HEAP_PREV_BLOCK_USED; new_final->size_and_flag = heap->page_size | HEAP_PREV_BLOCK_USED;
heap->final = new_final; heap->last_block = new_final;
stats->size += area_size; stats->size += area_size;
++stats->used_blocks; ++stats->used_blocks;

View File

@@ -1,6 +1,12 @@
/* /**
* Heap Handler * @file
* *
* @ingroup ScoreHeap
*
* @brief Heap Handler implementation.
*/
/*
* COPYRIGHT (c) 1989-2007. * COPYRIGHT (c) 1989-2007.
* On-Line Applications Research Corporation (OAR). * On-Line Applications Research Corporation (OAR).
* *
@@ -19,26 +25,18 @@
#include <rtems/score/sysstate.h> #include <rtems/score/sysstate.h>
#include <rtems/score/heap.h> #include <rtems/score/heap.h>
bool _Heap_Free( Heap_Control *heap, void *alloc_area_begin_ptr ) bool _Heap_Free( Heap_Control *heap, void *alloc_begin_ptr )
{ {
Heap_Statistics *const stats = &heap->stats; Heap_Statistics *const stats = &heap->stats;
uintptr_t alloc_area_begin = (uintptr_t) alloc_area_begin_ptr; uintptr_t alloc_begin = (uintptr_t) alloc_begin_ptr;
Heap_Block *block = Heap_Block *block =
_Heap_Block_of_alloc_area( alloc_area_begin, heap->page_size ); _Heap_Block_of_alloc_area( alloc_begin, heap->page_size );
Heap_Block *next_block = NULL; Heap_Block *next_block = NULL;
uintptr_t block_size = 0; uintptr_t block_size = 0;
uintptr_t next_block_size = 0; uintptr_t next_block_size = 0;
bool next_is_free = false; bool next_is_free = false;
if (
!_Addresses_Is_in_range( alloc_area_begin_ptr, heap->start, heap->final)
) {
_HAssert( alloc_area_begin_ptr != NULL );
return false;
}
if ( !_Heap_Is_block_in_heap( heap, block ) ) { if ( !_Heap_Is_block_in_heap( heap, block ) ) {
_HAssert( false );
return false; return false;
} }
@@ -56,7 +54,7 @@ bool _Heap_Free( Heap_Control *heap, void *alloc_area_begin_ptr )
} }
next_block_size = _Heap_Block_size( next_block ); next_block_size = _Heap_Block_size( next_block );
next_is_free = next_block != heap->final next_is_free = next_block != heap->last_block
&& !_Heap_Is_prev_used( _Heap_Block_at( next_block, next_block_size )); && !_Heap_Is_prev_used( _Heap_Block_at( next_block, next_block_size ));
if ( !_Heap_Is_prev_used( block ) ) { if ( !_Heap_Is_prev_used( block ) ) {
@@ -77,7 +75,7 @@ bool _Heap_Free( Heap_Control *heap, void *alloc_area_begin_ptr )
if ( next_is_free ) { /* coalesce both */ if ( next_is_free ) { /* coalesce both */
uintptr_t const size = block_size + prev_size + next_block_size; uintptr_t const size = block_size + prev_size + next_block_size;
_Heap_Block_remove_from_free_list( next_block ); _Heap_Free_list_remove( next_block );
stats->free_blocks -= 1; stats->free_blocks -= 1;
prev_block->size_and_flag = size | HEAP_PREV_BLOCK_USED; prev_block->size_and_flag = size | HEAP_PREV_BLOCK_USED;
next_block = _Heap_Block_at( prev_block, size ); next_block = _Heap_Block_at( prev_block, size );
@@ -91,14 +89,14 @@ bool _Heap_Free( Heap_Control *heap, void *alloc_area_begin_ptr )
} }
} else if ( next_is_free ) { /* coalesce next */ } else if ( next_is_free ) { /* coalesce next */
uintptr_t const size = block_size + next_block_size; uintptr_t const size = block_size + next_block_size;
_Heap_Block_replace_in_free_list( next_block, block ); _Heap_Free_list_replace( next_block, block );
block->size_and_flag = size | HEAP_PREV_BLOCK_USED; block->size_and_flag = size | HEAP_PREV_BLOCK_USED;
next_block = _Heap_Block_at( block, size ); next_block = _Heap_Block_at( block, size );
next_block->prev_size = size; next_block->prev_size = size;
} else { /* no coalesce */ } else { /* no coalesce */
/* Add 'block' to the head of the free blocks list as it tends to /* Add 'block' to the head of the free blocks list as it tends to
produce less fragmentation than adding to the tail. */ produce less fragmentation than adding to the tail. */
_Heap_Block_insert_after( _Heap_Free_list_head( heap), block ); _Heap_Free_list_insert_after( _Heap_Free_list_head( heap), block );
block->size_and_flag = block_size | HEAP_PREV_BLOCK_USED; block->size_and_flag = block_size | HEAP_PREV_BLOCK_USED;
next_block->size_and_flag &= ~HEAP_PREV_BLOCK_USED; next_block->size_and_flag &= ~HEAP_PREV_BLOCK_USED;
next_block->prev_size = block_size; next_block->prev_size = block_size;

View File

@@ -1,6 +1,12 @@
/* /**
* Heap Handler * @file
* *
* @ingroup ScoreHeap
*
* @brief Heap Handler implementation.
*/
/*
* COPYRIGHT (c) 1989-2004. * COPYRIGHT (c) 1989-2004.
* On-Line Applications Research Corporation (OAR). * On-Line Applications Research Corporation (OAR).
* *
@@ -19,21 +25,6 @@
#include <rtems/score/sysstate.h> #include <rtems/score/sysstate.h>
#include <rtems/score/heap.h> #include <rtems/score/heap.h>
/*PAGE
*
* _Heap_Get_free_information
*
* This heap routine returns information about the free blocks
* in the specified heap.
*
* Input parameters:
* the_heap - pointer to heap header.
* info - pointer to the free block information.
*
* Output parameters:
* returns - free block information filled in.
*/
void _Heap_Get_free_information( void _Heap_Get_free_information(
Heap_Control *the_heap, Heap_Control *the_heap,
Heap_Information *info Heap_Information *info
@@ -46,7 +37,7 @@ void _Heap_Get_free_information(
info->largest = 0; info->largest = 0;
info->total = 0; info->total = 0;
for(the_block = _Heap_First_free_block(the_heap); for(the_block = _Heap_Free_list_first(the_heap);
the_block != tail; the_block != tail;
the_block = the_block->next) the_block = the_block->next)
{ {

View File

@@ -1,6 +1,12 @@
/* /**
* Heap Handler * @file
* *
* @ingroup ScoreHeap
*
* @brief Heap Handler implementation.
*/
/*
* COPYRIGHT (c) 1989-2009. * COPYRIGHT (c) 1989-2009.
* On-Line Applications Research Corporation (OAR). * On-Line Applications Research Corporation (OAR).
* *
@@ -19,27 +25,13 @@
#include <rtems/score/sysstate.h> #include <rtems/score/sysstate.h>
#include <rtems/score/heap.h> #include <rtems/score/heap.h>
/* void _Heap_Get_information(
* _Heap_Get_information
*
* This kernel routine walks the heap and tots up the free and allocated
* sizes. Derived from _Heap_Walk.
*
* Input parameters:
* the_heap - pointer to heap header
* the_info - pointer for information to be returned
*
* Output parameters:
* *the_info - contains information about heap
* return 0=success, otherwise heap is corrupt.
*/
Heap_Get_information_status _Heap_Get_information(
Heap_Control *the_heap, Heap_Control *the_heap,
Heap_Information_block *the_info Heap_Information_block *the_info
) )
{ {
Heap_Block *the_block = the_heap->start; Heap_Block *the_block = the_heap->first_block;
Heap_Block *const end = the_heap->final; Heap_Block *const end = the_heap->last_block;
_HAssert(the_block->prev_size == the_heap->page_size); _HAssert(the_block->prev_size == the_heap->page_size);
_HAssert(_Heap_Is_prev_used(the_block)); _HAssert(_Heap_Is_prev_used(the_block));
@@ -52,7 +44,7 @@ Heap_Get_information_status _Heap_Get_information(
the_info->Used.largest = 0; the_info->Used.largest = 0;
while ( the_block != end ) { while ( the_block != end ) {
uint32_t const the_size = _Heap_Block_size(the_block); uintptr_t const the_size = _Heap_Block_size(the_block);
Heap_Block *const next_block = _Heap_Block_at(the_block, the_size); Heap_Block *const next_block = _Heap_Block_at(the_block, the_size);
Heap_Information *info; Heap_Information *info;
@@ -74,7 +66,5 @@ Heap_Get_information_status _Heap_Get_information(
* "used" as client never allocated it. Make 'Used.total' contain this * "used" as client never allocated it. Make 'Used.total' contain this
* blocks' overhead though. * blocks' overhead though.
*/ */
the_info->Used.total += HEAP_LAST_BLOCK_OVERHEAD; the_info->Used.total += HEAP_BLOCK_HEADER_SIZE;
return HEAP_GET_INFORMATION_SUCCESSFUL;
} }

View File

@@ -1,6 +1,12 @@
/* /**
* Heap Handler * @file
* *
* @ingroup ScoreHeap
*
* @brief Heap Handler implementation.
*/
/*
* COPYRIGHT (c) 1989-1999. * COPYRIGHT (c) 1989-1999.
* On-Line Applications Research Corporation (OAR). * On-Line Applications Research Corporation (OAR).
* *
@@ -21,129 +27,163 @@
Heap_Resize_status _Heap_Resize_block( Heap_Resize_status _Heap_Resize_block(
Heap_Control *heap, Heap_Control *heap,
void *alloc_area_begin_ptr, void *alloc_begin_ptr,
uintptr_t size, uintptr_t new_alloc_size,
uintptr_t *old_mem_size, uintptr_t *old_size,
uintptr_t *avail_mem_size uintptr_t *new_size
) )
{ {
uintptr_t alloc_area_begin = (uintptr_t) alloc_area_begin_ptr;
Heap_Block *block;
Heap_Block *next_block;
uintptr_t next_block_size;
bool next_is_used;
Heap_Block *next_next_block;
uintptr_t old_block_size;
uintptr_t old_user_size;
uintptr_t prev_used_flag;
Heap_Statistics *const stats = &heap->stats; Heap_Statistics *const stats = &heap->stats;
uintptr_t const min_block_size = heap->min_block_size; uintptr_t const min_block_size = heap->min_block_size;
uintptr_t const page_size = heap->page_size; uintptr_t const page_size = heap->page_size;
uintptr_t const alloc_begin = (uintptr_t) alloc_begin_ptr;
Heap_Block *const block = _Heap_Block_of_alloc_area( alloc_begin, page_size );
Heap_Block *next_block = NULL;
Heap_Block *next_next_block = NULL;
uintptr_t block_size = 0;
uintptr_t block_end = 0;
uintptr_t next_block_size = 0;
bool next_block_is_used = false;;
uintptr_t alloc_size = 0;
uintptr_t prev_block_used_flag = 0;
*old_mem_size = 0; *old_size = 0;
*avail_mem_size = 0; *new_size = 0;
block = _Heap_Block_of_alloc_area(alloc_area_begin, heap->page_size); if ( !_Heap_Is_block_in_heap( heap, block ) ) {
_HAssert(_Heap_Is_block_in_heap(heap, block));
if (!_Heap_Is_block_in_heap(heap, block))
return HEAP_RESIZE_FATAL_ERROR; return HEAP_RESIZE_FATAL_ERROR;
}
prev_used_flag = block->size_and_flag & HEAP_PREV_BLOCK_USED; block_size = _Heap_Block_size( block );
old_block_size = _Heap_Block_size(block); block_end = (uintptr_t) block + block_size;
next_block = _Heap_Block_at(block, old_block_size); prev_block_used_flag = block->size_and_flag & HEAP_PREV_BLOCK_USED;
next_block = _Heap_Block_at( block, block_size );
_HAssert(_Heap_Is_block_in_heap(heap, next_block)); _HAssert( _Heap_Is_block_in_heap( heap, next_block ) );
_HAssert(_Heap_Is_prev_used(next_block)); _HAssert( _Heap_Is_prev_used( next_block ) );
if ( !_Heap_Is_block_in_heap(heap, next_block) ||
!_Heap_Is_prev_used(next_block))
return HEAP_RESIZE_FATAL_ERROR;
next_block_size = _Heap_Block_size(next_block); next_block_size = _Heap_Block_size( next_block );
next_next_block = _Heap_Block_at(next_block, next_block_size); next_next_block = _Heap_Block_at( next_block, next_block_size );
next_is_used = (next_block == heap->final) ||
_Heap_Is_prev_used(next_next_block);
/* See _Heap_Size_of_alloc_area() source for explanations */ _HAssert(
old_user_size = (uintptr_t) next_block - alloc_area_begin next_block == heap->last_block
+ HEAP_BLOCK_SIZE_OFFSET; || _Heap_Is_block_in_heap( heap, next_next_block )
);
*old_mem_size = old_user_size; next_block_is_used = next_block == heap->last_block
|| _Heap_Is_prev_used( next_next_block );
if (size > old_user_size) { alloc_size = block_end - alloc_begin + HEAP_BLOCK_SIZE_OFFSET;
/* Need to extend the block: allocate part of the next block and then
merge 'block' and allocated block together. */ *old_size = alloc_size;
if (next_is_used) /* Next block is in use, -- no way to extend */
if ( new_alloc_size > alloc_size ) {
/*
* Need to extend the block: allocate part of the next block and then
* merge the blocks.
*/
if ( next_block_is_used ) {
return HEAP_RESIZE_UNSATISFIED; return HEAP_RESIZE_UNSATISFIED;
else { } else {
uintptr_t add_block_size = uintptr_t add_block_size =
_Heap_Align_up(size - old_user_size, page_size); _Heap_Align_up( new_alloc_size - alloc_size, page_size );
if (add_block_size < min_block_size)
if ( add_block_size < min_block_size ) {
add_block_size = min_block_size; add_block_size = min_block_size;
if (add_block_size > next_block_size) }
return HEAP_RESIZE_UNSATISFIED; /* Next block is too small or none. */
add_block_size = if ( add_block_size > next_block_size ) {
_Heap_Block_allocate(heap, next_block, add_block_size); return HEAP_RESIZE_UNSATISFIED;
/* Merge two subsequent blocks */ }
block->size_and_flag = (old_block_size + add_block_size) | prev_used_flag;
next_block = _Heap_Block_allocate(
heap,
next_block,
_Heap_Alloc_area_of_block( next_block ),
add_block_size - HEAP_BLOCK_HEADER_SIZE
);
/* Merge the blocks */
block->size_and_flag = ( block_size + _Heap_Block_size( next_block ) )
| prev_block_used_flag;
/* Statistics */
--stats->used_blocks; --stats->used_blocks;
} }
} else { } else {
/* Calculate how much memory we could free */ /* Calculate how much memory we could free */
uintptr_t free_block_size = uintptr_t free_block_size =
_Heap_Align_down(old_user_size - size, page_size); _Heap_Align_down( alloc_size - new_alloc_size, page_size );
if (free_block_size > 0) { if ( free_block_size > 0 ) {
/*
* To free some memory the block should be shortened so that it can can
* hold 'new_alloc_size' user bytes and still remain not shorter than
* 'min_block_size'.
*/
uintptr_t new_block_size = block_size - free_block_size;
/* To free some memory the block should be shortened so that it can if ( new_block_size < min_block_size ) {
can hold 'size' user bytes and still remain not shorter than uintptr_t const delta = min_block_size - new_block_size;
'min_block_size'. */
uintptr_t new_block_size = old_block_size - free_block_size; _HAssert( free_block_size >= delta );
if (new_block_size < min_block_size) {
uintptr_t delta = min_block_size - new_block_size;
_HAssert(free_block_size >= delta);
free_block_size -= delta; free_block_size -= delta;
if (free_block_size == 0) {
if ( free_block_size == 0 ) {
/* Statistics */
++stats->resizes; ++stats->resizes;
return HEAP_RESIZE_SUCCESSFUL; return HEAP_RESIZE_SUCCESSFUL;
} }
new_block_size += delta; new_block_size += delta;
} }
_HAssert(new_block_size >= min_block_size); _HAssert( new_block_size >= min_block_size );
_HAssert(new_block_size + free_block_size == old_block_size); _HAssert( new_block_size + free_block_size == block_size );
_HAssert(_Heap_Is_aligned(new_block_size, page_size)); _HAssert( _Heap_Is_aligned( new_block_size, page_size ) );
_HAssert(_Heap_Is_aligned(free_block_size, page_size)); _HAssert( _Heap_Is_aligned( free_block_size, page_size ) );
if (!next_is_used) { if ( !next_block_is_used ) {
/* Extend the next block to the low addresses by 'free_block_size' */ /* Extend the next block */
Heap_Block *const new_next_block = Heap_Block *const new_next_block =
_Heap_Block_at(block, new_block_size); _Heap_Block_at( block, new_block_size );
uintptr_t const new_next_block_size = uintptr_t const new_next_block_size =
next_block_size + free_block_size; next_block_size + free_block_size;
_HAssert(_Heap_Is_block_in_heap(heap, next_next_block));
block->size_and_flag = new_block_size | prev_used_flag;
new_next_block->size_and_flag = new_next_block_size | HEAP_PREV_BLOCK_USED;
next_next_block->prev_size = new_next_block_size;
_Heap_Block_replace_in_free_list(next_block, new_next_block);
heap->stats.free_size += free_block_size;
*avail_mem_size = new_next_block_size - HEAP_BLOCK_USED_OVERHEAD;
} else if (free_block_size >= min_block_size) { _HAssert( _Heap_Is_block_in_heap( heap, next_next_block ) );
/* Split the block into 2 used parts, then free the second one. */
block->size_and_flag = new_block_size | prev_used_flag; block->size_and_flag = new_block_size | prev_block_used_flag;
next_block = _Heap_Block_at(block, new_block_size); new_next_block->size_and_flag =
new_next_block_size | HEAP_PREV_BLOCK_USED;
next_next_block->prev_size = new_next_block_size;
_Heap_Free_list_replace( next_block, new_next_block );
*new_size = new_next_block_size - HEAP_BLOCK_SIZE_OFFSET;
/* Statistics */
stats->free_size += free_block_size;
} else if ( free_block_size >= min_block_size ) {
/* Split the block into two used parts, then free the second one */
block->size_and_flag = new_block_size | prev_block_used_flag;
next_block = _Heap_Block_at( block, new_block_size );
next_block->size_and_flag = free_block_size | HEAP_PREV_BLOCK_USED; next_block->size_and_flag = free_block_size | HEAP_PREV_BLOCK_USED;
_Heap_Free( heap, (void *) _Heap_Alloc_area_of_block( next_block ) );
*new_size = free_block_size - HEAP_BLOCK_SIZE_OFFSET;
/* Statistics */
++stats->used_blocks; /* We have created used block */ ++stats->used_blocks; /* We have created used block */
--stats->frees; /* Don't count next call in stats */ --stats->frees; /* Do not count next call in stats */
_Heap_Free(heap, (void *) _Heap_Alloc_area_of_block(next_block));
*avail_mem_size = free_block_size - HEAP_BLOCK_USED_OVERHEAD;
} }
} }
} }
/* Statistics */
++stats->resizes; ++stats->resizes;
return HEAP_RESIZE_SUCCESSFUL; return HEAP_RESIZE_SUCCESSFUL;
} }

View File

@@ -1,6 +1,12 @@
/* /**
* Heap Handler * @file
* *
* @ingroup ScoreHeap
*
* @brief Heap Handler implementation.
*/
/*
* COPYRIGHT (c) 1989-1999. * COPYRIGHT (c) 1989-1999.
* On-Line Applications Research Corporation (OAR). * On-Line Applications Research Corporation (OAR).
* *
@@ -21,24 +27,16 @@
bool _Heap_Size_of_alloc_area( bool _Heap_Size_of_alloc_area(
Heap_Control *heap, Heap_Control *heap,
void *alloc_area_begin_ptr, void *alloc_begin_ptr,
uintptr_t *size uintptr_t *alloc_size
) )
{ {
uintptr_t alloc_area_begin = (uintptr_t) alloc_area_begin_ptr; uintptr_t const page_size = heap->page_size;
Heap_Block *block = uintptr_t const alloc_begin = (uintptr_t) alloc_begin_ptr;
_Heap_Block_of_alloc_area( alloc_area_begin, heap->page_size ); Heap_Block *block = _Heap_Block_of_alloc_area( alloc_begin, page_size );
Heap_Block *next_block = NULL; Heap_Block *next_block = NULL;
uintptr_t block_size = 0; uintptr_t block_size = 0;
if (
!_Addresses_Is_in_range( alloc_area_begin_ptr, heap->start, heap->final )
) {
return false;
}
_HAssert(_Heap_Is_block_in_heap( heap, block ));
if ( !_Heap_Is_block_in_heap( heap, block ) ) { if ( !_Heap_Is_block_in_heap( heap, block ) ) {
return false; return false;
} }
@@ -46,26 +44,14 @@ bool _Heap_Size_of_alloc_area(
block_size = _Heap_Block_size( block ); block_size = _Heap_Block_size( block );
next_block = _Heap_Block_at( block, block_size ); next_block = _Heap_Block_at( block, block_size );
_HAssert( _Heap_Is_block_in_heap( heap, next_block ));
_HAssert( _Heap_Is_prev_used( next_block ));
if ( if (
!_Heap_Is_block_in_heap( heap, next_block ) || !_Heap_Is_block_in_heap( heap, next_block )
!_Heap_Is_prev_used( next_block ) || !_Heap_Is_prev_used( next_block )
) { ) {
return false; return false;
} }
/* *alloc_size = (uintptr_t) next_block + HEAP_BLOCK_SIZE_OFFSET - alloc_begin;
* 'alloc_area_begin' could be greater than 'block' address plus
* HEAP_BLOCK_ALLOC_AREA_OFFSET as _Heap_Allocate_aligned() may produce such
* user pointers. To get rid of this offset we calculate user size as
* difference between the end of 'block' (='next_block') and
* 'alloc_area_begin' and then add correction equal to the offset of the
* 'size' field of the 'Heap_Block' structure. The correction is due to the
* fact that 'prev_size' field of the next block is actually used as user
* accessible area of 'block'.
*/
*size = (uintptr_t) next_block - alloc_area_begin + HEAP_BLOCK_SIZE_OFFSET;
return true; return true;
} }

View File

@@ -1,8 +1,14 @@
/* /**
* Heap Handler * @file
* *
* COPYRIGHT (c) 1989-2007. * @ingroup ScoreHeap
* On-Line Applications Research Corporation (OAR). *
* @brief Heap Handler implementation.
*/
/*
* COPYRIGHT ( c ) 1989-2007.
* On-Line Applications Research Corporation ( OAR ).
* *
* The license and distribution terms for this file may be * The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at * found in the file LICENSE in this distribution or at
@@ -22,197 +28,446 @@
#include <rtems/score/interr.h> #include <rtems/score/interr.h>
#include <rtems/bspIo.h> #include <rtems/bspIo.h>
#if defined(__GNUC__) static void _Heap_Walk_printk( int source, bool dump, bool error, const char *fmt, ... )
#define DO_NOT_INLINE __attribute__((__noinline__)) {
#else if ( dump ) {
#define DO_NOT_INLINE va_list ap;
#endif
/*
* Helper to avoid introducing even more branches and paths in this
* code to do coverage analysis on.
*
* We do not want this inlined.
*/
static void hw_nl(
int error,
bool do_dump
) DO_NOT_INLINE;
/*PAGE if ( error ) {
* printk( "FAIL[%d]: ", source );
* _Heap_Walk } else {
* printk( "PASS[%d]: ", source );
* This kernel routine walks the heap and verifies its correctness. }
*
* Input parameters: va_start( ap, fmt );
* the_heap - pointer to heap header vprintk( fmt, ap );
* source - a numeric indicator of the invoker of this routine va_end( ap );
* do_dump - when true print the information }
* }
* Output parameters: NONE
*/ static bool _Heap_Walk_check_free_list(
int source,
bool dump,
Heap_Control *heap
)
{
const Heap_Block *const free_list_tail = _Heap_Free_list_tail( heap );
const Heap_Block *const first_free_block = _Heap_Free_list_first( heap );
const Heap_Block *free_block = first_free_block;
uintptr_t const page_size = heap->page_size;
uintptr_t const loop_limit =
((uintptr_t) heap->last_block - (uintptr_t) heap->first_block)
/ heap->min_block_size;
uintptr_t loop_counter = 0;
while ( free_block != free_list_tail && loop_counter < loop_limit ) {
if ( !_Heap_Is_block_in_heap( heap, free_block ) ) {
_Heap_Walk_printk(
source,
dump,
true,
"free block 0x%08x: not in heap\n",
free_block
);
return false;
}
if (
!_Heap_Is_aligned( _Heap_Alloc_area_of_block( free_block ), page_size )
) {
_Heap_Walk_printk(
source,
dump,
true,
"free block 0x%08x: alloc area not page aligned\n",
free_block
);
return false;
}
++loop_counter;
free_block = free_block->next;
}
if ( loop_counter >= loop_limit ) {
_Heap_Walk_printk(
source,
dump,
true,
"free list contains a loop\n"
);
return false;
}
return true;
}
static bool _Heap_Walk_is_in_free_list(
Heap_Control *heap,
Heap_Block *block
)
{
const Heap_Block *const free_list_tail = _Heap_Free_list_tail( heap );
const Heap_Block *free_block = _Heap_Free_list_first( heap );
while ( free_block != free_list_tail ) {
if ( free_block == block ) {
return true;
}
free_block = free_block->next;
}
return false;
}
static bool _Heap_Walk_check_control(
int source,
bool dump,
Heap_Control *heap
)
{
uintptr_t const page_size = heap->page_size;
uintptr_t const min_block_size = heap->min_block_size;
Heap_Block *const free_list_tail = _Heap_Free_list_tail( heap );
Heap_Block *const free_list_head = _Heap_Free_list_head( heap );
Heap_Block *const first_free_block = _Heap_Free_list_first( heap );
Heap_Block *const last_free_block = _Heap_Free_list_last( heap );
Heap_Block *const first_block = heap->first_block;
Heap_Block *const last_block = heap->last_block;
_Heap_Walk_printk(
source,
dump,
false,
"page size %u, min block size %u\n"
"\tarea begin 0x%08x, area end 0x%08x\n"
"\tfirst block 0x%08x, last block 0x%08x\n"
"\tfirst free 0x%08x, last free 0x%08x\n",
page_size, min_block_size,
heap->area_begin, heap->area_end,
first_block, last_block,
first_free_block, last_free_block
);
if ( page_size == 0 ) {
_Heap_Walk_printk( source, dump, true, "page size is zero\n" );
return false;
}
if ( !_Addresses_Is_aligned( (void *) page_size ) ) {
_Heap_Walk_printk(
source,
dump,
true,
"page size %u not CPU aligned\n",
page_size
);
return false;
}
if ( !_Heap_Is_aligned( min_block_size, page_size ) ) {
_Heap_Walk_printk(
source,
dump,
true,
"min block size %u not page aligned\n",
min_block_size
);
return false;
}
if (
first_free_block != free_list_head
&& !_Addresses_Is_aligned( first_free_block )
) {
_Heap_Walk_printk(
source,
dump,
true,
"first free block: 0x%08x not CPU aligned\n",
first_free_block
);
return false;
}
if (
last_free_block != free_list_tail
&& !_Addresses_Is_aligned( last_free_block )
) {
_Heap_Walk_printk(
source,
dump,
true,
"last free block: 0x%08x not CPU aligned\n",
last_free_block
);
return false;
}
if (
!_Heap_Is_aligned( _Heap_Alloc_area_of_block( first_block ), page_size )
) {
_Heap_Walk_printk(
source,
dump,
true,
"first block: 0x%08x not page aligned\n",
first_block
);
return false;
}
if (
!_Heap_Is_aligned( _Heap_Alloc_area_of_block( last_block ), page_size )
) {
_Heap_Walk_printk(
source,
dump,
true,
"last block: 0x%08x not page aligned\n",
last_block
);
return false;
}
if ( !_Heap_Is_prev_used( first_block ) ) {
_Heap_Walk_printk(
source,
dump,
true,
"first block: HEAP_PREV_BLOCK_USED is cleared\n"
);
}
if ( first_block->prev_size != page_size ) {
_Heap_Walk_printk(
source,
dump,
true,
"first block: prev size %u != page size %u\n",
first_block->prev_size,
page_size
);
}
return _Heap_Walk_check_free_list( source, dump, heap );
}
bool _Heap_Walk( bool _Heap_Walk(
Heap_Control *the_heap, Heap_Control *heap,
int source, int source,
bool do_dump bool dump
) )
{ {
Heap_Block *the_block = the_heap->start; uintptr_t const page_size = heap->page_size;
Heap_Block *const end = the_heap->final; uintptr_t const min_block_size = heap->min_block_size;
Heap_Block *const tail = _Heap_Free_list_tail(the_heap); Heap_Block *const free_list_tail = _Heap_Free_list_tail( heap );
int error = 0; Heap_Block *const free_list_head = _Heap_Free_list_head( heap );
int passes = 0; Heap_Block *const first_free_block = _Heap_Free_list_first( heap );
Heap_Block *const last_free_block = _Heap_Free_list_last( heap );
Heap_Block *const last_block = heap->last_block;
Heap_Block *block = heap->first_block;
bool error = false;
/* FIXME: Why is this disabled? */ if ( !_System_state_Is_up( _System_state_Get() ) ) {
do_dump = false;
/* FIXME: Why is this disabled? */
/*
* We don't want to allow walking the heap until we have
* transferred control to the user task so we watch the
* system state.
*/
/*
if ( !_System_state_Is_up( _System_state_Get() ) )
return true; return true;
*/
/* FIXME: Reason for this? */
if (source < 0)
source = (int) the_heap->stats.instance;
if (do_dump)
printk("\nPASS: %d start %p final %p first %p last %p begin %p end %p\n",
source, the_block, end,
_Heap_First_free_block(the_heap), _Heap_Last_free_block(the_heap),
the_heap->begin, the_heap->end);
/*
* Handle the 1st block
*/
if (!_Heap_Is_prev_used(the_block)) {
printk("PASS: %d !HEAP_PREV_BLOCK_USED flag of 1st block isn't set\n", source);
error = 1;
} }
if (the_block->prev_size != the_heap->page_size) { if ( !_Heap_Walk_check_control( source, dump, heap ) ) {
printk("PASS: %d !prev_size of 1st block isn't page_size\n", source); return false;
error = 1;
} }
while ( the_block != end ) { while ( block != last_block && _Addresses_Is_aligned( block ) ) {
uint32_t const the_size = _Heap_Block_size(the_block); uintptr_t const block_begin = (uintptr_t) block;
Heap_Block *const next_block = _Heap_Block_at(the_block, the_size); uintptr_t const block_size = _Heap_Block_size( block );
bool prev_used = _Heap_Is_prev_used(the_block); bool const prev_used = _Heap_Is_prev_used( block );
Heap_Block *const next_block = _Heap_Block_at( block, block_size );
uintptr_t const next_block_begin = (uintptr_t) next_block;
if (do_dump) { if ( prev_used ) {
printk("PASS: %d block %p size %d(%c)", _Heap_Walk_printk(
source, the_block, the_size, (prev_used ? 'U' : 'F')); source,
if (prev_used) dump,
printk(" prev_size %d", the_block->prev_size); error,
else "block 0x%08x: size %u\n",
printk(" (prev_size) %d", the_block->prev_size); block,
block_size
);
} else {
_Heap_Walk_printk(
source,
dump,
error,
"block 0x%08x: size %u, prev_size %u\n",
block,
block_size,
block->prev_size
);
} }
if (
if (!_Addresses_Is_aligned(next_block) ) { !_Heap_Is_aligned( block_begin + HEAP_BLOCK_HEADER_SIZE, page_size )
printk("PASS: %d next_block %p is not aligned\n", source, next_block); ) {
error = 1; error = true;
_Heap_Walk_printk(
source,
dump,
error,
"block 0x%08x: not page (%u) aligned\n",
block,
page_size
);
break; break;
} }
if (!_Heap_Is_prev_used(next_block)) { if ( !_Heap_Is_aligned( block_size, page_size ) ) {
if (do_dump) error = true;
printk( " prev %p next %p", the_block->prev, the_block->next); _Heap_Walk_printk(
if (_Heap_Block_size(the_block) != next_block->prev_size) { source,
if (do_dump) printk("\n"); dump,
printk("PASS: %d !front and back sizes don't match", source); error,
error = 1; "block 0x%08x: block size %u not page (%u) aligned\n",
} block,
if (!prev_used) { block_size,
page_size
hw_nl(do_dump, error); );
printk("PASS: %d !two consecutive blocks are free", source);
error = 1;
}
{ /* Check if 'the_block' is in the free block list */
Heap_Block* block = _Heap_First_free_block(the_heap);
if (!_Addresses_Is_aligned(block) ) {
printk(
"PASS: %d first free block %p is not aligned\n", source, block);
error = 1;
break;
}
while(block != the_block && block != tail) {
if (!_Addresses_Is_aligned(block) ) {
printk(
"PASS: %d a free block %p is not aligned\n", source, block);
error = 1;
break;
}
if (!_Heap_Is_block_in_heap(the_heap, block)) {
printk("PASS: %d a free block %p is not in heap\n", source, block);
error = 1;
break;
}
block = block->next;
}
if (block != the_block) {
hw_nl(do_dump, error);
printk("PASS: %d !the_block not in the free list", source);
error = 1;
}
}
}
hw_nl(do_dump, error);
if (the_size < the_heap->min_block_size) {
printk("PASS: %d !block size is too small\n", source);
error = 1;
break; break;
} }
if (!_Heap_Is_aligned( the_size, the_heap->page_size)) {
printk("PASS: %d !block size is misaligned\n", source); if ( block_size < min_block_size ) {
error = 1; error = true;
_Heap_Walk_printk(
source,
dump,
error,
"block 0x%08x: size %u < min block size %u\n",
block,
block_size,
min_block_size
);
break;
} }
if (++passes > (do_dump ? 10 : 0) && error) if ( next_block_begin <= block_begin ) {
error = true;
_Heap_Walk_printk(
source,
dump,
error,
"block 0x%08x: next block 0x%08x is not a successor\n",
block,
next_block
);
break; break;
}
the_block = next_block; if ( !_Heap_Is_prev_used( next_block ) ) {
_Heap_Walk_printk(
source,
dump,
error,
"block 0x%08x: prev 0x%08x%s, next 0x%08x%s\n",
block,
block->prev,
block->prev == first_free_block ?
" (= first)"
: (block->prev == free_list_head ? " (= head)" : ""),
block->next,
block->next == last_free_block ?
" (= last)"
: (block->next == free_list_tail ? " (= tail)" : "")
);
if ( block_size != next_block->prev_size ) {
error = true;
_Heap_Walk_printk(
source,
dump,
error,
"block 0x%08x: size %u != size %u (in next block 0x%08x)\n",
block,
block_size,
next_block->prev_size,
next_block
);
}
if ( !prev_used ) {
error = true;
_Heap_Walk_printk(
source,
dump,
error,
"block 0x%08x: two consecutive blocks are free\n",
block
);
}
if ( !_Heap_Walk_is_in_free_list( heap, block ) ) {
error = true;
_Heap_Walk_printk(
source,
dump,
error,
"block 0x%08x: free block not in free list\n",
block
);
}
}
block = next_block;
} }
if (the_block != end) { if ( !_Addresses_Is_aligned( block ) ) {
printk("PASS: %d !last block address isn't equal to 'final' %p %p\n", error = true;
source, the_block, end); _Heap_Walk_printk(
error = 1; source,
dump,
error,
"block 0x%08x: not CPU aligned\n",
block
);
return false;
} }
if (_Heap_Block_size(the_block) != the_heap->page_size) { if ( block == last_block ) {
printk("PASS: %d !last block's size isn't page_size (%d != %d)\n", source, uintptr_t const block_size = _Heap_Block_size( block );
_Heap_Block_size(the_block), the_heap->page_size);
error = 1; if ( block_size != page_size ) {
error = true;
_Heap_Walk_printk(
source,
dump,
error,
"last block 0x%08x: size %u != page size %u\n",
block,
block_size,
page_size
);
}
} else {
error = true;
_Heap_Walk_printk(
source,
dump,
error,
"last block 0x%08x != last block 0x%08x\n",
block,
last_block
);
} }
if (do_dump && error) return !error;
_Internal_error_Occurred( INTERNAL_ERROR_CORE, true, 0xffff0000 );
return error;
}
/*
* This method exists to simplify branch paths in the generated code above.
*/
static void hw_nl(
int error,
bool do_dump
)
{
if (do_dump || error) printk("\n");
} }

View File

@@ -1,4 +1,12 @@
/** /**
* @file
*
* @ingroup ScoreProtHeap
*
* @brief Protected Heap Handler implementation.
*/
/*
* COPYRIGHT (c) 1989-2007. * COPYRIGHT (c) 1989-2007.
* On-Line Applications Research Corporation (OAR). * On-Line Applications Research Corporation (OAR).
* *
@@ -18,7 +26,7 @@
void *_Protected_heap_Allocate( void *_Protected_heap_Allocate(
Heap_Control *the_heap, Heap_Control *the_heap,
intptr_t size uintptr_t size
) )
{ {
void *p; void *p;

View File

@@ -1,4 +1,12 @@
/** /**
* @file
*
* @ingroup ScoreProtHeap
*
* @brief Protected Heap Handler implementation.
*/
/*
* COPYRIGHT (c) 1989-2007. * COPYRIGHT (c) 1989-2007.
* On-Line Applications Research Corporation (OAR). * On-Line Applications Research Corporation (OAR).
* *
@@ -18,8 +26,8 @@
void *_Protected_heap_Allocate_aligned( void *_Protected_heap_Allocate_aligned(
Heap_Control *the_heap, Heap_Control *the_heap,
intptr_t size, uintptr_t size,
uint32_t alignment uintptr_t alignment
) )
{ {
void *p; void *p;

View File

@@ -1,4 +1,12 @@
/** /**
* @file
*
* @ingroup ScoreProtHeap
*
* @brief Protected Heap Handler implementation.
*/
/*
* COPYRIGHT (c) 1989-2007. * COPYRIGHT (c) 1989-2007.
* On-Line Applications Research Corporation (OAR). * On-Line Applications Research Corporation (OAR).
* *
@@ -19,11 +27,11 @@
bool _Protected_heap_Extend( bool _Protected_heap_Extend(
Heap_Control *the_heap, Heap_Control *the_heap,
void *starting_address, void *starting_address,
intptr_t size uintptr_t size
) )
{ {
Heap_Extend_status status; Heap_Extend_status status;
intptr_t amount_extended; uintptr_t amount_extended;
_RTEMS_Lock_allocator(); _RTEMS_Lock_allocator();
status = _Heap_Extend(the_heap, starting_address, size, &amount_extended); status = _Heap_Extend(the_heap, starting_address, size, &amount_extended);

View File

@@ -1,4 +1,12 @@
/** /**
* @file
*
* @ingroup ScoreProtHeap
*
* @brief Protected Heap Handler implementation.
*/
/*
* COPYRIGHT (c) 1989-2007. * COPYRIGHT (c) 1989-2007.
* On-Line Applications Research Corporation (OAR). * On-Line Applications Research Corporation (OAR).
* *

View File

@@ -1,4 +1,12 @@
/** /**
* @file
*
* @ingroup ScoreProtHeap
*
* @brief Protected Heap Handler implementation.
*/
/*
* COPYRIGHT (c) 1989-2007. * COPYRIGHT (c) 1989-2007.
* On-Line Applications Research Corporation (OAR). * On-Line Applications Research Corporation (OAR).
* *
@@ -19,10 +27,10 @@
bool _Protected_heap_Get_block_size( bool _Protected_heap_Get_block_size(
Heap_Control *the_heap, Heap_Control *the_heap,
void *starting_address, void *starting_address,
intptr_t *size uintptr_t *size
) )
{ {
bool status; bool status;
_RTEMS_Lock_allocator(); _RTEMS_Lock_allocator();
status = _Heap_Size_of_alloc_area( the_heap, starting_address, size ); status = _Heap_Size_of_alloc_area( the_heap, starting_address, size );

View File

@@ -1,4 +1,12 @@
/** /**
* @file
*
* @ingroup ScoreProtHeap
*
* @brief Protected Heap Handler implementation.
*/
/*
* COPYRIGHT (c) 1989-2007. * COPYRIGHT (c) 1989-2007.
* On-Line Applications Research Corporation (OAR). * On-Line Applications Research Corporation (OAR).
* *

View File

@@ -1,4 +1,12 @@
/** /**
* @file
*
* @ingroup ScoreProtHeap
*
* @brief Protected Heap Handler implementation.
*/
/*
* COPYRIGHT (c) 1989-2007. * COPYRIGHT (c) 1989-2007.
* On-Line Applications Research Corporation (OAR). * On-Line Applications Research Corporation (OAR).
* *
@@ -21,8 +29,6 @@ bool _Protected_heap_Get_information(
Heap_Information_block *the_info Heap_Information_block *the_info
) )
{ {
Heap_Get_information_status status;
if ( !the_heap ) if ( !the_heap )
return false; return false;
@@ -30,11 +36,8 @@ bool _Protected_heap_Get_information(
return false; return false;
_RTEMS_Lock_allocator(); _RTEMS_Lock_allocator();
status = _Heap_Get_information( the_heap, the_info ); _Heap_Get_information( the_heap, the_info );
_RTEMS_Unlock_allocator(); _RTEMS_Unlock_allocator();
if ( status == HEAP_GET_INFORMATION_SUCCESSFUL ) return true;
return true;
return false;
} }

View File

@@ -1,4 +1,12 @@
/** /**
* @file
*
* @ingroup ScoreProtHeap
*
* @brief Protected Heap Handler implementation.
*/
/*
* COPYRIGHT (c) 1989-2009. * COPYRIGHT (c) 1989-2009.
* On-Line Applications Research Corporation (OAR). * On-Line Applications Research Corporation (OAR).
* *
@@ -16,7 +24,7 @@
#include <rtems/system.h> #include <rtems/system.h>
#include <rtems/score/protectedheap.h> #include <rtems/score/protectedheap.h>
uint32_t _Protected_heap_Get_size( uintptr_t _Protected_heap_Get_size(
Heap_Control *the_heap Heap_Control *the_heap
) )
{ {

View File

@@ -1,4 +1,12 @@
/** /**
* @file
*
* @ingroup ScoreProtHeap
*
* @brief Protected Heap Handler implementation.
*/
/*
* COPYRIGHT (c) 1989-2007. * COPYRIGHT (c) 1989-2007.
* On-Line Applications Research Corporation (OAR). * On-Line Applications Research Corporation (OAR).
* *

View File

@@ -1,4 +1,12 @@
/** /**
* @file
*
* @ingroup ScoreProtHeap
*
* @brief Protected Heap Handler implementation.
*/
/*
* COPYRIGHT (c) 1989-2007. * COPYRIGHT (c) 1989-2007.
* On-Line Applications Research Corporation (OAR). * On-Line Applications Research Corporation (OAR).
* *
@@ -19,12 +27,12 @@
bool _Protected_heap_Resize_block( bool _Protected_heap_Resize_block(
Heap_Control *the_heap, Heap_Control *the_heap,
void *starting_address, void *starting_address,
intptr_t size uintptr_t size
) )
{ {
Heap_Resize_status status; Heap_Resize_status status;
intptr_t old_mem_size; uintptr_t old_mem_size;
intptr_t avail_mem_size; uintptr_t avail_mem_size;
_RTEMS_Lock_allocator(); _RTEMS_Lock_allocator();
status = _Heap_Resize_block( status = _Heap_Resize_block(

View File

@@ -1,4 +1,12 @@
/** /**
* @file
*
* @ingroup ScoreProtHeap
*
* @brief Protected Heap Handler implementation.
*/
/*
* COPYRIGHT (c) 1989-2007. * COPYRIGHT (c) 1989-2007.
* On-Line Applications Research Corporation (OAR). * On-Line Applications Research Corporation (OAR).
* *