blob: cba9609b1715c18fad0a08c270ea8f48eacd33ae [file] [log] [blame]
/*******************************************************************************
* Copyright (C) 2013, ZTE Corporation.
*
* File Name: qalloc.c
* File Mark:
* Description:
* Others:
* Version: V1.0
* Author: geanfeng
* Date: 2013-09-24
********************************************************************************/
/****************************************************************************
* Include files
****************************************************************************/
#include "qalloc.h"
#include "drvs_general.h"
#include "drvs_assert.h"
#include "drvs_debug.h"
#define CPU_CACHE_LINE_SIZE 32
#define QALLOC_LOCK ZOSS_DISABLE_IRQ_FAST
#define QALLOC_UNLOCK ZOSS_ENABLE_IRQ_FAST
#define QALLOC_BLOCK_FLAG (0x0FFF0000)
/* Initialize a block descriptor's order and status.
 * fix: wrapped in do-while(0) so the multi-statement macro expands safely
 * inside unbraced if/else bodies; arguments parenthesized against
 * expression-argument surprises. */
#define QALLOC_INIT_BLOCK(block_ptr, block_order, block_status) \
    do { (block_ptr)->order = (block_order); (block_ptr)->status = (block_status); } while (0)
#define DEBUG_STATISTIC_COUNT 100
/* Lifecycle states of a block descriptor.  The values are offset by
 * QALLOC_BLOCK_FLAG so a zeroed/stale descriptor never matches a valid
 * state by accident. */
typedef enum _T_QALLOC_BLOCK_STATUS {
QALLOC_BLOCK_FREE = QALLOC_BLOCK_FLAG, /* block is linked on a free list */
QALLOC_BLOCK_ALLOC, /* block is handed out to a caller */
QALLOC_BLOCK_IN_MERGE, /* transient state while buddies are coalesced */
} T_QALLOC_BLOCK_STATUS;
/*
 * Per-page block descriptor.  One entry of the chunk's block_map array;
 * only the head page of a block carries a meaningful order/status.
 */
typedef struct _T_Quick_Block
{
struct list_head node; /* linkage on the per-order free list */
UINT32 num; /* page index of this descriptor within the chunk */
UINT32 order; /* block spans 2^order pages (valid on head page only) */
VOID *debug_info; /* opaque owner tag recorded at alloc/free time */
T_QALLOC_BLOCK_STATUS status; /* see T_QALLOC_BLOCK_STATUS */
}
T_Quick_Block;
/*
 * Head of one per-order free list: the list of free blocks of a single
 * order, plus a running count of the blocks currently on it.
 */
typedef struct _T_Free_Block_List
{
struct list_head head; /* free block chain for this order */
UINT32 block_cnt; /* number of blocks currently on the chain */
}
T_Free_Block_List;
/*
 * One contiguous region of memory managed by a pool.  A pool chains
 * several chunks (possibly with different page sizes) together.
 */
typedef struct _T_Quick_Pool_Chunk
{
struct list_head next_chunk; /* next chunk in pool */
T_Quick_Pool *top_pool; /* back pointer to the owning pool */
UINT32 start_addr; /* starting address of memory chunk */
UINT32 end_addr; /* ending address of memory chunk (exclusive) */
T_Quick_Block *block_map; /* one descriptor per page of the chunk */
T_Free_Block_List *block_free; /* per-order free lists, indexed by order */
UINT32 page_size; /* page size in bytes; the minimum allocation unit */
UINT32 max_alloc_order; /* largest block order (2^order pages) served */
UINT32 page_count; /* total pages managed by this chunk */
UINT32 avail_page_count; /* pages currently sitting on free lists */
}
T_Quick_Pool_Chunk;
/* Scratch entry for per-owner allocation statistics (see chunk_PrintInfo). */
typedef struct _T_Quick_Pool_Alloc_Debug
{
VOID *alloc_owner_info; /* owner tag taken from the block's debug_info */
UINT32 alloc_count; /* total pages currently allocated to that owner */
}
T_Quick_Pool_Alloc_Debug;
VOID QPool_PrintAll(VOID); /* forward declaration for the shell command hook */
/* Snapshot of the chunk/block that triggered a double/invalid free,
 * kept for post-mortem inspection after zDrv_ASSERT fires. */
volatile T_Quick_Pool_Chunk *g_assert_chunk = NULL;
volatile T_Quick_Block *g_assert_block = NULL;
UINT32 g_QPool_Cmd_Init = 0; /* nonzero once the "drvpool" shell command is registered */
LIST_HEAD(g_QPool); /* global list of all created pools */
/**
 * fls - find last (most-significant) bit set
 * @x: the word to search
 *
 * Returns the 1-based index of the highest set bit, defined the same
 * way as ffs: fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline SINT32 fls(SINT32 x)
{
    UINT32 word = (UINT32)x;
    SINT32 bit = 1;
    SINT32 step;

    if (word == 0)
    {
        return 0;
    }
    /* binary-search the top bit, narrowing by 16/8/4/2/1 positions */
    for (step = 16; step > 0; step >>= 1)
    {
        if (word >> step)
        {
            word >>= step;
            bit += step;
        }
    }
    return bit;
}
/**
 * cal_order - order of the largest power of two not exceeding @count
 * @count: the count to convert into a block order
 */
static inline UINT32 cal_order(UINT32 count)
{
    /* fls is 1-based, so subtracting one yields the bit position */
    return (UINT32)(fls(count) - 1);
}
/**
 * chunk_AddBlk - put one block onto its per-order free list
 * @chunk: the chunk this function operates in
 * @block: the block to enqueue; must already be marked FREE
 */
static inline VOID chunk_AddBlk(T_Quick_Pool_Chunk *chunk, T_Quick_Block *block)
{
    UINT32 ord = block->order;

    /* only FREE blocks whose page number is aligned to their order may
     * be queued */
    zDrv_ASSERT(block->status == QALLOC_BLOCK_FREE);
    zDrv_ASSERT((block->num & ((1 << ord) - 1)) == 0);
    list_add(&block->node, &chunk->block_free[ord].head);
    chunk->block_free[ord].block_cnt++;
    chunk->avail_page_count += (1 << ord);
}
/**
 * chunk_DeleteBlk - take the first free block of the given order
 * @chunk: the chunk this function operates in
 * @order: which per-order free list to pop from
 *
 * Returns the block removed from the list, or NULL if that order has
 * no free blocks.
 */
static inline T_Quick_Block * chunk_DeleteBlk(T_Quick_Pool_Chunk *chunk, UINT32 order)
{
    struct list_head *head = &chunk->block_free[order].head;
    T_Quick_Block *blk;

    if (list_empty(head))
    {
        return NULL; /* nothing free at this order */
    }
    blk = list_first_entry(head, T_Quick_Block, node);
    list_del(&blk->node);
    chunk->block_free[order].block_cnt--;
    chunk->avail_page_count -= (1 << blk->order);
    zDrv_ASSERT(blk->status == QALLOC_BLOCK_FREE);
    zDrv_ASSERT((blk->num & ((1 << order) - 1)) == 0);
    return blk;
}
/**
 * chunk_InitBlk - carve one order level of free blocks out of the chunk.
 * @chunk: the chunk of this function operated in.
 * @block_pos: first page index not yet covered by a free block.
 * @order: order (2^order pages) of the blocks created in this pass.
 *
 * Creates as many 2^order-page free blocks as fit from block_pos to the
 * end of the chunk.  Returns the first page index still uncovered, or 0
 * once the whole chunk is covered; the caller re-invokes with decreasing
 * order until 0 is returned.
 */
static UINT32 chunk_InitBlk (T_Quick_Pool_Chunk *chunk, UINT32 block_pos, UINT32 order)
{
UINT32 block_size = 0;
T_Quick_Block *block = NULL;
zDrv_ASSERT(chunk != NULL);
block_size = 1 << order; /* pages per block at this order */
while ((block_pos < chunk->page_count) && (chunk->page_count - block_pos) >= (block_size))
{
block = &chunk->block_map[block_pos];
QALLOC_INIT_BLOCK(block, order, QALLOC_BLOCK_FREE);
chunk_AddBlk(chunk, &(chunk->block_map[block_pos]));
block_pos += block_size;
}
if (chunk->page_count == block_pos)
return 0; /* chunk fully covered - initialization finished */
/* NOTE(review): the loop only advances while a whole block still fits,
 * so block_pos should never exceed page_count here - this branch looks
 * unreachable; confirm before relying on it. */
if (block_pos > chunk->page_count)
block_pos -= block_size;
return block_pos;
}
/**
 * chunk_SplitBlk - halve a block, freeing its upper buddy
 * @chunk: the chunk this function operates in
 * @block: the block to split; its order is reduced by one in place
 *
 * The upper half becomes a new free block of order-1 on the free list;
 * the lower half stays in @block.
 */
static VOID chunk_SplitBlk(T_Quick_Pool_Chunk *chunk, T_Quick_Block * block)
{
    UINT32 half_order = block->order - 1;
    T_Quick_Block *buddy = block + (1u << half_order);

    QALLOC_INIT_BLOCK(buddy, half_order, QALLOC_BLOCK_FREE);
    chunk_AddBlk(chunk, buddy);
    block->order = half_order;
}
/**
 * chunk_MergeBlk - try to coalesce a freed block with its buddy.
 * @chunk: the chunk of this function operated in.
 * @block: the free block (not yet on any free list).
 *
 * If the buddy of @block is free and of the same order, both are fused
 * into one block of order+1 and the fused block is returned (in state
 * QALLOC_BLOCK_IN_MERGE) so the caller can keep merging upward.
 * Otherwise @block is put on its free list and NULL is returned.
 */
static T_Quick_Block * chunk_MergeBlk(T_Quick_Pool_Chunk *chunk, T_Quick_Block * block)
{
    UINT32 order = 0;
    UINT32 block_num = 0;
    UINT32 max_order = chunk->max_alloc_order;
    UINT32 left_block_num = 0;
    UINT32 right_block_num = 0;
    UINT32 merge_block_num = 0;
    T_Quick_Block *left_block = NULL;
    T_Quick_Block *right_block = NULL;
    T_Quick_Block *merge_block = NULL;
    order = block->order;
    if (order == max_order)
        goto add; /* already at the top order - nothing to merge into */
    block_num = block->num;
    /* the buddy pair covers pages [left_block_num, left_block_num + 2^(order+1)) */
    left_block_num = (block_num >> (order + 1)) << (order + 1);
    right_block_num = left_block_num + (1 << order);
    if (block_num == left_block_num)
    {
        merge_block_num = right_block_num;
    }
    else
    {
        merge_block_num = left_block_num;
    }
    merge_block = chunk->block_map + merge_block_num;
    if ( (merge_block->status == QALLOC_BLOCK_FREE) && (merge_block->order == order) )
    {
        list_del(&merge_block->node);
        /* fix: the buddy leaves its free list, so the per-order block
         * count must be decremented too (mirroring chunk_DeleteBlk);
         * previously only avail_page_count was adjusted, leaving
         * block_cnt permanently inflated. */
        chunk->block_free[order].block_cnt--;
        chunk->avail_page_count -= 1 << merge_block->order;
        right_block = chunk->block_map + right_block_num;
        QALLOC_INIT_BLOCK(right_block, 0x0, 0x0); /* right head becomes an interior page */
        left_block = chunk->block_map + left_block_num;
        QALLOC_INIT_BLOCK(left_block, (order + 1), QALLOC_BLOCK_IN_MERGE);
        return left_block;
    }
add:
    /*cannot merge: park this block on its free list*/
    QALLOC_INIT_BLOCK(block, order, QALLOC_BLOCK_FREE);
    chunk_AddBlk(chunk, block);
    return NULL;
}
/**
 * chunk_AllocBlk - allocate one block from a chunk.
 * @chunk: the chunk of this function operated in.
 * @size: the allocate size in bytes.
 * @debug_info: opaque owner tag recorded in the block descriptor.
 *
 * Returns the starting address of the allocated block, or 0 when the
 * request cannot be satisfied from this chunk.
 */
static UINT32 chunk_AllocBlk(T_Quick_Pool_Chunk *chunk, UINT32 size, VOID *debug_info)
{
UINT32 alloc_order = 0;
UINT32 tmp_order = 0;
UINT32 max_order = 0;
T_Quick_Block * alloc_block = NULL;
UINT32 addr = 0;
UINT32 page_alloc_count = 0;
if (chunk->max_alloc_order == 0)
{
/* single-order chunk: every block is exactly one page */
if (size > chunk->page_size)
return 0x0;
alloc_block = chunk_DeleteBlk(chunk, 0);
if (alloc_block == NULL)
return 0;
alloc_order = 0;
}
else
{
/* multi-order chunk: round the request up to whole pages */
page_alloc_count = (size /chunk->page_size);
if ((page_alloc_count * chunk->page_size) < size)
page_alloc_count++;
if (size == 0 || page_alloc_count > (0x1 << chunk->max_alloc_order))
return 0x0;
/* smallest order whose block holds page_alloc_count pages */
alloc_order = cal_order(page_alloc_count);
if (page_alloc_count > (0x1 << (alloc_order)))
alloc_order++;
/* pop a free block of the wanted order, or the next larger one */
max_order = chunk->max_alloc_order;
tmp_order = alloc_order;
while (tmp_order <= max_order)
{
alloc_block = chunk_DeleteBlk(chunk, tmp_order);
if (alloc_block)
break;
else
tmp_order++;
}
if (alloc_block == NULL)
return 0x0;
/* split an oversized block down to the requested order */
while (alloc_order != alloc_block->order)
{
chunk_SplitBlk(chunk, alloc_block);
}
}
QALLOC_INIT_BLOCK(alloc_block, alloc_order, QALLOC_BLOCK_ALLOC);
alloc_block->debug_info = debug_info;
/* translate the page number back into an address */
addr = chunk->start_addr + (alloc_block->num * chunk->page_size); /*calc address*/
return addr;
}
/**
 * chunk_FreeBlk - return an allocated block to the chunk's free lists
 * @chunk: the chunk this function operates in
 * @addr: starting address of the block being freed
 * @debug_info: opaque owner tag recorded for post-mortem debugging
 */
static VOID chunk_FreeBlk(T_Quick_Pool_Chunk *chunk, UINT32 addr, VOID *debug_info)
{
    UINT32 page_idx = (addr - chunk->start_addr) / chunk->page_size;
    T_Quick_Block *blk = chunk->block_map + page_idx;
    T_Quick_Block *merged;

    /* freeing a block that is not currently allocated (double free or
     * bad address) is fatal; record the culprit for inspection */
    if (blk->status != QALLOC_BLOCK_ALLOC)
    {
        g_assert_chunk = chunk;
        g_assert_block = blk;
        zDrv_ASSERT(0);
        return;
    }
    blk->debug_info = debug_info;
    if (chunk->max_alloc_order == 0)
    {
        /* single-order chunk: no buddy to coalesce with */
        QALLOC_INIT_BLOCK(blk, 0x0, QALLOC_BLOCK_FREE);
        chunk_AddBlk(chunk, blk);
        return;
    }
    /* coalesce upward for as long as merging keeps succeeding */
    for (merged = chunk_MergeBlk(chunk, blk); merged != NULL;
         merged = chunk_MergeBlk(chunk, merged))
    {
        /* empty - chunk_MergeBlk does all the work */
    }
}
/**
 * chunk_PrintInfo - walk the block map, verify block-size bookkeeping
 * and print per-owner allocation statistics.
 * @chunk: chunk to scan.
 * @debug_info: caller-provided array of DEBUG_STATISTIC_COUNT entries
 * used as scratch space for the per-owner counters.
 */
static VOID chunk_PrintInfo(T_Quick_Pool_Chunk *chunk, T_Quick_Pool_Alloc_Debug * debug_info)
{
    UINT32 i = 0;
    UINT32 j = 0;
    UINT32 page_count = 0;   /* recorded size (pages) of the current block */
    UINT32 page_sum = 0;     /* pages actually counted for the current block */
    UINT32 block_owner = 0;  /* page index of the current block's head */
    zDrv_ASSERT(chunk != NULL && debug_info != NULL);
    zOss_Memset(debug_info, 0x0, sizeof(T_Quick_Pool_Alloc_Debug) * DEBUG_STATISTIC_COUNT);
    for (i = 0; i < chunk->page_count; i++)
    {
        if (chunk->block_map[i].status == QALLOC_BLOCK_ALLOC)
        {
            /* statistics on allocated block sizes */
            if (page_count != page_sum)
            {
                zDrvDebug_Printf( "Error: alloc block %d, record=%d actual=%d!!!", block_owner, 1<< chunk->block_map[block_owner].order, page_sum);
            }
            block_owner = i;
            page_count = 1<< chunk->block_map[block_owner].order;
            page_sum = 1;
            /* statistics per owner */
            for (j = 0; j < DEBUG_STATISTIC_COUNT; j++)
            {
                if (debug_info[j].alloc_owner_info == chunk->block_map[i].debug_info)
                {
                    debug_info[j].alloc_count += page_count;
                    break;
                }
                else if (debug_info[j].alloc_count == 0x0)
                {
                    /* first unused slot: claim it for this owner */
                    debug_info[j].alloc_owner_info = chunk->block_map[i].debug_info;
                    debug_info[j].alloc_count += page_count;
                    break;
                }
            }
        }
        else if (chunk->block_map[i].status == QALLOC_BLOCK_FREE)
        {
            /* statistics on free block sizes */
            if (page_count != page_sum)
            {
                zDrvDebug_Printf( "Error: free block %d, record=%d actual=%d!!!", block_owner, 1<< chunk->block_map[block_owner].order, page_sum);
            }
            block_owner = i;
            page_count = 1<< chunk->block_map[block_owner].order;
            page_sum = 1;
        }
        else if (chunk->block_map[i].status == QALLOC_BLOCK_IN_MERGE)
        {
            /* IN_MERGE is transient; seeing it here means a merge never completed */
            zDrvDebug_Printf( "Error: In Merge block %d!!!", i);
        }
        else
        {
            /* zeroed descriptor: interior page of the preceding block */
            page_sum++;
        }
    }
    /*print debug info*/
    for (j = 0; j < DEBUG_STATISTIC_COUNT; j++)
    {
        if (debug_info[j].alloc_count)
        {
            zDrvDebug_Printf( "(FUN) %s alloc count = %d", debug_info[j].alloc_owner_info, debug_info[j].alloc_count);
        }
        else
        {
            break ;
        }
    }
    /* fix: the table-overflow check must test j (the statistics index);
     * the old code tested i, which always equals chunk->page_count here,
     * so the warning could never fire (or fired spuriously). */
    if (j == DEBUG_STATISTIC_COUNT)
    {
        zDrvDebug_Printf( "statistic to max count !!!");
    }
    return ;
}
/**
 * drvPool_CmdEntry - shell-command hook that dumps every pool's state.
 * @CmdMsg: shell command message (unused; the command takes no arguments).
 */
static VOID drvPool_CmdEntry(T_Shell_CommandMessage *CmdMsg)
{
    QPool_PrintAll();
}
/**
 * QPool_Create - create a new special memory pool
 * @name: display name of the pool (stored by reference, not copied)
 *
 * Creates a pool for managing special-purpose memory outside the
 * regular allocator, registers it on the global pool list, and (once
 * per boot) installs the "drvpool" debug shell command.
 * Returns the new pool, or NULL on allocation failure.
 */
T_Quick_Pool *QPool_Create(UINT8* name)
{
    T_Quick_Pool *new_pool = (T_Quick_Pool *)zOss_Malloc(sizeof(T_Quick_Pool));

    if (new_pool == NULL)
    {
        zDrv_ASSERT(0);
        return NULL;
    }
    INIT_LIST_HEAD(&new_pool->chunks);
    new_pool->count = 0;
    new_pool->name = name;
    list_add(&new_pool->node, &g_QPool);
    /* register the debug shell command exactly once */
    if (g_QPool_Cmd_Init == 0)
    {
        zOss_AddShellCmd("drvpool", drvPool_CmdEntry, "DRV Pool Debug Info");
        g_QPool_Cmd_Init = 1;
    }
    return new_pool;
}
/**
 * QPool_AddVirt - add a new chunk of special memory to the pool
 * @pool: pool to add the new memory chunk to
 * @virt: virtual starting address of the chunk (cache-line aligned)
 * @size: size in bytes of the memory chunk (cache-line multiple)
 * @page_size: allocation granularity; the minimum allocation is one page
 * @max_alloc_order: largest block order (2^order pages) this chunk serves
 *
 * Returns 0 on success or -1 on failure.
 */
SINT32 QPool_AddVirt(T_Quick_Pool *pool, UINT32 virt, UINT32 size,
UINT32 page_size, UINT32 max_alloc_order)
{
    T_Quick_Pool_Chunk *chunk = NULL;
    SINT32 i = 0;
    UINT32 block_pos = 0;
    UINT32 order = 0;
    UINT32 inserted = 0;
    struct list_head *_chunk, *_next_chunk;
    T_Quick_Pool_Chunk *iter_chunk;
    zDrv_ASSERT((virt % CPU_CACHE_LINE_SIZE) == 0);
    zDrv_ASSERT((size % CPU_CACHE_LINE_SIZE) == 0);
    zDrv_ASSERT(pool != NULL && pool->chunks.next != NULL);
    chunk = (T_Quick_Pool_Chunk *)zOss_Malloc(sizeof(T_Quick_Pool_Chunk));
    if (chunk == NULL)
        goto error;
    zOss_Memset(chunk, 0x0, sizeof(T_Quick_Pool_Chunk));
    chunk->start_addr = virt;
    chunk->page_size = page_size;
    chunk->max_alloc_order = max_alloc_order;
    chunk->block_free = (T_Free_Block_List *)zOss_Malloc((chunk->max_alloc_order + 1) * sizeof(T_Free_Block_List));
    if (chunk->block_free == NULL)
        goto error;
    for (i = 0; i <= chunk->max_alloc_order; i++)
    {
        INIT_LIST_HEAD(&(chunk->block_free[i].head));
        chunk->block_free[i].block_cnt = 0;
    }
    /* round the page count down to a whole number of max-order blocks */
    chunk->page_count = (size / ((1 << chunk->max_alloc_order) * chunk->page_size)) * (1 << chunk->max_alloc_order);
    zDrv_ASSERT(chunk->page_count != 0);
    if (chunk->page_count == 0)
        goto error;
    chunk->block_map = (T_Quick_Block *)zOss_Malloc(chunk->page_count * sizeof(T_Quick_Block));
    if (chunk->block_map == NULL)
        goto error;
    zOss_Memset(chunk->block_map, 0x0, chunk->page_count * sizeof(T_Quick_Block));
    /*initialize the block number*/
    for (i = 0; i < chunk->page_count; i++)
    {
        chunk->block_map[i].num = i;
    }
    /*initialize the block free lists, from the largest order downwards*/
    chunk->avail_page_count = 0x0;
    order = chunk->max_alloc_order;
    block_pos = 0;
    do
    {
        block_pos = chunk_InitBlk(chunk, block_pos, order);
        order--;
    }
    while (block_pos); /*when block_pos drops to zero, init is complete*/
    chunk->end_addr = virt + chunk->page_count * chunk->page_size;
    /*keep the chunk list sorted by page size*/
    if (list_empty(&pool->chunks))
    {
        list_add(&chunk->next_chunk, &pool->chunks);
    }
    else
    {
        list_for_each_safe(_chunk, _next_chunk, &pool->chunks)
        {
            iter_chunk = list_entry(_chunk, T_Quick_Pool_Chunk, next_chunk);
            if (iter_chunk->page_size < chunk->page_size)
            {
                list_add(&chunk->next_chunk, &iter_chunk->next_chunk);
                inserted = 1;
                /* fix: insert exactly once - the old code kept re-linking
                 * the same node after every smaller-page chunk it met,
                 * corrupting the list */
                break;
            }
        }
        if (!inserted)
        {
            /* fix: when every existing chunk's page size is >= ours the
             * old code never linked the chunk into the pool at all;
             * append it at the tail instead */
            list_add_tail(&chunk->next_chunk, &pool->chunks);
        }
    }
    pool->count++;
    chunk->top_pool = pool;
    return 0;
error:
    /* fix: chunk may be NULL here (first allocation failed), and the
     * descriptor itself used to leak on every failure path */
    if (chunk)
    {
        if (chunk->block_free)
            zOss_Free(chunk->block_free);
        if (chunk->block_map)
            zOss_Free(chunk->block_map);
        zOss_Free(chunk);
    }
    return -1;
}
/**
 * QPool_Alloc - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @debug_info: opaque owner tag recorded with the allocation
 *
 * Two passes over the chunk list with interrupts disabled: first the
 * chunks whose page size matches the request exactly, then all the
 * others.  Returns the allocated address, or 0 when no chunk can
 * satisfy the request.
 */
UINT32 QPool_Alloc(T_Quick_Pool *pool, UINT32 size, VOID *debug_info)
{
    struct list_head *pos, *tmp;
    T_Quick_Pool_Chunk *cur;
    UINT32 result = 0;
    UINT32 pass;
    ZOSS_INTR old_intr;

    zDrv_ASSERT(pool->count != 0);
    QALLOC_LOCK();
    /* pass 0: exact page-size chunks; pass 1: everything else */
    for (pass = 0; pass < 2 && result == 0; pass++)
    {
        list_for_each_safe(pos, tmp, &pool->chunks)
        {
            cur = list_entry(pos, T_Quick_Pool_Chunk, next_chunk);
            if ((pass == 0) != (size == cur->page_size))
                continue; /* wrong pass for this chunk */
            result = chunk_AllocBlk(cur, size, debug_info);
            if (result)
                break;
        }
    }
    QALLOC_UNLOCK();
    return result;
}
/**
 * QPool_Free - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @debug_info: opaque owner tag recorded at free time
 *
 * Silently does nothing when @addr falls inside none of the pool's
 * chunks.
 */
VOID QPool_Free(T_Quick_Pool *pool, UINT32 addr, VOID *debug_info)
{
    struct list_head *pos, *tmp;
    T_Quick_Pool_Chunk *cur;
    ZOSS_INTR old_intr;

    QALLOC_LOCK();
    list_for_each_safe(pos, tmp, &pool->chunks)
    {
        cur = list_entry(pos, T_Quick_Pool_Chunk, next_chunk);
        /* the owning chunk is the one whose address range contains addr */
        if (addr < cur->start_addr || addr >= cur->end_addr)
            continue;
        chunk_FreeBlk(cur, addr, debug_info);
        break;
    }
    QALLOC_UNLOCK();
}
/**
 * QPool_Destroy - destroy a special memory pool
 * @pool: pool to tear down
 *
 * Releases every chunk descriptor together with its bookkeeping arrays,
 * unlinks the pool from the global list and frees the pool itself.
 * NOTE: the managed memory regions themselves are not released here;
 * descriptors are zeroed before freeing to make stale use visible.
 */
VOID QPool_Destroy(T_Quick_Pool *pool)
{
    struct list_head *pos, *tmp;
    T_Quick_Pool_Chunk *cur;

    list_for_each_safe(pos, tmp, &pool->chunks)
    {
        cur = list_entry(pos, T_Quick_Pool_Chunk, next_chunk);
        list_del(&cur->next_chunk);
        zOss_Free(cur->block_free);
        zOss_Free(cur->block_map);
        zOss_Memset(cur, 0x0, sizeof(T_Quick_Pool_Chunk));
        zOss_Free(cur);
    }
    list_del(&pool->node);
    zOss_Memset(pool, 0x0, sizeof(T_Quick_Pool));
    zOss_Free(pool);
}
/**
 * QPool_Avail - get available free space of the pool
 * @pool: pool to get available free space
 *
 * Returns the number of free bytes, summed over all chunks, with
 * interrupts disabled for a consistent snapshot.
 */
UINT32 QPool_Avail(T_Quick_Pool *pool)
{
    struct list_head *pos, *tmp;
    T_Quick_Pool_Chunk *cur;
    UINT32 total = 0;
    ZOSS_INTR old_intr;

    QALLOC_LOCK();
    list_for_each_safe(pos, tmp, &pool->chunks)
    {
        cur = list_entry(pos, T_Quick_Pool_Chunk, next_chunk);
        total += cur->avail_page_count * cur->page_size;
    }
    QALLOC_UNLOCK();
    return total;
}
/**
 * QPool_Size - get size in bytes of memory managed by the pool
 * @pool: pool to get size
 *
 * Returns the total managed bytes, summed over every chunk's address
 * range, with interrupts disabled for a consistent snapshot.
 */
UINT32 QPool_Size(T_Quick_Pool *pool)
{
    struct list_head *pos, *tmp;
    T_Quick_Pool_Chunk *cur;
    UINT32 total = 0;
    ZOSS_INTR old_intr;

    QALLOC_LOCK();
    list_for_each_safe(pos, tmp, &pool->chunks)
    {
        cur = list_entry(pos, T_Quick_Pool_Chunk, next_chunk);
        total += cur->end_addr - cur->start_addr;
    }
    QALLOC_UNLOCK();
    return total;
}
/**
 * QPool_Print - print the pool debug info.
 * @pool: pool to print.
 *
 * Dumps every chunk's geometry and, when scratch memory is available,
 * the per-owner allocation statistics.
 */
VOID QPool_Print(T_Quick_Pool *pool)
{
    struct list_head *_chunk, *_next_chunk;
    T_Quick_Pool_Chunk *chunk;
    T_Quick_Pool_Alloc_Debug *debug_info = NULL;
    UINT32 i = 0;
    zDrvDebug_Printf( "----------QUICK POOL DEBUG INFO (%s)----------", pool->name);
    debug_info = (T_Quick_Pool_Alloc_Debug *)zOss_Malloc(sizeof(T_Quick_Pool_Alloc_Debug) * DEBUG_STATISTIC_COUNT);
    list_for_each_safe(_chunk, _next_chunk, &pool->chunks)
    {
        chunk = list_entry(_chunk, T_Quick_Pool_Chunk, next_chunk);
        zDrvDebug_Printf( "CHUNK %d INFO:", i++);
        zDrvDebug_Printf( "start address = 0x%x", chunk->start_addr);
        zDrvDebug_Printf( "end address = 0x%x", chunk->end_addr);
        zDrvDebug_Printf( "page size = %d", chunk->page_size);
        zDrvDebug_Printf( "page count = %d", chunk->page_count);
        zDrvDebug_Printf( "page available = %d", chunk->avail_page_count);
        zDrvDebug_Printf( "max allocate page count = %d", 0x1 << chunk->max_alloc_order);
        if (debug_info)
        {
            chunk_PrintInfo(chunk, debug_info);
        }
    }
    /* fix: the scratch buffer came from zOss_Malloc, so it must be
     * released through zOss_Free - it was mistakenly passed to the C
     * library's free(), a mismatched allocator pair. */
    if (debug_info)
    {
        zOss_Free(debug_info);
    }
}
/**
* QPool_PrintAll - print the all pool debug info.
*
*/
VOID QPool_PrintAll(VOID)
{
struct list_head *_pool, *_next_pool;
T_Quick_Pool *qPool;
if (list_empty(&g_QPool))
{
zDrvDebug_Printf( "Quick Pool: no memory pool to print !!!");
return ;
}
list_for_each_safe(_pool, _next_pool, &g_QPool)
{
qPool = list_entry(_pool, T_Quick_Pool, node);
QPool_Print(qPool);
}
}