Modified kmalloc to support dynamic memory allocation for sizes that already have an allocated pool

This commit is contained in:
Bahadir Balban
2009-06-01 14:48:26 +03:00
parent 33200c92df
commit 53f54f2328

View File

@@ -11,17 +11,28 @@
/* Supports this many different kmalloc sizes */ /* Supports this many different kmalloc sizes */
#define KMALLOC_POOLS_MAX 5 #define KMALLOC_POOLS_MAX 5
struct kmalloc_pool_head {
struct list_head cache_list;
int occupied;
int total_caches;
int cache_size;
};
struct kmalloc_mempool { struct kmalloc_mempool {
int total; int total;
struct list_head pool_head[KMALLOC_POOLS_MAX]; struct kmalloc_pool_head pool_head[KMALLOC_POOLS_MAX];
struct mutex kmalloc_mutex; struct mutex kmalloc_mutex;
}; };
struct kmalloc_mempool km_pool; struct kmalloc_mempool km_pool;
void init_kmalloc() void init_kmalloc()
{ {
for (int i = 0; i < KMALLOC_POOLS_MAX; i++) for (int i = 0; i < KMALLOC_POOLS_MAX; i++) {
INIT_LIST_HEAD(&km_pool.pool_head[i]); INIT_LIST_HEAD(&km_pool.pool_head[i].cache_list);
km_pool.pool_head[i].occupied = 0;
km_pool.pool_head[i].total_caches = 0;
km_pool.pool_head[i].cache_size = 0;
}
mutex_init(&km_pool.kmalloc_mutex); mutex_init(&km_pool.kmalloc_mutex);
} }
@@ -33,27 +44,32 @@ void init_kmalloc()
*/ */
void *kmalloc(int size) void *kmalloc(int size)
{ {
struct mem_cache *cache, *n; struct mem_cache *cache;
int right_sized_pool_idx = -1; int right_sized_pool_idx = -1;
int index; int index;
/* Search all existing pools for this size, and if found, free bufs */ BUG_ON(!size); /* It is a kernel bug if size is 0 */
for (int i = 0; i < km_pool.total; i++) { for (int i = 0; i < km_pool.total; i++) {
list_for_each_entry_safe(cache, n, &km_pool.pool_head[i], list) { /* Check if this pool has right size */
if (cache->struct_size == size) { if (km_pool.pool_head[i].cache_size == size) {
right_sized_pool_idx = i; right_sized_pool_idx = i;
/*
* Found the pool, now see if any
* cache has available slots
*/
list_for_each_entry(cache, &km_pool.pool_head[i].cache_list,
list) {
if (cache->free) if (cache->free)
return mem_cache_alloc(cache); return mem_cache_alloc(cache);
else else
continue; break;
} else }
break;
} }
} }
/* /*
* No such pool list already available at hand, and we don't have room * All pools are allocated and none has requested size
* for new pool lists.
*/ */
if ((right_sized_pool_idx < 0) && if ((right_sized_pool_idx < 0) &&
(km_pool.total == KMALLOC_POOLS_MAX - 1)) { (km_pool.total == KMALLOC_POOLS_MAX - 1)) {
@@ -62,17 +78,21 @@ void *kmalloc(int size)
BUG(); BUG();
} }
/* A pool exists with given size? (But no cache in it is free) */
if (right_sized_pool_idx >= 0) if (right_sized_pool_idx >= 0)
index = right_sized_pool_idx; index = right_sized_pool_idx;
else else /* No pool of this size, allocate new by incrementing total */
index = km_pool.total++; index = km_pool.total++;
/* Only allow up to page size */ /* Only allow up to page size */
BUG_ON(size >= PAGE_SIZE); BUG_ON(size >= PAGE_SIZE);
BUG_ON(!(cache = mem_cache_init(alloc_page(), PAGE_SIZE, BUG_ON(!(cache = mem_cache_init(alloc_page(), PAGE_SIZE,
size, 0))); size, 0)));
printk("%s: Created new cache for size %d\n", __FUNCTION__, size); printk("%s: Created new cache for size %d\n", __FUNCTION__, size);
list_add(&cache->list, &km_pool.pool_head[index]); list_add(&cache->list, &km_pool.pool_head[index].cache_list);
km_pool.pool_head[index].occupied = 1;
km_pool.pool_head[index].total_caches++;
km_pool.pool_head[index].cache_size = size;
return mem_cache_alloc(cache); return mem_cache_alloc(cache);
} }
@@ -85,20 +105,22 @@ int kfree(void *p)
struct mem_cache *cache, *tmp; struct mem_cache *cache, *tmp;
for (int i = 0; i < km_pool.total; i++) for (int i = 0; i < km_pool.total; i++)
list_for_each_entry_safe(cache, tmp, &km_pool.pool_head[i], list) list_for_each_entry_safe(cache, tmp,
&km_pool.pool_head[i].cache_list,
list) {
if (!mem_cache_free(cache, p)) { if (!mem_cache_free(cache, p)) {
if (mem_cache_is_empty(cache)) { if (mem_cache_is_empty(cache)) {
km_pool.pool_head[i].total_caches--;
list_del(&cache->list); list_del(&cache->list);
free_page(cache); free_page(cache);
/* /*
* Total remains the same unless all * Total remains the same but slot
* caches are freed on that pool * may have no caches left.
*/ */
if (list_empty(&km_pool.pool_head[i]))
km_pool.total--;
} }
return 0; return 0;
} }
}
return -1; return -1;
} }