Initial commit

Bahadir Balban
2008-01-13 13:53:52 +00:00
commit e2b791a3d8
789 changed files with 95825 additions and 0 deletions

tasks/libmem/SConstruct Normal file

@@ -0,0 +1,67 @@
#
# Copyright (C) 2007 Bahadir Balban
#
import os
import glob
import sys
from os.path import join
from string import split
project_root = "../.."
headers_root = join(project_root, "include/l4")
config_h = join(headers_root, "config.h")
#libl4 paths
libl4_headers = join(project_root, "tasks/libl4/include")
libl4_libpath = join(project_root, "tasks/libl4")
mm = "mm"
kmalloc = "kmalloc"
memcache = "memcache"
tests = "tests"
mm_dir = mm
kmalloc_dir = kmalloc
memcache_dir = memcache
tests_dir = tests
test_env = Environment(CC = 'gcc -m32',
CCFLAGS = ['-g', '-std=gnu99', '-Wall', '-Werror'],
ENV = {'PATH' : os.environ['PATH']},
LIBS = ['gcc', 'mm', 'km', 'mc'],
LIBPATH = ['#'],
CPPPATH = ['#include', join(project_root, "include"), "#", libl4_headers])
env = Environment(CC = 'arm-none-linux-gnueabi-gcc',
CCFLAGS = ['-g', '-nostdlib', '-Wall', '-Werror', '-ffreestanding', '-std=gnu99'],
LINKFLAGS = ['-nostdlib'],
ENV = {'PATH' : os.environ['PATH']},
LIBS = 'gcc',
CPPPATH = [join(project_root, "include"), "#", libl4_headers])
if not os.path.exists(config_h):
print "\nThis build requires a valid kernel configuration header."
print "Please run `scons configure' in the kernel root directory."
print "Choose the `tests' target to build memory allocator tests,"
print "or any other target for real use.\n"
sys.exit()
mm_src = glob.glob("%s/*.c" % mm_dir)
kmalloc_src = glob.glob("%s/*.c" % kmalloc_dir)
memcache_src = glob.glob("%s/*.c" % memcache_dir)
tests_src = glob.glob ("%s/*.c" % tests_dir)
if "tests" in COMMAND_LINE_TARGETS:
print "WARNING!!! Did you configure the kernel with test target first???"
libmm = test_env.StaticLibrary(mm, mm_src)
libkmalloc = test_env.StaticLibrary("km", kmalloc_src)
libmemcache = test_env.StaticLibrary("mc", memcache_src)
test_prog = test_env.Program("test", tests_src)
env.Alias("tests", test_prog)
else:
libmm = env.StaticLibrary(mm, mm_src)
libkmalloc = env.StaticLibrary("km", kmalloc_src)
libmemcache = env.StaticLibrary("mc", memcache_src)


@@ -0,0 +1,363 @@
/*
* Simple linked-list based kernel memory allocator.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <stdio.h>
#include <string.h>
#include <l4/config.h>
#include <l4/macros.h>
#include <l4/types.h>
#include INC_GLUE(memory.h)
#include INC_GLUE(memlayout.h)
#include INC_SUBARCH(mm.h)
#include <l4lib/arch/syscalls.h>
#include <l4/lib/list.h>
#include <kmalloc/kmalloc.h>
#include <mm/alloc_page.h>
/* Initial free area descriptor.
*
* Basic description of how free areas are tracked:
*
* A km_area marked with pg_alloc_pages means it is located at the beginning
* of a new page allocation, and it is the first struct to describe those
* allocated page(s).
*
 * If, for all km_areas, pg_alloc_pages = {SA, SB, ..., SZ}, and `fragments
* of pg_alloc_pages' = {sa(n), sb(n), ..., sz(n)} where n is the sequence number
* of that fragment, and for each SX, SX = sx(1), and "->" denotes "next"
* pointer relationship, on a random occasion, the areas could look like this:
*
* SA->sa(2)->sa(3)->SB->sb(2)->SC->SD->SE->se(2)->se(3)->se(4)
*
* With regard to all alloc/free functions defined below, in this example's
* context, sa(1..3) can merge if any adjacent pair of them are free. Whereas if
* adjacent(SC,SD) were true, SC and SD cannot be merged even if they are both
* free, because they are pg_alloc_pages. Also, for each SX, it can be freed IFF
* it is the only element in SX, and it is free. For instance, each of SC or SD
* can be individually freed, provided they are marked unused.
*
* We could have used a bucket for each, e.g:
*
* SA->sa(2)->sa(3)
* |
* v
* SB->sb(2)->sb(3)
* |
* v
* SC
* |
* v
* SD
*
* etc. But the original is simple enough for now and does the job.
*
*/
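/*
 * Illustrative call sequence (a sketch, not part of this file's code;
 * assumes kmalloc_init() has already run):
 *
 *	void *buf = kmalloc(64);	// carves 64 bytes plus a km_area header
 *	void *zeroed = kzalloc(32);	// same, but memset to zero
 *	kfree(zeroed);			// may merge with adjacent free areas
 *	kfree(buf);			// whole pages may return to alloc_page()
 */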
struct list_head km_area_start;
/*
* Initialises a km_area descriptor according to the free area parameters
* supplied along with it. @ppage = pointer to start of free memory.
* @npages = number of pages the region contains. @km_areas = head of the list
* of km_areas on the system that belongs to kmalloc.
*/
void kmalloc_add_new_pages(void *ppage, int npages, struct list_head *km_areas)
{
struct km_area *new = (struct km_area *)ppage;
new->vaddr = (unsigned long)ppage + sizeof(struct km_area);
new->size = (npages * PAGE_SIZE) - sizeof(struct km_area);
new->used = 0;
new->pg_alloc_pages = npages;
INIT_LIST_HEAD(&new->list);
/*
* The first entry is a pg_alloc_pages. Adding the new pg_alloc_pages
* in tail ensures each pg_alloc_pages are adjacent, and their
* children are never intermixed.
*/
list_add_tail(&new->list, km_areas);
}
#define KM_INIT_PAGES 3
void kmalloc_init()
{
/* Initially allocated pages with one big free km_area */
void *ppage = l4_map_helper(alloc_page(KM_INIT_PAGES),
KM_INIT_PAGES);
struct km_area *new = (struct km_area *)ppage;
BUG_ON(!new);
new->vaddr = (unsigned long)ppage + sizeof(struct km_area);
new->size = (KM_INIT_PAGES * PAGE_SIZE)
- sizeof(struct km_area);
new->used = 0;
new->pg_alloc_pages = KM_INIT_PAGES;
INIT_LIST_HEAD(&new->list);
INIT_LIST_HEAD(&km_area_start);
/* Add the first area to the global list head */
list_add(&new->list, &km_area_start);
/* NOTE: If needed, initialise mutex here */
}
/*
* Given a free list, finds a free region of requested size plus one subpage
* area descriptor. Allocates and initialises the new descriptor, adds it to
* the list and returns it.
*/
static struct km_area *
find_free_km_area(int size, struct list_head *km_areas)
{
struct km_area *new;
struct km_area *area;
const unsigned long alignment_extra_max = SZ_WORD - 1;
int alignment_used = 0, alignment_unused = 0;
/* The minimum size needed if the area will be divided into two */
int dividable_size = size + sizeof(struct km_area)
+ alignment_extra_max;
list_for_each_entry (area, km_areas, list) {
/* Is this a free region that fits? */
if ((area->size) >= dividable_size && !area->used) {
unsigned long addr, addr_aligned;
/*
* Cut the free area from the end, as much as
* we want to use
*/
area->size -= size + sizeof(struct km_area);
addr = (area->vaddr + area->size);
addr_aligned = align_up(addr, SZ_WORD);
alignment_used = addr_aligned - addr;
alignment_unused = alignment_extra_max
- alignment_used;
/*
* Add the extra bit that's skipped for alignment
* to original subpage
*/
area->size += alignment_used;
/*
* Allocate the new link structure at the end
* of the free area shortened previously.
*/
new = (struct km_area *)addr_aligned;
/*
* Actual allocated memory starts after subpage
* descriptor
*/
new->vaddr = (unsigned long)new
+ sizeof(struct km_area);
new->size = size + sizeof(struct km_area)
+ alignment_unused;
new->used = 1;
/* Divides other allocated page(s) */
new->pg_alloc_pages = 0;
/* Add used region to the page area list */
INIT_LIST_HEAD(&new->list);
list_add(&new->list, &area->list);
return new;
} else if (area->size < dividable_size &&
area->size >= size && !area->used) {
/*
* Area not at dividable size but can satisfy request,
* so it's simply returned.
*/
area->used = 1;
return area;
}
}
/* Traversed all areas and can't satisfy request. */
return 0;
}
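/*
 * Sketch of the split performed above (sizes not to scale). The new
 * descriptor is carved from the *end* of the free area, so the original
 * area's header stays in place and only its size shrinks:
 *
 *	before:	[km_area|...............free...............]
 *	after:	[km_area|....free....][km_area'|allocation  ]
 *	                               `--> vaddr returned to caller
 */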
/*
* Allocate, initialise a km_area along with its free memory of minimum
* size as @size, and add it to km_area list.
*/
static int
kmalloc_get_free_pages(int size, struct list_head *km_areas)
{
int totalsize = size + sizeof(struct km_area) * 2;
int npages = totalsize / PAGE_SIZE;
void *ppage;
if (totalsize & PAGE_MASK)
npages++;
if ((ppage = l4_map_helper(alloc_page(npages), npages)) == 0)
/* TODO: Return specific error code, e.g. ENOMEM */
return -1;
BUG_ON((npages * PAGE_SIZE) < (size + sizeof(struct km_area)));
kmalloc_add_new_pages(ppage, npages, km_areas);
return 0;
}
/*
* Linked list based kernel memory allocator. This has the simplicity of
* allocating list structures together with the requested memory area. This
* can't be done with the page allocator, because it works in page-size chunks.
* In kmalloc we can allocate more fine-grain sizes, so a link structure can
* also be embedded together with requested data.
*/
/* Allocates given @size, requests more free pages if free areas depleted. */
void *kmalloc(int size)
{
struct km_area *new_area;
void *allocation;
/* NOTE: If needed, lock mutex here */
new_area = find_free_km_area(size, &km_area_start);
if (!new_area) {
if (kmalloc_get_free_pages(size, &km_area_start) < 0) {
allocation = 0;
goto out;
}
else
new_area = find_free_km_area(size, &km_area_start);
}
BUG_ON(!new_area);
allocation = (void *)new_area->vaddr;
out:
/* NOTE: If locked, unlock mutex here */
return allocation;
}
/* kmalloc with zero initialised memory */
void *kzalloc(int size)
{
void *mem = kmalloc(size);
if (mem)
memset(mem, 0, size);
return mem;
}
void km_free_empty_pages(struct km_area *free_area)
{
unsigned long wholesize;
/* Not allocated from page allocator */
if (!free_area->pg_alloc_pages)
return;
/* The first km_area in memory from the page allocator: */
/* Must be on a page boundary */
BUG_ON((unsigned long)free_area & PAGE_MASK);
/* Must be unused */
BUG_ON(free_area->used);
/* Must be whole, (i.e. not divided into other km_areas) */
wholesize = free_area->pg_alloc_pages * PAGE_SIZE;
if ((free_area->size + sizeof(struct km_area)) < wholesize)
return;
/* Must have at least PAGE_SIZE size, when itself included */
BUG_ON(free_area->size < (PAGE_SIZE - sizeof(struct km_area)));
/* Its size must be a multiple of PAGE_SIZE, when itself included */
if ((free_area->size + sizeof(struct km_area)) & PAGE_MASK) {
printk("Error: free_area->size: 0x%lu, with km_area_struct:"
" 0x%lu, PAGE_MASK: 0x%x\n", free_area->size,
free_area->size + sizeof(struct km_area), PAGE_MASK);
BUG();
}
list_del(&free_area->list);
/* And finally must be freed without problems */
BUG_ON(free_page(l4_unmap_helper(free_area, __pfn(wholesize))) < 0);
return;
}
struct km_area *km_merge_free_areas(struct km_area *before,
struct km_area *after)
{
/*
* If `after' has pg_alloc_pages set, it means it can't be merged and
* has to be returned explicitly to the page allocator.
*/
if (after->pg_alloc_pages)
return 0;
BUG_ON(before->vaddr + before->size != after->vaddr);
	BUG_ON(before->used || after->used);
BUG_ON(before == after);
/*
* km_area structures are at the beginning of the memory
* areas they describe. By simply merging them with another
* area they're effectively freed.
*/
before->size += after->size + sizeof(struct km_area);
list_del(&after->list);
return before;
}
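/*
 * Merge sketch: `after' must immediately follow `before' in memory, so
 * absorbing it costs nothing but adding its size plus its header bytes:
 *
 *	[before|free][after|free]  ==>  [before|free..............]
 */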
int find_and_free_km_area(void *vaddr, struct list_head *areas)
{
struct km_area *area, *prev, *next, *merged;
if (!vaddr) /* A well-known invalid address */
return -1;
list_for_each_entry(area, areas, list)
if (area->vaddr == (unsigned long)vaddr && area->used)
goto found;
/* Area not found */
return -1;
found:
area->used = 0;
/* Now merge with adjacent areas if possible */
if (area->list.prev != areas) {
prev = list_entry(area->list.prev, struct km_area, list);
if (!prev->used)
if ((merged = km_merge_free_areas(prev, area)))
area = merged;
}
if (area->list.next != areas) {
next = list_entry(area->list.next, struct km_area, list);
if (!next->used)
			if ((merged = km_merge_free_areas(area, next)))
area = merged;
}
/*
* After freeing and all possible merging, try returning region back
* to page allocator.
*/
km_free_empty_pages(area);
return 0;
}
int kfree(void *virtual)
{
int ret;
/* NOTE: If needed, lock mutex here */
ret = find_and_free_km_area(virtual, &km_area_start);
/* NOTE: If locked, unlock mutex here */
return ret;
}


@@ -0,0 +1,457 @@
/*
* Kernel memory allocator.
*
* Copyright (C) 2007 Bahadir Balban
*
*/
#include <stdio.h>
#include <string.h>
#include <l4/config.h>
#include <l4/macros.h>
#include <l4/types.h>
#include INC_GLUE(memory.h)
#include INC_GLUE(memlayout.h)
#include INC_SUBARCH(mm.h)
#include <l4lib/arch/syscalls.h>
#include <l4/lib/list.h>
#include <kmalloc/kmalloc.h>
#include <mm/alloc_page.h>
/* Initial free area descriptor.
*
* Basic description of how free areas are tracked:
*
* A subpage_area marked as head_of_pages means it is located at the beginning
* of a new page allocation, and it is the first struct to describe those
* allocated page(s).
*
* If, for all subpage_areas, head_of_pages = {SA, SB, ..., SZ}, and `fragments
* of head_of_pages' = {sa(n), sb(n), ..., sz(n)} where n is the sequence number
* of that fragment, and for each SX, SX = sx(1), and "->" denotes "next"
* pointer relationship, on a random occasion, the areas could look like this:
*
* SA->sa(2)->sa(3)->SB->sb(2)->SC->SD->SE->se(2)->se(3)->se(4)
*
* With regard to all alloc/free functions defined below, in this example's
* context, sa(1..3) can merge if any adjacent pair of them are free. Whereas if
* adjacent(SC,SD) were true, SC and SD cannot be merged even if they are both
* free, because they are head_of_pages. Also, for each SX, it can be freed IFF
* it is the only element in SX, and it is free. For instance, each of SC or SD
* can be individually freed, provided they are marked unused.
*
* We could have used a bucket for each, e.g:
*
* SA->sa(2)->sa(3)
* |
* v
* SB->sb(2)->sb(3)
* |
* v
* SC
* |
* v
* SD
*
* etc. But the original is simple enough for now and does the job.
*
*/
struct subpage_area *km_areas;
/* Initialises a subpage area descriptor according to the free area parameters
* supplied along with it. @ppage = pointer to start of free memory.
* @npages = number of pages the region contains. @areas = head of the list of
* subpage_areas on the system that belongs to kmalloc. */
void kmalloc_add_new_pages(void *ppage, int npages, struct subpage_area **areas)
{
struct subpage_area *new = (struct subpage_area *)ppage;
new->vaddr = (unsigned int)ppage + sizeof(struct subpage_area);
new->size = (npages * PAGE_SIZE) - sizeof(struct subpage_area);
new->used = 0;
new->head_of_pages = npages;
INIT_LIST_HEAD(&new->list);
/* The first entry is a head_of_pages. Adding the new head_of_pages
* in tail ensures each head_of_pages are adjacent, and their
* children are never intermixed */
list_add_tail(&new->list, &(*areas)->list);
}
#define KMALLOC_INITIAL_PAGES 3
void kmalloc_init()
{
/* Initially allocated pages with one big free km_area */
void *ppage = alloc_page(KMALLOC_INITIAL_PAGES);
ppage = l4_map_helper(ppage, KMALLOC_INITIAL_PAGES);
struct subpage_area *new = (struct subpage_area *)ppage;
BUG_ON(!new);
new->vaddr = (unsigned int)ppage + sizeof(struct subpage_area);
new->size = (KMALLOC_INITIAL_PAGES * PAGE_SIZE)
- sizeof(struct subpage_area);
new->used = 0;
new->head_of_pages = KMALLOC_INITIAL_PAGES;
INIT_LIST_HEAD(&new->list);
/* Assign the first area to global list pointer */
km_areas = new;
/* NOTE: If needed, initialise mutex here */
}
/* Given a free list, finds a free region of requested size plus one subpage
* area descriptor. Allocates and initialises the new descriptor, adds it to
* the list and returns it.
*/
static struct subpage_area *
find_free_subpage_area(int size, struct subpage_area **areas)
{
struct subpage_area *new;
struct subpage_area *cur = *areas;
const unsigned int alignment_extra_max = SZ_WORD - 1;
unsigned int alignment_used = 0, alignment_unused = 0;
/* The minimum size needed if the area will be divided into two */
int dividable_size = size + sizeof(struct subpage_area)
+ alignment_extra_max;
/* Is this a free region that fits? */
if ((cur->size) >= dividable_size && !cur->used) {
unsigned int addr, addr_aligned;
		/* Cut the free area from the end, as much as we want to use */
cur->size -= size + sizeof(struct subpage_area);
addr = (cur->vaddr + cur->size);
addr_aligned = align_up(addr, SZ_WORD);
alignment_used = addr_aligned - addr;
alignment_unused = alignment_extra_max - alignment_used;
/* Add the extra bit that's skipped for alignment to original subpage */
cur->size += alignment_used;
/* Allocate the new link structure at the end
* of the free area shortened previously. */
new = (struct subpage_area *)addr_aligned;
/* Actual allocated memory starts after subpage descriptor */
new->vaddr = (unsigned int)new + sizeof(struct subpage_area);
new->size = size + sizeof(struct subpage_area) + alignment_unused;
new->used = 1;
new->head_of_pages = 0; /* Divides other allocated page(s) */
/* Add used region to the subpage_area list */
INIT_LIST_HEAD(&new->list);
list_add(&new->list, &cur->list);
return new;
} else if (cur->size < dividable_size &&
cur->size >= size && !cur->used) {
/* The area can't be divided, but has enough room for the
* actual allocation, it just misses the few bytes for a
* new subpage_area for splitting. In this case the current
* page area is simply marked used and returned. This is a
* rare but important case, because on-demand free page
* allocations don't ensure new free areas are sufficiently
		 * large to be divisible. */
cur->used = 1;
return cur;
}
/* Do the same for all other entries */
list_for_each_entry (cur, &(*areas)->list, list) {
/* Is this a free region that fits? */
if ((cur->size) >= dividable_size && !cur->used) {
unsigned int addr, addr_aligned;
/* Cut the free area from the end, as much as
* we want to use */
cur->size -= size + sizeof(struct subpage_area);
addr = (cur->vaddr + cur->size);
addr_aligned = align_up(addr, SZ_WORD);
alignment_used = addr_aligned - addr;
alignment_unused = alignment_extra_max
- alignment_used;
/* Add the extra bit that's skipped for alignment
* to original subpage */
cur->size += alignment_used;
/* Allocate the new link structure at the end
* of the free area shortened previously. */
new = (struct subpage_area *)addr_aligned;
/* Actual allocated memory starts after subpage
* descriptor */
new->vaddr = (unsigned int)new
+ sizeof(struct subpage_area);
new->size = size + sizeof(struct subpage_area)
+ alignment_unused;
new->used = 1;
/* Divides other allocated page(s) */
new->head_of_pages = 0;
/* Add used region to the page area list */
INIT_LIST_HEAD(&new->list);
list_add(&new->list, &cur->list);
return new;
} else if (cur->size < dividable_size &&
cur->size >= size && !cur->used) {
/* Area not at dividable size but can satisfy request,
* so it's simply returned. */
cur->used = 1;
return cur;
}
}
/* Traversed all areas and can't satisfy request. */
return 0;
}
/* Allocate, initialise a subpage area along with its free
* memory of minimum size as @size, and add it to subpage list. */
static int
kmalloc_get_free_pages(int size, struct subpage_area **areas)
{
int totalsize = size + sizeof(struct subpage_area) * 2;
int npages = totalsize / PAGE_SIZE;
void *ppage;
if (totalsize & PAGE_MASK)
npages++;
if ((ppage = l4_map_helper(alloc_page(npages), npages))
== 0)
/* TODO: Return specific error code, e.g. ENOMEM */
return -1;
BUG_ON((npages * PAGE_SIZE) < (size + sizeof(struct subpage_area)));
kmalloc_add_new_pages(ppage, npages, areas);
return 0;
}
/* Linked list based subpage allocator. This has the simplicity of allocating
* list structures together with the requested memory area. This can't be done
* with the page allocator, because it works in page-size chunks. In kmalloc
* we can allocate more fine-grain sizes, so a link structure can also be
* embedded together with requested data.
*/
/* Allocates given @size, requests more free pages if free areas depleted. */
void *kmalloc(int size)
{
struct subpage_area *new_area;
void *allocation;
/* NOTE: If needed, lock mutex here */
new_area = find_free_subpage_area(size, &km_areas);
if (!new_area) {
if (kmalloc_get_free_pages(size, &km_areas) < 0) {
allocation = 0;
goto out;
}
else
new_area = find_free_subpage_area(size, &km_areas);
}
BUG_ON(!new_area);
allocation = (void *)new_area->vaddr;
out:
/* NOTE: If locked, unlock mutex here */
return allocation;
}
/* kmalloc with zero initialised memory */
void *kzalloc(int size)
{
void *mem = kmalloc(size);
if (mem)
memset(mem, 0, size);
return mem;
}
void km_free_empty_pages(struct subpage_area *free_area,
struct subpage_area **start)
{
unsigned int wholesize;
if (!free_area->head_of_pages)
return; /* Not allocated from page allocator */
if (free_area == *start)
return; /* First subpage area is allocated at
initialisation and never deallocated */
/* A head of page: */
/* Can't be the only element, start is always there. */
BUG_ON(list_empty(&free_area->list));
/* Must be on a page boundary */
BUG_ON((unsigned int)free_area & PAGE_MASK);
/* Must be unused */
BUG_ON(free_area->used);
	/* Furthermore, a head of page that can be freed must be whole;
	 * the total number of pages in the whole allocation is kept in head_of_pages */
wholesize = free_area->head_of_pages * PAGE_SIZE;
if ((free_area->size + sizeof(struct subpage_area)) < wholesize)
return;
/* Must have at least PAGE_SIZE size, when itself included */
BUG_ON(free_area->size < (PAGE_SIZE - sizeof(struct subpage_area)));
/* Its size must be a multiple of PAGE_SIZE, when itself included */
if ((free_area->size + sizeof(struct subpage_area)) & PAGE_MASK) {
printk("Error: free_area->size: 0x%x, with subpage: 0x%x, PAGE_MASK: 0x%x\n",
free_area->size, free_area->size + sizeof(struct subpage_area), PAGE_MASK);
BUG();
}
list_del(&free_area->list);
/* And finally must be freed without problems */
if (free_page(l4_unmap_helper(free_area, wholesize)) < 0)
BUG();
return;
}
static int
km_merge_with_prev_subpage(struct subpage_area *start,
struct subpage_area *this,
struct subpage_area *prev)
{
BUG_ON(this == prev);
BUG_ON(this->used);
/* Can't merge used and unused regions */
if (prev->used)
return 0;
/* At the beginning. this is head, prev is tail. Can't merge. */
if (start == this)
return 0;
/* Can't merge head descriptors of page allocations. They
* are to be returned back to the page allocator on their own. */
if (this->head_of_pages)
return 0;
/* Subpage areas can be non-contiguous, if they are not a part of
* the same page(s) allocation. This usually holds if prev and this
* are fragments from the same page allocation. */
if (prev->vaddr + prev->size != (unsigned int)this)
return 0;
/* Remember that subpage_area structures are at the beginning of
* the memory areas they describe. By simply merging them with
* another area they're effectively freed. */
prev->size += this->size + sizeof(struct subpage_area);
list_del(&this->list);
return 1;
}
static int
km_merge_with_next_subpage(struct subpage_area *start,
struct subpage_area *this,
struct subpage_area *next)
{
BUG_ON(this == next);
BUG_ON(this->used);
/* At the end. this is tail, next is head. Can't merge. */
if (start == next)
return 0;
/* Can't merge used and unused regions */
if (next->used)
return 0;
/* Can't merge head descriptors of page allocations. They
* are to be returned back to the page allocator on their own. */
if (next->head_of_pages)
return 0;
/* Subpage areas can be non-contiguous, if they are not a part of
* the same head_of_page(s) allocation. This usually holds if next
* and this are fragments from the same head_of_page. */
if (this->vaddr + this->size != (unsigned int)next)
return 0;
/* Remember that subpage_area structures are at the beginning of
* the memory areas they describe. By simply merging them with
* another area they're effectively freed. */
this->size += next->size + sizeof(struct subpage_area);
list_del(&next->list);
return 1;
}
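/*
 * Note on the two merge directions above (a sketch): in a chain like
 * SA->sa(2)->sa(3), freeing sa(2) first tries to merge backwards into
 * sa(1)/SA, then forwards into sa(3). head_of_pages descriptors always
 * stop a merge at page-allocation boundaries, so each whole allocation
 * can later be handed back to the page allocator intact.
 */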
int find_and_free_subpage_area(void *vaddr, struct subpage_area **areas)
{
struct subpage_area *cur = *areas;
if (!vaddr) /* A well-known invalid address */
return -1;
if (cur->vaddr == (unsigned int)vaddr) {
struct subpage_area *prev, *next;
BUG_ON(!cur->used);
cur->used = 0;
if (!list_empty(&cur->list)) {
prev = list_entry(cur->list.prev,
struct subpage_area,
list);
if (km_merge_with_prev_subpage(*areas, cur, prev))
cur = prev;
if (!list_empty(&cur->list)) {
/* Last merge did not reduce to last
* element. */
next = list_entry(cur->list.next,
struct subpage_area,
list);
km_merge_with_next_subpage(*areas, cur, next);
}
}
km_free_empty_pages(cur, areas);
return 0;
}
list_for_each_entry(cur, &(*areas)->list, list) {
if (cur->vaddr == (unsigned int)vaddr) {
struct subpage_area *prev, *next;
BUG_ON(!cur->used);
cur->used = 0;
if (!list_empty(&cur->list)) {
prev = list_entry(cur->list.prev,
struct subpage_area,
list);
if (km_merge_with_prev_subpage(*areas,
cur, prev))
cur = prev;
if (!list_empty(&cur->list)) {
/* Last merge did not reduce to last
* element. */
next = list_entry(cur->list.next,
struct subpage_area,
list);
km_merge_with_next_subpage(*areas, cur,
next);
}
}
/* After freeing and all possible merging, try
* returning region back to page allocator. */
km_free_empty_pages(cur, areas);
return 0;
}
}
/* TODO, Return a specific error code. Here, this is a
* serious error. (Trying to free non-existing memory) */
return -1;
}
int kfree(void *vaddr)
{
int ret;
/* NOTE: If needed, lock mutex here */
ret = find_and_free_subpage_area(vaddr, &km_areas);
/* NOTE: If locked, unlock mutex here */
return ret;
}


@@ -0,0 +1,31 @@
#ifndef __KMALLOC_H__
#define __KMALLOC_H__
#include <mm/alloc_page.h>
#include <l4/lib/list.h>
/*
* List member to keep track of free and unused regions in subpages.
* Smallest unit it represents is one byte, but note that it is also
* used for describing regions that span across multiple pages.
*/
struct km_area {
struct list_head list;
unsigned long vaddr;
unsigned long size;
int used;
int pg_alloc_pages; /* Means borrowed from alloc_page() */
};
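/*
 * Layout of one allocation (sketch): the descriptor is embedded right
 * before the memory it hands out.
 *
 *	[ struct km_area | user data ... ]
 *	                  ^-- vaddr returned by kmalloc()
 */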
extern struct list_head km_area_start;
/* Kmalloc initialisation */
void kmalloc_init(void);
/* Kmalloc allocation functions */
void *kmalloc(int size);
void *kzalloc(int size);
int kfree(void *vaddr);
#endif /* __KMALLOC_H__ */


@@ -0,0 +1,28 @@
#ifndef __KMALLOC_H__
#define __KMALLOC_H__
#include <mm/alloc_page.h>
#include <l4/lib/list.h>
/* List member to keep track of free and unused regions in subpages.
* Smallest unit it represents is one byte, but note that it is also
* used for describing regions that span across multiple pages. */
struct subpage_area {
struct list_head list;
unsigned int vaddr;
unsigned int size;
unsigned int used;
unsigned int head_of_pages; /* Means head of alloc_page() */
};
extern struct subpage_area *km_areas;
/* Kmalloc initialisation */
void kmalloc_init(void);
/* Kmalloc allocation functions */
void *kmalloc(int size);
void *kzalloc(int size);
int kfree(void *vaddr);
#endif /* __KMALLOC_H__ */


@@ -0,0 +1,202 @@
/*
* Bitmap-based linked-listable fixed-size memory cache.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <memcache/memcache.h>
#include <string.h>
#include <stdio.h>
/* Some definitions from glue/memory.h */
#define align_up(addr, size) ((((unsigned long)addr) + (size - 1)) & (~(size - 1)))
#define SZ_WORD sizeof(unsigned long)
#define WORD_BITS 32
#define BITWISE_GETWORD(x) (x >> 5) /* Divide by 32 */
#define BITWISE_GETBIT(x) (1 << (x % WORD_BITS))
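/*
 * Worked example for the macros above: bit 37 lives in word 37 >> 5 = 1,
 * under mask 1 << (37 % 32) = 0x20. So setting it is word[1] |= 0x20.
 */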
static int find_and_set_first_free_bit(u32 *word, unsigned int limit)
{
int success = 0;
int i;
for(i = 0; i < limit; i++) {
/* Find first unset bit */
if (!(word[BITWISE_GETWORD(i)] & BITWISE_GETBIT(i))) {
/* Set it */
word[BITWISE_GETWORD(i)] |= BITWISE_GETBIT(i);
success = 1;
break;
}
}
/* Return bit just set */
if (success)
return i;
else
return -1;
}
static int check_and_clear_bit(u32 *word, int bit)
{
/* Check that bit was set */
if (word[BITWISE_GETWORD(bit)] & BITWISE_GETBIT(bit)) {
word[BITWISE_GETWORD(bit)] &= ~BITWISE_GETBIT(bit);
return 0;
} else {
//printf("Trying to clear already clear bit\n");
return -1;
}
}
/* Allocate, clear and return element */
void *mem_cache_zalloc(struct mem_cache *cache)
{
	void *elem = mem_cache_alloc(cache);

	/* Don't memset a null pointer if the cache was full */
	if (elem)
		memset(elem, 0, cache->struct_size);
	return elem;
}
/* Allocate another element from given @cache. Returns 0 when full. */
void *mem_cache_alloc(struct mem_cache *cache)
{
int bit;
if (cache->free > 0) {
/* NOTE: If needed, must lock here */
cache->free--;
if ((bit = find_and_set_first_free_bit(cache->bitmap,
cache->total)) < 0) {
printk("Error: Anomaly in cache occupied state.\n"
"Bitmap full although cache->free > 0\n");
BUG();
}
/* NOTE: If needed, must unlock here */
return (void *)(cache->start + (cache->struct_size * bit));
} else {
/* Cache full */
return 0;
}
}
/* Free element at @addr in @cache. Return negative on error. */
int mem_cache_free(struct mem_cache *cache, void *addr)
{
unsigned int struct_addr = (unsigned int)addr;
unsigned int bit;
int err = 0;
/* Check boundary */
	if (struct_addr < cache->start || struct_addr >= cache->end) {
printk("Error: This address doesn't belong to this cache.\n");
return -1;
}
bit = ((struct_addr - cache->start) / cache->struct_size);
/* Check alignment:
* Find out if there was a lost remainder in last division.
* There shouldn't have been, because addresses are allocated at
* struct_size offsets from cache->start. */
if (((bit * cache->struct_size) + cache->start) != struct_addr) {
printk("Error: This address is not aligned on a predefined "
"structure address in this cache.\n");
err = -1;
return err;
}
/* NOTE: If needed, must lock here */
/* Check free/occupied state */
if (check_and_clear_bit(cache->bitmap, bit) < 0) {
printk("Error: Anomaly in cache occupied state:\n"
"Trying to free already free structure.\n");
err = -1;
goto out;
}
cache->free++;
if (cache->free > cache->total) {
printk("Error: Anomaly in cache occupied state:\n"
"More free elements than total.\n");
err = -1;
goto out;
}
out:
/* NOTE: If locked, must unlock here */
return err;
}
struct mem_cache *mem_cache_init(void *start,
int cache_size,
int struct_size,
unsigned int aligned)
{
struct mem_cache *cache = start;
unsigned int area_start;
unsigned int *bitmap;
int bwords_in_structs;
int bwords;
int total;
int bsize;
if ((struct_size < 0) || (cache_size < 0) ||
((unsigned long)start == ~(0))) {
printk("Invalid parameters.\n");
return 0;
}
/* The cache definition itself is at the beginning.
* Skipping it to get to start of free memory. i.e. the cache. */
area_start = (unsigned long)start + sizeof(struct mem_cache);
cache_size -= sizeof(struct mem_cache);
if (cache_size < struct_size) {
printk("Cache too small for given struct_size\n");
return 0;
}
/* Get how much bitmap words occupy */
total = cache_size / struct_size;
bwords = total >> 5; /* Divide by 32 */
if (total & 0x1F) { /* Remainder? */
bwords++; /* Add one more word for remainder */
}
bsize = bwords * 4;
/* This many structures will be chucked from cache for bitmap space */
bwords_in_structs = ((bsize) / struct_size) + 1;
	/* Total structs left after deducting the bitmap */
total = total - bwords_in_structs;
cache_size -= bsize;
/* This should always catch too small caches */
if (total <= 0) {
printk("Cache too small for given struct_size\n");
return 0;
}
if (cache_size <= 0) {
printk("Cache too small for given struct_size\n");
return 0;
}
bitmap = (unsigned int *)area_start;
area_start = (unsigned int)(bitmap + bwords);
if (aligned) {
unsigned int addr = area_start;
unsigned int addr_aligned = align_up(area_start, struct_size);
unsigned int diff = addr_aligned - addr;
BUG_ON(diff >= struct_size);
if (diff)
total--;
cache_size -= diff;
area_start = addr_aligned;
}
INIT_LIST_HEAD(&cache->list);
cache->start = area_start;
cache->end = area_start + cache_size;
cache->total = total;
cache->free = cache->total;
cache->struct_size = struct_size;
cache->bitmap = bitmap;
/* NOTE: If needed, must initialise lock here */
memset(cache->bitmap, 0, bwords*SZ_WORD);
return cache;
}
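/*
 * Worked sizing example (an illustration; assumes a 32-bit target where
 * sizeof(struct mem_cache) == 32): for a 4096-byte cache region and
 * struct_size == 32, cache_size becomes 4064, total = 127, bwords = 4,
 * bsize = 16, one struct is given up for the bitmap, leaving total = 126
 * usable structures in cache_size = 4048 bytes.
 */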


@@ -0,0 +1,50 @@
/*
* Bitmap-based link-listable fixed-size memory cache.
*
* Copyright (C) 2007 Bahadir Balban
*/
#ifndef __MEMCACHE_H__
#define __MEMCACHE_H__
#include <l4/config.h>
#include <l4/macros.h>
#include <l4/types.h>
#include <l4/lib/list.h>
/* Very basic cache structure. All it does is, keep an internal bitmap of
* items of struct_size. (Note bitmap is fairly efficient and simple for a
* fixed-size memory cache) Keeps track of free/occupied items within its
* start/end boundaries. Does not grow/shrink but you can link-list it. */
struct mem_cache {
struct list_head list;
int total;
int free;
unsigned int start;
unsigned int end;
unsigned int struct_size;
unsigned int *bitmap;
};
void *mem_cache_zalloc(struct mem_cache *cache);
void *mem_cache_alloc(struct mem_cache *cache);
int mem_cache_free(struct mem_cache *cache, void *addr);
struct mem_cache *mem_cache_init(void *start, int cache_size,
int struct_size, unsigned int alignment);
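/*
 * Illustrative call sequence (a sketch; `buf' and `struct foo' are
 * hypothetical, caller-provided):
 *
 *	struct mem_cache *c = mem_cache_init(buf, PAGE_SIZE,
 *					     sizeof(struct foo), 0);
 *	struct foo *f = mem_cache_zalloc(c);
 *	...
 *	mem_cache_free(c, f);
 */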
static inline int mem_cache_is_full(struct mem_cache *cache)
{
return cache->free == 0;
}
static inline int mem_cache_is_empty(struct mem_cache *cache)
{
return cache->free == cache->total;
}
static inline int mem_cache_is_last_free(struct mem_cache *cache)
{
return cache->free == 1;
}
static inline int mem_cache_total_empty(struct mem_cache *cache)
{
return cache->free;
}
#endif /* __MEMCACHE_H__ */


@@ -0,0 +1,266 @@
/*
* A proof-of-concept linked-list based page allocator.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <stdio.h>
#include <string.h>
#include <l4/config.h>
#include <l4/macros.h>
#include <l4/types.h>
#include <l4/lib/list.h>
#include <l4lib/arch/syscalls.h>
#include "alloc_page.h"
#include INC_GLUE(memory.h)
#include INC_SUBARCH(mm.h)
#include INC_GLUE(memlayout.h)
struct page_allocator allocator;
static struct mem_cache *new_dcache(struct page_allocator *p);
/*
* Allocate a new page area from @area_sources_start. If no areas left,
* allocate a new cache first, allocate page area from the new cache.
*/
static struct page_area *new_page_area(struct page_allocator *p,
struct list_head *ccache)
{
struct mem_cache *cache;
struct page_area *new_area;
struct list_head *cache_list;
if (ccache)
cache_list = ccache;
else
cache_list = &p->dcache_list;
list_for_each_entry(cache, cache_list, list)
if ((new_area = mem_cache_alloc(cache)) != 0) {
new_area->cache = cache;
return new_area;
}
/* Must not reach here if a ccache is already used. */
BUG_ON(ccache);
if ((cache = new_dcache(p)) == 0)
return 0; /* Denotes out of memory */
new_area = mem_cache_alloc(cache);
new_area->cache = cache;
return new_area;
}
/* Given a request of @quantity pages, finds a free region, divides it if needed, and returns the new area. */
static struct page_area *
get_free_page_area(int quantity, struct page_allocator *p,
struct list_head *cache_list)
{
struct page_area *new, *area;
if (quantity <= 0)
return 0;
list_for_each_entry(area, &p->page_area_list, list) {
/* Free but needs dividing */
if (area->numpages > quantity && !area->used) {
area->numpages -= quantity;
if (!(new = new_page_area(p, cache_list)))
return 0; /* No more pages */
new->pfn = area->pfn + area->numpages;
new->numpages = quantity;
new->used = 1;
INIT_LIST_HEAD(&new->list);
list_add(&new->list, &area->list);
return new;
/* Free and exact size match, no need to divide. */
} else if (area->numpages == quantity && !area->used) {
area->used = 1;
return area;
}
}
/* No more pages */
return 0;
}
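/*
 * Split sketch for the dividing case above: a free area of N pages
 * serving a request for Q pages keeps its own descriptor and shrinks
 * to N - Q pages; the new area takes the top Q page frames:
 *
 *	[ free: pfn .. pfn+N-Q-1 ][ used: pfn+N-Q .. pfn+N-1 ]
 */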
void *alloc_page(int quantity)
{
struct page_area *p = get_free_page_area(quantity, &allocator, 0);
if (p)
return (void *)__pfn_to_addr(p->pfn);
else
return 0;
}
/*
* Helper to allocate a page using an internal page area cache. Returns
* a virtual address because these allocations are always internally referenced.
*/
void *alloc_page_using_cache(struct page_allocator *a, struct list_head *c)
{
struct page_area *p = get_free_page_area(1, a, c);
if (p)
return l4_map_helper((void *)__pfn_to_addr(p->pfn), 1);
else
return 0;
}
/*
* There's still free memory, but allocator ran out of page areas stored in
* dcaches. In this case, the ccache supplies a new page area, which is used to
* describe a page that stores a new dcache. If ccache is also out of page areas
* it adds the spare cache to ccache_list, uses that for the current allocation,
* and allocates a new spare cache for future use
*/
static struct mem_cache *new_dcache(struct page_allocator *p)
{
void *dpage; /* Page that keeps data cache */
void *spage; /* Page that keeps spare cache */
if((dpage = alloc_page_using_cache(p, &p->ccache_list)))
return mem_cache_init(dpage, PAGE_SIZE,
sizeof(struct page_area), 0);
/* If ccache is also full, add the spare page_area cache to ccache */
list_add(&p->spare->list, &p->ccache_list);
/* This must not fail now, and satisfy at least two page requests. */
BUG_ON(mem_cache_total_empty(p->spare) < 2);
BUG_ON(!(dpage = alloc_page_using_cache(p, &p->ccache_list)));
BUG_ON(!(spage = alloc_page_using_cache(p, &p->ccache_list)));
/* Initialise the new spare and return the new dcache. */
p->spare = mem_cache_init(spage, PAGE_SIZE, sizeof(struct page_area),0);
return mem_cache_init(dpage, PAGE_SIZE, sizeof(struct page_area), 0);
}
/*
* Only to be used at initialisation. Allocates memory caches that contain
* page_area elements by incrementing the free physical memory mark by
* PAGE_SIZE.
*/
static struct mem_cache *new_allocator_startup_cache(unsigned long *start)
{
struct page_area *area;
struct mem_cache *cache;
cache = mem_cache_init(l4_map_helper((void *)*start, 1), PAGE_SIZE,
sizeof(struct page_area), 0);
area = mem_cache_alloc(cache);
/* Initialising the dummy just for illustration */
area->pfn = __pfn(*start);
area->numpages = 1;
area->cache = cache;
INIT_LIST_HEAD(&area->list);
/* FIXME: Should I add this to the page area list? */
*start += PAGE_SIZE;
return cache;
}
/*
* All physical memory is tracked by a simple linked list implementation. A
* single list contains both used and free page_area descriptors. Each page_area
 * describes a contiguous region of physical pages, indicating its location by
 * its pfn.
*
* alloc_page() keeps track of all page-granuled memory, except the bits that
* were in use before the allocator initialised. This covers anything that is
* outside the @start @end range. This includes the page tables, first caches
* allocated by this function, compile-time allocated kernel data and text.
* Also other memory regions like IO are not tracked by alloc_page() but by
* other means.
*/
void init_page_allocator(unsigned long start, unsigned long end)
{
struct page_area *freemem;
struct mem_cache *dcache, *ccache;
INIT_LIST_HEAD(&allocator.dcache_list);
INIT_LIST_HEAD(&allocator.ccache_list);
INIT_LIST_HEAD(&allocator.page_area_list);
/* Primary cache list that stores page areas of regular data. */
dcache = new_allocator_startup_cache(&start);
list_add(&dcache->list, &allocator.dcache_list);
/* The secondary cache list that stores page areas of caches */
ccache = new_allocator_startup_cache(&start);
list_add(&ccache->list, &allocator.ccache_list);
/* Initialise first area that describes all of free physical memory */
freemem = mem_cache_alloc(dcache);
INIT_LIST_HEAD(&freemem->list);
freemem->pfn = __pfn(start);
freemem->numpages = __pfn(end) - freemem->pfn;
freemem->cache = dcache;
freemem->used = 0;
/* Add it as the first unused page area */
list_add(&freemem->list, &allocator.page_area_list);
/* Allocate and add the spare page area cache */
allocator.spare = mem_cache_init(l4_map_helper(alloc_page(1), 1),
PAGE_SIZE, sizeof(struct page_area),
0);
}
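/*
 * State right after initialisation (a sketch): two bootstrap pages are
 * taken from the bottom of [start, end) for the first dcache and ccache,
 * and one page for the spare cache is carved from the top of the free
 * area; the remainder is one big free page_area:
 *
 *	[dcache pg][ccache pg][.......... freemem ..........][spare pg]
 */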
/* Merges two page areas, frees area cache if empty, returns the merged area. */
struct page_area *merge_free_areas(struct page_area *before,
struct page_area *after)
{
struct mem_cache *c;
BUG_ON(before->pfn + before->numpages != after->pfn);
	BUG_ON(before->used || after->used);
BUG_ON(before == after);
before->numpages += after->numpages;
list_del(&after->list);
c = after->cache;
mem_cache_free(c, after);
	/* Recursively free the cache page */
	if (mem_cache_is_empty(c))
		BUG_ON(free_page(l4_unmap_helper(c, 1)) < 0);
return before;
}
static int find_and_free_page_area(void *addr, struct page_allocator *p)
{
struct page_area *area, *prev, *next;
/* First find the page area to be freed. */
	list_for_each_entry(area, &p->page_area_list, list)
if (__pfn_to_addr(area->pfn) == (unsigned long)addr &&
area->used) { /* Found it */
area->used = 0;
goto found;
}
return -1; /* Finished the loop, but area not found. */
found:
/* Now merge with adjacent areas, if possible */
	if (area->list.prev != &p->page_area_list) {
prev = list_entry(area->list.prev, struct page_area, list);
if (!prev->used)
area = merge_free_areas(prev, area);
}
	if (area->list.next != &p->page_area_list) {
next = list_entry(area->list.next, struct page_area, list);
if (!next->used)
area = merge_free_areas(area, next);
}
return 0;
}
int free_page(void *paddr)
{
return find_and_free_page_area(paddr, &allocator);
}


@@ -0,0 +1,36 @@
#ifndef __ALLOC_PAGE_H__
#define __ALLOC_PAGE_H__
#include <memcache/memcache.h>
/* List member to keep track of free and unused physical pages.
* Has PAGE_SIZE granularity */
struct page_area {
struct list_head list;
unsigned int used; /* Used or free */
unsigned int pfn; /* Base pfn */
unsigned int numpages; /* Number of pages this region covers */
struct mem_cache *cache;/* The cache used when freeing the page area for
* quickly finding where the area is stored. */
};
struct page_allocator {
/* Keep track of page area lists and allocated caches for page areas. */
struct list_head page_area_list;
/* Caches of page areas that refer to any kind of data */
struct list_head dcache_list;
/* Caches of page areas that refer to cache pages */
struct list_head ccache_list;
/* A spare cache to aid when both caches are full */
struct mem_cache *spare;
};
/* Initialises the page allocator */
void init_page_allocator(unsigned long start, unsigned long end);
/* Page allocation functions */
void *alloc_page(int quantity);
void *zalloc_page(int quantity);
int free_page(void *paddr);
#endif /* __ALLOC_PAGE_H__ */

tasks/libmem/run_tests.py Executable file

@@ -0,0 +1,110 @@
#!/usr/bin/python
import os
import shutil
from os.path import join
import sys
project_root = join(os.getcwd(), "../..")
source_root = os.path.join(project_root, 'src')
headers_root = os.path.join(project_root, 'include')
tests_run_root = os.path.join(os.getcwd(), 'tmp')
tools_root = os.getcwd()
init_state = "page_init.out"
exit_state = "page_exit.out"
SZ_10MB = 1024 * 1024 * 10
def power(x, y):
res = 1
for i in range(y):
		res = res * x
return res
def test_mm():
'''
Tries to set up meaningful input parameters for page_size, maximum allocation
size, total number of allocations, and total pages, and tests the memory allocator
in every one of these combinations. The parameter guessing is not great, but at least
some test cases are reasonable.
'''
page_sizes = [128, 256, 512, 1024, 2048, 4096, 8192]
max_alloc_sizes = [1, 10, 40, 50, 100, 200]
for page_size in page_sizes:
numpages = SZ_10MB / page_size
		# Add numpages/10, numpages/100 and numpages/1000 as candidate
		# sizes, skipping any that round down to zero.
		for i in range(1, 4):
			res = numpages / power(10, i)
			if res > 0:
				max_alloc_sizes.append(res)
for max_alloc_size in max_alloc_sizes:
if max_alloc_size >= numpages: # If a single allocation exceeds total, adjust.
max_alloc_size = numpages / 2
num_allocs = numpages / (max_alloc_size) * 2 * 2 / 3
cmd = "./test -a=p -n=%d -s=%d -fi=%s -fx=%s -ps=%d -pn=%d" % \
(num_allocs, max_alloc_size, join(tests_run_root, init_state),\
join(tests_run_root, exit_state), page_size, numpages)
print "num_allocs = %d, max_alloc_size = %d, page_size = %d, numpages = %d" % \
(num_allocs, max_alloc_size, page_size, numpages)
os.system(cmd)
#os.system("cat %s" % join(tests_run_root, init_state))
diffcmd = "diff " + join(tests_run_root, init_state) + " " + join(tests_run_root, exit_state)
if os.system(diffcmd) != 0:
print "Error: %s has failed.\n" % cmd
sys.exit(1)
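# Example of a command line test_mm() produces (illustrative values only):
#   ./test -a=p -n=204 -s=80 -fi=tmp/page_init.out -fx=tmp/page_exit.out -ps=4096 -pn=2560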
def test_km():
'''
Tries to set up meaningful input parameters for payload size, maximum allocation
size, total number of allocations, and total pages, and tests kmalloc
in every one of these combinations. The parameter guessing is not great, but at least
some test cases are reasonable.
'''
page_sizes = [4096, 8192]
max_alloc_sizes = [1, 10, 40, 50, 100, 200, 1024, 2048, 4096, 10000, 50000, 100000]
numpages = 1024
for page_size in page_sizes:
for max_alloc_size in max_alloc_sizes:
num_allocs = (numpages * page_size * 3) / (max_alloc_size * 2)
cmd = "./test -a=k -n=%d -s=%d -fi=%s -fx=%s -ps=%d -pn=%d" % \
(num_allocs, max_alloc_size, join(tests_run_root, init_state),\
join(tests_run_root, exit_state), page_size, numpages)
print "num_allocs = %d, max_alloc_size = %d, page_size = %d, numpages = %d" %\
(num_allocs, max_alloc_size, page_size, numpages)
diffcmd = "diff " + join(tests_run_root, init_state) + " " +\
join(tests_run_root, exit_state)
if os.system(diffcmd) != 0:
print "Error: %s has failed.\n" % cmd
sys.exit(1)
def test_mm_params(num_allocs, max_alloc_size, page_size, numpages, iterations):
for i in range(iterations):
cmd = "./test -a=p -n=%d -s=%d -fi=%s -fx=%s -ps=%d -pn=%d" % \
(num_allocs, max_alloc_size, join(tests_run_root, init_state),\
join(tests_run_root, exit_state), page_size, numpages)
print "num_allocs = %d, max_alloc_size = %d, page_size = %d, numpages = %d" % \
(num_allocs, max_alloc_size, page_size, numpages)
os.system(cmd)
#os.system("cat %s" % join(tests_run_root, init_state))
diffcmd = "diff " + join(tests_run_root, init_state) + " " + join(tests_run_root, exit_state)
if os.system(diffcmd) != 0:
print "Error: %s has failed.\n" % cmd
sys.exit(1)
def run_tests():
if os.path.exists(tests_run_root):
shutil.rmtree(tests_run_root)
os.mkdir(tests_run_root)
# for i in range (100):
#test_km()
test_mm()
#test_mm_params(10922, 10, 128, 81920, 50)
#test_km()
#test_mc()
if __name__ == '__main__':
run_tests()


@@ -0,0 +1,9 @@
# Inherit global environment
Import('env')
# The set of source files associated with this SConscript file.
src_local = ['main.c', 'test_kmalloc.c', 'test_memcache.c', 'test_allocpage.c', 'test_alloc_generic.c', 'debug.c', 'memory.c', 'clz.c']
obj = env.Object(src_local)
Return('obj')

tasks/libmem/tests/clz.c Normal file

@@ -0,0 +1,16 @@
#include <l4/macros.h>
#include <l4/types.h>
#include <l4/config.h>
/* Emulation of CLZ (count leading zeroes) instruction */
unsigned int __clz(unsigned int bitvector)
{
unsigned int x = 0;
while((!(bitvector & ((unsigned)1 << 31))) && (x < 32)) {
bitvector <<= 1;
x++;
}
return x;
}
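/*
 * Examples: __clz(0x80000000) == 0, __clz(1) == 31, and by this
 * implementation's convention __clz(0) == 32.
 */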

tasks/libmem/tests/clz.h Normal file

@@ -0,0 +1,7 @@
#ifndef __CLZ_H__
#define __CLZ_H__
unsigned int __clz(unsigned int bitvector);
#endif /* __CLZ_H__ */


@@ -0,0 +1,33 @@
#include "debug.h"
#include <stdio.h>
void print_page_area_list(struct page_allocator *p)
{
struct page_area *area;
list_for_each_entry (area, &p->page_area_list, list) {
printf("%-20s\n%-20s\n", "Page area:","-------------------------");
printf("%-20s %u\n", "Pfn:", area->pfn);
printf("%-20s %d\n", "Used:", area->used);
printf("%-20s %d\n\n", "Number of pages:", area->numpages);
}
}
void print_km_area(struct km_area *s)
{
printf("%-20s\n%-20s\n", "Subpage area:","-------------------------");
printf("%-20s 0x%lu\n", "Addr:", s->vaddr);
printf("%-20s 0x%lu\n", "Size:", s->size);
printf("%-20s %d\n", "Used:", s->used);
printf("%-20s %d\n\n", "Head_of_pages:", s->pg_alloc_pages);
}
void print_km_area_list(struct list_head *km_areas)
{
struct km_area *area;
list_for_each_entry (area, km_areas, list)
print_km_area(area);
}


@@ -0,0 +1,17 @@
#ifndef __DEBUG_H__
#define __DEBUG_H__
#include <kmalloc/kmalloc.h>
#include <mm/alloc_page.h>
#include <l4/lib/list.h>
#if defined(DEBUG)
#define dprintf printf
#else
#define dprintf(...)
#endif
void print_page_area_list(struct page_allocator *p);
void print_km_area_list(struct list_head *s);
void print_km_area(struct km_area *s);
#endif /* DEBUG_H */


@@ -0,0 +1,28 @@
#include "libl4.h"
unsigned long virt_to_phys(unsigned long addr)
{
return addr;
}
unsigned long phys_to_virt(unsigned long addr)
{
return addr;
}
u32 l4_getpid(unsigned int *a, unsigned int *b, unsigned int *c)
{
return 0;
}
u32 l4_unmap(unsigned long a, unsigned long b, u32 npages)
{
return 0;
}
u32 l4_map(unsigned long a, unsigned long b, u32 size, u32 flags, unsigned int tid)
{
return 0;
}


@@ -0,0 +1,17 @@
/*
* Mock-up l4 library definitions for host testing.
*
*/
#ifndef __TESTS_LIBL4_H__
#define __TESTS_LIBL4_H__
#include <l4/macros.h>
#include <l4/config.h>
#include <l4/types.h>
u32 l4_map(unsigned long phys, unsigned long virt, u32 size, u32 flags, u32 tid);
u32 l4_unmap(unsigned long a, unsigned long b, u32 npages);
u32 l4_getpid(unsigned int *a, unsigned int *b, unsigned int *c);
#endif

tasks/libmem/tests/main.c Normal file

@@ -0,0 +1,250 @@
#include <stdio.h>
#include <malloc.h>
#include <string.h>
#include <stdlib.h>
#include <l4/macros.h>
#include <l4/config.h>
#include <kmalloc/kmalloc.h>
#include <mm/alloc_page.h>
#include INC_SUBARCH(mm.h)
#include INC_ARCH(linker.h)
#include INC_PLAT(printascii.h)
#include INC_PLAT(offsets.h)
#include INC_GLUE(memlayout.h)
#include "tests.h"
#include "test_kmalloc.h"
#include "test_allocpage.h"
#include "test_memcache.h"
#include "clz.h"
#include "memory.h"
#include "libl4.h"
#include "debug.h"
unsigned int TEST_PHYSMEM_TOTAL_PAGES = 250;
unsigned int TEST_PHYSMEM_TOTAL_SIZE;
unsigned int PHYS_MEM_START;
unsigned int PHYS_MEM_END;
void *malloced_test_memory;
void memory_initialise(void)
{
init_page_allocator(PHYS_MEM_START, PHYS_MEM_END);
kmalloc_init();
}
/* Allocate memory from the host C library and use it as if it were
 * the physical memory available on the system.
 */
void alloc_test_memory()
{
TEST_PHYSMEM_TOTAL_SIZE = (PAGE_SIZE * TEST_PHYSMEM_TOTAL_PAGES);
if (!(malloced_test_memory = malloc(TEST_PHYSMEM_TOTAL_SIZE)))
printf("Host system out of memory.\n");
PHYS_MEM_START = (unsigned int)malloced_test_memory;
PHYS_MEM_END = PHYS_MEM_START + TEST_PHYSMEM_TOTAL_SIZE;
PHYS_MEM_START = page_align_up(PHYS_MEM_START);
PHYS_MEM_END = page_align(PHYS_MEM_END);
	/* Normally _end marks where the loaded kernel image
	 * ends in physical memory, so the system can start allocating
	 * physical memory from there. Because in our mock-up there's no
	 * used space in the malloc()'ed memory, _end is the same as the
	 * beginning of malloc()'ed memory.
	 */
_end = PHYS_MEM_START;
dprintf("Initialising physical memory\n");
dprintf("Initialising allocators:\n");
memory_initialise();
}
struct cmdline_opts {
char run_allocator;
int allocations;
int alloc_size_max;
int physmem_pages;
int page_size;
int no_of_pages;
char *finit_path;
char *fexit_path;
} options;
int check_options_validity(struct cmdline_opts *opts)
{
if (opts->allocations <= 0) {
printf("Invalid number of allocations: %d\n", opts->allocations);
return -1;
}
if (opts->no_of_pages <= 0) {
printf("Invalid number of pages: %d\n", opts->no_of_pages);
return -1;
}
if (opts->alloc_size_max <= 0) {
printf("Invalid alloc_size_max: %d\n", opts->alloc_size_max);
return -1;
}
if (opts->page_size <= 0) {
printf("Invalid page_size: %d\n", opts->page_size);
return -1;
}
return 0;
}
void print_options(struct cmdline_opts *opts)
{
dprintf("Running: %s\n",
((opts->run_allocator == 'p') ? "page allocator" :
((opts->run_allocator == 'k') ? "kmalloc/kfree" :
"memcache allocator")));
dprintf("Total allocations: %d\n", opts->allocations);
dprintf("Maximum allocation size: %d, 0x%x(hex)\n\n",
opts->alloc_size_max, opts->alloc_size_max);
dprintf("Initial state file: %s\n", opts->finit_path);
dprintf("Exit state file: %s\n", opts->fexit_path);
}
void display_help(void)
{
printf("Main:\n");
printf("\tUsage:\n");
printf("\tmain\t-a=<p>|<k>|<m> [-n=<number of allocations>] [-s=<maximum size for any allocation>]\n"
"\t\t[-fi=<file to dump init state>] [-fx=<file to dump exit state>]\n"
"\t\t[-ps=<page size>] [-pn=<total number of pages>]\n");
printf("\n");
}
int get_cmdline_opts(int argc, char *argv[], struct cmdline_opts *opts)
{
int parsed = 0;
memset(opts, 0, sizeof (struct cmdline_opts));
if (argc <= 1)
return -1;
for (int i = 1; i < argc; i++) {
if (argv[i][0] == '-' && argv[i][2] == '=') {
if (argv[i][1] == 'a') {
if (argv[i][3] == 'k' ||
argv[i][3] == 'm' ||
argv[i][3] == 'p') {
opts->run_allocator = argv[i][3];
parsed = 1;
}
}
if (argv[i][1] == 'n') {
opts->allocations = atoi(&argv[i][3]);
parsed = 1;
}
if (argv[i][1] == 's') {
opts->alloc_size_max = atoi(&argv[i][3]);
parsed = 1;
}
}
if (argv[i][0] == '-' && argv[i][1] == 'f'
&& argv[i][3] == '=') {
if (argv[i][2] == 'i') {
opts->finit_path = &argv[i][4];
parsed = 1;
}
if (argv[i][2] == 'x') {
opts->fexit_path = &argv[i][4];
parsed = 1;
}
}
if (argv[i][0] == '-' && argv[i][1] == 'p'
&& argv[i][3] == '=') {
if (argv[i][2] == 's') {
opts->page_size = atoi(&argv[i][4]);
parsed = 1;
}
if (argv[i][2] == 'n') {
opts->no_of_pages = atoi(&argv[i][4]);
parsed = 1;
}
}
}
if (!parsed)
return -1;
return 0;
}
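/*
 * Example invocation (illustrative, matching the option format above
 * and the usage in run_tests.py):
 *	./test -a=k -n=1000 -s=512 -fi=/tmp/ki.out -fx=/tmp/kx.out -ps=4096 -pn=1024
 */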
void get_output_files(FILE **out1, FILE **out2,
char *alloc_func_name, char *rootpath)
{
char pathbuf[150];
char *root = "/tmp/";
char *initstate_prefix = "test_initstate_";
char *endstate_prefix = "test_endstate_";
char *extension = ".out";
if (!rootpath)
rootpath = root;
/* File path manipulations */
sprintf(pathbuf, "%s%s%s%s", rootpath, initstate_prefix, alloc_func_name, extension);
*out1 = fopen(pathbuf,"w+");
sprintf(pathbuf, "%s%s%s%s", rootpath, endstate_prefix, alloc_func_name, extension);
*out2 = fopen(pathbuf, "w+");
return;
}
int main(int argc, char *argv[])
{
FILE *finit, *fexit;
int output_files = 0;
if (get_cmdline_opts(argc, argv, &options) < 0) {
display_help();
return 1;
}
print_options(&options);
if (check_options_validity(&options) < 0)
exit(1);
if (options.finit_path && options.fexit_path) {
finit = fopen(options.finit_path, "w+");
fexit = fopen(options.fexit_path, "w+");
output_files = 1;
}
if (options.page_size) {
PAGE_SIZE = options.page_size;
PAGE_MASK = PAGE_SIZE - 1;
PAGE_BITS = 32 - __clz(PAGE_MASK);
dprintf("Using: Page Size: %d\n", PAGE_SIZE);
dprintf("Using: Page Mask: 0x%x\n", PAGE_MASK);
dprintf("Using: Page Bits: %d\n", PAGE_BITS);
}
if (options.no_of_pages) {
dprintf("Using: Total pages: %d\n", options.no_of_pages);
TEST_PHYSMEM_TOTAL_PAGES = options.no_of_pages;
}
alloc_test_memory();
if (options.run_allocator == 'p') {
if (!output_files)
get_output_files(&finit, &fexit, "alloc_page", 0);
test_allocpage(options.allocations, options.alloc_size_max,
finit, fexit);
} else if (options.run_allocator == 'k') {
if (!output_files)
get_output_files(&finit, &fexit, "kmalloc", 0);
test_kmalloc(options.allocations, options.alloc_size_max,
finit, fexit);
} else if (options.run_allocator == 'm') {
if (!output_files)
get_output_files(&finit, &fexit, "memcache", 0);
test_memcache(options.allocations, options.alloc_size_max,
finit, fexit, 1);
} else {
printf("Invalid allocator option.\n");
}
free((void *)malloced_test_memory);
fclose(finit);
fclose(fexit);
return 0;
}


@@ -0,0 +1,9 @@
#include <l4/macros.h>
#include <l4/config.h>
#include <l4/types.h>
#include INC_GLUE(memory.h)
unsigned int PAGE_SIZE = TEST_PAGE_SIZE;
unsigned int PAGE_MASK = TEST_PAGE_MASK;
unsigned int PAGE_BITS = TEST_PAGE_BITS;


@@ -0,0 +1,216 @@
/*
* Generic random allocation/deallocation test
*
* Copyright 2007 (C) Bahadir Balban
*
*/
#include <l4/macros.h>
#include <l4/config.h>
#include <l4/types.h>
#include INC_GLUE(memory.h)
#include <l4/lib/list.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <string.h>
#include "test_alloc_generic.h"
#include "debug.h"
void print_test_state(unsigned int title,
print_alloc_state_t print_allocator_state)
{
switch (title) {
case TEST_STATE_BEGIN:
printf("=================\n"
"===== BEGIN =====\n"
"=================\n\n");
break;
case TEST_STATE_MIDDLE:
printf("==================\n"
"===== MIDDLE =====\n"
"==================\n\n");
break;
case TEST_STATE_END:
printf("===========\n"
"=== END ===\n"
"===========\n\n");
break;
case TEST_STATE_ERROR:
printf("=================\n"
"===== ERROR =====\n"
"=================\n\n");
break;
default:
printf("Title error.\n");
}
print_allocator_state();
}
void get_output_filepaths(FILE **out1, FILE **out2,
char *alloc_func_name)
{
char pathbuf[150];
char *rootpath = "/tmp/";
char *initstate_prefix = "test_initstate_";
char *endstate_prefix = "test_endstate_";
char *extention = ".out";
/* File path manipulations */
sprintf(pathbuf, "%s%s%s%s", rootpath, initstate_prefix, alloc_func_name, extention);
*out1 = fopen(pathbuf,"w+");
sprintf(pathbuf, "%s%s%s%s", rootpath, endstate_prefix, alloc_func_name, extention);
*out2 = fopen(pathbuf, "w+");
return;
}
/* This function is at the heart of generic random allocation testing.
* It is made as simple as possible, and can be used for testing all
* allocators. It randomly allocates/deallocates data and prints out
* the outcome of the action. Here are a few things it does and doesn't
* do:
* - It does not test false input on the allocators, e.g. attempting
* to free an address that hasn't been allocated, or attempting to
* free address 0.
* - It does capture and compare initial and final states of the
* allocators' internal structures after all allocations are freed.
* This is done by comparing two files filled with allocator state
* by functions supplied by the allocators themselves.
* - It expects the allocator NOT to run out of memory.
*/
int
test_alloc_free_random_order(const int MAX_ALLOCATIONS,
const int ALLOC_SIZE_MAX,
alloc_func_t alloc,
free_func_t free,
print_alloc_state_t print_allocator_state,
FILE *state_init_file, FILE *state_end_file)
{
	/* full_state_last is the index of the last occupied slot in
	 * full_state[]; random deallocation picks an index in the range
	 * [0, full_state_last]. -1 means no allocations are outstanding. */
int random_size;
int random_action;
int random_index;
int alloc_so_far = 0;
int full_state_last = -1;
int halfway_through = 0;
FILE * const default_stdout = stdout;
/* Memory pointers */
void *mem[MAX_ALLOCATIONS];
/* Each element keeps track of one currently full index number */
int full_state[MAX_ALLOCATIONS];
/* Check arguments first */
if (!MAX_ALLOCATIONS || !ALLOC_SIZE_MAX || !alloc || !free
|| !print_allocator_state || !state_init_file || !state_end_file) {
printf("Invalid arguments to %s()\n", __FUNCTION__);
return 1;
}
memset(mem, 0, MAX_ALLOCATIONS * sizeof(void *));
memset(full_state, 0, MAX_ALLOCATIONS * sizeof(int));
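	/*
	 * Capture the allocator's pristine state by redirecting stdout into
	 * the init-state file. Assigning to stdout relies on glibc exposing
	 * it as a plain FILE * variable; ISO C does not guarantee stdout is
	 * assignable, so a portable version would use freopen() or pass the
	 * FILE * down to the print callback instead.
	 */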
	stdout = state_init_file;
	print_test_state(TEST_STATE_BEGIN, print_allocator_state);
	stdout = default_stdout;
/* Randomly either allocate/deallocate at a random
* index, of random size */
srand(time(0));
while (1) {
if (alloc_so_far < (MAX_ALLOCATIONS / 2)) {
/* Give more chance to allocations at the beginning */
if ((rand() % 4) == 0) /* 1/4 chance */
random_action = FREE;
else /* 3/4 chance */
random_action = ALLOCATE;
} else {
if (!halfway_through) {
#if defined (DEBUG)
print_test_state(TEST_STATE_MIDDLE,
print_allocator_state);
#endif
halfway_through = 1;
}
			/* Give more chance to freeing after halfway through */
if ((rand() % 3) == 0) /* 1/3 chance */
random_action = ALLOCATE;
else /* 2/3 chance */
random_action = FREE;
}
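		/* Net effect: the slot array fills with a 3/4 allocation bias,
		 * then drains with a 2/3 free bias; the loop exits once all
		 * MAX_ALLOCATIONS allocations have been made and freed. */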
		random_size = (rand() % ALLOC_SIZE_MAX) + 1;
if (random_action == ALLOCATE) {
if (alloc_so_far < MAX_ALLOCATIONS) {
alloc_so_far++;
for (int i = 0; i < MAX_ALLOCATIONS; i++) {
if (mem[i] == 0) { // Find the first empty slot.
int allocation_error =
((mem[i] = alloc(random_size)) <= 0);
dprintf("%-12s%-8s%-12p%-8s%-10d\n",
"alloc:", "addr:", mem[i],
"size:", random_size);
if (allocation_error) {
print_test_state(TEST_STATE_ERROR,
print_allocator_state);
if (mem[i] < 0) {
printf("Error: alloc() returned negative value\n");
BUG();
} else if (mem[i] == 0) {
printf("Error: Allocator is out of memory.\n");
return 1;
}
}
full_state_last++;
full_state[full_state_last] = i;
break;
}
}
} else
random_action = FREE;
}
if (random_action == FREE) {
			/* All are free, can't free any more */
			if (full_state_last < 0)
				continue;
			/* Pick any occupied slot, full_state_last included */
			random_index = rand() % (full_state_last + 1);
			if (mem[full_state[random_index]] == 0)
				BUG();
if (free(mem[full_state[random_index]]) < 0)
BUG();
dprintf("%-12s%-8s%-12p\n","free:",
"addr:", mem[full_state[random_index]]);
mem[full_state[random_index]] = 0;
/* Fill in the empty gap with last element */
full_state[random_index] = full_state[full_state_last];
			/* Last element now fills the gap
			 * (somewhere in between first and last) */
full_state[full_state_last] = 0;
/* One less in the number of full items */
full_state_last--;
}
/* Check that all allocations and deallocations took place */
if (alloc_so_far == MAX_ALLOCATIONS && full_state_last < 0) {
for (int i = 0; i < MAX_ALLOCATIONS; i++)
BUG_ON(full_state[i] != 0); // A final sanity check.
break;
}
}
	stdout = state_end_file;
	print_test_state(TEST_STATE_END, print_allocator_state);
	stdout = default_stdout;
return 0;
}
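/*
 * A minimal usage sketch (kept out of the build with #if 0): wiring a
 * hypothetical allocator into the generic test above. my_alloc, my_free
 * and my_print_state are illustrative names, not part of this tree; any
 * allocator only needs to match the alloc_func_t / free_func_t /
 * print_alloc_state_t signatures from test_alloc_generic.h.
 */
#if 0
static void *my_alloc(int size)  { return malloc(size); }
static int my_free(void *addr)   { free(addr); return 0; }
static void my_print_state(void) { printf("(no internal state)\n"); }

static int run_example(void)
{
	FILE *init, *end;

	get_output_filepaths(&init, &end, "example");
	if (!init || !end)
		return 1;
	/* 32 slots, allocation sizes in [1, 128] */
	if (test_alloc_free_random_order(32, 128, my_alloc, my_free,
					 my_print_state, init, end))
		return 1;
	fclose(init);
	fclose(end);
	return 0;
}
#endif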

@@ -0,0 +1,29 @@
#ifndef __TEST_ALLOC_GENERIC_H__
#define __TEST_ALLOC_GENERIC_H__
#include <stdio.h>
enum test_state_title {
TEST_STATE_BEGIN = 0,
TEST_STATE_MIDDLE,
TEST_STATE_END,
TEST_STATE_ERROR
};
typedef void (*print_alloc_state_t)(void);
typedef void *(*alloc_func_t)(int size);
typedef int (*free_func_t)(void *addr);
enum alloc_action {
FREE = 0,
ALLOCATE = 1,
};
void get_output_filepaths(FILE **out1, FILE **out2,
char *alloc_func_name);
int test_alloc_free_random_order(const int MAX_ALLOCATIONS,
const int ALLOC_SIZE_MAX,
alloc_func_t alloc, free_func_t free,
print_alloc_state_t print_allocator_state,
FILE *init_state, FILE *exit_state);
#endif /* __TEST_ALLOC_GENERIC_H__ */

@@ -0,0 +1,85 @@
/*
* Testing code for the page allocator.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <l4/macros.h>
#include <l4/config.h>
#include <l4/types.h>
#include <l4/lib/list.h>
#include INC_GLUE(memory.h)
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <string.h>
#include "test_allocpage.h"
#include "test_alloc_generic.h"
#include "debug.h"
unsigned int PAGE_ALLOCATIONS = 30;
unsigned int PAGE_ALLOC_SIZE_MAX = 8;
extern struct page_allocator allocator;
void print_page_area(struct page_area *a, int areano)
{
printf("Area starts @: 0x%lu, %s, numpages: %d\n",
__pfn_to_addr(a->pfn),
(a->used) ? "used" : "unused", a->numpages);
return;
}
void print_areas(struct list_head *area_head)
{
struct page_area *cur;
int areano = 1;
printf("Page areas:\n-------------\n");
list_for_each_entry(cur, area_head, list)
print_page_area(cur, areano++);
}
void print_cache(struct mem_cache *c, int cacheno)
{
printf("Cache %d state:\n-------------\n", cacheno);
printf("Total: %d\n", c->total);
printf("Free: %d\n", c->free);
printf("Start: 0x%x\n", c->start);
}
void print_caches(struct list_head *cache_head)
{
int caches = 1;
struct mem_cache *cur;
list_for_each_entry(cur, cache_head, list)
print_cache(cur, caches++);
}
void print_page_allocator_state(void)
{
print_areas(&allocator.page_area_list);
printf("Data Cache:\n--------\n");
print_caches(&allocator.dcache_list);
printf("Cache Cache:\n----------\n");
print_caches(&allocator.ccache_list);
}
/* FIXME: with the current default parameters (allocations = 30, size_max = 8),
 * the BUG() at line 280 of alloc_page.c fires for a reason not yet
 * understood. Find out why.
 */
void test_allocpage(int page_allocations, int page_alloc_size_max,
FILE *init_state, FILE *exit_state)
{
//if (!page_allocations)
// page_allocations = PAGE_ALLOCATIONS;
//if (!page_alloc_size_max)
// page_alloc_size_max = PAGE_ALLOC_SIZE_MAX;
dprintf("\nPAGE ALLOCATOR TEST:====================================\n\n");
test_alloc_free_random_order(page_allocations, page_alloc_size_max,
alloc_page, free_page,
print_page_allocator_state,
init_state, exit_state);
}

@@ -0,0 +1,13 @@
#ifndef __TEST_ALLOCPAGE_H__
#define __TEST_ALLOCPAGE_H__
#include <stdio.h>
#include <mm/alloc_page.h>
#include "tests.h"
void test_allocpage(int num_allocs, int alloc_max, FILE *init, FILE *exit);
void print_page_area(struct page_area *a, int areano);
void print_caches(struct list_head *cache_head);
void print_cache(struct mem_cache *c, int cacheno);
void print_areas(struct list_head *area_head);
#endif

@@ -0,0 +1,42 @@
/*
* Testing code for the kmalloc allocator.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <l4/macros.h>
#include <l4/config.h>
#include <l4/types.h>
#include <l4/lib/list.h>
#include INC_GLUE(memory.h)
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include "test_alloc_generic.h"
#include "test_allocpage.h"
#include "debug.h"
#include "tests.h"
extern struct list_head km_area_start;
void print_kmalloc_state(void)
{
print_km_area_list(&km_area_start);
}
void test_kmalloc(int kmalloc_allocations, int kmalloc_alloc_size_max,
FILE *init_state, FILE *exit_state)
{
unsigned int KMALLOC_ALLOCATIONS = 20;
unsigned int KMALLOC_ALLOC_SIZE_MAX = (PAGE_SIZE * 3);
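	/* With these defaults the test makes 20 allocations of up to three
	 * pages each, so kmalloc is exercised both below and above a single
	 * PAGE_SIZE. */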
if (!kmalloc_allocations)
kmalloc_allocations = KMALLOC_ALLOCATIONS;
if (!kmalloc_alloc_size_max)
kmalloc_alloc_size_max = KMALLOC_ALLOC_SIZE_MAX;
test_alloc_free_random_order(kmalloc_allocations, kmalloc_alloc_size_max,
kmalloc, kfree, print_kmalloc_state,
init_state, exit_state);
}

@@ -0,0 +1,8 @@
#ifndef __TEST_KMALLOC_H__
#define __TEST_KMALLOC_H__
#include <stdio.h>
#include <kmalloc/kmalloc.h>
void test_kmalloc(int num_allocs, int allocs_max, FILE *initstate, FILE *exitstate);
#endif

@@ -0,0 +1,115 @@
/*
* Testing code for the memcache structure.
*
* Copyright (C) 2007 Bahadir Balban
*/
#include <time.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <memcache/memcache.h>
#include "test_memcache.h"
#include "test_alloc_generic.h"
#include "debug.h"
#include "tests.h"
#include <l4/macros.h>
#include <l4/config.h>
#include <l4/types.h>
#include <l4/lib/list.h>
#include INC_GLUE(memory.h)
unsigned int MEM_CACHE_SIZE;
struct mem_cache *this;
void *buffer;
void *mem_cache_alloc_wrapped(int size)
{
return mem_cache_alloc(this);
}
int mem_cache_free_wrapped(void *addr)
{
return mem_cache_free(this, addr);
}
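/*
 * These wrappers adapt the two-argument memcache API to the generic
 * alloc_func_t/free_func_t signatures; the cache under test is carried
 * in the file-scope `this' pointer, and the size argument is ignored
 * because a memcache hands out fixed-size items only.
 */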
void print_memcache_state(void)
{
printf("%-15s%d\n","Total:", this->total);
printf("%-15s%d\n","Free:", this->free);
printf("Bitmap has %d words:\n", BITWISE_GETWORD(this->total) + 1);
for (int i = 0; i <= BITWISE_GETWORD(this->total); i++)
printf("0x%x\n", this->bitmap[i]);
}
int test_memcache_init_aligned(int *items_max, int item_size)
{
if (item_size * 10 > MEM_CACHE_SIZE)
MEM_CACHE_SIZE = item_size * 10;
if (!(buffer = calloc(1, MEM_CACHE_SIZE))) {
printf("System out of memory.\n");
BUG();
}
if ((this = mem_cache_init(buffer, MEM_CACHE_SIZE,
item_size, 1)) == 0) {
printf("Unable to initialise cache.\n");
return -1;
}
*items_max = mem_cache_total_empty(this);
printf("\nMEMCACHE TEST: ALIGNED ELEMENTS\n==========================\n");
printf("%-30s%d\n", "Item size:", item_size);
printf("%-30s0x%x\n", "Cache occupied space:", MEM_CACHE_SIZE);
printf("%-30s%d\n","Total items in cache:", *items_max);
printf("%-30s0x%x\n","Total items space:", (*items_max * item_size));
return 0;
}
int test_memcache_init(int *items_max, int item_size)
{
if (item_size * 10 > MEM_CACHE_SIZE)
MEM_CACHE_SIZE = item_size * 10;
printf("%s: Allocating cache memory.\n",__FUNCTION__);
if (!(buffer = calloc(1, MEM_CACHE_SIZE))) {
printf("System out of memory.\n");
BUG();
}
if ((this = mem_cache_init(buffer, MEM_CACHE_SIZE,
item_size, 0)) == 0) {
printf("Unable to initialise cache.\n");
return -1;
}
*items_max = mem_cache_total_empty(this);
printf("\nMEMCACHE TEST:\n========================\n");
printf("%-30s%d\n", "Item size:", item_size);
printf("%-30s0x%x\n", "Cache occupied space:", MEM_CACHE_SIZE);
printf("%-30s%d\n","Total items in cache:", *items_max);
printf("%-30s0x%x\n","Total items space:", (*items_max * item_size));
return 0;
}
int test_memcache(int items_max, int item_size, FILE *init_state, FILE *exit_state, int aligned)
{
const unsigned int TEST_CACHE_ITEM_SIZE = 5;
MEM_CACHE_SIZE = PAGE_SIZE * 5;
if (!item_size)
item_size = TEST_CACHE_ITEM_SIZE;
/* items_max value is ignored and overwritten because caches have fixed size. */
test_memcache_init(&items_max, item_size);
test_alloc_free_random_order(items_max, /* unused */ 2, mem_cache_alloc_wrapped,
mem_cache_free_wrapped, print_memcache_state,
init_state, exit_state);
	free(buffer);
	if (aligned) {
		test_memcache_init_aligned(&items_max, item_size);
		test_alloc_free_random_order(items_max, /* unused */ 2,
					     mem_cache_alloc_wrapped,
					     mem_cache_free_wrapped,
					     print_memcache_state,
					     init_state, exit_state);
		/* This second buffer came from test_memcache_init_aligned() */
		free(buffer);
	}
return 0;
}

@@ -0,0 +1,10 @@
#ifndef __TEST_MEMCACHE_H__
#define __TEST_MEMCACHE_H__
#include <stdio.h>
#include <memcache/memcache.h>
int test_memcache(int num_alloc, int alloc_size_max, FILE *initstate, FILE *exitstate, int aligned);
#endif /* __TEST_MEMCACHE_H__ */

@@ -0,0 +1,21 @@
#ifndef __TESTS_H__
#define __TESTS_H__
/* Mock-up physical memory */
extern unsigned int TEST_PHYSMEM_TOTAL_PAGES;
extern unsigned int TEST_PHYSMEM_TOTAL_SIZE;
/* Allocator test */
extern unsigned int PAGE_ALLOCATIONS;
extern unsigned int PAGE_ALLOC_SIZE_MAX;
/* Memcache test */
extern unsigned int MEMCACHE_ALLOCS_MAX;
extern unsigned int TEST_CACHE_ITEM_SIZE;
/* Kmalloc */
extern unsigned int KMALLOC_ALLOCATIONS;
extern unsigned int KMALLOC_ALLOC_SIZE_MAX;
#endif /* __TESTS_H__ */