/* mymalloc.c — custom memory allocator: malloc/calloc/free built on mmap,
 * tracking blocks in a doubly linked list protected by a pthread mutex. */
#define _DEFAULT_SOURCE
#define _BSD_SOURCE
#include <malloc.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <sys/mman.h>
#include <pthread.h>
// Include any other headers we need here

// NOTE: You should NOT include <stdlib.h> in your final implementation

#include <debug.h> // definition of debug_printf

// global var for head of linked list
struct bmem *head = NULL; 

/* Design Decision:
 * Linked List, bmem = block of memory
 * Fields:
 * size = size of heap (design decision: includes header)
 * next = next block of memory
 * free = is this block free?
 */
typedef struct bmem {
	size_t size; // keep track of size 
	struct bmem *prev; // keep track of prev block of mem in list
	struct bmem *next; // keep track of next block of mem in list
	bool free; // has this block been freed?
} bmem;

// constant val for size of memory block (used in sbrk)
#define BMEM_SIZE sizeof(struct bmem)

// constant val for page size 
#define PAGE_SIZE sysconf(_SC_PAGE_SIZE)

// lock for thread-safe allocation
pthread_mutex_t prot = PTHREAD_MUTEX_INITIALIZER;

/* Iterate though linked list to get the last
 * element in the list. */
/* Walk the list from head and return its tail (last) block,
 * or NULL when the list is empty. */
bmem *get_prev() {
	bmem *tail = head;
	if (tail == NULL) {
		return NULL;
	}
	while (tail->next != NULL) {
		tail = tail->next;
	}
	return tail;
}

/** Splits the page, by setting up the leftover as a new memory block
 * and adding it to our free list **/
/** Splits a block: shrink it to the requested payload (plus header) and
 * turn the remainder into a new free block linked right after it.
 * Caller guarantees block->size >= s + 2*BMEM_SIZE + 8 so the remainder
 * can hold its own header and some payload. **/
void split(bmem *block, size_t s) {
	pthread_mutex_lock(&prot);
	size_t remainder = block->size - (s + BMEM_SIZE);
	// block shrinks to just the requested size + header
	block->size = s + BMEM_SIZE;
	// the leftover starts immediately after the shrunken block
	bmem *leftover_b = (void *)block + block->size;
	leftover_b->next = block->next;
	leftover_b->prev = block;
	leftover_b->free = true;
	leftover_b->size = remainder;
	// BUG FIX: the old successor's prev must be re-aimed at the new
	// block, otherwise backward traversal (e.g. coalesce) skips it
	if (leftover_b->next) {
		leftover_b->next->prev = leftover_b;
	}
	block->next = leftover_b;
	pthread_mutex_unlock(&prot);
}

/* Initialize the head on the first go-around, using the
 * requested size and then determine if we can split the head. */
/* One-time setup of the list head: map a page for the first small
 * request, then split off the tail of the page when it is big enough
 * to reuse. Leaves head == NULL if the mapping fails. */
void initialize_head(size_t s) {
	pthread_mutex_lock(&prot);
	void *mem = mmap(NULL, s + BMEM_SIZE, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	// BUG FIX: mmap reports failure with MAP_FAILED ((void *)-1), not
	// NULL; the old code dereferenced the failed value unconditionally
	if (mem == MAP_FAILED) {
		pthread_mutex_unlock(&prot);
		return;
	}
	head = mem;
	// mmap length is rounded up to a whole page, so with s < PAGE_SIZE
	// the entire page is usable by this block
	head->size = PAGE_SIZE;
	head->prev = NULL;
	head->next = NULL;
	head->free = false;
	pthread_mutex_unlock(&prot);
	// is the leftover enough for a header + at least 8 bytes of payload?
	if (head->size - (s + BMEM_SIZE) >= (BMEM_SIZE + 8)) {
		split(head, s);
	}
}
/* Return next free memory block that doesn't 
 * exceed size.
 * NULL if we cannot find any blocks. */
/* Return the first free block whose payload capacity (size minus
 * header) can hold s bytes, marking it in-use before returning.
 * Initializes the list head on the very first call.
 * Returns NULL if no suitable block exists.
 * NOTE(review): head is probed without the lock held — racy if two
 * threads perform the first allocation concurrently; confirm intent.
 * NOTE(review): the lock is released before split() (which re-locks);
 * the block is already marked in-use so no other thread should claim
 * it, but the split itself is not atomic with the claim — verify. */
bmem *find_free_block(size_t s) {
	if (head == NULL) {
		// initialize the head 
		initialize_head(s);
		return head;
	}
	// otherwise, we already have a block setup, so traverse
	bmem *ptr = head;
	pthread_mutex_lock(&prot);
	while(ptr != NULL) {
		// check whether we have enough memory to allocate 
		if((ptr->size - BMEM_SIZE) >= s && ptr->free == true) {
			// claim the block before dropping the lock
			ptr->free = false;
			pthread_mutex_unlock(&prot);
			// split only when the leftover fits a header + 8 bytes
			if (ptr->size - (s + BMEM_SIZE) >= (BMEM_SIZE + 8)) {
				split(ptr, s);
			}
			return ptr;
		}
		ptr = ptr->next;
	}
	pthread_mutex_unlock(&prot);
	return NULL;
}

/* Allocate memory of size s by creating a new block,
 * setting appropriate fields, and linking it to our
 * existing list.
 * Returns NULL if it cannot create more memory*/
/* Allocate a fresh page for a small request, link it to the tail of
 * the list, and split off any reusable remainder.
 * Returns NULL if the kernel cannot supply more memory. */
bmem *create_next_block(size_t s) {
	pthread_mutex_lock(&prot);
	// anonymous private mapping, both readable and writable
	bmem *block = mmap(NULL, s + BMEM_SIZE, PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	// BUG FIX: the old code dereferenced block (block->size) after the
	// lock even when mmap returned MAP_FAILED, and returned MAP_FAILED
	// instead of the NULL that mymalloc checks for
	if (block == MAP_FAILED) {
		pthread_mutex_unlock(&prot);
		return NULL;
	}
	bmem *prev = get_prev();
	// link the new block onto the tail; the list is normally non-empty
	// by the time this runs, but guard against a NULL tail anyway
	if (prev != NULL) {
		prev->next = block;
	}
	// instantiate block: mmap rounds the mapping up to a full page
	block->size = PAGE_SIZE;
	block->prev = prev;
	block->next = NULL;
	block->free = false;
	pthread_mutex_unlock(&prot);
	// after shrinking, can we allocate a big enough leftover?
	if (block->size - (s + BMEM_SIZE) >= (BMEM_SIZE + 8)) {
		split(block, s);
	}
	return block;
}

/** Allocates necessary number of pages with mmap and 
 * returns pointer to block's user memory **/
/** Maps enough whole pages for a large request (s + header rounded up
 * to a page multiple), links the block to the list tail, and returns
 * it. Returns NULL if the mapping fails.
 * The incoming block parameter is overwritten (kept for interface
 * compatibility with existing callers). **/
bmem *allocate_pages(size_t s, bmem *block) {
	// size_t arithmetic avoids int truncation on very large requests
	size_t total = s + BMEM_SIZE;
	size_t pages = total / PAGE_SIZE;
	if (total % PAGE_SIZE != 0) {
		pages++;
	}
	pthread_mutex_lock(&prot);
	block = mmap(NULL, pages * PAGE_SIZE, PROT_READ | PROT_WRITE,
		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	// BUG FIX: propagate failure as NULL (callers never checked for
	// MAP_FAILED) and never install a failed mapping as the list head
	if (block == MAP_FAILED) {
		pthread_mutex_unlock(&prot);
		return NULL;
	}
	bmem *prev = get_prev();
	if (prev != NULL) {
		// link the blocks
		prev->next = block;
	}
	// instantiate block
	block->size = pages * PAGE_SIZE;
	block->prev = prev;
	block->next = NULL;
	block->free = false;
	// first-ever allocation may land here; establish the head
	if (head == NULL) {
		head = block;
	}
	pthread_mutex_unlock(&prot);
	return block;
}

/* Custom malloc method: allocates memory and returns pointer
 * to this new space, or returns NULL if there isn't enough memory */
/* Custom malloc: returns a pointer to s usable bytes, or NULL when out
 * of memory. Small requests (payload < one page) are served from the
 * free list; larger requests get dedicated whole pages. */
void *mymalloc(size_t s) {
	assert(s > 0);
	// BUG FIX: block was read uninitialized when passed to
	// allocate_pages on the large-request path (undefined behavior)
	bmem *block = NULL;
	// For requests of size < PAGE_SIZE - BMEM_SIZE
	if (s < PAGE_SIZE - BMEM_SIZE) {
		// reuse an existing free block when possible
		block = find_free_block(s);
		if (!block) {
			// none free: ask the kernel for another page
			block = create_next_block(s);
		}
	} else {
		// requests of size >= PAGE_SIZE - BMEM_SIZE
		block = allocate_pages(s, block);
	}
	// BUG FIX: the large path never checked for allocation failure
	// before computing block + 1
	if (!block) {
		// We are out of memory
		return NULL;
	}
	debug_printf("malloc %zu bytes\n", s);
	// user memory starts immediately after the header
	return block + 1;
}

/* Custom calloc method: clear the memory
 * and return pointer */
/* Custom calloc: allocate nmemb * s bytes of zeroed memory.
 * Returns NULL on multiplication overflow or when out of memory. */
void *mycalloc(size_t nmemb, size_t s) {
	// BUG FIX: reject nmemb * s overflow, which would otherwise wrap
	// and hand back a buffer far smaller than the caller expects
	if (s != 0 && nmemb > SIZE_MAX / s) {
		return NULL;
	}
	size_t total = nmemb * s;
	void *p = mymalloc(total);
	if (!p) {
		// We are out of memory
		// if we get null from mymalloc
		return NULL;
	}
	memset(p, 0, total); // set memory to 0
	debug_printf("calloc %zu bytes\n", s);
	return p;
}

/* Attempts to coalesce blocks if needed:
 * whenever two blocks in the free list 
 * form a continuous area of memory, 
 * they should be merged into one block  */
/* Merge a free block with its physically adjacent free neighbors
 * (forward merge first, then backward), so contiguous free memory
 * becomes one block. Returns the block that survives the merge.
 * BUG FIX: the function is declared to return void * but had no
 * return statement at all — using its result was undefined behavior. */
void *coalesce(bmem *block) {
	// forward merge: next list entry is free and contiguous in memory
	if (block->next && block->next->free && ((void*)block + block->size) == block->next) {
		block->size += block->next->size;
		block->next = block->next->next;
		if (block->next) {
			block->next->prev = block;
		}
	}
	// backward merge: prev list entry is free and contiguous in memory
	if (block->prev && block->prev->free && ((void*)block->prev + block->prev->size) == block) {
		block->prev->size += block->size;
		block->prev->next = block->next;
		if (block->next) {
			block->next->prev = block->prev;
		}
		// the surviving block is the predecessor
		block = block->prev;
	}
	return block;
}

/* Unlink the given block so we no longer have
 * any pointers to it. */
/* Mark the block free and detach it from the doubly linked list,
 * fixing up the head and both neighbors' pointers. */
void unlink_block(bmem *bptr) {
	bmem *before = bptr->prev;
	bmem *after = bptr->next;
	bptr->free = true;
	// splice the block out in both directions
	if (before != NULL) {
		before->next = after;
	}
	if (after != NULL) {
		after->prev = before;
	}
	// advance the head when unlinking the first block
	if (bptr == head) {
		head = after;
	}
}

/* Determine whether the memory given is
 * ours to free before doing so.*/
/* Scan the list starting at temp and report whether bptr is one of
 * our blocks — i.e. memory this allocator handed out and may free. */
bool check_free(bmem *temp, bmem *bptr) {
	for (bmem *cur = temp; cur != NULL; cur = cur->next) {
		if (cur == bptr) {
			return true;
		}
	}
	return false;
}


/* Custom free method: frees the memory
 * blocks previously allocated to heap. */
/* Custom free: return a block to the free list (small blocks are
 * coalesced with adjacent free neighbors) or unmap it entirely
 * (page-sized and larger blocks). Ignores NULL and pointers this
 * allocator never handed out. */
void myfree(void *ptr) {
	if (!ptr) return;
	// step back from the user pointer to the block header
	bmem *bptr = (bmem*) (ptr) - 1;
	pthread_mutex_lock(&prot);
	// BUG FIX: the ownership scan used to run before taking the lock,
	// racing with concurrent list mutation by other threads
	if (!check_free(head, bptr)) {
		pthread_mutex_unlock(&prot);
		return;
	}
	size_t freed_s = bptr->size;
	assert(bptr->free == false); // double free is a caller bug
	if (bptr->size < PAGE_SIZE) {
		bptr->free = true;
		// merge with adjacent free blocks if able
		coalesce(bptr);
	}
	else {
		unlink_block(bptr);
		// whole-page blocks go straight back to the kernel
		int res = munmap(bptr, freed_s);
		assert(res == 0);
	}
	pthread_mutex_unlock(&prot);
	debug_printf("Freed %zu bytes\n", freed_s);
}