/* kern/vm/vm.c */
#include <types.h>
#include <kern/errno.h>
#include <lib.h>
#include <proc.h>
#include <current.h>
#include <addrspace.h>
#include <vm.h>
#include <spinlock.h>
#include <synch.h>
#include <spl.h>
#include <mips/tlb.h>

// Same as dumbvm, may want to change later
// Under dumbvm, always have 72k of user stack
// (this must be > 64K so argument blocks of size ARG_MAX will fit)
#define VM_STACKPAGES    18 

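/*
 * The coremap is an array with one entry per physical page frame, placed at
 * the first page-aligned address past the kernel and indexed by physical
 * page number.  The entry type is defined in a header elsewhere in this
 * tree; based on how it is used below, it is assumed to carry at least:
 *
 *   struct coremap_entry {
 *       unsigned block_size_pages;  // length of an allocation, recorded in its first entry
 *       int state;                  // COREMAP_STATE_FREE / _RESERVED / _INUSE
 *   };
 */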
struct coremap_entry *coremap_first_entry;
static paddr_t coremap_startaddr, coremap_endaddr;

// Spinlocks do not require kmalloc; initialize statically
static struct spinlock coremap_lock = SPINLOCK_INITIALIZER;

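/*
 * coremap_bootstrap: set up the coremap itself.  Queries the size of RAM and
 * the first free physical address, places the coremap array at the next page
 * boundary, marks everything below the end of the coremap (kernel image plus
 * the coremap pages) as reserved, and marks the remaining frames free.
 */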
void 
coremap_bootstrap(void) 
{
    // Determine the range of physical memory for the coremap to manage
    coremap_endaddr = ram_getsize();
    coremap_startaddr = ram_getfirstfree();

    // Align starting address to next page boundary
    size_t remainder = coremap_startaddr % PAGE_SIZE;
    if (remainder != 0) {
        coremap_startaddr += PAGE_SIZE;
        coremap_startaddr -= remainder;
    }
    coremap_first_entry = (struct coremap_entry*)PADDR_TO_KVADDR(coremap_startaddr); 

    int numpages = coremap_endaddr/PAGE_SIZE;

    size_t coremap_totalsize = sizeof(struct coremap_entry)*numpages;
    size_t coremap_pages = coremap_totalsize/PAGE_SIZE;

    remainder = coremap_totalsize % PAGE_SIZE;
    if (remainder != 0) {
        coremap_pages += 1;
    }

    coremap_startaddr += coremap_pages*PAGE_SIZE;

    // Traverse from start to first available memory address marking as reserved for kernel
    for (uint32_t i = 0; i < coremap_startaddr/PAGE_SIZE; i++) {
        coremap_first_entry[i].block_size_pages = 0;
        coremap_first_entry[i].state = COREMAP_STATE_RESERVED;
    }

    // Traverse through available mem addresses and mark as free
    for (uint32_t i = coremap_startaddr/PAGE_SIZE; i < coremap_endaddr/PAGE_SIZE; i++) {
        coremap_first_entry[i].block_size_pages = 0;
        coremap_first_entry[i].state = COREMAP_STATE_FREE;
    }
}

/* Allocate/free some kernel-space virtual pages */
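/*
 * alloc_kpages does a first-fit scan of the coremap for npages consecutive
 * free frames, records the block length in the first entry, and returns the
 * kernel virtual address of the block (0 if no such run exists).
 */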
vaddr_t
alloc_kpages(unsigned npages)
{
    paddr_t addr = 0;

    spinlock_acquire(&coremap_lock);

    // Find npages consecutive free pages in coremap
    uint32_t start_index = coremap_startaddr/PAGE_SIZE;
    uint32_t consecutive_free_pages = 0;

    for(uint32_t i = start_index; i < coremap_endaddr/PAGE_SIZE; i++)
    {       
        if(coremap_first_entry[i].state == COREMAP_STATE_FREE)
        {
            consecutive_free_pages++;
        }
        else
        {
            consecutive_free_pages = 0;    
        }

        if(consecutive_free_pages == npages)
        {
            // Get start address of consecutive group of pages
            addr = (i - npages + 1) * PAGE_SIZE;
            break;
        }
        
    }
    
    if(consecutive_free_pages != npages)
    {
        // Could not find npages consecutive free pages in coremap
        spinlock_release(&coremap_lock);
        return 0;
    }
    
    // Mark start_index as beginning of coremap region found above
    start_index = addr/PAGE_SIZE;

    // Mark number of pages in region in first entry only
    coremap_first_entry[start_index].block_size_pages = npages; 

    for(uint32_t i = 0; i < npages; i++)
    {
        coremap_first_entry[start_index + i].state = COREMAP_STATE_RESERVED;
    }

    spinlock_release(&coremap_lock);
    return PADDR_TO_KVADDR(addr);
}

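/*
 * free_kpages: free a block previously returned by alloc_kpages.  The kernel
 * virtual address is converted back to a physical address, the block length
 * is read from the first coremap entry, and every page in the block is
 * marked free again.
 */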
void
free_kpages(vaddr_t addr)
{
    // Convert vaddr to paddr
    paddr_t paddr = addr - MIPS_KSEG0;

    // Convert paddr into coremap page index
    uint32_t index = paddr/PAGE_SIZE;

    spinlock_acquire(&coremap_lock);

    // Get number of pages in block
    uint32_t block_size = coremap_first_entry[index].block_size_pages;

    for(unsigned int i = 0; i < block_size; i++) {
        coremap_first_entry[index + i].state = COREMAP_STATE_FREE;
        coremap_first_entry[index + i].block_size_pages = 0;
    }

    spinlock_release(&coremap_lock);
}

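/*
 * vm_bootstrap: nothing to do here; physical-memory bookkeeping is handled
 * by coremap_bootstrap().
 */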
void vm_bootstrap(void)
{
    // Do nothing
}

void
vm_tlbshootdown_all(void)
{
	panic("tlb shootdown not implemented\n");
}

void
vm_tlbshootdown(const struct tlbshootdown *ts)
{
	(void)ts;
	panic("tlb shootdown not implemented\n");
}

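/*
 * vm_fault: handle a TLB miss.  The faulting address is validated against
 * the process's regions and the fixed-size user stack, then the address
 * space's page table (a linked list of struct page_tbl_entry, assumed to
 * hold vpage, ppage, state, next, and a per-entry lock) is searched for an
 * existing mapping.  If none is found, a new entry is created and backed by
 * a physical page from retrieve_user_pages(), and the translation is loaded
 * into the TLB, evicting a random entry if the TLB is full.
 *
 * struct region_node is likewise assumed to describe one region with
 * start_vaddr, size, and next.
 */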
int
vm_fault(int faulttype, vaddr_t faultaddress)
{
	vaddr_t stackbase, stacktop;
    paddr_t paddr = 0;
	int i;
	uint32_t ehi, elo;
	struct addrspace *as;
	int spl;

	faultaddress &= PAGE_FRAME;

	DEBUG(DB_VM, "vm: fault: 0x%x\n", faultaddress);

	switch (faulttype) {
	    case VM_FAULT_READONLY:
		// We always create pages read-write, so we can't get this
		panic("vm: got VM_FAULT_READONLY\n");
	    case VM_FAULT_READ:
	    case VM_FAULT_WRITE:
		break;
	    default:
		return EINVAL;
	}

	if (curproc == NULL) {
		/*
		 * No process. This is probably a kernel fault early
		 * in boot. Return EFAULT so as to panic instead of
		 * getting into an infinite faulting loop.
		 */
		return EFAULT;
	}

	as = proc_getas();
	if (as == NULL) {
		/*
		 * No address space set up. This is probably also a
		 * kernel fault early in boot.
		 */
		return EFAULT;
	}

	// Assert that the address space has been set up properly
    KASSERT(as->region_base != NULL);

    // Check if faultaddress is in stack or region
	stackbase = USERSTACK - VM_STACKPAGES * PAGE_SIZE;
	stacktop = USERSTACK;

    const bool is_in_stack = (faultaddress >= stackbase) && (faultaddress < stacktop);

    struct region_node *region = as->region_base;
    bool is_in_region = false;
    while(region != NULL)
    {
        if((faultaddress >= region->start_vaddr) && (faultaddress < (region->start_vaddr + region->size)))
        {
            is_in_region = true;
            break;
        }

        region = region->next;
    }

    if(!(is_in_stack || is_in_region))
    {
        // Fault address not valid
        return EFAULT;
    }

    struct page_tbl_entry *pte = as->pte_base;
    struct page_tbl_entry *prev_pte = NULL;

    // Find location in address space page table for new entry
    // Add at head if page table empty
    if(pte == NULL)
    {
        pte = kmalloc(sizeof(struct page_tbl_entry));
        if(pte == NULL)
        {
            return ENOMEM;
        }

        pte->pte_lock = lock_create("PTE Lock");
        if(pte->pte_lock == NULL)
        {
            kfree(pte);
            return ENOMEM;
        }
        lock_acquire(pte->pte_lock);
        pte->vpage = faultaddress;
        pte->ppage = retrieve_user_pages(1);

        if(pte->ppage == 0)
        {
            lock_release(pte->pte_lock);
            lock_destroy(pte->pte_lock);
            kfree(pte);
            return ENOMEM;
        }

        pte->state = PTE_STATE_MAPPED;
        pte->next = NULL;

        // Install the new entry as the head of the page table and
        // record its physical page for the TLB write below
        as->pte_base = pte;
        paddr = pte->ppage;
        lock_release(pte->pte_lock);
    }
    // If the page table is populated, search it for an entry matching the fault address
    else
    {
        while(pte != NULL) 
        {
            // If an entry for faultaddress already exists, reuse its mapping
            if(pte->vpage == faultaddress) 
            {
                if(pte->state == PTE_STATE_MAPPED)
                {
                    paddr = pte->ppage;
                }
                break;
            }

            prev_pte = pte;
            pte = pte->next;

        }
        // If no match found, create a new entry at the tail
        if(pte == NULL)
        {
            pte = kmalloc(sizeof(struct page_tbl_entry));
            if(pte == NULL)
            {
                return ENOMEM;
            }

            pte->pte_lock = lock_create("PTE Lock");
            if(pte->pte_lock == NULL)
            {
                kfree(pte);
                return ENOMEM;
            }
            lock_acquire(pte->pte_lock);
            pte->vpage = faultaddress;
            pte->ppage = retrieve_user_pages(1);

            if(pte->ppage == 0)
            {
                lock_release(pte->pte_lock);
                lock_destroy(pte->pte_lock);
                kfree(pte);
                return ENOMEM;
            }

            pte->state = PTE_STATE_MAPPED;
            pte->next = NULL;

            // Record the physical page for the TLB write below
            paddr = pte->ppage;
            lock_release(pte->pte_lock);

            // Link the fully initialized entry onto the tail of the list
            lock_acquire(prev_pte->pte_lock);
            prev_pte->next = pte;
            lock_release(prev_pte->pte_lock);
        }
    }

	// Make sure it's page-aligned
	KASSERT((paddr & PAGE_FRAME) == paddr);

	// Disable interrupts on this CPU while frobbing the TLB
	spl = splhigh();

    // Search for available entry in TLB
	for (i=0; i<NUM_TLB; i++) {
		tlb_read(&ehi, &elo, i);
		if (elo & TLBLO_VALID) {
			continue;
		}
		ehi = faultaddress;
		elo = paddr | TLBLO_DIRTY | TLBLO_VALID;
		tlb_write(ehi, elo, i);
		splx(spl);
		return 0;
	}

	// If the TLB is full, use random replacement to evict an entry
	ehi = faultaddress;
	elo = paddr | TLBLO_DIRTY | TLBLO_VALID;
	tlb_random(ehi, elo);
	splx(spl);
	return 0;
}

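/*
 * retrieve_user_pages: first-fit allocation of npages consecutive physical
 * frames for user mappings.  Works like alloc_kpages but marks the frames
 * COREMAP_STATE_INUSE and returns the physical (not kernel virtual) address,
 * or 0 if no large-enough run of free frames exists.
 */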
paddr_t
retrieve_user_pages(unsigned npages)
{
    paddr_t userpageaddr = 0;
    uint32_t numfreepages = 0;

    spinlock_acquire(&coremap_lock);

    // Find npages consecutive free pages in coremap
    for(uint32_t i = coremap_startaddr/PAGE_SIZE; i < coremap_endaddr/PAGE_SIZE; i++) {

        numfreepages = (coremap_first_entry[i].state == COREMAP_STATE_FREE) ? numfreepages + 1 : 0;

        if(numfreepages == npages) {
            userpageaddr = (i + 1 - npages) * PAGE_SIZE;
            break;
        }
    }

    if(numfreepages != npages) {
        // Could not find npages consecutive free pages in coremap
        spinlock_release(&coremap_lock);
        return 0;
    }

    // Mark start_index as beginning of coremap region found above
    const uint32_t start_index = userpageaddr/PAGE_SIZE;

    // Mark number of pages in region in first entry only
    coremap_first_entry[start_index].block_size_pages = npages;

    for(uint32_t i = 0; i < npages; i++)
    {
        coremap_first_entry[start_index + i].state = COREMAP_STATE_INUSE;
    }

    spinlock_release(&coremap_lock);

    return userpageaddr;
}