/* -*- Mode: C; tab-width: 4; indent-tabs-mode: t; c-basic-offset: 4 -*-
 *
 * Name         : $RCSfile: kernmem.c $
 *
 * Copyright    : 2001,2002 by Imagination Technologies Limited.
 *                All rights reserved.
 *                No part of this software, either material or conceptual
 *                may be copied or distributed, transmitted, transcribed,
 *                stored in a retrieval system or translated into any
 *                human or computer language in any form by any means,
 *                electronic, mechanical, manual or otherwise, or
 *                disclosed to third parties without the express written
 *                permission of:
 *                    Imagination Technologies Limited,
 *                    HomePark Industrial Estate,
 *                    Kings Langley,
 *                    Hertfordshire,
 *                    WD4 8LZ,
 *                    UK
 *
 * Description  : Defines functions providing contiguous kernel memory
 *                allocations under Linux.
 *
 * Version      : $Revision: 1.9 $
 *
 **************************************************************************/

/*
// NOTE: the kernel header set below is inferred from the calls this file
// makes (version macros, page macros, kmalloc/kfree, and the
// mem_map_reserve/unreserve wrappers); adjust for the target kernel if needed.
*/
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/wrapper.h>

#include "kernmem.h"
#include "virtmem.h"
#include "mmap.h"
#include "debug.h"
#include "hostfunc.h"

static KERNEL_ALLOC_REC		KAllocHead;
static PKERNEL_ALLOC_REC	psKAllocHead = &KAllocHead;

/*
// kernel_allocate_reserve
//
// Purpose:	Allocates contiguous pages and reserves them
//
// Args:	nBytes - minimum number of bytes to reserve
//
// Returns:	Page-aligned address of the kernel allocation, or zero on error.
*/
void *kernel_allocate_reserve(unsigned long nBytes)
{
	unsigned long		pkvPageAlignedContigMem = 0;
	unsigned long		pkvCurrentPage;
	unsigned int		nPages;
	unsigned int		nOrder = 0;
	unsigned int		nOrderPages = 1;	/* 2 to the power of 0 */
	PKERNEL_ALLOC_REC	psNewRecord;
	PKERNEL_ALLOC_REC	psCurrentRecord;
	PKERNEL_ALLOC_REC	psLastRecord;

	/* Calculate the number of pages we need to allocate */
	nPages = (nBytes + PAGE_SIZE - 1) / PAGE_SIZE;

	/* Calculate the order of the allocation (log2 of nPages, rounded up) */
	while (nOrderPages < nPages)
	{
		nOrderPages *= 2;
		++nOrder;
	}
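	/*
	// Example (illustrative, assuming a 4 KiB PAGE_SIZE): a request of
	// nBytes = 10000 needs nPages = 3; the loop above rounds up to the
	// next power of two, giving nOrder = 2, so 4 contiguous pages (16 KiB)
	// are actually requested from the allocator below.
	*/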
	/* Allocate contiguous pages.
	//
	// N.B. __get_dma_pages is used rather than __get_free_pages(__GFP_DMA)
	// for compatibility with kernels prior to version 2.2.
	// See Rubini's Linux Device Drivers, page 225.
	*/
	pkvPageAlignedContigMem = __get_dma_pages(GFP_KERNEL, nOrder);

	if (pkvPageAlignedContigMem)
	{
		/* Reserve the pages to enable them to be re-mapped */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
		unsigned long i;

		for (i = MAP_NR(pkvPageAlignedContigMem);
			 i <= MAP_NR(pkvPageAlignedContigMem + nBytes); ++i)
		{
			mem_map_reserve(i);
		}
#else
		for (pkvCurrentPage = pkvPageAlignedContigMem;
			 pkvCurrentPage < (pkvPageAlignedContigMem + nBytes);
			 pkvCurrentPage += PAGE_SIZE)
		{
			mem_map_reserve(virt_to_page(pkvCurrentPage));
		}
#endif

		/* DEBUGGING START */
		DPF("New kmalloc - pkvPageAlignedContigMem: 0x%08lX, nBytes: 0x%08lX, Order: 0x%08X\n",
			pkvPageAlignedContigMem, nBytes, nOrder);
		/* DEBUGGING END */

		/* Register the area in mmap address space */
		pvr_mmap_register_area((void *)pkvPageAlignedContigMem, nBytes,
							   PVR_MMAP_CONTIG, TRUE);

		/* Create a new memory record to track this allocation */
		psNewRecord = kmalloc(sizeof(KERNEL_ALLOC_REC), GFP_KERNEL);
		if (psNewRecord)
		{
			/* Walk to the end of the allocation list */
			psCurrentRecord = psLastRecord = psKAllocHead;
			while (psCurrentRecord)
			{
				psLastRecord = psCurrentRecord;
				psCurrentRecord = psCurrentRecord->pNext;
			}

			psNewRecord->pkvPageAlignedContigMem = pkvPageAlignedContigMem;
			psNewRecord->nBytes = nBytes;
			psNewRecord->nOrder = nOrder;

			/* Append the new record */
			psLastRecord->pNext = psNewRecord;
			psNewRecord->pNext = NULL;
		}
		else
		{
			DPF("kernmem.c - kernel_allocate_reserve: Failed to allocate memory record.\n");
		}
	}
	else
	{
		DPF("kernmem.c - kernel_allocate_reserve: Allocation failed.\n");
	}

	return (void *)pkvPageAlignedContigMem;
}
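/*
// Usage sketch (illustrative only, not part of this file): a driver needing
// a contiguous, DMA-able scratch buffer might pair the two calls as below.
// The function and variable names here are hypothetical and the error value
// is only an example.
*/
#if 0
static void *pvScratchBuffer;	/* hypothetical example buffer */

static int example_init(void)
{
	/* Request at least two pages of contiguous, reserved kernel memory */
	pvScratchBuffer = kernel_allocate_reserve(2 * PAGE_SIZE);
	if (!pvScratchBuffer)
	{
		return -ENOMEM;
	}

	return 0;
}

static void example_exit(void)
{
	/* Release the pages and their allocation record */
	kernel_deallocate_unreserve((unsigned long)pvScratchBuffer);
}
#endif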
/*
// kernel_deallocate_unreserve
//
// Purpose:	Unreserves and deallocates pages allocated by kernel_allocate_reserve
//
// Args:	pkvPageAlignedContigMem - Page-aligned address returned by
//			kernel_allocate_reserve
//
// Returns:	None.
*/
void kernel_deallocate_unreserve(unsigned long pkvPageAlignedContigMem)
{
	PKERNEL_ALLOC_REC	psCurrentRecord = psKAllocHead->pNext;
	PKERNEL_ALLOC_REC	psLastRecord = psKAllocHead;
	unsigned long		nBytes;
	unsigned long		nOrder;
	unsigned long		pkvCurrentPage;
	unsigned int		i;

	/* Locate the corresponding allocation record */
	while (psCurrentRecord)
	{
		if ((unsigned long)(psCurrentRecord->pkvPageAlignedContigMem) == pkvPageAlignedContigMem)
			break;

		psLastRecord = psCurrentRecord;
		psCurrentRecord = psCurrentRecord->pNext;
	}

	if (psCurrentRecord)
	{
		/* Retrieve the size and order of the allocation */
		nBytes = psCurrentRecord->nBytes;
		nOrder = psCurrentRecord->nOrder;

		/* Unlink and delete the allocation record */
		psLastRecord->pNext = psCurrentRecord->pNext;
		kfree(psCurrentRecord);

		/* Unreserve the pages */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
		for (i = MAP_NR(pkvPageAlignedContigMem);
			 i <= MAP_NR(pkvPageAlignedContigMem + nBytes); ++i)
		{
			mem_map_unreserve(i);
		}
#else
		for (pkvCurrentPage = pkvPageAlignedContigMem;
			 pkvCurrentPage < (pkvPageAlignedContigMem + nBytes);
			 pkvCurrentPage += PAGE_SIZE)
		{
			mem_map_unreserve(virt_to_page(pkvCurrentPage));
		}
#endif

		/* Remove the area from mmap address space */
		pvr_mmap_remove_registered_area((void *)pkvPageAlignedContigMem);

		/* DEBUGGING START */
		DPF("Deallocating kmalloc - pkvPageAlignedContigMem: 0x%08lX\n",
			pkvPageAlignedContigMem);
		/* DEBUGGING END */

		/* De-allocate the memory */
		if (pkvPageAlignedContigMem)
		{
			free_pages(pkvPageAlignedContigMem, nOrder);
		}
	}
	else
	{
		DPF("kernmem.c - kernel_deallocate_unreserve: Error - failed to find allocation record.\n");
	}
}

/*
// kernel_memory_cleanup - Frees any remaining memory allocations
*/
void kernel_memory_cleanup(void)
{
	PKERNEL_ALLOC_REC psCurrentRecord;

	/*
	// kernel_deallocate_unreserve unlinks each record it frees from
	// psKAllocHead's list, so this loop terminates once the list is empty.
	*/
	while (psKAllocHead && (psCurrentRecord = psKAllocHead->pNext))
	{
		kernel_deallocate_unreserve((unsigned long)psCurrentRecord->pkvPageAlignedContigMem);
	}
}
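/*
// Cleanup sketch (illustrative only, not part of this file):
// kernel_memory_cleanup frees any allocations still outstanding, so a module
// exit routine along the following lines would typically call it last. The
// function name and exact call site are assumptions, and <linux/init.h>
// would be needed for __exit.
*/
#if 0
static void __exit example_module_exit(void)
{
	/* Free any contiguous allocations that callers did not release */
	kernel_memory_cleanup();
}
#endif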