/* -*- Mode: C; tab-width: 4; indent-tabs-mode: t; c-basic-offset: 4 -*-
 *
 * Name         : $RCSfile: mmap.c $
 *
 * Copyright    : 2001,2002 by Imagination Technologies Limited.
 *                All rights reserved.
 *                No part of this software, either material or conceptual
 *                may be copied or distributed, transmitted, transcribed,
 *                stored in a retrieval system or translated into any
 *                human or computer language in any form by any means,
 *                electronic, mechanical, manual or other-wise, or
 *                disclosed to third parties without the express written
 *                permission of:
 *                Imagination Technologies Limited,
 *                HomePark Industrial Estate,
 *                Kings Langley,
 *                Hertfordshire,
 *                WD4 8LZ,
 *                UK
 *
 * Description  : Linux mmap interface
 *
 * Version      : $Revision: 1.28 $
 *
 **********************************************************************/

#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/wrapper.h>
#include <linux/sched.h>

#include "virtmem.h"
#include "mmap.h"
#include "debug.h"
#include "hostfunc.h"

#ifdef SUPPORT_AGP
#include <linux/agp_backend.h>

/* external data */
extern int agpInitialized;			/* have we set up agp? */
extern agp_kern_info agpInfo;		/* agp bridge information */
#endif

/* Newer remap_page_range()/io_remap_page_range() variants (2.4.20+/2.5.7+)
 * take the VMA as their first argument; older ones do not. */
#if HAVE_VMA == 1
#define VMA(vma) vma,
#else
#define VMA(vma)
#endif

/* Prototypes */
static void AllocateTable(void);
static PKV_OFFSET_STRUCT FindAllocRec(unsigned long nOffset);
static PKV_OFFSET_STRUCT FindRegisteredArea(void *pkvAddress, unsigned long nLength);

/* Imported prototype from linux/hostfunc.c */
unsigned long ConvertLinToPhys(unsigned long LinAddr);

/* Device operation */
int pvr_mmap(struct file* pFile, struct vm_area_struct* ps_vma);

/* VM area operations */
void pvr_mmap_vopen(struct vm_area_struct* ps_vma);
void pvr_mmap_vclose(struct vm_area_struct* ps_vma);

/* Page fault handler */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
unsigned long pvr_mmap_vmmap(struct vm_area_struct* ps_vma, unsigned long dwAddress, int iAccess);
#else
struct page* pvr_mmap_vmmap(struct vm_area_struct* ps_vma, unsigned long dwAddress, int iAccess);
#endif

/* Memory operation structures */
static struct vm_operations_struct pvr_mmap_vmops =
{
	open:	pvr_mmap_vopen,
	close:	pvr_mmap_vclose,
	nopage:	pvr_mmap_vmmap
};

/* Static variables */
PKV_OFFSET_LOCKED_STRUCT psKVOffsetStruct = 0;
static PKV_OFFSET_STRUCT psKVOffsetTable = 0;
static PKV_OFFSET_STRUCT psNextAllocRec;
static unsigned long nNextOffset = 0;

/* Space to store a record of the offset table vmalloc info. */
static VIRT_ALLOC_REC OffsetTableAllocRec;

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

#if defined(__i386__)
	/* On PPro and successors, PCD alone doesn't always mean
	 * uncached because of interactions with the MTRRs.
	 * PCD | PWT means definitely uncached. */
	if (boot_cpu_data.x86 > 3)
		prot |= _PAGE_PCD | _PAGE_PWT;
#elif defined(ARM)
	prot &= ~(L_PTE_CACHEABLE | L_PTE_BUFFERABLE);
#else
#error "mmap.c - pgprot_noncached - Unknown architecture."
#endif

	return __pgprot(prot);
}
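/* Illustrative sketch (not part of the driver): how a minimal character
 * device mmap handler might apply pgprot_noncached() before creating the
 * PTEs for a contiguous physical range.  EXAMPLE_PHYS_BASE is a
 * hypothetical, page-aligned physical address.
 */
#if 0
static int example_mmap(struct file *pFile, struct vm_area_struct *ps_vma)
{
	unsigned long nBytes = ps_vma->vm_end - ps_vma->vm_start;

	/* Mark the mapping uncached before remapping. */
	ps_vma->vm_page_prot = pgprot_noncached(ps_vma->vm_page_prot);
	ps_vma->vm_flags |= VM_IO | VM_RESERVED;

	if (remap_page_range(VMA(ps_vma) ps_vma->vm_start,
						 EXAMPLE_PHYS_BASE, nBytes, ps_vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
#endif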
static unsigned pvr_map_block(struct vm_area_struct *ps_vma,
							  unsigned long dstAddr,
							  unsigned long srcAddr,
							  unsigned long nBytes)
{
	/* Attempt to remap a contiguous page range */
	int dwResult;

	if (ps_vma->vm_flags & VM_IO)
	{
		dwResult = io_remap_page_range(VMA(ps_vma)
									   dstAddr,
									   srcAddr,
									   nBytes,
									   ps_vma->vm_page_prot);
#if 0
		DPF("mmap.c - pvr_mmap: Mapped contiguous IO range.\n");
#endif
	}
	else
	{
		dwResult = remap_page_range(VMA(ps_vma)
									dstAddr,
									srcAddr,
									nBytes,
									ps_vma->vm_page_prot);
#if 0
		DPF("mmap.c - pvr_mmap: Mapped contiguous RAM range.\n");
#endif
	}

	return dwResult;
}

int pvr_mmap(struct file* pFile, struct vm_area_struct* ps_vma)
{
	unsigned long nOffset, nBytes;
	PKV_OFFSET_STRUCT psCurrentRec;
	struct list_head *ptr = 0;
	struct pvrProcRecord *processEntry = 0;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
	nOffset = ps_vma->vm_offset;
#else
	nOffset = ps_vma->vm_pgoff << PAGE_SHIFT;
#endif

	if (nOffset & ~PAGE_MASK)
	{
		DPF("mmap.c - pvr_mmap : Error - offset not aligned: 0x%08lX\n", nOffset);
		return -ENXIO;
	}

	if (!nOffset && (ps_vma->vm_flags & VM_WRITE))
	{
		DPF("mmap.c - pvr_mmap : Error - writeable mapping at offset zero\n");
		return -ENXIO;
	}

	nBytes = ps_vma->vm_end - ps_vma->vm_start;

	/* Only support shared writeable mappings */
	if ((ps_vma->vm_flags & VM_WRITE) && !(ps_vma->vm_flags & VM_SHARED))
	{
		DPF("mmap.c - pvr_mmap : Error - Cannot mmap non-shareable writable areas.\n");
		return -ENXIO;
	}

	/* Set the IO flag for non-memory regions */
	if ((nOffset >= __pa(high_memory)) || (pFile->f_flags & O_SYNC))
	{
		ps_vma->vm_flags |= VM_IO;
	}

	/* Do not allow this area to be swapped out */
#if 0
	/* The original example used VM_LOCKED, but VM_RESERVED seems to be preferred */
	ps_vma->vm_flags |= VM_LOCKED;
#endif
	ps_vma->vm_flags |= VM_RESERVED;

	/* Look up the entry in the allocation table */
	psCurrentRec = FindAllocRec(nOffset);
	ps_vma->vm_private_data = (void *)psCurrentRec;

	if (psCurrentRec)
	{
		if (!psCurrentRec->bCached)
			ps_vma->vm_page_prot = pgprot_noncached(ps_vma->vm_page_prot);

		/* Determine whether this area is contiguous */
		if ((psCurrentRec->eMapType == PVR_MMAP_CONTIG) ||
			(psCurrentRec->eMapType == PVR_MMAP_AGP_SCATTER))
		{
			unsigned dwResult = 0;

			if (psCurrentRec->eMapType == PVR_MMAP_CONTIG)
			{
				unsigned long addr = psCurrentRec->pPhysAddr +
									 (nOffset - psCurrentRec->nOffset);

				dwResult = pvr_map_block(ps_vma, ps_vma->vm_start, addr, nBytes);
			}
			else
			{
				/* Map in all the aperture blocks */
				agp_memory *tmp;
				unsigned long pg_offset = 0;
				unsigned long addr = agpInfo.aper_base +
									 (nOffset - psCurrentRec->nOffset);
				LINBUFSTRUCT *pLinBufStruct =
					(LINBUFSTRUCT *)psCurrentRec->pkvPageAlignedAddress;

				for (tmp = (agp_memory *)pLinBufStruct->pAGPList;
					 tmp != NULL;
					 tmp = tmp->next)
				{
					dwResult |= pvr_map_block(ps_vma,
											  ps_vma->vm_start + PAGE_SIZE * pg_offset,
											  addr + tmp->pg_start * PAGE_SIZE,
											  PAGE_SIZE * tmp->page_count);
					pg_offset += tmp->page_count;
				}
			}

			if (dwResult != 0)
			{
				DPF("mmap.c - pvr_mmap: Error - Failed to map contiguous pages.\n");
				return -ENXIO;
			}
			else
			{
				/* Install the ops to make sure that the module count is maintained */
				ps_vma->vm_ops = &pvr_mmap_vmops;
#if 0
				DPF("mmap.c - pvr_mmap: Installed page fault handler.\n");
#endif
			}
		}
		else
		{
			/* Install a page fault handler */
			ps_vma->vm_ops = &pvr_mmap_vmops;
		}
	}
	else
	{
		DPF("mmap.c - pvr_mmap: Error - Attempted to mmap unregistered area at offset 0x%08lX\n",
			nOffset);
		return -ENXIO;
	}

	/* Call the open routine to increment the usage count */
	pvr_mmap_vopen(ps_vma);

	DPF("mmap.c - pvr_mmap: Mapped area at offset 0x%08lX\n", nOffset);

	/* Record the mapping against the calling process so that user
	 * addresses can later be translated back to kernel addresses. */
	for (ptr = pvrProcList.next; ptr != &pvrProcList; ptr = ptr->next)
	{
		processEntry = list_entry(ptr, struct pvrProcRecord, list);

		if (processEntry->pid == current->pid)
		{
			struct pvrMMAPRecord *mmapEntry;

			mmapEntry = kmalloc(sizeof(struct pvrMMAPRecord), GFP_KERNEL);
			if (mmapEntry)
			{
				mmapEntry->userAddress = ps_vma->vm_start;
				mmapEntry->length = nBytes;
				mmapEntry->kernelAddress = psCurrentRec->pkvPageAlignedAddress;
				list_add_tail(&mmapEntry->list, &processEntry->mmapList);
			}
			break;
		}
	}

	return 0;
}
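/* Illustrative sketch (user-space side, not part of the driver): mapping a
 * registered area through this interface.  The device node name and the
 * offset are hypothetical; real offsets come from the shared offset table,
 * which is itself registered at offset zero.
 */
#if 0
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

void *example_map_area(unsigned long nOffset, unsigned long nBytes)
{
	int fd = open("/dev/pvr", O_RDWR);	/* hypothetical device node */
	void *p;

	if (fd < 0)
		return NULL;

	/* The offset selects the registered area; it must be page aligned. */
	p = mmap(NULL, nBytes, PROT_READ | PROT_WRITE, MAP_SHARED, fd, nOffset);
	close(fd);

	return (p == MAP_FAILED) ? NULL : p;
}
#endif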
void pvr_mmap_vopen(struct vm_area_struct* ps_vma)
{
	static unsigned long nOpenCount = 0;

	++nOpenCount;
	DPF("Incrementing mmap use count. Call count: %lu\n", nOpenCount);
	MOD_INC_USE_COUNT;
}

void pvr_mmap_vclose(struct vm_area_struct* ps_vma)
{
	static unsigned long nCloseCount = 0;

	++nCloseCount;
	DPF("Decrementing mmap use count. Call count: %lu\n", nCloseCount);

#if 0 /* DEBUGGING START */
	{
		/* Determine the start address of the vma */
		unsigned long dwAddress = ps_vma->vm_start;

		/* Look for a corresponding mmap entry */
		PKV_OFFSET_STRUCT psRec = FindRegisteredArea((void *)dwAddress, sizeof(unsigned long));

		if (psRec)
		{
			DPF("pvr_mmap_vclose: Offset 0x%08lX matched kvaddr: 0x%08lX\n",
				psRec->nOffset, dwAddress);
		}
		else
		{
			DPF("pvr_mmap_vclose: Failed to find offset table entry for kvaddr: 0x%08lX\n",
				dwAddress);
		}
	}
#endif /* DEBUGGING END */

	MOD_DEC_USE_COUNT;
}

/* The page fault handler has a different prototype for kernels < 2.4 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
unsigned long pvr_mmap_vmmap(struct vm_area_struct* ps_vma,
							 unsigned long dwAddress,
							 int iAccess)
#else
struct page* pvr_mmap_vmmap(struct vm_area_struct* ps_vma,
							unsigned long dwAddress,
							int iAccess)
#endif
{
	unsigned long nOffset;
	unsigned long nVirtAddress = 0;
	PKV_OFFSET_STRUCT psCurrentRec;
	struct page *pPage;

	/* Calculate the offset within the registered area */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
	nOffset = dwAddress - ps_vma->vm_start + ps_vma->vm_offset;
#else
	nOffset = dwAddress - ps_vma->vm_start + (ps_vma->vm_pgoff << PAGE_SHIFT);
#endif

	/* Retrieve the allocation record stashed at mmap time */
	psCurrentRec = (PKV_OFFSET_STRUCT)ps_vma->vm_private_data;

	if (psCurrentRec)
	{
		if (psCurrentRec->eMapType == PVR_MMAP_SCATTER)
		{
			LINBUFSTRUCT *pLinBufStruct =
				(LINBUFSTRUCT *)psCurrentRec->pkvPageAlignedAddress;
			unsigned long pageIndex =
				(nOffset - psCurrentRec->nOffset) / PAGE_SIZE;

			if (pageIndex < pLinBufStruct->dwPagesCommitted)
				nVirtAddress = pLinBufStruct->pageList[pageIndex];
			else
				nVirtAddress = 0;

			/* Only dereference the page for the debug print if the
			 * translation succeeded. */
			if (nVirtAddress)
			{
				DPF("pvr_mmap SCATTER index %lu nVirtAddress %08lX phys %08lX (%08lX - %08lX)\n",
					pageIndex, nVirtAddress,
					virt_to_bus((void *)nVirtAddress),
					*(unsigned long *)nVirtAddress,
					*((unsigned long *)nVirtAddress + 1));
			}

			pPage = nVirtAddress ? virt_to_page(nVirtAddress) : 0;
		}
		else
		{
			nVirtAddress = psCurrentRec->pkvPageAlignedAddress +
						   (nOffset - psCurrentRec->nOffset);

			/* Convert the kernel virtual address to a page */
			pPage = ConvertKVToPage(nVirtAddress);
		}
	}
	else
	{
		DPF("mmap.c - pvr_mmap_vmmap: Error - Failed to find allocation record.\n");
		pPage = 0;
	}

	/* Check for a valid translation */
	if (!pPage)
	{
		DPF("mmap.c - pvr_mmap_vmmap: Error - Page fault out of range.\n");
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
		return 0UL;
#else
		return pPage;
#endif
	}

	DPF("mmap.c - pvr_mmap_vmmap: Page fault handled for offset: 0x%08lX (KSeg: 0x%08lX)\n",
		nOffset, nVirtAddress);
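/* Illustrative sketch (not the hostfunc.c implementation): one common way a
 * 2.4-era ConvertKVToPage() is written, walking the kernel page tables to
 * translate a vmalloc() address into its struct page.  The function name is
 * hypothetical.
 */
#if 0
static struct page *example_kv_to_page(unsigned long kvAddr)
{
	pgd_t *pgd = pgd_offset_k(kvAddr);
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return NULL;

	pmd = pmd_offset(pgd, kvAddr);
	if (pmd_none(*pmd))
		return NULL;

	pte = pte_offset(pmd, kvAddr);	/* 2.4 API; later kernels use pte_offset_map() */
	if (!pte_present(*pte))
		return NULL;

	return pte_page(*pte);
}
#endif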
	/* Note that we don't do a get_page() here: the pages being mapped
	 * have the reserved bit set, and on such pages Linux will not drop
	 * the reference count during the unmap (see __free_pages in
	 * linux/mm/page_alloc.c), so taking an extra reference would leak
	 * every page mapped through the nopage handler. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
	return nVirtAddress;
#else
	return pPage;
#endif
}

/*
// pvr_mmap_register_area - registers an area in the offset table
*/
void pvr_mmap_register_area(void* pkvArea, unsigned long nBytes,
							PVR_MMAP_TYPE eMapType, unsigned long bCached)
{
	if (psKVOffsetTable == 0)
	{
		AllocateTable();
	}

	if (psKVOffsetTable && (FindRegisteredArea(pkvArea, nBytes) == 0))
	{
		/* Scatter areas store a pointer to the buffer descriptor rather
		 * than to the mapped memory itself, so don't page align those. */
		if ((eMapType == PVR_MMAP_SCATTER) || (eMapType == PVR_MMAP_AGP_SCATTER))
			psNextAllocRec->pkvPageAlignedAddress = (unsigned long)pkvArea;
		else
			psNextAllocRec->pkvPageAlignedAddress = (unsigned long)pkvArea & PAGE_MASK;

		psNextAllocRec->nLength = (nBytes + PAGE_SIZE - 1) & PAGE_MASK;
		psNextAllocRec->eMapType = eMapType;
		psNextAllocRec->bCached = bCached;
		psNextAllocRec->nOffset = nNextOffset;

		if (eMapType == PVR_MMAP_CONTIG)
			psNextAllocRec->pPhysAddr = ConvertLinToPhys((unsigned long)pkvArea & PAGE_MASK);
		else
			psNextAllocRec->pPhysAddr = 0;

#if 0 /* DEBUG START */
		DPF("pvr_mmap_register_area: Virt:0x%08lX, Offset:0x%08lX, Length:0x%08lX\n",
			psNextAllocRec->pkvPageAlignedAddress,
			psNextAllocRec->nOffset,
			psNextAllocRec->nLength);
		DPF("Area registered at entry: %lu\n",
			((unsigned long)psNextAllocRec - (unsigned long)psKVOffsetTable) / sizeof(KV_OFFSET_STRUCT));
#endif /* DEBUG END */

		/* Leave an unmapped guard page between consecutive areas */
		nNextOffset += psNextAllocRec->nLength + PAGE_SIZE;
		++psNextAllocRec;

		/* Terminate the table */
		psNextAllocRec->nOffset = 0;
	}
}

/*
// pvr_mmap_remove_registered_area - Removes an area from the offset table
*/
void pvr_mmap_remove_registered_area(void* pkvMemArea)
{
	int bSuccess = FALSE;
	int i, j;
	unsigned long nMaxOffset = 0;
	int nMaxOffsetIndex = 0;

	if (psKVOffsetTable)
	{
		/* Entry 0 (the table itself) legitimately has nOffset == 0, so it
		 * is always examined; a zero offset elsewhere terminates the table. */
		for (i = 0; i == 0 || (psKVOffsetTable[i].nOffset != 0); ++i)
		{
			if (psKVOffsetTable[i].pkvPageAlignedAddress == (unsigned long)pkvMemArea)
			{
				/* Found the record - overwrite it with the last one in the table */
				PKV_OFFSET_STRUCT psLastRecord = psNextAllocRec - 1;

#if 0 /* DEBUG START */
				DPF("pvr_mmap_remove_registered_area: Virt:0x%08lX, Offset:0x%08lX, Length:0x%08lX\n",
					psKVOffsetTable[i].pkvPageAlignedAddress,
					psKVOffsetTable[i].nOffset,
					psKVOffsetTable[i].nLength);
#endif /* DEBUG END */

				/* Look for the record with the highest offset */
				for (j = 0; (j == 0) || (psKVOffsetTable[j].nOffset != 0); j++)
				{
					if (psKVOffsetTable[j].nOffset > nMaxOffset)
					{
						nMaxOffset = psKVOffsetTable[j].nOffset;
						nMaxOffsetIndex = j;
					}
				}

				/* If deleting the highest-offset entry, reduce the running offset */
				if (i == nMaxOffsetIndex)
				{
					/* Look for the record with the highest offset (other than us) */
					nMaxOffset = 0;
					nMaxOffsetIndex = 0;

					for (j = 0; (j == 0) || (psKVOffsetTable[j].nOffset != 0); j++)
					{
						unsigned long offset = psKVOffsetTable[j].nOffset;

						if ((i != j) && (offset > nMaxOffset))
						{
							nMaxOffset = offset;
							nMaxOffsetIndex = j;
						}
					}

					if (i)
						nNextOffset = psKVOffsetTable[nMaxOffsetIndex].nOffset +
									  psKVOffsetTable[nMaxOffsetIndex].nLength + PAGE_SIZE;
					else
						nNextOffset = 0;
				}

				if (psLastRecord != &psKVOffsetTable[i])
				{
					/* Copy the last record over the one to be deleted */
					psKVOffsetTable[i] = *psLastRecord;
				}

				/* Erase the last record */
				psLastRecord->nOffset = 0;
				psLastRecord->pkvPageAlignedAddress = 0;
				psLastRecord->nLength = 0;
				psLastRecord->pPhysAddr = 0;
				psLastRecord->eMapType = -1;

				/* Allow it to be re-used */
				--psNextAllocRec;

				/* Indicate success */
				bSuccess = TRUE;

				/* No need to continue looking */
				break;
			}
		}

		if (!bSuccess)
		{
			DPF("mmap.c - pvr_mmap_remove_registered_area: Failed to remove area at 0x%08lX\n",
				(unsigned long)pkvMemArea);
		}
	}
	else
	{
		DPF("mmap.c - pvr_mmap_remove_registered_area: Offset table missing - failed to remove area at 0x%08lX\n",
			(unsigned long)pkvMemArea);
	}
}
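/* Illustrative sketch (not part of the driver): typical lifetime of a
 * registered area.  The buffer and its size are hypothetical; real callers
 * live in the memory manager.
 */
#if 0
static void example_register_buffer(void)
{
	void *pkvBuffer = vmalloc(4 * PAGE_SIZE);

	if (!pkvBuffer)
		return;

	/* Make the buffer mappable; areas that are neither contiguous nor AGP
	 * scatter are faulted in page by page via pvr_mmap_vmmap(). */
	pvr_mmap_register_area(pkvBuffer, 4 * PAGE_SIZE, PVR_MMAP_VIRTUAL, TRUE);

	/* ... user space mmaps it using the offset recorded in the table ... */

	pvr_mmap_remove_registered_area(pkvBuffer);
	vfree(pkvBuffer);
}
#endif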
/*
// FindAllocRec - Locates a record in the offset table by offset
*/
static PKV_OFFSET_STRUCT FindAllocRec(unsigned long nOffset)
{
	PKV_OFFSET_STRUCT pStruct = 0;
	unsigned int i;

	for (i = 0; (&psKVOffsetTable[i]) != psNextAllocRec; ++i)
	{
		if ((psKVOffsetTable[i].nOffset <= nOffset) &&
			(psKVOffsetTable[i].nOffset + psKVOffsetTable[i].nLength > nOffset))
		{
			pStruct = &psKVOffsetTable[i];
			break;
		}
	}

	return pStruct;
}

/*
// FindRegisteredArea - Locates a record in the offset table which covers
// the specified area
*/
static PKV_OFFSET_STRUCT FindRegisteredArea(void *pkvAddress, unsigned long nLength)
{
	unsigned long addr = (unsigned long)pkvAddress;
	unsigned int i;

	if (psKVOffsetTable)
	{
		for (i = 0; (&psKVOffsetTable[i]) != psNextAllocRec; ++i)
		{
			/* Scatter records match on the descriptor pointer itself; all
			 * other records match if they cover [addr, addr + nLength). */
			if ((((psKVOffsetTable[i].eMapType == PVR_MMAP_SCATTER) ||
				  (psKVOffsetTable[i].eMapType == PVR_MMAP_AGP_SCATTER)) &&
				 (psKVOffsetTable[i].pkvPageAlignedAddress == addr)) ||
				(((psKVOffsetTable[i].eMapType != PVR_MMAP_SCATTER) &&
				  (psKVOffsetTable[i].eMapType != PVR_MMAP_AGP_SCATTER)) &&
				 (psKVOffsetTable[i].pkvPageAlignedAddress <= addr) &&
				 (psKVOffsetTable[i].pkvPageAlignedAddress + psKVOffsetTable[i].nLength >= addr + nLength)))
			{
				return &psKVOffsetTable[i];
			}
		}
	}

	return NULL;
}

unsigned long pvr_mmap_user_to_kern(unsigned long nUserAddress)
{
	struct list_head *ptr = 0;
	struct pvrProcRecord *processEntry = 0;
	struct pvrMMAPRecord *mmapEntry = 0;
	unsigned long pkvAddress = 0;

	/* Find the record for the calling process... */
	for (ptr = pvrProcList.next; ptr != &pvrProcList; ptr = ptr->next)
	{
		processEntry = list_entry(ptr, struct pvrProcRecord, list);

		if (processEntry->pid == current->pid)
		{
			struct list_head *mptr;

			/* ...then the mapping which covers the user address */
			for (mptr = processEntry->mmapList.next;
				 mptr != &processEntry->mmapList;
				 mptr = mptr->next)
			{
				mmapEntry = list_entry(mptr, struct pvrMMAPRecord, list);

				if ((mmapEntry->userAddress <= nUserAddress) &&
					(mmapEntry->userAddress + mmapEntry->length > nUserAddress))
				{
					pkvAddress = mmapEntry->kernelAddress +
								 (nUserAddress - mmapEntry->userAddress);
					break;
				}
			}
			break;
		}
	}

	return pkvAddress;
}
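/* Illustrative sketch (not part of the driver): how an ioctl handler might
 * use pvr_mmap_user_to_kern() to reach a mapped buffer through its kernel
 * alias.  The handler name is hypothetical.
 */
#if 0
static int example_ioctl_poke(unsigned long nUserAddress)
{
	unsigned long kvAddr = pvr_mmap_user_to_kern(nUserAddress);

	if (!kvAddr)
		return -EFAULT;	/* not an address this process mapped through us */

	/* Safe to touch via the kernel alias of the same pages */
	*(volatile unsigned long *)kvAddr = 0;

	return 0;
}
#endif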
/*
// AllocateTable - Allocates space for the offset table and
// reserves it to allow it to be re-mapped to user space.
// Also registers the table within itself, at offset zero.
*/
static void AllocateTable(void)
{
	void* pkvAddress;
	unsigned long pkvPageAlignedAddress;
	unsigned long pkvPageAddress;
	unsigned long *lockAddress;
	unsigned long nBytes = KVOFFSET_TABLE_SIZE + PAGE_SIZE - 1;

	pkvAddress = vmalloc(nBytes);

	if (pkvAddress)
	{
		memset(pkvAddress, 0, nBytes);

		/* Determine a page aligned pointer */
		pkvPageAlignedAddress = ((unsigned long)pkvAddress + PAGE_SIZE - 1) & PAGE_MASK;

		/* Reserve those pages to allow them to be re-mapped to user space */
		for (pkvPageAddress = pkvPageAlignedAddress;
			 pkvPageAddress < pkvPageAlignedAddress + nBytes;
			 pkvPageAddress += PAGE_SIZE)
		{
			mem_map_reserve(ConvertKVToPage(pkvPageAddress));
		}

		/* Record the allocation */
		OffsetTableAllocRec.pkvMem = (unsigned long)pkvAddress;
		OffsetTableAllocRec.pkvPageAlignedMem = pkvPageAlignedAddress;
		OffsetTableAllocRec.nBytes = nBytes;
		OffsetTableAllocRec.pNext = NULL;

		/* Initialise the table pointers */
		psKVOffsetStruct = (void *)pkvPageAlignedAddress;
		psKVOffsetTable = psKVOffsetStruct->pKVOffsetTable;

		/* The lock needs to go in a separate page to be writeable from user space */
		lockAddress = vmalloc(PAGE_SIZE);
		mem_map_reserve(ConvertKVToPage((unsigned long)lockAddress));
		*lockAddress = 0;
		psKVOffsetStruct->lockAddress = lockAddress;

		psNextAllocRec = &psKVOffsetTable[0];

		/* Register the table within itself to allow it to be remapped */
		psNextAllocRec->pkvPageAlignedAddress = (unsigned long)psKVOffsetStruct;
		psNextAllocRec->nOffset = 0;
		psNextAllocRec->nLength = (KVOFFSET_TABLE_SIZE + PAGE_SIZE - 1) & PAGE_MASK;
		psNextAllocRec->eMapType = PVR_MMAP_VIRTUAL;

		DPF("mmap.c - AllocateTable: KVOffsetTable allocated at 0x%08lX, size 0x%08lX\n",
			pkvPageAlignedAddress, psNextAllocRec->nLength);

		nNextOffset += psNextAllocRec->nLength + PAGE_SIZE;
		++psNextAllocRec;

		/* Terminate the offset table */
		psNextAllocRec->nOffset = 0;

		/* Register the lock page so that user space can map it too */
		pvr_mmap_register_area(lockAddress, PAGE_SIZE, PVR_MMAP_VIRTUAL, TRUE);
	}
}

/*
// DeallocateTable - 'Unreserves' the memory allocated for the table and frees it up
*/
void DeallocateTable(void)
{
	void *pkvAddress;
	unsigned long pkvPageAlignedAddress;
	unsigned long pkvPageAddress;
	unsigned long nBytes = KVOFFSET_TABLE_SIZE + PAGE_SIZE - 1;

	pkvAddress = (void *)OffsetTableAllocRec.pkvMem;

	if (!psKVOffsetStruct)
		return;

	pvr_mmap_remove_registered_area(psKVOffsetStruct->lockAddress);
	pvr_mmap_remove_registered_area(psKVOffsetStruct);

	if (pkvAddress)
	{
		/* Determine a page aligned pointer */
		pkvPageAlignedAddress = ((unsigned long)pkvAddress + PAGE_SIZE - 1) & PAGE_MASK;

		/* Unreserve those pages to allow them to be freed */
		for (pkvPageAddress = pkvPageAlignedAddress;
			 pkvPageAddress < pkvPageAlignedAddress + nBytes;
			 pkvPageAddress += PAGE_SIZE)
		{
			mem_map_unreserve(ConvertKVToPage(pkvPageAddress));
		}

		mem_map_unreserve(ConvertKVToPage((unsigned long)psKVOffsetStruct->lockAddress));
		vfree(psKVOffsetStruct->lockAddress);

		vfree(pkvAddress);

		DPF("mmap.c - DeallocateTable: KVOffsetTable successfully deallocated\n");
	}
	else
	{
		DPF("mmap.c - DeallocateTable: Failed to deallocate KVOffsetTable\n");
	}

	psKVOffsetTable = 0;
}
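/* Illustrative sketch (not part of the driver): the offset table is created
 * lazily by the first pvr_mmap_register_area() call, so only the teardown
 * needs an explicit hook.  A hypothetical module cleanup routine would pair
 * up with it like this.
 */
#if 0
static void example_cleanup_module(void)
{
	/* Any still-registered areas should have been removed by their
	 * owners before this point; the table and lock page go last. */
	DeallocateTable();
}
#endif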