/*
* Name : $RCSfile: hostfunc.c $
*
* Copyright : 2000,2001,2002 by Imagination Technologies Limited.
* All rights reserved.
* No part of this software, either material or conceptual
* may be copied or distributed, transmitted, transcribed,
* stored in a retrieval system or translated into any
* human or computer language in any form by any means,
* electronic, mechanical, manual or other-wise, or
* disclosed to third parties without the express written
* permission of:
* Imagination Technologies Limited,
* HomePark Industrial Estate,
* Kings Langley,
* Hertfordshire,
* WD4 8LZ,
* UK
*
* Description : Linux dependant functions for the Kernel Manager
*
* Version : $Revision: 1.64 $
*
*/
/* don't want the str{cat,len} macros defined so we can create func version */
#define strcat linux_strcat
#define strlen linux_strlen
#include <linux/module.h>
#include <linux/version.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/wrapper.h>
#include <linux/highmem.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include "debug.h"
#include "hostfunc.h"
#include "kernmem.h"
#include "virtmem.h"
#include "mmap.h"
#ifdef SUPPORT_AGP
#include <linux/version.h>
#include <linux/module.h>
#include <asm/semaphore.h>
#include <linux/types.h>
#include <linux/agp_backend.h>
#include <linux/agpgart.h>
#include <asm/io.h>
#endif
#ifdef CONFIG_MTRR
#include <asm/mtrr.h>
#endif
#undef strcat
#undef strlen
#ifndef CONFIG_PCI /* sanity check */
#error This driver requires PCI support.
#endif
#ifdef pte_offset_atomic
#define PVR_ATOMIC_PTE
#endif
struct page *ConvertKVToPage(ulong pkvAddr);
static ulong ConvertPhysToBus(ulong dwPhysAddr);
#if defined(ARM)
static ulong ConvertBusToPhys(ulong dwPhysAddr);
#endif
extern PKV_OFFSET_LOCKED_STRUCT psKVOffsetStruct;
extern int agpInitialized;
/* Registry */
extern int number_registry_entries;
extern REGISTRY_ENTRY pvrcore_registry[MAX_REGISTRY_ENTRIES];
/* AGP */
#if defined(SUPPORT_AGP)
// #define DEBUG_AGP
/* AGP bookkeeping. We keep a region reservation map of the aperature,
each entry of which has a linked list of allocated/committed memory. */
typedef struct _agp_map_entry
{
unsigned offset; /* pages */
unsigned length; /* pages */
agp_memory *agp; /* kernel agp memory structure */
void *mappedAddr; /* kernel mapping */
struct _agp_map_entry *next; /* next reserved region in the list */
}
agp_map_entry;
int agpInitialized = 0; /* have we setup agp? */
agp_kern_info agpInfo; /* agp bridge information */
static agp_map_entry *agpMap = 0; /* map of reserved regions */
static unsigned agpReserved = 0; /* how many pages are reserved */
static drm_agp_t *pAGP; /* handle to kernel agp funcs */
static agp_map_entry *pvr_agp_reserve(ulong dwPages);
static agp_memory *pvr_agp_commit(agp_map_entry *slot, ulong dwPages,
ulong dwOffset);
static void pvr_agp_decommit(agp_map_entry *slot, ulong dwPages,
ulong dwOffset);
static agp_memory *pvr_agp_alloc(ulong dwPages);
static void pvr_agp_free_list(agp_memory *mem_list);
static void pvr_agp_unreserve(agp_map_entry *slot);
#endif
/* defines from kmapi */
#define KM_OK 0x00000000
#define KM_ERROR_KERNEL_TIMEOUT 0x00000002
#define KM_ERROR_OUT_OF_MEMORY 0x00000200
#define KM_ERROR_SYMBOL_NOT_FOUND 0x00001000
/*===========================================================*/
ulong ConvertLinToPhys(ulong LinAddr)
{
/* Translate a kernel virtual address (e.g. from vmalloc) into a CPU
   physical address by consulting the kernel page tables. */
ulong PhysAddr;
struct page *page = NULL;
#if (LINUX_VERSION_CODE >= 0x020508) || \
((LINUX_VERSION_CODE >= 0x020414) && (LINUX_VERSION_CODE < 0x020500))
/* Newer 2.4.x / 2.5.x kernels export vmalloc_to_page() - use it. */
page = vmalloc_to_page((void *)LinAddr);
PhysAddr = page_to_phys( page );
#else /* pre non-gpl-only vmalloc_to_page() */
pgd_t *pgd;
pmd_t *pmd;
pte_t *ptep, pte;
/*
Access the Linux page tables to get the physical address.
*/
pgd = pgd_offset_k(LinAddr);
pmd = pmd_offset(pgd, LinAddr);
#ifndef PVR_ATOMIC_PTE
ptep = pte_offset(pmd, LinAddr);
pte = *ptep;
#else
/* pte_offset_atomic() kmaps the page table; release the mapping again */
ptep = pte_offset_atomic(pmd, LinAddr);
pte = *ptep;
pte_kunmap(ptep);
#endif
PhysAddr = pte_val(pte);
#endif
/* Keep the page-frame bits and re-apply the in-page offset.
   NOTE(review): 0xFFFFF000 hard-codes 4K pages; PAGE_MASK would be safer. */
PhysAddr &= 0xFFFFF000;
PhysAddr += (LinAddr & 0x00000FFF);
/* DPF ("ConvertLinToPhys: Lin %08x: %08x %08x %08x [%08x]", LinAddr, pgd->pgd, pmd->pmd, pte.pte, PhysAddr); */
return (PhysAddr);
}
/*
// ConvertKVToPage
//
// Purpose: Attempts to convert a kernel virtual address to a kernel logical address.
// This step is required before converting to a physical or IO bus address.
//
// Args: pkvAddr - A kernel virtual address (e.g. result of vmalloc() or kmalloc())
//
// Returns: pklAddr - A kernel logical address or zero if no kernel virtual mapping exists.
//
// Notes: All kernel logical addresses are also kernel virtual addresses.
// However, a kernel virtual address may or may not have a kernel virtual
// mapping. Such a mapping is required before the kv address can be converted
// to a physical or IO bus address.
// N.B. Virtually contiguous pages (e.g. results of vmalloc) are not necessarily
// physically contiguous, so each page may have its own kernel virtual mapping.
// Kernel logical addresses are also known as kernel segment (KSEG) addresses.
*/
struct page *ConvertKVToPage(ulong pkvAddr)
{
/* Resolve a kernel virtual address to its struct page (or 0 if no
   mapping exists).  See the block comment above for background. */
struct page *rv = 0;
#if (LINUX_VERSION_CODE >= 0x020508) || \
((LINUX_VERSION_CODE >= 0x020414) && (LINUX_VERSION_CODE < 0x020500))
/* Newer kernels export vmalloc_to_page() - the manual walk is obsolete. */
rv = vmalloc_to_page((void *)pkvAddr);
#else /* pre non-gpl-only vmalloc_to_page() */
pgd_t *ppgd;
pmd_t *ppmd;
pte_t *ppte, pte;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
/* For pre-2.4 kernels, sometimes we can take a shortcut */
/* NOTE(review): pksegAddr is not declared in this function - this
   pre-2.4 branch cannot compile as written; confirm it is dead code. */
if (MAP_NR(pkvAddr) < max_mapnr) {
/* The address is already in a kernel virtual mapping */
pksegAddr = pkvAddr;
} else
#endif
{
/* For pre-2.1 kernels, this fixes up addresses returned by vmalloc */
pkvAddr = VMALLOC_VMADDR(pkvAddr);
/* Hold the page-table lock (or BKL on very old kernels) for the walk */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,10)
lock_kernel();
#else
spin_lock(&init_mm.page_table_lock);
#endif
/* Parse the page table. Start by finding the page directory. */
ppgd = pgd_offset_k(pkvAddr);
/* Check for a valid entry */
if (!pgd_none(*ppgd)) {
/* Find the page mid-level directory */
ppmd = pmd_offset(ppgd, pkvAddr);
/* Check for a valid entry */
if (!pmd_none(*ppmd)) {
/* Find the page table entry */
#ifndef PVR_ATOMIC_PTE
ppte = pte_offset(ppmd, pkvAddr);
pte = *ppte;
#else
ppte = pte_offset_atomic(ppmd, pkvAddr);
pte = *ppte;
pte_kunmap(ppte);
#endif
/* Check for a valid page */
if (pte_present(pte)) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
/* pte_page returns the kernel virtual address */
rv = pte_page(pte);
#else
/* pte_page returns a pointer to the page structure */
rv = pte_page(pte);
#endif
#if 0
DPF("New: page_address:0x%08lX", pksegAddr);
#endif
#if 0
DPF("New: ppgd:0x%08lX, ppmd:0x%08lX, ppte:0x%08lX", ppgd,
ppmd, &pte);
#endif
} else {
DPF("hostfunc.c - ConvertKVToPage: Failed to find a valid page table entry");
}
} else {
DPF("hostfunc.c - ConvertKVToPage: Failed to find a valid mid-level page directory");
}
} else {
DPF("hostfunc.c - ConvertKVToPage: Failed to find a valid page directory");
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,10)
unlock_kernel();
#else
spin_unlock(&init_mm.page_table_lock);
#endif
}
#endif
return rv;
}
#if defined(CONFIG_MTRR)
/* MTRR bookkeeping so we can avoid spewing mtrr_del messages (the
linux api doesn't allow us to quietly fail to remove a mtrr, and
HostUnMapPhysToLin doesn't know if one existed */
typedef struct _mtrr_map_entry
{
unsigned physAddr; /* base physical address of the write-combined region */
unsigned pages; /* length of the region in pages */
struct _mtrr_map_entry *next;
}
mtrr_map_entry;
static mtrr_map_entry *mtrrMap = 0; /* MTRRs this driver has added */
ulong pvr_mtrr_add(unsigned physAddr, unsigned pages)
{
mtrr_map_entry *tmp;
/* first make sure we don't already have this in our list */
for (tmp = mtrrMap; tmp; tmp = tmp->next)
if ((tmp->physAddr == physAddr) && (tmp->pages == pages))
return TRUE;
if (mtrr_add(physAddr, pages * PAGE_SIZE, MTRR_TYPE_WRCOMB, 0) < 0) {
DPF("pvr_mtrr_add: couldn't set mtrr");
return FALSE;
} else {
tmp = vmalloc(sizeof(mtrr_map_entry));
tmp->physAddr = physAddr;
tmp->pages = pages;
tmp->next = mtrrMap;
mtrrMap = tmp;
return TRUE;
}
}
/*
 * pvr_mtrr_del - remove the MTRR for [physAddr, +pages) if (and only if)
 * this driver added it, and drop the matching bookkeeping entry.
 * A region we never added is silently ignored, which is the whole point
 * of the bookkeeping (mtrr_del would log an error otherwise).
 */
void pvr_mtrr_del(unsigned physAddr, unsigned pages)
{
    mtrr_map_entry *tmp;
    /* Matching entry at the head of the list */
    if (mtrrMap && (mtrrMap->physAddr == physAddr) && (mtrrMap->pages == pages)) {
        tmp = mtrrMap;
        mtrr_del(-1, physAddr, pages * PAGE_SIZE);
        mtrrMap = mtrrMap->next;
        vfree(tmp);
        return;
    }
    /* Search the tail for a matching entry */
    for (tmp = mtrrMap; tmp && tmp->next; tmp = tmp->next) {
        if ((tmp->next->physAddr == physAddr) && (tmp->next->pages == pages)) {
            mtrr_map_entry *t = tmp->next;
            mtrr_del(-1, physAddr, pages * PAGE_SIZE);
            tmp->next = t->next;
            vfree(t);
            return;
        }
    }
    /* Not found: we never added an MTRR for this region, so do nothing.
       (Bug fix: the old code freed the non-matching head entry here,
       corrupting the bookkeeping list without removing any MTRR.) */
}
#endif /* defined(CONFIG_MTRR) */
/*
* Function Name : HostPageablePageAlloc()
* Inputs : dwPages - number of pages to allocate
* Outputs : None
* Returns : Pointer to linear address of allocated pageable memory
* if success, returns NULL on failure
* Globals Used : None
* Description : Allocate specified number of pages of pageable memory
* NOTE Use HostPageablePageFree() to free this memory.
*/
/* Allocate dwPages pages of pageable memory; NULL on failure.
   Free with HostPageablePageFree(). */
void* HostPageablePageAlloc(ulong dwPages)
{
    void* pvMem;
    /* Bug fix: %lu matches the ulong argument (was %d) */
    DPF("HostPageablePageAlloc: %lu pages", dwPages);
    pvMem = virtual_allocate_reserve(dwPages, TRUE);
    return pvMem;
}
/*
* Function Name : HostPageablePageFree
* Inputs : pvBase - pointer to start of linear memory to free
* Outputs : None
* Returns : None
* Globals Used : None
* Description : Free memory allocated from HostPageablePageAlloc
* Used by the heap manager
*/
void HostPageablePageFree(void* pvBase)
{
/* Release a buffer obtained from HostPageablePageAlloc(). */
virtual_deallocate_unreserve(pvBase);
}
/*
* Function Name : HostNonPageablePageAlloc()
* Inputs : dwPages - number of pages to allocate
* Outputs : None
* Returns : Pointer to linear address of allocated non-pageable memory
* if success, returns NULL on failure
* Globals Used : None
* Description : Allocate specified number of pages of non-pageable memory
* NOTE Use HostNonPageablePageFree() to free this memory.
*/
/* Allocate dwPages pages of non-pageable memory; NULL on failure.
   Free with HostNonPageablePageFree(). */
void* HostNonPageablePageAlloc(ulong dwPages)
{
    void* pvMem;
    /* Bug fix: %lu matches the ulong argument (was %d) */
    DPF("HostNonPageablePageAlloc: %lu pages", dwPages);
#if defined(GCC_IA32) /* REVISIT AND CHECK NON ARM VERSIONS FOR VALIDITY OF CACHE SETTING */
    pvMem = virtual_allocate_reserve(dwPages, TRUE);
#else
    pvMem = virtual_allocate_reserve(dwPages, FALSE);
#endif
    return pvMem;
}
/*
* Function Name : HostNonPageablePageFree
* Inputs : pvBase - pointer to start of linear memory to free
* Outputs : None
* Returns : None
* Globals Used : None
* Description : Free memory allocated from HostNonPageablePageAlloc
* Used by the heap manager
*/
void HostNonPageablePageFree(void* pvBase)
{
/* Release a buffer obtained from HostNonPageablePageAlloc(). */
virtual_deallocate_unreserve(pvBase);
}
/*
* Function Name : HostNonCachedPageAlloc()
* Inputs : dwPages - number of pages to allocate
* Outputs : None
* Returns : Pointer to linear address of allocated non-cached memory
* if success, returns NULL on failure
* Globals Used : None
* Description : Allocate specified number of pages of non-paged non-cached
* memory. NOTE Use HostNonCachedPageFree() to free this memory.
*/
/* Allocate dwPages pages of non-paged, non-cached memory; NULL on failure.
   Free with HostNonCachedPageFree(). */
void* HostNonCachedPageAlloc(ulong dwPages)
{
    void* pvMem;
    /* Bug fix: %lu matches the ulong argument (was %d) */
    DPF("HostNonCachedPageAlloc: %lu pages", dwPages);
    pvMem = virtual_allocate_reserve(dwPages, FALSE);
    return pvMem;
}
/*
* Function Name : HostNonCachedPageFree
* Inputs : pvBase - pointer to start of linear memory to free
* dwNumPages - number of pages to free
* Outputs : None
* Returns : None
* Globals Used : None
* Description : Free memory allocated from HostNonCachedPageAlloc()
* Used by the heap manager
*/
void HostNonCachedPageFree(void* pvBase, ulong dwNumPages)
{
/* Release a buffer from HostNonCachedPageAlloc(); a zero page count
   is treated as a no-op. */
if (dwNumPages)
virtual_deallocate_unreserve(pvBase);
}
/*
* Function Name : HostHeapAlloc
* Inputs : dwSize - number of bytes to allocate
* Outputs : None
* Returns : Pointer to allocated memory on success or NULL for failure
* Globals Used : None
* Description : Allocate memory from non-paged pool. No assumptions can
* be made about the page alignment or physical contiguousness
* of the allocated buffer
*/
/* Allocate dwSize bytes from the non-paged pool; NULL on failure.
   No page-alignment or physical-contiguity guarantees. */
void* HostHeapAlloc(ulong dwSize)
{
    void* pvMem;
    DPF("HostHeapAlloc: %lu bytes", dwSize);
    /* Bug fix: virtual_allocate_reserve() takes a PAGE count (see
       HostPageablePageAlloc).  The old expression
       (dwSize+PAGE_SIZE-1) | PAGE_MASK ORed the mask bits into the
       size, requesting a huge allocation.  Round bytes up to pages. */
    pvMem = virtual_allocate_reserve((dwSize + PAGE_SIZE - 1) / PAGE_SIZE, TRUE);
    return pvMem;
}
/*
* Function Name : HostHeapFree
* Inputs : pvBase - Pointer to memory to free
* Outputs : None
* Returns : None
* Globals Used : None
* Description : Free memory from allocated from heap
*/
void HostHeapFree(void* pvBase)
{
/* Release a buffer obtained from HostHeapAlloc(). */
virtual_deallocate_unreserve(pvBase);
}
/*
* Function Name : HostMoveMem
* Inputs : pvDest - pointer to destination buffer
pvSrc - pointer to source buffer
dwCount - number of bytes to copy
* Outputs : None
* Returns : None
* Globals Used : None
* Description : Equivalent to C memmove
*/
void HostMoveMem(void* pvDest, void* pvSrc, ulong dwByteCount)
{
    /* Overlap-safe block copy - same contract as C memmove(). */
    memmove(pvDest, pvSrc, (size_t) dwByteCount);
}
/*
* Function Name : HostStringEQ
* Inputs : pszStr1 - first string to compare
* pszStr2 - second string to compare
* Outputs : None
* Returns : TRUE if the two strings are equal else returns FALSE
* Globals Used : None
* Description : Compare two ASCII strings
*/
ulong HostStringsEQ(char* pszStr1, char* pszStr2)
{
    /* TRUE (1) when both NUL-terminated strings match exactly. */
    return !strcmp(pszStr1, pszStr2);
}
/*
* Function Name : HostCopyMem
* Inputs : pDest - Pointer to destination buffer of copy
* pSrc - Pointer to source buffer of copy
* dwCount - number of bytes to copy
* Outputs : None
* Returns : None
* Globals Used : None
* Description : Copy memory from one buffer to another (same as C memcpy)
*/
void HostCopyMem(void* pDest, void* pSrc, ulong dwCount)
{
    /* Forward block copy (C memcpy semantics - regions must not overlap). */
    memcpy(pDest, pSrc, (size_t) dwCount);
}
/*
* Function Name : HostAbs
* Inputs : nVal - value to make positive
* Outputs : None
* Returns : The absolute value of nVal
* Globals Used : None
* Description : This routine returns the absolute value of nVal
*/
ulong HostAbs(signed int nVal)
{
return abs(nVal);
}
/*
* Function Name : HostSleep
* Inputs : dwTimePeriod - number of milliseconds to sleep
* Outputs : None
* Returns : None
* Globals Used : None
* Description : Routine to sleep for given time period
* NOTE : HostWaitus() relies on fact that HostSleep()
* only has one parameter which is the time period to
* sleep
*/
void HostSleep(ulong dwMilliSecPeriod)
{
/* NOTE(review): mdelay() busy-waits rather than sleeping, so long
   periods will hog the CPU - confirm callers only request short delays. */
mdelay((ulong) dwMilliSecPeriod);
}
/*
* Function Name : HostMapLinToPhys
* Inputs : psDCB - ptr to device for which the look is for
pvLinAddr - pointer to locked location
* Returns : A physical bus address corresponding
to the given linear address.
Will crash if an invalid address is passed in
(so make sure you lock it)
*/
ulong HostMapLinToPhys(void* pvLinAddr)
{
/* Linear -> CPU physical -> bus address.  The input must reference
   locked memory; an unmapped address will crash in ConvertLinToPhys. */
ulong dwLinAddr, dwPhysAddr, dwBusAddr;
dwLinAddr = (ulong) pvLinAddr;
dwPhysAddr = ConvertLinToPhys(dwLinAddr);
dwBusAddr = ConvertPhysToBus(dwPhysAddr);
#if 0
DPF("HostMapLinToPhys 0x%08lX --> 0x%08lX --> 0x%08lX", dwLinAddr,
dwPhysAddr, dwBusAddr);
#endif
return (dwBusAddr);
}
/*
* Function Name : HostContigHeapAlloc
* Inputs : dwSize - number of bytes to allocate
* Outputs : ppvLinAddr - pointer to variable that will receive the linear address of buffer
* pdwPhysAddr - pointer to variable that will receive the physical address of the buffer
* Returns : KM_OK if allocation successed else returns KM_ERROR_OUT_OF_MEMORY
* Globals Used : None
* Description : Allocate a contiguous block of physical non-paged memory.
* No assumptions can be made about the buffers page alignment
*/
/* Allocate dwSize bytes of physically contiguous non-paged memory.
   On success fills *ppvLinAddr / *pdwPhysAddr and returns KM_OK;
   otherwise zeroes *pdwPhysAddr and returns KM_ERROR_OUT_OF_MEMORY. */
ulong HostContigHeapAlloc(ulong dwSize, void* * ppvLinAddr, ulong* pdwPhysAddr)
{
    /* Bug fix: %lu matches the ulong argument (was %d) */
    DPF("HostContigHeapAlloc: %lu bytes", dwSize);
    *ppvLinAddr = kernel_allocate_reserve(dwSize);
    if (*ppvLinAddr)
        *pdwPhysAddr = HostMapLinToPhys(*ppvLinAddr);
    else {
        *pdwPhysAddr = 0;
        DPF("HostContigHeapAlloc: *********** unable to allocate %lu bytes contig memory", dwSize);
        return KM_ERROR_OUT_OF_MEMORY;
    }
    return KM_OK;
}
/*
* Function Name : HostContigHeapFree
* Inputs : pvAddr - pointer to buffer allocated with HostContigHeapAlloc
* Outputs : None
* Returns : None
* Globals Used : None
* Description : Frees memory allocated with HostContigHeapAlloc
*/
void HostContigHeapFree(void* pvAddr)
{
/* Release a buffer obtained from HostContigHeapAlloc(). */
kernel_deallocate_unreserve((unsigned)pvAddr);
}
/*
* Function Name : HostWaitDWORDChange
* Inputs : psDCB - ptr to device if addr is in FB or reg space else NULL
* pdwAddr - memory address of volatile ulong that we are wait to change
* dwWaitMask - bit mask indicating which bits we are waiting to change
* dwWaitVal - value for which we are waiting
* dwTimeOut - timeout period in microseconds
* Outputs : None
* Returns : KM_OK if change occured before timeout period expired
* else KM_ERROR_KERNEL_TIMEOUT
* Globals Used : None
* Description : Spins waiting for the givens bits to in ulong to change or
* for the timeout period to expire.
*/
/* Spin (1us steps) until (*pdwAddr & dwWaitMask) == dwWaitVal or the
   timeout (in microseconds) expires.  Returns KM_OK on the match,
   KM_ERROR_KERNEL_TIMEOUT otherwise. */
ulong HostWaitDWORDChange(volatile ulong* pdwAddr, ulong dwWaitMask, ulong dwWaitVal,
    ulong dwMicroSecTimeOut)
{
    volatile ulong Before, After;
    Before = *pdwAddr;
    /* Bug fix: After was read uninitialized by the failure DPF when the
       caller passed a zero timeout. */
    After = Before;
    while (dwMicroSecTimeOut) {
        After = *pdwAddr;
        if ((After & dwWaitMask) == dwWaitVal)
            return KM_OK;
        udelay(1);
        dwMicroSecTimeOut--;
    }
    DPF("*FAIL* HostWaitulongChange, addr 0x%08lX, before 0x%08lX, after 0x%08lX\n", (ulong) pdwAddr, Before, After);
    return KM_ERROR_KERNEL_TIMEOUT;
}
/*
* Function Name : HostWaitus
* Inputs : eax - number of microseconds to wait/(sleep)
* Outputs : None
* Returns : None
* Globals Used : None
* Description : Routine to wait/(sleep) for given time period
* Note. Under Win9x this is a sit and spin for a time
* function, coz we cant guarantee that the Win9x sleep
* function will not suffer early termination due to a
* VM_WAKEUP system message. Under NTx this is implemented
* similarly to HostSleep.
* Note. This function is DEFINED via
*
* void __declspec(naked) HostWaitus(void)
* {
* ...
* }
*
* but DECLARED via
*
* void HostWaitus(void);
*/
void HostWaitus(ulong dwMicroSecs)
{
/* Busy-wait for the given number of microseconds. */
udelay(dwMicroSecs);
}
/*
* Function Name : HostMemSet
* Inputs : pvDest - pointer to start of buffer to be set
* dwSize - number of bytes to set
* bValue - value to set each byte to
* Outputs : None
* Returns : None
* Globals Used : None 7
* Description : Function that does the same as the C memset() functions
*/
void HostMemSet(void* pvDest, ulong dwSize, uchar byValue)
{
    /* Fill dwSize bytes at pvDest with byValue (C memset semantics). */
    memset(pvDest, (int) byValue, (size_t) dwSize);
}
/*
* Function Name : HostReadRegistryulongFromString
* Inputs : pcKeyString -- which key to retrieve data from
* : pdwData -- ptr to ulong to fill out with the value
* Outputs : None
* Returns : KM_OK | KM_SYMBOL_NOT_FOUND
* Globals Used : None
* Description : Function that reads the registry
*/
/* Look up pcKey/pcValueName in the in-memory pvrcore_registry table.
   On a match stores the value in *pdwData and returns KM_OK;
   returns KM_ERROR_SYMBOL_NOT_FOUND otherwise.  dwDevCookie is unused. */
ulong HostReadRegistryDWORDFromString(ulong dwDevCookie, char *pcKey,
    char *pcValueName, ulong* pdwData)
{
    int k;
    DPF("*** Read Registry key %s, Value %s ***", pcKey, pcValueName);
    for (k = 0; k < number_registry_entries; k++) {
        if (strcmp(pvrcore_registry[k].cEntryName, pcKey) == 0 &&
            strcmp(pvrcore_registry[k].cKeyName, pcValueName) == 0) {
            /* we found the entry that we searched for, return the value */
            *pdwData = pvrcore_registry[k].dwData;
            /* Bug fix: %lX matches the ulong value (was %X) */
            DPF("returning 0x%lX", *pdwData);
            return KM_OK;
        }
    }
    DPF(" ... not found");
    return (KM_ERROR_SYMBOL_NOT_FOUND);
}
/*
*
* PCI stuff
*
*/
void HostPCIWriteByte(ulong dwBus, ulong dwFunc, ulong dwSlot, ulong dwReg,
    uchar ucValue)
{
    /* Write one byte of PCI config space; unknown devices are ignored. */
    struct pci_dev *dev = pci_find_slot(dwBus, PCI_DEVFN(dwSlot, dwFunc));
    if (!dev)
        return;
    pci_write_config_byte(dev, (int) dwReg, (u8) ucValue);
}
void HostPCIWriteWord(ulong dwBus, ulong dwFunc, ulong dwSlot, ulong dwReg,
    ushort wValue)
{
    /* Write one 16-bit word of PCI config space; unknown devices ignored. */
    struct pci_dev *dev = pci_find_slot(dwBus, PCI_DEVFN(dwSlot, dwFunc));
    if (!dev)
        return;
    pci_write_config_word(dev, (int) dwReg, (u16) wValue);
}
void HostPCIWriteDword(ulong dwBus, ulong dwFunc, ulong dwSlot, ulong dwReg,
    ulong dwValue)
{
    /* Write one 32-bit dword of PCI config space; unknown devices ignored. */
    struct pci_dev *dev = pci_find_slot(dwBus, PCI_DEVFN(dwSlot, dwFunc));
    if (!dev)
        return;
    pci_write_config_dword(dev, (int) dwReg, (u32) dwValue);
}
/*
These return the actual data, not an error indication
*/
uchar HostPCIReadByte(ulong dwBus, ulong dwFunc, ulong dwSlot, ulong dwReg)
{
    /* Read one byte of PCI config space; returns 0 for unknown devices. */
    struct pci_dev *dev;
    uchar Value;
    dev = pci_find_slot(dwBus, PCI_DEVFN(dwSlot, dwFunc));
    if (!dev)
        return 0;
    pci_read_config_byte(dev, (int)dwReg, (u8 *)&Value);
    return Value;
}
/* Read one 16-bit word of PCI config space; returns 0 for unknown
   devices (the data itself, not an error code). */
ushort HostPCIReadWord(ulong dwBus, ulong dwFunc, ulong dwSlot, ulong dwReg)
{
    struct pci_dev *dev;
    /* Bug fix: was declared signed `short`, mismatching the u16 read
       and the unsigned return type. */
    ushort Value;
    dev = pci_find_slot(dwBus, PCI_DEVFN(dwSlot, dwFunc));
    if (dev) {
        pci_read_config_word(dev, (int)dwReg, (u16 *)&Value);
        return Value;
    } else
        return 0;
}
ulong HostPCIReadDword(ulong dwBus, ulong dwFunc, ulong dwSlot, ulong dwReg)
{
    /* Read one 32-bit dword of PCI config space; returns 0 for unknown
       devices. */
    struct pci_dev *dev;
    ulong Value;
    dev = pci_find_slot(dwBus, PCI_DEVFN(dwSlot, dwFunc));
    if (!dev)
        return 0;
    pci_read_config_dword(dev, (int)dwReg, (u32 *)&Value);
    return Value;
}
/*
* Function Name : HostMapPhysToLin
* Inputs : dwPhysBase - phys memory to map
* swPages - number of pages to map
* dwCacheType - required cacheing type of mapping (CACHETYPE_UNCACHED, CACHETYPE_CACHED or CACHETYPE_WRITECOMBINED)
* Outputs : None
* Returns : Linear addr of mapping on succes, else NULL
* Globals Used : None
* Description : Maps the physical memory into linear addr range
*/
/* Map dwPages pages of physical memory starting at dwPhysBase into the
   kernel linear address space with the requested cache type
   (CACHETYPE_UNCACHED / CACHED / WRITECOMBINED).  Registers the mapping
   for later mmap; returns the linear address, or NULL on failure. */
void* HostMapPhysToLin(ulong dwDevCookie, ulong dwPhysBase, ulong dwPages,
    ulong dwCacheType)
{
    void *LinearAddress;
    if (dwDevCookie);
    DPF("vmalloc_start: %p vmalloc_end: %p", VMALLOC_START, VMALLOC_END);
#if defined(ARM)
    /* On ARM we need to use the physical address version with flag bits */
    /* dwPhysBase is a bus address - we need a cpu physical address */
    dwPhysBase = ConvertBusToPhys(dwPhysBase);
#endif
#ifdef CONFIG_MTRR
    /* Write-combining needs an MTRR; fall back to uncached if we can't
       get one. */
    if (dwCacheType == CACHETYPE_WRITECOMBINED)
        if (!pvr_mtrr_add(dwPhysBase, dwPages)) {
            DPF("HostMapPhysToLin: Failed to use WC mapping, will try uncached");
            dwCacheType = CACHETYPE_UNCACHED;
        }
#endif
    if (dwCacheType != CACHETYPE_UNCACHED)
        LinearAddress = ioremap((ulong)dwPhysBase,
            (ulong)(dwPages * PAGE_SIZE));
    else
        LinearAddress = ioremap_nocache((ulong)dwPhysBase,
            (ulong)(dwPages * PAGE_SIZE));
    /* Bug fix: ioremap can fail; don't register a NULL mapping. */
    if (LinearAddress == NULL) {
        DPF("HostMapPhysToLin: ioremap failed for phys 0x%08lX", dwPhysBase);
        return NULL;
    }
    pvr_mmap_register_area(LinearAddress, dwPages * PAGE_SIZE, PVR_MMAP_CONTIG,
        (dwCacheType!=CACHETYPE_UNCACHED)? TRUE: FALSE);
    return (void*)LinearAddress;
}
/*
* Function Name : HostUnMapPhysToLin
* Inputs : pvLinAddr - linear addr of mapping
* swPages - number of pages that were mapped
* Outputs : None
* Returns : TRUE on success, else FALSE
* Globals Used : None
* Description : Unmaps memory that was mapped with HostMapPhysToLin
*/
ulong HostUnMapPhysToLin(ulong dwDevCookie, void* pvLinAddr, ulong dwPages)
{
/* Tear down a mapping created by HostMapPhysToLin(); always returns TRUE. */
if (dwPages);
if (dwDevCookie);
#if 0
DPF("HostUnMapPhysToLin: Removing contiguous phys-to-lin mapping at address: 0x%08X\n", pvLinAddr);
#endif
#ifdef CONFIG_MTRR
/* NOTE(review): ConvertLinToPhys() is applied to an ioremap'd address
   here - verify it recovers the original physical base the MTRR was
   added for. */
pvr_mtrr_del(ConvertLinToPhys((ulong)pvLinAddr), dwPages);
#endif
pvr_mmap_remove_registered_area(pvLinAddr);
#if 0
DPF("HostUnMapPhysToLin: Calling iounmap for address: 0x%08X\n", pvLinAddr);
#endif
iounmap(pvLinAddr);
return TRUE;
}
/*
* Function Name : HostOSSupportAGP
* Inputs : None
* Outputs : None
* Returns : FALSE
* Globals Used : None
* Description : NT4 does not support AGP
*/
ulong HostOSSupportAGP(void)
{
    /* TRUE only when the module was built with AGP support AND the AGP
       backend was successfully initialized at load time. */
#ifdef SUPPORT_AGP
    DPF("HostOSSupportAGP: %s", agpInitialized ? "true" : "false");
    return agpInitialized ? TRUE : FALSE;
#else
    return FALSE;
#endif
}
/*
* Function Name : HostReserveLinBuf()
* Inputs : dwPages - number of pages to reserve for buffer
* dwRegion - indicates which memory reagion to reserve buffer in
* bAGPMode - indicates if device is running in AGP or not
* Outputs : ppvMDLChain - pointer to variable that will receive
* the address of the start of the region chain
* Returns : On success the linear addr of the reserved region, else NULL
* Globals Used : None
* Description : Reserves a linear buffer
*/
/* Reserve a dwPages-page linear buffer described by a LINBUFSTRUCT.
   Returns the LINBUFSTRUCT pointer (also stored in *ppvMDLChain) on
   success, NULL on allocation failure.  Pages are committed later via
   HostCommitLinBuf(). */
void* HostReserveLinBuf(ulong dwDevCookie, ulong dwPages, ulong dwRegion,
    void* * ppvMDLChain, ulong bAGPMode)
{
    LINBUFSTRUCT *pLinBufStruct =
        (LINBUFSTRUCT *)kmalloc(sizeof(LINBUFSTRUCT), GFP_KERNEL);
    void* *pRet = NULL;
    if (dwDevCookie);
    if (pLinBufStruct == NULL) {
        DPF("HostReserveLinBuf: failed to allocate pLinBufStruct");
    } else {
        pLinBufStruct->dwPagesCommitted = 0;
        pLinBufStruct->dwPagesReserved = dwPages;
        pLinBufStruct->dwRegion = dwRegion;
        pLinBufStruct->bAGPMode = bAGPMode;
        pLinBufStruct->pAGPList = NULL;
        pLinBufStruct->pBase = (void *)0xffffffff;
        pLinBufStruct->pageList
            = kmalloc(sizeof(ulong *) * dwPages, GFP_KERNEL);
        /* Bug fix: the page-list allocation was never checked before
           the memset (NULL dereference on OOM). */
        if (pLinBufStruct->pageList == NULL) {
            DPF("HostReserveLinBuf: failed to allocate pageList");
            kfree(pLinBufStruct);
            pLinBufStruct = NULL;
        } else {
            memset(pLinBufStruct->pageList, 0, sizeof(ulong *) * dwPages);
            if ((dwRegion == MEMREGION_GART_UC) || (dwRegion == MEMREGION_GART_WC))
                pvr_mmap_register_area(pLinBufStruct,
                    pLinBufStruct->dwPagesReserved * PAGE_SIZE,
                    PVR_MMAP_AGP_SCATTER, TRUE);
            else
                pvr_mmap_register_area(pLinBufStruct,
                    pLinBufStruct->dwPagesReserved * PAGE_SIZE,
                    PVR_MMAP_SCATTER, TRUE);
            pRet = (void *)pLinBufStruct;
        }
    }
    /* (The unused "out:" label has been removed - it only generated a
       compiler warning.)  %lu/%p fix the old %d/%08x format mismatches. */
    DPF("HostReserveLinBuf: %lu pages, pLinBufStruct %p, pRet %p", dwPages,
        pLinBufStruct, pRet);
    *ppvMDLChain = pLinBufStruct;
    return pRet;
}
/* Allocate one page, mark it reserved (so it survives mmap paging),
   store its kernel virtual address in *virt and return its physical
   address.  On failure *virt is 0 and 0 is returned. */
ulong HostReservePageAlloc(ulong *virt)
{
    ulong page;
    page = __get_free_page(GFP_KERNEL);
    *virt = page;
    /* Bug fix: on failure the old code still called virt_to_phys(NULL)
       and returned a bogus physical address. */
    if (!page)
        return 0;
    mem_map_reserve(virt_to_page(page));
    return virt_to_phys((void *)page);
}
void HostReservePageFree(ulong page)
{
/* Undo HostReservePageAlloc(): clear the reserved flag and free the page. */
mem_map_unreserve(virt_to_page(page));
free_page(page);
}
void *HostKernelAlloc(ulong nbytes)
{
/* Thin kmalloc wrapper; free with HostKernelFree().  May return NULL. */
return kmalloc(nbytes, GFP_KERNEL);
}
void HostKernelFree(void *ptr)
{
/* Release memory obtained from HostKernelAlloc(). */
kfree(ptr);
}
LINBUFSTRUCT *HostScatterListCommit(LINBUFSTRUCT *pLinBufStruct,
ulong dwOffset, ulong dwPages);
/*
* Function Name : HostCommitLinBuf()
* Inputs : pvLinAddr - pointer to start of linear region that we
* want to commit physical pages to
* dwPages - number of pages to commit
* dwRegion - indicates which memory reagion to commit buffer in
* pvMDLChain - pointer to region chain returned from HostReserveLinBuf
* bAGPMode - indicates if device is running in AGP or not
* Outputs : None
* Returns : On sucess new addr of start of linear region, else NULL
* Globals Used : None
* Description : Commit some of the linear buffer allocated with HostReserveLinBuf
*/
/* Commit dwPages pages to a buffer reserved with HostReserveLinBuf(),
   starting at pvLinAddr (which must be the end of the already-committed
   region).  AGP regions get pages from the GART; others go through
   HostScatterListCommit().  Returns the buffer pointer on success,
   NULL on failure (matching the documented contract - the old code
   returned the buffer even on range/offset errors). */
void* HostCommitLinBuf(void* pvLinAddr, ulong dwPages, ulong dwRegion,
    void* pvMDLChain, ulong bAGPMode)
{
    LINBUFSTRUCT *pLinBufStruct = (LINBUFSTRUCT *)pvMDLChain;
    ulong dwOffset = ((ulong)pvLinAddr - (ulong)pLinBufStruct)/PAGE_SIZE;
    void* pRet = NULL;
    int i;
    DPF("HostCommitLinBuf: pvMDLChain: %08lx. pvLinAddr %08lx to %08lx",
        pvMDLChain, pvLinAddr, (ulong) pvLinAddr + dwPages * PAGE_SIZE);
    if (pLinBufStruct == NULL) {
        DPF("HostCommitLinBuf: NULL pvMDLChain");
        return NULL;
    }
    if (dwOffset != pLinBufStruct->dwPagesCommitted) {
        /* Bug fix: the format string had one %d for two arguments */
        DPF("HostCommitLinBuf: request to commit more pages must start from end of already committed pages (offset %d committed %d)",
            dwOffset, pLinBufStruct->dwPagesCommitted);
        return NULL;
    }
    if (dwOffset + dwPages > pLinBufStruct->dwPagesReserved) {
        DPF("HostCommitLinBuf: commit request must be inside reserve range (%d %d) < %d",
            dwOffset, dwPages, pLinBufStruct->dwPagesReserved);
        return NULL;
    }
    if (dwRegion == MEMREGION_GART_UC || dwRegion == MEMREGION_GART_WC) {
        agp_memory *tmp, *mem = pvr_agp_alloc(dwPages);
        /* Bug fix: pvr_agp_alloc() failure was dereferenced unchecked */
        if (mem == NULL) {
            DPF("HostCommitLinBuf: pvr_agp_alloc failed");
            return NULL;
        }
        /* Append to the buffer's chain of AGP allocations */
        if (pLinBufStruct->pAGPList) {
            for (tmp = (agp_memory*)pLinBufStruct->pAGPList; tmp->next != NULL; tmp = tmp->next)
                ;
            tmp->next = mem;
        } else
            pLinBufStruct->pAGPList = (struct agp_memory *)mem;
        /* Record the aperture address of each newly committed page */
        for (i=0; i<dwPages; i++) {
            pLinBufStruct->pageList[pLinBufStruct->dwPagesCommitted] =
                agpInfo.aper_base + PAGE_SIZE * (mem->pg_start + i);
            pLinBufStruct->dwPagesCommitted++;
        }
    } else
        pLinBufStruct = HostScatterListCommit(pLinBufStruct,
            dwOffset, dwPages);
    pRet = pLinBufStruct;
    DPF("HostCommitLinBuf: %d pages, %08x", dwPages, pRet);
    return pRet;
}
/*
* Function Name : HostFreeLinBuf()
* Inputs dwRegion - indicates which memory reagion to free buffer in
* pvMDLChain - pointer to start of MDL chain for this buffer
* Outputs : None
* Returns : PMXDXSRV_OK on success, else the appropiate PMXDXSRV_ERROR_xxxx
* Globals Used : None
* Description : Free linear buffer that was allocated with HostReserveLinBuf()
*/
ulong HostFreeLinBuf(ulong dwRegion, void* pvMDLChain)
{
/* Release a buffer reserved with HostReserveLinBuf(): unregister the
   mmap area, return AGP or reserved pages, then free the bookkeeping.
   Always returns TRUE. */
LINBUFSTRUCT *pLinBufStruct = (LINBUFSTRUCT *)pvMDLChain;
ulong i;
if (pLinBufStruct == NULL) {
DPF("HostFreeLinBuf: NULL pvMDLChain");
} else {
if (pLinBufStruct->dwPagesReserved == 0) {
DPF("HostFreeLinBuf: (warning) No pages reserved (%x)",
pvMDLChain);
} else if (pLinBufStruct->dwPagesCommitted == 0) {
DPF("HostFreeLinBuf: (warning) No pages (never committed?) (%x)", pvMDLChain);
}
pvr_mmap_remove_registered_area(pLinBufStruct);
if ((dwRegion == MEMREGION_GART_UC) ||
(dwRegion == MEMREGION_GART_WC)) {
/* AGP buffer: return GART memory in one go */
pvr_agp_free_list((agp_memory*)pLinBufStruct->pAGPList);
} else {
/* PCI buffer: release each individually reserved page */
for (i=0; i<pLinBufStruct->dwPagesCommitted; i++)
HostReservePageFree(pLinBufStruct->pageList[i]);
}
kfree(pLinBufStruct->pageList);
}
/* NOTE(review): reached with pLinBufStruct == NULL too - relies on
   kfree(NULL) being a no-op. */
kfree(pLinBufStruct);
return TRUE;
}
/*
* Function Name : HostCommitPhysToLin
* Inputs : pvLin - linear addr to map phyical mem into
* dwPhys - start of physical memory to map
* dwPages - number of pages to map
* Outputs : None
* Returns : TRUE on success else FALSE
* Globals Used : None
* Description : Maps the given phyical memory into the given linear addr
* Not implented on NT
*/
ulong HostCommitPhysToLin(void* pvLin, ulong dwPhys, ulong dwPages)
{
/* Not implemented on Linux - always reports failure. */
DPF("HostCommitPhysToLin: Not implemented");
return FALSE;
}
/*
* Function Name : HostDecommitLinBuf()
* Inputs : pvLinAddr - pointer to start of linear region that we
* want to decommit
* dwPages - number of pages to decommit
* dwRegion - indicates which memory reagion the buffer in
* pvMDLChain - pointer to region chain returned from HostReserveLinBuf
* Outputs : None
* Returns : On sucess new addr of start of linear region, else NULL
* Description : Decommit some of the linear buffer commited with HostCommitLinBuf
*/
void* HostDeCommitLinBuf(void* pvLinAddr, ulong dwPages, ulong dwRegion,
void* pvMDLChain)
{
/* Intentionally a no-op: the driver never shrinks a LinBuf, so the
   committed pages are kept until HostFreeLinBuf(). */
/* Since the driver never actually shrinks LinBuf, ignore this */
DPF("WARNING: HostDecommitLinBuf() called\n");
return pvLinAddr;
}
/*
* Function Name : HostTransBusRel2SysRelAddr
* Inputs : psLocation - struct that contains the bus rel addr to translate
* dwBusNum - bus number of device
* Outputs : None
* Returns :
* Globals Used : None
* Description : Decommits physical memory from linear addr range
*/
ulong HostTransBusRel2SysRelAddr(ulong dwBusAddr, ulong dwBusNum)
{
    /* Bus-relative and system-relative addresses coincide on this host;
       dwBusNum is accepted only for interface compatibility. */
    (void) dwBusNum;
    return dwBusAddr;
}
/*
* Function Name : ConvertPhysToBus
* Inputs : dwPhysAddr - A physical system address
* Outputs : None
* Returns : The corresponding bus address.
* Globals Used : None
* Description : Converts a cpu physical address to a bus address
*/
ulong ConvertPhysToBus(ulong dwPhysAddr)
{
#if defined(GCC_IA32) || defined(SH4)
    /* Bus addresses and physical addresses are identical */
    return dwPhysAddr;
#elif defined(ARM)
    /* FIXME: Check that correct macros are used */
    if (dwPhysAddr >= 0x40000000 && dwPhysAddr < 0x60000000)
        return dwPhysAddr - 0x40000000;   /* PCI window -> bus address */
    if (dwPhysAddr < 0x10000000)
        return dwPhysAddr + 0x80000000;   /* host RAM -> bus address */
    return dwPhysAddr;                    /* unknown - leave unchanged */
#else
#error("hostfunc.c - ConvertPhysToBus - Unknown platform.");
    return dwPhysAddr;
#endif
}
#if defined(ARM)
/*
* Function Name : ConvertBusToPhys
* Inputs : dwBusAddr - A bus address understood by the device
* Outputs : None
* Returns : The corresponding system CPU physical address.
* Globals Used : None
* Description : Converts a bus address to a physical system address
*/
ulong ConvertBusToPhys(ulong dwBusAddr)
{
#if defined(GCC_IA32) || defined(SH4)
    /* Bus addresses and physical addresses are identical */
    return dwBusAddr;
#elif defined(ARM)
    /* FIXME: Check that correct macros are used */
    if (dwBusAddr >= 0x80000000 && dwBusAddr < 0x90000000)
        return dwBusAddr - 0x80000000;    /* host RAM bus addr -> phys */
    if (dwBusAddr < 0x20000000)
        return dwBusAddr + 0x40000000;    /* PCI bus addr -> phys */
    return dwBusAddr;                     /* unknown - leave unchanged */
#else
#error("hostfunc.c - ConvertBusToPhys - Unknown platform.");
    return dwBusAddr;
#endif
}
#endif
/*
* Function Name : HostCreateTLBEntries
* Inputs : pvInBuffer - pointer to buffer to create TLB entries for
* dwNumPages - number of pages
* pdwTLB - pointer to TLB to fill out
* Outputs : None
* Returns : TRUE on success else FALSE
* Globals Used : None
* Description : Given a buffer it creates TLB entries that describe the
* physical memory that makes the linear buffer
*/
/* Fill pdwTLB with one physical/bus page address per page of pvInBuffer.
 * Scatter/gather buffers (found via the KV offset table) have their page
 * lists copied out directly; otherwise each page is translated individually.
 * Always returns TRUE. */
ulong HostCreateTLBEntries(void* pvInBuffer, ulong* pdwTLB, ulong dwNumPages)
{
uchar* pvCurLinAddr = pvInBuffer;
LINBUFSTRUCT *pLinBufStruct = (LINBUFSTRUCT *)pvInBuffer;
ulong *pdwCurTLB = pdwTLB;
ulong BusAddr, i, dwCount;
/* Check if we're looking at a scatter/gather list.
 * NOTE(review): entry 0 is examined unconditionally (the "i==0" term) and
 * the scan stops at the first entry with nOffset == 0 - this assumes the
 * table is terminated by such an entry; confirm against mmap.c. */
for (i=0; i==0 || (psKVOffsetStruct->pKVOffsetTable[i].nOffset != 0); i++) {
KV_OFFSET_STRUCT *pOffsetStruct = &psKVOffsetStruct->pKVOffsetTable[i];
/* Match on the page-aligned kernel-virtual address and a scatter map type */
if (pOffsetStruct->pkvPageAlignedAddress == (ulong)pLinBufStruct &&
(pOffsetStruct->eMapType == PVR_MMAP_SCATTER ||
pOffsetStruct->eMapType == PVR_MMAP_AGP_SCATTER)) {
/* found it - copy entries out */
if (pLinBufStruct->pAGPList)
/* AGP scatter-gather (linbuf): pageList entries are used as-is */
for (dwCount = 0; dwCount < dwNumPages; dwCount++)
*pdwCurTLB++ = pLinBufStruct->pageList[dwCount];
else
/* PCI scatter-gather (linbuf): pageList holds kernel-virtual
 * addresses, translate each one to a physical address */
for (dwCount = 0; dwCount < dwNumPages; dwCount++)
*pdwCurTLB++ = virt_to_phys((void *)pLinBufStruct->pageList[dwCount]);
return TRUE;
}
}
/* Not a scatter buffer: find TLB entry for each page */
for (dwCount = 0; dwCount < dwNumPages; dwCount++) {
/* Obtain the bus address of the page */
BusAddr = HostMapLinToPhys(pvCurLinAddr);
/* Set TLB entry - mask keeps the page frame (assumes 4K pages) */
*pdwCurTLB = BusAddr & 0xfffff000;
/* Move to next page and TLB entry */
pdwCurTLB++;
pvCurLinAddr = pvCurLinAddr + PAGE_SIZE;
}
return TRUE;
}
/*
* Function Name : HostWriteRegistryString
* Inputs : pszKey - registry key to open
pszValue - registry value to write
pszBuf - buffer containing the registry string to store
* Outputs : None
* Returns : TRUE on success, else FALSE
* Globals Used : None
* Description : Write the registry
*/
ulong HostWriteRegistryString(ulong dwDevCookie, char* pszKey, char* pszValue,
                              char* pszBuf)
{
	int idx;
	ulong dwNumeric;

	/* This function should never be called for Win9x/LINUX */
	DPF("HostWriteRegistryString : Cannot do on Win9x/LINUX");
	/* Cache the numeric interpretation of the string alongside the text */
	dwNumeric = simple_strtoul(pszBuf, NULL, 0);
	/* If the key/value pair already exists, update it in place */
	for (idx = 0; idx < number_registry_entries; idx++) {
		if (strcmp(pvrcore_registry[idx].cEntryName, pszKey) != 0 ||
		    strcmp(pvrcore_registry[idx].cKeyName, pszValue) != 0)
			continue;
		strcpy(pvrcore_registry[idx].cData, pszBuf);
		pvrcore_registry[idx].dwData = dwNumeric;
		return TRUE;
	}
	/* Not found - append a new entry if the table has room */
	if (number_registry_entries >= MAX_REGISTRY_ENTRIES)
		return FALSE;
	strcpy(pvrcore_registry[number_registry_entries].cEntryName, pszKey);
	strcpy(pvrcore_registry[number_registry_entries].cKeyName, pszValue);
	strcpy(pvrcore_registry[number_registry_entries].cData, pszBuf);
	pvrcore_registry[number_registry_entries].dwData = dwNumeric;
	number_registry_entries++;
	return TRUE;
}
/*
* Function Name : HostGetCurrentProcess
* Inputs : None
* Outputs : None
* Returns : ID of current process
* Globals Used : None
* Description : Returns handle for current process
*/
ulong HostGetCurrentProcessID(void)
{
return current->pid;
}
/*
* Function Name : HostGetSysMemSize
* Inputs : None
* Outputs : None
* Returns : The total amount of system RAM in system
* Globals Used : None
* Description : Gets the total amount of system RAM in system
*/
ulong HostGetSysMemSize(void)
{
struct sysinfo i;
ulong rv;
si_meminfo(&i);
rv = i.totalram << PAGE_SHIFT;
return rv;
}
/* Function version of strcat: the kernel's strcat macro was renamed to
 * linux_strcat at the top of this file so that this out-of-line definition
 * can exist for callers that need a real function symbol.
 * NOTE(review): src is not const-qualified - presumably to match whatever
 * prototype the 2.4 headers declare; confirm before changing. */
char *strcat(char *dest, char *src)
{
return linux_strcat(dest, src);
}
/* Function version of strlen; delegates to the renamed kernel macro
 * (see the strcat/strlen #define dance at the top of this file). */
size_t strlen(char *s)
{
return linux_strlen(s);
}
/*
* Function Name : CheckAndFixUserPointer
* Inputs : pKernOrUser - the pointer to check and fix if necessary
* Outputs : None
* Returns : Kernel pointers unchanged, otherwise the kernel equivalent
* of a user address which has been mapped. For unmapped user
* addresses, return NULL.
* Globals Used : None
* Description : If necessary, retrieves the kernel equivalent of a mapped
* user address, or returns kernel pointers unchanged. If
* a user address is supplied without a kernel equivalent,
* return NULL.
*/
ulong CheckAndFixUserPointer(ulong pKernOrUser)
{
	/* access_ok succeeding means the value lies in the user address range;
	 * in that case look up the kernel-side equivalent of the mapping
	 * (pvr_mmap_user_to_kern returns NULL for unmapped user addresses). */
	if (!access_ok(VERIFY_READ, pKernOrUser, sizeof(ulong)))
		return pKernOrUser;	/* already a kernel pointer - unchanged */
	return pvr_mmap_user_to_kern(pKernOrUser);
}
/* Copy count bytes from kernel buffer 'from' to user buffer 'to'.
 * Returns the number of bytes that could NOT be copied (0 on success),
 * per the Linux copy_to_user contract. */
ulong HostCopyToUser(void *to, void *from, ulong count)
{
return copy_to_user(to, from, count);
}
/* Copy count bytes from user buffer 'from' to kernel buffer 'to'.
 * Returns the number of bytes that could NOT be copied (0 on success),
 * per the Linux copy_from_user contract. */
ulong HostCopyFromUser(void *to, void *from, ulong count)
{
return copy_from_user(to, from, count);
}
/* ********************************************** */
/* AGP functions... */
/* ********************************************** */
#if defined(SUPPORT_AGP)
/* Acquire the agpgart backend, record its info, enable the aperture and set
 * up write-combining over it. Sets agpInitialized on success; on any failure
 * the backend is left untouched and the module reference is dropped. */
void InitializeAGP(void)
{
	if (!(pAGP = inter_module_get_request("drm_agp", "agpgart"))) {
		DPF("InitializeAGP: couldn't get agpgart interface");
		return;
	}
	if (pAGP->acquire()) {
		DPF("InitializeAGP: agp_backend_acquire() failed");
		/* BUGFIX: previously we carried on and used an unacquired
		 * backend; bail out and drop the module reference instead. */
		inter_module_put("drm_agp");
		return;
	}
	pAGP->copy_info(&agpInfo);
	agpInitialized = 1;
	pAGP->enable(agpInfo.mode);
#ifdef DEBUG_AGP
	if (agpInfo.mode & 0x00000200)
		DPF("AGPGART: backend supports sba");
	if (agpInfo.mode & 0x00000010)
		DPF("AGPGART: backend supports fw");
	if (agpInfo.mode & 0x00000004)
		DPF("AGPGART: mode 4x");
	else if (agpInfo.mode & 0x00000002)
		DPF("AGPGART: mode 2x");
	else if (agpInfo.mode & 0x00000001)
		DPF("AGPGART: mode 1x");
#endif
#ifdef CONFIG_MTRR
	/* BUGFIX: asm/mtrr.h is only included under CONFIG_MTRR, so the call
	 * must be guarded the same way or non-MTRR kernels fail to build. */
	if (mtrr_add(agpInfo.aper_base,
	             agpInfo.aper_size * 1024 * 1024, MTRR_TYPE_WRCOMB, 0) < 0)
		DPF("AGPGART: mtrr setup of gart region failed");
#endif
}
/* Tear down AGP state set up by InitializeAGP: release every outstanding
 * GART mapping, remove the MTRR, release the backend and drop the module
 * reference. Safe to call when AGP was never initialized. */
void ShutdownAGP(void)
{
	if (agpInitialized) {
		/* walk the map to delete any remaining agp mappings */
		while (agpMap)
			HostGARTRelease(agpMap->mappedAddr, agpMap, agpMap->length);
#ifdef CONFIG_MTRR
		/* BUGFIX: guarded to match the conditional include of asm/mtrr.h.
		 * reg == -1 makes mtrr_del look the region up by base/size. */
		mtrr_del(-1, agpInfo.aper_base, agpInfo.aper_size * 1024 * 1024);
#endif
		pAGP->release();
		inter_module_put("drm_agp");
		agpInitialized = 0;
	}
}
/* Debug-only dump of the GART reservation map: one line per reservation
 * (agp_map_entry) followed by one line per committed agp_memory chunk.
 * Compiles to an empty function unless DEBUG_AGP is defined. */
static void PrintGARTMap(void)
{
#ifdef DEBUG_AGP
agp_map_entry *map;
agp_memory *mem;
DPF("GART MAP vvvvvv");
for (map = agpMap; map; map = map->next) {
DPF("AGPMAPENT: offset=%5d pages length=%4d pages [lin=%08X] [phys=%08X]",
map->offset, map->length, map->mappedAddr,
agpInfo.aper_base + PAGE_SIZE * map->offset);
for (mem = map->agp; mem; mem = mem->next)
DPF("\tAGPMAPMEM: offset=%5d pages count=%4d pages", mem->pg_start,
mem->page_count);
}
DPF("GART MAP ^^^^^^");
#endif
}
/* AGP reservation - doesn't actually allocate or bind any pages, but
sets aside the requested size in the AGP aperature. */
static agp_map_entry *pvr_agp_reserve(ulong dwPages)
{
	agp_map_entry *rv, *mem;
	unsigned max_offset;

	rv = vmalloc(sizeof(agp_map_entry));
	/* BUGFIX: check the allocation BEFORE touching it - the old code wrote
	 * rv->length and rv->agp prior to the NULL test. */
	if (!rv)
		return 0;
	rv->length = dwPages;
	rv->agp = NULL;
	max_offset = (agpInfo.aper_size * 1024 * 1024) / PAGE_SIZE;
	if (!agpMap) {
		/* Empty map: take the start of the aperture if the request fits */
		if (dwPages <= max_offset) {
			agpMap = rv;
			rv->offset = 0;
			/* NOTE(review): ioremap() can return NULL; mappedAddr is
			 * used unchecked by callers - confirm and harden. */
			rv->mappedAddr = ioremap(agpInfo.aper_base, dwPages*PAGE_SIZE);
			rv->next = NULL;
			agpReserved += dwPages;
			return rv;
		}
		vfree(rv);
		return 0;
	}
	/* Find the first gap between reservations (or the tail of the list)
	 * large enough for the request, and splice the new entry in there. */
	for (mem = agpMap; mem; mem = mem->next) {
		if ((!mem->next && mem->offset + mem->length + dwPages <= max_offset) ||
		    (mem->next && mem->next->offset - (mem->offset + mem->length) >= dwPages)) {
			rv->offset = mem->offset + mem->length;
			rv->mappedAddr = ioremap(agpInfo.aper_base + rv->offset*PAGE_SIZE,
			                         dwPages*PAGE_SIZE);
			rv->next = mem->next;
			mem->next = rv;
			agpReserved += dwPages;
			return rv;
		}
	}
	vfree(rv);
	return 0;
}
/* Allocate and bind the requested number of pages. Taken from slot
if provided. */
/* Allocate and bind dwPages of AGP memory at page offset dwOffset within the
 * given reservation slot. Returns the new agp_memory chunk (linked at the
 * head of slot->agp) or NULL on overlap/allocation/bind failure. */
static agp_memory *
pvr_agp_commit(agp_map_entry *slot, ulong dwPages, ulong dwOffset)
{
	agp_memory *mem;
	ulong agpOffset;
	int rv;

	DPF("pvr_agp_commit: %d pages, %d offset\n", dwPages, dwOffset);
	/* sanity check... */
	if (!slot || (dwOffset+dwPages > slot->length)) {
		/* BUGFIX: the format arguments were missing, so this message
		 * previously printed stack garbage. */
		DPF("pvr_agp_commit: invalid request offset %d, pages %d",
		    dwOffset, dwPages);
		return NULL;
	}
	agpOffset = slot->offset+dwOffset;
	/* check for duplicates and overlaps with already-committed chunks */
	for (mem=slot->agp; mem!=NULL; mem=mem->next) {
		if (mem->pg_start == agpOffset && mem->page_count == dwPages) {
			DPF("pvr_agp_commit: duplicate commit");
			return NULL;
		}
		/* BUGFIX: proper half-open interval intersection test. The old
		 * test missed new ranges that fully contained an existing chunk
		 * and wrongly rejected merely adjacent ranges. */
		if (agpOffset < mem->pg_start + mem->page_count &&
		    agpOffset + dwPages > mem->pg_start) {
			DPF("pvr_agp_commit: overlapping commit");
			return NULL;
		}
	}
	mem = pAGP->allocate_memory(dwPages, AGP_NORMAL_MEMORY);
	if (!mem) {
		DPF("pvr_agp_commit: allocate_memory() failure");
		return NULL;
	}
	if ((rv = pAGP->bind_memory(mem, agpOffset)) != 0) {
		switch (rv) {
		case -EINVAL:
			DPF("pvr_agp_commit: agp_bind_memory() failed - EINVAL");
			break;
		case -EBUSY:
			DPF("pvr_agp_commit: agp_bind_memory() failed - EBUSY");
			break;
		default:
			DPF("pvr_agp_commit: agp_bind_memory() failed - unknown");
			break;
		}
		pAGP->free_memory(mem);
		return NULL;
	}
	/* Link the new chunk at the head of the slot's committed list */
	mem->prev = NULL;
	if (slot->agp)
		slot->agp->prev = mem;
	mem->next = slot->agp;
	slot->agp = mem;
	PrintGARTMap();
	return mem;
}
/* Free the memory */
/* Unbind and free the committed chunk that exactly matches dwPages at page
 * offset dwOffset within the slot. Bails (with a message) if the request only
 * partially overlaps an existing chunk; silently does nothing if no chunk
 * matches. */
static void
pvr_agp_decommit(agp_map_entry *slot, ulong dwPages, ulong dwOffset)
{
	agp_memory *mem;
	ulong agpOffset;

	/* sanity check... */
	if (!slot || (dwOffset+dwPages > slot->length)) {
		/* BUGFIX: format arguments were missing (printed stack garbage) */
		DPF("pvr_agp_decommit: invalid request offset %d, pages %d",
		    dwOffset, dwPages);
		return;
	}
	agpOffset = slot->offset + dwOffset;
	for (mem=slot->agp; mem!=NULL; mem=mem->next) {
		if (mem->pg_start == agpOffset && mem->page_count == dwPages) {
			/* Exact match: unbind, unlink from the slot's list, free */
			pAGP->unbind_memory(mem);
			if (mem->prev)
				mem->prev->next = mem->next;
			else
				slot->agp = mem->next;
			if (mem->next)
				mem->next->prev = mem->prev;
			pAGP->free_memory(mem);
			PrintGARTMap();
			return;
		}
		/* BUGFIX: proper interval intersection (consistent with
		 * pvr_agp_commit); the old test missed full containment and
		 * flagged adjacent ranges as overlapping. */
		if (agpOffset < mem->pg_start + mem->page_count &&
		    agpOffset + dwPages > mem->pg_start) {
			DPF("pvr_agp_decommit: overlapping decommit - bailing");
			return;
		}
	}
}
/* Reserve dwPages of aperture space and commit memory into all of it.
 * Returns the committed agp_memory chunk, or NULL on failure. */
static agp_memory *
pvr_agp_alloc(ulong dwPages)
{
	agp_map_entry *slot;
	agp_memory *mem;

	slot = pvr_agp_reserve(dwPages);
	if (!slot) {
		DPF("pvr_agp_alloc: pvr_agp_reserve() failed");
		return NULL;
	}
	mem = pvr_agp_commit(slot, dwPages, 0);
	if (!mem)
		/* BUGFIX: the reservation used to be leaked when the commit
		 * failed; give it back so the aperture space is reusable. */
		pvr_agp_unreserve(slot);
	return mem;
}
/* Free every agp_memory chunk on mem_list by decommitting and unreserving
 * the map slot whose extent exactly equals the chunk.
 * NOTE(review): only slots whose offset/length exactly match the chunk's
 * pg_start/page_count are found; chunks committed at a non-zero offset
 * inside a slot would be skipped - presumably callers only pass whole-slot
 * allocations (cf. pvr_agp_alloc, which commits at offset 0). Confirm. */
static void
pvr_agp_free_list(agp_memory *mem_list)
{
agp_memory *tmp, *mem;
agp_map_entry *slot;
for (mem = mem_list; mem; mem = tmp) {
tmp = mem->next; /* read next first: the decommit below frees mem */
for (slot = agpMap; slot != NULL; slot = slot->next)
if (slot->offset == mem->pg_start && slot->length == mem->page_count) {
pvr_agp_decommit(slot, slot->length, 0);
pvr_agp_unreserve(slot);
break;
}
}
}
/* Release a reservation: unmap its aperture window, decommit any memory
 * still bound inside it, unlink it from the global map and free it. */
static void
pvr_agp_unreserve(agp_map_entry *slot)
{
	agp_memory *mem, *tmp;
	agp_map_entry *tmap;

	if (!slot)
		return;
	iounmap(slot->mappedAddr);
	for (mem = slot->agp; mem; mem = tmp) {
		tmp = mem->next;	/* decommit frees mem - read next first */
		pvr_agp_decommit(slot, mem->page_count, mem->pg_start-slot->offset);
	}
	if (slot == agpMap) {
		agpMap = slot->next;
		agpReserved -= slot->length;
		vfree(slot);
		return;
	}
	/* Unlink slot from the middle or end of the list */
	tmap = agpMap;
	while (tmap && tmap->next && tmap->next != slot)
		tmap = tmap->next;
	/* BUGFIX: guard tmap - the old code dereferenced NULL when agpMap was
	 * empty but slot was non-NULL. */
	if (tmap && tmap->next == slot) {
		tmap->next = slot->next;
		agpReserved -= slot->length;
		vfree(slot);
	} else {
		/* BUGFIX: message used to claim "HostGARTRelease" */
		DPF("pvr_agp_unreserve: couldn't find map entry to delete");
	}
}
#endif /* defined(SUPPORT_AGP) */
/*
Function Name : HostGARTReserve
Inputs :
Returns :
Globals Used :
Description :
*/
/* Reserve dwPages of GART aperture space (no memory is committed yet).
 * On success returns the kernel linear address of the ioremapped window,
 * stores the bus-physical base in *pdwPhysAddr and the reservation context
 * in *ppvContext; on failure returns NULL and zeroes both outputs.
 * dwRegion (WC/UC) is currently only used for debug output. */
void *
HostGARTReserve(ulong dwPages, ulong dwRegion, ulong *pdwPhysAddr,
                void **ppvContext)
{
#if defined(SUPPORT_AGP)
	agp_map_entry *map_entry;	/* unused locals agpMem/linAddr removed */
#ifdef DEBUG_AGP
	DPF("HostGARTReserve: %d pages %s reserve count=%d",
	    dwPages, (dwRegion == MEMREGION_GART_WC ? "WC" : "UC"), agpReserved);
#endif
#if 0
	/* FIXME: stand-in for the NT AllocationLimit check */
	if (dwPages * PAGE_SIZE > 32 * 1024 * 1024) {
#ifdef DEBUG_AGP
		DPF("HostGARTReserve: kicking out too large reservation");
#endif
		return NULL;
	}
#endif
	map_entry = pvr_agp_reserve(dwPages);
	if (map_entry) {
		*ppvContext = map_entry;
		*pdwPhysAddr = agpInfo.aper_base + map_entry->offset * PAGE_SIZE;
		/* Make the window mappable to user space */
		pvr_mmap_register_area(map_entry->mappedAddr,
		                       map_entry->length * PAGE_SIZE, PVR_MMAP_CONTIG, TRUE);
#ifdef DEBUG_AGP
		DPF("HostGARTReserve: phys=%08X lin=%08X",
		    *pdwPhysAddr, map_entry->mappedAddr);
#endif
		PrintGARTMap();
		return map_entry->mappedAddr;
	}
	*ppvContext = NULL;
	*pdwPhysAddr = 0;
	return NULL;
#else
	return NULL;
#endif /* defined(SUPPORT_AGP) */
}
/*
Function Name : HostGARTRelease
Inputs :
Returns :
Globals Used :
Description :
*/
/* Release a GART reservation previously obtained from HostGARTReserve:
 * removes the user-space mapping registration and gives the aperture space
 * back. dwPages is informational only (the slot knows its own length). */
void HostGARTRelease(void * pvLinBase, void * pvContext, ulong dwPages)
{
#if defined(SUPPORT_AGP)
	agp_map_entry *map = (agp_map_entry *)pvContext;	/* unused tmp/mem/i removed */
#ifdef DEBUG_AGP
	DPF("HostGARTRelease: %p lin %p context %d pages", pvLinBase, pvContext,
	    dwPages);
#endif
	pvr_mmap_remove_registered_area(pvLinBase);
	pvr_agp_unreserve(map);
#endif
}
/*
* Function Name : HostGARTAlloc
* Inputs : dwPages -- number of pages
* ppvLinAddr -- pointer to pvoid to fill out with the (host) linear addr of the reserved region
* pdwPhysAddr -- pointer to dword to fill out with the gart physical addr of the reserved region
* (i.e. > 0xF8000000)
* dwRegion -- KM_GART_WC or KM_GART_UC
* Returns : KM_OK | KM_ERROR_COMMIT
* Globals Used : None
* Description : Allocates memory in the AGP GART region
*/
ulong HostGARTAlloc(ulong dwPages, void **ppvLinAddr, void **ppvContext,
                    ulong *pdwPhysAddr, ulong dwRegion)
{
#ifdef SUPPORT_AGP
	/* unused locals (agpMem, mem, rv, i, offset) removed */
#ifdef DEBUG_AGP
	DPF("HostGARTAlloc: %d page %s", dwPages,
	    (dwRegion == MEMREGION_GART_WC ? "WC" : "UC"));
#endif
	/* Reserve aperture space, then commit memory into all of it. The old
	 * brace-less if/else chain straddling #endif parsed correctly but was
	 * an accident waiting to happen; this is the same control flow. */
	*ppvLinAddr = HostGARTReserve(dwPages, dwRegion, pdwPhysAddr, ppvContext);
	if (*ppvLinAddr) {
		if (HostGARTCommit(dwPages, dwRegion, *ppvLinAddr, *ppvContext, 0))
			return KM_OK;
		/* commit failed - give the reservation back */
		HostGARTRelease(*ppvLinAddr, *ppvContext, dwPages);
	}
#endif
	return KM_ERROR_OUT_OF_MEMORY;
}
/*
* Function Name : HostGARTFree
* Inputs : pvLinAddr -- address of region to free
* Returns : KM_OK always
* Description : Frees memory in the AGP GART region
*/
/* Free a GART allocation made by HostGARTAlloc (decommit happens as part of
 * the release). Always returns KM_OK. */
ulong HostGARTFree(void * pvLinAddr, void * pvContext, ulong dwPages)
{
#ifdef SUPPORT_AGP
	/* unused locals (i, mem) removed */
#ifdef DEBUG_AGP
	DPF("HostGARTFree: %p lin %p context %d pages", pvLinAddr, pvContext,
	    dwPages);
#endif
	HostGARTRelease(pvLinAddr, pvContext, dwPages);
#endif
	return KM_OK;
}
/*
* Function Name : HostGARTReserveMax
* Inputs : None
* Outputs : Max GART reserve size
* Returns : TRUE on success, else FALSE
* Globals Used : None
* Description : Finds the maximum amount of GART aperture space (in pages)
*               still available for reservation
*/
ulong HostGARTReserveMax(ulong dwMaxRequired)
{
	unsigned mem = 0;

	(void)dwMaxRequired;	/* unused - replaces the old "if (x);" idiom */
#ifdef SUPPORT_AGP
	/* Remaining aperture = total aperture pages minus pages reserved */
	mem = (agpInfo.aper_size * 1024 * 1024) / PAGE_SIZE - agpReserved;
#ifdef DEBUG_AGP
	DPF("HostGARTReserveMax: %d pages (%d bytes)", mem, mem * PAGE_SIZE);
#endif
#endif
	return mem;
}
/*
Function Name : HostGARTCommit
Inputs :
Returns :
Globals Used :
Description :
*/
/* Commit dwPages of AGP memory at page offset dwPageOff within a previously
 * reserved region (pvContext from HostGARTReserve). Returns TRUE/FALSE. */
ulong HostGARTCommit(ulong dwPages, ulong dwRegion, void * pvLinBase,
                     void * pvContext, ulong dwPageOff)
{
#if defined(SUPPORT_AGP)
	agp_map_entry *map = (agp_map_entry *)pvContext;	/* unused i/rv removed */
	agp_memory *agp;
#ifdef DEBUG_AGP
	/* BUGFIX: don't dereference map in the debug print before
	 * pvr_agp_commit has had a chance to reject a NULL context. */
	if (map)
		DPF("HostGARTCommit: pages=%d offset=%d (map offset=%d length=%d)",
		    dwPages, dwPageOff, map->offset, map->length);
#endif
	agp = pvr_agp_commit(map, dwPages, dwPageOff);
	if (!agp) {
		DPF("HostGARTCommit: pvr_agp_commit failed");
		return FALSE;
	}
	return TRUE;
#else
	return FALSE;
#endif
}
/*
Function Name : HostGARTDecommit
Inputs :
Returns :
Globals Used :
Description :
*/
/* Decommit dwPages of AGP memory at page offset dwPageOff within a reserved
 * region; the reservation itself stays in place. */
void HostGARTDecommit(ulong dwPages, void * pvLinBase, void * pvContext,
                      ulong dwPageOff)
{
#if defined(SUPPORT_AGP)
	agp_map_entry *map = (agp_map_entry *)pvContext;	/* unused mem/i removed */
#ifdef DEBUG_AGP
	DPF("HostGARTDecommit: pages=%d offset=%d", dwPages, dwPageOff);
#endif
	pvr_agp_decommit(map, dwPages, dwPageOff);
#endif /* defined(SUPPORT_AGP) */
}
|