Diffstat (limited to 'sgx/services4/srvkm/common/buffer_manager.c')
-rw-r--r-- sgx/services4/srvkm/common/buffer_manager.c | 1870
1 files changed, 1315 insertions, 555 deletions
diff --git a/sgx/services4/srvkm/common/buffer_manager.c b/sgx/services4/srvkm/common/buffer_manager.c
index e92fa6c..45d63b4 100644
--- a/sgx/services4/srvkm/common/buffer_manager.c
+++ b/sgx/services4/srvkm/common/buffer_manager.c
@@ -1,28 +1,46 @@
-/**********************************************************************
- *
- * Copyright (C) Imagination Technologies Ltd. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful but, except
- * as otherwise stated in writing, without any warranty; without even the
- * implied warranty of merchantability or fitness for a particular purpose.
- * See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Imagination Technologies Ltd. <gpl-support@imgtec.com>
- * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
- *
- ******************************************************************************/
+/*************************************************************************/ /*!
+@Title Buffer management functions for Linux
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Manages buffers mapped into two memory spaces - cpu and device,
+ either of which can be virtual or physical.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+*/ /**************************************************************************/
#include "services_headers.h"
@@ -38,8 +56,9 @@ static IMG_VOID
BM_FreeMemory (IMG_VOID *pH, IMG_UINTPTR_T base, BM_MAPPING *psMapping);
static IMG_BOOL
BM_ImportMemory(IMG_VOID *pH, IMG_SIZE_T uSize,
- IMG_SIZE_T *pActualSize, BM_MAPPING **ppsMapping,
- IMG_UINT32 uFlags, IMG_UINTPTR_T *pBase);
+ IMG_SIZE_T *pActualSize, BM_MAPPING **ppsMapping,
+ IMG_UINT32 uFlags, IMG_PVOID pvPrivData,
+ IMG_UINT32 ui32PrivDataLength, IMG_UINTPTR_T *pBase);
static IMG_BOOL
DevMemoryAlloc (BM_CONTEXT *pBMContext,
@@ -51,14 +70,53 @@ DevMemoryAlloc (BM_CONTEXT *pBMContext,
static IMG_VOID
DevMemoryFree (BM_MAPPING *pMapping);
+/*!
+******************************************************************************
+
+ @Function AllocMemory
+
+ @Description Allocate a buffer mapped into both cpu and device virtual
+ address spaces. This is now quite simple:
+
+ 1. Choose whence to get the memory;
+ 2. Obtain memory from that source;
+ 3. Work out the actual buffer addresses in other spaces.
+
+ In choosing whence to get the memory we work like this:
+
+ 1. If an import arena exists, use it unless BP_CONTIGUOUS is set;
+ 2. Use a contiguous pool.
+
+ @Input pBMContext - BM context
+ @Input psBMHeap - BM heap
+ @Input psDevVAddr - device virtual address (optional)
+ @Input uSize - requested buffer size in bytes.
+ @Input uFlags - property flags for the buffer.
+ @Input uDevVAddrAlignment - required device virtual address
+ alignment, or 0.
+ @Input pvPrivData - opaque private data passed through to allocator
+ @Input ui32PrivDataLength - length of opaque private data
+ @Input ui32ChunkSize - chunk size in bytes (sparse allocations only)
+ @Input ui32NumVirtChunks - number of virtual chunks (sparse allocations only)
+ @Input ui32NumPhysChunks - number of physically backed chunks (sparse allocations only)
+ @Input pabMapChunk - per-chunk mapping flags (sparse allocations only)
+
+ @Output pBuf - receives a pointer to a descriptor of the allocated
+ buffer.
+ @Return IMG_TRUE - Success
+ IMG_FALSE - Failed.
+
+ *****************************************************************************/
static IMG_BOOL
-AllocMemory (BM_CONTEXT *pBMContext,
- BM_HEAP *psBMHeap,
- IMG_DEV_VIRTADDR *psDevVAddr,
- IMG_SIZE_T uSize,
- IMG_UINT32 uFlags,
- IMG_UINT32 uDevVAddrAlignment,
- BM_BUF *pBuf)
+AllocMemory (BM_CONTEXT *pBMContext,
+ BM_HEAP *psBMHeap,
+ IMG_DEV_VIRTADDR *psDevVAddr,
+ IMG_SIZE_T uSize,
+ IMG_UINT32 uFlags,
+ IMG_UINT32 uDevVAddrAlignment,
+ IMG_PVOID pvPrivData,
+ IMG_UINT32 ui32PrivDataLength,
+ IMG_UINT32 ui32ChunkSize,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_BOOL *pabMapChunk,
+ BM_BUF *pBuf)
{
BM_MAPPING *pMapping;
IMG_UINTPTR_T uOffset;
@@ -68,16 +126,18 @@ AllocMemory (BM_CONTEXT *pBMContext,
"AllocMemory (uSize=0x%x, uFlags=0x%x, align=0x%x)",
uSize, uFlags, uDevVAddrAlignment));
-
-
-
+ /*
+ what to do depends on combination of DevVaddr generation
+ and backing RAM requirement
+ */
if (uFlags & PVRSRV_HAP_GPU_PAGEABLE)
{
/* in case of a pageable buffer, we must bypass RA which could
* combine/split individual mappings between buffers:
*/
if (!BM_ImportMemory(psBMHeap, uSize, IMG_NULL, &pMapping,
- uFlags, (IMG_UINTPTR_T *)&(pBuf->DevVAddr.uiAddr)))
+ uFlags, IMG_NULL, 0,
+ (IMG_UINTPTR_T *)&(pBuf->DevVAddr.uiAddr)))
{
PVR_DPF ((PVR_DBG_ERROR, "AllocMemory: failed"));
return IMG_FALSE;
@@ -88,19 +148,19 @@ AllocMemory (BM_CONTEXT *pBMContext,
{
if(uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR)
{
-
+ /* user supplied DevVAddr, RAM backing */
PVR_DPF ((PVR_DBG_ERROR, "AllocMemory: combination of DevVAddr management and RAM backing mode unsupported"));
return IMG_FALSE;
}
-
+ /* BM supplied DevVAddr, RAM Backing */
-
+ /* check heap attributes */
if(psBMHeap->ui32Attribs
& (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG
|PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG))
{
-
+ /* specify arena (VM+RAM)*/
pArena = psBMHeap->pImportArena;
PVR_ASSERT(psBMHeap->sDevArena.psDeviceMemoryHeapInfo->ui32Attribs & PVRSRV_MEM_RAM_BACKED_ALLOCATION);
}
@@ -110,18 +170,76 @@ AllocMemory (BM_CONTEXT *pBMContext,
return IMG_FALSE;
}
+ /* Now allocate from the arena we chose above. */
+ if (uFlags & PVRSRV_MEM_SPARSE)
+ {
+ IMG_BOOL bSuccess;
+ IMG_SIZE_T puiActualSize;
+
+ /* Allocate physical memory */
+ bSuccess = BM_ImportMemory(psBMHeap,
+ ui32ChunkSize * ui32NumPhysChunks,
+ &puiActualSize,
+ &pMapping,
+ uFlags,
+ pvPrivData,
+ ui32PrivDataLength,
+ IMG_NULL); /* We allocate VM space */
+
+ if (!bSuccess)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "AllocMemory: BM_ImportMemory failed"));
+ return IMG_FALSE;
+ }
+
+ if (puiActualSize != ui32ChunkSize * ui32NumPhysChunks)
+ {
+ /*
+ Most likely the chunk size was not a host page multiple, so
+ return with an error.
+ */
+ PVR_DPF((PVR_DBG_ERROR, "AllocMemory: Failed to allocate memory for sparse allocation"));
+ BM_FreeMemory(pArena, IMG_NULL, pMapping);
+ return IMG_FALSE;
+ }
+
+ pMapping->uSizeVM = ui32ChunkSize * ui32NumVirtChunks;
+ uSize = pMapping->uSizeVM;
+ pMapping->ui32ChunkSize = ui32ChunkSize;
+ pMapping->ui32NumVirtChunks = ui32NumVirtChunks;
+ pMapping->ui32NumPhysChunks = ui32NumPhysChunks;
+ pMapping->pabMapChunk = pabMapChunk;
+
+ /* Allocate VA space and map in the physical memory */
+ bSuccess = DevMemoryAlloc (pBMContext,
+ pMapping,
+ IMG_NULL,
+ uFlags,
+ (IMG_UINT32)uDevVAddrAlignment,
+ &pMapping->DevVAddr);
+ if (!bSuccess)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "AllocMemory: Failed to allocate device memory"));
+ BM_FreeMemory(pArena, IMG_NULL, pMapping);
+ return IMG_FALSE;
+ }
- if (!RA_Alloc(pArena,
- uSize,
- IMG_NULL,
- (IMG_VOID*) &pMapping,
- uFlags,
- uDevVAddrAlignment,
- 0,
- (IMG_UINTPTR_T *)&(pBuf->DevVAddr.uiAddr)))
- {
- PVR_DPF((PVR_DBG_ERROR, "AllocMemory: RA_Alloc(0x%x) FAILED", uSize));
- return IMG_FALSE;
+ /* uDevVAddrAlignment is currently set to zero so QAC generates a warning, which we override */
+ /* PRQA S 3356,3358 1 */
+ PVR_ASSERT (uDevVAddrAlignment>1?(pMapping->DevVAddr.uiAddr%uDevVAddrAlignment)==0:1);
+ pBuf->DevVAddr.uiAddr = pMapping->DevVAddr.uiAddr;
+ }
+ else
+ {
+ if (!RA_Alloc(pArena,
+ uSize,
+ IMG_NULL,
+ (IMG_VOID*) &pMapping,
+ uFlags,
+ uDevVAddrAlignment,
+ 0,
+ pvPrivData,
+ ui32PrivDataLength,
+ (IMG_UINTPTR_T *)&(pBuf->DevVAddr.uiAddr)))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "AllocMemory: RA_Alloc(0x%x) FAILED", uSize));
+ return IMG_FALSE;
+ }
}
uOffset = pBuf->DevVAddr.uiAddr - pMapping->DevVAddr.uiAddr;
@@ -134,7 +252,7 @@ AllocMemory (BM_CONTEXT *pBMContext,
pBuf->CpuVAddr = IMG_NULL;
}
- if(uSize == pMapping->uSize)
+ if(uSize == pMapping->uSizeVM)
{
pBuf->hOSMemHandle = pMapping->hOSMemHandle;
}
@@ -151,7 +269,8 @@ AllocMemory (BM_CONTEXT *pBMContext,
}
}
-
+ /* for hm_contiguous and hm_wrapped memory, the pMapping
+ * will have a physical address, else 0 */
pBuf->CpuPAddr.uiAddr = pMapping->CpuPAddr.uiAddr + uOffset;
if(uFlags & PVRSRV_MEM_ZERO)
@@ -166,7 +285,7 @@ AllocMemory (BM_CONTEXT *pBMContext,
{
if(uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR)
{
-
+ /* user supplied DevVAddr, no RAM backing */
PVR_ASSERT(psDevVAddr != IMG_NULL);
if (psDevVAddr == IMG_NULL)
@@ -175,7 +294,7 @@ AllocMemory (BM_CONTEXT *pBMContext,
return IMG_FALSE;
}
-
+ /* just make space in the pagetables */
pBMContext->psDeviceNode->pfnMMUAlloc (psBMHeap->pMMUHeap,
uSize,
IMG_NULL,
@@ -183,22 +302,30 @@ AllocMemory (BM_CONTEXT *pBMContext,
uDevVAddrAlignment,
psDevVAddr);
-
+ /* setup buf */
pBuf->DevVAddr = *psDevVAddr;
}
else
{
-
+ IMG_BOOL bResult;
+ /* BM supplied DevVAddr, no RAM Backing */
-
- pBMContext->psDeviceNode->pfnMMUAlloc (psBMHeap->pMMUHeap,
+ /* just make space in the pagetables */
+ bResult = pBMContext->psDeviceNode->pfnMMUAlloc (psBMHeap->pMMUHeap,
uSize,
IMG_NULL,
0,
uDevVAddrAlignment,
&pBuf->DevVAddr);
+
+ if(!bResult)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "AllocMemory: MMUAlloc failed"));
+ return IMG_FALSE;
+ }
}
-
+
+ /* allocate a mocked-up mapping */
if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
sizeof (struct _BM_MAPPING_),
(IMG_PVOID *)&pMapping, IMG_NULL,
@@ -208,11 +335,12 @@ AllocMemory (BM_CONTEXT *pBMContext,
return IMG_FALSE;
}
-
+ /* setup buf */
pBuf->CpuVAddr = IMG_NULL;
pBuf->hOSMemHandle = 0;
pBuf->CpuPAddr.uiAddr = 0;
+ /* setup mapping */
pMapping->CpuVAddr = IMG_NULL;
pMapping->CpuPAddr.uiAddr = 0;
pMapping->DevVAddr = pBuf->DevVAddr;
@@ -221,16 +349,16 @@ AllocMemory (BM_CONTEXT *pBMContext,
pMapping->hOSMemHandle = 0;
}
-
+ /* Record the arena pointer in the mapping. */
pMapping->pArena = pArena;
pMapping->ui32DevVAddrAlignment = uDevVAddrAlignment;
pMapping->bUnmapped = IMG_FALSE;
-
+ /* record the heap */
pMapping->pBMHeap = psBMHeap;
pBuf->pMapping = pMapping;
-
+ /* output some stats */
PVR_DPF ((PVR_DBG_MESSAGE,
"AllocMemory: pMapping=%08x: DevV=%08X CpuV=%08x CpuP=%08X uSize=0x%x",
(IMG_UINTPTR_T)pMapping,
@@ -247,13 +375,34 @@ AllocMemory (BM_CONTEXT *pBMContext,
pBuf->CpuPAddr.uiAddr,
uSize));
-
+ /* Verify virtual device address alignment */
PVR_ASSERT(((pBuf->DevVAddr.uiAddr) & (uDevVAddrAlignment - 1)) == 0);
return IMG_TRUE;
}
+/*!
+******************************************************************************
+
+ @Function WrapMemory
+
+ @Description Allocate a buffer mapped into both cpu and device virtual
+ address spaces.
+
+ @Input psBMHeap - BM heap
+ @Input uSize - requested buffer size in bytes.
+ @Input ui32BaseOffset - Offset into the first page of the memory to wrap.
+ @Input bPhysContig - Is the wrap physically contiguous.
+ @Input psAddr - List of pages to wrap.
+ @Input pvCPUVAddr - Optional CPU Kernel virtual address (page aligned) of memory to wrap
+ @Input uFlags - property flags for the buffer.
+ @Output pBuf - receives a pointer to a descriptor of the allocated
+ buffer.
+ @Return IMG_TRUE - Success
+ IMG_FALSE - Failed.
+
+ *****************************************************************************/
static IMG_BOOL
WrapMemory (BM_HEAP *psBMHeap,
IMG_SIZE_T uSize,
@@ -274,13 +423,13 @@ WrapMemory (BM_HEAP *psBMHeap,
(IMG_UINTPTR_T)psBMHeap, uSize, ui32BaseOffset, bPhysContig, (IMG_UINTPTR_T)pvCPUVAddr, uFlags));
PVR_ASSERT((psAddr->uiAddr & (ui32PageSize - 1)) == 0);
-
+ /* Only need lower 12 bits of the cpu addr - don't care what size a void* is */
PVR_ASSERT(((IMG_UINTPTR_T)pvCPUVAddr & (ui32PageSize - 1)) == 0);
uSize += ui32BaseOffset;
uSize = HOST_PAGEALIGN (uSize);
-
+ /* allocate a mocked-up mapping */
if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
sizeof(*pMapping),
(IMG_PVOID *)&pMapping, IMG_NULL,
@@ -293,6 +442,7 @@ WrapMemory (BM_HEAP *psBMHeap,
OSMemSet(pMapping, 0, sizeof (*pMapping));
pMapping->uSize = uSize;
+ pMapping->uSizeVM = uSize;
pMapping->pBMHeap = psBMHeap;
pMapping->bUnmapped = IMG_FALSE;
@@ -343,6 +493,7 @@ WrapMemory (BM_HEAP *psBMHeap,
if(OSReservePhys(pMapping->CpuPAddr,
pMapping->uSize,
uFlags,
+ IMG_NULL,
&pMapping->CpuVAddr,
&pMapping->hOSMemHandle) != PVRSRV_OK)
{
@@ -369,7 +520,9 @@ WrapMemory (BM_HEAP *psBMHeap,
}
}
-
+ /*
+ * Allocate device memory for this buffer. Map wrapped pages as read/write
+ */
bResult = DevMemoryAlloc(psBMHeap->pBMContext,
pMapping,
IMG_NULL,
@@ -384,7 +537,12 @@ WrapMemory (BM_HEAP *psBMHeap,
goto fail_cleanup;
}
-
+ /*
+ * Determine the offset of this allocation within the underlying
+ * dual-mapped chunk of memory; we can assume that all three
+ * addresses associated with this allocation are placed at the same
+ * offset within the underlying chunk.
+ */
pBuf->CpuPAddr.uiAddr = pMapping->CpuPAddr.uiAddr + ui32BaseOffset;
if(!ui32BaseOffset)
{
@@ -456,7 +614,7 @@ fail_cleanup:
}
OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, IMG_NULL);
-
+ /*not nulling pointer, out of scope*/
return IMG_FALSE;
}
@@ -497,14 +655,18 @@ ZeroBuf(BM_BUF *pBuf, BM_MAPPING *pMapping, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui3
IMG_SIZE_T ui32CurrentOffset = 0;
IMG_CPU_PHYADDR CpuPAddr;
-
+ /* Walk through the pBuf one page at a time and use
+ * transient mappings to zero the memory */
+
PVR_ASSERT(pBuf->hOSMemHandle);
while(ui32BytesRemaining > 0)
{
IMG_SIZE_T ui32BlockBytes = MIN(ui32BytesRemaining, HOST_PAGESIZE());
CpuPAddr = OSMemHandleToCpuPAddr(pBuf->hOSMemHandle, ui32CurrentOffset);
-
+ /* If the CpuPAddr isn't page aligned then start by writing up to the next page
+ * boundary (or ui32BytesRemaining if less), so that subsequent iterations can
+ * zero whole physical pages. */
if(CpuPAddr.uiAddr & (HOST_PAGESIZE() -1))
{
ui32BlockBytes =
@@ -536,6 +698,26 @@ ZeroBuf(BM_BUF *pBuf, BM_MAPPING *pMapping, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui3
return IMG_TRUE;
}
+/*!
+******************************************************************************
+
+ @Function FreeBuf
+
+ @Description Free a buffer previously allocated with BM_Alloc() or unwrap
+ one previously wrapped with BM_Wrap().
+ The buffer is identified by the buffer descriptor pBuf
+ returned at allocation.
+
+
+ @Input pBuf - buffer descriptor to free.
+ @Input ui32Flags - flags
+ @Input bFromAllocator - Is this being called by the
+ allocator?
+
+ @Return None.
+
+ *****************************************************************************/
static IMG_VOID
FreeBuf (BM_BUF *pBuf, IMG_UINT32 ui32Flags, IMG_BOOL bFromAllocator)
{
@@ -547,7 +729,7 @@ FreeBuf (BM_BUF *pBuf, IMG_UINT32 ui32Flags, IMG_BOOL bFromAllocator)
(IMG_UINTPTR_T)pBuf, pBuf->DevVAddr.uiAddr,
(IMG_UINTPTR_T)pBuf->CpuVAddr, pBuf->CpuPAddr.uiAddr));
-
+ /* record mapping */
pMapping = pBuf->pMapping;
psDeviceNode = pMapping->pBMHeap->pBMContext->psDeviceNode;
@@ -558,32 +740,31 @@ FreeBuf (BM_BUF *pBuf, IMG_UINT32 ui32Flags, IMG_BOOL bFromAllocator)
if(ui32Flags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR)
{
-
+ /* Submemhandle is required by exported mappings */
if ((pBuf->ui32ExportCount == 0) && (pBuf->ui32RefCount == 0))
{
-
+ /* user supplied Device Virtual Address */
if(ui32Flags & PVRSRV_MEM_RAM_BACKED_ALLOCATION)
{
-
+ /* RAM backed allocation */
PVR_DPF ((PVR_DBG_ERROR, "FreeBuf: combination of DevVAddr management and RAM backing mode unsupported"));
}
else
{
-
+ /* free the mocked-up mapping */
OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, IMG_NULL);
- pBuf->pMapping = IMG_NULL;
+ pBuf->pMapping = IMG_NULL; /*nulling pointer alias*/
}
}
}
else
{
-
+ /* BM supplied Device Virtual Address */
if(pBuf->hOSMemHandle != pMapping->hOSMemHandle)
{
-
+ /* Submemhandle is required by exported mappings */
if ((pBuf->ui32ExportCount == 0) && (pBuf->ui32RefCount == 0))
{
-
OSReleaseSubMemHandle(pBuf->hOSMemHandle, ui32Flags);
}
}
@@ -592,7 +773,7 @@ FreeBuf (BM_BUF *pBuf, IMG_UINT32 ui32Flags, IMG_BOOL bFromAllocator)
/* see comment below */
if ((pBuf->ui32ExportCount == 0) && (pBuf->ui32RefCount == 0))
{
- PVR_ASSERT(pBuf->ui32ExportCount == 0)
+ PVR_ASSERT(pBuf->ui32ExportCount == 0);
BM_FreeMemory(pMapping->pBMHeap, 0, pMapping);
}
}
@@ -602,13 +783,30 @@ FreeBuf (BM_BUF *pBuf, IMG_UINT32 ui32Flags, IMG_BOOL bFromAllocator)
* need to change the one above in PVRSRV_HAP_GPU_PAGEABLE
* case.. see comments in unstripped driver
*/
+ /* Submemhandle is required by exported mappings */
if ((pBuf->ui32ExportCount == 0) && (pBuf->ui32RefCount == 0))
{
-
-
-
- PVR_ASSERT(pBuf->ui32ExportCount == 0)
- RA_Free (pBuf->pMapping->pArena, pBuf->DevVAddr.uiAddr, IMG_FALSE);
+ /*
+ RAM backed allocation
+ Note: currently no need to distinguish between hm_env and hm_contiguous
+ */
+ PVR_ASSERT(pBuf->ui32ExportCount == 0);
+ if (pBuf->pMapping->ui32Flags & PVRSRV_MEM_SPARSE)
+ {
+ IMG_UINT32 ui32FreeSize = sizeof(IMG_BOOL) * pBuf->pMapping->ui32NumVirtChunks;
+ IMG_PVOID pvFreePtr = pBuf->pMapping->pabMapChunk;
+
+ /* With sparse allocations we don't go through the sub-alloc RA */
+ BM_FreeMemory(pBuf->pMapping->pBMHeap, pBuf->DevVAddr.uiAddr, pBuf->pMapping);
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+ ui32FreeSize,
+ pvFreePtr,
+ IMG_NULL);
+ }
+ else
+ {
+ RA_Free (pBuf->pMapping->pArena, pBuf->DevVAddr.uiAddr, IMG_FALSE);
+ }
}
}
else
@@ -638,9 +836,9 @@ FreeBuf (BM_BUF *pBuf, IMG_UINT32 ui32Flags, IMG_BOOL bFromAllocator)
if ((pBuf->ui32ExportCount == 0) && (pBuf->ui32RefCount == 0))
{
-
+ /* free the mocked-up mapping */
OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, IMG_NULL);
- pBuf->pMapping = IMG_NULL;
+ pBuf->pMapping = IMG_NULL; /*nulling pointer alias*/
}
}
}
@@ -649,10 +847,22 @@ FreeBuf (BM_BUF *pBuf, IMG_UINT32 ui32Flags, IMG_BOOL bFromAllocator)
if ((pBuf->ui32ExportCount == 0) && (pBuf->ui32RefCount == 0))
{
OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_BUF), pBuf, IMG_NULL);
-
+ /*not nulling pointer, copy on stack*/
}
}
+/*!
+******************************************************************************
+
+ @Function BM_DestroyContext_AnyCb
+
+ @Description Destroy a buffer manager heap.
+
+ @Input psBMHeap
+
+ @Return PVRSRV_ERROR
+
+ *****************************************************************************/
static PVRSRV_ERROR BM_DestroyContext_AnyCb(BM_HEAP *psBMHeap)
{
if(psBMHeap->ui32Attribs
@@ -673,6 +883,20 @@ static PVRSRV_ERROR BM_DestroyContext_AnyCb(BM_HEAP *psBMHeap)
}
+/*!
+******************************************************************************
+
+ @Function BM_DestroyContext
+
+ @Description Destroy a buffer manager context. All allocated buffers must be
+ free'd before calling this function. This function is called
+ also to perform cleanup during aborted initialisations so it's
+ fairly careful not to assume any given resource has really been
+ created/allocated.
+
+ @Return PVRSRV_ERROR
+
+ *****************************************************************************/
PVRSRV_ERROR
BM_DestroyContext(IMG_HANDLE hBMContext,
IMG_BOOL *pbDestroyed)
@@ -687,8 +911,9 @@ BM_DestroyContext(IMG_HANDLE hBMContext,
*pbDestroyed = IMG_FALSE;
}
-
-
+ /*
+ Exit straight away if it's an invalid context handle
+ */
if (pBMContext == IMG_NULL)
{
PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyContext: Invalid handle"));
@@ -699,34 +924,23 @@ BM_DestroyContext(IMG_HANDLE hBMContext,
if (pBMContext->ui32RefCount > 0)
{
-
+ /* Just return if there are more references to this context */
return PVRSRV_OK;
}
-
-
-
+ /*
+ Catch a buggy client that got here before all of
+ its allocations were freed.
+ */
eError = List_BM_HEAP_PVRSRV_ERROR_Any(pBMContext->psBMHeap, &BM_DestroyContext_AnyCb);
if(eError != PVRSRV_OK)
{
PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyContext: List_BM_HEAP_PVRSRV_ERROR_Any failed"));
-#if 0
-
-
-
-
- PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyContext: Cleaning up with ResManFreeSpecial"));
- if(ResManFreeSpecial() != PVRSRV_OK)
- {
- PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyContext: ResManFreeSpecial failed %d",eError));
- }
-
-#endif
return eError;
}
else
{
-
+ /* free the device memory context */
eError = ResManFreeResByPtr(pBMContext->hResItem, CLEANUP_WITH_POLL);
if(eError != PVRSRV_OK)
{
@@ -734,7 +948,7 @@ BM_DestroyContext(IMG_HANDLE hBMContext,
return eError;
}
-
+ /* mark context as destroyed */
if (pbDestroyed != IMG_NULL)
{
*pbDestroyed = IMG_TRUE;
@@ -745,12 +959,25 @@ BM_DestroyContext(IMG_HANDLE hBMContext,
}
+/*!
+******************************************************************************
+
+ @Function BM_DestroyContextCallBack_AnyVaCb
+
+ @Description Per-heap callback used when destroying a device memory context: frees one BM heap.
+
+ @Input psBMHeap - heap to be freed.
+ @Input va - list of variable arguments with the following contents:
+ - psDeviceNode
+ @Return PVRSRV_ERROR
+
+ *****************************************************************************/
static PVRSRV_ERROR BM_DestroyContextCallBack_AnyVaCb(BM_HEAP *psBMHeap, va_list va)
{
PVRSRV_DEVICE_NODE *psDeviceNode;
psDeviceNode = va_arg(va, PVRSRV_DEVICE_NODE*);
-
+ /* Free up the import arenas */
if(psBMHeap->ui32Attribs
& (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG
|PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG))
@@ -766,17 +993,30 @@ static PVRSRV_ERROR BM_DestroyContextCallBack_AnyVaCb(BM_HEAP *psBMHeap, va_list
return PVRSRV_ERROR_UNSUPPORTED_BACKING_STORE;
}
-
+ /* Free up the MMU Heaps */
psDeviceNode->pfnMMUDelete(psBMHeap->pMMUHeap);
-
+ /* Free Heap memory */
OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_HEAP), psBMHeap, IMG_NULL);
-
+ /*not nulling pointer, copy on stack*/
return PVRSRV_OK;
}
+/*!
+******************************************************************************
+
+ @Function BM_DestroyContextCallBack
+
+ @Description Destroy Device memory context
+
+ @Input pvParam - opaque void ptr param
+ @Input ui32Param - opaque unsigned long param
+
+ @Return PVRSRV_ERROR
+
+ *****************************************************************************/
static PVRSRV_ERROR BM_DestroyContextCallBack(IMG_PVOID pvParam,
IMG_UINT32 ui32Param,
IMG_BOOL bDummy)
@@ -784,15 +1024,20 @@ static PVRSRV_ERROR BM_DestroyContextCallBack(IMG_PVOID pvParam,
BM_CONTEXT *pBMContext = pvParam;
PVRSRV_DEVICE_NODE *psDeviceNode;
PVRSRV_ERROR eError;
+
PVR_UNREFERENCED_PARAMETER(ui32Param);
PVR_UNREFERENCED_PARAMETER(bDummy);
-
-
+ /*
+ Get DeviceNode from BMcontext
+ */
psDeviceNode = pBMContext->psDeviceNode;
-
-
+ /*
+ Free the import arenas and heaps
+ */
eError = List_BM_HEAP_PVRSRV_ERROR_Any_va(pBMContext->psBMHeap,
&BM_DestroyContextCallBack_AnyVaCb,
psDeviceNode);
@@ -800,15 +1045,17 @@ static PVRSRV_ERROR BM_DestroyContextCallBack(IMG_PVOID pvParam,
{
return eError;
}
-
-
+ /*
+ 'Finalise' the MMU
+ */
if (pBMContext->psMMUContext)
{
psDeviceNode->pfnMMUFinalise(pBMContext->psMMUContext);
}
-
-
+ /*
+ Free up generic, useful resources - if they were allocated.
+ */
if (pBMContext->pBufferHash)
{
HASH_Delete(pBMContext->pBufferHash);
@@ -816,20 +1063,22 @@ static PVRSRV_ERROR BM_DestroyContextCallBack(IMG_PVOID pvParam,
if (pBMContext == psDeviceNode->sDevMemoryInfo.pBMKernelContext)
{
-
+ /* Freeing the kernel context */
psDeviceNode->sDevMemoryInfo.pBMKernelContext = IMG_NULL;
}
else
{
if (pBMContext->ppsThis != IMG_NULL)
{
-
+ /*
+ * Remove context from the linked list
+ */
List_BM_CONTEXT_Remove(pBMContext);
}
}
OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_CONTEXT), pBMContext, IMG_NULL);
-
+ /*not nulling pointer, copy on stack*/
return PVRSRV_OK;
}
@@ -841,7 +1090,7 @@ static IMG_HANDLE BM_CreateContext_IncRefCount_AnyVaCb(BM_CONTEXT *pBMContext, v
hResManContext = va_arg(va, PRESMAN_CONTEXT);
if(ResManFindResourceByPtr(hResManContext, pBMContext->hResItem) == PVRSRV_OK)
{
-
+ /* just increment the refcount and return the memory context found for this process */
pBMContext->ui32RefCount++;
return pBMContext;
}
@@ -859,13 +1108,25 @@ static IMG_VOID BM_CreateContext_InsertHeap_ForEachVaCb(BM_HEAP *psBMHeap, va_li
case DEVICE_MEMORY_HEAP_SHARED:
case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
{
-
+ /* insert the heap into the device's MMU page directory/table */
psDeviceNode->pfnMMUInsertHeap(pBMContext->psMMUContext, psBMHeap->pMMUHeap);
break;
}
}
}
+/*!
+******************************************************************************
+
+ @Function BM_CreateContext
+
+ @Description Creates and initialises a buffer manager context. This function must be called
+ before any other buffer manager functions.
+
+ @Return valid BM context handle - Success
+ IMG_NULL - Failed
+
+ *****************************************************************************/
IMG_HANDLE
BM_CreateContext(PVRSRV_DEVICE_NODE *psDeviceNode,
IMG_DEV_PHYADDR *psPDDevPAddr,
@@ -873,6 +1134,7 @@ BM_CreateContext(PVRSRV_DEVICE_NODE *psDeviceNode,
IMG_BOOL *pbCreated)
{
BM_CONTEXT *pBMContext;
DEVICE_MEMORY_INFO *psDevMemoryInfo;
IMG_BOOL bKernelContext;
PRESMAN_CONTEXT hResManContext;
@@ -895,7 +1157,7 @@ BM_CreateContext(PVRSRV_DEVICE_NODE *psDeviceNode,
*pbCreated = IMG_FALSE;
}
-
+ /* setup the device memory info. */
psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
if (bKernelContext == IMG_FALSE)
@@ -909,7 +1171,7 @@ BM_CreateContext(PVRSRV_DEVICE_NODE *psDeviceNode,
}
}
-
+ /* allocate a BM context */
if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
sizeof (struct _BM_CONTEXT_),
(IMG_PVOID *)&pBMContext, IMG_NULL,
@@ -920,11 +1182,11 @@ BM_CreateContext(PVRSRV_DEVICE_NODE *psDeviceNode,
}
OSMemSet(pBMContext, 0, sizeof (BM_CONTEXT));
-
+ /* store the associated devicenode */
pBMContext->psDeviceNode = psDeviceNode;
-
-
+ /* This hash table is used to store BM_Wraps in a global way */
+ /* INTEGRATION_POINT: 32 is an arbitrary limit on the number of hashed BM_wraps */
pBMContext->pBufferHash = HASH_Create(32);
if (pBMContext->pBufferHash==IMG_NULL)
{
@@ -942,17 +1204,18 @@ BM_CreateContext(PVRSRV_DEVICE_NODE *psDeviceNode,
if(bKernelContext)
{
-
+ /* just save the kernel context */
PVR_ASSERT(psDevMemoryInfo->pBMKernelContext == IMG_NULL);
psDevMemoryInfo->pBMKernelContext = pBMContext;
}
else
{
-
-
-
-
-
+ /*
+ On the creation of each new context we must
+ insert the kernel context's 'shared' and 'shared_exported'
+ heaps into the new context
+ - check the kernel context and heaps exist
+ */
PVR_ASSERT(psDevMemoryInfo->pBMKernelContext);
if (psDevMemoryInfo->pBMKernelContext == IMG_NULL)
@@ -963,28 +1226,30 @@ BM_CreateContext(PVRSRV_DEVICE_NODE *psDeviceNode,
PVR_ASSERT(psDevMemoryInfo->pBMKernelContext->psBMHeap);
-
-
-
-
+ /*
+ insert the kernel heap structures into the new context's shared heap list.
+ Note: this will include the kernel-only heaps, but these will not actually
+ be imported into the context nor returned to the client
+ */
pBMContext->psBMSharedHeap = psDevMemoryInfo->pBMKernelContext->psBMHeap;
-
-
-
+ /*
+ insert the shared heaps into the MMU page directory/table
+ for the new context
+ */
List_BM_HEAP_ForEach_va(pBMContext->psBMSharedHeap,
&BM_CreateContext_InsertHeap_ForEachVaCb,
psDeviceNode,
pBMContext);
-
+ /* Finally, insert the new context into the list of BM contexts */
List_BM_CONTEXT_Insert(&psDevMemoryInfo->pBMContext, pBMContext);
}
-
+ /* Increment the refcount, as creation is successful */
pBMContext->ui32RefCount++;
-
+ /* register with resman */
pBMContext->hResItem = ResManRegisterRes(hResManContext,
RESMAN_TYPE_DEVICEMEM_CONTEXT,
pBMContext,
@@ -1015,7 +1280,7 @@ static IMG_VOID *BM_CreateHeap_AnyVaCb(BM_HEAP *psBMHeap, va_list va)
psDevMemHeapInfo = va_arg(va, DEVICE_MEMORY_HEAP_INFO*);
if (psBMHeap->sDevArena.ui32HeapID == psDevMemHeapInfo->ui32HeapID)
{
-
+ /* Match - just return already created heap */
return psBMHeap;
}
else
@@ -1024,6 +1289,19 @@ static IMG_VOID *BM_CreateHeap_AnyVaCb(BM_HEAP *psBMHeap, va_list va)
}
}
+/*!
+******************************************************************************
+
+ @Function BM_CreateHeap
+
+ @Description Creates and initialises a BM heap for a given BM context.
+
+ @Return
+ valid heap handle - success
+ IMG_NULL - failure
+
+
+ *****************************************************************************/
IMG_HANDLE
BM_CreateHeap (IMG_HANDLE hBMContext,
DEVICE_MEMORY_HEAP_INFO *psDevMemHeapInfo)
@@ -1042,16 +1320,18 @@ BM_CreateHeap (IMG_HANDLE hBMContext,
psDeviceNode = pBMContext->psDeviceNode;
-
-
+ /*
+ * Ensure that the heap size is a multiple of the data page size.
+ */
PVR_ASSERT((psDevMemHeapInfo->ui32HeapSize & (psDevMemHeapInfo->ui32DataPageSize - 1)) == 0);
PVR_ASSERT(psDevMemHeapInfo->ui32HeapSize > 0);
-
-
-
-
-
+ /*
+ We may be asked to create a heap in a context which already has one.
+ Test for refcount > 0 because PVRSRVGetDeviceMemHeapInfoKM doesn't increment the refcount.
+ This does mean that the first call to PVRSRVCreateDeviceMemContextKM will try to find
+ heaps that we already know don't exist.
+ */
if(pBMContext->ui32RefCount > 0)
{
psBMHeap = (BM_HEAP*)List_BM_HEAP_Any_va(pBMContext->psBMHeap,
@@ -1084,8 +1364,11 @@ BM_CreateHeap (IMG_HANDLE hBMContext,
psBMHeap->sDevArena.ui32DataPageSize = psDevMemHeapInfo->ui32DataPageSize;
psBMHeap->sDevArena.psDeviceMemoryHeapInfo = psDevMemHeapInfo;
psBMHeap->ui32Attribs = psDevMemHeapInfo->ui32Attribs;
+#if defined(SUPPORT_MEMORY_TILING)
+ psBMHeap->ui32XTileStride = psDevMemHeapInfo->ui32XTileStride;
+#endif
-
+ /* tie the heap to the context */
psBMHeap->pBMContext = pBMContext;
psBMHeap->pMMUHeap = psDeviceNode->pfnMMUCreate (pBMContext->psMMUContext,
@@ -1098,7 +1381,7 @@ BM_CreateHeap (IMG_HANDLE hBMContext,
goto ErrorExit;
}
-
+ /* memory is allocated from the OS as required */
psBMHeap->pImportArena = RA_Create (psDevMemHeapInfo->pszBSName,
0, 0, IMG_NULL,
MAX(HOST_PAGESIZE(), psBMHeap->sDevArena.ui32DataPageSize),
@@ -1114,10 +1397,11 @@ BM_CreateHeap (IMG_HANDLE hBMContext,
if(psBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)
{
-
-
-
-
+ /*
+ memory comes from a device memory contiguous allocator (ra)
+ Note: these arenas are shared across the system so don't delete
+ as part of heap destroy
+ */
psBMHeap->pLocalDevMemArena = psDevMemHeapInfo->psLocalDevMemArena;
if(psBMHeap->pLocalDevMemArena == IMG_NULL)
{
@@ -1126,28 +1410,41 @@ BM_CreateHeap (IMG_HANDLE hBMContext,
}
}
-
+ /* insert heap into head of the heap list */
List_BM_HEAP_Insert(&pBMContext->psBMHeap, psBMHeap);
return (IMG_HANDLE)psBMHeap;
-
+ /* handle error case */
ErrorExit:
-
+ /* Free up the MMU if we created one */
if (psBMHeap->pMMUHeap != IMG_NULL)
{
psDeviceNode->pfnMMUDelete (psBMHeap->pMMUHeap);
-
+ /* don't finalise psMMUContext as we don't own it */
}
-
+ /* Free the Heap memory */
OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_HEAP), psBMHeap, IMG_NULL);
-
+ /*not nulling pointer, out of scope*/
return IMG_NULL;
}
+/*!
+******************************************************************************
+
+ @Function BM_DestroyHeap
+
+ @Description Destroys a BM heap
+
+ @Return None.
+
+
+ *****************************************************************************/
IMG_VOID
BM_DestroyHeap (IMG_HANDLE hDevMemHeap)
{
@@ -1158,7 +1455,7 @@ BM_DestroyHeap (IMG_HANDLE hDevMemHeap)
if(psBMHeap)
{
-
+ /* Free up the import arenas */
if(psBMHeap->ui32Attribs
& (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG
|PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG))
@@ -1174,14 +1471,12 @@ BM_DestroyHeap (IMG_HANDLE hDevMemHeap)
return;
}
-
+ /* Free up the MMU Heap */
psDeviceNode->pfnMMUDelete (psBMHeap->pMMUHeap);
-
+ /* remove from the heap list */
List_BM_HEAP_Remove(psBMHeap);
-
OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_HEAP), psBMHeap, IMG_NULL);
-
}
else
{
@@ -1190,6 +1485,17 @@ BM_DestroyHeap (IMG_HANDLE hDevMemHeap)
}
+/*!
+******************************************************************************
+
+ @Function BM_Reinitialise
+
+ @Description Reinitialise the buffer manager after a power down event.
+
+ @Return IMG_TRUE - Success
+ IMG_FALSE - Failed
+
+ *****************************************************************************/
IMG_BOOL
BM_Reinitialise (PVRSRV_DEVICE_NODE *psDeviceNode)
{
@@ -1197,16 +1503,48 @@ BM_Reinitialise (PVRSRV_DEVICE_NODE *psDeviceNode)
PVR_DPF((PVR_DBG_MESSAGE, "BM_Reinitialise"));
PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+ /* FIXME: Need to reenable all contexts
+ List_BM_CONTEXT_ForEach(psDeviceNode->sDevMemoryInfo.pBMContext, MMU_Enable);
+ */
return IMG_TRUE;
}
+/*!
+******************************************************************************
+
+ @Function BM_Alloc
+
+ @Description Allocate a buffer mapped into both cpu and device virtual
+ memory maps.
+
+ @Input hDevMemHeap
+ @Input psDevVAddr - device virtual address specified by caller (optional)
+ @Input uSize - required size in bytes of the buffer.
+ @Input pui32Flags - bit mask of buffer property flags.
+ @Input uDevVAddrAlignment - required alignment in bytes, or 0.
+ @Input pvPrivData - opaque private data passed through to allocator
+ @Input ui32PrivDataLength - length of opaque private data
+ @Input ui32ChunkSize - chunk size in bytes (sparse allocations only)
+ @Input ui32NumVirtChunks - number of virtual chunks (sparse allocations only)
+ @Input ui32NumPhysChunks - number of physically backed chunks (sparse allocations only)
+ @Input pabMapChunk - per-chunk mapping flags (sparse allocations only)
+
+ @Output phBuf - receives buffer handle
+ @Output pui32Flags - bit mask of heap property flags.
+
+ @Return IMG_TRUE - Success
+ IMG_FALSE - Failure
+
+ *****************************************************************************/
IMG_BOOL
BM_Alloc ( IMG_HANDLE hDevMemHeap,
IMG_DEV_VIRTADDR *psDevVAddr,
IMG_SIZE_T uSize,
IMG_UINT32 *pui32Flags,
IMG_UINT32 uDevVAddrAlignment,
+ IMG_PVOID pvPrivData,
+ IMG_UINT32 ui32PrivDataLength,
+ IMG_UINT32 ui32ChunkSize,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_BOOL *pabMapChunk,
BM_HANDLE *phBuf)
{
BM_BUF *pBuf;
@@ -1238,7 +1576,9 @@ BM_Alloc ( IMG_HANDLE hDevMemHeap,
uDevVAddrAlignment = 1;
}
-
+ /*
+ * Allocate something in which to record the allocation's details.
+ */
if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
sizeof (BM_BUF),
(IMG_PVOID *)&pBuf, IMG_NULL,
@@ -1249,17 +1589,25 @@ BM_Alloc ( IMG_HANDLE hDevMemHeap,
}
OSMemSet(pBuf, 0, sizeof (BM_BUF));
-
+ /*
+ * Allocate the memory itself now.
+ */
if (AllocMemory(pBMContext,
psBMHeap,
psDevVAddr,
uSize,
uFlags,
uDevVAddrAlignment,
+ pvPrivData,
+ ui32PrivDataLength,
+ ui32ChunkSize,
+ ui32NumVirtChunks,
+ ui32NumPhysChunks,
+ pabMapChunk,
pBuf) != IMG_TRUE)
{
OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof (BM_BUF), pBuf, IMG_NULL);
-
+ /* not nulling pointer, out of scope */
PVR_DPF((PVR_DBG_ERROR, "BM_Alloc: AllocMemory FAILED"));
return IMG_FALSE;
}
@@ -1268,12 +1616,17 @@ BM_Alloc ( IMG_HANDLE hDevMemHeap,
"BM_Alloc (uSize=0x%x, uFlags=0x%x)",
uSize, uFlags));
-
+ /*
+ * Assign the handle and return.
+ */
pBuf->ui32RefCount = 1;
*phBuf = (BM_HANDLE)pBuf;
*pui32Flags = uFlags | psBMHeap->ui32Attribs;
-
+ /*
+ * If the user has specified heap CACHETYPE flags themselves,
+ * override any CACHETYPE flags inherited from the heap.
+ */
if(uFlags & PVRSRV_HAP_CACHETYPE_MASK)
{
*pui32Flags &= ~PVRSRV_HAP_CACHETYPE_MASK;
@@ -1286,6 +1639,21 @@ BM_Alloc ( IMG_HANDLE hDevMemHeap,
#if defined(PVR_LMA)
+/*!
+******************************************************************************
+
+ @Function ValidSysPAddrArrayForDev
+
+ @Description Verify that an array of system physical addresses is
+ accessible by the given device.
+
+ @Input psDeviceNode
+ @Input psSysPAddr - system physical address array
+ @Input ui32PageCount - number of pages in the array
+ @Input ui32PageSize - size of each page in bytes
+
+ @Return IMG_BOOL
+
+ *****************************************************************************/
static IMG_BOOL
ValidSysPAddrArrayForDev(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_SYS_PHYADDR *psSysPAddr, IMG_UINT32 ui32PageCount, IMG_SIZE_T ui32PageSize)
{
@@ -1312,6 +1680,21 @@ ValidSysPAddrArrayForDev(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_SYS_PHYADDR *psSy
return IMG_TRUE;
}
+/*!
+******************************************************************************
+
+ @Function ValidSysPAddrRangeForDev
+
+ @Description Verify a system address range is accessible
+ by the given device.
+
+ @Input psDeviceNode
+ @Input sStartSysPAddr - starting system address
+ @Input ui32Range - length of address range
+
+ @Return IMG_BOOL
+
+ *****************************************************************************/
static IMG_BOOL
ValidSysPAddrRangeForDev(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_SYS_PHYADDR sStartSysPAddr, IMG_SIZE_T ui32Range)
{
@@ -1339,6 +1722,28 @@ ValidSysPAddrRangeForDev(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_SYS_PHYADDR sStar
#endif
+/*!
+******************************************************************************
+
+ @Function BM_Wrap
+
+ @Description Create a buffer which wraps user provided system physical
+ memory.
+ The wrapped memory must be page aligned. BM_Wrap will
+ round up the size to a multiple of CPU pages.
+
+ @Input ui32Size - size of memory to wrap.
+ @Input ui32Offset - Offset into page of memory to wrap.
+ @Input bPhysContig - Is the wrap physically contiguous.
+ @Input psSysAddr - list of system physical page addresses of memory to wrap.
+ @Input pvCPUVAddr - optional CPU kernel virtual address (Page aligned) of memory to wrap.
+ @Input uFlags - bit mask of buffer property flags.
+ @Output phBuf - receives the buffer handle.
+
+ @Return IMG_TRUE - Success.
+ IMG_FALSE - Failed
+
+ *****************************************************************************/
IMG_BOOL
BM_Wrap ( IMG_HANDLE hDevMemHeap,
IMG_SIZE_T ui32Size,
@@ -1393,20 +1798,23 @@ BM_Wrap ( IMG_HANDLE hDevMemHeap,
}
}
#endif
-
+ /*
+ * Insert the System Physical Address of the first page into the hash so we can optimise multiple wraps of the
+ * same memory.
+ */
sHashAddress = psSysAddr[0];
-
+ /* Add the in-page offset to ensure a unique hash */
sHashAddress.uiAddr += ui32Offset;
-
+ /* See if this address has already been wrapped */
pBuf = (BM_BUF *)HASH_Retrieve(psBMContext->pBufferHash, sHashAddress.uiAddr);
if(pBuf)
{
IMG_SIZE_T ui32MappingSize = HOST_PAGEALIGN (ui32Size + ui32Offset);
-
+ /* Check base address, size and contiguity type match */
if(pBuf->pMapping->uSize == ui32MappingSize && (pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped ||
pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped_virtaddr))
{
@@ -1414,7 +1822,7 @@ BM_Wrap ( IMG_HANDLE hDevMemHeap,
"BM_Wrap (Matched previous Wrap! uSize=0x%x, uOffset=0x%x, SysAddr=%08X)",
ui32Size, ui32Offset, sHashAddress.uiAddr));
- pBuf->ui32RefCount++;
+ PVRSRVBMBufIncRef(pBuf);
*phBuf = (BM_HANDLE)pBuf;
if(pui32Flags)
*pui32Flags = uFlags;
@@ -1423,12 +1831,15 @@ BM_Wrap ( IMG_HANDLE hDevMemHeap,
}
else
{
-
+ /* Otherwise remove that item from the hash table
+ (a workaround for buffer device class) */
HASH_Remove(psBMContext->pBufferHash, (IMG_UINTPTR_T)sHashAddress.uiAddr);
}
}
-
+ /*
+ * Allocate something in which to record the allocation's details.
+ */
if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
sizeof (BM_BUF),
(IMG_PVOID *)&pBuf, IMG_NULL,
@@ -1439,19 +1850,23 @@ BM_Wrap ( IMG_HANDLE hDevMemHeap,
}
OSMemSet(pBuf, 0, sizeof (BM_BUF));
-
+ /*
+ * Actually perform the memory wrap.
+ */
if (WrapMemory (psBMHeap, ui32Size, ui32Offset, bPhysContig, psSysAddr, pvCPUVAddr, uFlags, pBuf) != IMG_TRUE)
{
PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: WrapMemory FAILED"));
OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof (BM_BUF), pBuf, IMG_NULL);
-
+ /*not nulling pointer, out of scope*/
return IMG_FALSE;
}
-
+ /* Only insert the buffer in the hash table if it is contiguous - allows for optimisation of multiple wraps
+ * of the same contiguous buffer.
+ */
if(pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped || pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped_virtaddr)
{
-
+ /* Have we calculated the right Hash key ? */
PVR_ASSERT(SysSysPAddrToCpuPAddr(sHashAddress).uiAddr == pBuf->CpuPAddr.uiAddr);
if (!HASH_Insert (psBMContext->pBufferHash, sHashAddress.uiAddr, (IMG_UINTPTR_T)pBuf))
@@ -1466,36 +1881,73 @@ BM_Wrap ( IMG_HANDLE hDevMemHeap,
"BM_Wrap (uSize=0x%x, uFlags=0x%x, devVAddr=%08X)",
ui32Size, uFlags, pBuf->DevVAddr.uiAddr));
-
+ /*
+ * Assign the handle and return.
+ */
pBuf->ui32RefCount = 1;
*phBuf = (BM_HANDLE)pBuf;
if(pui32Flags)
{
-
+ /* need to override the heap attributes from SINGLE_PROCESS to MULTI_PROCESS. */
*pui32Flags = (uFlags & ~PVRSRV_HAP_MAPTYPE_MASK) | PVRSRV_HAP_MULTI_PROCESS;
}
return IMG_TRUE;
}
+/*!
+******************************************************************************
+
+ @Function BM_Export
+
+ @Description Export a buffer previously allocated via BM_Alloc.
+
+ @Input hBuf - buffer handle.
+
+ @Return None.
+
+ *****************************************************************************/
+
IMG_VOID
BM_Export (BM_HANDLE hBuf)
{
BM_BUF *pBuf = (BM_BUF *)hBuf;
- pBuf->ui32ExportCount++;
+ PVRSRVBMBufIncExport(pBuf);
}
+/*!
+******************************************************************************
+ @Function BM_FreeExport
+
+ @Description Release an export reference previously taken via BM_Export.
+
+ @Input hBuf - buffer handle.
+ @Input ui32Flags - flags
+
+ @Return None.
+**************************************************************************/
IMG_VOID
BM_FreeExport(BM_HANDLE hBuf,
IMG_UINT32 ui32Flags)
{
BM_BUF *pBuf = (BM_BUF *)hBuf;
- pBuf->ui32ExportCount--;
+ PVRSRVBMBufDecExport(pBuf);
FreeBuf (pBuf, ui32Flags, IMG_FALSE);
}
+/*!
+******************************************************************************
+ @Function BM_Free
+
+ @Description Free a buffer previously allocated via BM_Alloc or wrapped via BM_Wrap.
+
+ @Input hBuf - buffer handle.
+ @Input ui32Flags - flags
+
+ @Return None.
+**************************************************************************/
IMG_VOID
BM_Free (BM_HANDLE hBuf,
IMG_UINT32 ui32Flags)
@@ -1515,8 +1967,7 @@ BM_Free (BM_HANDLE hBuf,
SysAcquireData(&psSysData);
- pBuf->ui32RefCount--;
-
+ PVRSRVBMBufDecRef(pBuf);
if(pBuf->ui32RefCount == 0)
{
if(pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped || pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped_virtaddr)
@@ -1530,21 +1981,6 @@ BM_Free (BM_HANDLE hBuf,
}
-IMG_VOID
-BM_RegisterSmart(BM_HANDLE hBuf, IMG_HANDLE hSmartCache)
-{
- BM_BUF *pBuf = (BM_BUF *)hBuf;
- OSMemHandleRegisterSmart(pBuf->hOSMemHandle, hSmartCache);
-}
-
-IMG_VOID
-BM_UnregisterSmart(BM_HANDLE hBuf, IMG_HANDLE hSmartCache)
-{
- BM_BUF *pBuf = (BM_BUF *)hBuf;
- OSMemHandleUnegisterSmart(pBuf->hOSMemHandle, hSmartCache);
-}
-
-
#if defined(SUPPORT_DRI_DRM_EXTERNAL)
IMG_VOID
BM_SetGEM(BM_HANDLE hBuf, IMG_HANDLE buf)
@@ -1561,7 +1997,18 @@ BM_GetGEM(BM_HANDLE hBuf)
}
#endif /* SUPPORT_DRI_DRM_EXTERNAL */
+/*!
+******************************************************************************
+ @Function BM_HandleToCpuVaddr
+
+ @Description Retrieve the CPU virtual address associated with a buffer.
+
+ @Input hBuf - buffer handle.
+
+ @Return buffer's CPU virtual address, or IMG_NULL if none exists
+
+ *****************************************************************************/
IMG_CPU_VIRTADDR
BM_HandleToCpuVaddr (BM_HANDLE hBuf)
{
@@ -1581,6 +2028,18 @@ BM_HandleToCpuVaddr (BM_HANDLE hBuf)
}
+/*!
+******************************************************************************
+
+ @Function BM_HandleToDevVaddr
+
+ @Description Retrieve the device virtual address associated with a buffer.
+
+ @Input hBuf - buffer handle.
+
+ @Return buffer's device virtual address.
+
+ *****************************************************************************/
IMG_DEV_VIRTADDR
BM_HandleToDevVaddr (BM_HANDLE hBuf)
{
@@ -1599,6 +2058,18 @@ BM_HandleToDevVaddr (BM_HANDLE hBuf)
}
+/*!
+******************************************************************************
+
+ @Function BM_HandleToSysPaddr
+
+ @Description Retrieve the system physical address associated with a buffer.
+
+ @Input hBuf - buffer handle.
+
+ @Return buffer's system physical address.
+
+ *****************************************************************************/
IMG_SYS_PHYADDR
BM_HandleToSysPaddr (BM_HANDLE hBuf)
{
@@ -1617,6 +2088,18 @@ BM_HandleToSysPaddr (BM_HANDLE hBuf)
return SysCpuPAddrToSysPAddr (pBuf->CpuPAddr);
}
+/*!
+******************************************************************************
+
+ @Function BM_HandleToOSMemHandle
+
+ @Description Retrieve the underlying memory handle associated with a buffer.
+
+ @Input hBuf - buffer handle.
+
+ @Return OS-specific memory handle.
+
+ *****************************************************************************/
IMG_HANDLE
BM_HandleToOSMemHandle(BM_HANDLE hBuf)
{
@@ -1742,6 +2225,33 @@ BM_RemapToDev(BM_HANDLE hBuf)
}
+/*!
+******************************************************************************
+
+ @Function DevMemoryAlloc
+
+ @Description Allocate device memory for a given physical/virtual memory
+ mapping. We handle the main cases where device MMU mappings
+ are required - these are the dynamic cases: all wrappings of
+ host OS memory and host OS imports for SYS_MMU_NORMAL mode.
+
+ If no MMU support is required then we simply map device virtual
+ space as device physical space.
+
+ @Input pBMContext - the buffer manager context to allocate from.
+ @Output pMapping - the mapping descriptor to be filled in for this
+ allocation.
+ @Output pActualSize - the actual size of the block allocated in
+ bytes.
+ @Input uFlags - allocation flags
+ @Input dev_vaddr_alignment - required device virtual address
+ alignment, or 0.
+ @Output pDevVAddr - receives the device virtual base address of the
+ allocated block.
+ @Return IMG_TRUE - Success
+ IMG_FALSE - Failed.
+
+ *****************************************************************************/
static IMG_BOOL
DevMemoryAlloc (BM_CONTEXT *pBMContext,
BM_MAPPING *pMapping,
@@ -1771,14 +2281,16 @@ DevMemoryAlloc (BM_CONTEXT *pBMContext,
#ifdef PDUMP
if(uFlags & PVRSRV_MEM_DUMMY)
{
-
+ /* only one page behind a dummy allocation */
ui32PDumpSize = pMapping->pBMHeap->sDevArena.ui32DataPageSize;
}
#endif
-
+ /* Check we haven't fallen through a gap */
+ PVR_ASSERT(pMapping->uSizeVM != 0);
+ /* allocate device linear space */
if (!psDeviceNode->pfnMMUAlloc (pMapping->pBMHeap->pMMUHeap,
- pMapping->uSize,
+ pMapping->uSizeVM,
pActualSize,
0,
dev_vaddr_alignment,
@@ -1793,7 +2305,7 @@ DevMemoryAlloc (BM_CONTEXT *pBMContext,
#endif
#if defined(PDUMP)
-
+ /* pdump the memory allocation */
PDUMPMALLOCPAGES(&psDeviceNode->sDevId,
pMapping->DevVAddr.uiAddr,
pMapping->CpuVAddr,
@@ -1803,8 +2315,8 @@ DevMemoryAlloc (BM_CONTEXT *pBMContext,
#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
psDeviceNode->pfnMMUIsHeapShared(pMapping->pBMHeap->pMMUHeap),
#else
- IMG_FALSE,
-#endif
+ IMG_FALSE, /* unused */
+#endif /* SUPPORT_PDUMP_MULTI_PROCESS */
(IMG_HANDLE)pMapping);
#endif
@@ -1814,26 +2326,61 @@ DevMemoryAlloc (BM_CONTEXT *pBMContext,
case hm_wrapped_virtaddr:
case hm_contiguous:
{
- psDeviceNode->pfnMMUMapPages ( pMapping->pBMHeap->pMMUHeap,
- pMapping->DevVAddr,
- SysCpuPAddrToSysPAddr (pMapping->CpuPAddr),
- pMapping->uSize,
- uFlags,
- (IMG_HANDLE)pMapping);
-
+ if (uFlags & PVRSRV_MEM_SPARSE)
+ {
+ /* Check if this device supports sparse mappings */
+ PVR_ASSERT(psDeviceNode->pfnMMUMapPagesSparse != IMG_NULL);
+ psDeviceNode->pfnMMUMapPagesSparse(pMapping->pBMHeap->pMMUHeap,
+ pMapping->DevVAddr,
+ SysCpuPAddrToSysPAddr (pMapping->CpuPAddr),
+ pMapping->ui32ChunkSize,
+ pMapping->ui32NumVirtChunks,
+ pMapping->ui32NumPhysChunks,
+ pMapping->pabMapChunk,
+ uFlags,
+ (IMG_HANDLE)pMapping);
+ }
+ else
+ {
+ psDeviceNode->pfnMMUMapPages ( pMapping->pBMHeap->pMMUHeap,
+ pMapping->DevVAddr,
+ SysCpuPAddrToSysPAddr (pMapping->CpuPAddr),
+ pMapping->uSize,
+ uFlags,
+ (IMG_HANDLE)pMapping);
+ }
*pDevVAddr = pMapping->DevVAddr;
break;
}
case hm_env:
{
- psDeviceNode->pfnMMUMapShadow ( pMapping->pBMHeap->pMMUHeap,
- pMapping->DevVAddr,
- pMapping->uSize,
- pMapping->CpuVAddr,
- pMapping->hOSMemHandle,
- pDevVAddr,
- uFlags,
- (IMG_HANDLE)pMapping);
+ if (uFlags & PVRSRV_MEM_SPARSE)
+ {
+ /* Check if this device supports sparse mappings */
+ PVR_ASSERT(psDeviceNode->pfnMMUMapShadowSparse != IMG_NULL);
+ psDeviceNode->pfnMMUMapShadowSparse(pMapping->pBMHeap->pMMUHeap,
+ pMapping->DevVAddr,
+ pMapping->ui32ChunkSize,
+ pMapping->ui32NumVirtChunks,
+ pMapping->ui32NumPhysChunks,
+ pMapping->pabMapChunk,
+ pMapping->CpuVAddr,
+ pMapping->hOSMemHandle,
+ pDevVAddr,
+ uFlags,
+ (IMG_HANDLE)pMapping);
+ }
+ else
+ {
+ psDeviceNode->pfnMMUMapShadow ( pMapping->pBMHeap->pMMUHeap,
+ pMapping->DevVAddr,
+ pMapping->uSize,
+ pMapping->CpuVAddr,
+ pMapping->hOSMemHandle,
+ pDevVAddr,
+ uFlags,
+ (IMG_HANDLE)pMapping);
+ }
break;
}
case hm_wrapped_scatter:
@@ -1886,10 +2433,10 @@ DevMemoryFree (BM_MAPPING *pMapping)
if (sDevPAddr.uiAddr != 0)
{
#ifdef PDUMP
-
+ /* pdump the memory free */
if(pMapping->ui32Flags & PVRSRV_MEM_DUMMY)
{
-
+ /* physical memory size differs in the case of Dummy allocations */
ui32PSize = pMapping->pBMHeap->sDevArena.ui32DataPageSize;
}
else
@@ -1902,286 +2449,170 @@ DevMemoryFree (BM_MAPPING *pMapping)
ui32PSize,
pMapping->pBMHeap->sDevArena.ui32DataPageSize,
(IMG_HANDLE)pMapping,
- (pMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED) ? IMG_TRUE : IMG_FALSE);
+ (pMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED) ? IMG_TRUE : IMG_FALSE,
+ (pMapping->ui32Flags & PVRSRV_MEM_SPARSE) ? IMG_TRUE : IMG_FALSE);
#endif
}
- psDeviceNode->pfnMMUFree (pMapping->pBMHeap->pMMUHeap, pMapping->DevVAddr, IMG_CAST_TO_DEVVADDR_UINT(pMapping->uSize));
+ PVR_ASSERT(pMapping->uSizeVM != 0);
+ psDeviceNode->pfnMMUFree (pMapping->pBMHeap->pMMUHeap, pMapping->DevVAddr, IMG_CAST_TO_DEVVADDR_UINT(pMapping->uSizeVM));
pMapping->bUnmapped = IMG_TRUE;
}
-static PXProcShareDataNode gXProcWorkaroundShareDataNode = NULL;
-
-static const char const* gcXprocId = "Xproc Shared Buffer";
-
-typedef struct XProcShareDataNode{
- const char* cXprocId;
- IMG_UINT32 ui32RefCount;
- IMG_UINT32 ui32AllocFlags;
- IMG_UINT32 ui32Size;
- IMG_UINT32 ui32PageSize;
- RA_ARENA *psArena;
- IMG_SYS_PHYADDR sSysPAddr;
- IMG_VOID *pvCpuVAddr;
- IMG_HANDLE hOSMemHandle;
- struct list_head list;
-}XProcShareDataNodeT;
-
-LIST_HEAD(gXProcShareList);
-
-static unsigned int gXProcShareCount = 0;
-
-static IMG_BOOL IsXprocShareObjectValid(PXProcShareDataNode pShareDataNode)
-{
- if(pShareDataNode->cXprocId == gcXprocId)
- return IMG_TRUE;
-
- PVR_DPF((PVR_DBG_ERROR, "Share Data Node Is Invalid!!!"));
- return IMG_FALSE;
-}
-
-static void XprocPrintListStats(void)
-{
- struct list_head *list_ptr;
- PXProcShareDataNode pShareDataNodeIt = NULL;
- unsigned used_objects = 0, unused_objects = 0, invalid_objects = 0;
-
- list_for_each(list_ptr, &gXProcShareList)
- {
- pShareDataNodeIt = list_entry(list_ptr, XProcShareDataNodeT, list);
- if(IMG_TRUE == IsXprocShareObjectValid(pShareDataNodeIt))
- {
- if ((pShareDataNodeIt->ui32RefCount > 0))
- {
- used_objects++;
- }
- else
- {
- unused_objects++;
- }
- }
- else
- {
- invalid_objects++;
- }
- }
- PVR_DPF((PVR_DBG_ERROR, "Share Data List stats: used %d, unused %d, invalid %d descriptors",
- used_objects, unused_objects, invalid_objects));
-}
-
-static PXProcShareDataNode XprocShareObjectCreate(void)
-{
- PXProcShareDataNode pShareDataNode = NULL;
-
- if (OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
- sizeof (XProcShareDataNodeT),
- (IMG_PVOID *)&pShareDataNode, IMG_NULL,
- /* "Xproc Shared Buffer" */ gcXprocId) != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR, "OSAllocMem: pShareDataNode alloc FAILED"));
- return NULL;
- }
-
- if(NULL == pShareDataNode)
- {
- PVR_DPF((PVR_DBG_ERROR, "XprocShareObjectCreate: pShareDataNode alloc FAILED"));
- return NULL;
- }
-
- OSMemSet(pShareDataNode, 0, sizeof (XProcShareDataNodeT));
-
- /* Make it valid */
- INIT_LIST_HEAD(&pShareDataNode->list);
- pShareDataNode->cXprocId = gcXprocId;
-
- /* Add it to the list */
- list_add(&pShareDataNode->list, &gXProcShareList);
- gXProcShareCount++;
-
- PVR_DPF ((PVR_DBG_MESSAGE, "Share Object %p created, ref count: %d", pShareDataNode, gXProcShareCount));
+/* If this array grows larger, it might be preferable to use a hashtable rather than an array. */
+#ifndef XPROC_WORKAROUND_NUM_SHAREABLES
+#define XPROC_WORKAROUND_NUM_SHAREABLES 200
+#endif
- if(gXProcShareCount > 5000)
- {
- PVR_DPF((PVR_DBG_ERROR, "Too Many Share Data Nodes %d. Potential memory leak!!!", gXProcShareCount));
- XprocPrintListStats();
- }
+#define XPROC_WORKAROUND_BAD_SHAREINDEX 0773407734
- return pShareDataNode;
-}
+#define XPROC_WORKAROUND_UNKNOWN 0
+#define XPROC_WORKAROUND_ALLOC 1
+#define XPROC_WORKAROUND_MAP 2
-static void XprocShareObjectDestroy(PXProcShareDataNode pShareDataNode)
-{
- list_del(&pShareDataNode->list);
- pShareDataNode->hOSMemHandle = NULL;
- pShareDataNode->cXprocId = NULL;
- OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof (XProcShareDataNodeT), pShareDataNode, IMG_NULL);
- if(gXProcWorkaroundShareDataNode == pShareDataNode)
- {
- PVR_DPF((PVR_DBG_ERROR, "Share Data Node %p destroyed. Ignore the following error message about it!!!", pShareDataNode));
- gXProcWorkaroundShareDataNode = NULL;
- }
- gXProcShareCount--;
+static IMG_UINT32 gXProcWorkaroundShareIndex = XPROC_WORKAROUND_BAD_SHAREINDEX;
+static IMG_UINT32 gXProcWorkaroundState = XPROC_WORKAROUND_UNKNOWN;
- PVR_DPF ((PVR_DBG_MESSAGE, "Share Object %p destoyed, ref count: %d", pShareDataNode, gXProcShareCount));
-}
+/* PRQA S 0686 10 */ /* force compiler to init structure */
+XPROC_DATA gXProcWorkaroundShareData[XPROC_WORKAROUND_NUM_SHAREABLES] = {{0}};
-PVRSRV_ERROR BM_XProcSetShareIndex(PXProcShareDataNode pShareDataNode)
+PVRSRV_ERROR BM_XProcWorkaroundSetShareIndex(IMG_UINT32 ui32Index)
{
-
- if (gXProcWorkaroundShareDataNode != NULL)
+ /* if you fail this assertion - did you acquire the mutex?
+ did you call "set" exactly once?
+ did you call "unset" exactly once per set?
+ */
+ if (gXProcWorkaroundShareIndex != XPROC_WORKAROUND_BAD_SHAREINDEX)
{
- PVR_DPF((PVR_DBG_ERROR, "Share Data Node already set!"));
- return PVRSRV_ERROR_NOT_OWNER;
- }
-
- if(pShareDataNode == NULL)
- {
- PVR_DPF((PVR_DBG_ERROR, "Trying to set NULL pShareDataNode!"));
+ PVR_DPF((PVR_DBG_ERROR, "No, it's already set!"));
return PVRSRV_ERROR_INVALID_PARAMS;
}
- if(IMG_TRUE != IsXprocShareObjectValid(pShareDataNode))
- return PVRSRV_ERROR_INVALID_PARAMS;
-
- gXProcWorkaroundShareDataNode = pShareDataNode;
+ gXProcWorkaroundShareIndex = ui32Index;
+ gXProcWorkaroundState = XPROC_WORKAROUND_MAP;
return PVRSRV_OK;
}
-PVRSRV_ERROR BM_XProcFinishShareIndex(PXProcShareDataNode pShareDataNode, IMG_BOOL freeIfNotUsed)
+PVRSRV_ERROR BM_XProcWorkaroundUnsetShareIndex(IMG_UINT32 ui32Index)
{
- if (gXProcWorkaroundShareDataNode == NULL)
+ /* if you fail this assertion - did you acquire the mutex?
+ did you call "set" exactly once?
+ did you call "unset" exactly once per set?
+ */
+ if (gXProcWorkaroundShareIndex == XPROC_WORKAROUND_BAD_SHAREINDEX)
{
- PVR_DPF((PVR_DBG_ERROR, "Share Data Node is NULL and should of been %p", pShareDataNode));
+ PVR_DPF((PVR_DBG_ERROR, "huh? how can it be bad??"));
return PVRSRV_ERROR_INVALID_PARAMS;
}
-
- if (gXProcWorkaroundShareDataNode != pShareDataNode)
+ if (gXProcWorkaroundShareIndex != ui32Index)
{
- PVR_DPF((PVR_DBG_ERROR, "gXProcWorkaroundShareDataNode == %p != %p == pShareDataNode", gXProcWorkaroundShareDataNode, pShareDataNode));
+ PVR_DPF((PVR_DBG_ERROR, "gXProcWorkaroundShareIndex == 0x%08x != 0x%08x == ui32Index", gXProcWorkaroundShareIndex, ui32Index));
return PVRSRV_ERROR_INVALID_PARAMS;
}
- if (pShareDataNode == NULL)
- {
- PVR_DPF((PVR_DBG_ERROR, "pShareDataNode == NULL"));
- return PVRSRV_ERROR_INVALID_PARAMS;
- }
-
- if((freeIfNotUsed == IMG_TRUE) &&
- (IMG_TRUE == IsXprocShareObjectValid(pShareDataNode)) &&
- (pShareDataNode->ui32RefCount == 0x00))
- {
- PVR_DPF((PVR_DBG_ERROR, "Explicit free of pShareDataNode"));
- XprocShareObjectDestroy(pShareDataNode);
- }
-
- gXProcWorkaroundShareDataNode = NULL;
-
- PVR_DPF ((PVR_DBG_MESSAGE, "BM_XProcFinishShareIndex for %p", pShareDataNode));
+ gXProcWorkaroundShareIndex = XPROC_WORKAROUND_BAD_SHAREINDEX;
+ gXProcWorkaroundState = XPROC_WORKAROUND_UNKNOWN;
return PVRSRV_OK;
}
-PXProcShareDataNode BM_XProcAllocNewBuffer(void)
+PVRSRV_ERROR BM_XProcWorkaroundFindNewBufferAndSetShareIndex(IMG_UINT32 *pui32Index)
{
- PXProcShareDataNode pShareDataNode;
-
- if (gXProcWorkaroundShareDataNode != NULL)
+	/* If this check fails: did you acquire the mutex?
+	   Did you call "set" exactly once?
+	   Did you call "unset" exactly once per set?
+	*/
+ if (gXProcWorkaroundShareIndex != XPROC_WORKAROUND_BAD_SHAREINDEX)
{
- PVR_DPF((PVR_DBG_ERROR, "Share Data Node already allocated/set!"));
- return NULL;
+ return PVRSRV_ERROR_INVALID_PARAMS;
}
- pShareDataNode = XprocShareObjectCreate();
- if(NULL == pShareDataNode)
- return NULL;
-
- PVR_DPF ((PVR_DBG_MESSAGE, "BM_XProcAllocNewBuffer and SET %p", pShareDataNode));
-
- gXProcWorkaroundShareDataNode = pShareDataNode;
-
- return pShareDataNode;
-}
+ for (*pui32Index = 0; *pui32Index < XPROC_WORKAROUND_NUM_SHAREABLES; (*pui32Index)++)
+ {
+ if (gXProcWorkaroundShareData[*pui32Index].ui32RefCount == 0)
+ {
+ gXProcWorkaroundShareIndex = *pui32Index;
+ gXProcWorkaroundState = XPROC_WORKAROUND_ALLOC;
+ return PVRSRV_OK;
+ }
+ }
-IMG_UINT32 BM_XProcWorkaroundGetRefCount(IMG_UINT32 ui32Index)
-{
- return gXProcWorkaroundShareDataNode->ui32RefCount;
+ PVR_DPF((PVR_DBG_ERROR, "ran out of shared buffers"));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
}
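Taken together, "set", "unset" and "find" form a small protocol that the caller is expected to drive while holding a single mutex. A minimal sketch of the intended calling sequence follows; the lock and the elided allocation step are hypothetical placeholders, not functions from this file:

static PVRSRV_ERROR ExampleShareableAlloc(IMG_VOID)
{
	IMG_UINT32 ui32Index;
	PVRSRV_ERROR eError;

	AcquireXProcLock();	/* hypothetical caller-side mutex */

	/* claim a free slot; the "set" happens inside on success */
	eError = BM_XProcWorkaroundFindNewBufferAndSetShareIndex(&ui32Index);
	if (eError == PVRSRV_OK)
	{
		/* ... allocation path that reaches XProcWorkaroundAllocShareable() ... */

		/* exactly one "unset" per "set" */
		eError = BM_XProcWorkaroundUnsetShareIndex(ui32Index);
	}

	ReleaseXProcLock();	/* hypothetical */
	return eError;
}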
static PVRSRV_ERROR
-XProcAllocShareable(RA_ARENA *psArena,
+XProcWorkaroundAllocShareable(RA_ARENA *psArena,
IMG_UINT32 ui32AllocFlags,
IMG_UINT32 ui32Size,
IMG_UINT32 ui32PageSize,
+ IMG_PVOID pvPrivData,
+ IMG_UINT32 ui32PrivDataLength,
IMG_VOID **ppvCpuVAddr,
IMG_HANDLE *phOSMemHandle)
{
if ((ui32AllocFlags & PVRSRV_MEM_XPROC) == 0)
{
- PVR_DPF((PVR_DBG_ERROR, "XProcAllocShareable: bad flags"));
+ PVR_DPF((PVR_DBG_VERBOSE, "XProcWorkaroundAllocShareable: bad flags"));
return PVRSRV_ERROR_INVALID_PARAMS;
}
- if (gXProcWorkaroundShareDataNode == NULL)
- {
- PVR_DPF((PVR_DBG_ERROR, "XProcAllocShareable: gXProcWorkaroundShareDataNode == NULL"));
- return PVRSRV_ERROR_NOT_OWNER;
- }
-
- if(IMG_TRUE != IsXprocShareObjectValid(gXProcWorkaroundShareDataNode))
- return PVRSRV_ERROR_INVALID_PARAMS;
-
- if (gXProcWorkaroundShareDataNode->ui32RefCount > 0)
+ if (gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32RefCount > 0)
{
PVR_DPF((PVR_DBG_VERBOSE,
- "XProcAllocShareable: re-using previously allocated pages %p", gXProcWorkaroundShareDataNode));
+ "XProcWorkaroundAllocShareable: re-using previously allocated pages"));
ui32AllocFlags &= ~PVRSRV_HAP_MAPTYPE_MASK;
ui32AllocFlags |= PVRSRV_HAP_SINGLE_PROCESS;
- if (ui32AllocFlags != gXProcWorkaroundShareDataNode->ui32AllocFlags)
+ if (ui32AllocFlags != gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32AllocFlags)
{
PVR_DPF((PVR_DBG_ERROR,
- "Bucket Flags don't match! (bucket had 0x%08x, new one 0x%08x)",
- gXProcWorkaroundShareDataNode->ui32AllocFlags,
+				 "Flags don't match! (existing 0x%08x, requested 0x%08x)",
+ gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32AllocFlags,
ui32AllocFlags));
return PVRSRV_ERROR_INVALID_PARAMS;
}
- if (ui32Size != gXProcWorkaroundShareDataNode->ui32Size)
+ if (ui32Size != gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32Size)
{
PVR_DPF((PVR_DBG_ERROR,
- "Bucket Size doesn't match! (bucket size %d, new size %d )",
- gXProcWorkaroundShareDataNode->ui32Size, ui32Size));
+				 "Size doesn't match! (existing %d, requested %d)",
+				 gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32Size, ui32Size));
return PVRSRV_ERROR_INVALID_PARAMS;
}
- if (ui32PageSize != gXProcWorkaroundShareDataNode->ui32PageSize)
+ if (ui32PageSize != gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32PageSize)
{
PVR_DPF((PVR_DBG_ERROR,
- "Bucket Page Size doesn't match!(bucket size %d, new size %d )",
- gXProcWorkaroundShareDataNode->ui32PageSize, ui32PageSize));
+				 "Page size doesn't match! (existing %d, requested %d)",
+				 gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32PageSize, ui32PageSize));
return PVRSRV_ERROR_INVALID_PARAMS;
}
- *ppvCpuVAddr = gXProcWorkaroundShareDataNode->pvCpuVAddr;
- *phOSMemHandle = gXProcWorkaroundShareDataNode->hOSMemHandle;
+ *ppvCpuVAddr = gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].pvCpuVAddr;
+ *phOSMemHandle = gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].hOSMemHandle;
- gXProcWorkaroundShareDataNode->ui32RefCount++;
+ BM_XProcIndexAcquire(gXProcWorkaroundShareIndex);
return PVRSRV_OK;
}
else
{
+ if (gXProcWorkaroundState != XPROC_WORKAROUND_ALLOC)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "XPROC workaround in bad state! About to allocate memory from non-alloc state! (%d)",
+ gXProcWorkaroundState));
+ }
+ PVR_ASSERT(gXProcWorkaroundState == XPROC_WORKAROUND_ALLOC);
+
if (psArena != IMG_NULL)
{
IMG_CPU_PHYADDR sCpuPAddr;
IMG_SYS_PHYADDR sSysPAddr;
PVR_DPF((PVR_DBG_VERBOSE,
- "XProcAllocShareable: making a NEW allocation from local mem %p(%d)",
- gXProcWorkaroundShareDataNode, gXProcWorkaroundShareDataNode->ui32RefCount));
+ "XProcWorkaroundAllocShareable: making a NEW allocation from local mem"));
if (!RA_Alloc (psArena,
ui32Size,
@@ -2190,9 +2621,11 @@ XProcAllocShareable(RA_ARENA *psArena,
0,
ui32PageSize,
0,
+ pvPrivData,
+ ui32PrivDataLength,
(IMG_UINTPTR_T *)&sSysPAddr.uiAddr))
{
- PVR_DPF((PVR_DBG_ERROR, "XProcAllocShareable: RA_Alloc(0x%x) FAILED", ui32Size));
+ PVR_DPF((PVR_DBG_ERROR, "XProcWorkaroundAllocShareable: RA_Alloc(0x%x) FAILED", ui32Size));
return PVRSRV_ERROR_OUT_OF_MEMORY;
}
@@ -2200,127 +2633,194 @@ XProcAllocShareable(RA_ARENA *psArena,
if(OSReservePhys(sCpuPAddr,
ui32Size,
ui32AllocFlags,
- (IMG_VOID **)&gXProcWorkaroundShareDataNode->pvCpuVAddr,
- &gXProcWorkaroundShareDataNode->hOSMemHandle) != PVRSRV_OK)
+ IMG_NULL,
+ (IMG_VOID **)&gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].pvCpuVAddr,
+ &gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].hOSMemHandle) != PVRSRV_OK)
{
- PVR_DPF((PVR_DBG_ERROR, "XProcAllocShareable: OSReservePhys failed"));
+ PVR_DPF((PVR_DBG_ERROR, "XProcWorkaroundAllocShareable: OSReservePhys failed"));
return PVRSRV_ERROR_OUT_OF_MEMORY;
}
- gXProcWorkaroundShareDataNode->sSysPAddr = sSysPAddr;
+ gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].sSysPAddr = sSysPAddr;
}
else
{
- PVR_DPF((PVR_DBG_VERBOSE,
- "XProcAllocShareable: making a NEW allocation from OS %p(%d)",
- gXProcWorkaroundShareDataNode, gXProcWorkaroundShareDataNode->ui32RefCount));
+ PVR_DPF((PVR_DBG_VERBOSE,
+ "XProcWorkaroundAllocShareable: making a NEW allocation from OS"));
ui32AllocFlags &= ~PVRSRV_HAP_MAPTYPE_MASK;
ui32AllocFlags |= PVRSRV_HAP_SINGLE_PROCESS;
-
+ /* allocate pages from the OS RAM */
if (OSAllocPages(ui32AllocFlags,
ui32Size,
ui32PageSize,
- (IMG_VOID **)&gXProcWorkaroundShareDataNode->pvCpuVAddr,
- &gXProcWorkaroundShareDataNode->hOSMemHandle) != PVRSRV_OK)
+ pvPrivData,
+ ui32PrivDataLength,
+ IMG_NULL, /* FIXME: to support cross process sparse allocations */
+ (IMG_VOID **)&gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].pvCpuVAddr,
+ &gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].hOSMemHandle) != PVRSRV_OK)
{
PVR_DPF((PVR_DBG_ERROR,
- "XProcAllocShareable: OSAllocPages(0x%x) failed",
+ "XProcWorkaroundAllocShareable: OSAllocPages(0x%x) failed",
ui32PageSize));
return PVRSRV_ERROR_OUT_OF_MEMORY;
}
}
- gXProcWorkaroundShareDataNode->psArena = psArena;
- gXProcWorkaroundShareDataNode->ui32AllocFlags = ui32AllocFlags;
- gXProcWorkaroundShareDataNode->ui32Size = ui32Size;
- gXProcWorkaroundShareDataNode->ui32PageSize = ui32PageSize;
+ gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].psArena = psArena;
+ gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32AllocFlags = ui32AllocFlags;
+ gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32Size = ui32Size;
+ gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32PageSize = ui32PageSize;
- *ppvCpuVAddr = gXProcWorkaroundShareDataNode->pvCpuVAddr;
- *phOSMemHandle = gXProcWorkaroundShareDataNode->hOSMemHandle;
+ *ppvCpuVAddr = gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].pvCpuVAddr;
+ *phOSMemHandle = gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].hOSMemHandle;
- gXProcWorkaroundShareDataNode->ui32RefCount++;
+ BM_XProcIndexAcquire(gXProcWorkaroundShareIndex);
return PVRSRV_OK;
}
}
-static PXProcShareDataNode XProcHandleToSI(IMG_HANDLE hOSMemHandle)
+static PVRSRV_ERROR XProcWorkaroundHandleToSI(IMG_HANDLE hOSMemHandle, IMG_UINT32 *pui32SI)
{
-
- struct list_head *list_ptr;
- PXProcShareDataNode pShareDataNode = NULL;
+ IMG_UINT32 ui32SI;
+ IMG_BOOL bFound;
+ IMG_BOOL bErrorDups;
- if(hOSMemHandle == NULL)
- return NULL;
+ bFound = IMG_FALSE;
+ bErrorDups = IMG_FALSE;
- list_for_each(list_ptr, &gXProcShareList)
+ for (ui32SI = 0; ui32SI < XPROC_WORKAROUND_NUM_SHAREABLES; ui32SI++)
{
- pShareDataNode = list_entry(list_ptr, XProcShareDataNodeT, list);
- if(IMG_TRUE == IsXprocShareObjectValid(pShareDataNode))
+ if (gXProcWorkaroundShareData[ui32SI].ui32RefCount>0 && gXProcWorkaroundShareData[ui32SI].hOSMemHandle == hOSMemHandle)
{
- if ((pShareDataNode->ui32RefCount > 0) && (pShareDataNode->hOSMemHandle == hOSMemHandle))
+ if (bFound)
{
- return pShareDataNode;
+ bErrorDups = IMG_TRUE;
+ }
+ else
+ {
+ *pui32SI = ui32SI;
+ bFound = IMG_TRUE;
}
}
}
- return NULL;
-}
-
-static IMG_VOID XProcFreeShareable(IMG_HANDLE hOSMemHandle)
-{
- PXProcShareDataNode pShareDataNode;
- pShareDataNode = XProcHandleToSI(hOSMemHandle);
- if (pShareDataNode == NULL)
+ if (bErrorDups || !bFound)
{
- PVR_DPF((PVR_DBG_ERROR, "XProcHandleToSI bad handle %p", hOSMemHandle));
- return;
+ return PVRSRV_ERROR_BM_BAD_SHAREMEM_HANDLE;
}
- pShareDataNode->ui32RefCount--;
+ return PVRSRV_OK;
+}
+
+#if defined(PVRSRV_REFCOUNT_DEBUG)
+IMG_VOID _BM_XProcIndexAcquireDebug(const IMG_CHAR *pszFile, IMG_INT iLine, IMG_UINT32 ui32Index)
+#else
+IMG_VOID _BM_XProcIndexAcquire(IMG_UINT32 ui32Index)
+#endif
+{
+#if defined(PVRSRV_REFCOUNT_DEBUG)
+ PVRSRVBMXProcIncRef2(pszFile, iLine, ui32Index);
+#else
+ PVRSRVBMXProcIncRef(ui32Index);
+#endif
+}
- PVR_DPF((PVR_DBG_VERBOSE, "Reduced refcount of Node[%p] from %d to %d",
- pShareDataNode, pShareDataNode->ui32RefCount+1, pShareDataNode->ui32RefCount));
+#if defined(PVRSRV_REFCOUNT_DEBUG)
+IMG_VOID _BM_XProcIndexReleaseDebug(const IMG_CHAR *pszFile, IMG_INT iLine, IMG_UINT32 ui32Index)
+#else
+IMG_VOID _BM_XProcIndexRelease(IMG_UINT32 ui32Index)
+#endif
+{
+#if defined(PVRSRV_REFCOUNT_DEBUG)
+ PVRSRVBMXProcDecRef2(pszFile, iLine, ui32Index);
+#else
+ PVRSRVBMXProcDecRef(ui32Index);
+#endif
- if (pShareDataNode->ui32RefCount == 0)
+ PVR_DPF((PVR_DBG_VERBOSE, "Reduced refcount of SI[%d] from %d to %d",
+ ui32Index, gXProcWorkaroundShareData[ui32Index].ui32RefCount+1, gXProcWorkaroundShareData[ui32Index].ui32RefCount));
+
+ if (gXProcWorkaroundShareData[ui32Index].ui32RefCount == 0)
{
- if (pShareDataNode->psArena != IMG_NULL)
+ if (gXProcWorkaroundShareData[ui32Index].psArena != IMG_NULL)
{
IMG_SYS_PHYADDR sSysPAddr;
- if (pShareDataNode->pvCpuVAddr != IMG_NULL)
+ if (gXProcWorkaroundShareData[ui32Index].pvCpuVAddr != IMG_NULL)
{
- OSUnReservePhys(pShareDataNode->pvCpuVAddr,
- pShareDataNode->ui32Size,
- pShareDataNode->ui32AllocFlags,
- pShareDataNode->hOSMemHandle);
+ OSUnReservePhys(gXProcWorkaroundShareData[ui32Index].pvCpuVAddr,
+ gXProcWorkaroundShareData[ui32Index].ui32Size,
+ gXProcWorkaroundShareData[ui32Index].ui32AllocFlags,
+ gXProcWorkaroundShareData[ui32Index].hOSMemHandle);
}
- sSysPAddr = pShareDataNode->sSysPAddr;
- RA_Free (pShareDataNode->psArena,
+ sSysPAddr = gXProcWorkaroundShareData[ui32Index].sSysPAddr;
+ RA_Free (gXProcWorkaroundShareData[ui32Index].psArena,
sSysPAddr.uiAddr,
IMG_FALSE);
}
else
{
PVR_DPF((PVR_DBG_VERBOSE, "freeing OS memory"));
- OSFreePages(pShareDataNode->ui32AllocFlags,
- pShareDataNode->ui32PageSize,
- pShareDataNode->pvCpuVAddr,
- pShareDataNode->hOSMemHandle);
+ OSFreePages(gXProcWorkaroundShareData[ui32Index].ui32AllocFlags,
+ gXProcWorkaroundShareData[ui32Index].ui32PageSize,
+ gXProcWorkaroundShareData[ui32Index].pvCpuVAddr,
+ gXProcWorkaroundShareData[ui32Index].hOSMemHandle);
}
- XprocShareObjectDestroy(pShareDataNode);
}
}
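The _BM_XProcIndexAcquire/_BM_XProcIndexRelease pair and their Debug variants are presumably selected by wrapper macros in the corresponding header, which this diff does not show. A sketch of what such selectors might look like, mirroring the PVRSRVBMXProcIncRef/DecRef pattern above (an assumption for illustration):

#if defined(PVRSRV_REFCOUNT_DEBUG)
#define BM_XProcIndexAcquire(ui32Index) \
	_BM_XProcIndexAcquireDebug(__FILE__, __LINE__, ui32Index)
#define BM_XProcIndexRelease(ui32Index) \
	_BM_XProcIndexReleaseDebug(__FILE__, __LINE__, ui32Index)
#else
#define BM_XProcIndexAcquire(ui32Index) _BM_XProcIndexAcquire(ui32Index)
#define BM_XProcIndexRelease(ui32Index) _BM_XProcIndexRelease(ui32Index)
#endif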
+static IMG_VOID XProcWorkaroundFreeShareable(IMG_HANDLE hOSMemHandle)
+{
+	IMG_UINT32 ui32SI;	/* written by XProcWorkaroundHandleToSI() below */
+ PVRSRV_ERROR eError;
+ eError = XProcWorkaroundHandleToSI(hOSMemHandle, &ui32SI);
+ if (eError != PVRSRV_OK)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "XProcWorkaroundFreeShareable: bad handle %p", hOSMemHandle));
+ return;
+ }
+
+ BM_XProcIndexRelease(ui32SI);
+}
+
+
+/*!
+******************************************************************************
+
+ @Function BM_ImportMemory
+
+ @Description Provide a resource allocator with a source of pages of memory
+ from the Host OS's own allocation. Allocates a block of pages
+ larger than requested, allowing the resource allocator to
+                 operate a small cache of pre-allocated pages.
+
+ @Input      pH - buffer manager handle; note the void type is dictated
+              by the generic nature of the resource allocator interface.
+ @Input uRequestSize - requested size in bytes
+ @Output pActualSize - receives the actual size allocated in bytes
+ which may be >= requested size
+ @Output ppsMapping - receives the arbitrary user reference
+ associated with the underlying storage.
+ @Input uFlags - bit mask of allocation flags
+ @Input pvPrivData - opaque private data passed through to allocator
+ @Input ui32PrivDataLength - length of opaque private data
+ @Output pBase - receives a pointer to the allocated storage.
+
+ @Return IMG_TRUE - success
+ IMG_FALSE - failed
+
+ *****************************************************************************/
static IMG_BOOL
BM_ImportMemory (IMG_VOID *pH,
IMG_SIZE_T uRequestSize,
IMG_SIZE_T *pActualSize,
BM_MAPPING **ppsMapping,
IMG_UINT32 uFlags,
+ IMG_PVOID pvPrivData,
+ IMG_UINT32 ui32PrivDataLength,
IMG_UINTPTR_T *pBase)
{
BM_MAPPING *pMapping;
@@ -2329,7 +2829,7 @@ BM_ImportMemory (IMG_VOID *pH,
IMG_BOOL bResult;
IMG_SIZE_T uSize;
IMG_SIZE_T uPSize;
- IMG_SIZE_T uDevVAddrAlignment = 0;
+ IMG_SIZE_T uDevVAddrAlignment = 0; /* ? */
PVR_DPF ((PVR_DBG_MESSAGE,
"BM_ImportMemory (pBMContext=0x%x, uRequestSize=0x%x, uFlags=0x%x, uAlign=0x%x)",
@@ -2361,16 +2861,25 @@ BM_ImportMemory (IMG_VOID *pH,
pMapping->DevVAddr.uiAddr = 0;
pMapping->CpuPAddr.uiAddr = 0;
pMapping->uSize = uSize;
+ if ((uFlags & PVRSRV_MEM_SPARSE) == 0)
+ {
+ pMapping->uSizeVM = uSize;
+ }
pMapping->pBMHeap = pBMHeap;
pMapping->ui32Flags = uFlags;
pMapping->bUnmapped = IMG_FALSE;
-
+
+ /*
+	 * If anyone wants to know, pass back the actual size of our allocation.
+ * There could be up to an extra page's worth of memory which will be marked
+ * as free in the RA.
+ */
if (pActualSize)
{
*pActualSize = uSize;
}
-
+ /* if it's a dummy allocation only use one physical page */
if(pMapping->ui32Flags & PVRSRV_MEM_DUMMY)
{
uPSize = pBMHeap->sDevArena.ui32DataPageSize;
@@ -2385,13 +2894,15 @@ BM_ImportMemory (IMG_VOID *pH,
IMG_UINT32 ui32Attribs = pBMHeap->ui32Attribs | PVRSRV_MEM_XPROC;
IMG_BOOL bBadBackingStoreType;
- bBadBackingStoreType = IMG_TRUE;
+ if(uFlags & PVRSRV_MEM_ION)
+ {
+ ui32Attribs |= PVRSRV_MEM_ION;
+ }
+
+ bBadBackingStoreType = IMG_TRUE;
- if ((ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG) != 0)
+ if ((ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG) != 0)
{
-#ifndef MAX
-#define MAX(a,b) ((a) > (b) ? (a) : (b))
-#endif
uDevVAddrAlignment = MAX(pBMHeap->sDevArena.ui32DataPageSize, HOST_PAGESIZE());
@@ -2400,70 +2911,81 @@ BM_ImportMemory (IMG_VOID *pH,
PVR_DPF((PVR_DBG_ERROR, "Cannot use use this memory sharing workaround with allocations that might be suballocated"));
goto fail_mapping_alloc;
}
- uDevVAddrAlignment = 0;
+ uDevVAddrAlignment = 0; /* FIXME: find out why it doesn't work if alignment is specified */
-
+ /* If the user has specified heap CACHETYPE flags, use them to
+ * override the flags inherited from the heap.
+ */
if (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK)
{
ui32Attribs &= ~PVRSRV_HAP_CACHETYPE_MASK;
ui32Attribs |= (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK);
}
-
- if (XProcAllocShareable(IMG_NULL,
- ui32Attribs,
- (IMG_UINT32)uPSize,
- pBMHeap->sDevArena.ui32DataPageSize,
- (IMG_VOID **)&pMapping->CpuVAddr,
- &pMapping->hOSMemHandle) != PVRSRV_OK)
+ /* allocate "shared" pages. */
+ if (XProcWorkaroundAllocShareable(IMG_NULL,
+ ui32Attribs,
+ (IMG_UINT32)uPSize,
+ pBMHeap->sDevArena.ui32DataPageSize,
+ pvPrivData,
+ ui32PrivDataLength,
+ (IMG_VOID **)&pMapping->CpuVAddr,
+ &pMapping->hOSMemHandle) != PVRSRV_OK)
{
PVR_DPF((PVR_DBG_ERROR,
- "BM_ImportMemory: XProcAllocShareable(0x%x) failed", uPSize));
+ "BM_ImportMemory: XProcWorkaroundAllocShareable(0x%x) failed",
+ uPSize));
goto fail_mapping_alloc;
}
-
-
-
+ /* specify how page addresses are derived */
+ /* it works just like "env" now - no need to record
+ it as shareable, as we use the actual hOSMemHandle
+ and only divert to our wrapper layer based on Attribs */
pMapping->eCpuMemoryOrigin = hm_env;
- bBadBackingStoreType = IMG_FALSE;
+ bBadBackingStoreType = IMG_FALSE;
}
- if ((ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG) != 0)
+ if ((ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG) != 0)
{
- uDevVAddrAlignment = pBMHeap->sDevArena.ui32DataPageSize;
+ uDevVAddrAlignment = pBMHeap->sDevArena.ui32DataPageSize;
if (uPSize % uDevVAddrAlignment != 0)
{
PVR_DPF((PVR_DBG_ERROR, "Cannot use use this memory sharing workaround with allocations that might be suballocated"));
goto fail_mapping_alloc;
}
- uDevVAddrAlignment = 0;
+ uDevVAddrAlignment = 0; /* FIXME: find out why it doesn't work if alignment is specified */
-
+ /* If the user has specified heap CACHETYPE flags, use them to
+ * override the flags inherited from the heap.
+ */
if (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK)
{
ui32Attribs &= ~PVRSRV_HAP_CACHETYPE_MASK;
ui32Attribs |= (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK);
}
-
- if (XProcAllocShareable(pBMHeap->pLocalDevMemArena,
+ /* allocate "shared" pages. */
+ if (XProcWorkaroundAllocShareable(pBMHeap->pLocalDevMemArena,
ui32Attribs,
(IMG_UINT32)uPSize,
pBMHeap->sDevArena.ui32DataPageSize,
+ pvPrivData,
+ ui32PrivDataLength,
(IMG_VOID **)&pMapping->CpuVAddr,
&pMapping->hOSMemHandle) != PVRSRV_OK)
{
PVR_DPF((PVR_DBG_ERROR,
- "BM_ImportMemory: XProcAllocShareable(0x%x) failed",
+ "BM_ImportMemory: XProcWorkaroundAllocShareable(0x%x) failed",
uPSize));
goto fail_mapping_alloc;
}
-
-
-
+ /* specify how page addresses are derived */
+ /* it works just like "env" now - no need to record
+ it as shareable, as we use the actual hOSMemHandle
+ and only divert to our wrapper layer based on Attribs */
pMapping->eCpuMemoryOrigin = hm_env;
bBadBackingStoreType = IMG_FALSE;
}
@@ -2476,23 +2998,41 @@ BM_ImportMemory (IMG_VOID *pH,
}
else
-
-
+ /*
+ What type of backing store do we have?
+ */
if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG)
{
IMG_UINT32 ui32Attribs = pBMHeap->ui32Attribs;
-
+ /* The allocation code needs to know this is a sparse mapping */
+ if (pMapping->ui32Flags & PVRSRV_MEM_SPARSE)
+ {
+ ui32Attribs |= PVRSRV_MEM_SPARSE;
+ }
+
+ /* If the user has specified heap CACHETYPE flags, use them to
+ * override the flags inherited from the heap.
+ */
if (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK)
{
ui32Attribs &= ~PVRSRV_HAP_CACHETYPE_MASK;
ui32Attribs |= (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK);
}
-
+ if (pMapping->ui32Flags & PVRSRV_MEM_ALLOCATENONCACHEDMEM)
+ {
+ ui32Attribs &= ~PVRSRV_MEM_ALLOCATENONCACHEDMEM;
+ ui32Attribs |= (pMapping->ui32Flags & PVRSRV_MEM_ALLOCATENONCACHEDMEM);
+ }
+
+ /* allocate pages from the OS RAM */
if (OSAllocPages(ui32Attribs,
uPSize,
pBMHeap->sDevArena.ui32DataPageSize,
+ pvPrivData,
+ ui32PrivDataLength,
+ pMapping,
(IMG_VOID **)&pMapping->CpuVAddr,
&pMapping->hOSMemHandle) != PVRSRV_OK)
{
@@ -2502,7 +3042,7 @@ BM_ImportMemory (IMG_VOID *pH,
goto fail_mapping_alloc;
}
-
+ /* specify how page addresses are derived */
pMapping->eCpuMemoryOrigin = hm_env;
}
else if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)
@@ -2510,10 +3050,18 @@ BM_ImportMemory (IMG_VOID *pH,
IMG_SYS_PHYADDR sSysPAddr;
IMG_UINT32 ui32Attribs = pBMHeap->ui32Attribs;
-
+ /* The allocation code needs to know this is a sparse mapping */
+ if (pMapping->ui32Flags & PVRSRV_MEM_SPARSE)
+ {
+ ui32Attribs |= PVRSRV_MEM_SPARSE;
+ }
+
+ /* allocate pages from the local device memory allocator */
PVR_ASSERT(pBMHeap->pLocalDevMemArena != IMG_NULL);
-
+ /* If the user has specified heap CACHETYPE flags, use them to
+ * override the flags inherited from the heap.
+ */
if (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK)
{
ui32Attribs &= ~PVRSRV_HAP_CACHETYPE_MASK;
@@ -2527,17 +3075,20 @@ BM_ImportMemory (IMG_VOID *pH,
0,
pBMHeap->sDevArena.ui32DataPageSize,
0,
+ pvPrivData,
+ ui32PrivDataLength,
(IMG_UINTPTR_T *)&sSysPAddr.uiAddr))
{
PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: RA_Alloc(0x%x) FAILED", uPSize));
goto fail_mapping_alloc;
}
-
+ /* derive the CPU virtual address */
pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
if(OSReservePhys(pMapping->CpuPAddr,
uPSize,
ui32Attribs,
+ pMapping,
&pMapping->CpuVAddr,
&pMapping->hOSMemHandle) != PVRSRV_OK)
{
@@ -2545,7 +3096,7 @@ BM_ImportMemory (IMG_VOID *pH,
goto fail_dev_mem_alloc;
}
-
+ /* specify how page addresses are derived */
pMapping->eCpuMemoryOrigin = hm_contiguous;
}
else
@@ -2554,26 +3105,32 @@ BM_ImportMemory (IMG_VOID *pH,
goto fail_mapping_alloc;
}
-
- bResult = DevMemoryAlloc (pBMContext,
- pMapping,
- IMG_NULL,
- uFlags,
- (IMG_UINT32)uDevVAddrAlignment,
- &pMapping->DevVAddr);
- if (!bResult)
+ /*
+ * Allocate some device memory for what we just allocated.
+ */
+ if ((uFlags & PVRSRV_MEM_SPARSE) == 0)
{
- PVR_DPF((PVR_DBG_ERROR,
- "BM_ImportMemory: DevMemoryAlloc(0x%x) failed",
- pMapping->uSize));
- goto fail_dev_mem_alloc;
- }
-
-
+ bResult = DevMemoryAlloc (pBMContext,
+ pMapping,
+ IMG_NULL,
+ uFlags,
+ (IMG_UINT32)uDevVAddrAlignment,
+ &pMapping->DevVAddr);
+ if (!bResult)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "BM_ImportMemory: DevMemoryAlloc(0x%x) failed",
+ pMapping->uSize));
+ goto fail_dev_mem_alloc;
+ }
- PVR_ASSERT (uDevVAddrAlignment>1?(pMapping->DevVAddr.uiAddr%uDevVAddrAlignment)==0:1);
+		/* uDevVAddrAlignment is currently set to zero, so QAC generates a warning, which we override */
+ /* PRQA S 3356,3358 1 */
+ PVR_ASSERT (uDevVAddrAlignment>1?(pMapping->DevVAddr.uiAddr%uDevVAddrAlignment)==0:1);
+ PVR_ASSERT(pBase);
+ *pBase = pMapping->DevVAddr.uiAddr;
+ }
- *pBase = pMapping->DevVAddr.uiAddr;
*ppsMapping = pMapping;
PVR_DPF ((PVR_DBG_MESSAGE, "BM_ImportMemory: IMG_TRUE"));
@@ -2582,7 +3139,7 @@ BM_ImportMemory (IMG_VOID *pH,
fail_dev_mem_alloc:
if (pMapping && (pMapping->CpuVAddr || pMapping->hOSMemHandle))
{
-
+ /* the size is double the actual size for interleaved allocations */
if(pMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED)
{
pMapping->uSize /= 2;
@@ -2599,7 +3156,7 @@ fail_dev_mem_alloc:
if (uFlags & PVRSRV_MEM_XPROC)
{
- XProcFreeShareable(pMapping->hOSMemHandle);
+ XProcWorkaroundFreeShareable(pMapping->hOSMemHandle);
}
else
if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG)
@@ -2626,12 +3183,28 @@ fail_dev_mem_alloc:
}
fail_mapping_alloc:
OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, IMG_NULL);
-
+	/* not nulling pointer, out of scope */
fail_exit:
return IMG_FALSE;
}
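As the doc comment above notes, BM_ImportMemory is not called directly: it is handed to the resource allocator, together with BM_FreeMemory below, as the import/release callback pair for an arena. The function-pointer type written out here is illustrative only; the actual registration happens at arena creation, which is outside this diff:

/* Illustrative shape of the import callback, matching BM_ImportMemory's
 * signature after this change. */
typedef IMG_BOOL (*BM_IMPORT_FN)(IMG_VOID      *pH,
                                 IMG_SIZE_T     uRequestSize,
                                 IMG_SIZE_T    *pActualSize,
                                 BM_MAPPING   **ppsMapping,
                                 IMG_UINT32     uFlags,
                                 IMG_PVOID      pvPrivData,
                                 IMG_UINT32     ui32PrivDataLength,
                                 IMG_UINTPTR_T *pBase);

static const BM_IMPORT_FN pfnBMImport = BM_ImportMemory; /* grows the arena on demand */
/* BM_FreeMemory (below) is the matching callback that returns the pages. */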
+/*!
+******************************************************************************
+
+ @Function BM_FreeMemory
+
+ @Description Free a block of pages previously allocated via
+ BM_ImportMemory.
+
+ @Input      h - buffer manager handle; note the void type is dictated by
+              the generic nature of the resource allocator interface.
+ @Input _base - base address of blocks to free.
+ @Input psMapping - arbitrary user reference associated with the
+ underlying storage provided by BM_ImportMemory
+ @Return None
+
+ *****************************************************************************/
static IMG_VOID
BM_FreeMemory (IMG_VOID *h, IMG_UINTPTR_T _base, BM_MAPPING *psMapping)
{
@@ -2652,9 +3225,17 @@ BM_FreeMemory (IMG_VOID *h, IMG_UINTPTR_T _base, BM_MAPPING *psMapping)
return;
}
- DevMemoryFree (psMapping);
+ /*
+	  Only free the virtual memory if we got as far as allocating it.
+ This NULL check should be safe as we always have a guard page
+ at virtual address 0x00000000
+ */
+ if (psMapping->DevVAddr.uiAddr)
+ {
+ DevMemoryFree (psMapping);
+ }
-
+ /* the size is double the actual for interleaved */
if((psMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED) != 0)
{
psMapping->uSize /= 2;
@@ -2671,7 +3252,7 @@ BM_FreeMemory (IMG_VOID *h, IMG_UINTPTR_T _base, BM_MAPPING *psMapping)
if (psMapping->ui32Flags & PVRSRV_MEM_XPROC)
{
- XProcFreeShareable(psMapping->hOSMemHandle);
+ XProcWorkaroundFreeShareable(psMapping->hOSMemHandle);
}
else
if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG)
@@ -2697,13 +3278,30 @@ BM_FreeMemory (IMG_VOID *h, IMG_UINTPTR_T _base, BM_MAPPING *psMapping)
}
OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), psMapping, IMG_NULL);
-
+	/* not nulling pointer, copy on stack */
PVR_DPF((PVR_DBG_MESSAGE,
"..BM_FreeMemory (h=0x%x, base=0x%x)",
(IMG_UINTPTR_T)h, _base));
}
+/*!
+******************************************************************************
+
+ @Function BM_GetPhysPageAddr
+
+ @Description  Returns the device physical address backing the given device
+               virtual page address.
+
+ @Input psMemInfo
+
+ @Input sDevVPageAddr
+
+ @Output psDevPAddr
+
+ @Return IMG_VOID
+
+******************************************************************************/
+
IMG_VOID BM_GetPhysPageAddr(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
IMG_DEV_VIRTADDR sDevVPageAddr,
IMG_DEV_PHYADDR *psDevPAddr)
@@ -2712,12 +3310,12 @@ IMG_VOID BM_GetPhysPageAddr(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
PVR_DPF((PVR_DBG_MESSAGE, "BM_GetPhysPageAddr"));
- PVR_ASSERT (psMemInfo && psDevPAddr)
+ PVR_ASSERT(psMemInfo && psDevPAddr);
-
+ /* check it's a page address */
PVR_ASSERT((sDevVPageAddr.uiAddr & 0xFFF) == 0);
-
+ /* PRQA S 0505 4 */ /* PVR_ASSERT should catch NULL ptrs */
psDeviceNode = ((BM_BUF*)psMemInfo->sMemBlk.hBuffer)->pMapping->pBMHeap->pBMContext->psDeviceNode;
*psDevPAddr = psDeviceNode->pfnMMUGetPhysPageAddr(((BM_BUF*)psMemInfo->sMemBlk.hBuffer)->pMapping->pBMHeap->pMMUHeap,
@@ -2725,6 +3323,16 @@ IMG_VOID BM_GetPhysPageAddr(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
}
+/*!
+******************************************************************************
+ @Function BM_GetMMUContext
+
+ @Description utility function to return the MMU context
+
+ @Input hDevMemHeap - the Dev mem heap handle
+
+ @Return MMU context, else NULL
+**************************************************************************/
MMU_CONTEXT* BM_GetMMUContext(IMG_HANDLE hDevMemHeap)
{
BM_HEAP *pBMHeap = (BM_HEAP*)hDevMemHeap;
@@ -2734,6 +3342,16 @@ MMU_CONTEXT* BM_GetMMUContext(IMG_HANDLE hDevMemHeap)
return pBMHeap->pBMContext->psMMUContext;
}
+/*!
+******************************************************************************
+ @Function BM_GetMMUContextFromMemContext
+
+ @Description utility function to return the MMU context
+
+ @Input hDevMemContext - the Dev mem context handle
+
+ @Return MMU context, else NULL
+**************************************************************************/
MMU_CONTEXT* BM_GetMMUContextFromMemContext(IMG_HANDLE hDevMemContext)
{
BM_CONTEXT *pBMContext = (BM_CONTEXT*)hDevMemContext;
@@ -2743,6 +3361,16 @@ MMU_CONTEXT* BM_GetMMUContextFromMemContext(IMG_HANDLE hDevMemContext)
return pBMContext->psMMUContext;
}
+/*!
+******************************************************************************
+ @Function BM_GetMMUHeap
+
+ @Description utility function to return the MMU heap handle
+
+ @Input hDevMemHeap - the Dev mem heap handle
+
+ @Return MMU heap handle, else NULL
+**************************************************************************/
IMG_HANDLE BM_GetMMUHeap(IMG_HANDLE hDevMemHeap)
{
PVR_DPF((PVR_DBG_VERBOSE, "BM_GetMMUHeap"));
@@ -2751,6 +3379,16 @@ IMG_HANDLE BM_GetMMUHeap(IMG_HANDLE hDevMemHeap)
}
+/*!
+******************************************************************************
+ @Function BM_GetDeviceNode
+
+ @Description utility function to return the device node from the BM Context
+
+ @Input hDevMemContext - the Dev Mem Context
+
+ @Return   device node, else NULL
+**************************************************************************/
PVRSRV_DEVICE_NODE* BM_GetDeviceNode(IMG_HANDLE hDevMemContext)
{
PVR_DPF((PVR_DBG_VERBOSE, "BM_GetDeviceNode"));
@@ -2759,6 +3397,16 @@ PVRSRV_DEVICE_NODE* BM_GetDeviceNode(IMG_HANDLE hDevMemContext)
}
+/*!
+******************************************************************************
+ @Function BM_GetMappingHandle
+
+ @Description utility function to return the mapping handle from a meminfo
+
+ @Input psMemInfo - kernel meminfo
+
+ @Return mapping handle, else NULL
+**************************************************************************/
IMG_HANDLE BM_GetMappingHandle(PVRSRV_KERNEL_MEM_INFO *psMemInfo)
{
PVR_DPF((PVR_DBG_VERBOSE, "BM_GetMappingHandle"));
@@ -2766,3 +3414,115 @@ IMG_HANDLE BM_GetMappingHandle(PVRSRV_KERNEL_MEM_INFO *psMemInfo)
return ((BM_BUF*)psMemInfo->sMemBlk.hBuffer)->pMapping->hOSMemHandle;
}
+/*!
+******************************************************************************
+ @Function BM_MappingHandleFromBuffer
+
+ @Description utility function to get the BM mapping handle from a BM buffer
+
+ @Input hBuffer - Handle to BM buffer
+
+ @Return BM mapping handle
+**************************************************************************/
+IMG_HANDLE BM_MappingHandleFromBuffer(IMG_HANDLE hBuffer)
+{
+ BM_BUF *psBuffer;
+
+ PVR_ASSERT(hBuffer != IMG_NULL);
+ psBuffer = hBuffer;
+ return psBuffer->pMapping;
+}
+
+/*!
+******************************************************************************
+ @Function BM_GetVirtualSize
+
+ @Description utility function to get the VM size of a BM mapping
+
+ @Input hBMHandle - Handle to BM mapping
+
+ @Return VM size of mapping
+**************************************************************************/
+IMG_UINT32 BM_GetVirtualSize(IMG_HANDLE hBMHandle)
+{
+ BM_MAPPING *psMapping;
+
+ PVR_ASSERT(hBMHandle != IMG_NULL);
+ psMapping = hBMHandle;
+ return psMapping->ui32ChunkSize * psMapping->ui32NumVirtChunks;
+}
+
+/*!
+******************************************************************************
+ @Function BM_MapPageAtOffset
+
+ @Description utility function to check if the specified offset in a BM mapping
+              is a page that needs to be mapped
+
+ @Input hBMHandle - Handle to BM mapping
+
+ @Input ui32Offset - Offset into allocation
+
+ @Return IMG_TRUE if the page should be mapped
+**************************************************************************/
+IMG_BOOL BM_MapPageAtOffset(IMG_HANDLE hBMHandle, IMG_UINT32 ui32Offset)
+{
+ BM_MAPPING *psMapping;
+ IMG_UINT32 ui32ChunkIndex;
+
+ PVR_ASSERT(hBMHandle != IMG_NULL);
+ psMapping = hBMHandle;
+
+ ui32ChunkIndex = ui32Offset / psMapping->ui32ChunkSize;
+ /* Check for overrun */
+	PVR_ASSERT(ui32ChunkIndex < psMapping->ui32NumVirtChunks);
+ return psMapping->pabMapChunk[ui32ChunkIndex];
+}
+
+/*!
+******************************************************************************
+ @Function   BM_VirtOffsetToPhysical
+
+ @Description utility function to find the physical offset of a sparse
+              allocation from its virtual offset.
+
+ @Input hBMHandle - Handle to BM mapping
+
+ @Input ui32VirtOffset - Virtual offset into allocation
+
+ @Output pui32PhysOffset - Physical offset
+
+ @Return IMG_TRUE if the virtual offset is physically backed
+**************************************************************************/
+IMG_BOOL BM_VirtOffsetToPhysical(IMG_HANDLE hBMHandle,
+ IMG_UINT32 ui32VirtOffset,
+ IMG_UINT32 *pui32PhysOffset)
+{
+ BM_MAPPING *psMapping;
+ IMG_UINT32 ui32ChunkOffset;
+ IMG_UINT32 ui32PhysOffset = 0;
+ IMG_UINT32 i;
+
+ PVR_ASSERT(hBMHandle != IMG_NULL);
+ psMapping = hBMHandle;
+
+ ui32ChunkOffset = ui32VirtOffset / psMapping->ui32ChunkSize;
+ if (!psMapping->pabMapChunk[ui32ChunkOffset])
+ {
+ return IMG_FALSE;
+ }
+
+ for (i=0;i<ui32ChunkOffset;i++)
+ {
+ if (psMapping->pabMapChunk[i])
+ {
+ ui32PhysOffset += psMapping->ui32ChunkSize;
+ }
+ }
+ *pui32PhysOffset = ui32PhysOffset;
+
+ return IMG_TRUE;
+}
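A worked example of the sparse-chunk arithmetic implemented by BM_GetVirtualSize, BM_MapPageAtOffset and BM_VirtOffsetToPhysical above, using illustrative values only:

/* Suppose a sparse mapping with:
 *   ui32ChunkSize     = 0x1000
 *   ui32NumVirtChunks = 4
 *   pabMapChunk       = { TRUE, FALSE, TRUE, TRUE }
 *
 * Then:
 *   BM_GetVirtualSize(h)                     == 0x1000 * 4 == 0x4000
 *   BM_MapPageAtOffset(h, 0x2000)            -> chunk 2 -> IMG_TRUE
 *   BM_VirtOffsetToPhysical(h, 0x2000, &uiP) -> chunk 2 is mapped; of the
 *       preceding chunks only chunk 0 is mapped, so uiP == 0x1000 and the
 *       function returns IMG_TRUE
 *   BM_VirtOffsetToPhysical(h, 0x1000, &uiP) -> chunk 1 is unmapped, so the
 *       function returns IMG_FALSE
 */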
+/******************************************************************************
+ End of file (buffer_manager.c)
+******************************************************************************/