path: root/MdeModulePkg
author     Ard Biesheuvel <ard.biesheuvel@linaro.org>   2015-03-06 02:57:11 +0000
committer  lgao4 <lgao4@Edk2>                           2015-03-06 02:57:11 +0000
commit     6860b92c847caff16b8cbc58ca31e3dbf9c5e5cc (patch)
tree       0c0acf0e63e82d5ef78728fe433513f912c7b58f /MdeModulePkg
parent     f8aabf6e4c199e92498512e1d0cf9a347b62e491 (diff)
MdeModulePkg: serve allocations from higher-up bins if current bin is empty
This patch changes the allocation logic for the pool allocator to only
allocate additional pages if the requested allocation cannot be fulfilled
from the current bin or any of the larger ones. If there are larger blocks
available, they will be used to serve the allocation, and the remainder
will be carved up into smaller blocks using the existing carving up logic.
Note that all pool sizes are a multiple of the smallest pool size, so it
is guaranteed that the remainder will be carved up without spilling. Due
to the exponential nature of the pool sizes, the amount of work is
logarithmic in the size of the available block.

Contributed-under: TianoCore Contribution Agreement 1.0
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Reviewed-by: Liming Gao <liming.gao@intel.com>

git-svn-id: https://svn.code.sf.net/p/edk2/code/trunk/edk2@17015 6f19259b-4bc3-4df7-8a09-765794883524
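For illustration, the carving step described above can be modeled in a few
lines of standalone C. This is a minimal sketch under assumptions, not the
DXE core implementation: MODEL_LIST_TO_SIZE, CarveRemainder and the bin
numbers chosen in main() are invented for this example; the real macros and
free lists live in MdeModulePkg/Core/Dxe/Mem/Pool.c.

#include <assert.h>
#include <stdio.h>

/* Illustrative stand-in for LIST_TO_SIZE() (assumed: bin i holds (1 << i)-byte blocks). */
#define MODEL_LIST_TO_SIZE(i)  ((size_t)1 << (i))

/*
 * A request from bin WantIndex is served out of a larger free block taken
 * from bin HaveIndex; the unused tail is split into at most one free block
 * per bin, so the offsets add up exactly and the work stays logarithmic in
 * the size of the donor block.
 */
static void
CarveRemainder (unsigned WantIndex, unsigned HaveIndex)
{
  size_t   Offset    = MODEL_LIST_TO_SIZE (WantIndex);  /* bytes handed out to the caller */
  size_t   MaxOffset = MODEL_LIST_TO_SIZE (HaveIndex);  /* size of the donor block        */
  unsigned Index     = HaveIndex - 1;                   /* largest bin that can still fit */

  while (Offset < MaxOffset) {
    size_t FSize = MODEL_LIST_TO_SIZE (Index);
    while (Offset + FSize <= MaxOffset) {
      printf ("free %zu-byte block at offset %zu -> bin %u\n", FSize, Offset, Index);
      Offset += FSize;
    }
    Index--;
  }
  /* All pool sizes are multiples of the smallest one, so nothing spills. */
  assert (Offset == MaxOffset);
}

int
main (void)
{
  /* e.g. a bin-7 (128-byte) request served from a free bin-10 (1024-byte) block */
  CarveRemainder (7, 10);
  return 0;
}

Run against the example in main(), this emits exactly one free block per bin
(512, 256 and 128 bytes), which is the logarithmic amount of carving work the
message refers to.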
Diffstat (limited to 'MdeModulePkg')
-rw-r--r--  MdeModulePkg/Core/Dxe/Mem/Pool.c  29
1 file changed, 22 insertions, 7 deletions
diff --git a/MdeModulePkg/Core/Dxe/Mem/Pool.c b/MdeModulePkg/Core/Dxe/Mem/Pool.c
index 23409d35c4..fd9f065daf 100644
--- a/MdeModulePkg/Core/Dxe/Mem/Pool.c
+++ b/MdeModulePkg/Core/Dxe/Mem/Pool.c
@@ -292,7 +292,7 @@ CoreAllocatePoolI (
   VOID *Buffer;
   UINTN Index;
   UINTN FSize;
-  UINTN Offset;
+  UINTN Offset, MaxOffset;
   UINTN NoPages;
   UINTN Granularity;
@@ -343,6 +343,22 @@ CoreAllocatePoolI (
   //
   if (IsListEmpty (&Pool->FreeList[Index])) {
+    Offset = LIST_TO_SIZE (Index);
+    MaxOffset = Granularity;
+
+    //
+    // Check the bins holding larger blocks, and carve one up if needed
+    //
+    while (++Index < SIZE_TO_LIST (Granularity)) {
+      if (!IsListEmpty (&Pool->FreeList[Index])) {
+        Free = CR (Pool->FreeList[Index].ForwardLink, POOL_FREE, Link, POOL_FREE_SIGNATURE);
+        RemoveEntryList (&Free->Link);
+        NewPage = (VOID *) Free;
+        MaxOffset = LIST_TO_SIZE (Index);
+        goto Carve;
+      }
+    }
+
     //
     // Get another page
     //
@@ -354,29 +370,28 @@ CoreAllocatePoolI (
     //
     // Serve the allocation request from the head of the allocated block
     //
+Carve:
     Head = (POOL_HEAD *) NewPage;
-    Offset = LIST_TO_SIZE (Index);
     //
     // Carve up remaining space into free pool blocks
     //
-    Index = SIZE_TO_LIST (Granularity) - 1;
-    while (Offset < Granularity) {
+    Index--;
+    while (Offset < MaxOffset) {
       ASSERT (Index < MAX_POOL_LIST);
       FSize = LIST_TO_SIZE(Index);
-      while (Offset + FSize <= Granularity) {
+      while (Offset + FSize <= MaxOffset) {
         Free = (POOL_FREE *) &NewPage[Offset];
         Free->Signature = POOL_FREE_SIGNATURE;
         Free->Index = (UINT32)Index;
         InsertHeadList (&Pool->FreeList[Index], &Free->Link);
         Offset += FSize;
       }
-
       Index -= 1;
     }
-    ASSERT (Offset == Granularity);
+    ASSERT (Offset == MaxOffset);
     goto Done;
   }
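The new while (++Index < SIZE_TO_LIST (Granularity)) scan above can likewise be
sketched with a toy model. This is an assumption-laden illustration, not the DXE
core data structures: FreeCount[], TakeBlock() and the bin constants are made up
here, standing in for the FreeList/IsListEmpty/RemoveEntryList machinery shown
in the hunks above.

#include <stdio.h>

/* Hypothetical bin layout: FreeCount[i] counts free blocks of (1 << i) bytes. */
#define MODEL_PAGE_BIN  12   /* stands in for SIZE_TO_LIST (Granularity), e.g. 4 KiB */

static unsigned FreeCount[MODEL_PAGE_BIN + 1];

/*
 * Mirrors the patched control flow: if the requested bin is empty, walk the
 * larger bins; only when all of them are empty does the allocator fall back
 * to a fresh page-sized block.  Returns the bin the block was taken from.
 */
static unsigned
TakeBlock (unsigned Index)
{
  if (FreeCount[Index] == 0) {
    while (++Index < MODEL_PAGE_BIN) {
      if (FreeCount[Index] != 0) {
        FreeCount[Index]--;      /* this larger block gets carved up */
        return Index;
      }
    }
    return MODEL_PAGE_BIN;       /* previous behaviour: get another page */
  }
  FreeCount[Index]--;            /* exact-fit bin already had a free block */
  return Index;
}

int
main (void)
{
  unsigned Want = 7;             /* a bin-7 (128-byte) request        */
  unsigned Got;

  FreeCount[10] = 1;             /* one free bin-10 (1024-byte) block */
  Got = TakeBlock (Want);
  printf ("bin %u request served from bin %u\n", Want, Got);
  /* In Pool.c the tail of that block would now be split back into bins
     Want .. Got-1 by the loop at the Carve label; this model omits that. */
  return 0;
}

The point of the design is that the page fallback in the hunk above is reached
only when every bin from the requested size up to the page granularity is empty,
instead of whenever the exact-fit bin happens to be empty.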