Main Page | Class Hierarchy | Class List | File List | Class Members | File Members

heap.c File Reference

#include "ntrtlp.h"
#include "heap.h"
#include "heappriv.h"

Go to the source code of this file.

Defines

#define HEAP_SLOW_FLAGS
#define RtlFindFirstSetRightMember(Set)

Functions

PVOID RtlDebugCreateHeap (IN ULONG Flags, IN PVOID HeapBase OPTIONAL, IN SIZE_T ReserveSize OPTIONAL, IN SIZE_T CommitSize OPTIONAL, IN PVOID Lock OPTIONAL, IN PRTL_HEAP_PARAMETERS Parameters OPTIONAL)
BOOLEAN RtlDebugDestroyHeap (IN PVOID HeapHandle)
PVOID RtlDebugAllocateHeap (IN PVOID HeapHandle, IN ULONG Flags, IN SIZE_T Size)
BOOLEAN RtlDebugFreeHeap (IN PVOID HeapHandle, IN ULONG Flags, IN PVOID BaseAddress)
ULONG RtlDebugSizeHeap (IN PVOID HeapHandle, IN ULONG Flags, IN PVOID BaseAddress)
NTSTATUS RtlDebugZeroHeap (IN PVOID HeapHandle, IN ULONG Flags)
PHEAP_UNCOMMMTTED_RANGE RtlpCreateUnCommittedRange (IN PHEAP_SEGMENT Segment)
VOID RtlpDestroyUnCommittedRange (IN PHEAP_SEGMENT Segment, IN PHEAP_UNCOMMMTTED_RANGE UnCommittedRange)
VOID RtlpInsertUnCommittedPages (IN PHEAP_SEGMENT Segment, IN ULONG_PTR Address, IN SIZE_T Size)
NTSTATUS RtlpDestroyHeapSegment (IN PHEAP_SEGMENT Segment)
PHEAP_FREE_ENTRY RtlpExtendHeap (IN PHEAP Heap, IN SIZE_T AllocationSize)
PVOID RtlCreateHeap (IN ULONG Flags, IN PVOID HeapBase OPTIONAL, IN SIZE_T ReserveSize OPTIONAL, IN SIZE_T CommitSize OPTIONAL, IN PVOID Lock OPTIONAL, IN PRTL_HEAP_PARAMETERS Parameters OPTIONAL)
PVOID RtlDestroyHeap (IN PVOID HeapHandle)
PVOID RtlAllocateHeap (IN PVOID HeapHandle, IN ULONG Flags, IN SIZE_T Size)
PVOID RtlAllocateHeapSlowly (IN PVOID HeapHandle, IN ULONG Flags, IN SIZE_T Size)
BOOLEAN RtlFreeHeap (IN PVOID HeapHandle, IN ULONG Flags, IN PVOID BaseAddress)
BOOLEAN RtlFreeHeapSlowly (IN PVOID HeapHandle, IN ULONG Flags, IN PVOID BaseAddress)
SIZE_T RtlSizeHeap (IN PVOID HeapHandle, IN ULONG Flags, IN PVOID BaseAddress)
NTSTATUS RtlZeroHeap (IN PVOID HeapHandle, IN ULONG Flags)
PHEAP_FREE_ENTRY RtlpFindAndCommitPages (IN PHEAP Heap, IN PHEAP_SEGMENT Segment, IN OUT PSIZE_T Size, IN PVOID AddressWanted OPTIONAL)
BOOLEAN RtlpInitializeHeapSegment (IN PHEAP Heap, IN PHEAP_SEGMENT Segment, IN UCHAR SegmentIndex, IN ULONG Flags, IN PVOID BaseAddress, IN PVOID UnCommittedAddress, IN PVOID CommitLimitAddress)
PHEAP_FREE_ENTRY RtlpCoalesceFreeBlocks (IN PHEAP Heap, IN PHEAP_FREE_ENTRY FreeBlock, IN OUT PSIZE_T FreeSize, IN BOOLEAN RemoveFromFreeList)
VOID RtlpDeCommitFreeBlock (IN PHEAP Heap, IN PHEAP_FREE_ENTRY FreeBlock, IN SIZE_T FreeSize)
VOID RtlpInsertFreeBlock (IN PHEAP Heap, IN PHEAP_FREE_ENTRY FreeBlock, IN SIZE_T FreeSize)
PHEAP_ENTRY_EXTRA RtlpGetExtraStuffPointer (PHEAP_ENTRY BusyBlock)
SIZE_T RtlpGetSizeOfBigBlock (IN PHEAP_ENTRY BusyBlock)
BOOLEAN RtlpCheckBusyBlockTail (IN PHEAP_ENTRY BusyBlock)

Variables

ULONG RtlpDisableHeapLookaside = 0
UCHAR CheckHeapFillPattern [CHECK_HEAP_TAIL_SIZE]


Define Documentation

#define HEAP_SLOW_FLAGS
 

Value:

(HEAP_DEBUG_FLAGS             | \
 HEAP_SETTABLE_USER_FLAGS     | \
 HEAP_NEED_EXTRA_FLAGS        | \
 HEAP_CREATE_ALIGN_16         | \
 HEAP_FREE_CHECKING_ENABLED   | \
 HEAP_TAIL_CHECKING_ENABLED)

Definition at line 36 of file rtl/heap.c.

Referenced by RtlAllocateHeap(), and RtlFreeHeap().

#define RtlFindFirstSetRightMember( Set )
 

Value:

(((Set) & 0xFFFF) ? \
    (((Set) & 0xFF) ? \
        RtlpBitsClearLow[(Set) & 0xFF] : \
        RtlpBitsClearLow[((Set) >> 8) & 0xFF] + 8) : \
    ((((Set) >> 16) & 0xFF) ? \
        RtlpBitsClearLow[((Set) >> 16) & 0xFF] + 16 : \
        RtlpBitsClearLow[(Set) >> 24] + 24) \
)

Definition at line 68 of file rtl/heap.c.

Referenced by RtlAllocateHeap(), and RtlAllocateHeapSlowly().


Function Documentation

PVOID RtlAllocateHeap (IN PVOID HeapHandle,
                       IN ULONG Flags,
                       IN SIZE_T Size)

Definition at line 1316 of file rtl/heap.c.

References _HEAP_LOOKASIDE::Depth, FALSE, _HEAP_ENTRY::Flags, _HEAP_FREE_ENTRY::Flags, _HEAP::Flags, _HEAP::ForceFlags, _HEAP::FreeLists, HEAP_ENTRY_BUSY, HEAP_ENTRY_EXTRA_PRESENT, HEAP_ENTRY_LAST_ENTRY, HEAP_ENTRY_VIRTUAL_ALLOC, HEAP_FREE_ENTRY, HEAP_GRANULARITY, HEAP_GRANULARITY_SHIFT, HEAP_MAXIMUM_BLOCK_SIZE, HEAP_MAXIMUM_FREELISTS, HEAP_SLOW_FLAGS, HeapHandle, _HEAP_SEGMENT::LastEntryInSegment, _HEAP_LOOKASIDE::LastTotalAllocates, _HEAP::LockVariable, _HEAP::Lookaside, _HEAP::LookasideLockCount, NT_SUCCESS, NTSTATUS(), NULL, PHEAP_ENTRY, PHEAP_VIRTUAL_ALLOC_ENTRY, _HEAP_FREE_ENTRY::PreviousSize, RTL_PAGED_CODE, RtlAcquireLockRoutine, RtlAllocateHeapSlowly(), RtlFindFirstSetRightMember, RtlpAdjustHeapLookasideDepth(), RtlpAllocateFromHeapLookaside(), RtlpExtendHeap(), RtlpFastInsertFreeBlockDirect, RtlpFastRemoveDedicatedFreeBlock, RtlpFastRemoveFreeBlock, RtlpFastRemoveNonDedicatedFreeBlock, RtlpInsertFreeBlock(), RtlRaiseException(), RtlReleaseLockRoutine, _HEAP_ENTRY::SegmentIndex, _HEAP_FREE_ENTRY::SegmentIndex, _HEAP::Segments, SET_LAST_STATUS, _HEAP_ENTRY::Size, _HEAP_FREE_ENTRY::Size, Size, _HEAP_ENTRY::SmallTagIndex, Status, _HEAP_LOOKASIDE::TotalAllocates, _HEAP::TotalFreeSize, TRUE, _HEAP::u, _HEAP_ENTRY::UnusedBytes, USHORT, _HEAP::VirtualAllocdBlocks, and _HEAP::VirtualMemoryThreshold.

01324 : 01325 01326 This routine allocates a memory of the specified size from the specified 01327 heap. 01328 01329 Arguments: 01330 01331 HeapHandle - Supplies a pointer to an initialized heap structure 01332 01333 Flags - Specifies the set of flags to use to control the allocation 01334 01335 Size - Specifies the size, in bytes, of the allocation 01336 01337 Return Value: 01338 01339 PVOID - returns a pointer to the newly allocated block 01340 01341 --*/ 01342 01343 { 01344 PHEAP Heap = (PHEAP)HeapHandle; 01345 PULONG FreeListsInUse; 01346 ULONG FreeListsInUseUlong; 01347 SIZE_T AllocationSize; 01348 SIZE_T FreeSize, AllocationIndex; 01349 PLIST_ENTRY FreeListHead, Next; 01350 PHEAP_ENTRY BusyBlock; 01351 PHEAP_FREE_ENTRY FreeBlock, SplitBlock, SplitBlock2; 01352 ULONG InUseIndex; 01353 UCHAR FreeFlags; 01354 NTSTATUS Status; 01355 EXCEPTION_RECORD ExceptionRecord; 01356 PVOID ReturnValue; 01357 BOOLEAN LockAcquired = FALSE; 01358 01359 RTL_PAGED_CODE(); 01360 01361 01362 #ifndef NTOS_KERNEL_RUNTIME 01363 #ifdef NTHEAP_ENABLED 01364 { 01365 if (Heap->Flags & NTHEAP_ENABLED_FLAG) { 01366 01367 return RtlAllocateNtHeap( HeapHandle, 01368 Flags, 01369 Size); 01370 } 01371 } 01372 #endif // NTHEAP_ENABLED 01373 #endif // NTOS_KERNEL_RUNTIME 01374 01375 01376 // 01377 // Take the callers flags and add in the flags that we must forcibly set 01378 // in the heap 01379 // 01380 01381 Flags |= Heap->ForceFlags; 01382 01383 // 01384 // Check for special features that force us to call the slow, do-everything 01385 // version. We do everything slow for any of the following flags. 
01386 // 01387 // HEAP_SLOW_FLAGS defined as 0x6f030f60 01388 // 01389 // HEAP_DEBUG_FLAGS, defined as 0x69020000 (heappriv.h) 01390 // 01391 // HEAP_VALIDATE_PARAMETERS_ENABLED 0x40000000 (heap.h) 01392 // 01393 // HEAP_VALIDATE_ALL_ENABLED 0x20000000 (heap.h) 01394 // 01395 // HEAP_CAPTURE_STACK_BACKTRACES 0x08000000 (heap.h) 01396 // 01397 // HEAP_CREATE_ENABLE_TRACING 0x00020000 (ntrtl.h winnt obsolete) 01398 // 01399 // HEAP_FLAG_PAGE_ALLOCS 0x01000000 (heappage.h) 01400 // 01401 // HEAP_SETTABLE_USER_FLAGS 0x00000E00 (ntrtl.h) 01402 // 01403 // HEAP_NEED_EXTRA_FLAGS 0x0f000100 (heap.h) 01404 // 01405 // HEAP_CREATE_ALIGN_16 0x00010000 (ntrtl.h winnt obsolete) 01406 // 01407 // HEAP_FREE_CHECKING_ENABLED 0x00000040 (ntrtl.h winnt) 01408 // 01409 // HEAP_TAIL_CHECKING_ENABLED 0x00000020 (ntrtl.h winnt ) 01410 // 01411 // We also do everything slow if the size is greater than max long 01412 // 01413 01414 if ((Flags & HEAP_SLOW_FLAGS) || (Size >= 0x80000000)) { 01415 01416 return RtlAllocateHeapSlowly( HeapHandle, Flags, Size ); 01417 } 01418 01419 // 01420 // At this point we know we are doing everything in this routine 01421 // and not taking the slow route. 01422 // 01423 // Round the requested size up to the allocation granularity. Note 01424 // that if the request is for 0 bytes, we still allocate memory, because 01425 // we add in an extra 1 byte to protect ourselves from idiots. 01426 // 01427 // Allocation size will be either 16, 24, 32, ... 01428 // Allocation index will be 2, 3, 4, ... 01429 // 01430 // Note that allocation size 8 is skipped and are indices 0 and 1 01431 // 01432 01433 AllocationSize = ((Size ? Size : 1) + HEAP_GRANULARITY - 1 + sizeof( HEAP_ENTRY )) 01434 & ~(HEAP_GRANULARITY -1); 01435 AllocationIndex = AllocationSize >> HEAP_GRANULARITY_SHIFT; 01436 01437 // 01438 // If there is a lookaside list and the index is within limits then 01439 // try and allocate from the lookaside list. 
We'll actually capture 01440 // the lookaside pointer from the heap and only use the captured pointer. 01441 // This will take care of the condition where a walk or lock heap can 01442 // cause us to check for a non null pointer and then have it become null 01443 // when we read it again. If it is non null to start with then even if 01444 // the user walks or locks the heap via another thread the pointer to 01445 // still valid here so we can still try and do a lookaside list pop. 01446 // 01447 01448 #ifndef NTOS_KERNEL_RUNTIME 01449 01450 { 01451 PHEAP_LOOKASIDE Lookaside = (PHEAP_LOOKASIDE)Heap->Lookaside; 01452 01453 if ((Lookaside != NULL) && 01454 (Heap->LookasideLockCount == 0) && 01455 (AllocationIndex < HEAP_MAXIMUM_FREELISTS)) { 01456 01457 // 01458 // If the number of operation elapsed operations is 128 times the 01459 // lookaside depth then it is time to adjust the depth 01460 // 01461 01462 if ((LONG)(Lookaside[AllocationIndex].TotalAllocates - Lookaside[AllocationIndex].LastTotalAllocates) >= 01463 (Lookaside[AllocationIndex].Depth * 128)) { 01464 01465 RtlpAdjustHeapLookasideDepth(&(Lookaside[AllocationIndex])); 01466 } 01467 01468 ReturnValue = RtlpAllocateFromHeapLookaside(&(Lookaside[AllocationIndex])); 01469 01470 if (ReturnValue != NULL) { 01471 01472 PHEAP_ENTRY BusyBlock; 01473 01474 BusyBlock = ((PHEAP_ENTRY)ReturnValue) - 1; 01475 BusyBlock->UnusedBytes = (UCHAR)(AllocationSize - Size); 01476 BusyBlock->SmallTagIndex = 0; 01477 01478 if (Flags & HEAP_ZERO_MEMORY) { 01479 01480 RtlZeroMemory( ReturnValue, Size ); 01481 } 01482 01483 return ReturnValue; 01484 } 01485 } 01486 } 01487 01488 #endif // NTOS_KERNEL_RUNTIME 01489 01490 try { 01491 01492 // 01493 // Check if we need to serialize our access to the heap 01494 // 01495 01496 if (!(Flags & HEAP_NO_SERIALIZE)) { 01497 01498 // 01499 // Lock the free list. 
01500 // 01501 01502 RtlAcquireLockRoutine( Heap->LockVariable ); 01503 01504 LockAcquired = TRUE; 01505 } 01506 01507 // 01508 // If the allocation index is less than the maximum free list size 01509 // then we can use the index to check the free list otherwise we have 01510 // to either pull the entry off of the [0] index list or allocate 01511 // memory directly for this request. 01512 // 01513 01514 if (AllocationIndex < HEAP_MAXIMUM_FREELISTS) { 01515 01516 // 01517 // With a size that matches a free list size grab the head 01518 // of the list and check if there is an available entry 01519 // 01520 01521 FreeListHead = &Heap->FreeLists[ AllocationIndex ]; 01522 01523 if ( !IsListEmpty( FreeListHead )) { 01524 01525 // 01526 // We're in luck the list has an entry so now get the free 01527 // entry, copy its flags, remove it from the free list 01528 // 01529 01530 FreeBlock = CONTAINING_RECORD( FreeListHead->Blink, 01531 HEAP_FREE_ENTRY, 01532 FreeList ); 01533 01534 FreeFlags = FreeBlock->Flags; 01535 01536 RtlpFastRemoveDedicatedFreeBlock( Heap, FreeBlock ); 01537 01538 // 01539 // Adjust the total number of bytes free in the heap 01540 // 01541 01542 Heap->TotalFreeSize -= AllocationIndex; 01543 01544 // 01545 // Mark the block as busy and and set the number of bytes 01546 // unused and tag index. Also if it is the last entry 01547 // then keep that flag. 01548 // 01549 01550 BusyBlock = (PHEAP_ENTRY)FreeBlock; 01551 BusyBlock->Flags = HEAP_ENTRY_BUSY | (FreeFlags & HEAP_ENTRY_LAST_ENTRY); 01552 BusyBlock->UnusedBytes = (UCHAR)(AllocationSize - Size); 01553 BusyBlock->SmallTagIndex = 0; 01554 01555 } else { 01556 01557 // 01558 // The free list that matches our request is empty 01559 // 01560 // Scan the free list in use vector to find the smallest 01561 // available free block large enough for our allocations. 
01562 // 01563 01564 // 01565 // Compute the index of the ULONG where the scan should begin 01566 // 01567 01568 InUseIndex = (ULONG) (AllocationIndex >> 5); 01569 FreeListsInUse = &Heap->u.FreeListsInUseUlong[InUseIndex]; 01570 01571 // 01572 // Mask off the bits in the first ULONG that represent allocations 01573 // smaller than we need. 01574 // 01575 01576 FreeListsInUseUlong = *FreeListsInUse++ & ~((1 << ((ULONG) AllocationIndex & 0x1f)) - 1); 01577 01578 // 01579 // Begin unrolled loop to scan bit vector. 01580 // 01581 01582 switch (InUseIndex) { 01583 01584 case 0: 01585 01586 if (FreeListsInUseUlong) { 01587 01588 FreeListHead = &Heap->FreeLists[0]; 01589 break; 01590 } 01591 01592 FreeListsInUseUlong = *FreeListsInUse++; 01593 01594 // 01595 // deliberate fallthrough to next ULONG 01596 // 01597 01598 case 1: 01599 01600 if (FreeListsInUseUlong) { 01601 01602 FreeListHead = &Heap->FreeLists[32]; 01603 break; 01604 } 01605 01606 FreeListsInUseUlong = *FreeListsInUse++; 01607 01608 // 01609 // deliberate fallthrough to next ULONG 01610 // 01611 01612 case 2: 01613 01614 if (FreeListsInUseUlong) { 01615 01616 FreeListHead = &Heap->FreeLists[64]; 01617 break; 01618 } 01619 01620 FreeListsInUseUlong = *FreeListsInUse++; 01621 01622 // 01623 // deliberate fallthrough to next ULONG 01624 // 01625 01626 case 3: 01627 01628 if (FreeListsInUseUlong) { 01629 01630 FreeListHead = &Heap->FreeLists[96]; 01631 break; 01632 } 01633 01634 // 01635 // deliberate fallthrough to non dedicated list 01636 // 01637 01638 default: 01639 01640 // 01641 // No suitable entry on the free list was found. 01642 // 01643 01644 goto LookInNonDedicatedList; 01645 } 01646 01647 // 01648 // A free list has been found with a large enough allocation. 01649 // FreeListHead contains the base of the vector it was found in. 01650 // FreeListsInUseUlong contains the vector. 
01651 // 01652 01653 FreeListHead += RtlFindFirstSetRightMember( FreeListsInUseUlong ); 01654 01655 // 01656 // Grab the free block and remove it from the free list 01657 // 01658 01659 FreeBlock = CONTAINING_RECORD( FreeListHead->Blink, 01660 HEAP_FREE_ENTRY, 01661 FreeList ); 01662 01663 RtlpFastRemoveDedicatedFreeBlock( Heap, FreeBlock ); 01664 01665 SplitFreeBlock: 01666 01667 // 01668 // Save the blocks flags and decrement the amount of 01669 // free space left in the heap 01670 // 01671 01672 FreeFlags = FreeBlock->Flags; 01673 Heap->TotalFreeSize -= FreeBlock->Size; 01674 01675 // 01676 // Mark the block busy 01677 // 01678 01679 BusyBlock = (PHEAP_ENTRY)FreeBlock; 01680 BusyBlock->Flags = HEAP_ENTRY_BUSY; 01681 01682 // 01683 // Compute the size (i.e., index) of the amount from this block 01684 // that we don't need and can return to the free list 01685 // 01686 01687 FreeSize = BusyBlock->Size - AllocationIndex; 01688 01689 // 01690 // Finish setting up the rest of the new busy block 01691 // 01692 01693 BusyBlock->Size = (USHORT)AllocationIndex; 01694 BusyBlock->UnusedBytes = (UCHAR)(AllocationSize - Size); 01695 BusyBlock->SmallTagIndex = 0; 01696 01697 // 01698 // Now if the size that we are going to free up is not zero 01699 // then lets get to work and to the split. 01700 // 01701 01702 if (FreeSize != 0) { 01703 01704 // 01705 // But first we won't ever bother doing a split that only 01706 // gives us 8 bytes back. So if free size is one then just 01707 // bump up the size of the new busy block 01708 // 01709 01710 if (FreeSize == 1) { 01711 01712 BusyBlock->Size += 1; 01713 BusyBlock->UnusedBytes += sizeof( HEAP_ENTRY ); 01714 01715 } else { 01716 01717 // 01718 // Get a pointer to where the new free block will be. 
01719 // When we split a block the first part goes to the new 01720 // busy block and the second part goes back to the free 01721 // list 01722 // 01723 01724 SplitBlock = (PHEAP_FREE_ENTRY)(BusyBlock + AllocationIndex); 01725 01726 // 01727 // Reset the flags that we copied from the original free list 01728 // header, and set it other size fields. 01729 // 01730 01731 SplitBlock->Flags = FreeFlags; 01732 SplitBlock->PreviousSize = (USHORT)AllocationIndex; 01733 SplitBlock->SegmentIndex = BusyBlock->SegmentIndex; 01734 SplitBlock->Size = (USHORT)FreeSize; 01735 01736 // 01737 // If nothing else follows this entry then we will insert 01738 // this into the corresponding free list (and update 01739 // Segment->LastEntryInSegment) 01740 // 01741 01742 if (FreeFlags & HEAP_ENTRY_LAST_ENTRY) { 01743 01744 RtlpFastInsertFreeBlockDirect( Heap, SplitBlock, (USHORT)FreeSize); 01745 Heap->TotalFreeSize += FreeSize; 01746 01747 } else { 01748 01749 // 01750 // Otherwise we need to check the following block 01751 // and if it is busy then update its previous size 01752 // before inserting our new free block into the 01753 // free list 01754 // 01755 01756 SplitBlock2 = (PHEAP_FREE_ENTRY)((PHEAP_ENTRY)SplitBlock + FreeSize); 01757 01758 if (SplitBlock2->Flags & HEAP_ENTRY_BUSY) { 01759 01760 SplitBlock2->PreviousSize = (USHORT)FreeSize; 01761 01762 RtlpFastInsertFreeBlockDirect( Heap, SplitBlock, (USHORT)FreeSize ); 01763 Heap->TotalFreeSize += FreeSize; 01764 01765 } else { 01766 01767 // 01768 // The following block is free so we'll merge 01769 // these to blocks. 
by first merging the flags 01770 // 01771 01772 SplitBlock->Flags = SplitBlock2->Flags; 01773 01774 // 01775 // Removing the second block from its free list 01776 // 01777 01778 RtlpFastRemoveFreeBlock( Heap, SplitBlock2 ); 01779 01780 // 01781 // Updating the free total number of free bytes 01782 // in the heap and updating the size of the new 01783 // free block 01784 // 01785 01786 Heap->TotalFreeSize -= SplitBlock2->Size; 01787 FreeSize += SplitBlock2->Size; 01788 01789 // 01790 // If the new free block is still less than the 01791 // maximum heap block size then we'll simply 01792 // insert it back in the free list 01793 // 01794 01795 if (FreeSize <= HEAP_MAXIMUM_BLOCK_SIZE) { 01796 01797 SplitBlock->Size = (USHORT)FreeSize; 01798 01799 // 01800 // Again check if the new following block 01801 // exists and if so then updsate is previous 01802 // size 01803 // 01804 01805 if (!(SplitBlock->Flags & HEAP_ENTRY_LAST_ENTRY)) { 01806 01807 ((PHEAP_FREE_ENTRY)((PHEAP_ENTRY)SplitBlock + FreeSize))->PreviousSize = (USHORT)FreeSize; 01808 } 01809 01810 // 01811 // Insert the new free block into the free 01812 // list and update the free heap size 01813 // 01814 01815 RtlpFastInsertFreeBlockDirect( Heap, SplitBlock, (USHORT)FreeSize ); 01816 Heap->TotalFreeSize += FreeSize; 01817 01818 } else { 01819 01820 // 01821 // The new free block is pretty large so we 01822 // need to call a private routine to do the 01823 // insert 01824 // 01825 01826 RtlpInsertFreeBlock( Heap, SplitBlock, FreeSize ); 01827 } 01828 } 01829 } 01830 01831 // 01832 // Now that free flags made it back into a free block 01833 // we can zero out what we saved. 
01834 // 01835 01836 FreeFlags = 0; 01837 01838 // 01839 // If splitblock now last, update LastEntryInSegment 01840 // 01841 01842 if (SplitBlock->Flags & HEAP_ENTRY_LAST_ENTRY) { 01843 01844 PHEAP_SEGMENT Segment; 01845 01846 Segment = Heap->Segments[SplitBlock->SegmentIndex]; 01847 Segment->LastEntryInSegment = (PHEAP_ENTRY)SplitBlock; 01848 } 01849 } 01850 } 01851 01852 // 01853 // If there are no following entries then mark the new block as 01854 // such 01855 // 01856 01857 if (FreeFlags & HEAP_ENTRY_LAST_ENTRY) { 01858 01859 BusyBlock->Flags |= HEAP_ENTRY_LAST_ENTRY; 01860 } 01861 } 01862 01863 // 01864 // Return the address of the user portion of the allocated block. 01865 // This is the byte following the header. 01866 // 01867 01868 ReturnValue = BusyBlock + 1; 01869 01870 // 01871 // **** Release the lock before the zero memory call 01872 // 01873 01874 if (LockAcquired) { 01875 01876 RtlReleaseLockRoutine( Heap->LockVariable ); 01877 01878 LockAcquired = FALSE; 01879 } 01880 01881 // 01882 // If the flags indicate that we should zero memory then do it now 01883 // 01884 01885 if (Flags & HEAP_ZERO_MEMORY) { 01886 01887 RtlZeroMemory( ReturnValue, Size ); 01888 } 01889 01890 // 01891 // And return the allocated block to our caller 01892 // 01893 01894 leave; 01895 01896 // 01897 // Otherwise the allocation request is bigger than the last dedicated 01898 // free list size. Now check if the size is within our threshold. 01899 // Meaning that it could be in the [0] free list 01900 // 01901 01902 } else if (AllocationIndex <= Heap->VirtualMemoryThreshold) { 01903 01904 LookInNonDedicatedList: 01905 01906 // 01907 // The following code cycles through the [0] free list until 01908 // it finds a block that satisfies the request. 
The list 01909 // is sorted so the search is can be terminated early on success 01910 // 01911 01912 FreeListHead = &Heap->FreeLists[0]; 01913 01914 // 01915 // Check if the largest block in the list is smaller than the request 01916 // 01917 01918 Next = FreeListHead->Blink; 01919 01920 if (FreeListHead != Next) { 01921 01922 FreeBlock = CONTAINING_RECORD( Next, HEAP_FREE_ENTRY, FreeList ); 01923 01924 if (FreeBlock->Size >= AllocationIndex) { 01925 01926 // 01927 // Here we are sure there is at least a block here larger than 01928 // the requested size. Start searching from the first block 01929 // 01930 01931 Next = FreeListHead->Flink; 01932 01933 while (FreeListHead != Next) { 01934 01935 FreeBlock = CONTAINING_RECORD( Next, HEAP_FREE_ENTRY, FreeList ); 01936 01937 if (FreeBlock->Size >= AllocationIndex) { 01938 01939 // 01940 // We've found something that we can use so now remove 01941 // it from the free list and go to where we treat spliting 01942 // a free block. Note that the block we found here might 01943 // actually be the exact size we need and that is why 01944 // in the split free block case we have to consider having 01945 // nothing free after the split 01946 // 01947 01948 RtlpFastRemoveNonDedicatedFreeBlock( Heap, FreeBlock ); 01949 01950 goto SplitFreeBlock; 01951 } 01952 01953 Next = Next->Flink; 01954 } 01955 } 01956 } 01957 01958 // 01959 // The [0] list is either empty or everything is too small 01960 // so now extend the heap which should get us something less 01961 // than or equal to the virtual memory threshold 01962 // 01963 01964 FreeBlock = RtlpExtendHeap( Heap, AllocationSize ); 01965 01966 // 01967 // And provided we got something we'll treat it just like the previous 01968 // split free block cases 01969 // 01970 01971 if (FreeBlock != NULL) { 01972 01973 RtlpFastRemoveNonDedicatedFreeBlock( Heap, FreeBlock ); 01974 01975 goto SplitFreeBlock; 01976 } 01977 01978 // 01979 // We weren't able to extend the heap so we must be out of 
memory 01980 // 01981 01982 Status = STATUS_NO_MEMORY; 01983 01984 // 01985 // At this point the allocation is way too big for any of the free lists 01986 // and we can only satisfy this request if the heap is growable 01987 // 01988 01989 } else if (Heap->Flags & HEAP_GROWABLE) { 01990 01991 PHEAP_VIRTUAL_ALLOC_ENTRY VirtualAllocBlock; 01992 01993 VirtualAllocBlock = NULL; 01994 01995 // 01996 // Compute how much memory we will need for this allocation which 01997 // will include the allocation size plus a header, and then go 01998 // get the committed memory 01999 // 02000 02001 AllocationSize += FIELD_OFFSET( HEAP_VIRTUAL_ALLOC_ENTRY, BusyBlock ); 02002 02003 Status = ZwAllocateVirtualMemory( NtCurrentProcess(), 02004 (PVOID *)&VirtualAllocBlock, 02005 0, 02006 &AllocationSize, 02007 MEM_COMMIT, 02008 PAGE_READWRITE ); 02009 02010 if (NT_SUCCESS(Status)) { 02011 02012 // 02013 // Just committed, already zero. Fill in the new block 02014 // and insert it in the list of big allocation 02015 // 02016 02017 VirtualAllocBlock->BusyBlock.Size = (USHORT)(AllocationSize - Size); 02018 VirtualAllocBlock->BusyBlock.Flags = HEAP_ENTRY_VIRTUAL_ALLOC | HEAP_ENTRY_EXTRA_PRESENT | HEAP_ENTRY_BUSY; 02019 VirtualAllocBlock->CommitSize = AllocationSize; 02020 VirtualAllocBlock->ReserveSize = AllocationSize; 02021 02022 InsertTailList( &Heap->VirtualAllocdBlocks, (PLIST_ENTRY)VirtualAllocBlock ); 02023 02024 // 02025 // Return the address of the user portion of the allocated block. 02026 // This is the byte following the header. 02027 // 02028 02029 ReturnValue = (PHEAP_ENTRY)(VirtualAllocBlock + 1); 02030 02031 leave; 02032 } 02033 02034 } else { 02035 02036 Status = STATUS_BUFFER_TOO_SMALL; 02037 } 02038 02039 // 02040 // This is the error return. 02041 // 02042 02043 if (Flags & HEAP_GENERATE_EXCEPTIONS) { 02044 02045 // 02046 // Construct an exception record. 
02047 // 02048 02049 ExceptionRecord.ExceptionCode = STATUS_NO_MEMORY; 02050 ExceptionRecord.ExceptionRecord = (PEXCEPTION_RECORD)NULL; 02051 ExceptionRecord.NumberParameters = 1; 02052 ExceptionRecord.ExceptionFlags = 0; 02053 ExceptionRecord.ExceptionInformation[ 0 ] = AllocationSize; 02054 02055 RtlRaiseException( &ExceptionRecord ); 02056 } 02057 02058 SET_LAST_STATUS(Status); 02059 02060 ReturnValue = NULL; 02061 02062 } finally { 02063 02064 if (LockAcquired) { 02065 02066 RtlReleaseLockRoutine( Heap->LockVariable ); 02067 } 02068 } 02069 02070 return ReturnValue; 02071 }

PVOID RtlAllocateHeapSlowly (IN PVOID HeapHandle,
                             IN ULONG Flags,
                             IN SIZE_T Size)

Definition at line 2075 of file rtl/heap.c.

References _HEAP::AlignMask, _HEAP::AlignRound, ALLOC_HEAP_FILL, AllocationAction, CHECK_HEAP_TAIL_FILL, CHECK_HEAP_TAIL_SIZE, DEBUG_HEAP, EXCEPTION_CONTINUE_SEARCH, EXCEPTION_EXECUTE_HANDLER, FALSE, _HEAP_FREE_ENTRY::Flags, _HEAP_ENTRY::Flags, _HEAP::Flags, _HEAP::FreeLists, HEAP_ENTRY_BUSY, HEAP_ENTRY_EXTRA, HEAP_ENTRY_EXTRA_PRESENT, HEAP_ENTRY_FILL_PATTERN, HEAP_ENTRY_LAST_ENTRY, HEAP_ENTRY_VIRTUAL_ALLOC, HEAP_GRANULARITY_SHIFT, HEAP_MAXIMUM_BLOCK_SIZE, HEAP_MAXIMUM_FREELISTS, HEAP_NEED_EXTRA_FLAGS, HEAP_SMALL_TAG_MASK, HeapHandle, IS_HEAP_TAGGING_ENABLED, _HEAP_SEGMENT::LastEntryInSegment, _HEAP::LockVariable, NT_SUCCESS, NTSTATUS(), NULL, PHEAP_ENTRY_EXTRA, _HEAP_FREE_ENTRY::PreviousSize, _HEAP::PseudoTagEntries, RTL_PAGED_CODE, RtlAcquireLockRoutine, RtlDebugAllocateHeap(), RtlFindFirstSetRightMember, RtlpExtendHeap(), RtlpGetExtraStuffPointer(), RtlpInsertFreeBlock(), RtlpInsertFreeBlockDirect, RtlpRemoveFreeBlock, RtlpUpdateTagEntry(), RtlRaiseException(), RtlReleaseLockRoutine, _HEAP_FREE_ENTRY::SegmentIndex, _HEAP_ENTRY::SegmentIndex, _HEAP::Segments, SET_LAST_STATUS, Size, _HEAP_FREE_ENTRY::Size, _HEAP_ENTRY::Size, Status, _HEAP::TotalFreeSize, TRUE, _HEAP::u, _HEAP_ENTRY::UnusedBytes, USHORT, VirtualAllocationAction, _HEAP::VirtualAllocdBlocks, and _HEAP::VirtualMemoryThreshold.

Referenced by RtlAllocateHeap(), and RtlDebugAllocateHeap().

02083 : 02084 02085 This routine does the equivalent of Rtl Allocate Heap but it does it will 02086 additional heap consistency checking logic and tagging. 02087 02088 Arguments: 02089 02090 HeapHandle - Supplies a pointer to an initialized heap structure 02091 02092 Flags - Specifies the set of flags to use to control the allocation 02093 02094 Size - Specifies the size, in bytes, of the allocation 02095 02096 Return Value: 02097 02098 PVOID - returns a pointer to the newly allocated block 02099 02100 --*/ 02101 02102 { 02103 PHEAP Heap = (PHEAP)HeapHandle; 02104 BOOLEAN LockAcquired = FALSE; 02105 PVOID ReturnValue = NULL; 02106 PULONG FreeListsInUse; 02107 ULONG FreeListsInUseUlong; 02108 SIZE_T AllocationSize; 02109 SIZE_T FreeSize, AllocationIndex; 02110 UCHAR EntryFlags, FreeFlags; 02111 PLIST_ENTRY FreeListHead, Next; 02112 PHEAP_ENTRY BusyBlock; 02113 PHEAP_FREE_ENTRY FreeBlock, SplitBlock, SplitBlock2; 02114 PHEAP_ENTRY_EXTRA ExtraStuff; 02115 NTSTATUS Status; 02116 EXCEPTION_RECORD ExceptionRecord; 02117 SIZE_T ZeroSize = 0; 02118 02119 RTL_PAGED_CODE(); 02120 02121 // 02122 // Note that Flags has already been OR'd with Heap->ForceFlags. 02123 // 02124 02125 #ifndef NTOS_KERNEL_RUNTIME 02126 02127 // 02128 // In the non kernel case check if we should be using the debug version 02129 // of heap allocation 02130 // 02131 02132 if (DEBUG_HEAP( Flags )) { 02133 02134 return RtlDebugAllocateHeap( HeapHandle, Flags, Size ); 02135 } 02136 02137 #endif // NTOS_KERNEL_RUNTIME 02138 02139 // 02140 // If the size is greater than maxlong then say we can't allocate that 02141 // much and return the error to our caller 02142 // 02143 02144 if (Size > 0x7fffffff) { 02145 02146 SET_LAST_STATUS( STATUS_NO_MEMORY ); 02147 02148 return NULL; 02149 } 02150 02151 // 02152 // Round up the requested size to the allocation granularity. 
Note 02153 // that if the request is for zero bytes we will still allocate memory, 02154 // 02155 // Allocation size will be either 16, 24, 32, ... 02156 // Allocation index will be 2, 3, 4, ... 02157 // 02158 02159 AllocationSize = ((Size ? Size : 1) + Heap->AlignRound) & Heap->AlignMask; 02160 02161 // 02162 // Generate the flags needed for this heap entry. Mark it busy and add 02163 // any user settable bits. Also if the input flag indicates any entry 02164 // extra fields and we have a tag to use then make room for the extra 02165 // fields in the heap entry 02166 // 02167 02168 EntryFlags = (UCHAR)(HEAP_ENTRY_BUSY | ((Flags & HEAP_SETTABLE_USER_FLAGS) >> 4)); 02169 02170 if ((Flags & HEAP_NEED_EXTRA_FLAGS) || (Heap->PseudoTagEntries != NULL)) { 02171 02172 EntryFlags |= HEAP_ENTRY_EXTRA_PRESENT; 02173 AllocationSize += sizeof( HEAP_ENTRY_EXTRA ); 02174 } 02175 02176 AllocationIndex = AllocationSize >> HEAP_GRANULARITY_SHIFT; 02177 02178 try { 02179 02180 // 02181 // Lock the free list. 02182 // 02183 02184 if (!(Flags & HEAP_NO_SERIALIZE)) { 02185 02186 RtlAcquireLockRoutine( Heap->LockVariable ); 02187 02188 LockAcquired = TRUE; 02189 } 02190 02191 // 02192 // Do all the actual heap work under the protection of a try-except clause 02193 // to protect us from corruption 02194 // 02195 02196 try { 02197 02198 // 02199 // If the allocation index is less than the maximum free list size 02200 // then we can use the index to check the free list otherwise we have 02201 // to either pull the entry off of the [0] index list or allocate 02202 // memory directly for this request. 
02203 // 02204 02205 if (AllocationIndex < HEAP_MAXIMUM_FREELISTS) { 02206 02207 // 02208 // With a size that matches a free list size grab the head 02209 // of the list and check if there is an available entry 02210 // 02211 02212 FreeListHead = &Heap->FreeLists[ AllocationIndex ]; 02213 02214 if ( !IsListEmpty( FreeListHead )) { 02215 02216 // 02217 // We're in luck the list has an entry so now get the free 02218 // entry, copy its flags, remove it from the free list 02219 // 02220 02221 FreeBlock = CONTAINING_RECORD( FreeListHead->Flink, 02222 HEAP_FREE_ENTRY, 02223 FreeList ); 02224 02225 FreeFlags = FreeBlock->Flags; 02226 02227 RtlpRemoveFreeBlock( Heap, FreeBlock ); 02228 02229 // 02230 // Adjust the total number of bytes free in the heap 02231 // 02232 02233 Heap->TotalFreeSize -= AllocationIndex; 02234 02235 // 02236 // Mark the block as busy and and set the number of bytes 02237 // unused and tag index. Also if it is the last entry 02238 // then keep that flag. 02239 // 02240 02241 BusyBlock = (PHEAP_ENTRY)FreeBlock; 02242 BusyBlock->Flags = EntryFlags | (FreeFlags & HEAP_ENTRY_LAST_ENTRY); 02243 BusyBlock->UnusedBytes = (UCHAR)(AllocationSize - Size); 02244 02245 } else { 02246 02247 // 02248 // The free list that matches our request is empty. We know 02249 // that there are 128 free lists managed by a 4 ulong bitmap. 02250 // The next big if-else-if statement will decide which ulong 02251 // we tackle 02252 // 02253 // Check if the requested allocation index within the first 02254 // quarter of the free lists. 02255 // 02256 02257 if (AllocationIndex < (HEAP_MAXIMUM_FREELISTS * 1) / 4) { 02258 02259 // 02260 // Grab a pointer to the corresponding bitmap ulong, and 02261 // then get the bit we're actually interested in to be the 02262 // first bit of the ulong. 
02263 // 02264 02265 FreeListsInUse = &Heap->u.FreeListsInUseUlong[ 0 ]; 02266 FreeListsInUseUlong = *FreeListsInUse++ >> ((ULONG) AllocationIndex & 0x1F); 02267 02268 // 02269 // If the remaining bitmap has any bits set then we know 02270 // there is a non empty list that is larger than our 02271 // requested index so find that bit and compute the list 02272 // head of the next non empty list 02273 // 02274 02275 if (FreeListsInUseUlong) { 02276 02277 FreeListHead += RtlFindFirstSetRightMember( FreeListsInUseUlong ); 02278 02279 } else { 02280 02281 // 02282 // The rest of the first ulong is all zeros so we need 02283 // to move to the second ulong 02284 // 02285 02286 FreeListsInUseUlong = *FreeListsInUse++; 02287 02288 // 02289 // Check if the second ulong has any bits set and if 02290 // so then compute the list head of the next non empty 02291 // list 02292 // 02293 02294 if (FreeListsInUseUlong) { 02295 02296 FreeListHead += ((HEAP_MAXIMUM_FREELISTS * 1) / 4) - 02297 (AllocationIndex & 0x1F) + 02298 RtlFindFirstSetRightMember( FreeListsInUseUlong ); 02299 02300 } else { 02301 02302 // 02303 // Do the same test for the third ulong 02304 // 02305 02306 FreeListsInUseUlong = *FreeListsInUse++; 02307 02308 if (FreeListsInUseUlong) { 02309 02310 FreeListHead += ((HEAP_MAXIMUM_FREELISTS * 2) / 4) - 02311 (AllocationIndex & 0x1F) + 02312 RtlFindFirstSetRightMember( FreeListsInUseUlong ); 02313 02314 } else { 02315 02316 // 02317 // Repeat the test for the forth ulong, and if 02318 // that one is also empty then we need to grab 02319 // the allocation off of the [0] index list 02320 // 02321 02322 FreeListsInUseUlong = *FreeListsInUse++; 02323 02324 if (FreeListsInUseUlong) { 02325 02326 FreeListHead += ((HEAP_MAXIMUM_FREELISTS * 3) / 4) - 02327 (AllocationIndex & 0x1F) + 02328 RtlFindFirstSetRightMember( FreeListsInUseUlong ); 02329 02330 } else { 02331 02332 goto LookInNonDedicatedList; 02333 } 02334 } 02335 } 02336 } 02337 02338 // 02339 // Otherwise check if the 
requested allocation index lies 02340 // within the second quarter of the free lists. We repeat the 02341 // test just like we did above on the second, third, and forth 02342 // bitmap ulongs. 02343 // 02344 02345 } else if (AllocationIndex < (HEAP_MAXIMUM_FREELISTS * 2) / 4) { 02346 02347 FreeListsInUse = &Heap->u.FreeListsInUseUlong[ 1 ]; 02348 FreeListsInUseUlong = *FreeListsInUse++ >> ((ULONG) AllocationIndex & 0x1F); 02349 02350 if (FreeListsInUseUlong) { 02351 02352 FreeListHead += RtlFindFirstSetRightMember( FreeListsInUseUlong ); 02353 02354 } else { 02355 02356 FreeListsInUseUlong = *FreeListsInUse++; 02357 02358 if (FreeListsInUseUlong) { 02359 02360 FreeListHead += ((HEAP_MAXIMUM_FREELISTS * 1) / 4) - 02361 (AllocationIndex & 0x1F) + 02362 RtlFindFirstSetRightMember( FreeListsInUseUlong ); 02363 02364 } else { 02365 02366 FreeListsInUseUlong = *FreeListsInUse++; 02367 02368 if (FreeListsInUseUlong) { 02369 02370 FreeListHead += ((HEAP_MAXIMUM_FREELISTS * 2) / 4) - 02371 (AllocationIndex & 0x1F) + 02372 RtlFindFirstSetRightMember( FreeListsInUseUlong ); 02373 02374 } else { 02375 02376 goto LookInNonDedicatedList; 02377 } 02378 } 02379 } 02380 02381 // 02382 // Otherwise check if the requested allocation index lies 02383 // within the third quarter of the free lists. 
We repeat the 02384 // test just like we did above on the third and forth bitmap 02385 // ulongs 02386 // 02387 02388 } else if (AllocationIndex < (HEAP_MAXIMUM_FREELISTS * 3) / 4) { 02389 02390 FreeListsInUse = &Heap->u.FreeListsInUseUlong[ 2 ]; 02391 FreeListsInUseUlong = *FreeListsInUse++ >> ((ULONG) AllocationIndex & 0x1F); 02392 02393 if (FreeListsInUseUlong) { 02394 02395 FreeListHead += RtlFindFirstSetRightMember( FreeListsInUseUlong ); 02396 02397 } else { 02398 02399 FreeListsInUseUlong = *FreeListsInUse++; 02400 02401 if (FreeListsInUseUlong) { 02402 02403 FreeListHead += ((HEAP_MAXIMUM_FREELISTS * 1) / 4) - 02404 (AllocationIndex & 0x1F) + 02405 RtlFindFirstSetRightMember( FreeListsInUseUlong ); 02406 02407 } else { 02408 02409 goto LookInNonDedicatedList; 02410 } 02411 } 02412 02413 // 02414 // Lastly the requested allocation index must lie within the 02415 // last quarter of the free lists. We repeat the test just 02416 // like we did above on the forth ulong 02417 // 02418 02419 } else { 02420 02421 FreeListsInUse = &Heap->u.FreeListsInUseUlong[ 3 ]; 02422 FreeListsInUseUlong = *FreeListsInUse++ >> ((ULONG) AllocationIndex & 0x1F); 02423 02424 if (FreeListsInUseUlong) { 02425 02426 FreeListHead += RtlFindFirstSetRightMember( FreeListsInUseUlong ); 02427 02428 } else { 02429 02430 goto LookInNonDedicatedList; 02431 } 02432 } 02433 02434 // 02435 // At this point the free list head points to a non empty free 02436 // list that is of greater size than we need. 
02437 // 02438 02439 FreeBlock = CONTAINING_RECORD( FreeListHead->Flink, 02440 HEAP_FREE_ENTRY, 02441 FreeList ); 02442 02443 SplitFreeBlock: 02444 02445 // 02446 // Remember the flags that go with this block and remove it 02447 // from its list 02448 // 02449 02450 FreeFlags = FreeBlock->Flags; 02451 02452 RtlpRemoveFreeBlock( Heap, FreeBlock ); 02453 02454 // 02455 // Adjust the amount free in the heap 02456 // 02457 02458 Heap->TotalFreeSize -= FreeBlock->Size; 02459 02460 // 02461 // Mark the block busy 02462 // 02463 02464 BusyBlock = (PHEAP_ENTRY)FreeBlock; 02465 BusyBlock->Flags = EntryFlags; 02466 02467 // 02468 // Compute the size (i.e., index) of the amount from this 02469 // block that we don't need and can return to the free list 02470 // 02471 02472 FreeSize = BusyBlock->Size - AllocationIndex; 02473 02474 // 02475 // Finish setting up the rest of the new busy block 02476 // 02477 02478 BusyBlock->Size = (USHORT)AllocationIndex; 02479 BusyBlock->UnusedBytes = (UCHAR)(AllocationSize - Size); 02480 02481 // 02482 // Now if the size that we are going to free up is not zero 02483 // then lets get to work and to the split. 02484 // 02485 02486 if (FreeSize != 0) { 02487 02488 // 02489 // But first we won't ever bother doing a split that only 02490 // gives us 8 bytes back. So if free size is one then 02491 // just bump up the size of the new busy block 02492 // 02493 02494 if (FreeSize == 1) { 02495 02496 BusyBlock->Size += 1; 02497 BusyBlock->UnusedBytes += sizeof( HEAP_ENTRY ); 02498 02499 } else { 02500 02501 // 02502 // Get a pointer to where the new free block will be. 02503 // When we split a block the first part goes to the 02504 // new busy block and the second part goes back to the 02505 // free list 02506 // 02507 02508 SplitBlock = (PHEAP_FREE_ENTRY)(BusyBlock + AllocationIndex); 02509 02510 // 02511 // Reset the flags that we copied from the original 02512 // free list header, and set it other size fields. 
02513 // 02514 02515 SplitBlock->Flags = FreeFlags; 02516 SplitBlock->PreviousSize = (USHORT)AllocationIndex; 02517 SplitBlock->SegmentIndex = BusyBlock->SegmentIndex; 02518 SplitBlock->Size = (USHORT)FreeSize; 02519 02520 // 02521 // If nothing else follows this entry then we will 02522 // insert this into the corresponding free list 02523 // 02524 02525 if (FreeFlags & HEAP_ENTRY_LAST_ENTRY) { 02526 02527 RtlpInsertFreeBlockDirect( Heap, SplitBlock, (USHORT)FreeSize ); 02528 02529 Heap->TotalFreeSize += FreeSize; 02530 02531 } else { 02532 02533 // 02534 // Otherwise we need to check the following block 02535 // and if it is busy then update its previous size 02536 // before inserting our new free block into the 02537 // free list 02538 // 02539 02540 SplitBlock2 = (PHEAP_FREE_ENTRY)((PHEAP_ENTRY)SplitBlock + FreeSize); 02541 02542 if (SplitBlock2->Flags & HEAP_ENTRY_BUSY) { 02543 02544 SplitBlock2->PreviousSize = (USHORT)FreeSize; 02545 02546 RtlpInsertFreeBlockDirect( Heap, SplitBlock, (USHORT)FreeSize ); 02547 02548 Heap->TotalFreeSize += FreeSize; 02549 02550 } else { 02551 02552 // 02553 // The following block is free so we'll merge 02554 // these to blocks. 
by first merging the flags 02555 // 02556 02557 SplitBlock->Flags = SplitBlock2->Flags; 02558 02559 // 02560 // Removing the second block from its free 02561 // list 02562 // 02563 02564 RtlpRemoveFreeBlock( Heap, SplitBlock2 ); 02565 02566 // 02567 // Updating the free total number of free 02568 // bytes in the heap and updating the size of 02569 // the new free block 02570 // 02571 02572 Heap->TotalFreeSize -= SplitBlock2->Size; 02573 FreeSize += SplitBlock2->Size; 02574 02575 // 02576 // If the new free block is still less than 02577 // the maximum heap block size then we'll 02578 // simply insert it back in the free list 02579 // 02580 02581 if (FreeSize <= HEAP_MAXIMUM_BLOCK_SIZE) { 02582 02583 SplitBlock->Size = (USHORT)FreeSize; 02584 02585 // 02586 // Again check if the new following block 02587 // exists and if so then updsate is 02588 // previous size 02589 // 02590 02591 if (!(SplitBlock->Flags & HEAP_ENTRY_LAST_ENTRY)) { 02592 02593 ((PHEAP_FREE_ENTRY)((PHEAP_ENTRY)SplitBlock + FreeSize))->PreviousSize = (USHORT)FreeSize; 02594 } 02595 02596 // 02597 // Insert the new free block into the free 02598 // list and update the free heap size 02599 // 02600 02601 RtlpInsertFreeBlockDirect( Heap, SplitBlock, (USHORT)FreeSize ); 02602 02603 Heap->TotalFreeSize += FreeSize; 02604 02605 } else { 02606 02607 // 02608 // The new free block is pretty large so 02609 // we need to call a private routine to do 02610 // the insert 02611 // 02612 02613 RtlpInsertFreeBlock( Heap, SplitBlock, FreeSize ); 02614 } 02615 } 02616 } 02617 02618 // 02619 // Now that free flags made it back into a free block 02620 // we can zero out what we saved. 
02621 // 02622 02623 FreeFlags = 0; 02624 02625 // 02626 // If splitblock now last, update LastEntryInSegment 02627 // 02628 02629 if (SplitBlock->Flags & HEAP_ENTRY_LAST_ENTRY) { 02630 02631 PHEAP_SEGMENT Segment; 02632 02633 Segment = Heap->Segments[SplitBlock->SegmentIndex]; 02634 Segment->LastEntryInSegment = (PHEAP_ENTRY)SplitBlock; 02635 } 02636 02637 } 02638 } 02639 02640 // 02641 // If there are no following entries then mark the new block 02642 // as such 02643 // 02644 02645 if (FreeFlags & HEAP_ENTRY_LAST_ENTRY) { 02646 02647 BusyBlock->Flags |= HEAP_ENTRY_LAST_ENTRY; 02648 } 02649 } 02650 02651 // 02652 // Return the address of the user portion of the allocated block. 02653 // This is the byte following the header. 02654 // 02655 02656 ReturnValue = BusyBlock + 1; 02657 02658 // 02659 // If the flags indicate that we should zero memory then 02660 // remember how much to zero. We'll do the zeroing later 02661 // 02662 02663 if (Flags & HEAP_ZERO_MEMORY) { 02664 02665 ZeroSize = Size; 02666 02667 // 02668 // Otherwise if the flags indicate that we should fill heap then 02669 // do it now. 02670 // 02671 02672 } else if (Heap->Flags & HEAP_FREE_CHECKING_ENABLED) { 02673 02674 RtlFillMemoryUlong( (PCHAR)(BusyBlock + 1), Size & ~0x3, ALLOC_HEAP_FILL ); 02675 } 02676 02677 // 02678 // If the flags indicate that we should do tail checking then copy 02679 // the fill pattern right after the heap block. 
02680 // 02681 02682 if (Heap->Flags & HEAP_TAIL_CHECKING_ENABLED) { 02683 02684 RtlFillMemory( (PCHAR)ReturnValue + Size, 02685 CHECK_HEAP_TAIL_SIZE, 02686 CHECK_HEAP_TAIL_FILL ); 02687 02688 BusyBlock->Flags |= HEAP_ENTRY_FILL_PATTERN; 02689 } 02690 02691 BusyBlock->SmallTagIndex = 0; 02692 02693 // 02694 // If the flags indicate that there is an extra block present then 02695 // we'll fill it in 02696 // 02697 02698 if (BusyBlock->Flags & HEAP_ENTRY_EXTRA_PRESENT) { 02699 02700 ExtraStuff = RtlpGetExtraStuffPointer( BusyBlock ); 02701 02702 RtlZeroMemory( ExtraStuff, sizeof( *ExtraStuff )); 02703 02704 #ifndef NTOS_KERNEL_RUNTIME 02705 02706 // 02707 // In the non kernel case the tagging goes in either the extra 02708 // stuff or the busy block small tag index 02709 // 02710 02711 if (IS_HEAP_TAGGING_ENABLED()) { 02712 02713 ExtraStuff->TagIndex = RtlpUpdateTagEntry( Heap, 02714 (USHORT)((Flags & HEAP_TAG_MASK) >> HEAP_TAG_SHIFT), 02715 0, 02716 BusyBlock->Size, 02717 AllocationAction ); 02718 } 02719 02720 } else if (IS_HEAP_TAGGING_ENABLED()) { 02721 02722 BusyBlock->SmallTagIndex = (UCHAR)RtlpUpdateTagEntry( Heap, 02723 (USHORT)((Flags & HEAP_SMALL_TAG_MASK) >> HEAP_TAG_SHIFT), 02724 0, 02725 BusyBlock->Size, 02726 AllocationAction ); 02727 02728 #endif // NTOS_KERNEL_RUNTIME 02729 02730 } 02731 02732 // 02733 // Return the address of the user portion of the allocated block. 02734 // This is the byte following the header. 02735 // 02736 02737 leave; 02738 02739 // 02740 // Otherwise the allocation request is bigger than the last dedicated 02741 // free list size. Now check if the size is within our threshold. 02742 // Meaning that it could be in the [0] free list 02743 // 02744 02745 } else if (AllocationIndex <= Heap->VirtualMemoryThreshold) { 02746 02747 LookInNonDedicatedList: 02748 02749 // 02750 // The following code cycles through the [0] free list until 02751 // it finds a block that satisfies the request. 
The list 02752 // is sorted so the search is can be terminated early on success 02753 // 02754 02755 FreeListHead = &Heap->FreeLists[ 0 ]; 02756 Next = FreeListHead->Flink; 02757 02758 while (FreeListHead != Next) { 02759 02760 FreeBlock = CONTAINING_RECORD( Next, HEAP_FREE_ENTRY, FreeList ); 02761 02762 if (FreeBlock->Size >= AllocationIndex) { 02763 02764 // 02765 // We've found something that we can use so now go to 02766 // where we treat spliting a free block. Note that 02767 // the block we found here might actually be the exact 02768 // size we need and that is why in the split free block 02769 // case we have to consider having nothing free after the 02770 // split 02771 // 02772 02773 goto SplitFreeBlock; 02774 02775 } else { 02776 02777 Next = Next->Flink; 02778 } 02779 } 02780 02781 // 02782 // The [0] list is either empty or everything is too small 02783 // so now extend the heap which should get us something less 02784 // than or equal to the virtual memory threshold 02785 // 02786 02787 FreeBlock = RtlpExtendHeap( Heap, AllocationSize ); 02788 02789 // 02790 // And provided we got something we'll treat it just like the 02791 // previous split free block cases 02792 // 02793 02794 if (FreeBlock != NULL) { 02795 02796 goto SplitFreeBlock; 02797 } 02798 02799 // 02800 // We weren't able to extend the heap so we must be out of memory 02801 // 02802 02803 Status = STATUS_NO_MEMORY; 02804 02805 // 02806 // At this point the allocation is way too big for any of the free 02807 // lists and we can only satisfy this request if the heap is growable 02808 // 02809 02810 } else if (Heap->Flags & HEAP_GROWABLE) { 02811 02812 PHEAP_VIRTUAL_ALLOC_ENTRY VirtualAllocBlock; 02813 02814 VirtualAllocBlock = NULL; 02815 02816 // 02817 // Compute how much memory we will need for this allocation which 02818 // will include the allocation size plus a header, and then go 02819 // get the committed memory 02820 // 02821 02822 AllocationSize += FIELD_OFFSET( 
HEAP_VIRTUAL_ALLOC_ENTRY, BusyBlock ); 02823 02824 Status = ZwAllocateVirtualMemory( NtCurrentProcess(), 02825 (PVOID *)&VirtualAllocBlock, 02826 0, 02827 &AllocationSize, 02828 MEM_COMMIT, 02829 PAGE_READWRITE ); 02830 02831 if (NT_SUCCESS( Status )) { 02832 02833 // 02834 // Just committed, already zero. Fill in the new block 02835 // and insert it in the list of big allocation 02836 // 02837 02838 VirtualAllocBlock->BusyBlock.Size = (USHORT)(AllocationSize - Size); 02839 VirtualAllocBlock->BusyBlock.Flags = EntryFlags | HEAP_ENTRY_VIRTUAL_ALLOC | HEAP_ENTRY_EXTRA_PRESENT; 02840 VirtualAllocBlock->CommitSize = AllocationSize; 02841 VirtualAllocBlock->ReserveSize = AllocationSize; 02842 02843 #ifndef NTOS_KERNEL_RUNTIME 02844 02845 // 02846 // In the non kernel case see if we need to add heap tagging 02847 // 02848 02849 if (IS_HEAP_TAGGING_ENABLED()) { 02850 02851 VirtualAllocBlock->ExtraStuff.TagIndex = 02852 RtlpUpdateTagEntry( Heap, 02853 (USHORT)((Flags & HEAP_SMALL_TAG_MASK) >> HEAP_TAG_SHIFT), 02854 0, 02855 VirtualAllocBlock->CommitSize >> HEAP_GRANULARITY_SHIFT, 02856 VirtualAllocationAction ); 02857 } 02858 02859 #endif // NTOS_KERNEL_RUNTIME 02860 02861 InsertTailList( &Heap->VirtualAllocdBlocks, (PLIST_ENTRY)VirtualAllocBlock ); 02862 02863 // 02864 // Return the address of the user portion of the allocated 02865 // block. This is the byte following the header. 02866 // 02867 02868 ReturnValue = (PHEAP_ENTRY)(VirtualAllocBlock + 1); 02869 02870 leave; 02871 } 02872 02873 // 02874 // Otherwise we have an error condition 02875 // 02876 02877 } else { 02878 02879 Status = STATUS_BUFFER_TOO_SMALL; 02880 } 02881 02882 SET_LAST_STATUS( Status ); 02883 02884 if (Flags & HEAP_GENERATE_EXCEPTIONS) { 02885 02886 // 02887 // Construct an exception record. 
02888 // 02889 02890 ExceptionRecord.ExceptionCode = STATUS_NO_MEMORY; 02891 ExceptionRecord.ExceptionRecord = (PEXCEPTION_RECORD)NULL; 02892 ExceptionRecord.NumberParameters = 1; 02893 ExceptionRecord.ExceptionFlags = 0; 02894 ExceptionRecord.ExceptionInformation[ 0 ] = AllocationSize; 02895 02896 RtlRaiseException( &ExceptionRecord ); 02897 } 02898 02899 } except( GetExceptionCode() == STATUS_NO_MEMORY ? EXCEPTION_CONTINUE_SEARCH : 02900 EXCEPTION_EXECUTE_HANDLER ) { 02901 02902 SET_LAST_STATUS( GetExceptionCode() ); 02903 } 02904 02905 // 02906 // Check if there is anything to zero out 02907 // 02908 02909 if ( ZeroSize ) { 02910 02911 RtlZeroMemory( ReturnValue, ZeroSize ); 02912 } 02913 02914 } finally { 02915 02916 if (LockAcquired) { 02917 02918 RtlReleaseLockRoutine( Heap->LockVariable ); 02919 } 02920 } 02921 02922 // 02923 // And return to our caller 02924 // 02925 02926 return ReturnValue; 02927 }

PVOID RtlCreateHeap IN ULONG  Flags,
IN PVOID HeapBase  OPTIONAL,
IN SIZE_T ReserveSize  OPTIONAL,
IN SIZE_T CommitSize  OPTIONAL,
IN PVOID Lock  OPTIONAL,
IN PRTL_HEAP_PARAMETERS Parameters  OPTIONAL
 

Definition at line 191 of file rtl/heap.c.

References CHECK_HEAP_TAIL_SIZE, DEBUG_HEAP, EXCEPTION_EXECUTE_HANDLER, HEAP, HEAP_BREAK_WHEN_OUT_OF_VM, HEAP_CAPTURE_STACK_BACKTRACES, HEAP_ENTRY, HEAP_ENTRY_BUSY, HEAP_GRANULARITY, HEAP_GRANULARITY_SHIFT, HEAP_LOCK, HEAP_LOCK_USER_ALLOCATED, HEAP_LOOKASIDE, HEAP_MAXIMUM_BLOCK_SIZE, HEAP_MAXIMUM_FREELISTS, HEAP_NO_ALIGNMENT, HEAP_NUMBER_OF_PSEUDO_TAG, HEAP_PROTECTION_ENABLED, HEAP_PSEUDO_TAG_ENTRY, HEAP_SEGMENT_USER_ALLOCATED, HEAP_SIGNATURE, HEAP_SKIP_VALIDATION_CHECKS, HEAP_VALIDATE_ALL_ENABLED, HEAP_VALIDATE_PARAMETERS_ENABLED, HeapDebugBreak, HeapDebugPrint, IS_HEAP_TAGGING_ENABLED, Lock, MmHeapDeCommitFreeBlockThreshold, MmHeapDeCommitTotalFreeThreshold, MmHeapSegmentCommit, MmHeapSegmentReserve, n, _HEAP_UNCOMMMTTED_RANGE::Next, NT_SUCCESS, NtGlobalFlag, NTSTATUS(), NULL, PAGE_SIZE, PHEAP_LOCK, PHEAP_LOOKASIDE, PHEAP_PSEUDO_TAG_ENTRY, ROUND_UP_TO_POWER2, RTL_PAGED_CODE, RtlAllocateHeap, RtlDebugCreateHeap(), RtlGetNtGlobalFlags(), RtlInitializeLockRoutine, RtlpAddHeapToProcessList(), RtlpDebugPageHeap, RtlpDebugPageHeapCreate(), RtlpDisableHeapLookaside, RtlpInitializeHeapLookaside(), RtlpInitializeHeapSegment(), Status, and USHORT.

Referenced by CsrpConnectToServer(), LdrpForkProcess(), LdrpInitializeProcess(), main(), RtlDebugCreateHeap(), RtlpDebugPageHeapCreate(), SmbTraceStart(), and UserClientDllInitialize().

00202 : 00203 00204 This routine initializes a heap. 00205 00206 Arguments: 00207 00208 Flags - Specifies optional attributes of the heap. 00209 00210 Valid Flags Values: 00211 00212 HEAP_NO_SERIALIZE - if set, then allocations and deallocations on 00213 this heap are NOT synchronized by these routines. 00214 00215 HEAP_GROWABLE - if set, then the heap is a "sparse" heap where 00216 memory is committed only as necessary instead of 00217 being preallocated. 00218 00219 HeapBase - if not NULL, this specifies the base address for memory 00220 to use as the heap. If NULL, memory is allocated by these routines. 00221 00222 ReserveSize - if not zero, this specifies the amount of virtual address 00223 space to reserve for the heap. 00224 00225 CommitSize - if not zero, this specifies the amount of virtual address 00226 space to commit for the heap. Must be less than ReserveSize. If 00227 zero, then defaults to one page. 00228 00229 Lock - if not NULL, this parameter points to the resource lock to 00230 use. Only valid if HEAP_NO_SERIALIZE is NOT set. 00231 00232 Parameters - optional heap parameters. 00233 00234 Return Value: 00235 00236 PVOID - a pointer to be used in accessing the created heap. 
00237 00238 --*/ 00239 00240 { 00241 ULONG_PTR HighestUserAddress; 00242 NTSTATUS Status; 00243 PHEAP Heap = NULL; 00244 PHEAP_SEGMENT Segment = NULL; 00245 PLIST_ENTRY FreeListHead; 00246 ULONG SizeOfHeapHeader; 00247 ULONG SegmentFlags; 00248 PVOID CommittedBase; 00249 PVOID UnCommittedBase; 00250 MEMORY_BASIC_INFORMATION MemoryInformation; 00251 SYSTEM_BASIC_INFORMATION SystemInformation; 00252 ULONG n; 00253 ULONG InitialCountOfUnusedUnCommittedRanges; 00254 SIZE_T MaximumHeapBlockSize; 00255 PVOID NextHeapHeaderAddress; 00256 PHEAP_UNCOMMMTTED_RANGE UnCommittedRange, *pp; 00257 RTL_HEAP_PARAMETERS TempParameters; 00258 ULONG NtGlobalFlag = RtlGetNtGlobalFlags(); 00259 00260 #ifndef NTOS_KERNEL_RUNTIME 00261 00262 PPEB Peb; 00263 00264 #else // NTOS_KERNEL_RUNTIME 00265 00266 extern SIZE_T MmHeapSegmentReserve; 00267 extern SIZE_T MmHeapSegmentCommit; 00268 extern SIZE_T MmHeapDeCommitTotalFreeThreshold; 00269 extern SIZE_T MmHeapDeCommitFreeBlockThreshold; 00270 00271 #endif // NTOS_KERNEL_RUNTIME 00272 00273 RTL_PAGED_CODE(); 00274 00275 #ifndef NTOS_KERNEL_RUNTIME 00276 #ifdef NTHEAP_ENABLED 00277 { 00278 if (Flags & NTHEAP_ENABLED_FLAG) { 00279 00280 Heap = RtlCreateNtHeap( Flags, NULL ); 00281 00282 if (Heap != NULL) { 00283 00284 return Heap; 00285 } 00286 00287 Flags &= ~NTHEAP_ENABLED_FLAG; 00288 } 00289 } 00290 #endif // NTHEAP_ENABLED 00291 #endif // NTOS_KERNEL_RUNTIME 00292 00293 // 00294 // Check if we should be using the page heap code. 
If not then turn 00295 // off any of the page heap flags before going on 00296 // 00297 00298 #ifdef DEBUG_PAGE_HEAP 00299 00300 if ( RtlpDebugPageHeap && ( HeapBase == NULL ) && ( Lock == NULL )) { 00301 00302 PVOID PageHeap; 00303 00304 PageHeap = RtlpDebugPageHeapCreate( 00305 00306 Flags, 00307 HeapBase, 00308 ReserveSize, 00309 CommitSize, 00310 Lock, 00311 Parameters ); 00312 00313 if (PageHeap != NULL) { 00314 return PageHeap; 00315 } 00316 00317 // 00318 // A `-1' value signals a recursive call from page heap 00319 // manager. We set this to null and continue creating 00320 // a normal heap. This small hack is required so that we 00321 // minimize the dependencies between the normal and the page 00322 // heap manager. 00323 // 00324 00325 if ((SIZE_T)Parameters == (SIZE_T)-1) { 00326 00327 Parameters = NULL; 00328 } 00329 } 00330 00331 Flags &= ~( HEAP_PROTECTION_ENABLED | 00332 HEAP_BREAK_WHEN_OUT_OF_VM | 00333 HEAP_NO_ALIGNMENT ); 00334 00335 #endif // DEBUG_PAGE_HEAP 00336 00337 // 00338 // If the caller does not want to skip heap validiation checks then we 00339 // need to validate the rest of the flags but simply masking out only 00340 // those flags that want on a create heap call 00341 // 00342 00343 if (!(Flags & HEAP_SKIP_VALIDATION_CHECKS)) { 00344 00345 if (Flags & ~HEAP_CREATE_VALID_MASK) { 00346 00347 HeapDebugPrint(( "Invalid flags (%08x) specified to RtlCreateHeap\n", Flags )); 00348 HeapDebugBreak( NULL ); 00349 00350 Flags &= HEAP_CREATE_VALID_MASK; 00351 } 00352 } 00353 00354 // 00355 // The maximum heap block size is really 0x7f000 which is 0x80000 minus a 00356 // page. Maximum block size is 0xfe00 and granularity shift is 3. 
00357 // 00358 00359 MaximumHeapBlockSize = HEAP_MAXIMUM_BLOCK_SIZE << HEAP_GRANULARITY_SHIFT; 00360 00361 // 00362 // Assume we're going to be successful until we're shown otherwise 00363 // 00364 00365 Status = STATUS_SUCCESS; 00366 00367 // 00368 // This part of the routine builds up local variable containing all the 00369 // parameters used to initialize the heap. First thing we do is zero 00370 // it out. 00371 // 00372 00373 RtlZeroMemory( &TempParameters, sizeof( TempParameters ) ); 00374 00375 // 00376 // If our caller supplied the optional heap parameters then we'll 00377 // make sure the size is good and copy over them over to our 00378 // local copy 00379 // 00380 00381 if (ARGUMENT_PRESENT( Parameters )) { 00382 00383 try { 00384 00385 if (Parameters->Length == sizeof( *Parameters )) { 00386 00387 RtlMoveMemory( &TempParameters, Parameters, sizeof( *Parameters ) ); 00388 } 00389 00390 } except( EXCEPTION_EXECUTE_HANDLER ) { 00391 00392 Status = GetExceptionCode(); 00393 } 00394 00395 if (!NT_SUCCESS( Status )) { 00396 00397 return NULL; 00398 } 00399 } 00400 00401 // 00402 // Set the parameter block to the local copy 00403 // 00404 00405 Parameters = &TempParameters; 00406 00407 // 00408 // If nt global flags tells us to always do tail or free checking 00409 // or to disable coalescing then force those bits set in the user 00410 // specified flags 00411 // 00412 00413 if (NtGlobalFlag & FLG_HEAP_ENABLE_TAIL_CHECK) { 00414 00415 Flags |= HEAP_TAIL_CHECKING_ENABLED; 00416 } 00417 00418 if (NtGlobalFlag & FLG_HEAP_ENABLE_FREE_CHECK) { 00419 00420 Flags |= HEAP_FREE_CHECKING_ENABLED; 00421 } 00422 00423 if (NtGlobalFlag & FLG_HEAP_DISABLE_COALESCING) { 00424 00425 Flags |= HEAP_DISABLE_COALESCE_ON_FREE; 00426 } 00427 00428 #ifndef NTOS_KERNEL_RUNTIME 00429 00430 // 00431 // In the non kernel case we also check if we should 00432 // validate parameters, validate all, or do stack backtraces 00433 // 00434 00435 Peb = NtCurrentPeb(); 00436 00437 if 
(NtGlobalFlag & FLG_HEAP_VALIDATE_PARAMETERS) { 00438 00439 Flags |= HEAP_VALIDATE_PARAMETERS_ENABLED; 00440 } 00441 00442 if (NtGlobalFlag & FLG_HEAP_VALIDATE_ALL) { 00443 00444 Flags |= HEAP_VALIDATE_ALL_ENABLED; 00445 } 00446 00447 if (NtGlobalFlag & FLG_USER_STACK_TRACE_DB) { 00448 00449 Flags |= HEAP_CAPTURE_STACK_BACKTRACES; 00450 } 00451 00452 // 00453 // Also in the non kernel case the PEB will have some state 00454 // variables that we need to set if the user hasn't specified 00455 // otherwise 00456 // 00457 00458 if (Parameters->SegmentReserve == 0) { 00459 00460 Parameters->SegmentReserve = Peb->HeapSegmentReserve; 00461 } 00462 00463 if (Parameters->SegmentCommit == 0) { 00464 00465 Parameters->SegmentCommit = Peb->HeapSegmentCommit; 00466 } 00467 00468 if (Parameters->DeCommitFreeBlockThreshold == 0) { 00469 00470 Parameters->DeCommitFreeBlockThreshold = Peb->HeapDeCommitFreeBlockThreshold; 00471 } 00472 00473 if (Parameters->DeCommitTotalFreeThreshold == 0) { 00474 00475 Parameters->DeCommitTotalFreeThreshold = Peb->HeapDeCommitTotalFreeThreshold; 00476 } 00477 #else // NTOS_KERNEL_RUNTIME 00478 00479 // 00480 // In the kernel case Mm has some global variables that we set 00481 // into the paramters if the user hasn't specified otherwise 00482 // 00483 00484 if (Parameters->SegmentReserve == 0) { 00485 00486 Parameters->SegmentReserve = MmHeapSegmentReserve; 00487 } 00488 00489 if (Parameters->SegmentCommit == 0) { 00490 00491 Parameters->SegmentCommit = MmHeapSegmentCommit; 00492 } 00493 00494 if (Parameters->DeCommitFreeBlockThreshold == 0) { 00495 00496 Parameters->DeCommitFreeBlockThreshold = MmHeapDeCommitFreeBlockThreshold; 00497 } 00498 00499 if (Parameters->DeCommitTotalFreeThreshold == 0) { 00500 00501 Parameters->DeCommitTotalFreeThreshold = MmHeapDeCommitTotalFreeThreshold; 00502 } 00503 #endif // NTOS_KERNEL_RUNTIME 00504 00505 // 00506 // Get the highest user address 00507 // 00508 00509 if 
(!NT_SUCCESS(ZwQuerySystemInformation(SystemBasicInformation, 00510 &SystemInformation, 00511 sizeof(SystemInformation), 00512 NULL))) { 00513 return NULL; 00514 } 00515 HighestUserAddress = SystemInformation.MaximumUserModeAddress; 00516 00517 // 00518 // If the user hasn't said what the largest allocation size is then 00519 // we should compute it as the difference between the highest and lowest 00520 // address less one page 00521 // 00522 00523 if (Parameters->MaximumAllocationSize == 0) { 00524 00525 Parameters->MaximumAllocationSize = (HighestUserAddress - 00526 (ULONG_PTR)MM_LOWEST_USER_ADDRESS - 00527 PAGE_SIZE ); 00528 } 00529 00530 // 00531 // Set the virtual memory threshold to be non zero and not more than the 00532 // maximum heap block size of 0x7f000. If the user specified one that is 00533 // too large we automatically and silently drop it down. 00534 // 00535 00536 if ((Parameters->VirtualMemoryThreshold == 0) || 00537 (Parameters->VirtualMemoryThreshold > MaximumHeapBlockSize)) { 00538 00539 Parameters->VirtualMemoryThreshold = MaximumHeapBlockSize; 00540 } 00541 00542 // 00543 // The default commit size is one page and the default reserve size is 00544 // 64 pages. 00545 // 00546 // **** this doesn't check that commit size if specified is less than 00547 // **** reserved size if specified 00548 // 00549 00550 if (!ARGUMENT_PRESENT( CommitSize )) { 00551 00552 CommitSize = PAGE_SIZE; 00553 00554 if (!ARGUMENT_PRESENT( ReserveSize )) { 00555 00556 ReserveSize = 64 * CommitSize; 00557 00558 } else { 00559 00560 ReserveSize = ROUND_UP_TO_POWER2( ReserveSize, PAGE_SIZE ); 00561 } 00562 00563 } else { 00564 00565 // 00566 // The heap actually uses space that is reserved and commited 00567 // to store internal data structures (the LOCK, 00568 // the HEAP_PSEUDO_TAG, etc.). These structures can be larger than 00569 // 4K especially on a 64-bit build. 
So, make sure the commit 00570 // is at least 8K in length which is the minimal page size for 00571 // 64-bit systems 00572 // 00573 00574 CommitSize = ROUND_UP_TO_POWER2(CommitSize, PAGE_SIZE); 00575 00576 if (!ARGUMENT_PRESENT( ReserveSize )) { 00577 00578 ReserveSize = ROUND_UP_TO_POWER2( CommitSize, 16 * PAGE_SIZE ); 00579 00580 } else { 00581 00582 ReserveSize = ROUND_UP_TO_POWER2( ReserveSize, PAGE_SIZE ); 00583 } 00584 00585 } 00586 00587 #ifndef NTOS_KERNEL_RUNTIME 00588 00589 // 00590 // In the non kernel case check if we are creating a debug heap 00591 // the test checks that skip validation checks is false. 00592 // 00593 00594 if (DEBUG_HEAP( Flags )) { 00595 00596 return RtlDebugCreateHeap( Flags, 00597 HeapBase, 00598 ReserveSize, 00599 CommitSize, 00600 Lock, 00601 Parameters ); 00602 } 00603 00604 #endif // NTOS_KERNEL_RUNTIME 00605 00606 // 00607 // Compute the size of the heap which will be the 00608 // heap struct itself and if we are to serialize with 00609 // our own lock then add room for the lock. If the 00610 // user did not supply the lock then set the lock 00611 // variable to -1. 00612 // 00613 00614 SizeOfHeapHeader = sizeof( HEAP ); 00615 00616 if (!(Flags & HEAP_NO_SERIALIZE)) { 00617 00618 if (ARGUMENT_PRESENT( Lock )) { 00619 00620 Flags |= HEAP_LOCK_USER_ALLOCATED; 00621 00622 } else { 00623 00624 SizeOfHeapHeader += sizeof( HEAP_LOCK ); 00625 Lock = (PHEAP_LOCK)-1; 00626 } 00627 00628 } else if (ARGUMENT_PRESENT( Lock )) { 00629 00630 // 00631 // In this error case the call said not to serialize but also fed us 00632 // a lock 00633 // 00634 00635 return NULL; 00636 } 00637 00638 // 00639 // See if the caller allocated the space for the heap. 
00640 // 00641 00642 if (ARGUMENT_PRESENT( HeapBase )) { 00643 00644 // 00645 // The call specified a heap base now check if there is 00646 // a caller supplied commit routine 00647 // 00648 00649 if (Parameters->CommitRoutine != NULL) { 00650 00651 // 00652 // The caller specified a commit routine so he caller 00653 // also needs to have given us certain parameters and make 00654 // sure the heap is not growable. Otherwise it is an error 00655 // 00656 00657 if ((Parameters->InitialCommit == 0) || 00658 (Parameters->InitialReserve == 0) || 00659 (Parameters->InitialCommit > Parameters->InitialReserve) || 00660 (Flags & HEAP_GROWABLE)) { 00661 00662 return NULL; 00663 } 00664 00665 // 00666 // Set the commited base and the uncommited base to the 00667 // proper pointers within the heap. 00668 // 00669 00670 CommittedBase = HeapBase; 00671 UnCommittedBase = (PCHAR)CommittedBase + Parameters->InitialCommit; 00672 ReserveSize = Parameters->InitialReserve; 00673 00674 // 00675 // Zero out a page of the heap where our first part goes 00676 // 00677 // **** what if the size is less than a page 00678 // 00679 00680 RtlZeroMemory( CommittedBase, PAGE_SIZE ); 00681 00682 } else { 00683 00684 // 00685 // The user gave us space but not commit routine 00686 // So query the base to get its size 00687 // 00688 00689 Status = ZwQueryVirtualMemory( NtCurrentProcess(), 00690 HeapBase, 00691 MemoryBasicInformation, 00692 &MemoryInformation, 00693 sizeof( MemoryInformation ), 00694 NULL ); 00695 00696 if (!NT_SUCCESS( Status )) { 00697 00698 return NULL; 00699 } 00700 00701 // 00702 // Make sure the user gave us a base address for this block 00703 // and that the memory is not free 00704 // 00705 00706 if (MemoryInformation.BaseAddress != HeapBase) { 00707 00708 return NULL; 00709 } 00710 00711 if (MemoryInformation.State == MEM_FREE) { 00712 00713 return NULL; 00714 } 00715 00716 // 00717 // Set our commit base to the start of the range 00718 // 00719 00720 CommittedBase = 
MemoryInformation.BaseAddress; 00721 00722 // 00723 // If the memory is commmitted then 00724 // we can zero out a page worth 00725 // 00726 00727 if (MemoryInformation.State == MEM_COMMIT) { 00728 00729 RtlZeroMemory( CommittedBase, PAGE_SIZE ); 00730 00731 // 00732 // Set the commit size and uncommited base according 00733 // to the start of the vm 00734 // 00735 00736 CommitSize = MemoryInformation.RegionSize; 00737 UnCommittedBase = (PCHAR)CommittedBase + CommitSize; 00738 00739 // 00740 // Find out the uncommited base is reserved and if so 00741 // the update the reserve size accordingly. 00742 // 00743 00744 Status = ZwQueryVirtualMemory( NtCurrentProcess(), 00745 UnCommittedBase, 00746 MemoryBasicInformation, 00747 &MemoryInformation, 00748 sizeof( MemoryInformation ), 00749 NULL ); 00750 00751 ReserveSize = CommitSize; 00752 00753 if ((NT_SUCCESS( Status )) && 00754 (MemoryInformation.State == MEM_RESERVE)) { 00755 00756 ReserveSize += MemoryInformation.RegionSize; 00757 } 00758 00759 } else { 00760 00761 // 00762 // The memory the user gave us is not committed so dummy 00763 // up these small nummbers 00764 // 00765 00766 CommitSize = PAGE_SIZE; 00767 UnCommittedBase = CommittedBase; 00768 } 00769 } 00770 00771 // 00772 // This user gave us a base and we've just taken care of the committed 00773 // bookkeeping. So mark this segment as user supplied and set the 00774 // heap 00775 // 00776 00777 SegmentFlags = HEAP_SEGMENT_USER_ALLOCATED; 00778 Heap = (PHEAP)HeapBase; 00779 00780 } else { 00781 00782 // 00783 // The user did not specify a heap base so we have to allocate the 00784 // vm here. First make sure the user did not give us a commit routine 00785 // 00786 00787 if (Parameters->CommitRoutine != NULL) { 00788 00789 return NULL; 00790 } 00791 00792 // 00793 // Reserve the amount of virtual address space requested. 
00794 // 00795 00796 Status = ZwAllocateVirtualMemory( NtCurrentProcess(), 00797 (PVOID *)&Heap, 00798 0, 00799 &ReserveSize, 00800 MEM_RESERVE, 00801 PAGE_READWRITE ); 00802 00803 if (!NT_SUCCESS( Status )) { 00804 00805 return NULL; 00806 } 00807 00808 // 00809 // Indicate that this segment is not user supplied 00810 // 00811 00812 SegmentFlags = 0; 00813 00814 // 00815 // Set the default commit size to one page 00816 // 00817 00818 if (!ARGUMENT_PRESENT( CommitSize )) { 00819 00820 CommitSize = PAGE_SIZE; 00821 } 00822 00823 // 00824 // Set the committed and uncommitted base to be the same the following 00825 // code will actually commit the page for us 00826 // 00827 00828 CommittedBase = Heap; 00829 UnCommittedBase = Heap; 00830 } 00831 00832 // 00833 // At this point we have a heap pointer, committed base, uncommitted base, 00834 // segment flags, commit size, and reserve size. If the committed and 00835 // uncommited base are the same then we need to commit the amount 00836 // specified by the commit size 00837 // 00838 00839 if (CommittedBase == UnCommittedBase) { 00840 00841 Status = ZwAllocateVirtualMemory( NtCurrentProcess(), 00842 (PVOID *)&CommittedBase, 00843 0, 00844 &CommitSize, 00845 MEM_COMMIT, 00846 PAGE_READWRITE ); 00847 00848 // 00849 // In the non successful case we need to back out any vm reservation 00850 // we did earlier 00851 // 00852 00853 if (!NT_SUCCESS( Status )) { 00854 00855 if (!ARGUMENT_PRESENT(HeapBase)) { 00856 00857 // 00858 // Return the reserved virtual address space. 
00859 // 00860 00861 ZwFreeVirtualMemory( NtCurrentProcess(), 00862 (PVOID *)&Heap, 00863 &ReserveSize, 00864 MEM_RELEASE ); 00865 00866 } 00867 00868 return NULL; 00869 } 00870 00871 // 00872 // The new uncommitted base is not adjusted above what we just 00873 // committed 00874 // 00875 00876 UnCommittedBase = (PVOID)((PCHAR)UnCommittedBase + CommitSize); 00877 } 00878 00879 // 00880 // At this point we have memory for the start of the heap committed and 00881 // ready to be initialized. So now we need initialize the heap 00882 // 00883 00884 // 00885 // Calculate the end of the heap header and make room for 8 uncommitted 00886 // range structures. Once we have the room for them then chain them 00887 // together and null terminate the chain 00888 // 00889 00890 NextHeapHeaderAddress = Heap + 1; 00891 00892 UnCommittedRange = (PHEAP_UNCOMMMTTED_RANGE)ROUND_UP_TO_POWER2( NextHeapHeaderAddress, 00893 sizeof( QUAD ) ); 00894 00895 InitialCountOfUnusedUnCommittedRanges = 8; 00896 00897 SizeOfHeapHeader += InitialCountOfUnusedUnCommittedRanges * sizeof( *UnCommittedRange ); 00898 00899 // 00900 // **** what a hack Pp is really a pointer to the next field of the 00901 // **** uncommmtted range structure. So we set next by setting through Pp 00902 // 00903 00904 pp = &Heap->UnusedUnCommittedRanges; 00905 00906 while (InitialCountOfUnusedUnCommittedRanges--) { 00907 00908 *pp = UnCommittedRange; 00909 pp = &UnCommittedRange->Next; 00910 UnCommittedRange += 1; 00911 } 00912 00913 NextHeapHeaderAddress = UnCommittedRange; 00914 00915 *pp = NULL; 00916 00917 // 00918 // Check if tagging is enabled in global flags. This check is always true 00919 // in a debug build. 00920 // 00921 // If tagging is enabled then make room for 129 pseudo tag heap entry. 00922 // Which is one more than the number of free lists. Also point the heap 00923 // header to this array of pseudo tags entries. 
00924 // 00925 00926 if (IS_HEAP_TAGGING_ENABLED()) { 00927 00928 Heap->PseudoTagEntries = (PHEAP_PSEUDO_TAG_ENTRY)ROUND_UP_TO_POWER2( NextHeapHeaderAddress, 00929 sizeof( QUAD ) ); 00930 00931 SizeOfHeapHeader += HEAP_NUMBER_OF_PSEUDO_TAG * sizeof( HEAP_PSEUDO_TAG_ENTRY ); 00932 00933 // 00934 // **** this advancement of the next heap address doesn't seem right 00935 // **** given that a pseudo heap entry is 12 ulongs in length and not 00936 // **** a single byte 00937 00938 NextHeapHeaderAddress = Heap->PseudoTagEntries + HEAP_NUMBER_OF_PSEUDO_TAG; 00939 } 00940 00941 // 00942 // Round the size of the heap header to the next 8 byte boundary 00943 // 00944 00945 SizeOfHeapHeader = (ULONG) ROUND_UP_TO_POWER2( SizeOfHeapHeader, 00946 HEAP_GRANULARITY ); 00947 00948 // 00949 // If the sizeof the heap header is larger than the native 00950 // page size, you have a problem. Further, if the CommitSize passed 00951 // in was smaller than the SizeOfHeapHeader, you may not even make it 00952 // this far before death... 00953 // 00954 // HeapDbgPrint() doesn't work for IA64 yet. 
00955 // 00956 // HeapDbgPrint(("Size of the heap header is %u bytes, commit was %u bytes\n", SizeOfHeapHeader, (ULONG) CommitSize)); 00957 // 00958 00959 // 00960 // Fill in the heap header fields 00961 // 00962 00963 Heap->Entry.Size = (USHORT)(SizeOfHeapHeader >> HEAP_GRANULARITY_SHIFT); 00964 Heap->Entry.Flags = HEAP_ENTRY_BUSY; 00965 00966 Heap->Signature = HEAP_SIGNATURE; 00967 Heap->Flags = Flags; 00968 Heap->ForceFlags = (Flags & (HEAP_NO_SERIALIZE | 00969 HEAP_GENERATE_EXCEPTIONS | 00970 HEAP_ZERO_MEMORY | 00971 HEAP_REALLOC_IN_PLACE_ONLY | 00972 HEAP_VALIDATE_PARAMETERS_ENABLED | 00973 HEAP_VALIDATE_ALL_ENABLED | 00974 HEAP_TAIL_CHECKING_ENABLED | 00975 HEAP_CREATE_ALIGN_16 | 00976 HEAP_FREE_CHECKING_ENABLED)); 00977 00978 Heap->FreeListsInUseTerminate = 0xFFFF; 00979 Heap->HeaderValidateLength = (USHORT)((PCHAR)NextHeapHeaderAddress - (PCHAR)Heap); 00980 Heap->HeaderValidateCopy = NULL; 00981 00982 // 00983 // Initialize the free list to be all empty 00984 // 00985 00986 FreeListHead = &Heap->FreeLists[ 0 ]; 00987 n = HEAP_MAXIMUM_FREELISTS; 00988 00989 while (n--) { 00990 00991 InitializeListHead( FreeListHead ); 00992 FreeListHead++; 00993 } 00994 00995 // 00996 // Make it so that there a no big block allocations 00997 // 00998 00999 InitializeListHead( &Heap->VirtualAllocdBlocks ); 01000 01001 // 01002 // Initialize the cricital section that controls access to 01003 // the free list. If the lock variable is -1 then the caller 01004 // did not supply a lock so we need to make room for one 01005 // and initialize it. 
01006 // 01007 01008 if (Lock == (PHEAP_LOCK)-1) { 01009 01010 Lock = (PHEAP_LOCK)NextHeapHeaderAddress; 01011 01012 Status = RtlInitializeLockRoutine( Lock ); 01013 01014 if (!NT_SUCCESS( Status )) { 01015 01016 return NULL; 01017 } 01018 01019 NextHeapHeaderAddress = (PHEAP_LOCK)Lock + 1; 01020 } 01021 01022 Heap->LockVariable = Lock; 01023 01024 01025 // 01026 // Initialize the first segment for the heap 01027 // 01028 01029 if (!RtlpInitializeHeapSegment( Heap, 01030 (PHEAP_SEGMENT)((PCHAR)Heap + SizeOfHeapHeader), 01031 0, 01032 SegmentFlags, 01033 CommittedBase, 01034 UnCommittedBase, 01035 (PCHAR)CommittedBase + ReserveSize )) { 01036 01037 return NULL; 01038 } 01039 01040 // 01041 // Fill in additional heap entry fields 01042 // 01043 01044 Heap->ProcessHeapsListIndex = 0; 01045 Heap->SegmentReserve = Parameters->SegmentReserve; 01046 Heap->SegmentCommit = Parameters->SegmentCommit; 01047 Heap->DeCommitFreeBlockThreshold = Parameters->DeCommitFreeBlockThreshold >> HEAP_GRANULARITY_SHIFT; 01048 Heap->DeCommitTotalFreeThreshold = Parameters->DeCommitTotalFreeThreshold >> HEAP_GRANULARITY_SHIFT; 01049 Heap->MaximumAllocationSize = Parameters->MaximumAllocationSize; 01050 01051 Heap->VirtualMemoryThreshold = (ULONG) (ROUND_UP_TO_POWER2( Parameters->VirtualMemoryThreshold, 01052 HEAP_GRANULARITY ) >> HEAP_GRANULARITY_SHIFT); 01053 01054 Heap->CommitRoutine = Parameters->CommitRoutine; 01055 01056 // 01057 // We either align the heap at 16 or 8 byte boundaries. The AlignRound 01058 // and AlignMask are used to bring allocation sizes up to the next 01059 // boundary. 
The align round includes the heap header and the optional 01060 // check tail size 01061 // 01062 01063 if (Flags & HEAP_CREATE_ALIGN_16) { 01064 01065 Heap->AlignRound = 15 + sizeof( HEAP_ENTRY ); 01066 Heap->AlignMask = (ULONG)~15; 01067 01068 } else { 01069 01070 Heap->AlignRound = HEAP_GRANULARITY - 1 + sizeof( HEAP_ENTRY ); 01071 Heap->AlignMask = (ULONG)~(HEAP_GRANULARITY - 1); 01072 } 01073 01074 if (Heap->Flags & HEAP_TAIL_CHECKING_ENABLED) { 01075 01076 Heap->AlignRound += CHECK_HEAP_TAIL_SIZE; 01077 } 01078 01079 #ifndef NTOS_KERNEL_RUNTIME 01080 01081 // 01082 // In the non kernel case we need to add this heap to the processes heap 01083 // list 01084 // 01085 01086 RtlpAddHeapToProcessList( Heap ); 01087 01088 // 01089 // Initialize the heap lookaide lists. This is only for the user mode 01090 // heap and the heap contains a pointer to the lookaside list array. 01091 // The array is sized the same as the dedicated free list. First we 01092 // allocate space for the lookaside list and then we initialize each 01093 // lookaside list. 01094 // 01095 // But the caller asked for no serialize or asked for non growable 01096 // heap then we won't enable the lookaside lists. 01097 // 01098 01099 Heap->Lookaside = NULL; 01100 Heap->LookasideLockCount = 0; 01101 01102 if ((!(Flags & HEAP_NO_SERIALIZE)) && 01103 ( (Flags & HEAP_GROWABLE)) && 01104 (!(RtlpDisableHeapLookaside))) { 01105 01106 ULONG i; 01107 01108 Heap->Lookaside = RtlAllocateHeap( Heap, 01109 Flags, 01110 sizeof(HEAP_LOOKASIDE) * HEAP_MAXIMUM_FREELISTS ); 01111 01112 if (Heap->Lookaside != NULL) { 01113 01114 for (i = 0; i < HEAP_MAXIMUM_FREELISTS; i += 1) { 01115 01116 RtlpInitializeHeapLookaside( &(((PHEAP_LOOKASIDE)(Heap->Lookaside))[i]), 01117 32 ); 01118 } 01119 } 01120 } 01121 01122 #endif // NTOS_KERNEL_RUNTIME 01123 01124 // 01125 // And return the fully initialized heap to our caller 01126 // 01127 01128 return (PVOID)Heap; 01129 }

PVOID RtlDebugAllocateHeap( IN PVOID HeapHandle, IN ULONG Flags, IN SIZE_T Size )

Definition at line 444 of file heapdbg.c.

References _HEAP::AlignMask, _HEAP::AlignRound, _HEAP_STOP_ON_VALUES::AllocAddress, _HEAP_ENTRY_EXTRA::AllocatorBackTraceIndex, _HEAP_STOP_ON_VALUES::AllocTag, EXCEPTION_CONTINUE_SEARCH, EXCEPTION_EXECUTE_HANDLER, FALSE, _HEAP_ENTRY::Flags, _HEAP::Flags, _HEAP::ForceFlags, HEAP_CAPTURE_STACK_BACKTRACES, HEAP_ENTRY_EXTRA_PRESENT, HEAP_SKIP_VALIDATION_CHECKS, HEAP_VALIDATE_ALL_ENABLED, HeapDebugBreak, HeapDebugPrint, HeapHandle, _HEAP_STOP_ON_TAG::HeapIndex, IF_DEBUG_PAGE_HEAP_THEN_RETURN, IS_HEAP_TAGGING_ENABLED, _HEAP::LockVariable, _HEAP::MaximumAllocationSize, NULL, _HEAP::ProcessHeapsListIndex, RtlAcquireLockRoutine, RtlAllocateHeapSlowly(), RtlpCheckHeapSignature(), RtlpDebugPageHeapAllocate(), RtlpGetExtraStuffPointer(), RtlpGetTagName(), RtlpHeapStopOn, RtlpValidateHeap(), RtlpValidateHeapHeaders(), RtlReleaseLockRoutine, SET_LAST_STATUS, Size, _HEAP_ENTRY::SmallTagIndex, _HEAP_ENTRY_EXTRA::TagIndex, _HEAP_STOP_ON_TAG::TagIndex, TRUE, and USHORT.

Referenced by RtlAllocateHeapSlowly().

00452 : 00453 00454 Arguments: 00455 00456 Return Value: 00457 00458 --*/ 00459 00460 { 00461 PHEAP Heap = (PHEAP)HeapHandle; 00462 BOOLEAN LockAcquired = FALSE; 00463 PVOID ReturnValue = NULL; 00464 SIZE_T AllocationSize; 00465 USHORT TagIndex; 00466 PHEAP_ENTRY BusyBlock; 00467 PHEAP_ENTRY_EXTRA ExtraStuff; 00468 00469 IF_DEBUG_PAGE_HEAP_THEN_RETURN( HeapHandle, 00470 RtlpDebugPageHeapAllocate( HeapHandle, Flags, Size )); 00471 00472 try { 00473 00474 try { 00475 00476 // 00477 // Validate that HeapAddress points to a HEAP structure. 00478 // 00479 00480 if (!RtlpCheckHeapSignature( Heap, "RtlAllocateHeap" )) { 00481 00482 ReturnValue = NULL; 00483 leave; 00484 } 00485 00486 Flags |= Heap->ForceFlags | HEAP_SETTABLE_USER_VALUE | HEAP_SKIP_VALIDATION_CHECKS; 00487 00488 // 00489 // Verify that the size did not wrap or exceed the limit for this heap. 00490 // 00491 00492 AllocationSize = (((Size ? Size : 1) + Heap->AlignRound) & Heap->AlignMask) + 00493 sizeof( HEAP_ENTRY_EXTRA ); 00494 00495 if ((AllocationSize < Size) || (AllocationSize > Heap->MaximumAllocationSize)) { 00496 00497 HeapDebugPrint(( "Invalid allocation size - %lx (exceeded %x)\n", 00498 Size, 00499 Heap->MaximumAllocationSize )); 00500 00501 ReturnValue = NULL; 00502 leave; 00503 } 00504 00505 // 00506 // Lock the heap 00507 // 00508 00509 if (!(Flags & HEAP_NO_SERIALIZE)) { 00510 00511 RtlAcquireLockRoutine( Heap->LockVariable ); 00512 00513 LockAcquired = TRUE; 00514 00515 Flags |= HEAP_NO_SERIALIZE; 00516 } 00517 00518 RtlpValidateHeap( Heap, FALSE ); 00519 00520 ReturnValue = RtlAllocateHeapSlowly( HeapHandle, Flags, Size ); 00521 00522 RtlpValidateHeapHeaders( Heap, TRUE ); 00523 00524 if (ReturnValue != NULL) { 00525 00526 BusyBlock = (PHEAP_ENTRY)ReturnValue - 1; 00527 00528 if (BusyBlock->Flags & HEAP_ENTRY_EXTRA_PRESENT) { 00529 00530 ExtraStuff = RtlpGetExtraStuffPointer( BusyBlock ); 00531 00532 #if i386 00533 00534 if (Heap->Flags & HEAP_CAPTURE_STACK_BACKTRACES) { 00535 00536 
ExtraStuff->AllocatorBackTraceIndex = (USHORT)RtlLogStackBackTrace(); 00537 00538 } else { 00539 00540 ExtraStuff->AllocatorBackTraceIndex = 0; 00541 } 00542 00543 #endif // i386 00544 00545 TagIndex = ExtraStuff->TagIndex; 00546 00547 } else { 00548 00549 TagIndex = BusyBlock->SmallTagIndex; 00550 } 00551 00552 if (Heap->Flags & HEAP_VALIDATE_ALL_ENABLED) { 00553 00554 RtlpValidateHeap( Heap, FALSE ); 00555 } 00556 } 00557 00558 if (ReturnValue != NULL) { 00559 00560 if ((ULONG_PTR)ReturnValue == RtlpHeapStopOn.AllocAddress) { 00561 00562 HeapDebugPrint(( "Just allocated block at %lx for 0x%x bytes\n", 00563 RtlpHeapStopOn.AllocAddress, 00564 Size )); 00565 00566 HeapDebugBreak( NULL ); 00567 00568 } else if ((IS_HEAP_TAGGING_ENABLED()) && 00569 (TagIndex != 0) && 00570 (TagIndex == RtlpHeapStopOn.AllocTag.TagIndex) && 00571 (Heap->ProcessHeapsListIndex == RtlpHeapStopOn.AllocTag.HeapIndex)) { 00572 00573 HeapDebugPrint(( "Just allocated block at %lx for 0x%x bytes with tag %ws\n", 00574 ReturnValue, 00575 Size, 00576 RtlpGetTagName( Heap, TagIndex ))); 00577 00578 HeapDebugBreak( NULL ); 00579 } 00580 } 00581 00582 } except( GetExceptionCode() == STATUS_NO_MEMORY ? EXCEPTION_CONTINUE_SEARCH : 00583 EXCEPTION_EXECUTE_HANDLER ) { 00584 00585 SET_LAST_STATUS( GetExceptionCode() ); 00586 00587 ReturnValue = NULL; 00588 } 00589 00590 } finally { 00591 00592 if (LockAcquired) { 00593 00594 RtlReleaseLockRoutine( Heap->LockVariable ); 00595 } 00596 } 00597 00598 return ReturnValue; 00599 }

PVOID RtlDebugCreateHeap( IN ULONG Flags, IN PVOID HeapBase OPTIONAL, IN SIZE_T ReserveSize OPTIONAL, IN SIZE_T CommitSize OPTIONAL, IN PVOID Lock OPTIONAL, IN PRTL_HEAP_PARAMETERS Parameters OPTIONAL )

Definition at line 204 of file heapdbg.c.

References _HEAP::AllocatorBackTraceIndex, _HEAP::Flags, HEAP_CAPTURE_STACK_BACKTRACES, HEAP_SKIP_VALIDATION_CHECKS, HeapDebugBreak, HeapDebugPrint, Lock, NT_SUCCESS, NtQueryVirtualMemory(), NTSTATUS(), NULL, RtlCreateHeap(), RtlpValidateHeapHeaders(), Status, TRUE, and USHORT.

Referenced by RtlCreateHeap().

00215 : 00216 00217 Arguments: 00218 00219 Return Value: 00220 00221 --*/ 00222 00223 { 00224 PHEAP Heap; 00225 NTSTATUS Status; 00226 MEMORY_BASIC_INFORMATION MemoryInformation; 00227 00228 if (ReserveSize <= sizeof( HEAP_ENTRY )) { 00229 00230 HeapDebugPrint(( "Invalid ReserveSize parameter - %lx\n", ReserveSize )); 00231 HeapDebugBreak( NULL ); 00232 00233 return NULL; 00234 } 00235 00236 if (ReserveSize < CommitSize) { 00237 00238 HeapDebugPrint(( "Invalid CommitSize parameter - %lx\n", CommitSize )); 00239 HeapDebugBreak( NULL ); 00240 00241 return NULL; 00242 } 00243 00244 if ((Flags & HEAP_NO_SERIALIZE) && ARGUMENT_PRESENT( Lock )) { 00245 00246 HeapDebugPrint(( "May not specify Lock parameter with HEAP_NO_SERIALIZE\n" )); 00247 HeapDebugBreak( NULL ); 00248 00249 return NULL; 00250 } 00251 00252 if (ARGUMENT_PRESENT( HeapBase )) { 00253 00254 Status = NtQueryVirtualMemory( NtCurrentProcess(), 00255 HeapBase, 00256 MemoryBasicInformation, 00257 &MemoryInformation, 00258 sizeof( MemoryInformation ), 00259 NULL ); 00260 00261 if (!NT_SUCCESS( Status )) { 00262 00263 HeapDebugPrint(( "Specified HeapBase (%lx) invalid, Status = %lx\n", 00264 HeapBase, 00265 Status )); 00266 00267 HeapDebugBreak( NULL ); 00268 00269 return NULL; 00270 } 00271 00272 if (MemoryInformation.BaseAddress != HeapBase) { 00273 00274 HeapDebugPrint(( "Specified HeapBase (%lx) != to BaseAddress (%lx)\n", 00275 HeapBase, 00276 MemoryInformation.BaseAddress )); 00277 00278 HeapDebugBreak( NULL ); 00279 00280 return NULL; 00281 } 00282 00283 if (MemoryInformation.State == MEM_FREE) { 00284 00285 HeapDebugPrint(( "Specified HeapBase (%lx) is free or not writable\n", 00286 MemoryInformation.BaseAddress )); 00287 00288 HeapDebugBreak( NULL ); 00289 00290 return NULL; 00291 } 00292 } 00293 00294 Heap = RtlCreateHeap( Flags | 00295 HEAP_SKIP_VALIDATION_CHECKS | 00296 HEAP_TAIL_CHECKING_ENABLED | 00297 HEAP_FREE_CHECKING_ENABLED, 00298 HeapBase, 00299 ReserveSize, 00300 CommitSize, 00301 Lock, 
00302 Parameters ); 00303 00304 if (Heap != NULL) { 00305 00306 #if i386 00307 00308 if (Heap->Flags & HEAP_CAPTURE_STACK_BACKTRACES) { 00309 00310 Heap->AllocatorBackTraceIndex = (USHORT)RtlLogStackBackTrace(); 00311 } 00312 00313 #endif // i386 00314 00315 RtlpValidateHeapHeaders( Heap, TRUE ); 00316 } 00317 00318 return Heap; 00319 }

BOOLEAN RtlDebugDestroyHeap( IN PVOID HeapHandle )
 

Definition at line 388 of file heapdbg.c.

References FALSE, _HEAP::HeaderValidateCopy, HeapDebugPrint, HeapHandle, n, NtFreeVirtualMemory(), NULL, RtlpCheckHeapSignature(), RtlpValidateHeap(), _HEAP::Signature, and TRUE.

Referenced by RtlDestroyHeap().

00394 : 00395 00396 Arguments: 00397 00398 Return Value: 00399 00400 --*/ 00401 00402 { 00403 PHEAP Heap = (PHEAP)HeapHandle; 00404 LIST_ENTRY ListEntry; 00405 SIZE_T n; 00406 00407 if (HeapHandle == NtCurrentPeb()->ProcessHeap) { 00408 00409 HeapDebugPrint(( "May not destroy the process heap at %x\n", HeapHandle )); 00410 00411 return FALSE; 00412 } 00413 00414 if (!RtlpCheckHeapSignature( Heap, "RtlDestroyHeap" )) { 00415 00416 return FALSE; 00417 } 00418 00419 if (!RtlpValidateHeap( Heap, FALSE )) { 00420 00421 return FALSE; 00422 } 00423 00424 // 00425 // Now mark the heap as invalid by zeroing the signature field. 00426 // 00427 00428 Heap->Signature = 0; 00429 00430 if (Heap->HeaderValidateCopy != NULL) { 00431 00432 n = 0; 00433 NtFreeVirtualMemory( NtCurrentProcess(), 00434 &Heap->HeaderValidateCopy, 00435 &n, 00436 MEM_RELEASE ); 00437 } 00438 00439 return TRUE; 00440 }

BOOLEAN RtlDebugFreeHeap( IN PVOID HeapHandle, IN ULONG Flags, IN PVOID BaseAddress )

Definition at line 797 of file heapdbg.c.

References EXCEPTION_EXECUTE_HANDLER, FALSE, _HEAP_ENTRY::Flags, _HEAP::ForceFlags, _HEAP_STOP_ON_VALUES::FreeAddress, _HEAP_STOP_ON_VALUES::FreeTag, HEAP_ENTRY_EXTRA_PRESENT, HEAP_GRANULARITY_SHIFT, HEAP_SKIP_VALIDATION_CHECKS, _HEAP_STOP_ON_TAG::HeapAndTagIndex, HeapDebugBreak, HeapDebugPrint, HeapHandle, _HEAP_STOP_ON_TAG::HeapIndex, IF_DEBUG_PAGE_HEAP_THEN_RETURN, IS_HEAP_TAGGING_ENABLED, _HEAP::LockVariable, NULL, _HEAP::ProcessHeapsListIndex, RtlAcquireLockRoutine, RtlFreeHeapSlowly(), RtlpCheckHeapSignature(), RtlpDebugPageHeapFree(), RtlpGetExtraStuffPointer(), RtlpGetTagName(), RtlpHeapStopOn, RtlpValidateHeap(), RtlpValidateHeapEntry(), RtlpValidateHeapHeaders(), RtlReleaseLockRoutine, SET_LAST_STATUS, Size, _HEAP_ENTRY::Size, _HEAP_ENTRY::SmallTagIndex, _HEAP_ENTRY_EXTRA::TagIndex, _HEAP_STOP_ON_TAG::TagIndex, TRUE, and USHORT.

Referenced by RtlFreeHeapSlowly().

00805 : 00806 00807 Arguments: 00808 00809 Return Value: 00810 00811 --*/ 00812 00813 { 00814 PHEAP Heap = (PHEAP)HeapHandle; 00815 PHEAP_ENTRY BusyBlock; 00816 PHEAP_ENTRY_EXTRA ExtraStuff; 00817 SIZE_T Size; 00818 BOOLEAN Result = FALSE; 00819 BOOLEAN LockAcquired = FALSE; 00820 USHORT TagIndex; 00821 00822 IF_DEBUG_PAGE_HEAP_THEN_RETURN( HeapHandle, 00823 RtlpDebugPageHeapFree( HeapHandle, Flags, BaseAddress )); 00824 00825 try { 00826 00827 try { 00828 00829 // 00830 // Validate that HeapAddress points to a HEAP structure. 00831 // 00832 00833 if (!RtlpCheckHeapSignature( Heap, "RtlFreeHeap" )) { 00834 00835 Result = FALSE; 00836 leave; 00837 } 00838 00839 Flags |= Heap->ForceFlags | HEAP_SKIP_VALIDATION_CHECKS; 00840 00841 // 00842 // Lock the heap 00843 // 00844 00845 if (!(Flags & HEAP_NO_SERIALIZE)) { 00846 00847 RtlAcquireLockRoutine( Heap->LockVariable ); 00848 00849 LockAcquired = TRUE; 00850 00851 Flags |= HEAP_NO_SERIALIZE; 00852 } 00853 00854 RtlpValidateHeap( Heap, FALSE ); 00855 00856 BusyBlock = (PHEAP_ENTRY)BaseAddress - 1; 00857 Size = BusyBlock->Size << HEAP_GRANULARITY_SHIFT; 00858 00859 if (RtlpValidateHeapEntry( Heap, BusyBlock, "RtlFreeHeap" )) { 00860 00861 if ((ULONG_PTR)BaseAddress == RtlpHeapStopOn.FreeAddress) { 00862 00863 HeapDebugPrint(( "About to free block at %lx\n", 00864 RtlpHeapStopOn.FreeAddress )); 00865 00866 HeapDebugBreak( NULL ); 00867 00868 } else if ((IS_HEAP_TAGGING_ENABLED()) && (RtlpHeapStopOn.FreeTag.HeapAndTagIndex != 0)) { 00869 00870 if (BusyBlock->Flags & HEAP_ENTRY_EXTRA_PRESENT) { 00871 00872 ExtraStuff = RtlpGetExtraStuffPointer( BusyBlock ); 00873 00874 TagIndex = ExtraStuff->TagIndex; 00875 00876 } else { 00877 00878 TagIndex = BusyBlock->SmallTagIndex; 00879 } 00880 00881 if ((TagIndex != 0) && 00882 (TagIndex == RtlpHeapStopOn.FreeTag.TagIndex) && 00883 (Heap->ProcessHeapsListIndex == RtlpHeapStopOn.FreeTag.HeapIndex)) { 00884 00885 HeapDebugPrint(( "About to free block at %lx with tag %ws\n", 00886 
BaseAddress, 00887 RtlpGetTagName( Heap, TagIndex ))); 00888 00889 HeapDebugBreak( NULL ); 00890 } 00891 } 00892 00893 Result = RtlFreeHeapSlowly( HeapHandle, Flags, BaseAddress ); 00894 00895 RtlpValidateHeapHeaders( Heap, TRUE ); 00896 RtlpValidateHeap( Heap, FALSE ); 00897 } 00898 00899 } except( EXCEPTION_EXECUTE_HANDLER ) { 00900 00901 SET_LAST_STATUS( GetExceptionCode() ); 00902 00903 Result = FALSE; 00904 } 00905 00906 } finally { 00907 00908 if (LockAcquired) { 00909 00910 RtlReleaseLockRoutine( Heap->LockVariable ); 00911 } 00912 } 00913 00914 return Result; 00915 }

ULONG RtlDebugSizeHeap( IN PVOID HeapHandle, IN ULONG Flags, IN PVOID BaseAddress )

Definition at line 1177 of file heapdbg.c.

References EXCEPTION_EXECUTE_HANDLER, FALSE, _HEAP::ForceFlags, HEAP_SKIP_VALIDATION_CHECKS, HeapHandle, IF_DEBUG_PAGE_HEAP_THEN_RETURN, _HEAP::LockVariable, RtlAcquireLockRoutine, RtlpCheckHeapSignature(), RtlpDebugPageHeapSize(), RtlpValidateHeap(), RtlpValidateHeapEntry(), RtlReleaseLockRoutine, RtlSizeHeap(), SET_LAST_STATUS, and TRUE.

Referenced by RtlSizeHeap().

01185 : 01186 01187 Arguments: 01188 01189 Return Value: 01190 01191 --*/ 01192 01193 { 01194 PHEAP Heap = (PHEAP)HeapHandle; 01195 PHEAP_ENTRY BusyBlock; 01196 BOOLEAN LockAcquired = FALSE; 01197 SIZE_T BusySize; 01198 01199 IF_DEBUG_PAGE_HEAP_THEN_RETURN( HeapHandle, 01200 RtlpDebugPageHeapSize( HeapHandle, Flags, BaseAddress )); 01201 01202 BusySize = 0xFFFFFFFF; 01203 01204 try { 01205 01206 try { 01207 01208 // 01209 // Validate that HeapAddress points to a HEAP structure. 01210 // 01211 01212 if (!RtlpCheckHeapSignature( Heap, "RtlSizeHeap" )) { 01213 01214 BusySize = FALSE; 01215 leave; 01216 } 01217 01218 Flags |= Heap->ForceFlags | HEAP_SKIP_VALIDATION_CHECKS; 01219 01220 // 01221 // Lock the heap 01222 // 01223 01224 if (!(Flags & HEAP_NO_SERIALIZE)) { 01225 01226 RtlAcquireLockRoutine( Heap->LockVariable ); 01227 01228 Flags |= HEAP_NO_SERIALIZE; 01229 01230 LockAcquired = TRUE; 01231 } 01232 01233 RtlpValidateHeap( Heap, FALSE ); 01234 01235 BusyBlock = (PHEAP_ENTRY)BaseAddress - 1; 01236 01237 if (RtlpValidateHeapEntry( Heap, BusyBlock, "RtlSizeHeap" )) { 01238 01239 BusySize = RtlSizeHeap( HeapHandle, Flags, BaseAddress ); 01240 } 01241 01242 } except( EXCEPTION_EXECUTE_HANDLER ) { 01243 01244 SET_LAST_STATUS( GetExceptionCode() ); 01245 } 01246 01247 } finally { 01248 01249 if (LockAcquired) { 01250 01251 RtlReleaseLockRoutine( Heap->LockVariable ); 01252 } 01253 } 01254 01255 return BusySize; 01256 }

NTSTATUS RtlDebugZeroHeap( IN PVOID HeapHandle, IN ULONG Flags )

Definition at line 1338 of file heapdbg.c.

References EXCEPTION_EXECUTE_HANDLER, FALSE, _HEAP::ForceFlags, HEAP_SKIP_VALIDATION_CHECKS, HeapHandle, IF_DEBUG_PAGE_HEAP_THEN_RETURN, _HEAP::LockVariable, NTSTATUS(), RtlAcquireLockRoutine, RtlpCheckHeapSignature(), RtlpDebugPageHeapZero(), RtlpValidateHeap(), RtlReleaseLockRoutine, RtlZeroHeap(), Status, and TRUE.

Referenced by RtlZeroHeap().

01345 : 01346 01347 Arguments: 01348 01349 Return Value: 01350 01351 --*/ 01352 01353 { 01354 NTSTATUS Status; 01355 PHEAP Heap = (PHEAP)HeapHandle; 01356 BOOLEAN LockAcquired = FALSE; 01357 SIZE_T LargestFreeSize; 01358 01359 IF_DEBUG_PAGE_HEAP_THEN_RETURN( HeapHandle, 01360 RtlpDebugPageHeapZero( HeapHandle, Flags )); 01361 01362 Status = STATUS_SUCCESS; 01363 LargestFreeSize = 0; 01364 01365 try { 01366 01367 try { 01368 01369 // 01370 // Validate that HeapAddress points to a HEAP structure. 01371 // 01372 01373 if (!RtlpCheckHeapSignature( Heap, "RtlZeroHeap" )) { 01374 01375 Status = STATUS_INVALID_PARAMETER; 01376 leave; 01377 } 01378 01379 Flags |= Heap->ForceFlags | HEAP_SKIP_VALIDATION_CHECKS; 01380 01381 // 01382 // Lock the heap 01383 // 01384 01385 if (!(Flags & HEAP_NO_SERIALIZE)) { 01386 01387 RtlAcquireLockRoutine( Heap->LockVariable ); 01388 01389 LockAcquired = TRUE; 01390 01391 Flags |= HEAP_NO_SERIALIZE; 01392 } 01393 01394 if (!RtlpValidateHeap( Heap, FALSE )) { 01395 01396 Status = STATUS_INVALID_PARAMETER; 01397 01398 } else { 01399 01400 Status = RtlZeroHeap( HeapHandle, Flags ); 01401 } 01402 01403 } except( EXCEPTION_EXECUTE_HANDLER ) { 01404 01405 Status = GetExceptionCode(); 01406 } 01407 01408 } finally { 01409 01410 if (LockAcquired) { 01411 01412 RtlReleaseLockRoutine( Heap->LockVariable ); 01413 } 01414 } 01415 01416 return Status; 01417 }

PVOID RtlDestroyHeap( IN PVOID HeapHandle )
 

Definition at line 1133 of file rtl/heap.c.

References DEBUG_HEAP, _HEAP::Flags, HEAP_LOCK_USER_ALLOCATED, HEAP_MAXIMUM_SEGMENTS, HEAP_VIRTUAL_ALLOC_ENTRY, HeapDebugPrint, HeapHandle, IF_DEBUG_PAGE_HEAP_THEN_RETURN, _HEAP::LockVariable, _HEAP_UCR_SEGMENT::Next, NULL, PHEAP_UCR_SEGMENT, RTL_PAGED_CODE, RtlDebugDestroyHeap(), RtlDeleteLockRoutine, RtlpDebugPageHeapDestroy(), RtlpDestroyHeapSegment(), RtlpDestroyTags(), RtlpRemoveHeapFromProcessList(), _HEAP::Segments, _HEAP::UCRSegments, _HEAP::VirtualAllocdBlocks, and VOID().

Referenced by RtlpDebugPageHeapDestroy(), SmbTraceDisconnect(), and UserClientDllInitialize().

01139 : 01140 01141 This routine is the opposite of Rtl Create Heap. It tears down an 01142 existing heap structure. 01143 01144 Arguments: 01145 01146 HeapHandle - Supplies a pointer to the heap being destroyed 01147 01148 Return Value: 01149 01150 PVOID - Returns null if the heap was destroyed completely and a 01151 pointer back to the heap if for some reason the heap could 01152 not be destroyed. 01153 01154 --*/ 01155 01156 { 01157 PHEAP Heap = (PHEAP)HeapHandle; 01158 PHEAP_SEGMENT Segment; 01159 PHEAP_UCR_SEGMENT UCRSegments; 01160 PLIST_ENTRY Head, Next; 01161 PVOID BaseAddress; 01162 SIZE_T RegionSize; 01163 UCHAR SegmentIndex; 01164 01165 // 01166 // Validate that HeapAddress points to a HEAP structure. 01167 // 01168 01169 RTL_PAGED_CODE(); 01170 01171 if (HeapHandle == NULL) { 01172 01173 HeapDebugPrint(( "Ignoring RtlDestroyHeap( NULL )\n" )); 01174 01175 return NULL; 01176 } 01177 01178 #ifndef NTOS_KERNEL_RUNTIME 01179 #ifdef NTHEAP_ENABLED 01180 { 01181 if (Heap->Flags & NTHEAP_ENABLED_FLAG) { 01182 01183 return RtlDestroyNtHeap( HeapHandle ); 01184 } 01185 } 01186 #endif // NTHEAP_ENABLED 01187 #endif // NTOS_KERNEL_RUNTIME 01188 01189 // 01190 // Check if this is the debug version of heap using page allocation 01191 // with guard pages 01192 // 01193 01194 IF_DEBUG_PAGE_HEAP_THEN_RETURN( HeapHandle, 01195 RtlpDebugPageHeapDestroy( HeapHandle )); 01196 01197 #ifndef NTOS_KERNEL_RUNTIME 01198 01199 // 01200 // In the non kernel case check if this is the debug version of heap 01201 // and of so then call the debug version to do the teardown 01202 // 01203 01204 if (DEBUG_HEAP( Heap->Flags )) { 01205 01206 if (!RtlDebugDestroyHeap( HeapHandle )) { 01207 01208 return HeapHandle; 01209 } 01210 } 01211 01212 // 01213 // We are not allowed to destroy the process heap 01214 // 01215 01216 if (HeapHandle == NtCurrentPeb()->ProcessHeap) { 01217 01218 return HeapHandle; 01219 } 01220 01221 #endif // NTOS_KERNEL_RUNTIME 01222 01223 // 01224 // For every big 
allocation we remove it from the list and free the 01225 // vm 01226 // 01227 01228 Head = &Heap->VirtualAllocdBlocks; 01229 Next = Head->Flink; 01230 01231 while (Head != Next) { 01232 01233 BaseAddress = CONTAINING_RECORD( Next, HEAP_VIRTUAL_ALLOC_ENTRY, Entry ); 01234 01235 Next = Next->Flink; 01236 RegionSize = 0; 01237 01238 ZwFreeVirtualMemory( NtCurrentProcess(), 01239 (PVOID *)&BaseAddress, 01240 &RegionSize, 01241 MEM_RELEASE ); 01242 } 01243 01244 #ifndef NTOS_KERNEL_RUNTIME 01245 01246 // 01247 // In the non kernel case we need to destory any heap tags we have setup 01248 // and remove this heap from the process heap list 01249 // 01250 01251 RtlpDestroyTags( Heap ); 01252 RtlpRemoveHeapFromProcessList( Heap ); 01253 01254 #endif // NTOS_KERNEL_RUNTIME 01255 01256 // 01257 // If the heap is serialized, delete the critical section created 01258 // by RtlCreateHeap. 01259 // 01260 01261 if (!(Heap->Flags & HEAP_NO_SERIALIZE)) { 01262 01263 if (!(Heap->Flags & HEAP_LOCK_USER_ALLOCATED)) { 01264 01265 (VOID)RtlDeleteLockRoutine( Heap->LockVariable ); 01266 } 01267 01268 Heap->LockVariable = NULL; 01269 } 01270 01271 // 01272 // For every uncommitted segment we free its vm 01273 // 01274 01275 UCRSegments = Heap->UCRSegments; 01276 Heap->UCRSegments = NULL; 01277 01278 while (UCRSegments) { 01279 01280 BaseAddress = UCRSegments; 01281 UCRSegments = UCRSegments->Next; 01282 RegionSize = 0; 01283 01284 ZwFreeVirtualMemory( NtCurrentProcess(), 01285 &BaseAddress, 01286 &RegionSize, 01287 MEM_RELEASE ); 01288 } 01289 01290 // 01291 // For every segment in the heap we call a worker routine to 01292 // destory the segment 01293 // 01294 01295 SegmentIndex = HEAP_MAXIMUM_SEGMENTS; 01296 01297 while (SegmentIndex--) { 01298 01299 Segment = Heap->Segments[ SegmentIndex ]; 01300 01301 if (Segment) { 01302 01303 RtlpDestroyHeapSegment( Segment ); 01304 } 01305 } 01306 01307 // 01308 // And we return to our caller 01309 // 01310 01311 return NULL; 01312 }

BOOLEAN
RtlFreeHeap (
    IN PVOID HeapHandle,
    IN ULONG Flags,
    IN PVOID BaseAddress
    )

/*++

Routine Description:

    This routine returns a previously allocated block back to its heap.
    This is the fast path; any flag that requires extra work (debug,
    tagging, serialization quirks) diverts to RtlFreeHeapSlowly.

Arguments:

    HeapHandle - Supplies a pointer to the owning heap structure

    Flags - Specifies the set of flags to use in the deallocation

    BaseAddress - Supplies a pointer to the block being freed

Return Value:

    BOOLEAN - TRUE if the block was properly freed and FALSE otherwise

--*/

{
    NTSTATUS Status;
    PHEAP Heap = (PHEAP)HeapHandle;
    PHEAP_ENTRY BusyBlock;
    SIZE_T FreeSize;
    BOOLEAN LockAcquired = FALSE;
    BOOLEAN ReturnValue = TRUE;

    RTL_PAGED_CODE();

    //
    //  Freeing a null pointer is a legal no-op, so just return success
    //

    if (BaseAddress == NULL) {

        return TRUE;
    }

#ifndef NTOS_KERNEL_RUNTIME
#ifdef NTHEAP_ENABLED
    {
        if (Heap->Flags & NTHEAP_ENABLED_FLAG) {

            return RtlFreeNtHeap( HeapHandle,
                                  Flags,
                                  BaseAddress );
        }
    }
#endif // NTHEAP_ENABLED
#endif // NTOS_KERNEL_RUNTIME

    //
    //  Complement the input flags with those enforced by the heap
    //

    Flags |= Heap->ForceFlags;

    //
    //  Now check if we should go the slow route
    //

    if (Flags & HEAP_SLOW_FLAGS) {

        return RtlFreeHeapSlowly( HeapHandle, Flags, BaseAddress );
    }

    //
    //  We can do everything in this routine.  So now backup to get
    //  a pointer to the start of the block
    //

    BusyBlock = (PHEAP_ENTRY)BaseAddress - 1;

    //
    //  Protect ourselves from bad callers by refusing to free blocks
    //  that do not have the busy bit set, blocks that are not
    //  eight-byte aligned, and blocks whose segment index is out of
    //  range (>= HEAP_MAXIMUM_SEGMENTS), all of which indicate that
    //  the pointer does not reference a valid heap header.  The
    //  header read itself is guarded by SEH in case the pointer is
    //  not even readable.
    //

    try {
        if ((!(BusyBlock->Flags & HEAP_ENTRY_BUSY)) ||
            (((ULONG_PTR)BaseAddress & 0x7) != 0) ||
            (BusyBlock->SegmentIndex >= HEAP_MAXIMUM_SEGMENTS)) {

            //
            //  Not a busy block, or it's not aligned or the segment is
            //  too big, meaning it's corrupt
            //

            SET_LAST_STATUS( STATUS_INVALID_PARAMETER );

            return FALSE;
        }
    } except(EXCEPTION_EXECUTE_HANDLER) {

        SET_LAST_STATUS( STATUS_INVALID_PARAMETER );
        return FALSE;
    }

    //
    //  If there is a lookaside list and the block is not a big allocation
    //  and the index is for a dedicated list then free the block to the
    //  lookaside list.  We capture the lookaside pointer from the heap
    //  once and only use the captured pointer, so a concurrent heap walk
    //  or lock (which may null the heap's pointer) cannot fault us
    //  between the null check and the use.
    //

#ifndef NTOS_KERNEL_RUNTIME

    {
        PHEAP_LOOKASIDE Lookaside = (PHEAP_LOOKASIDE)Heap->Lookaside;

        if ((Lookaside != NULL) &&
            (Heap->LookasideLockCount == 0) &&
            (!(BusyBlock->Flags & HEAP_ENTRY_VIRTUAL_ALLOC)) &&
            ((FreeSize = BusyBlock->Size) < HEAP_MAXIMUM_FREELISTS)) {

            if (RtlpFreeToHeapLookaside( &Lookaside[FreeSize], BaseAddress )) {

                return TRUE;
            }
        }
    }

#endif // NTOS_KERNEL_RUNTIME

    try {

        //
        //  Check if we need to lock the heap
        //

        if (!(Flags & HEAP_NO_SERIALIZE)) {

            RtlAcquireLockRoutine( Heap->LockVariable );

            LockAcquired = TRUE;
        }

        if (!(BusyBlock->Flags & HEAP_ENTRY_VIRTUAL_ALLOC)) {

            //
            //  This block is not a big allocation so we need to get its
            //  size and coalesce the blocks.  Note that the user mode
            //  heap does this conditionally on a heap flag.  The
            //  coalesce function returns the newly formed free block
            //  and the new size.
            //

            FreeSize = BusyBlock->Size;

#ifdef NTOS_KERNEL_RUNTIME

            BusyBlock = (PHEAP_ENTRY)RtlpCoalesceFreeBlocks( Heap,
                                                             (PHEAP_FREE_ENTRY)BusyBlock,
                                                             &FreeSize,
                                                             FALSE );

#else // NTOS_KERNEL_RUNTIME

            if (!(Heap->Flags & HEAP_DISABLE_COALESCE_ON_FREE)) {

                BusyBlock = (PHEAP_ENTRY)RtlpCoalesceFreeBlocks( Heap,
                                                                 (PHEAP_FREE_ENTRY)BusyBlock,
                                                                 &FreeSize,
                                                                 FALSE );
            }

#endif // NTOS_KERNEL_RUNTIME

            //
            //  Check for a small allocation that can go on a freelist
            //  first, these should never trigger a decommit.
            //

            HEAPASSERT(HEAP_MAXIMUM_FREELISTS < Heap->DeCommitFreeBlockThreshold);

            //
            //  If the allocation fits on a dedicated free list then
            //  insert it there.  If the block is not the last entry then
            //  verify the following block agrees on our size, and update
            //  the heap free space counter.
            //

            if (FreeSize < HEAP_MAXIMUM_FREELISTS) {

                RtlpFastInsertDedicatedFreeBlockDirect( Heap,
                                                        (PHEAP_FREE_ENTRY)BusyBlock,
                                                        (USHORT)FreeSize );

                if (!(BusyBlock->Flags & HEAP_ENTRY_LAST_ENTRY)) {

                    HEAPASSERT((BusyBlock + FreeSize)->PreviousSize == (USHORT)FreeSize);
                }

                Heap->TotalFreeSize += FreeSize;

            //
            //  Otherwise the block is too big for one of the dedicated
            //  free lists, so if the free size is under the decommit
            //  threshold by itself, or the total free in the heap is
            //  under the decommit threshold, we'll put this on a free
            //  list rather than decommit it.
            //

            } else if ((FreeSize < Heap->DeCommitFreeBlockThreshold) ||
                       ((Heap->TotalFreeSize + FreeSize) < Heap->DeCommitTotalFreeThreshold)) {

                //
                //  Check if the block can go into the [0] index free
                //  list, and if so then do the insert, verify the
                //  following block's PreviousSize, and update the heap's
                //  free space counter.
                //

                if (FreeSize <= (ULONG)HEAP_MAXIMUM_BLOCK_SIZE) {

                    RtlpFastInsertNonDedicatedFreeBlockDirect( Heap,
                                                               (PHEAP_FREE_ENTRY)BusyBlock,
                                                               (USHORT)FreeSize );

                    if (!(BusyBlock->Flags & HEAP_ENTRY_LAST_ENTRY)) {

                        HEAPASSERT((BusyBlock + FreeSize)->PreviousSize == (USHORT)FreeSize);
                    }

                    Heap->TotalFreeSize += FreeSize;

                } else {

                    //
                    //  The block is too big to go on a free list in its
                    //  entirety but we don't want to decommit anything,
                    //  so call a worker routine to break the block into
                    //  pieces that will fit on the free lists.
                    //

                    RtlpInsertFreeBlock( Heap, (PHEAP_FREE_ENTRY)BusyBlock, FreeSize );
                }

            //
            //  Otherwise the block is too big for any list and we should
            //  decommit it.
            //

            } else {

                RtlpDeCommitFreeBlock( Heap, (PHEAP_FREE_ENTRY)BusyBlock, FreeSize );
            }

        } else {

            //
            //  This is a big virtual block allocation.  To free it we
            //  only have to remove it from the heap's list of virtually
            //  allocated blocks, unlock the heap, and return the block
            //  to vm.
            //

            PHEAP_VIRTUAL_ALLOC_ENTRY VirtualAllocBlock;

            VirtualAllocBlock = CONTAINING_RECORD( BusyBlock, HEAP_VIRTUAL_ALLOC_ENTRY, BusyBlock );

            RemoveEntryList( &VirtualAllocBlock->Entry );

            //
            //  Release the lock here as there is no reason to hold it
            //  across the system call.
            //

            if (LockAcquired) {

                RtlReleaseLockRoutine( Heap->LockVariable );
                LockAcquired = FALSE;
            }

            FreeSize = 0;

            Status = ZwFreeVirtualMemory( NtCurrentProcess(),
                                          (PVOID *)&VirtualAllocBlock,
                                          &FreeSize,
                                          MEM_RELEASE );

            //
            //  Check if we had trouble freeing the block back to vm
            //  and return an error if necessary
            //

            if (!NT_SUCCESS( Status )) {

                SET_LAST_STATUS( Status );

                ReturnValue = FALSE;
            }
        }

    } finally {

        if (LockAcquired) {

            RtlReleaseLockRoutine( Heap->LockVariable );
        }
    }

    return ReturnValue;
}

BOOLEAN
RtlFreeHeapSlowly (
    IN PVOID HeapHandle,
    IN ULONG Flags,
    IN PVOID BaseAddress
    )

/*++

Routine Description:

    This routine returns a previously allocated block back to its heap.
    It is the slower companion of RtlFreeHeap and performs the extra
    checking, debug redirection, and tagging bookkeeping.

Arguments:

    HeapHandle - Supplies a pointer to the owning heap structure

    Flags - Specifies the set of flags to use in the deallocation
        (already OR'd with Heap->ForceFlags by the caller)

    BaseAddress - Supplies a pointer to the block being freed

Return Value:

    BOOLEAN - TRUE if the block was properly freed and FALSE otherwise

--*/

{
    NTSTATUS Status;
    PHEAP Heap = (PHEAP)HeapHandle;
    PHEAP_ENTRY BusyBlock;
    PHEAP_ENTRY_EXTRA ExtraStuff;
    SIZE_T FreeSize;
    BOOLEAN Result;
    BOOLEAN LockAcquired = FALSE;

#ifndef NTOS_KERNEL_RUNTIME

    USHORT TagIndex;

#endif // NTOS_KERNEL_RUNTIME

    RTL_PAGED_CODE();

#ifndef NTOS_KERNEL_RUNTIME

    //
    //  In the non kernel case hand the request to the debug heap
    //  routine when any debug flag is in effect.
    //

    if (DEBUG_HEAP( Flags )) {

        return RtlDebugFreeHeap( HeapHandle, Flags, BaseAddress );
    }

#endif // NTOS_KERNEL_RUNTIME

    //
    //  Assume failure until the free actually succeeds.
    //

    Result = FALSE;

    try {

        //
        //  Serialize against other heap operations unless told not to.
        //

        if (!(Flags & HEAP_NO_SERIALIZE)) {

            RtlAcquireLockRoutine( Heap->LockVariable );

            LockAcquired = TRUE;
        }

        try {

            //
            //  Back up from the user pointer to the heap header.
            //

            BusyBlock = (PHEAP_ENTRY)BaseAddress - 1;

            //
            //  Sanity screen: the header must carry the busy bit, the
            //  user pointer must be eight-byte aligned, and the segment
            //  index must be in range (< HEAP_MAXIMUM_SEGMENTS).
            //  Anything else means a bogus or corrupt pointer.  Note
            //  this test is the logical inverse of the one used in
            //  RtlFreeHeap.
            //

            if ((BusyBlock->Flags & HEAP_ENTRY_BUSY) &&
                (((ULONG_PTR)BaseAddress & 0x7) == 0) &&
                (BusyBlock->SegmentIndex < HEAP_MAXIMUM_SEGMENTS)) {

                if (BusyBlock->Flags & HEAP_ENTRY_VIRTUAL_ALLOC) {

                    //
                    //  A big virtual-block allocation: unhook it from the
                    //  heap's virtual-alloc list, retire its tag entry if
                    //  tagging is on, then hand the region back to vm.
                    //

                    PHEAP_VIRTUAL_ALLOC_ENTRY VirtualAllocBlock;

                    VirtualAllocBlock = CONTAINING_RECORD( BusyBlock, HEAP_VIRTUAL_ALLOC_ENTRY, BusyBlock );

                    RemoveEntryList( &VirtualAllocBlock->Entry );

#ifndef NTOS_KERNEL_RUNTIME

                    if (IS_HEAP_TAGGING_ENABLED()) {

                        RtlpUpdateTagEntry( Heap,
                                            VirtualAllocBlock->ExtraStuff.TagIndex,
                                            VirtualAllocBlock->CommitSize >> HEAP_GRANULARITY_SHIFT,
                                            0,
                                            VirtualFreeAction );
                    }

#endif // NTOS_KERNEL_RUNTIME

                    FreeSize = 0;

                    Status = ZwFreeVirtualMemory( NtCurrentProcess(),
                                                  (PVOID *)&VirtualAllocBlock,
                                                  &FreeSize,
                                                  MEM_RELEASE );

                    if (NT_SUCCESS( Status )) {

                        Result = TRUE;

                    } else {

                        SET_LAST_STATUS( Status );
                    }

                } else {

                    //
                    //  A normal heap block.  First, in the non kernel
                    //  case, retire any tag the block carries; the tag
                    //  lives either in the extra block (if present) or
                    //  in the header's SmallTagIndex.
                    //

#ifndef NTOS_KERNEL_RUNTIME

                    if (IS_HEAP_TAGGING_ENABLED()) {

                        if (BusyBlock->Flags & HEAP_ENTRY_EXTRA_PRESENT) {

                            ExtraStuff = (PHEAP_ENTRY_EXTRA)(BusyBlock + BusyBlock->Size - 1);

                            TagIndex = RtlpUpdateTagEntry( Heap,
                                                           ExtraStuff->TagIndex,
                                                           BusyBlock->Size,
                                                           0,
                                                           FreeAction );

                        } else {

                            TagIndex = RtlpUpdateTagEntry( Heap,
                                                           BusyBlock->SmallTagIndex,
                                                           BusyBlock->Size,
                                                           0,
                                                           FreeAction );
                        }

                    } else {

                        TagIndex = 0;
                    }

#endif // NTOS_KERNEL_RUNTIME

                    //
                    //  Capture the size being freed, then coalesce with
                    //  neighbors (unconditionally in kernel mode, and
                    //  unless disabled by heap flag in user mode).  The
                    //  coalesce routine hands back the merged block and
                    //  updated size.
                    //

                    FreeSize = BusyBlock->Size;

#ifndef NTOS_KERNEL_RUNTIME

                    if (!(Heap->Flags & HEAP_DISABLE_COALESCE_ON_FREE)) {

#endif // NTOS_KERNEL_RUNTIME

                        BusyBlock = (PHEAP_ENTRY)RtlpCoalesceFreeBlocks( Heap, (PHEAP_FREE_ENTRY)BusyBlock, &FreeSize, FALSE );

#ifndef NTOS_KERNEL_RUNTIME

                    }

#endif // NTOS_KERNEL_RUNTIME

                    //
                    //  Keep the block on a free list unless it is large
                    //  enough (by itself or combined with the heap's
                    //  total free space) to warrant a decommit.
                    //

                    if ((FreeSize < Heap->DeCommitFreeBlockThreshold) ||
                        ((Heap->TotalFreeSize + FreeSize) < Heap->DeCommitTotalFreeThreshold)) {

                        if (FreeSize <= (ULONG)HEAP_MAXIMUM_BLOCK_SIZE) {

                            //
                            //  Fits on a dedicated free list: insert it,
                            //  verify the following entry agrees on our
                            //  size, and account for the free space.
                            //

                            RtlpInsertFreeBlockDirect( Heap, (PHEAP_FREE_ENTRY)BusyBlock, (USHORT)FreeSize );

                            if (!(BusyBlock->Flags & HEAP_ENTRY_LAST_ENTRY)) {

                                HEAPASSERT((BusyBlock + FreeSize)->PreviousSize == (USHORT)FreeSize);
                            }

                            Heap->TotalFreeSize += FreeSize;

                        } else {

                            //
                            //  Too large for a dedicated list: the worker
                            //  places it on the non dedicated list.
                            //

                            RtlpInsertFreeBlock( Heap, (PHEAP_FREE_ENTRY)BusyBlock, FreeSize );
                        }

#ifndef NTOS_KERNEL_RUNTIME

                        //
                        //  If the block had a tag, record it (and on x86,
                        //  optionally a stack backtrace) in the free-entry
                        //  extra area so the freed state remains traceable.
                        //

                        if (TagIndex != 0) {

                            PHEAP_FREE_ENTRY_EXTRA FreeExtra;

                            BusyBlock->Flags |= HEAP_ENTRY_EXTRA_PRESENT;

                            FreeExtra = (PHEAP_FREE_ENTRY_EXTRA)(BusyBlock + BusyBlock->Size) - 1;

                            FreeExtra->TagIndex = TagIndex;
                            FreeExtra->FreeBackTraceIndex = 0;

#if i386

                            if (Heap->Flags & HEAP_CAPTURE_STACK_BACKTRACES) {

                                FreeExtra->FreeBackTraceIndex = (USHORT)RtlLogStackBackTrace();
                            }

#endif // i386

                        }

#endif // NTOS_KERNEL_RUNTIME

                    } else {

                        //
                        //  Big enough to give pages back: let the worker
                        //  decommit the block.
                        //

                        RtlpDeCommitFreeBlock( Heap, (PHEAP_FREE_ENTRY)BusyBlock, FreeSize );
                    }

                    Result = TRUE;
                }

            } else {

                //
                //  Not a busy block, or it's not aligned, or the segment
                //  index is out of range, meaning it's corrupt.
                //

                SET_LAST_STATUS( STATUS_INVALID_PARAMETER );
            }

        } except( EXCEPTION_EXECUTE_HANDLER ) {

            SET_LAST_STATUS( GetExceptionCode() );

            Result = FALSE;
        }

    } finally {

        if (LockAcquired) {

            RtlReleaseLockRoutine( Heap->LockVariable );
        }
    }

    return Result;
}

BOOLEAN
RtlpCheckBusyBlockTail (
    IN PHEAP_ENTRY BusyBlock
    )

/*++

Routine Description:

    This routine checks to see if the bytes beyond the user specified
    allocation have been modified.  It does this by checking for a tail
    fill pattern.

Arguments:

    BusyBlock - Supplies the heap block being queried

Return Value:

    BOOLEAN - TRUE if the tail is still okay and FALSE otherwise

--*/

{
    PCHAR Tail;
    SIZE_T Size, cbEqual;

    RTL_PAGED_CODE();

    //
    //  Compute the user allocated size of the input heap block.  Big
    //  (virtual) allocations have their own size accounting; normal
    //  blocks derive it from the granularity-scaled header size minus
    //  the unused bytes.
    //

    if (BusyBlock->Flags & HEAP_ENTRY_VIRTUAL_ALLOC) {

        Size = RtlpGetSizeOfBigBlock( BusyBlock );

    } else {

        Size = (BusyBlock->Size << HEAP_GRANULARITY_SHIFT) - BusyBlock->UnusedBytes;
    }

    //
    //  Compute a pointer to the tail of the input block.  This is the
    //  space right after the user allocated portion.
    //

    Tail = (PCHAR)(BusyBlock + 1) + Size;

    //
    //  Check if the tail fill pattern is still intact.
    //

    cbEqual = RtlCompareMemory( Tail,
                                CheckHeapFillPattern,
                                CHECK_HEAP_TAIL_SIZE );

    //
    //  If fewer than CHECK_HEAP_TAIL_SIZE bytes matched then someone
    //  wrote past the user specified allocation size; cbEqual is the
    //  offset of the first corrupted byte.
    //

    if (cbEqual != CHECK_HEAP_TAIL_SIZE) {

        HeapDebugPrint(( "Heap block at %p modified at %p past requested size of %lx\n",
                         BusyBlock,
                         Tail + cbEqual,
                         Size ));

        HeapDebugBreak( BusyBlock );

        return FALSE;

    } else {

        return TRUE;
    }
}

PHEAP_FREE_ENTRY
RtlpCoalesceFreeBlocks (
    IN PHEAP Heap,
    IN PHEAP_FREE_ENTRY FreeBlock,
    IN OUT PSIZE_T FreeSize,
    IN BOOLEAN RemoveFromFreeList
    )

/*++

Routine Description:

    This routine coalesces a free block with its free neighbors: first
    the preceding block, then the following one, whenever the merged
    size still fits on a free list.

Arguments:

    Heap - Supplies a pointer to the heap being manipulated

    FreeBlock - Supplies a pointer to the free block that we want coalesced

    FreeSize - Supplies the size, in bytes, of the free block.  On return
        it contains the size, in bytes, of the newly coalesced free block

    RemoveFromFreeList - Indicates if the input free block is already on a
        free list and needs to be removed before coalescing

Return Value:

    PHEAP_FREE_ENTRY - returns a pointer to the newly coalesced free block

--*/

{
    PHEAP_FREE_ENTRY PrevBlock, NextBlock;

    RTL_PAGED_CODE();

    //
    //  Locate the block immediately before us via our PreviousSize.
    //

    PrevBlock = (PHEAP_FREE_ENTRY)((PHEAP_ENTRY)FreeBlock - FreeBlock->PreviousSize);

    //
    //  Merge backwards when a distinct predecessor exists, it is free,
    //  and the combined size still fits on a free list.
    //

    if ((PrevBlock != FreeBlock) &&
        !(PrevBlock->Flags & HEAP_ENTRY_BUSY) &&
        ((*FreeSize + PrevBlock->Size) <= HEAP_MAXIMUM_BLOCK_SIZE)) {

        HEAPASSERT(FreeBlock->PreviousSize == PrevBlock->Size);

        //
        //  Take the input block off its free list first if asked to;
        //  after that it no longer needs removing.
        //

        if (RemoveFromFreeList) {

            RtlpRemoveFreeBlock( Heap, FreeBlock );

            Heap->TotalFreeSize -= FreeBlock->Size;

            RemoveFromFreeList = FALSE;
        }

        //
        //  The predecessor comes off its free list too.
        //

        RtlpRemoveFreeBlock( Heap, PrevBlock );

        //
        //  Propagate the last-entry flag (if set) from the block being
        //  freed to the predecessor, updating the segment's notion of
        //  its last entry when needed.
        //

        PrevBlock->Flags = FreeBlock->Flags & HEAP_ENTRY_LAST_ENTRY;

        if( PrevBlock->Flags & HEAP_ENTRY_LAST_ENTRY ) {

            PHEAP_SEGMENT Segment;

            Segment = Heap->Segments[PrevBlock->SegmentIndex];
            Segment->LastEntryInSegment = (PHEAP_ENTRY)PrevBlock;
        }

        //
        //  The merged block now starts at the predecessor and spans
        //  both; adjust the running size and the heap's free total.
        //

        FreeBlock = PrevBlock;

        *FreeSize += PrevBlock->Size;

        Heap->TotalFreeSize -= PrevBlock->Size;

        FreeBlock->Size = (USHORT)*FreeSize;

        //
        //  Keep the successor's PreviousSize in sync if there is one.
        //

        if (!(FreeBlock->Flags & HEAP_ENTRY_LAST_ENTRY)) {

            ((PHEAP_ENTRY)FreeBlock + *FreeSize)->PreviousSize = (USHORT)*FreeSize;
        }
    }

    //
    //  Now try to merge forwards, provided a following block exists.
    //

    if (!(FreeBlock->Flags & HEAP_ENTRY_LAST_ENTRY)) {

        NextBlock = (PHEAP_FREE_ENTRY)((PHEAP_ENTRY)FreeBlock + *FreeSize);

        //
        //  Same conditions as the backward merge: the successor must be
        //  free and the union must still fit on a free list.
        //

        if (!(NextBlock->Flags & HEAP_ENTRY_BUSY) &&
            ((*FreeSize + NextBlock->Size) <= HEAP_MAXIMUM_BLOCK_SIZE)) {

            HEAPASSERT(*FreeSize == NextBlock->PreviousSize);

            if (RemoveFromFreeList) {

                RtlpRemoveFreeBlock( Heap, FreeBlock );

                Heap->TotalFreeSize -= FreeBlock->Size;

                //
                //  **** this assignment isn't necessary because there isn't
                //  **** any more merging after this one
                //

                RemoveFromFreeList = FALSE;
            }

            //
            //  Pull the last-entry flag up from the successor, fixing up
            //  the segment's last-entry pointer when it applies.
            //

            FreeBlock->Flags = NextBlock->Flags & HEAP_ENTRY_LAST_ENTRY;

            if( FreeBlock->Flags & HEAP_ENTRY_LAST_ENTRY ) {

                PHEAP_SEGMENT Segment;

                Segment = Heap->Segments[FreeBlock->SegmentIndex];
                Segment->LastEntryInSegment = (PHEAP_ENTRY)FreeBlock;
            }

            //
            //  Unlink the successor and fold its size into ours.
            //

            RtlpRemoveFreeBlock( Heap, NextBlock );

            *FreeSize += NextBlock->Size;

            Heap->TotalFreeSize -= NextBlock->Size;

            FreeBlock->Size = (USHORT)*FreeSize;

            //
            //  And again keep the next entry's PreviousSize in sync.
            //

            if (!(FreeBlock->Flags & HEAP_ENTRY_LAST_ENTRY)) {

                ((PHEAP_ENTRY)FreeBlock + *FreeSize)->PreviousSize = (USHORT)*FreeSize;
            }
        }
    }

    //
    //  Hand the (possibly merged) free block back to the caller.
    //

    return FreeBlock;
}

PHEAP_UNCOMMMTTED_RANGE
RtlpCreateUnCommittedRange (
    IN PHEAP_SEGMENT Segment
    )

/*++

Routine Description:

    This routine adds a new uncommitted range structure to the specified
    heap segment.  It works by popping the heap's stack of unused
    uncommitted range structures; when that stack is empty it first
    replenishes it by committing more pages in a UCR segment (reserving
    a fresh one when necessary).

Arguments:

    Segment - Supplies the heap segment being modified

Return Value:

    PHEAP_UNCOMMITTED_RANGE - returns a pointer to the newly created
        uncommitted range structure, or NULL on memory failure

--*/

{
    NTSTATUS Status;
    PVOID FirstEntry, LastEntry;
    PHEAP_UNCOMMMTTED_RANGE UnCommittedRange, *pp;
    SIZE_T ReserveSize, CommitSize;
    PHEAP_UCR_SEGMENT UCRSegment;

    RTL_PAGED_CODE();

    //
    //  pp tracks the head of the heap's recycle list of unused
    //  uncommitted range structures.
    //

    pp = &Segment->Heap->UnusedUnCommittedRanges;

    //
    //  An empty recycle list means we must manufacture more entries
    //  before we can pop one.
    //

    if (*pp == NULL) {

        UCRSegment = Segment->Heap->UCRSegments;

        //
        //  If there is no UCR segment at all, or the current one is
        //  fully committed (committed == reserved, i.e. used up),
        //  reserve a brand new one.
        //

        if ((UCRSegment == NULL) ||
            (UCRSegment->CommittedSize == UCRSegment->ReservedSize)) {

            //
            //  Reserve 16 pages up front and commit just the first page
            //  for now.
            //

            ReserveSize = PAGE_SIZE * 16;
            UCRSegment = NULL;

            Status = ZwAllocateVirtualMemory( NtCurrentProcess(),
                                              &UCRSegment,
                                              0,
                                              &ReserveSize,
                                              MEM_RESERVE,
                                              PAGE_READWRITE );

            if (!NT_SUCCESS( Status )) {

                return NULL;
            }

            CommitSize = PAGE_SIZE;

            Status = ZwAllocateVirtualMemory( NtCurrentProcess(),
                                              &UCRSegment,
                                              0,
                                              &CommitSize,
                                              MEM_COMMIT,
                                              PAGE_READWRITE );

            //
            //  On commit failure, give back the reservation before
            //  bailing out.
            //

            if (!NT_SUCCESS( Status )) {

                ZwFreeVirtualMemory( NtCurrentProcess(),
                                     &UCRSegment,
                                     &ReserveSize,
                                     MEM_RELEASE );

                return NULL;
            }

            //
            //  Push the new segment on the front of the heap's UCR
            //  segment list and record its sizes.
            //

            UCRSegment->Next = Segment->Heap->UCRSegments;
            Segment->Heap->UCRSegments = UCRSegment;

            UCRSegment->ReservedSize = ReserveSize;
            UCRSegment->CommittedSize = CommitSize;

            //
            //  The usable space begins just past the segment header.
            //

            FirstEntry = (PCHAR)(UCRSegment + 1);

        } else {

            //
            //  An existing UCR segment still has reserved-but-uncommitted
            //  space: commit one more page of it.  FirstEntry then points
            //  at the freshly committed region.
            //

            CommitSize = PAGE_SIZE;
            FirstEntry = (PCHAR)UCRSegment + UCRSegment->CommittedSize;

            Status = ZwAllocateVirtualMemory( NtCurrentProcess(),
                                              &FirstEntry,
                                              0,
                                              &CommitSize,
                                              MEM_COMMIT,
                                              PAGE_READWRITE );

            if (!NT_SUCCESS( Status )) {

                return NULL;
            }

            UCRSegment->CommittedSize += CommitSize;
        }

        //
        //  [FirstEntry, LastEntry) now delimits fresh committed space.
        //

        LastEntry = (PCHAR)UCRSegment + UCRSegment->CommittedSize;

        //
        //  Carve the new space into range structures and thread them all
        //  onto the heap's recycle list so the pop below will succeed.
        //

        UnCommittedRange = (PHEAP_UNCOMMMTTED_RANGE)FirstEntry;

        pp = &Segment->Heap->UnusedUnCommittedRanges;

        while ((PCHAR)UnCommittedRange < (PCHAR)LastEntry) {

            *pp = UnCommittedRange;
            pp = &UnCommittedRange->Next;
            UnCommittedRange += 1;
        }

        //
        //  Terminate the list and reset pp to its head.
        //

        *pp = NULL;

        pp = &Segment->Heap->UnusedUnCommittedRanges;
    }

    //
    //  The recycle list is guaranteed non-empty here; pop its head and
    //  return it.
    //

    UnCommittedRange = *pp;
    *pp = UnCommittedRange->Next;

    return UnCommittedRange;
}

VOID
RtlpDeCommitFreeBlock (
    IN PHEAP Heap,
    IN PHEAP_FREE_ENTRY FreeBlock,
    IN SIZE_T FreeSize
    )

/*++

Routine Description:

    Decommits the whole pages spanned by a free block, typically because the
    block exceeds the heap's decommit threshold.  Any partial-page remainder
    at either end of the block is re-inserted on the free lists.

Arguments:

    Heap - Supplies the heap being manipulated

    FreeBlock - Supplies the free block whose pages are to be decommitted

    FreeSize - Supplies the size of the free block
               (NOTE(review): the arithmetic below treats this as a count of
               heap entries when computing the trailing boundary — confirm
               the units expected by callers)

Return Value:

    None.

--*/

{
    NTSTATUS Status;
    ULONG_PTR DeCommitAddress;
    SIZE_T DeCommitSize;
    USHORT LeadingFreeSize, TrailingFreeSize;
    PHEAP_SEGMENT Segment;
    PHEAP_FREE_ENTRY LeadingFreeBlock, TrailingFreeBlock;
    PHEAP_ENTRY LeadingBusyBlock, TrailingBusyBlock;
    PHEAP_UNCOMMMTTED_RANGE UnCommittedRange;

    RTL_PAGED_CODE();

    //
    //  When the heap owner supplied a commit routine we never decommit;
    //  instead the block simply goes back on the free lists.
    //

    if (Heap->CommitRoutine != NULL) {

        RtlpInsertFreeBlock( Heap, FreeBlock, FreeSize );

        return;
    }

    Segment = Heap->Segments[ FreeBlock->SegmentIndex ];

    //
    //  Round the start of the block up to a page boundary.  LeadingFreeSize
    //  is the number of heap entries consumed by that rounding.  If the
    //  block already starts page aligned and has a predecessor, remember
    //  that predecessor as the leading busy block.
    //

    LeadingBusyBlock = NULL;
    LeadingFreeBlock = FreeBlock;

    DeCommitAddress = ROUND_UP_TO_POWER2( LeadingFreeBlock, PAGE_SIZE );
    LeadingFreeSize = (USHORT)((PHEAP_ENTRY)DeCommitAddress - (PHEAP_ENTRY)LeadingFreeBlock);

    if (LeadingFreeSize == 1) {

        //
        //  A one-entry leading fragment is useless, so absorb one more page
        //  into the leading fragment rather than leave it behind.
        //

        DeCommitAddress += PAGE_SIZE;
        LeadingFreeSize += PAGE_SIZE >> HEAP_GRANULARITY_SHIFT;

    } else if (LeadingFreeBlock->PreviousSize != 0) {

        if (DeCommitAddress == (ULONG_PTR)LeadingFreeBlock) {

            LeadingBusyBlock = (PHEAP_ENTRY)LeadingFreeBlock - LeadingFreeBlock->PreviousSize;
        }
    }

    //
    //  Round the end of the block down to a page boundary.  TrailingFreeSize
    //  is the number of heap entries we had to back up.  If the block ends
    //  exactly on a page boundary and a successor exists, remember it as the
    //  trailing busy block.
    //

    TrailingBusyBlock = NULL;
    TrailingFreeBlock = (PHEAP_FREE_ENTRY)((PHEAP_ENTRY)FreeBlock + FreeSize);

    DeCommitSize = ROUND_DOWN_TO_POWER2( (ULONG_PTR)TrailingFreeBlock, PAGE_SIZE );
    TrailingFreeSize = (USHORT)((PHEAP_ENTRY)TrailingFreeBlock - (PHEAP_ENTRY)DeCommitSize);

    if (TrailingFreeSize == (sizeof( HEAP_ENTRY ) >> HEAP_GRANULARITY_SHIFT)) {

        //
        //  A one-entry trailing fragment is equally useless, so give back
        //  one more page to the trailing fragment.
        //

        DeCommitSize -= PAGE_SIZE;
        TrailingFreeSize += PAGE_SIZE >> HEAP_GRANULARITY_SHIFT;

    } else if ((TrailingFreeSize == 0) && !(FreeBlock->Flags & HEAP_ENTRY_LAST_ENTRY)) {

        TrailingBusyBlock = (PHEAP_ENTRY)TrailingFreeBlock;
    }

    //
    //  Back the trailing free block up over the fragment we just measured.
    //

    TrailingFreeBlock = (PHEAP_FREE_ENTRY)((PHEAP_ENTRY)TrailingFreeBlock - TrailingFreeSize);

    //
    //  DeCommitSize has been a pointer up to now; convert it into a byte
    //  count (zero when the rounded region is empty or inverted).
    //

    if (DeCommitSize > DeCommitAddress) {

        DeCommitSize -= DeCommitAddress;

    } else {

        DeCommitSize = 0;
    }

    if (DeCommitSize != 0) {

        //
        //  Pre-allocate the uncommitted-range record now so the bookkeeping
        //  after the decommit cannot fail.
        //

        UnCommittedRange = RtlpCreateUnCommittedRange( Segment );

        if (UnCommittedRange == NULL) {

            HeapDebugPrint(( "Failing creating uncommitted range (%x for %x)\n", DeCommitAddress, DeCommitSize ));

            RtlpInsertFreeBlock( Heap, LeadingFreeBlock, FreeSize );

            return;
        }

        Status = ZwFreeVirtualMemory( NtCurrentProcess(),
                                      (PVOID *)&DeCommitAddress,
                                      &DeCommitSize,
                                      MEM_DECOMMIT );

        //
        //  Return the pre-allocated record to the unused list; the insert
        //  below can then not fail for lack of a record.
        //

        RtlpDestroyUnCommittedRange( Segment, UnCommittedRange );

        if (NT_SUCCESS( Status )) {

            //
            //  Record the decommitted pages in the segment's uncommitted
            //  range list and page counter.
            //

            RtlpInsertUnCommittedPages( Segment,
                                        DeCommitAddress,
                                        DeCommitSize );

            Segment->NumberOfUnCommittedPages += (ULONG)(DeCommitSize / PAGE_SIZE);

            if (LeadingFreeSize != 0) {

                //
                //  The leading fragment becomes the last committed entry
                //  before the hole; flag it and put it on a free list.
                //

                LeadingFreeBlock->Flags = HEAP_ENTRY_LAST_ENTRY;
                LeadingFreeBlock->Size = LeadingFreeSize;
                Heap->TotalFreeSize += LeadingFreeSize;

                Segment->LastEntryInSegment = (PHEAP_ENTRY)LeadingFreeBlock;

                RtlpInsertFreeBlockDirect( Heap, LeadingFreeBlock, LeadingFreeSize );

            } else if (LeadingBusyBlock != NULL) {

                //
                //  No leading fragment: the preceding busy block now abuts
                //  uncommitted space.
                //

                LeadingBusyBlock->Flags |= HEAP_ENTRY_LAST_ENTRY;

                Segment->LastEntryInSegment = LeadingBusyBlock;

            } else if ((Segment->LastEntryInSegment >= (PHEAP_ENTRY)DeCommitAddress)
                                &&
                       ((PCHAR)Segment->LastEntryInSegment < ((PCHAR)DeCommitAddress + DeCommitSize))) {

                //
                //  The cached last entry fell inside the decommitted region;
                //  reset it to a known-good value.
                //

                Segment->LastEntryInSegment = Segment->FirstEntry;
            }

            if (TrailingFreeSize != 0) {

                //
                //  The trailing fragment starts a fresh committed run; set
                //  up its header and free-list it.
                //

                TrailingFreeBlock->PreviousSize = 0;
                TrailingFreeBlock->SegmentIndex = Segment->Entry.SegmentIndex;
                TrailingFreeBlock->Flags = 0;
                TrailingFreeBlock->Size = TrailingFreeSize;

                ((PHEAP_FREE_ENTRY)((PHEAP_ENTRY)TrailingFreeBlock + TrailingFreeSize))->PreviousSize = (USHORT)TrailingFreeSize;

                RtlpInsertFreeBlockDirect( Heap, TrailingFreeBlock, TrailingFreeSize );

                Heap->TotalFreeSize += TrailingFreeSize;

            } else if (TrailingBusyBlock != NULL) {

                //
                //  No trailing fragment: the succeeding block now has
                //  uncommitted space in front of it.
                //

                TrailingBusyBlock->PreviousSize = 0;
            }

        } else {

            //
            //  The decommit failed, so just free-list the original block.
            //

            RtlpInsertFreeBlock( Heap, LeadingFreeBlock, FreeSize );
        }

    } else {

        //
        //  Nothing spans a whole page, so just free-list the original block.
        //

        RtlpInsertFreeBlock( Heap, LeadingFreeBlock, FreeSize );
    }

    return;
}

NTSTATUS
RtlpDestroyHeapSegment (
    IN PHEAP_SEGMENT Segment
    )

/*++

Routine Description:

    Tears down an existing heap segment; afterwards it is as if the
    segment never existed.

Arguments:

    Segment - Supplies the heap segment being destroyed

Return Value:

    NTSTATUS - An appropriate status value

--*/

{
    PVOID BaseAddress;
    SIZE_T BytesToFree;

    RTL_PAGED_CODE();

    //
    //  Segments whose memory was supplied by the user are a noop here; the
    //  owner is assumed to release that memory itself.
    //

    if (Segment->Flags & HEAP_SEGMENT_USER_ALLOCATED) {

        return STATUS_SUCCESS;
    }

    //
    //  Release the segment's entire reservation (a zero region size with
    //  MEM_RELEASE frees the whole allocation).
    //

    BaseAddress = Segment->BaseAddress;
    BytesToFree = 0;

    return ZwFreeVirtualMemory( NtCurrentProcess(),
                                (PVOID *)&BaseAddress,
                                &BytesToFree,
                                MEM_RELEASE );
}

VOID
RtlpDestroyUnCommittedRange (
    IN PHEAP_SEGMENT Segment,
    IN PHEAP_UNCOMMMTTED_RANGE UnCommittedRange
    )

/*++

Routine Description:

    Returns an uncommitted range structure to the heap's list of unused
    uncommitted range structures.

Arguments:

    Segment - Supplies any segment in the heap being modified; it need not
        be the segment that contained the range

    UnCommittedRange - Supplies the uncommitted range structure being
        decommissioned

Return Value:

    None.

--*/

{
    RTL_PAGED_CODE();

    //
    //  Scrub the record before recycling it, for safety's sake.
    //

    UnCommittedRange->Address = 0;
    UnCommittedRange->Size = 0;

    //
    //  Push it onto the heap's stack of unused uncommitted ranges.
    //

    UnCommittedRange->Next = Segment->Heap->UnusedUnCommittedRanges;
    Segment->Heap->UnusedUnCommittedRanges = UnCommittedRange;

    return;
}

PHEAP_FREE_ENTRY
RtlpExtendHeap (
    IN PHEAP Heap,
    IN SIZE_T AllocationSize
    )

/*++

Routine Description:

    Extends the amount of committed memory in a heap, either by committing
    more pages in an existing segment or by creating a brand new segment.

Arguments:

    Heap - Supplies the heap being modified

    AllocationSize - Supplies the size, in bytes, by which the heap must
        be extended

Return Value:

    PHEAP_FREE_ENTRY - Returns a heap entry of at least the requested size,
        or NULL if the heap could not be extended

--*/

{
    NTSTATUS Status;
    PHEAP_SEGMENT Segment;
    PHEAP_FREE_ENTRY FreeBlock;
    UCHAR SegmentIndex, EmptySegmentIndex;
    ULONG NumberOfPages;
    SIZE_T CommitSize;
    SIZE_T ReserveSize;
    SIZE_T FreeSize;

    RTL_PAGED_CODE();

    //
    //  Round the request up to whole pages; FreeSize is that page count
    //  expressed back in bytes.
    //

    NumberOfPages = (ULONG) ((AllocationSize + PAGE_SIZE - 1) / PAGE_SIZE);
    FreeSize = NumberOfPages * PAGE_SIZE;

    //
    //  Scan the segment table for either an existing segment that can
    //  supply the pages or the first empty slot where a new segment could
    //  be created.
    //

    EmptySegmentIndex = HEAP_MAXIMUM_SEGMENTS;

    for (SegmentIndex = 0; SegmentIndex < HEAP_MAXIMUM_SEGMENTS; SegmentIndex += 1) {

        Segment = Heap->Segments[ SegmentIndex ];

        //
        //  NOTE(review): the page-count test looks redundant given that the
        //  largest uncommitted range is also checked.
        //

        if ((Segment != NULL) &&
            (NumberOfPages <= Segment->NumberOfUnCommittedPages) &&
            (FreeSize <= Segment->LargestUnCommittedRange)) {

            FreeBlock = RtlpFindAndCommitPages( Heap,
                                                Segment,
                                                &FreeSize,
                                                NULL );

            if (FreeBlock != NULL) {

                //
                //  Coalesce the new block with its neighbors, free-list it,
                //  and hand it back.
                //
                //  NOTE(review): converting FreeSize to heap-entry units
                //  before the coalesce looks suspect if the coalesce
                //  routine expects bytes — confirm its contract.
                //

                FreeSize = FreeSize >> HEAP_GRANULARITY_SHIFT;

                FreeBlock = RtlpCoalesceFreeBlocks( Heap, FreeBlock, &FreeSize, FALSE );

                RtlpInsertFreeBlock( Heap, FreeBlock, FreeSize );

                return FreeBlock;
            }

        } else if ((Segment == NULL) &&
                   (EmptySegmentIndex == HEAP_MAXIMUM_SEGMENTS)) {

            EmptySegmentIndex = SegmentIndex;
        }
    }

    //
    //  No existing segment could satisfy the request; if we found an empty
    //  slot and the heap may grow, build a new segment.
    //

    if ((EmptySegmentIndex != HEAP_MAXIMUM_SEGMENTS) &&
        (Heap->Flags & HEAP_GROWABLE)) {

        Segment = NULL;

        //
        //  Reserve at least the default segment reservation, or more when
        //  the current request is already larger than that.
        //

        if ((AllocationSize + PAGE_SIZE) > Heap->SegmentReserve) {

            ReserveSize = AllocationSize + PAGE_SIZE;

        } else {

            ReserveSize = Heap->SegmentReserve;
        }

        Status = ZwAllocateVirtualMemory( NtCurrentProcess(),
                                          (PVOID *)&Segment,
                                          0,
                                          &ReserveSize,
                                          MEM_RESERVE,
                                          PAGE_READWRITE );

        //
        //  On failure keep halving the reservation (never below the size
        //  needed for this request) until it succeeds or bottoms out.
        //

        while ((!NT_SUCCESS( Status )) && (ReserveSize != (AllocationSize + PAGE_SIZE))) {

            ReserveSize = ReserveSize / 2;

            if (ReserveSize < (AllocationSize + PAGE_SIZE)) {

                ReserveSize = (AllocationSize + PAGE_SIZE);
            }

            Status = ZwAllocateVirtualMemory( NtCurrentProcess(),
                                              (PVOID *)&Segment,
                                              0,
                                              &ReserveSize,
                                              MEM_RESERVE,
                                              PAGE_READWRITE );
        }

        if (NT_SUCCESS( Status )) {

            Heap->SegmentReserve += ReserveSize;

            //
            //  Commit either the default amount or enough to satisfy the
            //  current request, whichever is larger.
            //

            if ((AllocationSize + PAGE_SIZE) > Heap->SegmentCommit) {

                CommitSize = AllocationSize + PAGE_SIZE;

            } else {

                CommitSize = Heap->SegmentCommit;
            }

            Status = ZwAllocateVirtualMemory( NtCurrentProcess(),
                                              (PVOID *)&Segment,
                                              0,
                                              &CommitSize,
                                              MEM_COMMIT,
                                              PAGE_READWRITE );

            //
            //  A failed segment initialization is treated the same as a
            //  failed commit.
            //

            if (NT_SUCCESS( Status ) &&
                !RtlpInitializeHeapSegment( Heap,
                                            Segment,
                                            EmptySegmentIndex,
                                            0,
                                            Segment,
                                            (PCHAR)Segment + CommitSize,
                                            (PCHAR)Segment + ReserveSize)) {

                Status = STATUS_NO_MEMORY;
            }

            if (NT_SUCCESS(Status)) {

                return (PHEAP_FREE_ENTRY)Segment->FirstEntry;
            }

            //
            //  Either the commit or the initialization failed; releasing
            //  the reservation also decommits any committed pages.
            //

            ZwFreeVirtualMemory( NtCurrentProcess(),
                                 (PVOID *)&Segment,
                                 &ReserveSize,
                                 MEM_RELEASE );
        }
    }

#ifndef NTOS_KERNEL_RUNTIME

    //
    //  In user mode, when coalescing on free is disabled, coalesce the
    //  whole heap as a last resort and see if a usable block falls out.
    //

    if (Heap->Flags & HEAP_DISABLE_COALESCE_ON_FREE) {

        FreeBlock = RtlpCoalesceHeap( Heap );

        if ((FreeBlock != NULL) && (FreeBlock->Size >= AllocationSize)) {

            return FreeBlock;
        }
    }

#endif // NTOS_KERNEL_RUNTIME

    //
    //  The heap cannot grow or resources are exhausted.
    //

    return NULL;
}

PHEAP_FREE_ENTRY
RtlpFindAndCommitPages (
    IN PHEAP Heap,
    IN PHEAP_SEGMENT Segment,
    IN OUT PSIZE_T Size,
    IN PVOID AddressWanted OPTIONAL
    )

/*++

Routine Description:

    Searches the supplied segment for an uncommitted range large enough to
    satisfy the given size, commits it, and returns a heap entry describing
    the newly committed space.

Arguments:

    Heap - Supplies the heap being manipulated

    Segment - Supplies the segment being searched

    Size - Supplies the size being sought; on return it contains the size
        actually committed

    AddressWanted - Optionally supplies a required base address for the
        committed range

Return Value:

    PHEAP_FREE_ENTRY - Returns the newly committed range, or NULL if no
        uncommitted range satisfied the size and/or address requirement.

--*/

{
    NTSTATUS Status;
    PHEAP_ENTRY FirstEntry, LastEntry, PreviousLastEntry;
    PHEAP_UNCOMMMTTED_RANGE PreviousUnCommittedRange, UnCommittedRange, *pp;
    ULONG_PTR Address;
    SIZE_T Length;

    RTL_PAGED_CODE();

    //
    //  Walk the segment's list of uncommitted ranges.
    //

    PreviousUnCommittedRange = NULL;
    pp = &Segment->UnCommittedRanges;

    while ((UnCommittedRange = *pp) != NULL) {

        //
        //  A candidate must be large enough and, when the caller asked for
        //  a specific address, must start exactly there.
        //

        if ((UnCommittedRange->Size >= *Size) &&
            (!ARGUMENT_PRESENT( AddressWanted ) || (UnCommittedRange->Address == (ULONG_PTR)AddressWanted ))) {

            Address = UnCommittedRange->Address;

            //
            //  Commit through the heap's own commit routine when one was
            //  supplied, otherwise through the memory manager.
            //

            if (Heap->CommitRoutine != NULL) {

                Status = (Heap->CommitRoutine)( Heap,
                                                (PVOID *)&Address,
                                                Size );

            } else {

                Status = ZwAllocateVirtualMemory( NtCurrentProcess(),
                                                  (PVOID *)&Address,
                                                  0,
                                                  Size,
                                                  MEM_COMMIT,
                                                  PAGE_READWRITE );
            }

            if (!NT_SUCCESS( Status )) {

                return NULL;
            }

            //
            //  Account for the committed pages; when the range we used was
            //  the largest one, the cached largest size must be recomputed
            //  (it is zeroed here and fixed up below).
            //

            Segment->NumberOfUnCommittedPages -= (ULONG) (*Size / PAGE_SIZE);

            if (Segment->LargestUnCommittedRange == UnCommittedRange->Size) {

                Segment->LargestUnCommittedRange = 0;
            }

            FirstEntry = (PHEAP_ENTRY)Address;

            //
            //  Find the last real entry that precedes the newly committed
            //  space.  The cached LastEntryInSegment is used when it abuts
            //  the range; otherwise we start from the segment's first entry
            //  (or just past the previous uncommitted range) and walk
            //  forward to the entry flagged as last.
            //

            if ((Segment->LastEntryInSegment->Flags & HEAP_ENTRY_LAST_ENTRY) &&
                (ULONG_PTR)(Segment->LastEntryInSegment + Segment->LastEntryInSegment->Size) == UnCommittedRange->Address) {

                LastEntry = Segment->LastEntryInSegment;

            } else {

                if (PreviousUnCommittedRange == NULL) {

                    LastEntry = Segment->FirstEntry;

                } else {

                    LastEntry = (PHEAP_ENTRY)(PreviousUnCommittedRange->Address +
                                              PreviousUnCommittedRange->Size);
                }

                while (!(LastEntry->Flags & HEAP_ENTRY_LAST_ENTRY)) {

                    PreviousLastEntry = LastEntry;
                    LastEntry += LastEntry->Size;

                    if (((PCHAR)LastEntry >= (PCHAR)Segment->LastValidEntry) || (LastEntry->Size == 0)) {

                        //
                        //  Tolerate a final entry that is not flagged last
                        //  but lands exactly on the new committed range.
                        //

                        if (LastEntry == (PHEAP_ENTRY)Address) {

                            LastEntry = PreviousLastEntry;

                            break;
                        }

                        HeapDebugPrint(( "Heap missing last entry in committed range near %x\n", PreviousLastEntry ));
                        HeapDebugBreak( PreviousLastEntry );

                        return NULL;
                    }
                }
            }

            //
            //  What follows that entry is now committed, so clear its
            //  last-entry flag.
            //

            LastEntry->Flags &= ~HEAP_ENTRY_LAST_ENTRY;

            //
            //  Shrink the uncommitted range by the amount committed.
            //

            UnCommittedRange->Address += *Size;
            UnCommittedRange->Size -= *Size;

            if (UnCommittedRange->Size == 0) {

                //
                //  The range was fully consumed.  Whether the committed
                //  space is the last in the segment depends on whether the
                //  range ended at the segment's last valid entry.
                //

                if (UnCommittedRange->Address == (ULONG_PTR)Segment->LastValidEntry) {

                    FirstEntry->Flags = HEAP_ENTRY_LAST_ENTRY;

                    Segment->LastEntryInSegment = FirstEntry;

                } else {

                    FirstEntry->Flags = 0;

                    Segment->LastEntryInSegment = Segment->FirstEntry;
                }

                //
                //  Unlink and recycle the now-empty range record.
                //

                *pp = UnCommittedRange->Next;

                RtlpDestroyUnCommittedRange( Segment, UnCommittedRange );

                Segment->NumberOfUnCommittedRanges -= 1;

            } else {

                //
                //  Uncommitted space still follows the committed run.
                //

                FirstEntry->Flags = HEAP_ENTRY_LAST_ENTRY;

                Segment->LastEntryInSegment = FirstEntry;
            }

            //
            //  Fill in the new entry's header and, when another entry
            //  follows, its back-link.
            //

            FirstEntry->SegmentIndex = LastEntry->SegmentIndex;
            FirstEntry->Size = (USHORT)(*Size >> HEAP_GRANULARITY_SHIFT);
            FirstEntry->PreviousSize = LastEntry->Size;

            if (!(FirstEntry->Flags & HEAP_ENTRY_LAST_ENTRY)) {

                (FirstEntry + FirstEntry->Size)->PreviousSize = FirstEntry->Size;
            }

            //
            //  If the cached largest range was invalidated above, rescan
            //  the list to re-establish it.
            //

            if (Segment->LargestUnCommittedRange == 0) {

                UnCommittedRange = Segment->UnCommittedRanges;

                while (UnCommittedRange != NULL) {

                    if (UnCommittedRange->Size >= Segment->LargestUnCommittedRange) {

                        Segment->LargestUnCommittedRange = UnCommittedRange->Size;
                    }

                    UnCommittedRange = UnCommittedRange->Next;
                }
            }

            return (PHEAP_FREE_ENTRY)FirstEntry;

        } else {

            //
            //  This range is too small or at the wrong address; advance.
            //

            PreviousUnCommittedRange = UnCommittedRange;
            pp = &UnCommittedRange->Next;
        }
    }

    //
    //  No uncommitted range satisfied the size and/or address requirement.
    //

    return NULL;
}

PHEAP_ENTRY_EXTRA RtlpGetExtraStuffPointer (IN PHEAP_ENTRY BusyBlock)
 

Definition at line 6008 of file rtl/heap.c.

References _HEAP_VIRTUAL_ALLOC_ENTRY::ExtraStuff, _HEAP_ENTRY::Flags, HEAP_ENTRY_VIRTUAL_ALLOC, and _HEAP_ENTRY::Size.

Referenced by RtlAllocateHeapSlowly(), RtlDebugAllocateHeap(), RtlDebugFreeHeap(), RtlDebugReAllocateHeap(), RtlGetUserInfoHeap(), RtlpValidateHeapSegment(), RtlReAllocateHeap(), RtlSetUserValueHeap(), RtlUsageHeap(), and RtlWalkHeap().

06014 : 06015 06016 This routine calculates where the extra stuff record will be given 06017 the busy block and returns a pointer to it. The caller must have 06018 already checked that the entry extry field is present 06019 06020 Arguments: 06021 06022 BusyBlock - Supplies the busy block whose extra stuff we are seeking 06023 06024 Return Value: 06025 06026 PHEAP_ENTRY_EXTRA - returns a pointer to the extra stuff record. 06027 06028 --*/ 06029 06030 { 06031 ULONG AllocationIndex; 06032 06033 // 06034 // On big blocks the extra stuff is automatically part of the 06035 // block 06036 // 06037 06038 if (BusyBlock->Flags & HEAP_ENTRY_VIRTUAL_ALLOC) { 06039 06040 PHEAP_VIRTUAL_ALLOC_ENTRY VirtualAllocBlock; 06041 06042 VirtualAllocBlock = CONTAINING_RECORD( BusyBlock, HEAP_VIRTUAL_ALLOC_ENTRY, BusyBlock ); 06043 06044 return &VirtualAllocBlock->ExtraStuff; 06045 06046 } else { 06047 06048 // 06049 // On non big blocks the extra stuff follows immediately after 06050 // the allocation itself. 06051 // 06052 // **** What a hack 06053 // **** We do some funny math here because the busy block 06054 // **** stride is 8 bytes we know we can stride it by its 06055 // **** index minus one to get to the end of the allocation 06056 // 06057 06058 AllocationIndex = BusyBlock->Size; 06059 06060 return (PHEAP_ENTRY_EXTRA)(BusyBlock + AllocationIndex - 1); 06061 } 06062 }

SIZE_T RtlpGetSizeOfBigBlock (IN PHEAP_ENTRY BusyBlock)
 

Definition at line 6070 of file rtl/heap.c.

References _HEAP_VIRTUAL_ALLOC_ENTRY::CommitSize, and RTL_PAGED_CODE.

Referenced by RtlpCheckBusyBlockTail(), RtlReAllocateHeap(), RtlSizeHeap(), and RtlWalkHeap().

06076 : 06077 06078 This routine returns the size, in bytes, of the big allocation block 06079 06080 Arguments: 06081 06082 BusyBlock - Supplies a pointer to the block being queried 06083 06084 Return Value: 06085 06086 SIZE_T - Returns the size, in bytes, that was allocated to the big 06087 block 06088 06089 --*/ 06090 06091 { 06092 PHEAP_VIRTUAL_ALLOC_ENTRY VirtualAllocBlock; 06093 06094 RTL_PAGED_CODE(); 06095 06096 // 06097 // Get a pointer to the block header itself 06098 // 06099 06100 VirtualAllocBlock = CONTAINING_RECORD( BusyBlock, HEAP_VIRTUAL_ALLOC_ENTRY, BusyBlock ); 06101 06102 // 06103 // The size allocated to the block is actually the difference between the 06104 // commit size stored in the virtual alloc block and the size stored in 06105 // in the block. 06106 // 06107 06108 return VirtualAllocBlock->CommitSize - BusyBlock->Size; 06109 }

BOOLEAN RtlpInitializeHeapSegment (IN PHEAP Heap,
IN PHEAP_SEGMENT Segment,
IN UCHAR SegmentIndex,
IN ULONG Flags,
IN PVOID BaseAddress,
IN PVOID UnCommittedAddress,
IN PVOID CommitLimitAddress)

Definition at line 4732 of file rtl/heap.c.

References FALSE, _HEAP_ENTRY::Flags, HEAP_ENTRY_BUSY, HEAP_ENTRY_LAST_ENTRY, HEAP_GRANULARITY, HEAP_GRANULARITY_SHIFT, HEAP_SEGMENT_SIGNATURE, NT_SUCCESS, NtGlobalFlag, NTSTATUS(), PAGE_SIZE, _HEAP_ENTRY::PreviousSize, ROUND_UP_TO_POWER2, RTL_PAGED_CODE, RtlGetNtGlobalFlags(), RtlpInsertFreeBlock(), RtlpInsertUnCommittedPages(), _HEAP_ENTRY::SegmentIndex, Size, Status, TRUE, and USHORT.

Referenced by RtlCreateHeap(), RtlExtendHeap(), and RtlpExtendHeap().

04744 : 04745 04746 This routines initializes the internal structures for a heap segment. 04747 The caller supplies the heap and the memory for the segment being 04748 initialized 04749 04750 Arguments: 04751 04752 Heap - Supplies the address of the heap owning this segment 04753 04754 Segment - Supplies a pointer to the segment being initialized 04755 04756 SegmentIndex - Supplies the segement index within the heap that this 04757 new segment is being assigned 04758 04759 Flags - Supplies flags controlling the initialization of the segment 04760 Valid flags are: 04761 04762 HEAP_SEGMENT_USER_ALLOCATED 04763 04764 BaseAddress - Supplies the base address for the segment 04765 04766 UnCommittedAddress - Supplies the address where the uncommited range starts 04767 04768 CommitLimitAddress - Supplies the top address available to the segment 04769 04770 Return Value: 04771 04772 BOOLEAN - TRUE if the initialization is successful and FALSE otherwise 04773 04774 --*/ 04775 04776 { 04777 NTSTATUS Status; 04778 PHEAP_ENTRY FirstEntry; 04779 USHORT PreviousSize, Size; 04780 ULONG NumberOfPages; 04781 ULONG NumberOfCommittedPages; 04782 ULONG NumberOfUnCommittedPages; 04783 SIZE_T CommitSize; 04784 ULONG NtGlobalFlag = RtlGetNtGlobalFlags(); 04785 04786 RTL_PAGED_CODE(); 04787 04788 // 04789 // Compute the total number of pages possible in this segment 04790 // 04791 04792 NumberOfPages = (ULONG) (((PCHAR)CommitLimitAddress - (PCHAR)BaseAddress) / PAGE_SIZE); 04793 04794 // 04795 // First entry points to the first possible segment entry after 04796 // the segment header 04797 // 04798 04799 FirstEntry = (PHEAP_ENTRY)ROUND_UP_TO_POWER2( Segment + 1, 04800 HEAP_GRANULARITY ); 04801 04802 // 04803 // Now if the heap is equal to the base address for the segment which 04804 // it the case for the segment zero then the previous size is the 04805 // heap header. 
Otherwise there isn't a previous entry 04806 // 04807 04808 if ((PVOID)Heap == BaseAddress) { 04809 04810 PreviousSize = Heap->Entry.Size; 04811 04812 } else { 04813 04814 PreviousSize = 0; 04815 } 04816 04817 // 04818 // Compute the index size of the segement header 04819 // 04820 04821 Size = (USHORT)(((PCHAR)FirstEntry - (PCHAR)Segment) >> HEAP_GRANULARITY_SHIFT); 04822 04823 // 04824 // If the first available heap entry is not committed and 04825 // it is beyond the heap limit then we cannot initialize 04826 // 04827 04828 if ((PCHAR)(FirstEntry + 1) >= (PCHAR)UnCommittedAddress) { 04829 04830 if ((PCHAR)(FirstEntry + 1) >= (PCHAR)CommitLimitAddress) { 04831 04832 return FALSE; 04833 } 04834 04835 // 04836 // Enough of the segment has not been committed so we 04837 // will commit enough now to handle the first entry 04838 // 04839 04840 CommitSize = (PCHAR)(FirstEntry + 1) - (PCHAR)UnCommittedAddress; 04841 04842 Status = ZwAllocateVirtualMemory( NtCurrentProcess(), 04843 (PVOID *)&UnCommittedAddress, 04844 0, 04845 &CommitSize, 04846 MEM_COMMIT, 04847 PAGE_READWRITE ); 04848 04849 if (!NT_SUCCESS( Status )) { 04850 04851 return FALSE; 04852 } 04853 04854 // 04855 // Because we had to commit some memory we need to adjust 04856 // the uncommited address 04857 // 04858 04859 UnCommittedAddress = (PVOID)((PCHAR)UnCommittedAddress + CommitSize); 04860 } 04861 04862 // 04863 // At this point we know there is enough memory committed to handle the 04864 // segment header and one heap entry 04865 // 04866 // Now compute the number of uncommited pages and the number of committed 04867 // pages 04868 // 04869 04870 NumberOfUnCommittedPages = (ULONG)(((PCHAR)CommitLimitAddress - (PCHAR)UnCommittedAddress) / PAGE_SIZE); 04871 NumberOfCommittedPages = NumberOfPages - NumberOfUnCommittedPages; 04872 04873 // 04874 // Initialize the heap segment heap entry. 
We 04875 // calculated earlier if there was a previous entry 04876 // 04877 04878 Segment->Entry.PreviousSize = PreviousSize; 04879 Segment->Entry.Size = Size; 04880 Segment->Entry.Flags = HEAP_ENTRY_BUSY; 04881 Segment->Entry.SegmentIndex = SegmentIndex; 04882 04883 #if i386 && !NTOS_KERNEL_RUNTIME 04884 04885 // 04886 // In the non kernel x86 case see if we need to capture the callers stack 04887 // backtrace 04888 // 04889 04890 if (NtGlobalFlag & FLG_USER_STACK_TRACE_DB) { 04891 04892 Segment->AllocatorBackTraceIndex = (USHORT)RtlLogStackBackTrace(); 04893 } 04894 04895 #endif // i386 && !NTOS_KERNEL_RUNTIME 04896 04897 // 04898 // Now initializes the heap segment 04899 // 04900 04901 Segment->Signature = HEAP_SEGMENT_SIGNATURE; 04902 Segment->Flags = Flags; 04903 Segment->Heap = Heap; 04904 Segment->BaseAddress = BaseAddress; 04905 Segment->FirstEntry = FirstEntry; 04906 Segment->LastValidEntry = (PHEAP_ENTRY)((PCHAR)BaseAddress + (NumberOfPages * PAGE_SIZE)); 04907 Segment->NumberOfPages = NumberOfPages; 04908 Segment->NumberOfUnCommittedPages = NumberOfUnCommittedPages; 04909 04910 // 04911 // If there are uncommitted pages then we need to insert them 04912 // into the uncommitted ranges list 04913 // 04914 04915 if (NumberOfUnCommittedPages) { 04916 04917 RtlpInsertUnCommittedPages( Segment, 04918 (ULONG_PTR)UnCommittedAddress, 04919 NumberOfUnCommittedPages * PAGE_SIZE ); 04920 } 04921 04922 // 04923 // Have the containing heap point to this segment via the specified index 04924 // 04925 04926 Heap->Segments[ SegmentIndex ] = Segment; 04927 04928 // 04929 // Initialize the first free heap entry after the heap segment header and 04930 // put it in the free list. 
This first entry will be for whatever is left 04931 // of the committed range 04932 // 04933 04934 PreviousSize = Segment->Entry.Size; 04935 FirstEntry->Flags = HEAP_ENTRY_LAST_ENTRY; 04936 04937 Segment->LastEntryInSegment = FirstEntry; 04938 04939 FirstEntry->PreviousSize = PreviousSize; 04940 FirstEntry->SegmentIndex = SegmentIndex; 04941 04942 RtlpInsertFreeBlock( Heap, 04943 (PHEAP_FREE_ENTRY)FirstEntry, 04944 (PHEAP_ENTRY)UnCommittedAddress - FirstEntry); 04945 04946 // 04947 // And return to our caller 04948 // 04949 04950 return TRUE; 04951 }

VOID RtlpInsertFreeBlock (IN PHEAP Heap,
IN PHEAP_FREE_ENTRY FreeBlock,
IN SIZE_T FreeSize)

Definition at line 5849 of file rtl/heap.c.

References _HEAP_SEGMENT::Flags, HEAP_ENTRY_LAST_ENTRY, HEAP_MAXIMUM_BLOCK_SIZE, _HEAP_SEGMENT::LastValidEntry, RTL_PAGED_CODE, RtlpInsertFreeBlockDirect, Size, and USHORT.

Referenced by RtlAllocateHeap(), RtlAllocateHeapSlowly(), RtlFreeHeap(), RtlFreeHeapSlowly(), RtlpCoalesceHeap(), RtlpDeCommitFreeBlock(), RtlpExtendHeap(), RtlpGrowBlockInPlace(), RtlpInitializeHeapSegment(), and RtlReAllocateHeap().

05857 : 05858 05859 This routines take a piece of committed memory and adds to the 05860 the appropriate free lists for the heap. If necessary this 05861 routine will divide up the free block to sizes that fit 05862 on the free list 05863 05864 05865 Arguments: 05866 05867 Heap - Supplies a pointer to the owning heap 05868 05869 FreeBlock - Supplies a pointer to the block being freed 05870 05871 FreeSize - Supplies the size, in bytes, of the block being freed 05872 05873 Return Value: 05874 05875 None. 05876 05877 --*/ 05878 05879 { 05880 USHORT PreviousSize, Size; 05881 UCHAR Flags; 05882 UCHAR SegmentIndex; 05883 PHEAP_SEGMENT Segment; 05884 05885 RTL_PAGED_CODE(); 05886 05887 // 05888 // Get the size of the previous block, the index of the segment 05889 // containing this block, and the flags specific to the block 05890 // 05891 05892 PreviousSize = FreeBlock->PreviousSize; 05893 05894 SegmentIndex = FreeBlock->SegmentIndex; 05895 Segment = Heap->Segments[ SegmentIndex ]; 05896 05897 Flags = FreeBlock->Flags; 05898 05899 // 05900 // Adjust the total amount free in the heap 05901 // 05902 05903 Heap->TotalFreeSize += FreeSize; 05904 05905 // 05906 // Now, while there is still something left to add to the free list 05907 // we'll process the information 05908 // 05909 05910 while (FreeSize != 0) { 05911 05912 // 05913 // If the size is too big for our free lists then we'll 05914 // chop it down. 05915 // 05916 05917 if (FreeSize > (ULONG)HEAP_MAXIMUM_BLOCK_SIZE) { 05918 05919 Size = HEAP_MAXIMUM_BLOCK_SIZE; 05920 05921 // 05922 // This little adjustment is so that we don't have a remainder 05923 // that is too small to be useful on the next iteration 05924 // through the loop 05925 // 05926 05927 if (FreeSize == ((ULONG)HEAP_MAXIMUM_BLOCK_SIZE + 1)) { 05928 05929 Size -= 16; 05930 } 05931 05932 // 05933 // Guarantee that Last entry does not get set in this 05934 // block. 
05935 // 05936 05937 FreeBlock->Flags = 0; 05938 05939 } else { 05940 05941 Size = (USHORT)FreeSize; 05942 05943 // 05944 // This could propagate the last entry flag 05945 // 05946 05947 FreeBlock->Flags = Flags; 05948 } 05949 05950 // 05951 // Update the block sizes and then insert this 05952 // block into a free list 05953 // 05954 05955 FreeBlock->PreviousSize = PreviousSize; 05956 FreeBlock->SegmentIndex = SegmentIndex; 05957 FreeBlock->Size = Size; 05958 05959 RtlpInsertFreeBlockDirect( Heap, FreeBlock, Size ); 05960 05961 // 05962 // Note the size of what we just freed, and then update 05963 // our state information for the next time through the 05964 // loop 05965 // 05966 05967 PreviousSize = Size; 05968 05969 FreeSize -= Size; 05970 FreeBlock = (PHEAP_FREE_ENTRY)((PHEAP_ENTRY)FreeBlock + Size); 05971 05972 // 05973 // Check if we're done with the free block based on the 05974 // segment information, otherwise go back up and check size 05975 // Note that is means that we can get called with a very 05976 // large size and still work. 05977 // 05978 05979 if ((PHEAP_ENTRY)FreeBlock >= Segment->LastValidEntry) { 05980 05981 return; 05982 } 05983 } 05984 05985 // 05986 // If the block we're freeing did not think it was the last entry 05987 // then tell the next block our real size. 05988 // 05989 05990 if (!(Flags & HEAP_ENTRY_LAST_ENTRY)) { 05991 05992 FreeBlock->PreviousSize = PreviousSize; 05993 } 05994 05995 // 05996 // And return to our caller 05997 // 05998 05999 return; 06000 }

VOID RtlpInsertUnCommittedPages (IN PHEAP_SEGMENT Segment,
IN ULONG_PTR Address,
IN SIZE_T Size)

Definition at line 4227 of file rtl/heap.c.

References _HEAP_UNCOMMMTTED_RANGE::Address, HeapDebugPrint, _HEAP_UNCOMMMTTED_RANGE::Next, NULL, RTL_PAGED_CODE, RtlpCreateUnCommittedRange(), RtlpDestroyUnCommittedRange(), _HEAP_UNCOMMMTTED_RANGE::Size, and Size.

Referenced by RtlpDeCommitFreeBlock(), and RtlpInitializeHeapSegment().

04235 : 04236 04237 This routine adds the specified range to the list of uncommitted pages 04238 in the segment. When done the information will hang off the segments 04239 uncommitted ranges list. 04240 04241 Arguments: 04242 04243 Segment - Supplies a segment whose uncommitted range is being modified 04244 04245 Address - Supplies the base (start) address for the uncommitted range 04246 04247 Size - Supplies the size, in bytes, of the uncommitted range 04248 04249 Return Value: 04250 04251 None. 04252 04253 --*/ 04254 04255 { 04256 PHEAP_UNCOMMMTTED_RANGE UnCommittedRange, *pp; 04257 04258 RTL_PAGED_CODE(); 04259 04260 // 04261 // Get a pointer to the front of the segments uncommitted range list 04262 // The list is sorted by ascending address 04263 // 04264 04265 pp = &Segment->UnCommittedRanges; 04266 04267 // 04268 // While we haven't reached the end of the list we'll zoom through 04269 // trying to find a fit 04270 // 04271 04272 while (UnCommittedRange = *pp) { 04273 04274 // 04275 // If address we want is less than what we're pointing at then 04276 // we've found where this new entry goes 04277 // 04278 04279 if (UnCommittedRange->Address > Address) { 04280 04281 // 04282 // If the new block matches right up to the existing block 04283 // then we can simply backup the existing block and add 04284 // to its size 04285 // 04286 04287 if ((Address + Size) == UnCommittedRange->Address) { 04288 04289 UnCommittedRange->Address = Address; 04290 UnCommittedRange->Size += Size; 04291 04292 // 04293 // Check if we need to update our notion of what the 04294 // largest uncommitted range is 04295 // 04296 04297 if (UnCommittedRange->Size > Segment->LargestUnCommittedRange) { 04298 04299 Segment->LargestUnCommittedRange = UnCommittedRange->Size; 04300 } 04301 04302 // 04303 // And return to our caller 04304 // 04305 04306 return; 04307 } 04308 04309 // 04310 // Pp is the address of the block right before us, and *Pp is the 04311 // address of the block right after us. 
So now fall out to where 04312 // the insertion takes place. 04313 // 04314 04315 break; 04316 04317 // 04318 // Otherwise if this existing block stops right where the new block 04319 // starts then we get to modify this entry. 04320 // 04321 04322 } else if ((UnCommittedRange->Address + UnCommittedRange->Size) == Address) { 04323 04324 // 04325 // Remember the starting address and compute the new larger size 04326 // 04327 04328 Address = UnCommittedRange->Address; 04329 Size += UnCommittedRange->Size; 04330 04331 // 04332 // Remove this entry from the list and then return it to the 04333 // unused uncommitted list 04334 // 04335 04336 *pp = UnCommittedRange->Next; 04337 04338 RtlpDestroyUnCommittedRange( Segment, UnCommittedRange ); 04339 04340 // 04341 // Modify the segemnt counters and largest size state. The next 04342 // time through the loop should hit the first case above where 04343 // we'll either merge with a list following us or add a new 04344 // entry 04345 // 04346 04347 Segment->NumberOfUnCommittedRanges -= 1; 04348 04349 if (Size > Segment->LargestUnCommittedRange) { 04350 04351 Segment->LargestUnCommittedRange = Size; 04352 } 04353 04354 // 04355 // Otherwise we'll continue search down the list 04356 // 04357 04358 } else { 04359 04360 pp = &UnCommittedRange->Next; 04361 } 04362 } 04363 04364 // 04365 // If we reach this point that means we've either fallen off the end of the 04366 // list, or the list is empty, or we've located the spot where a new uncommitted 04367 // range structure belongs. So allocate a new uncommitted range structure, 04368 // and make sure we got one. 
04369 // 04370 // Pp is the address of the block right before us and *Pp is the address of the 04371 // block right after us 04372 // 04373 04374 UnCommittedRange = RtlpCreateUnCommittedRange( Segment ); 04375 04376 if (UnCommittedRange == NULL) { 04377 04378 HeapDebugPrint(( "Abandoning uncommitted range (%x for %x)\n", Address, Size )); 04379 // HeapDebugBreak( NULL ); 04380 04381 return; 04382 } 04383 04384 // 04385 // Fill in the new uncommitted range structure 04386 // 04387 04388 UnCommittedRange->Address = Address; 04389 UnCommittedRange->Size = Size; 04390 04391 // 04392 // Insert it in the list for the segment 04393 // 04394 04395 UnCommittedRange->Next = *pp; 04396 *pp = UnCommittedRange; 04397 04398 // 04399 // Update the segment counters and notion of the largest uncommitted range 04400 // 04401 04402 Segment->NumberOfUnCommittedRanges += 1; 04403 04404 if (Size >= Segment->LargestUnCommittedRange) { 04405 04406 Segment->LargestUnCommittedRange = Size; 04407 } 04408 04409 // 04410 // And return to our caller 04411 // 04412 04413 return; 04414 }

SIZE_T RtlSizeHeap (IN PVOID HeapHandle,
IN ULONG Flags,
IN PVOID BaseAddress)

Definition at line 3655 of file rtl/heap.c.

References DEBUG_HEAP, _HEAP_ENTRY::Flags, _HEAP::ForceFlags, HEAP_ENTRY_BUSY, HEAP_ENTRY_VIRTUAL_ALLOC, HEAP_GRANULARITY_SHIFT, HeapHandle, RtlDebugSizeHeap(), RtlpGetSizeOfBigBlock(), SET_LAST_STATUS, _HEAP_ENTRY::Size, and _HEAP_ENTRY::UnusedBytes.

Referenced by HMAllocObject(), HMFreeObject(), RtlDebugSizeHeap(), and RtlpDphNormalHeapSize().

03663 : 03664 03665 This routine returns the size, in bytes, of the indicated block 03666 of heap storage. The size only includes the number of bytes the 03667 original caller used to allocate the block and not any unused 03668 bytes at the end of the block. 03669 03670 Arguments: 03671 03672 HeapHandle - Supplies a pointer to the heap that owns the block 03673 being queried 03674 03675 Flags - Supplies a set of flags used to allocate the block 03676 03677 BaseAddress - Supplies the address of the block being queried 03678 03679 Return Value: 03680 03681 SIZE_T - returns the size, in bytes, of the queried block, or -1 03682 if the block is not in use. 03683 03684 --*/ 03685 03686 { 03687 PHEAP Heap = (PHEAP)HeapHandle; 03688 PHEAP_ENTRY BusyBlock; 03689 SIZE_T BusySize; 03690 03691 // 03692 // Compliment the input flags with those enforced by the heap 03693 // 03694 03695 Flags |= Heap->ForceFlags; 03696 03697 // 03698 // Check if this is the nonkernel debug version of heap 03699 // 03700 03701 #ifndef NTOS_KERNEL_RUNTIME 03702 03703 if (DEBUG_HEAP( Flags )) { 03704 03705 return RtlDebugSizeHeap( HeapHandle, Flags, BaseAddress ); 03706 } 03707 03708 #endif // NTOS_KERNEL_RUNTIME 03709 03710 // 03711 // No lock is required since nothing is modified and nothing 03712 // outside the busy block is read. 
Backup to get a pointer 03713 // to the heap entry 03714 // 03715 03716 BusyBlock = (PHEAP_ENTRY)BaseAddress - 1; 03717 03718 // 03719 // If the block is not in use then the answer is -1 and 03720 // we'll set the error status for the user mode thread 03721 // 03722 03723 if (!(BusyBlock->Flags & HEAP_ENTRY_BUSY)) { 03724 03725 BusySize = -1; 03726 03727 SET_LAST_STATUS( STATUS_INVALID_PARAMETER ); 03728 03729 // 03730 // Otherwise if the block is from our large allocation then 03731 // we'll get the result from that routine 03732 // 03733 03734 } else if (BusyBlock->Flags & HEAP_ENTRY_VIRTUAL_ALLOC) { 03735 03736 BusySize = RtlpGetSizeOfBigBlock( BusyBlock ); 03737 03738 // 03739 // Otherwise the block must be one that we can handle so 03740 // calculate its block size and then subtract what's not being 03741 // used by the caller. 03742 // 03743 // **** this seems to include the heap entry header in its 03744 // **** calculation. Is that what we really want? 03745 // 03746 03747 } else { 03748 03749 BusySize = (BusyBlock->Size << HEAP_GRANULARITY_SHIFT) - 03750 BusyBlock->UnusedBytes; 03751 } 03752 03753 // 03754 // And return to our caller 03755 // 03756 03757 return BusySize; 03758 }

NTSTATUS RtlZeroHeap (IN PVOID HeapHandle,
IN ULONG Flags)

Definition at line 3762 of file rtl/heap.c.

References _HEAP_UNCOMMMTTED_RANGE::Address, DEBUG_HEAP, EXCEPTION_EXECUTE_HANDLER, FALSE, _HEAP_SEGMENT::FirstEntry, _HEAP::Flags, _HEAP_ENTRY::Flags, _HEAP::ForceFlags, FREE_HEAP_FILL, HEAP_ENTRY_BUSY, HEAP_ENTRY_FILL_PATTERN, HEAP_ENTRY_LAST_ENTRY, HEAP_GRANULARITY_SHIFT, HEAP_MAXIMUM_SEGMENTS, HeapHandle, _HEAP_SEGMENT::LastValidEntry, _HEAP::LockVariable, _HEAP_UNCOMMMTTED_RANGE::Next, NTSTATUS(), NULL, RTL_PAGED_CODE, RtlAcquireLockRoutine, RtlDebugZeroHeap(), RtlReleaseLockRoutine, _HEAP::Segments, _HEAP_UNCOMMMTTED_RANGE::Size, _HEAP_ENTRY::Size, Size, Status, TRUE, and _HEAP_SEGMENT::UnCommittedRanges.

Referenced by EndShutdown(), and RtlDebugZeroHeap().

03769 : 03770 03771 This routine zero's (or fills) in all the free blocks in a heap. 03772 It does not touch big allocations. 03773 03774 Arguments: 03775 03776 HeapHandle - Supplies a pointer to the heap being zeroed 03777 03778 Flags - Supplies a set of heap flags to compliment those already 03779 set in the heap 03780 03781 Return Value: 03782 03783 NTSTATUS - An appropriate status code 03784 03785 --*/ 03786 03787 { 03788 PHEAP Heap = (PHEAP)HeapHandle; 03789 NTSTATUS Status; 03790 BOOLEAN LockAcquired = FALSE; 03791 PHEAP_SEGMENT Segment; 03792 ULONG SegmentIndex; 03793 PHEAP_ENTRY CurrentBlock; 03794 PHEAP_FREE_ENTRY FreeBlock; 03795 SIZE_T Size; 03796 PHEAP_UNCOMMMTTED_RANGE UnCommittedRange; 03797 03798 RTL_PAGED_CODE(); 03799 03800 // 03801 // Compliment the input flags with those enforced by the heap 03802 // 03803 03804 Flags |= Heap->ForceFlags; 03805 03806 // 03807 // Check if this is the nonkernel debug version of heap 03808 // 03809 03810 #ifndef NTOS_KERNEL_RUNTIME 03811 03812 if (DEBUG_HEAP( Flags )) { 03813 03814 return RtlDebugZeroHeap( HeapHandle, Flags ); 03815 } 03816 03817 #endif // NTOS_KERNEL_RUNTIME 03818 03819 // 03820 // Unless something happens otherwise we'll assume that we'll 03821 // be successful 03822 // 03823 03824 Status = STATUS_SUCCESS; 03825 03826 try { 03827 03828 // 03829 // Lock the heap 03830 // 03831 03832 if (!(Flags & HEAP_NO_SERIALIZE)) { 03833 03834 RtlAcquireLockRoutine( Heap->LockVariable ); 03835 03836 LockAcquired = TRUE; 03837 } 03838 03839 try { 03840 03841 // 03842 // Zero fill all the free blocks in all the segements 03843 // 03844 03845 for (SegmentIndex=0; SegmentIndex<HEAP_MAXIMUM_SEGMENTS; SegmentIndex++) { 03846 03847 Segment = Heap->Segments[ SegmentIndex ]; 03848 03849 if (!Segment) { 03850 03851 continue; 03852 } 03853 03854 UnCommittedRange = Segment->UnCommittedRanges; 03855 CurrentBlock = Segment->FirstEntry; 03856 03857 // 03858 // With the current segment we'll zoom through the 03859 // blocks 
until we reach the end 03860 // 03861 03862 while (CurrentBlock < Segment->LastValidEntry) { 03863 03864 Size = CurrentBlock->Size << HEAP_GRANULARITY_SHIFT; 03865 03866 // 03867 // If the block is not in use then we'll either zero 03868 // it or fill it. 03869 // 03870 03871 if (!(CurrentBlock->Flags & HEAP_ENTRY_BUSY)) { 03872 03873 FreeBlock = (PHEAP_FREE_ENTRY)CurrentBlock; 03874 03875 if ((Heap->Flags & HEAP_FREE_CHECKING_ENABLED) && 03876 (CurrentBlock->Flags & HEAP_ENTRY_FILL_PATTERN)) { 03877 03878 RtlFillMemoryUlong( FreeBlock + 1, 03879 Size - sizeof( *FreeBlock ), 03880 FREE_HEAP_FILL ); 03881 03882 } else { 03883 03884 RtlFillMemoryUlong( FreeBlock + 1, 03885 Size - sizeof( *FreeBlock ), 03886 0 ); 03887 } 03888 } 03889 03890 // 03891 // If the following entry is uncommited then we need to 03892 // skip over it. This code strongly implies that the 03893 // uncommitted range list is in perfect sync with the 03894 // blocks in the segement 03895 // 03896 03897 if (CurrentBlock->Flags & HEAP_ENTRY_LAST_ENTRY) { 03898 03899 CurrentBlock += CurrentBlock->Size; 03900 03901 // 03902 // Check if the we've reached the end of the segment 03903 // and should just break out of the while loop 03904 // 03905 // 03906 // **** "break;" would probably be more clear here 03907 // 03908 03909 if (UnCommittedRange == NULL) { 03910 03911 CurrentBlock = Segment->LastValidEntry; 03912 03913 // 03914 // Otherwise skip over the uncommitted range 03915 // 03916 03917 } else { 03918 03919 CurrentBlock = (PHEAP_ENTRY) 03920 ((PCHAR)UnCommittedRange->Address + UnCommittedRange->Size); 03921 03922 UnCommittedRange = UnCommittedRange->Next; 03923 } 03924 03925 // 03926 // Otherwise the next block exists so advance to it 03927 // 03928 03929 } else { 03930 03931 CurrentBlock += CurrentBlock->Size; 03932 } 03933 } 03934 } 03935 03936 } except( EXCEPTION_EXECUTE_HANDLER ) { 03937 03938 Status = GetExceptionCode(); 03939 } 03940 03941 } finally { 03942 03943 // 03944 // Unlock the heap 
03945 // 03946 03947 if (LockAcquired) { 03948 03949 RtlReleaseLockRoutine( Heap->LockVariable ); 03950 } 03951 } 03952 03953 return Status; 03954 }


Variable Documentation

UCHAR CheckHeapFillPattern[CHECK_HEAP_TAIL_SIZE]
 

Initial value:

Definition at line 43 of file rtl/heap.c.

Referenced by RtlpCheckBusyBlockTail().

ULONG RtlpDisableHeapLookaside = 0
 

Definition at line 29 of file rtl/heap.c.

Referenced by LdrpInitialize(), and RtlCreateHeap().


Generated on Sat May 15 19:44:03 2004 for test by doxygen 1.3.7