00200 :
00201
00202     This routine checks to see if the faulting address is within
00203     the stack limits and if so tries to create another guard
00204     page on the stack.  A stack overflow is returned if the
00205     creation of a new guard page fails or if the stack is in
00206     the following form:
00207
00208
00209 stack +----------------+
00210 growth | | StackBase
00211 | +----------------+
00212 v | |
00213 | allocated |
00214 | |
00215 | ... |
00216 | |
00217 +----------------+
00218              | old guard page | <- faulting address is in this page.
00219 +----------------+
00220 | |
00221 +----------------+
00222 | | last page of stack (always no access)
00223 +----------------+
00224
00225     In this case, the page before the last page is committed, but
00226     not as a guard page, and a STACK_OVERFLOW condition is returned.
00227
00228 Arguments:
00229
00230     FaultingAddress - Supplies the virtual address of the page which
00231                       was a guard page.
00232
00233 Return Value:
00234
00235 None.
00236
00237 Environment:
00238
00239 Kernel mode. No mutexes held.
00240
00241 --*/
00242
00243 {
00244 PTEB Teb;
00245 ULONG_PTR NextPage;
00246 SIZE_T RegionSize;
00247
NTSTATUS status;
00248 KIRQL OldIrql;
00249
PMMLOCK_CONFLICT Next;
00250 PVOID DeallocationStack;
00251 PVOID *StackLimit;
00252
00253
#if defined(WX86) || defined(_AXP64_)
00254
PWX86TIB Wx86Tib;
00255
#endif
00256
#if defined(_WIN64)
00257
PTEB32 Teb32;
00258
#endif
00259
00260
00261
00262
00263
00264
00265
if (!IsListEmpty (&MmLockConflictList)) {
00266 ExAcquireSpinLock (&MmChargeCommitmentLock, &OldIrql);
00267 Next = (
PMMLOCK_CONFLICT)
MmLockConflictList.Flink;
00268
00269
while ((PVOID)Next != &
MmLockConflictList) {
00270
00271
if (Next->
Thread ==
PsGetCurrentThread()) {
00272 ExReleaseSpinLock (&MmChargeCommitmentLock, OldIrql);
00273
return STATUS_GUARD_PAGE_VIOLATION;
00274 }
00275 Next = (
PMMLOCK_CONFLICT)Next->
List.Flink;
00276 }
00277 ExReleaseSpinLock (&MmChargeCommitmentLock, OldIrql);
00278 }
00279
00280
00281
00282
00283
00284
00285
try {
00286
00287 Teb = NtCurrentTeb();
00288
00289
#if defined(_IA64_)
00290
00291
if ((Teb->NtTib.StackBase <= FaultingAddress) &&
00292 (Teb->DeallocationBStore > FaultingAddress)) {
00293
00294
00295
00296
00297
00298
00299
00300
00301
00302
00303
00304
00305
00306
00307
00308
00309
00310
00311
00312
00313
00314
00315
00316
00317
00318 NextPage = (ULONG_PTR)
PAGE_ALIGN(FaultingAddress) +
PAGE_SIZE;
00319
00320 RegionSize =
PAGE_SIZE;
00321
00322
if ((NextPage +
PAGE_SIZE) >= (ULONG_PTR)
PAGE_ALIGN(Teb->DeallocationBStore)) {
00323
00324
00325
00326
00327
00328
00329
00330 NextPage = (ULONG_PTR)
PAGE_ALIGN(Teb->DeallocationBStore) -
PAGE_SIZE;
00331
00332 status = ZwAllocateVirtualMemory (NtCurrentProcess(),
00333 (PVOID *)&NextPage,
00334 0,
00335 &RegionSize,
00336 MEM_COMMIT,
00337 PAGE_READWRITE);
00338
if (
NT_SUCCESS(status) ) {
00339 Teb->BStoreLimit = (PVOID)( (PUCHAR)NextPage);
00340 }
00341
00342
return STATUS_STACK_OVERFLOW;
00343 }
00344
00345 Teb->BStoreLimit = (PVOID)((PUCHAR)(NextPage));
00346
00347 }
else {
00348
00349
#endif
00350
00351 DeallocationStack = Teb->DeallocationStack;
00352 StackLimit = &Teb->NtTib.StackLimit;
00353
00354
00355
00356
00357
00358
if ((Teb->NtTib.StackBase <= FaultingAddress) ||
00359 (DeallocationStack > FaultingAddress)) {
00360
00361
#if defined(WX86)
00362
00363
00364
00365 Wx86Tib = Teb->Vdm;
00366
if (Wx86Tib) {
00367
ProbeForRead(Wx86Tib,
sizeof(WX86TIB),
sizeof(ULONG));
00368
if (Wx86Tib->Size ==
sizeof(WX86TIB) &&
00369 Wx86Tib->StackBase > FaultingAddress &&
00370 Wx86Tib->DeallocationStack <= FaultingAddress) {
00371
00372 DeallocationStack = Wx86Tib->DeallocationStack;
00373 StackLimit = &Wx86Tib->StackLimit;
00374 }
else {
00375
00376
00377
00378
00379
return STATUS_GUARD_PAGE_VIOLATION;
00380 }
00381 }
else
00382
#endif
00383
#if defined(_WIN64)
00384
00385
00386
00387
if ((Teb32 = (PTEB32)Teb->NtTib.ExceptionList) !=
NULL) {
00388
ProbeForRead(Teb32,
sizeof(TEB32),
sizeof(ULONG));
00389
if ((ULONG_PTR)Teb32->NtTib.StackBase > (ULONG_PTR)FaultingAddress &&
00390 (ULONG_PTR)Teb32->DeallocationStack <= (ULONG_PTR)FaultingAddress) {
00391 DeallocationStack = (PVOID)ULongToPtr(Teb32->DeallocationStack);
00392
00393 StackLimit = (PVOID *)&Teb32->NtTib.StackLimit;
00394 }
else
00395
#if defined(_AXP64_)
00396
00397
00398
00399
if (Wx86Tib = (PWX86TIB)ULongToPtr(Teb32->Vdm)) {
00400
ProbeForRead(Wx86Tib,
sizeof(WX86TIB),
sizeof(ULONG));
00401
if (Wx86Tib->Size ==
sizeof(WX86TIB) &&
00402 (ULONG_PTR)Wx86Tib->StackBase > (ULONG_PTR)FaultingAddress &&
00403 (ULONG_PTR)Wx86Tib->DeallocationStack <= (ULONG_PTR)FaultingAddress) {
00404
00405 DeallocationStack = Wx86Tib->DeallocationStack;
00406 StackLimit = (PVOID *)(&Wx86Tib->StackLimit);
00407 }
else {
00408
00409
00410
00411
00412
return STATUS_GUARD_PAGE_VIOLATION;
00413 }
00414 }
else
00415
#endif
00416
{
00417
00418
00419
00420
00421
return STATUS_GUARD_PAGE_VIOLATION;
00422 }
00423 }
else
00424
#endif
00425
{
00426
00427
00428
00429
00430
return STATUS_GUARD_PAGE_VIOLATION;
00431 }
00432 }
00433
00434
00435
00436
00437
00438
00439
00440 NextPage = ((ULONG_PTR)
PAGE_ALIGN(FaultingAddress) -
PAGE_SIZE);
00441
00442 RegionSize =
PAGE_SIZE;
00443
00444
if ((NextPage -
PAGE_SIZE) <= (ULONG_PTR)
PAGE_ALIGN(DeallocationStack)) {
00445
00446
00447
00448
00449
00450
00451
00452 NextPage = (ULONG_PTR)
PAGE_ALIGN(DeallocationStack) +
PAGE_SIZE;
00453
00454 status = ZwAllocateVirtualMemory (NtCurrentProcess(),
00455 (PVOID *)&NextPage,
00456 0,
00457 &RegionSize,
00458 MEM_COMMIT,
00459 PAGE_READWRITE);
00460
if (
NT_SUCCESS(status) ) {
00461
00462
#if defined(_WIN64)
00463
if (Teb32) {
00464
00465 *(ULONG *)StackLimit = PtrToUlong((PUCHAR)NextPage);
00466 }
else {
00467 *StackLimit = (PVOID)( (PUCHAR)NextPage);
00468 }
00469
#else
00470
*StackLimit = (PVOID)( (PUCHAR)NextPage);
00471
#endif
00472
00473 }
00474
00475
return STATUS_STACK_OVERFLOW;
00476 }
00477
#if defined(_WIN64)
00478
if (Teb32) {
00479
00480 *(ULONG *)StackLimit = PtrToUlong((PUCHAR)(NextPage + PAGE_SIZE));
00481 }
else {
00482 *StackLimit = (PVOID)((PUCHAR)(NextPage +
PAGE_SIZE));
00483 }
00484
#else
00485
*StackLimit = (PVOID)((PUCHAR)(NextPage +
PAGE_SIZE));
00486
#endif
00487
00488
#if defined(_IA64_)
00489
}
00490
#endif // _IA64_
00491
00492 retry:
00493 status = ZwAllocateVirtualMemory (NtCurrentProcess(),
00494 (PVOID *)&NextPage,
00495 0,
00496 &RegionSize,
00497 MEM_COMMIT,
00498 PAGE_READWRITE | PAGE_GUARD);
00499
00500
00501
if (
NT_SUCCESS(status) || (status == STATUS_ALREADY_COMMITTED)) {
00502
00503
00504
00505
00506
00507
00508
return STATUS_PAGE_FAULT_GUARD_PAGE;
00509 }
00510
00511
if (
PsGetCurrentProcess() ==
ExpDefaultErrorPortProcess) {
00512
00513
00514
00515
00516
00517
00518
00519
ASSERT (status == STATUS_COMMITMENT_LIMIT);
00520
00521 ExAcquireSpinLock (&MmChargeCommitmentLock, &OldIrql);
00522
MmTotalCommitLimit += 1;
00523
MmExtendedCommit += 1;
00524 ExReleaseSpinLock (&MmChargeCommitmentLock, OldIrql);
00525
goto retry;
00526 }
00527
00528
return STATUS_STACK_OVERFLOW;
00529
00530 } except (EXCEPTION_EXECUTE_HANDLER) {
00531
00532
00533
00534
00535
00536
00537
00538
return STATUS_GUARD_PAGE_VIOLATION;
00539 }
00540 }
}