| OLD | NEW | 
|---|---|
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. | 
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without | 
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are | 
| 4 // met: | 4 // met: | 
| 5 // | 5 // | 
| 6 //     * Redistributions of source code must retain the above copyright | 6 //     * Redistributions of source code must retain the above copyright | 
| 7 //       notice, this list of conditions and the following disclaimer. | 7 //       notice, this list of conditions and the following disclaimer. | 
| 8 //     * Redistributions in binary form must reproduce the above | 8 //     * Redistributions in binary form must reproduce the above | 
| 9 //       copyright notice, this list of conditions and the following | 9 //       copyright notice, this list of conditions and the following | 
| 10 //       disclaimer in the documentation and/or other materials provided | 10 //       disclaimer in the documentation and/or other materials provided | 
| (...skipping 460 matching lines...) | |
| 471     // The available value is conservative such that it may report | 471     // The available value is conservative such that it may report | 
| 472     // zero prior to heap exhaustion. | 472     // zero prior to heap exhaustion. | 
| 473     CHECK(lo->Available() < available || available == 0); | 473     CHECK(lo->Available() < available || available == 0); | 
| 474   } | 474   } | 
| 475 | 475 | 
| 476   CHECK(!lo->IsEmpty()); | 476   CHECK(!lo->IsEmpty()); | 
| 477 | 477 | 
| 478   CHECK(lo->AllocateRaw(lo_size, NOT_EXECUTABLE).IsRetry()); | 478   CHECK(lo->AllocateRaw(lo_size, NOT_EXECUTABLE).IsRetry()); | 
| 479 } | 479 } | 
| 480 | 480 | 
| 481 | 481 TEST(SizeOfInitialHeap) { | 
| 482 TEST(SizeOfFirstPageIsLargeEnough) { |  | 
| 483   if (i::FLAG_always_opt) return; | 482   if (i::FLAG_always_opt) return; | 
| 484   // Bootstrapping without a snapshot causes more allocations. | 483   // Bootstrapping without a snapshot causes more allocations. | 
| 485   CcTest::InitializeVM(); | 484   CcTest::InitializeVM(); | 
| 486   Isolate* isolate = CcTest::i_isolate(); | 485   Isolate* isolate = CcTest::i_isolate(); | 
| 487   if (!isolate->snapshot_available()) return; | 486   if (!isolate->snapshot_available()) return; | 
| 488   HandleScope scope(isolate); | 487   HandleScope scope(isolate); | 
| 489   v8::Local<v8::Context> context = CcTest::isolate()->GetCurrentContext(); | 488   v8::Local<v8::Context> context = CcTest::isolate()->GetCurrentContext(); | 
| 490   // Skip this test on the custom snapshot builder. | 489   // Skip this test on the custom snapshot builder. | 
| 491   if (!CcTest::global() | 490   if (!CcTest::global() | 
| 492            ->Get(context, v8_str("assertEquals")) | 491            ->Get(context, v8_str("assertEquals")) | 
| 493            .ToLocalChecked() | 492            .ToLocalChecked() | 
| 494            ->IsUndefined()) { | 493            ->IsUndefined()) { | 
| 495     return; | 494     return; | 
| 496   } | 495   } | 
| 497 | 496 | 
| 498   // If this test fails due to enabling experimental natives that are not part | 497   // The limit for each space for an empty isolate containing just the | 
| 499   // of the snapshot, we may need to adjust CalculateFirstPageSizes. | 498   // snapshot. | 
|  | 499   const size_t kMaxInitialSizePerSpace = 1536 * KB;  // 1.5MB | 
| 500 | 500 | 
| 501   // Freshly initialized VM gets by with one page per space. | 501   // Freshly initialized VM gets by with the snapshot size (which is below | 
|  | 502   // kMaxInitialSizePerSpace per space). | 
|  | 503   Heap* heap = isolate->heap(); | 
|  | 504   int page_count[LAST_PAGED_SPACE + 1] = {0, 0, 0, 0}; | 
| 502   for (int i = FIRST_PAGED_SPACE; i <= LAST_PAGED_SPACE; i++) { | 505   for (int i = FIRST_PAGED_SPACE; i <= LAST_PAGED_SPACE; i++) { | 
| 503     // Debug code can be very large, so skip CODE_SPACE if we are generating it. | 506     // Debug code can be very large, so skip CODE_SPACE if we are generating it. | 
| 504     if (i == CODE_SPACE && i::FLAG_debug_code) continue; | 507     if (i == CODE_SPACE && i::FLAG_debug_code) continue; | 
| 505     CHECK_EQ(1, isolate->heap()->paged_space(i)->CountTotalPages()); | 508 | 
|  | 509     page_count[i] = heap->paged_space(i)->CountTotalPages(); | 
|  | 510     // Check that the initial heap is also below the limit. | 
|  | 511     CHECK_LT(static_cast<size_t>(heap->paged_space(i)->CommittedMemory()), | 
|  | 512              kMaxInitialSizePerSpace); | 
| 506   } | 513   } | 
| 507 | 514 | 
| 508   // Executing the empty script gets by with one page per space. | 515   // Executing the empty script gets by with the same number of pages, i.e., | 
|  | 516   // requires no extra space. | 
| 509   CompileRun("/*empty*/"); | 517   CompileRun("/*empty*/"); | 
| 510   for (int i = FIRST_PAGED_SPACE; i <= LAST_PAGED_SPACE; i++) { | 518   for (int i = FIRST_PAGED_SPACE; i <= LAST_PAGED_SPACE; i++) { | 
| 511     // Debug code can be very large, so skip CODE_SPACE if we are generating it. | 519     // Debug code can be very large, so skip CODE_SPACE if we are generating it. | 
| 512     if (i == CODE_SPACE && i::FLAG_debug_code) continue; | 520     if (i == CODE_SPACE && i::FLAG_debug_code) continue; | 
| 513     CHECK_EQ(1, isolate->heap()->paged_space(i)->CountTotalPages()); | 521     CHECK_EQ(page_count[i], isolate->heap()->paged_space(i)->CountTotalPages()); | 
| 514   } | 522   } | 
| 515 | 523 | 
| 516   // No large objects required to perform the above steps. | 524   // No large objects required to perform the above steps. | 
| 517   CHECK(isolate->heap()->lo_space()->IsEmpty()); | 525   CHECK(isolate->heap()->lo_space()->IsEmpty()); | 
| 518 } | 526 } | 
| 519 | 527 | 
| 520 static HeapObject* AllocateUnaligned(NewSpace* space, int size) { | 528 static HeapObject* AllocateUnaligned(NewSpace* space, int size) { | 
| 521   AllocationResult allocation = space->AllocateRawUnaligned(size); | 529   AllocationResult allocation = space->AllocateRawUnaligned(size); | 
| 522   CHECK(!allocation.IsRetry()); | 530   CHECK(!allocation.IsRetry()); | 
| 523   HeapObject* filler = NULL; | 531   HeapObject* filler = NULL; | 
| (...skipping 152 matching lines...) | |
| 676     new_space->RemoveAllocationObserver(&observer2); | 684     new_space->RemoveAllocationObserver(&observer2); | 
| 677 | 685 | 
| 678     CHECK_EQ(observer1.count(), 32); | 686     CHECK_EQ(observer1.count(), 32); | 
| 679     CHECK_EQ(observer2.count(), 28); | 687     CHECK_EQ(observer2.count(), 28); | 
| 680   } | 688   } | 
| 681   isolate->Dispose(); | 689   isolate->Dispose(); | 
| 682 } | 690 } | 
| 683 | 691 | 
| 684 }  // namespace internal | 692 }  // namespace internal | 
| 685 }  // namespace v8 | 693 }  // namespace v8 | 
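
The rework above replaces the old hard assertion of exactly one page per space with a record-then-compare pattern: snapshot the per-space page counts of a freshly initialized isolate, check each space against an absolute cap (`kMaxInitialSizePerSpace`, 1.5MB), run an empty script, and assert that no space grew. Below is a minimal standalone sketch of that pattern; the `Space` struct and its fields are hypothetical stand-ins for V8's `PagedSpace` with its `CountTotalPages()` and `CommittedMemory()`, not the real API.

```cpp
// Sketch of the record-then-compare pattern used by SizeOfInitialHeap.
// `Space` is a stand-in for v8::internal::PagedSpace; the real test reads
// heap->paged_space(i)->CountTotalPages() and CommittedMemory().
#include <cassert>
#include <cstddef>
#include <vector>

struct Space {
  int pages = 1;                  // stand-in for CountTotalPages()
  size_t committed = 512 * 1024;  // stand-in for CommittedMemory(), in bytes
};

int main() {
  const size_t kMaxInitialSizePerSpace = 1536 * 1024;  // 1.5MB, as in the CL
  std::vector<Space> spaces(4);  // FIRST_PAGED_SPACE..LAST_PAGED_SPACE stand-in

  // Record the baseline page counts and check the absolute cap per space.
  std::vector<int> page_count;
  for (const Space& s : spaces) {
    assert(s.committed < kMaxInitialSizePerSpace);
    page_count.push_back(s.pages);
  }

  // ... run the workload that must not allocate (the empty script) ...

  // The heap may be larger than one page per space, but it must not grow.
  for (size_t i = 0; i < spaces.size(); ++i) {
    assert(spaces[i].pages == page_count[i]);
  }
  return 0;
}
```

The design point of the change: pinning the initial heap to exactly one page per space is brittle whenever the snapshot grows, so the test instead records whatever the snapshot actually needs, with the generous cap catching gross size regressions and the growth check catching any allocation caused by executing the empty script.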