// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// TODO(mythria): Remove this define after this flag is turned on globally
#define V8_IMMINENT_DEPRECATION_WARNINGS

#include <stdlib.h>

#include "src/base/platform/platform.h"
#include "src/snapshot/snapshot.h"
#include "src/v8.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap-tester.h"

namespace v8 {
namespace internal {

#if 0
static void VerifyRegionMarking(Address page_start) {
#ifdef ENABLE_CARDMARKING_WRITE_BARRIER
  Page* p = Page::FromAddress(page_start);

  p->SetRegionMarks(Page::kAllRegionsCleanMarks);

  for (Address addr = p->ObjectAreaStart();
       addr < p->ObjectAreaEnd();
       addr += kPointerSize) {
    CHECK(!Page::FromAddress(addr)->IsRegionDirty(addr));
  }

  for (Address addr = p->ObjectAreaStart();
       addr < p->ObjectAreaEnd();
       addr += kPointerSize) {
    Page::FromAddress(addr)->MarkRegionDirty(addr);
  }

  for (Address addr = p->ObjectAreaStart();
       addr < p->ObjectAreaEnd();
       addr += kPointerSize) {
    CHECK(Page::FromAddress(addr)->IsRegionDirty(addr));
  }
#endif
}
#endif


// TODO(gc) you can no longer allocate pages like this. Details are hidden.
#if 0
TEST(Page) {
  byte* mem = NewArray<byte>(2*Page::kPageSize);
  CHECK(mem != NULL);

  Address start = reinterpret_cast<Address>(mem);
  Address page_start = RoundUp(start, Page::kPageSize);

  Page* p = Page::FromAddress(page_start);
  // Initialized Page has heap pointer, normally set by memory_allocator.
  p->heap_ = CcTest::heap();
  CHECK(p->address() == page_start);
  CHECK(p->is_valid());

  p->opaque_header = 0;
  p->SetIsLargeObjectPage(false);
  CHECK(!p->next_page()->is_valid());

  CHECK(p->ObjectAreaStart() == page_start + Page::kObjectStartOffset);
  CHECK(p->ObjectAreaEnd() == page_start + Page::kPageSize);

  CHECK(p->Offset(page_start + Page::kObjectStartOffset) ==
        Page::kObjectStartOffset);
  CHECK(p->Offset(page_start + Page::kPageSize) == Page::kPageSize);

  CHECK(p->OffsetToAddress(Page::kObjectStartOffset) == p->ObjectAreaStart());
  CHECK(p->OffsetToAddress(Page::kPageSize) == p->ObjectAreaEnd());

  // Test region marking.
  VerifyRegionMarking(page_start);

  DeleteArray(mem);
}
#endif


// Temporarily sets a given allocator in an isolate.
class TestMemoryAllocatorScope {
 public:
  TestMemoryAllocatorScope(Isolate* isolate, MemoryAllocator* allocator)
      : isolate_(isolate),
        old_allocator_(isolate->memory_allocator_) {
    isolate->memory_allocator_ = allocator;
  }

  ~TestMemoryAllocatorScope() {
    isolate_->memory_allocator_ = old_allocator_;
  }

 private:
  Isolate* isolate_;
  MemoryAllocator* old_allocator_;

  DISALLOW_COPY_AND_ASSIGN(TestMemoryAllocatorScope);
};


// Temporarily sets a given code range in an isolate.
class TestCodeRangeScope {
 public:
  TestCodeRangeScope(Isolate* isolate, CodeRange* code_range)
      : isolate_(isolate),
        old_code_range_(isolate->code_range_) {
    isolate->code_range_ = code_range;
  }

  ~TestCodeRangeScope() {
    isolate_->code_range_ = old_code_range_;
  }

 private:
  Isolate* isolate_;
  CodeRange* old_code_range_;

  DISALLOW_COPY_AND_ASSIGN(TestCodeRangeScope);
};


static void VerifyMemoryChunk(Isolate* isolate,
                              Heap* heap,
                              CodeRange* code_range,
                              size_t reserve_area_size,
                              size_t commit_area_size,
                              size_t second_commit_area_size,
                              Executability executable) {
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(),
                                heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
  TestCodeRangeScope test_code_range_scope(isolate, code_range);

  size_t header_size = (executable == EXECUTABLE)
                           ? MemoryAllocator::CodePageGuardStartOffset()
                           : MemoryChunk::kObjectStartOffset;
  size_t guard_size = (executable == EXECUTABLE)
                          ? MemoryAllocator::CodePageGuardSize()
                          : 0;

  MemoryChunk* memory_chunk = memory_allocator->AllocateChunk(reserve_area_size,
                                                              commit_area_size,
                                                              executable,
                                                              NULL);
  size_t alignment = code_range != NULL && code_range->valid()
                         ? MemoryChunk::kAlignment
                         : base::OS::CommitPageSize();
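  // An executable chunk is laid out as |header|guard|area|guard| and rounded
  // up to |alignment|; a non-executable chunk is just |header|area|, rounded
  // up to the OS commit page size (this mirrors the formula below).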
  size_t reserved_size =
      (executable == EXECUTABLE)
          ? RoundUp(header_size + guard_size + reserve_area_size + guard_size,
                    alignment)
          : RoundUp(header_size + reserve_area_size,
                    base::OS::CommitPageSize());
  CHECK(memory_chunk->size() == reserved_size);
  CHECK(memory_chunk->area_start() < memory_chunk->address() +
                                     memory_chunk->size());
  CHECK(memory_chunk->area_end() <= memory_chunk->address() +
                                    memory_chunk->size());
  CHECK(static_cast<size_t>(memory_chunk->area_size()) == commit_area_size);

  Address area_start = memory_chunk->area_start();

  memory_chunk->CommitArea(second_commit_area_size);
  CHECK(area_start == memory_chunk->area_start());
  CHECK(memory_chunk->area_start() < memory_chunk->address() +
                                     memory_chunk->size());
  CHECK(memory_chunk->area_end() <= memory_chunk->address() +
                                    memory_chunk->size());
  CHECK(static_cast<size_t>(memory_chunk->area_size()) ==
        second_commit_area_size);

  memory_allocator->Free(memory_chunk);
  memory_allocator->TearDown();
  delete memory_allocator;
}


TEST(Regress3540) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  const int pageSize = Page::kPageSize;
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(
      memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
  CodeRange* code_range = new CodeRange(isolate);
  const size_t code_range_size = 4 * pageSize;
  if (!code_range->SetUp(
          code_range_size +
          RoundUp(v8::base::OS::CommitPageSize() * kReservedCodeRangePages,
                  MemoryChunk::kAlignment) +
          v8::internal::MemoryAllocator::CodePageAreaSize())) {
    return;
  }

  Address address;
  size_t size;
  size_t request_size = code_range_size - 2 * pageSize;
  address = code_range->AllocateRawMemory(
      request_size, request_size - (2 * MemoryAllocator::CodePageGuardSize()),
      &size);
  CHECK(address != NULL);

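  // A second request that no longer fits in what remains of the code range
  // must fail cleanly and return NULL rather than succeed.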
  Address null_address;
  size_t null_size;
  request_size = code_range_size - pageSize;
  null_address = code_range->AllocateRawMemory(
      request_size, request_size - (2 * MemoryAllocator::CodePageGuardSize()),
      &null_size);
  CHECK(null_address == NULL);

  code_range->FreeRawMemory(address, size);
  delete code_range;
  memory_allocator->TearDown();
  delete memory_allocator;
}


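// Simple pseudo-random generator (multiply-with-carry style). It returns
// values in [0, 2^20), i.e. strictly below 1 MB, so the commit sizes drawn
// in TEST(MemoryChunk) always fit within the 1 MB reserved area.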
static unsigned int Pseudorandom() {
  static uint32_t lo = 2345;
  lo = 18273 * (lo & 0xFFFFF) + (lo >> 16);
  return lo & 0xFFFFF;
}


TEST(MemoryChunk) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();

  size_t reserve_area_size = 1 * MB;
  size_t initial_commit_area_size, second_commit_area_size;

  for (int i = 0; i < 100; i++) {
    initial_commit_area_size = Pseudorandom();
    second_commit_area_size = Pseudorandom();

    // With CodeRange.
    CodeRange* code_range = new CodeRange(isolate);
    const size_t code_range_size = 32 * MB;
    if (!code_range->SetUp(code_range_size)) return;

    VerifyMemoryChunk(isolate,
                      heap,
                      code_range,
                      reserve_area_size,
                      initial_commit_area_size,
                      second_commit_area_size,
                      EXECUTABLE);

    VerifyMemoryChunk(isolate,
                      heap,
                      code_range,
                      reserve_area_size,
                      initial_commit_area_size,
                      second_commit_area_size,
                      NOT_EXECUTABLE);
    delete code_range;

    // Without CodeRange.
    code_range = NULL;
    VerifyMemoryChunk(isolate,
                      heap,
                      code_range,
                      reserve_area_size,
                      initial_commit_area_size,
                      second_commit_area_size,
                      EXECUTABLE);

    VerifyMemoryChunk(isolate,
                      heap,
                      code_range,
                      reserve_area_size,
                      initial_commit_area_size,
                      second_commit_area_size,
                      NOT_EXECUTABLE);
  }
}


TEST(MemoryAllocator) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();

  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator != nullptr);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(),
                                heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);

  {
    int total_pages = 0;
    OldSpace faked_space(heap, OLD_SPACE, NOT_EXECUTABLE);
    Page* first_page = memory_allocator->AllocatePage(
        faked_space.AreaSize(), &faked_space, NOT_EXECUTABLE);

    first_page->InsertAfter(faked_space.anchor()->prev_page());
    CHECK(first_page->is_valid());
    CHECK(first_page->next_page() == faked_space.anchor());
    total_pages++;

    for (Page* p = first_page; p != faked_space.anchor(); p = p->next_page()) {
      CHECK(p->owner() == &faked_space);
    }

    // Again, we should get n or n - 1 pages.
    Page* other = memory_allocator->AllocatePage(faked_space.AreaSize(),
                                                 &faked_space, NOT_EXECUTABLE);
    CHECK(other->is_valid());
    total_pages++;
    other->InsertAfter(first_page);
    int page_count = 0;
    for (Page* p = first_page; p != faked_space.anchor(); p = p->next_page()) {
      CHECK(p->owner() == &faked_space);
      page_count++;
    }
    CHECK(total_pages == page_count);

    Page* second_page = first_page->next_page();
    CHECK(second_page->is_valid());

    // OldSpace's destructor will tear down the space and free up all pages.
  }
  memory_allocator->TearDown();
  delete memory_allocator;
}


TEST(NewSpace) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(),
                                heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);

  NewSpace new_space(heap);

  CHECK(new_space.SetUp(CcTest::heap()->ReservedSemiSpaceSize(),
                        CcTest::heap()->ReservedSemiSpaceSize()));
  CHECK(new_space.HasBeenSetUp());

  while (new_space.Available() >= Page::kMaxRegularHeapObjectSize) {
    Object* obj =
        new_space.AllocateRawUnaligned(Page::kMaxRegularHeapObjectSize)
            .ToObjectChecked();
    CHECK(new_space.Contains(HeapObject::cast(obj)));
  }

  new_space.TearDown();
  memory_allocator->TearDown();
  delete memory_allocator;
}


TEST(OldSpace) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(),
                                heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);

  OldSpace* s = new OldSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
  CHECK(s != NULL);

  CHECK(s->SetUp());

  while (s->Available() > 0) {
    s->AllocateRawUnaligned(Page::kMaxRegularHeapObjectSize).ToObjectChecked();
  }

  delete s;
  memory_allocator->TearDown();
  delete memory_allocator;
}


TEST(CompactionSpace) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator != nullptr);
  CHECK(
      memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);

  CompactionSpace* compaction_space =
      new CompactionSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
  CHECK(compaction_space != NULL);
  CHECK(compaction_space->SetUp());

  OldSpace* old_space = new OldSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
  CHECK(old_space != NULL);
  CHECK(old_space->SetUp());

  // Cannot loop until "Available()" since we initially have 0 bytes available
  // and would thus neither grow, nor be able to allocate an object.
  const int kNumObjects = 100;
  const int kNumObjectsPerPage =
      compaction_space->AreaSize() / Page::kMaxRegularHeapObjectSize;
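  // Ceiling division: a final, partially filled page still counts as a page.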
  const int kExpectedPages =
      (kNumObjects + kNumObjectsPerPage - 1) / kNumObjectsPerPage;
  for (int i = 0; i < kNumObjects; i++) {
    compaction_space->AllocateRawUnaligned(Page::kMaxRegularHeapObjectSize)
        .ToObjectChecked();
  }
  int pages_in_old_space = old_space->CountTotalPages();
  int pages_in_compaction_space = compaction_space->CountTotalPages();
  CHECK_EQ(pages_in_compaction_space, kExpectedPages);
  CHECK_LE(pages_in_old_space, 1);

  old_space->MergeCompactionSpace(compaction_space);
  CHECK_EQ(old_space->CountTotalPages(),
           pages_in_old_space + pages_in_compaction_space);

  delete compaction_space;
  delete old_space;

  memory_allocator->TearDown();
  delete memory_allocator;
}


TEST(CompactionSpaceUsingExternalMemory) {
  const int kObjectSize = 512;

  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  MemoryAllocator* allocator = new MemoryAllocator(isolate);
  CHECK(allocator != nullptr);
  CHECK(allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_scope(isolate, allocator);

  CompactionSpaceCollection* collection = new CompactionSpaceCollection(heap);
  CompactionSpace* compaction_space = collection->Get(OLD_SPACE);
  CHECK(compaction_space != NULL);
  CHECK(compaction_space->SetUp());

  OldSpace* old_space = new OldSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
  CHECK(old_space != NULL);
  CHECK(old_space->SetUp());

  // The linear allocation area already counts as used bytes, making
  // exact testing impossible.
  heap->DisableInlineAllocation();

  // Test:
  // * Allocate a backing store in old_space.
  // * Compute the number num_rest_objects of kObjectSize objects that fit
  //   into the rest of available memory.
  // * Add the rest of available memory to the compaction space.
  // * Allocate num_rest_objects in the compaction space.
  // * Allocate one more object.
  // * Merge the compaction space and compare the expected number of pages.

  // Allocate a single object in old_space to initialize a backing page.
  old_space->AllocateRawUnaligned(kObjectSize).ToObjectChecked();
  // Compute the number of objects that fit into the rest of old_space.
  intptr_t rest = static_cast<intptr_t>(old_space->Available());
  CHECK_GT(rest, 0);
  intptr_t num_rest_objects = rest / kObjectSize;
  // After allocating num_rest_objects in compaction_space we allocate a bit
  // more.
  const intptr_t kAdditionalCompactionMemory = kObjectSize;
  // We expect a single old_space page.
  const intptr_t kExpectedInitialOldSpacePages = 1;
  // We expect a single additional page in compaction space because we mostly
  // use external memory.
  const intptr_t kExpectedCompactionPages = 1;
  // We expect two pages to be reachable from old_space in the end.
  const intptr_t kExpectedOldSpacePagesAfterMerge = 2;

  CHECK_EQ(old_space->CountTotalPages(), kExpectedInitialOldSpacePages);
  CHECK_EQ(compaction_space->CountTotalPages(), 0);
  CHECK_EQ(compaction_space->Capacity(), 0);
  // Make the rest of memory available for compaction.
  old_space->DivideUponCompactionSpaces(&collection, 1, rest);
  CHECK_EQ(compaction_space->CountTotalPages(), 0);
  CHECK_EQ(compaction_space->Capacity(), rest);
  while (num_rest_objects-- > 0) {
    compaction_space->AllocateRawUnaligned(kObjectSize).ToObjectChecked();
  }
  // We only used external memory so far.
  CHECK_EQ(compaction_space->CountTotalPages(), 0);
  // Additional allocation.
  compaction_space->AllocateRawUnaligned(kAdditionalCompactionMemory)
      .ToObjectChecked();
  // Now the compaction space should've also acquired a page.
  CHECK_EQ(compaction_space->CountTotalPages(), kExpectedCompactionPages);

  old_space->MergeCompactionSpace(compaction_space);
  CHECK_EQ(old_space->CountTotalPages(), kExpectedOldSpacePagesAfterMerge);

  delete collection;
  delete old_space;

  allocator->TearDown();
  delete allocator;
}


CompactionSpaceCollection** HeapTester::InitializeCompactionSpaces(
    Heap* heap, int num_spaces) {
  CompactionSpaceCollection** spaces =
      new CompactionSpaceCollection*[num_spaces];
  for (int i = 0; i < num_spaces; i++) {
    spaces[i] = new CompactionSpaceCollection(heap);
  }
  return spaces;
}


void HeapTester::DestroyCompactionSpaces(CompactionSpaceCollection** spaces,
                                         int num_spaces) {
  for (int i = 0; i < num_spaces; i++) {
    delete spaces[i];
  }
  delete[] spaces;
}


void HeapTester::MergeCompactionSpaces(PagedSpace* space,
                                       CompactionSpaceCollection** spaces,
                                       int num_spaces) {
  AllocationSpace id = space->identity();
  for (int i = 0; i < num_spaces; i++) {
    space->MergeCompactionSpace(spaces[i]->Get(id));
    CHECK_EQ(spaces[i]->Get(id)->accounting_stats_.Size(), 0);
    CHECK_EQ(spaces[i]->Get(id)->accounting_stats_.Capacity(), 0);
    CHECK_EQ(spaces[i]->Get(id)->Waste(), 0);
  }
}


void HeapTester::AllocateInCompactionSpaces(CompactionSpaceCollection** spaces,
                                            AllocationSpace id, int num_spaces,
                                            int num_objects, int object_size) {
  for (int i = 0; i < num_spaces; i++) {
    for (int j = 0; j < num_objects; j++) {
      spaces[i]->Get(id)->AllocateRawUnaligned(object_size).ToObjectChecked();
    }
    spaces[i]->Get(id)->EmptyAllocationInfo();
    CHECK_EQ(spaces[i]->Get(id)->accounting_stats_.Size(),
             num_objects * object_size);
    CHECK_GE(spaces[i]->Get(id)->accounting_stats_.Capacity(),
             spaces[i]->Get(id)->accounting_stats_.Size());
  }
}


void HeapTester::CompactionStats(CompactionSpaceCollection** spaces,
                                 AllocationSpace id, int num_spaces,
                                 intptr_t* capacity, intptr_t* size) {
  *capacity = 0;
  *size = 0;
  for (int i = 0; i < num_spaces; i++) {
    *capacity += spaces[i]->Get(id)->accounting_stats_.Capacity();
    *size += spaces[i]->Get(id)->accounting_stats_.Size();
  }
}


void HeapTester::TestCompactionSpaceDivide(int num_additional_objects,
                                           int object_size,
                                           int num_compaction_spaces,
                                           int additional_capacity_in_bytes) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  OldSpace* old_space = new OldSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
  CHECK(old_space != nullptr);
  CHECK(old_space->SetUp());
  old_space->AllocateRawUnaligned(object_size).ToObjectChecked();
  old_space->EmptyAllocationInfo();

  intptr_t rest_capacity = old_space->accounting_stats_.Capacity() -
                           old_space->accounting_stats_.Size();
  intptr_t capacity_for_compaction_space =
      rest_capacity / num_compaction_spaces;
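  // Each compaction space gets an equal share of the unused capacity.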
  int num_objects_in_compaction_space =
      static_cast<int>(capacity_for_compaction_space) / object_size +
      num_additional_objects;
  CHECK_GT(num_objects_in_compaction_space, 0);
  intptr_t initial_old_space_capacity = old_space->accounting_stats_.Capacity();

  CompactionSpaceCollection** spaces =
      InitializeCompactionSpaces(heap, num_compaction_spaces);
  old_space->DivideUponCompactionSpaces(spaces, num_compaction_spaces,
                                        capacity_for_compaction_space);

  intptr_t compaction_capacity = 0;
  intptr_t compaction_size = 0;
  CompactionStats(spaces, OLD_SPACE, num_compaction_spaces,
                  &compaction_capacity, &compaction_size);

  intptr_t old_space_capacity = old_space->accounting_stats_.Capacity();
  intptr_t old_space_size = old_space->accounting_stats_.Size();
  // Compaction space memory is subtracted from the original space's capacity.
  CHECK_EQ(old_space_capacity,
           initial_old_space_capacity - compaction_capacity);
  CHECK_EQ(compaction_size, 0);

  AllocateInCompactionSpaces(spaces, OLD_SPACE, num_compaction_spaces,
                             num_objects_in_compaction_space, object_size);

  // Old space size and capacity should be the same as after dividing.
  CHECK_EQ(old_space->accounting_stats_.Size(), old_space_size);
  CHECK_EQ(old_space->accounting_stats_.Capacity(), old_space_capacity);

  CompactionStats(spaces, OLD_SPACE, num_compaction_spaces,
                  &compaction_capacity, &compaction_size);
  MergeCompactionSpaces(old_space, spaces, num_compaction_spaces);

  CHECK_EQ(old_space->accounting_stats_.Capacity(),
           old_space_capacity + compaction_capacity);
  CHECK_EQ(old_space->accounting_stats_.Size(),
           old_space_size + compaction_size);
  // We check against the expected end capacity.
  CHECK_EQ(old_space->accounting_stats_.Capacity(),
           initial_old_space_capacity + additional_capacity_in_bytes);

  DestroyCompactionSpaces(spaces, num_compaction_spaces);
  delete old_space;
}


HEAP_TEST(CompactionSpaceDivideSinglePage) {
  const int kObjectSize = KB;
  const int kCompactionSpaces = 4;
  // Since the bound for objects is tight and the dividing is best effort, we
  // subtract some objects to make sure we still fit in the initial page.
  // A CHECK makes sure that the overall number of allocated objects stays > 0.
  const int kAdditionalObjects = -10;
  const int kAdditionalCapacityRequired = 0;
  TestCompactionSpaceDivide(kAdditionalObjects, kObjectSize, kCompactionSpaces,
                            kAdditionalCapacityRequired);
}


HEAP_TEST(CompactionSpaceDivideMultiplePages) {
  const int kObjectSize = KB;
  const int kCompactionSpaces = 4;
  // Allocate half a page of objects to ensure that we need one more page per
  // compaction space.
  const int kAdditionalObjects = (Page::kPageSize / kObjectSize / 2);
  const int kAdditionalCapacityRequired =
      Page::kAllocatableMemory * kCompactionSpaces;
  TestCompactionSpaceDivide(kAdditionalObjects, kObjectSize, kCompactionSpaces,
                            kAdditionalCapacityRequired);
}


TEST(LargeObjectSpace) {
  v8::V8::Initialize();

  LargeObjectSpace* lo = CcTest::heap()->lo_space();
  CHECK(lo != NULL);

  int lo_size = Page::kPageSize;

  Object* obj = lo->AllocateRaw(lo_size, NOT_EXECUTABLE).ToObjectChecked();
  CHECK(obj->IsHeapObject());

  HeapObject* ho = HeapObject::cast(obj);

  CHECK(lo->Contains(HeapObject::cast(obj)));

  CHECK(lo->FindObject(ho->address()) == obj);

  CHECK(lo->Contains(ho));

  while (true) {
    intptr_t available = lo->Available();
    { AllocationResult allocation = lo->AllocateRaw(lo_size, NOT_EXECUTABLE);
      if (allocation.IsRetry()) break;
    }
    // The available value is conservative such that it may report
    // zero prior to heap exhaustion.
    CHECK(lo->Available() < available || available == 0);
  }

  CHECK(!lo->IsEmpty());

  CHECK(lo->AllocateRaw(lo_size, NOT_EXECUTABLE).IsRetry());
}


TEST(SizeOfFirstPageIsLargeEnough) {
  if (i::FLAG_always_opt) return;
  // Bootstrapping without a snapshot causes more allocations.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  if (!isolate->snapshot_available()) return;
  if (Snapshot::EmbedsScript(isolate)) return;

  // If this test fails due to enabling experimental natives that are not part
  // of the snapshot, we may need to adjust CalculateFirstPageSizes.

  // Freshly initialized VM gets by with one page per space.
  for (int i = FIRST_PAGED_SPACE; i <= LAST_PAGED_SPACE; i++) {
    // Debug code can be very large, so skip CODE_SPACE if we are generating it.
    if (i == CODE_SPACE && i::FLAG_debug_code) continue;
    CHECK_EQ(1, isolate->heap()->paged_space(i)->CountTotalPages());
  }

  // Executing the empty script gets by with one page per space.
  HandleScope scope(isolate);
  CompileRun("/*empty*/");
  for (int i = FIRST_PAGED_SPACE; i <= LAST_PAGED_SPACE; i++) {
    // Debug code can be very large, so skip CODE_SPACE if we are generating it.
    if (i == CODE_SPACE && i::FLAG_debug_code) continue;
    CHECK_EQ(1, isolate->heap()->paged_space(i)->CountTotalPages());
  }

  // No large objects required to perform the above steps.
  CHECK(isolate->heap()->lo_space()->IsEmpty());
}


UNINITIALIZED_TEST(NewSpaceGrowsToTargetCapacity) {
  FLAG_target_semi_space_size = 2 * (Page::kPageSize / MB);
  if (FLAG_optimize_for_size) return;

  v8::Isolate::CreateParams create_params;
  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
  v8::Isolate* isolate = v8::Isolate::New(create_params);
  {
    v8::Isolate::Scope isolate_scope(isolate);
    v8::HandleScope handle_scope(isolate);
    v8::Context::New(isolate)->Enter();

    Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);

    NewSpace* new_space = i_isolate->heap()->new_space();

    // This test doesn't work if we start with a non-default new space
    // configuration.
    if (new_space->InitialTotalCapacity() == Page::kPageSize) {
      CHECK_EQ(new_space->CommittedMemory(), new_space->InitialTotalCapacity());

      // Fill up the first (and only) page of the semi space.
      FillCurrentPage(new_space);

      // Try to allocate out of the new space. A new page should be added and
      // the allocation should succeed.
      v8::internal::AllocationResult allocation =
          new_space->AllocateRawUnaligned(80);
      CHECK(!allocation.IsRetry());
      CHECK_EQ(new_space->CommittedMemory(), 2 * Page::kPageSize);

      // Turn the allocation into a proper object so isolate teardown won't
      // crash.
      HeapObject* free_space = NULL;
      CHECK(allocation.To(&free_space));
      new_space->heap()->CreateFillerObjectAt(free_space->address(), 80);
    }
  }
  isolate->Dispose();
}


static HeapObject* AllocateUnaligned(NewSpace* space, int size) {
  AllocationResult allocation = space->AllocateRawUnaligned(size);
  CHECK(!allocation.IsRetry());
  HeapObject* filler = NULL;
  CHECK(allocation.To(&filler));
  space->heap()->CreateFillerObjectAt(filler->address(), size);
  return filler;
}

class Observer : public InlineAllocationObserver {
 public:
  explicit Observer(intptr_t step_size)
      : InlineAllocationObserver(step_size), count_(0) {}

  void Step(int bytes_allocated, Address, size_t) override { count_++; }

  int count() const { return count_; }

 private:
  int count_;
};


UNINITIALIZED_TEST(InlineAllocationObserver) {
  v8::Isolate::CreateParams create_params;
  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
  v8::Isolate* isolate = v8::Isolate::New(create_params);
  {
    v8::Isolate::Scope isolate_scope(isolate);
    v8::HandleScope handle_scope(isolate);
    v8::Context::New(isolate)->Enter();

    Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);

    NewSpace* new_space = i_isolate->heap()->new_space();

    Observer observer1(128);
    new_space->AddInlineAllocationObserver(&observer1);

    // The observer should not be notified while fewer than 128 bytes have
    // been allocated in total.
    AllocateUnaligned(new_space, 64);
    CHECK_EQ(observer1.count(), 0);

    // The observer should get called when we have allocated exactly 128 bytes.
    AllocateUnaligned(new_space, 64);
    CHECK_EQ(observer1.count(), 1);

    // Another >128 bytes should get another notification.
    AllocateUnaligned(new_space, 136);
    CHECK_EQ(observer1.count(), 2);

    // Allocating a large object should get only one notification.
    AllocateUnaligned(new_space, 1024);
    CHECK_EQ(observer1.count(), 3);

    // Allocating another 2048 bytes in small objects should get 16
    // notifications.
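    // (64 allocations x 32 bytes = 2048 bytes; 2048 / 128 = 16 steps.)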
    for (int i = 0; i < 64; ++i) {
      AllocateUnaligned(new_space, 32);
    }
    CHECK_EQ(observer1.count(), 19);

    // Multiple observers should work.
    Observer observer2(96);
    new_space->AddInlineAllocationObserver(&observer2);

    AllocateUnaligned(new_space, 2048);
    CHECK_EQ(observer1.count(), 20);
    CHECK_EQ(observer2.count(), 1);

    AllocateUnaligned(new_space, 104);
    CHECK_EQ(observer1.count(), 20);
    CHECK_EQ(observer2.count(), 2);

    // Callbacks should stop getting called after an observer is removed.
    new_space->RemoveInlineAllocationObserver(&observer1);

    AllocateUnaligned(new_space, 384);
    CHECK_EQ(observer1.count(), 20);  // No more notifications.
    CHECK_EQ(observer2.count(), 3);   // This one is still active.

    // Ensure that PauseInlineAllocationObserversScope works correctly.
    AllocateUnaligned(new_space, 48);
    CHECK_EQ(observer2.count(), 3);
    {
      PauseInlineAllocationObserversScope pause_observers(new_space);
      CHECK_EQ(observer2.count(), 3);
      AllocateUnaligned(new_space, 384);
      CHECK_EQ(observer2.count(), 3);
    }
    CHECK_EQ(observer2.count(), 3);
    // Coupled with the 48 bytes allocated before the pause, another 48 bytes
    // allocated here should trigger a notification.
    AllocateUnaligned(new_space, 48);
    CHECK_EQ(observer2.count(), 4);

    new_space->RemoveInlineAllocationObserver(&observer2);
    AllocateUnaligned(new_space, 384);
    CHECK_EQ(observer1.count(), 20);
    CHECK_EQ(observer2.count(), 4);
  }
  isolate->Dispose();
}


UNINITIALIZED_TEST(InlineAllocationObserverCadence) {
  v8::Isolate::CreateParams create_params;
  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
  v8::Isolate* isolate = v8::Isolate::New(create_params);
  {
    v8::Isolate::Scope isolate_scope(isolate);
    v8::HandleScope handle_scope(isolate);
    v8::Context::New(isolate)->Enter();

    Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);

    NewSpace* new_space = i_isolate->heap()->new_space();

    Observer observer1(512);
    new_space->AddInlineAllocationObserver(&observer1);
    Observer observer2(576);
    new_space->AddInlineAllocationObserver(&observer2);

    for (int i = 0; i < 512; ++i) {
      AllocateUnaligned(new_space, 32);
    }

    new_space->RemoveInlineAllocationObserver(&observer1);
    new_space->RemoveInlineAllocationObserver(&observer2);

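    // 512 allocations x 32 bytes = 16384 bytes in total: 16384 / 512 = 32
    // steps for observer1, while 576 divides into 16384 only 28 whole times.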
    CHECK_EQ(observer1.count(), 32);
    CHECK_EQ(observer2.count(), 28);
  }
  isolate->Dispose();
}

}  // namespace internal
}  // namespace v8