| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 321 matching lines...) |
| 332 ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists()); | 332 ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists()); |
| 333 bool result = VirtualMemory::ReleaseRegion(base, size); | 333 bool result = VirtualMemory::ReleaseRegion(base, size); |
| 334 USE(result); | 334 USE(result); |
| 335 ASSERT(result); | 335 ASSERT(result); |
| 336 } | 336 } |
| 337 } | 337 } |
| 338 | 338 |
| 339 | 339 |
| 340 Address MemoryAllocator::ReserveAlignedMemory(size_t size, | 340 Address MemoryAllocator::ReserveAlignedMemory(size_t size, |
| 341 size_t alignment, | 341 size_t alignment, |
| 342 VirtualMemory* controller) { | 342 VirtualMemory* controller, |
| 343 VirtualMemory reservation(size, alignment); | 343 intptr_t preferred_placement) { |
| 344 VirtualMemory reservation(size, alignment, preferred_placement); |
| 344 | 345 |
| 345 if (!reservation.IsReserved()) return NULL; | 346 if (!reservation.IsReserved()) return NULL; |
| 346 size_ += reservation.size(); | 347 size_ += reservation.size(); |
| 347 Address base = RoundUp(static_cast<Address>(reservation.address()), | 348 Address base = RoundUp(static_cast<Address>(reservation.address()), |
| 348 alignment); | 349 alignment); |
| 349 controller->TakeControl(&reservation); | 350 controller->TakeControl(&reservation); |
| 350 return base; | 351 return base; |
| 351 } | 352 } |
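A note on the rounding above: the base address handed back by the reservation need not itself be aligned, so RoundUp moves it up to the next alignment boundary within the reserved region. A minimal standalone sketch of that arithmetic, assuming a power-of-two alignment (the helper name is hypothetical, not V8's RoundUp):

    #include <cassert>
    #include <cstdint>

    // Round an address up to the next multiple of a power-of-two alignment.
    // Illustrative stand-in for V8's RoundUp.
    static uintptr_t RoundUpToAlignment(uintptr_t address, uintptr_t alignment) {
      assert((alignment & (alignment - 1)) == 0);  // must be a power of two
      return (address + alignment - 1) & ~(alignment - 1);
    }

    // Example: RoundUpToAlignment(0x12345, 0x1000) == 0x13000.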
| 352 | 353 |
| 353 | 354 |
| 354 Address MemoryAllocator::AllocateAlignedMemory(size_t size, | 355 Address MemoryAllocator::AllocateAlignedMemory(size_t size, |
| 355 size_t alignment, | 356 size_t alignment, |
| 356 Executability executable, | 357 Executability executable, |
| 357 VirtualMemory* controller) { | 358 VirtualMemory* controller, |
| 359 intptr_t preferred_placement) { |
| 358 VirtualMemory reservation; | 360 VirtualMemory reservation; |
| 359 Address base = ReserveAlignedMemory(size, alignment, &reservation); | 361 Address base = ReserveAlignedMemory( |
| 362 size, alignment, &reservation, preferred_placement); |
| 360 if (base == NULL) return NULL; | 363 if (base == NULL) return NULL; |
| 361 if (!reservation.Commit(base, | 364 if (!reservation.Commit(base, |
| 362 size, | 365 size, |
| 363 executable == EXECUTABLE)) { | 366 executable == EXECUTABLE)) { |
| 364 return NULL; | 367 return NULL; |
| 365 } | 368 } |
| 366 controller->TakeControl(&reservation); | 369 controller->TakeControl(&reservation); |
| 367 return base; | 370 return base; |
| 368 } | 371 } |
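AllocateAlignedMemory splits allocation into two steps: reserve address space, then commit backing pages with the requested protection. A hedged sketch of the same two-step pattern using POSIX calls; V8's VirtualMemory wraps platform-specific equivalents of this, so it is not the actual implementation:

    #include <sys/mman.h>
    #include <cstddef>

    // Reserve address space without committing memory: PROT_NONE pages
    // consume no backing store until access is enabled.
    void* ReserveRegion(size_t size) {
      void* base = mmap(NULL, size, PROT_NONE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      return base == MAP_FAILED ? NULL : base;
    }

    // Commit a sub-range by enabling read/write (and, for code pages,
    // execute) access.
    bool CommitRegion(void* address, size_t size, bool executable) {
      int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
      return mprotect(address, size, prot) == 0;
    }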
| 369 | 372 |
| 370 | 373 |
| 374 Map* Page::MapFromIntraPageOffset(int32_t offset) { |
| 375 Heap* heap = HEAP; |
| 376 MapSpace* space = heap->map_space(); |
| 377 Page* page = space->anchor()->next_page(); |
| 378 while (page != space->anchor()) { |
| 379 uintptr_t page_number = |
| 380 reinterpret_cast<uintptr_t>(page) >> kPageSizeBits; |
| 381 // We can't use this method for the odd-numbered pages, only for the |
| 382 // single even-numbered page. |
| 383 if ((page_number & 1) == 0) break; |
| 384 page = page->next_page(); |
| 385 } |
| 386 Address start_of_map = |
| 387 reinterpret_cast<Address>(page) + offset; |
| 388 if (Map::kSize != (1 << Map::kMapSizeBits)) { |
| 389 start_of_map = page->RoundUpToObjectAlignment(start_of_map); |
| 390 } |
| 391 Map* map = Map::cast(HeapObject::FromAddress(start_of_map)); |
| 392 return map; |
| 393 } |
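MapFromIntraPageOffset relies on map pages alternating in page-number parity: only the single even-numbered page can be addressed by a 32-bit intra-page offset alone. A hedged sketch of the round trip, with hypothetical helper names and an illustrative page size (kPageSizeBits is assumed to be 20, i.e. 1 MB pages):

    #include <cstdint>

    static const int kIllustrativePageSizeBits = 20;  // assumed 1 MB pages

    // Hypothetical inverse of MapFromIntraPageOffset: compress a map
    // pointer on the even-numbered map page into an intra-page offset.
    static int32_t IntraPageOffsetFromAddress(uintptr_t map_address) {
      return static_cast<int32_t>(
          map_address & ((1u << kIllustrativePageSizeBits) - 1));
    }

    // The page-number parity test performed by the loop above.
    static bool IsOnEvenPage(uintptr_t address) {
      return ((address >> kIllustrativePageSizeBits) & 1) == 0;
    }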
| 394 |
| 395 |
| 371 void Page::InitializeAsAnchor(PagedSpace* owner) { | 396 void Page::InitializeAsAnchor(PagedSpace* owner) { |
| 372 set_owner(owner); | 397 set_owner(owner); |
| 373 set_prev_page(this); | 398 set_prev_page(this); |
| 374 set_next_page(this); | 399 set_next_page(this); |
| 375 } | 400 } |
| 376 | 401 |
| 377 | 402 |
| 378 NewSpacePage* NewSpacePage::Initialize(Heap* heap, | 403 NewSpacePage* NewSpacePage::Initialize(Heap* heap, |
| 379 Address start, | 404 Address start, |
| 380 SemiSpace* semi_space) { | 405 SemiSpace* semi_space) { |
| (...skipping 71 matching lines...) |
| 452 heap_->decrement_scan_on_scavenge_pages(); | 477 heap_->decrement_scan_on_scavenge_pages(); |
| 453 ClearFlag(SCAN_ON_SCAVENGE); | 478 ClearFlag(SCAN_ON_SCAVENGE); |
| 454 } | 479 } |
| 455 next_chunk_->prev_chunk_ = prev_chunk_; | 480 next_chunk_->prev_chunk_ = prev_chunk_; |
| 456 prev_chunk_->next_chunk_ = next_chunk_; | 481 prev_chunk_->next_chunk_ = next_chunk_; |
| 457 prev_chunk_ = NULL; | 482 prev_chunk_ = NULL; |
| 458 next_chunk_ = NULL; | 483 next_chunk_ = NULL; |
| 459 } | 484 } |
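InitializeAsAnchor and Unlink together implement a sentinel-based circular doubly-linked list: an empty anchor points at itself, so insertion and unlinking never need NULL checks on neighbours. A minimal sketch of the pattern, independent of MemoryChunk:

    #include <cstddef>

    // Sentinel-based circular doubly-linked list (illustration only,
    // not V8's MemoryChunk).
    struct Node {
      Node* prev;
      Node* next;
      Node() : prev(this), next(this) {}  // an empty anchor links to itself

      void InsertAfter(Node* anchor) {
        prev = anchor;
        next = anchor->next;
        anchor->next->prev = this;
        anchor->next = this;
      }

      void Unlink() {
        next->prev = prev;
        prev->next = next;
        prev = next = NULL;
      }
    };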
| 460 | 485 |
| 461 | 486 |
| 487 // Reverses the order of the bits in a 32-bit word. |
| 488 static uintptr_t InvertBits(uintptr_t x) { |
| 489 uintptr_t answer = 0; |
| 490 uintptr_t mask = 1; |
| 491 uintptr_t shift = 31; |
| 492 for (int i = 0; i < 16; i++) { |
| 493 answer |= (x & mask) << shift; |
| 494 mask <<= 1; |
| 495 shift -= 2; |
| 496 } |
| 497 shift = 1; |
| 498 for (int i = 0; i < 16; i++) { |
| 499 answer |= (x & mask) >> shift; |
| 500 mask <<= 1; |
| 501 shift += 2; |
| 502 } |
| 503 return answer; |
| 504 } |
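The two 16-iteration loops split the word in half: the first moves bits 0..15 up to positions 31..16, the second moves bits 16..31 down to positions 15..0. A minimal cross-check against a naive one-bit-at-a-time reversal, assuming a 32-bit uintptr_t as on ARM:

    #include <cstdint>

    // Naive 32-bit reversal, for checking InvertBits.
    static uint32_t ReverseBitsNaive(uint32_t x) {
      uint32_t result = 0;
      for (int i = 0; i < 32; i++) {
        result = (result << 1) | ((x >> i) & 1);
      }
      return result;
    }

    // Bits 0 and 31 trade places, and so on inward:
    //   ReverseBitsNaive(1u) == 0x80000000u
    //   ReverseBitsNaive(0x80000000u) == 1u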
| 505 |
| 506 |
| 462 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size, | 507 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size, |
| 463 Executability executable, | 508 Executability executable, |
| 464 Space* owner) { | 509 Space* owner) { |
| 465 size_t chunk_size = MemoryChunk::kObjectStartOffset + body_size; | 510 size_t chunk_size = MemoryChunk::kObjectStartOffset + body_size; |
| 466 Heap* heap = isolate_->heap(); | 511 Heap* heap = isolate_->heap(); |
| 467 Address base = NULL; | 512 Address base = NULL; |
| 468 VirtualMemory reservation; | 513 VirtualMemory reservation; |
| 469 if (executable == EXECUTABLE) { | 514 if (executable == EXECUTABLE) { |
| 470 // Check executable memory limit. | 515 // Check executable memory limit. |
| 471 if (size_executable_ + chunk_size > capacity_executable_) { | 516 if (size_executable_ + chunk_size > capacity_executable_) { |
| (...skipping 15 matching lines...) |
| 487 size_executable_ += chunk_size; | 532 size_executable_ += chunk_size; |
| 488 } else { | 533 } else { |
| 489 base = AllocateAlignedMemory(chunk_size, | 534 base = AllocateAlignedMemory(chunk_size, |
| 490 MemoryChunk::kAlignment, | 535 MemoryChunk::kAlignment, |
| 491 executable, | 536 executable, |
| 492 &reservation); | 537 &reservation); |
| 493 if (base == NULL) return NULL; | 538 if (base == NULL) return NULL; |
| 494 // Update executable memory size. | 539 // Update executable memory size. |
| 495 size_executable_ += reservation.size(); | 540 size_executable_ += reservation.size(); |
| 496 } | 541 } |
| 542 #if defined(V8_TARGET_ARCH_ARM) |
| 543 } else if (owner->identity() == MAP_SPACE) { |
| 544 // A map space page: try to allocate it at an address that keeps map |
| 545 // comparisons cheap on ARM (even page number first, odd thereafter). |
| 546 uintptr_t place = 1; |
| 547 bool even = true; |
| 548 if (reinterpret_cast<MapSpace*>(owner)->anchor()->next_page() != |
| 549 reinterpret_cast<MapSpace*>(owner)->anchor()) { |
| 550 // Not the first page, so we must allocate at an odd page number. |
| 551 place = 1 << (kBitsPerPointer - kPageSizeBits - 1); |
| 552 even = false; |
| 553 } |
| 554 while (true) { |
| 555 base = AllocateAlignedMemory(chunk_size, |
| 556 MemoryChunk::kAlignment, |
| 557 executable, |
| 558 &reservation, |
| 559 InvertBits(place)); |
| 560 if (base == NULL) return NULL; |
| 561 uintptr_t base_pointer = reinterpret_cast<uintptr_t>(base); |
| 562 // Stop once the page number has the parity we hoped for. |
| 563 if ((((base_pointer >> kPageSizeBits) & 1) == 0) == even) break; |
| 564 // We were given an address we did not want; release it and retry. |
| 565 FreeMemory(&reservation, NOT_EXECUTABLE); |
| 566 place++; |
| 567 } |
| 568 #endif |
| 497 } else { | 569 } else { |
| 498 base = AllocateAlignedMemory(chunk_size, | 570 base = AllocateAlignedMemory(chunk_size, |
| 499 MemoryChunk::kAlignment, | 571 MemoryChunk::kAlignment, |
| 500 executable, | 572 executable, |
| 501 &reservation); | 573 &reservation); |
| 502 | 574 |
| 503 if (base == NULL) return NULL; | 575 if (base == NULL) return NULL; |
| 504 } | 576 } |
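Why the hint passed down is InvertBits(place) rather than place itself: incrementing place walks the low-order bits, and bit reversal turns that into a walk of the high-order bits, so successive placement hints jump between distant halves, quarters, and eighths of the address space instead of clustering in adjacent pages. A small sketch of the resulting hint sequence, assuming a 32-bit address space:

    #include <cstdint>
    #include <cstdio>

    // Bit-reverse an incrementing counter to spread placement hints
    // across the address space (mirrors InvertBits above).
    int main() {
      for (uint32_t place = 1; place <= 4; place++) {
        uint32_t hint = 0;
        for (int i = 0; i < 32; i++) {
          hint = (hint << 1) | ((place >> i) & 1);
        }
        // place 1 -> 0x80000000, place 2 -> 0x40000000,
        // place 3 -> 0xc0000000, place 4 -> 0x20000000.
        printf("place %u -> hint 0x%08x\n", place, hint);
      }
      return 0;
    }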
| 505 | 577 |
| 506 #ifdef DEBUG | 578 #ifdef DEBUG |
| (...skipping 2167 matching lines...) |
| 2674 object->ShortPrint(); | 2746 object->ShortPrint(); |
| 2675 PrintF("\n"); | 2747 PrintF("\n"); |
| 2676 } | 2748 } |
| 2677 printf(" --------------------------------------\n"); | 2749 printf(" --------------------------------------\n"); |
| 2678 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 2750 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
| 2679 } | 2751 } |
| 2680 | 2752 |
| 2681 #endif // DEBUG | 2753 #endif // DEBUG |
| 2682 | 2754 |
| 2683 } } // namespace v8::internal | 2755 } } // namespace v8::internal |