Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 189 matching lines...) | |
| 200 if (requested <= allocation_list_[current_allocation_block_index_].size) { | 200 if (requested <= allocation_list_[current_allocation_block_index_].size) { |
| 201 return; // Found a large enough allocation block. | 201 return; // Found a large enough allocation block. |
| 202 } | 202 } |
| 203 } | 203 } |
| 204 | 204 |
| 205 // Code range is full or too fragmented. | 205 // Code range is full or too fragmented. |
| 206 V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock"); | 206 V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock"); |
| 207 } | 207 } |
| 208 | 208 |
| 209 | 209 |
| 210 | |
| 211 Address CodeRange::AllocateRawMemory(const size_t requested, | 210 Address CodeRange::AllocateRawMemory(const size_t requested, |
| 212 size_t* allocated) { | 211 size_t* allocated, |
| | 212 size_t initial_commit_size) { |
| | 213 ASSERT(initial_commit_size <= requested); |
| 213 ASSERT(current_allocation_block_index_ < allocation_list_.length()); | 214 ASSERT(current_allocation_block_index_ < allocation_list_.length()); |
| 214 if (requested > allocation_list_[current_allocation_block_index_].size) { | 215 if (requested > allocation_list_[current_allocation_block_index_].size) { |
| 215 // Find an allocation block large enough. This function call may | 216 // Find an allocation block large enough. This function call may |
| 216 // call V8::FatalProcessOutOfMemory if it cannot find a large enough block. | 217 // call V8::FatalProcessOutOfMemory if it cannot find a large enough block. |
| 217 GetNextAllocationBlock(requested); | 218 GetNextAllocationBlock(requested); |
| 218 } | 219 } |
| 219 // Commit the requested memory at the start of the current allocation block. | 220 // Commit the requested memory at the start of the current allocation block. |
| 220 size_t aligned_requested = RoundUp(requested, MemoryChunk::kAlignment); | 221 size_t aligned_requested = RoundUp(requested, MemoryChunk::kAlignment); |
| 221 FreeBlock current = allocation_list_[current_allocation_block_index_]; | 222 FreeBlock current = allocation_list_[current_allocation_block_index_]; |
| 222 if (aligned_requested >= (current.size - Page::kPageSize)) { | 223 if (aligned_requested >= (current.size - Page::kPageSize)) { |
| 223 // Don't leave a small free block, useless for a large object or chunk. | 224 // Don't leave a small free block, useless for a large object or chunk. |
| 224 *allocated = current.size; | 225 *allocated = current.size; |
| 225 } else { | 226 } else { |
| 226 *allocated = aligned_requested; | 227 *allocated = aligned_requested; |
| 227 } | 228 } |
| 228 ASSERT(*allocated <= current.size); | 229 ASSERT(*allocated <= current.size); |
| 229 ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment)); | 230 ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment)); |
| 230 if (!MemoryAllocator::CommitCodePage(code_range_, | 231 if (!MemoryAllocator::CommitCodePage(code_range_, |
| 231 current.start, | 232 current.start, |
| 232 *allocated)) { | 233 initial_commit_size)) { |
| 233 *allocated = 0; | 234 *allocated = 0; |
| 234 return NULL; | 235 return NULL; |
| 235 } | 236 } |
| 236 allocation_list_[current_allocation_block_index_].start += *allocated; | 237 allocation_list_[current_allocation_block_index_].start += *allocated; |
| 237 allocation_list_[current_allocation_block_index_].size -= *allocated; | 238 allocation_list_[current_allocation_block_index_].size -= *allocated; |
| 238 if (*allocated == current.size) { | 239 if (*allocated == current.size) { |
| 239 GetNextAllocationBlock(0); // This block is used up, get the next one. | 240 GetNextAllocationBlock(0); // This block is used up, get the next one. |
| 240 } | 241 } |
| 241 return current.start; | 242 return current.start; |
| 242 } | 243 } |
| 243 | 244 |
| 244 | 245 |
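The heart of the change is visible above: `AllocateRawMemory` still carves `requested` bytes out of the code range and advances the free list by the full `*allocated` amount, but only `initial_commit_size` bytes are actually committed; the tail stays as reserved address space. A minimal standalone sketch of that reserve-then-commit-a-prefix pattern, using POSIX primitives rather than V8's `VirtualMemory` wrapper (sizes are assumed to be page-aligned):

```cpp
#include <sys/mman.h>
#include <cassert>
#include <cstddef>

// Reserve 'reserved' bytes of address space, but make only the first
// 'committed' bytes usable; the tail can be committed later on demand.
// Both sizes are assumed to be multiples of the OS page size.
void* ReserveWithPartialCommit(size_t reserved, size_t committed) {
  assert(committed <= reserved);
  // PROT_NONE: the range is reserved, but touching it faults.
  void* base = mmap(NULL, reserved, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base == MAP_FAILED) return NULL;
  // "Commit" the prefix by granting read/write access to it.
  if (committed > 0 &&
      mprotect(base, committed, PROT_READ | PROT_WRITE) != 0) {
    munmap(base, reserved);
    return NULL;
  }
  return base;
}
```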
| | 246 bool CodeRange::RecommitRawMemory(Address start, size_t size) { |

> danno (2013/01/16 14:48:54): If you make the changes in MemoryChunk to support […]
> haitao.feng (2013/01/17 05:34:01): Done.
| | 247 ASSERT(reinterpret_cast<int64_t>(start) % MemoryChunk::kAlignment == |
| | 248 MemoryAllocator::CodePageAreaStartOffset()); |
| | 249 |
| | 250 // Recommit page body (executable). |
| | 251 if (!code_range_->Commit(start, size, true)) return false; |
| | 252 // Append a guard page. |
| | 253 if (!code_range_->Guard(start + size)) return false; |
> danno (2013/01/16 14:48:54): Guard pages are managed by the MemoryChunk (not he[…]
> haitao.feng (2013/01/17 05:34:01): Done.
| | 254 |
| | 255 return true; |
| | 256 } |
| | 257 |
| | 258 |
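danno's note about guard pages was addressed in a later patch set: once the `MemoryChunk` owns its guard page, the code range's recommit path only needs to re-commit the executable body. A plausible shape for the revised method, offered as an assumption about the follow-up rather than the code shown above:

```cpp
// Sketch: with guard pages managed by MemoryChunk, RecommitRawMemory
// reduces to re-committing the body as executable memory.
bool CodeRange::RecommitRawMemory(Address start, size_t size) {
  return code_range_->Commit(start, size, true);  // true == EXECUTABLE
}
```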
| 245 void CodeRange::FreeRawMemory(Address address, size_t length) { | 259 void CodeRange::FreeRawMemory(Address address, size_t length) { |
| 246 ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment)); | 260 ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment)); |
| 247 free_list_.Add(FreeBlock(address, length)); | 261 free_list_.Add(FreeBlock(address, length)); |
| 248 code_range_->Uncommit(address, length); | 262 code_range_->Uncommit(address, length); |
| 249 } | 263 } |
| 250 | 264 |
| 251 | 265 |
| 252 void CodeRange::TearDown() { | 266 void CodeRange::TearDown() { |
| 253 delete code_range_; // Frees all memory in the virtual memory range. | 267 delete code_range_; // Frees all memory in the virtual memory range. |
| 254 code_range_ = NULL; | 268 code_range_ = NULL; |
| (...skipping 84 matching lines...) | |
| 339 } | 353 } |
| 340 | 354 |
| 341 | 355 |
| 342 Address MemoryAllocator::ReserveAlignedMemory(size_t size, | 356 Address MemoryAllocator::ReserveAlignedMemory(size_t size, |
| 343 size_t alignment, | 357 size_t alignment, |
| 344 VirtualMemory* controller) { | 358 VirtualMemory* controller) { |
| 345 VirtualMemory reservation(size, alignment); | 359 VirtualMemory reservation(size, alignment); |
| 346 | 360 |
| 347 if (!reservation.IsReserved()) return NULL; | 361 if (!reservation.IsReserved()) return NULL; |
| 348 size_ += reservation.size(); | 362 size_ += reservation.size(); |
| 349 Address base = RoundUp(static_cast<Address>(reservation.address()), | 363 Address base = static_cast<Address>(reservation.address()); |
| 350 alignment); | |
| 351 controller->TakeControl(&reservation); | 364 controller->TakeControl(&reservation); |
| 352 return base; | 365 return base; |
| 353 } | 366 } |
| 354 | 367 |
| 355 | 368 |
| 356 Address MemoryAllocator::AllocateAlignedMemory(size_t size, | 369 Address MemoryAllocator::AllocateAlignedMemory(size_t size, |
| 357 size_t alignment, | 370 size_t alignment, |
| | 371 size_t initial_commit_size, |
| 358 Executability executable, | 372 Executability executable, |
| 359 VirtualMemory* controller) { | 373 VirtualMemory* controller) { |
| | 374 ASSERT(initial_commit_size <= size); |
| 360 VirtualMemory reservation; | 375 VirtualMemory reservation; |
| 361 Address base = ReserveAlignedMemory(size, alignment, &reservation); | 376 Address base = ReserveAlignedMemory(size, alignment, &reservation); |
| 362 if (base == NULL) return NULL; | 377 if (base == NULL) return NULL; |
| 363 | 378 |
| 364 if (executable == EXECUTABLE) { | 379 if (executable == EXECUTABLE) { |
| 365 if (!CommitCodePage(&reservation, base, size)) { | 380 if (!CommitCodePage(&reservation, base, initial_commit_size)) { |
> danno (2013/01/16 14:48:54): CommitCodePage should take both size and initial_commit_size […]
> haitao.feng (2013/01/17 05:34:01): Done.
| 366 base = NULL; | 381 base = NULL; |
| 367 } | 382 } |
| 368 } else { | 383 } else { |
| 369 if (!reservation.Commit(base, size, false)) { | 384 if (!reservation.Commit(base, initial_commit_size, false)) { |
| 370 base = NULL; | 385 base = NULL; |
| 371 } | 386 } |
| 372 } | 387 } |
| 373 | 388 |
| 374 if (base == NULL) { | 389 if (base == NULL) { |
| 375 // Failed to commit the body. Release the mapping and any partially | 390 // Failed to commit the body. Release the mapping and any partially |
| 376 // commited regions inside it. | 391 // commited regions inside it. |
| 377 reservation.Release(); | 392 reservation.Release(); |
| 378 return NULL; | 393 return NULL; |
| 379 } | 394 } |
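In this patch set `CommitCodePage` receives only the initial commit size, which is why danno asks for both sizes: the guard page lives at the end of the full reservation, so the function cannot place it correctly from the committed prefix alone. A hypothetical signature in the direction he suggests (parameter names are assumptions, not the landed API):

```cpp
// Hypothetical revision: commit the header and the body prefix, but
// compute the guard page position from the full reserved size.
static bool CommitCodePage(VirtualMemory* vm,
                           Address start,
                           size_t reserved_size,  // full chunk reservation
                           size_t commit_size);   // prefix to commit now
```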
| (...skipping 83 matching lines...) | |
| 463 } | 478 } |
| 464 | 479 |
| 465 if (owner == heap->old_data_space()) { | 480 if (owner == heap->old_data_space()) { |
| 466 chunk->SetFlag(CONTAINS_ONLY_DATA); | 481 chunk->SetFlag(CONTAINS_ONLY_DATA); |
| 467 } | 482 } |
| 468 | 483 |
| 469 return chunk; | 484 return chunk; |
| 470 } | 485 } |
| 471 | 486 |
| 472 | 487 |
| | 488 bool MemoryChunk::RecommitBody(size_t body_size, Executability executable) { |
> danno (2013/01/16 14:48:54): I think I'd prefer that the MemoryChunk remembered […]
> haitao.feng (2013/01/17 05:34:01): I am using area_end_ - area_start_ to remember "co[…]
| | 489 ASSERT(area_start_ + body_size <= area_end_); |
| | 490 if (reservation_.IsReserved()) { |
| | 491 if (executable == EXECUTABLE) { |
| | 492 // Recommit page body (executable). |
| | 493 if (!reservation_.Commit(area_start_, body_size, true)) return false; |
| | 494 // Append a guard page. |
| | 495 if (!reservation_.Guard(area_start_ + body_size)) return false; |
> danno (2013/01/16 14:48:54): As mentioned above, the guard page should always b[…]
> haitao.feng (2013/01/17 05:34:01): Done.
| | 496 } else { |
| | 497 if (!reservation_.Commit(area_start_, body_size, false)) return false; |
| | 498 } |
| | 499 } else { |
| | 500 ASSERT(heap_->isolate()->code_range()->exists() && |
| | 501 (executable == EXECUTABLE)); |
| | 502 // Recommit page body (executable). |
| | 503 if (!heap_->isolate()->code_range()->RecommitRawMemory(area_start_, |
> danno (2013/01/16 14:48:54): If you change this method to be called "CommitBody" […]
> haitao.feng (2013/01/17 05:34:01): Done.
| | 504 body_size)) { |
| | 505 return false; |
| | 506 } |
| | 507 } |
| | 508 |
| | 509 if (Heap::ShouldZapGarbage()) { |
> danno (2013/01/16 14:48:54): Just zap the delta of uncommitted pages.
> haitao.feng (2013/01/17 05:34:01): Done.
| | 510 heap_->isolate()->memory_allocator()->ZapBlock(area_start_, body_size); |
| | 511 } |
| | 512 |
| | 513 return true; |
| | 514 } |
| | 515 |
| | 516 |
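Taken together, danno's comments sketch a slightly different design: the chunk remembers how much of its body is committed, the guard page is placed once at allocation time, and growing the body commits (and, when zapping, zaps) only the newly committed delta. A sketch of that direction, where `committed_body_size_` is an assumed new field; the landed patch instead derived the committed size from `area_end_ - area_start_`, per haitao.feng's reply above:

```cpp
// Sketch only; field and method names are assumptions, not the landed code.
bool MemoryChunk::CommitBody(size_t body_size, Executability executable) {
  if (body_size <= committed_body_size_) return true;  // already committed
  Address delta_start = area_start_ + committed_body_size_;
  size_t delta = body_size - committed_body_size_;
  if (!reservation_.Commit(delta_start, delta, executable == EXECUTABLE)) {
    return false;
  }
  if (Heap::ShouldZapGarbage()) {
    // Zap only the delta of newly committed pages, as suggested.
    heap_->isolate()->memory_allocator()->ZapBlock(delta_start, delta);
  }
  committed_body_size_ = body_size;
  return true;
}
```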
| 473 void MemoryChunk::InsertAfter(MemoryChunk* other) { | 517 void MemoryChunk::InsertAfter(MemoryChunk* other) { |
| 474 next_chunk_ = other->next_chunk_; | 518 next_chunk_ = other->next_chunk_; |
| 475 prev_chunk_ = other; | 519 prev_chunk_ = other; |
| 476 other->next_chunk_->prev_chunk_ = this; | 520 other->next_chunk_->prev_chunk_ = this; |
| 477 other->next_chunk_ = this; | 521 other->next_chunk_ = this; |
| 478 } | 522 } |
| 479 | 523 |
| 480 | 524 |
| 481 void MemoryChunk::Unlink() { | 525 void MemoryChunk::Unlink() { |
| 482 if (!InNewSpace() && IsFlagSet(SCAN_ON_SCAVENGE)) { | 526 if (!InNewSpace() && IsFlagSet(SCAN_ON_SCAVENGE)) { |
| 483 heap_->decrement_scan_on_scavenge_pages(); | 527 heap_->decrement_scan_on_scavenge_pages(); |
| 484 ClearFlag(SCAN_ON_SCAVENGE); | 528 ClearFlag(SCAN_ON_SCAVENGE); |
| 485 } | 529 } |
| 486 next_chunk_->prev_chunk_ = prev_chunk_; | 530 next_chunk_->prev_chunk_ = prev_chunk_; |
| 487 prev_chunk_->next_chunk_ = next_chunk_; | 531 prev_chunk_->next_chunk_ = next_chunk_; |
| 488 prev_chunk_ = NULL; | 532 prev_chunk_ = NULL; |
| 489 next_chunk_ = NULL; | 533 next_chunk_ = NULL; |
| 490 } | 534 } |
| 491 | 535 |
| 492 | 536 |
| 493 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size, | 537 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size, |
| | 538 intptr_t commit_size, |
| 494 Executability executable, | 539 Executability executable, |
| 495 Space* owner) { | 540 Space* owner) { |
| 496 size_t chunk_size; | 541 ASSERT(commit_size <= body_size); |
| | 542 |
| | 543 size_t chunk_size, initial_commit_size; |
> danno (2013/01/16 14:48:54): nit: declare initial_commit_size in the scope that […]
| 497 Heap* heap = isolate_->heap(); | 544 Heap* heap = isolate_->heap(); |
| 498 Address base = NULL; | 545 Address base = NULL; |
| 499 VirtualMemory reservation; | 546 VirtualMemory reservation; |
| 500 Address area_start = NULL; | 547 Address area_start = NULL; |
| 501 Address area_end = NULL; | 548 Address area_end = NULL; |
| 502 | 549 |
| 503 if (executable == EXECUTABLE) { | 550 if (executable == EXECUTABLE) { |
| 504 chunk_size = RoundUp(CodePageAreaStartOffset() + body_size, | 551 chunk_size = RoundUp(CodePageAreaStartOffset() + body_size, |
| 505 OS::CommitPageSize()) + CodePageGuardSize(); | 552 OS::CommitPageSize()) + CodePageGuardSize(); |
| 506 | 553 |
| 507 // Check executable memory limit. | 554 // Check executable memory limit. |
| 508 if (size_executable_ + chunk_size > capacity_executable_) { | 555 if (size_executable_ + chunk_size > capacity_executable_) { |
| 509 LOG(isolate_, | 556 LOG(isolate_, |
| 510 StringEvent("MemoryAllocator::AllocateRawMemory", | 557 StringEvent("MemoryAllocator::AllocateRawMemory", |
| 511 "V8 Executable Allocation capacity exceeded")); | 558 "V8 Executable Allocation capacity exceeded")); |
| 512 return NULL; | 559 return NULL; |
| 513 } | 560 } |
| 514 | 561 |
| | 562 initial_commit_size = RoundUp(CodePageAreaStartOffset() + commit_size, |
> danno (2013/01/16 14:48:54): I don't think you need to round this up if you mak[…]
> danno (2013/01/16 14:48:54): As noted above: `size_t initial_commit_size = ...`
> haitao.feng (2013/01/17 05:34:01): Done.
> haitao.feng (2013/01/17 05:34:01): Done.
| | 563 OS::CommitPageSize()) + CodePageGuardSize(); |
| | 564 |
| 515 // Allocate executable memory either from code range or from the | 565 // Allocate executable memory either from code range or from the |
| 516 // OS. | 566 // OS. |
| 517 if (isolate_->code_range()->exists()) { | 567 if (isolate_->code_range()->exists()) { |
| 518 base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size); | 568 base = isolate_->code_range()->AllocateRawMemory(chunk_size, |
| | 569 &chunk_size, |
| | 570 initial_commit_size); |
| 519 ASSERT(IsAligned(reinterpret_cast<intptr_t>(base), | 571 ASSERT(IsAligned(reinterpret_cast<intptr_t>(base), |
| 520 MemoryChunk::kAlignment)); | 572 MemoryChunk::kAlignment)); |
| 521 if (base == NULL) return NULL; | 573 if (base == NULL) return NULL; |
| 522 size_ += chunk_size; | 574 size_ += chunk_size; |
| 523 // Update executable memory size. | 575 // Update executable memory size. |
| 524 size_executable_ += chunk_size; | 576 size_executable_ += chunk_size; |
| 525 } else { | 577 } else { |
| 526 base = AllocateAlignedMemory(chunk_size, | 578 base = AllocateAlignedMemory(chunk_size, |
| 527 MemoryChunk::kAlignment, | 579 MemoryChunk::kAlignment, |
| | 580 initial_commit_size, |
| 528 executable, | 581 executable, |
| 529 &reservation); | 582 &reservation); |
| 530 if (base == NULL) return NULL; | 583 if (base == NULL) return NULL; |
| 531 // Update executable memory size. | 584 // Update executable memory size. |
| 532 size_executable_ += reservation.size(); | 585 size_executable_ += reservation.size(); |
| 533 } | 586 } |
| 534 | 587 |
| 535 if (Heap::ShouldZapGarbage()) { | 588 if (Heap::ShouldZapGarbage()) { |
| 536 ZapBlock(base, CodePageGuardStartOffset()); | 589 ZapBlock(base, CodePageGuardStartOffset()); |
| 537 ZapBlock(base + CodePageAreaStartOffset(), body_size); | 590 ZapBlock(base + CodePageAreaStartOffset(), commit_size); |
| 538 } | 591 } |
| 539 | 592 |
| 540 area_start = base + CodePageAreaStartOffset(); | 593 area_start = base + CodePageAreaStartOffset(); |
| 541 area_end = area_start + body_size; | 594 area_end = area_start + body_size; |
| 542 } else { | 595 } else { |
| 543 chunk_size = MemoryChunk::kObjectStartOffset + body_size; | 596 chunk_size = MemoryChunk::kObjectStartOffset + body_size; |
| | 597 initial_commit_size = MemoryChunk::kObjectStartOffset + commit_size; |
> danno (2013/01/16 14:48:54): As noted above: `size_t initial_commit_size = ...`
> haitao.feng (2013/01/17 05:34:01): Done.
| 544 base = AllocateAlignedMemory(chunk_size, | 598 base = AllocateAlignedMemory(chunk_size, |
| 545 MemoryChunk::kAlignment, | 599 MemoryChunk::kAlignment, |
| | 600 initial_commit_size, |
| 546 executable, | 601 executable, |
| 547 &reservation); | 602 &reservation); |
| 548 | 603 |
| 549 if (base == NULL) return NULL; | 604 if (base == NULL) return NULL; |
| 550 | 605 |
| 551 if (Heap::ShouldZapGarbage()) { | 606 if (Heap::ShouldZapGarbage()) { |
| 552 ZapBlock(base, chunk_size); | 607 ZapBlock(base, commit_size); |
| 553 } | 608 } |
| 554 | 609 |
| 555 area_start = base + Page::kObjectStartOffset; | 610 area_start = base + Page::kObjectStartOffset; |
| 556 area_end = base + chunk_size; | 611 area_end = base + chunk_size; |
| 557 } | 612 } |
| 558 | 613 |
| 559 isolate_->counters()->memory_allocated()-> | 614 isolate_->counters()->memory_allocated()-> |
| 560 Increment(static_cast<int>(chunk_size)); | 615 Increment(static_cast<int>(chunk_size)); |
| 561 | 616 |
| 562 LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size)); | 617 LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size)); |
| (...skipping 10 matching lines...) | |
| 573 executable, | 628 executable, |
| 574 owner); | 629 owner); |
| 575 result->set_reserved_memory(&reservation); | 630 result->set_reserved_memory(&reservation); |
| 576 return result; | 631 return result; |
| 577 } | 632 } |
| 578 | 633 |
| 579 | 634 |
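`AllocateChunk` now distinguishes the reserved body size from the initially committed size. The two call sites below pass the same value twice, so their behavior is unchanged; the split exists so a future caller can reserve a full body while committing only a prefix. Roughly (the second call is a hedged illustration, not a call site in this patch):

```cpp
// Existing behavior: commit the whole body immediately.
MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);

// Hypothetical lazy caller: reserve the full body, commit only a prefix,
// and grow it later through MemoryChunk::RecommitBody().
MemoryChunk* lazy = AllocateChunk(body_size, initial_size, executable, owner);
```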
| 580 Page* MemoryAllocator::AllocatePage(intptr_t size, | 635 Page* MemoryAllocator::AllocatePage(intptr_t size, |
| 581 PagedSpace* owner, | 636 PagedSpace* owner, |
| 582 Executability executable) { | 637 Executability executable) { |
| 583 MemoryChunk* chunk = AllocateChunk(size, executable, owner); | 638 MemoryChunk* chunk = AllocateChunk(size, size, executable, owner); |
| 584 | 639 |
| 585 if (chunk == NULL) return NULL; | 640 if (chunk == NULL) return NULL; |
| 586 | 641 |
| 587 return Page::Initialize(isolate_->heap(), chunk, executable, owner); | 642 return Page::Initialize(isolate_->heap(), chunk, executable, owner); |
| 588 } | 643 } |
| 589 | 644 |
| 590 | 645 |
| 591 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size, | 646 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size, |
| 592 Space* owner, | 647 Space* owner, |
| 593 Executability executable) { | 648 Executability executable) { |
| 594 MemoryChunk* chunk = AllocateChunk(object_size, executable, owner); | 649 MemoryChunk* chunk = AllocateChunk(object_size, |
| | 650 object_size, |
| | 651 executable, |
| | 652 owner); |
| 595 if (chunk == NULL) return NULL; | 653 if (chunk == NULL) return NULL; |
| 596 return LargePage::Initialize(isolate_->heap(), chunk); | 654 return LargePage::Initialize(isolate_->heap(), chunk); |
| 597 } | 655 } |
| 598 | 656 |
| 599 | 657 |
| 600 void MemoryAllocator::Free(MemoryChunk* chunk) { | 658 void MemoryAllocator::Free(MemoryChunk* chunk) { |
| 601 LOG(isolate_, DeleteEvent("MemoryChunk", chunk)); | 659 LOG(isolate_, DeleteEvent("MemoryChunk", chunk)); |
| 602 if (chunk->owner() != NULL) { | 660 if (chunk->owner() != NULL) { |
| 603 ObjectSpace space = | 661 ObjectSpace space = |
| 604 static_cast<ObjectSpace>(1 << chunk->owner()->identity()); | 662 static_cast<ObjectSpace>(1 << chunk->owner()->identity()); |
| (...skipping 2381 matching lines...) | |
| 2986 object->ShortPrint(); | 3044 object->ShortPrint(); |
| 2987 PrintF("\n"); | 3045 PrintF("\n"); |
| 2988 } | 3046 } |
| 2989 printf(" --------------------------------------\n"); | 3047 printf(" --------------------------------------\n"); |
| 2990 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 3048 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
| 2991 } | 3049 } |
| 2992 | 3050 |
| 2993 #endif // DEBUG | 3051 #endif // DEBUG |
| 2994 | 3052 |
| 2995 } } // namespace v8::internal | 3053 } } // namespace v8::internal |