| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/v8.h" | 5 #include "src/v8.h" |
| 6 | 6 |
| 7 #include "src/base/bits.h" | 7 #include "src/base/bits.h" |
| 8 #include "src/base/platform/platform.h" | 8 #include "src/base/platform/platform.h" |
| 9 #include "src/full-codegen.h" | 9 #include "src/full-codegen.h" |
| 10 #include "src/heap/mark-compact.h" | 10 #include "src/heap/mark-compact.h" |
| (...skipping 1441 matching lines...) | (...skipping 1441 matching lines...) |
| 1452 int remaining_in_page = static_cast<int>(limit - top); | 1452 int remaining_in_page = static_cast<int>(limit - top); |
| 1453 heap()->CreateFillerObjectAt(top, remaining_in_page); | 1453 heap()->CreateFillerObjectAt(top, remaining_in_page); |
| 1454 pages_used_++; | 1454 pages_used_++; |
| 1455 UpdateAllocationInfo(); | 1455 UpdateAllocationInfo(); |
| 1456 | 1456 |
| 1457 return true; | 1457 return true; |
| 1458 } | 1458 } |
| 1459 | 1459 |
| 1460 | 1460 |
| 1461 AllocationResult NewSpace::SlowAllocateRaw(int size_in_bytes, | 1461 AllocationResult NewSpace::SlowAllocateRaw(int size_in_bytes, |
| 1462 bool double_aligned) { | 1462 AllocationAlignment alignment) { |
| 1463 Address old_top = allocation_info_.top(); | 1463 Address old_top = allocation_info_.top(); |
| 1464 Address high = to_space_.page_high(); | 1464 Address high = to_space_.page_high(); |
| 1465 if (allocation_info_.limit() < high) { | 1465 if (allocation_info_.limit() < high) { |
| 1466 // Either the limit has been lowered because linear allocation was disabled | 1466 // Either the limit has been lowered because linear allocation was disabled |
| 1467 // or because incremental marking wants to get a chance to do a step. Set | 1467 // or because incremental marking wants to get a chance to do a step. Set |
| 1468 // the new limit accordingly. | 1468 // the new limit accordingly. |
| 1469 int aligned_size = size_in_bytes; | 1469 int aligned_size = size_in_bytes; |
| 1470 aligned_size += (double_aligned ? kPointerSize : 0); | 1470 aligned_size += (alignment != kWordAligned) ? kPointerSize : 0; |
| 1471 Address new_top = old_top + aligned_size; | 1471 Address new_top = old_top + aligned_size; |
| 1472 int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_); | 1472 int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_); |
| 1473 heap()->incremental_marking()->Step(bytes_allocated, | 1473 heap()->incremental_marking()->Step(bytes_allocated, |
| 1474 IncrementalMarking::GC_VIA_STACK_GUARD); | 1474 IncrementalMarking::GC_VIA_STACK_GUARD); |
| 1475 UpdateInlineAllocationLimit(aligned_size); | 1475 UpdateInlineAllocationLimit(aligned_size); |
| 1476 top_on_previous_step_ = new_top; | 1476 top_on_previous_step_ = new_top; |
| 1477 if (double_aligned) return AllocateRawDoubleAligned(size_in_bytes); | 1477 if (alignment == kDoubleAligned) |
| | 1478 return AllocateRawAligned(size_in_bytes, kDoubleAligned); |
| | 1479 else if (alignment == kDoubleUnaligned) |
| | 1480 return AllocateRawAligned(size_in_bytes, kDoubleUnaligned); |
| 1478 return AllocateRaw(size_in_bytes); | 1481 return AllocateRaw(size_in_bytes); |
| 1479 } else if (AddFreshPage()) { | 1482 } else if (AddFreshPage()) { |
| 1480 // Switched to new page. Try allocating again. | 1483 // Switched to new page. Try allocating again. |
| 1481 int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_); | 1484 int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_); |
| 1482 heap()->incremental_marking()->Step(bytes_allocated, | 1485 heap()->incremental_marking()->Step(bytes_allocated, |
| 1483 IncrementalMarking::GC_VIA_STACK_GUARD); | 1486 IncrementalMarking::GC_VIA_STACK_GUARD); |
| 1484 top_on_previous_step_ = to_space_.page_low(); | 1487 top_on_previous_step_ = to_space_.page_low(); |
| 1485 if (double_aligned) return AllocateRawDoubleAligned(size_in_bytes); | 1488 if (alignment == kDoubleAligned) |
| | 1489 return AllocateRawAligned(size_in_bytes, kDoubleAligned); |
| | 1490 else if (alignment == kDoubleUnaligned) |
| | 1491 return AllocateRawAligned(size_in_bytes, kDoubleUnaligned); |
| 1486 return AllocateRaw(size_in_bytes); | 1492 return AllocateRaw(size_in_bytes); |
| 1487 } else { | 1493 } else { |
| 1488 return AllocationResult::Retry(); | 1494 return AllocationResult::Retry(); |
| 1489 } | 1495 } |
| 1490 } | 1496 } |
| 1491 | 1497 |
| 1492 | 1498 |
| 1493 #ifdef VERIFY_HEAP | 1499 #ifdef VERIFY_HEAP |
| 1494 // We do not use the SemiSpaceIterator because verification doesn't assume | 1500 // We do not use the SemiSpaceIterator because verification doesn't assume |
| 1495 // that it works (it depends on the invariants we are checking). | 1501 // that it works (it depends on the invariants we are checking). |
| (...skipping 1631 matching lines...) | (...skipping 1631 matching lines...) |
| 3127 object->ShortPrint(); | 3133 object->ShortPrint(); |
| 3128 PrintF("\n"); | 3134 PrintF("\n"); |
| 3129 } | 3135 } |
| 3130 printf(" --------------------------------------\n"); | 3136 printf(" --------------------------------------\n"); |
| 3131 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 3137 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
| 3132 } | 3138 } |
| 3133 | 3139 |
| 3134 #endif // DEBUG | 3140 #endif // DEBUG |
| 3135 } | 3141 } |
| 3136 } // namespace v8::internal | 3142 } // namespace v8::internal |
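Note on the change to `NewSpace::SlowAllocateRaw` above: the patch replaces the old `bool double_aligned` parameter with an `AllocationAlignment` value (`kWordAligned`, `kDoubleAligned`, `kDoubleUnaligned`) and reserves one extra word in the slow path whenever the requested alignment is not word alignment (`aligned_size += (alignment != kWordAligned) ? kPointerSize : 0;`). The sketch below is a minimal, self-contained illustration of why a single filler word suffices on a 32-bit heap; the `FillToAlign` helper and the hard-coded sizes are assumptions for illustration, not the V8 API.

```cpp
#include <cstdint>
#include <cstdio>

// Minimal sketch, not the V8 implementation: the enum mirrors the
// AllocationAlignment names used in the patch; sizes assume a 32-bit heap.
enum AllocationAlignment { kWordAligned, kDoubleAligned, kDoubleUnaligned };

const int kPointerSize = 4;  // assumption: 32-bit pointers
const int kDoubleSize = 8;
const int kDoubleAlignmentMask = kDoubleSize - 1;

// Filler bytes needed in front of an object allocated at |top| so that the
// requested alignment holds.
int FillToAlign(uintptr_t top, AllocationAlignment alignment) {
  if (alignment == kDoubleAligned && (top & kDoubleAlignmentMask) != 0)
    return kPointerSize;  // move the object start onto an 8-byte boundary
  if (alignment == kDoubleUnaligned && (top & kDoubleAlignmentMask) == 0)
    return kPointerSize;  // move the word after the start onto a boundary
  return 0;               // kWordAligned, or already positioned correctly
}

int main() {
  printf("%d %d\n", FillToAlign(0x1000, kDoubleAligned),     // 0
                    FillToAlign(0x1004, kDoubleAligned));    // 4
  printf("%d %d\n", FillToAlign(0x1000, kDoubleUnaligned),   // 4
                    FillToAlign(0x1004, kDoubleUnaligned));  // 0
  return 0;
}
```

With 4-byte pointers, the allocation top is either on an 8-byte boundary or exactly one word off it, so one `kPointerSize` filler is always enough to satisfy either `kDoubleAligned` (object start on the boundary) or `kDoubleUnaligned` (the word after the start on the boundary); that is what the extra word added to `aligned_size` in the slow path accounts for.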