Chromium Code Reviews

Side by Side Diff: src/heap/spaces.cc

Issue 1150593003: Clean up aligned allocation code in preparation for SIMD alignments. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Only check for unaligned double case. Created 5 years, 7 months ago
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "src/v8.h"
 
 #include "src/base/bits.h"
 #include "src/base/platform/platform.h"
 #include "src/full-codegen.h"
 #include "src/heap/mark-compact.h"
(...skipping 1445 matching lines...)
 
   return true;
 }
 
 
 AllocationResult NewSpace::SlowAllocateRaw(int size_in_bytes,
                                            AllocationAlignment alignment) {
   Address old_top = allocation_info_.top();
   Address high = to_space_.page_high();
   if (allocation_info_.limit() < high) {
+    int alignment_size = Heap::GetFillToAlign(old_top, alignment);
+    int aligned_size_in_bytes = size_in_bytes + alignment_size;
+
     // Either the limit has been lowered because linear allocation was disabled
     // or because incremental marking wants to get a chance to do a step. Set
     // the new limit accordingly.
-    int aligned_size = size_in_bytes;
-    aligned_size += (alignment != kWordAligned) ? kPointerSize : 0;
-    Address new_top = old_top + aligned_size;
+    Address new_top = old_top + aligned_size_in_bytes;
     int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
     heap()->incremental_marking()->Step(bytes_allocated,
                                         IncrementalMarking::GC_VIA_STACK_GUARD);
-    UpdateInlineAllocationLimit(aligned_size);
+    UpdateInlineAllocationLimit(aligned_size_in_bytes);
     top_on_previous_step_ = new_top;
-    if (alignment == kDoubleAligned)
-      return AllocateRawAligned(size_in_bytes, kDoubleAligned);
-    else if (alignment == kDoubleUnaligned)
-      return AllocateRawAligned(size_in_bytes, kDoubleUnaligned);
-    return AllocateRawUnaligned(size_in_bytes);
+    if (alignment == kWordAligned) return AllocateRawUnaligned(size_in_bytes);
+    return AllocateRawAligned(size_in_bytes, alignment);
   } else if (AddFreshPage()) {
     // Switched to new page. Try allocating again.
     int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
     heap()->incremental_marking()->Step(bytes_allocated,
                                         IncrementalMarking::GC_VIA_STACK_GUARD);
     top_on_previous_step_ = to_space_.page_low();
-    if (alignment == kDoubleAligned)
-      return AllocateRawAligned(size_in_bytes, kDoubleAligned);
-    else if (alignment == kDoubleUnaligned)
-      return AllocateRawAligned(size_in_bytes, kDoubleUnaligned);
-    return AllocateRawUnaligned(size_in_bytes);
+    if (alignment == kWordAligned) return AllocateRawUnaligned(size_in_bytes);
+    return AllocateRawAligned(size_in_bytes, alignment);
   } else {
     return AllocationResult::Retry();
   }
 }
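The substance of the change is in the chunk above: instead of padding every non-word-aligned request by a fixed kPointerSize and then branching separately on kDoubleAligned and kDoubleUnaligned, the slow path now asks Heap::GetFillToAlign how much filler the current top actually needs, reserves size_in_bytes plus that fill, and forwards the alignment to AllocateRawAligned. Below is a minimal standalone sketch of that fill computation; the constants assume a 32-bit build and the body only approximates the helper the patch calls, so treat it as an illustration rather than the exact V8 implementation.

// Sketch of a GetFillToAlign-style computation (assumed 32-bit layout:
// 4-byte tagged pointers, 8-byte doubles). Not the real V8 helper.
#include <cstdint>
#include <cstdio>

enum AllocationAlignment { kWordAligned, kDoubleAligned, kDoubleUnaligned };

const int kPointerSize = 4;       // assumption: 32-bit tagged pointers
const int kDoubleSize = 8;
const int kDoubleAlignmentMask = kDoubleSize - 1;

// Returns how many filler bytes must precede an object allocated at
// |address| so that its payload satisfies |alignment|.
int GetFillToAlign(uintptr_t address, AllocationAlignment alignment) {
  if (alignment == kDoubleAligned && (address & kDoubleAlignmentMask) != 0) {
    return kPointerSize;  // pad forward to the next 8-byte boundary
  }
  if (alignment == kDoubleUnaligned && (address & kDoubleAlignmentMask) == 0) {
    return kDoubleSize - kPointerSize;  // push the payload off the boundary
  }
  return 0;  // kWordAligned, or the address is already positioned correctly
}

int main() {
  // With the fill known up front, a slow path can reserve
  // size_in_bytes + fill once and leave the actual padding to the
  // aligned allocation routine, instead of special-casing each alignment.
  printf("%d\n", GetFillToAlign(0x1000, kDoubleAligned));    // 0
  printf("%d\n", GetFillToAlign(0x1004, kDoubleAligned));    // 4
  printf("%d\n", GetFillToAlign(0x1000, kDoubleUnaligned));  // 4
  return 0;
}

Collapsing the dispatch to a single alignment == kWordAligned check also keeps the slow path from growing another if/else arm for each new alignment, which matches the issue's stated goal of preparing for SIMD alignments.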
 
 
 #ifdef VERIFY_HEAP
 // We do not use the SemiSpaceIterator because verification doesn't assume
 // that it works (it depends on the invariants we are checking).
 void NewSpace::Verify() {
(...skipping 1630 matching lines...)
     object->ShortPrint();
     PrintF("\n");
   }
   printf(" --------------------------------------\n");
   printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
 }
 
 #endif  // DEBUG
 }
 }  // namespace v8::internal
