| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/v8.h" | 5 #include "src/v8.h" |
| 6 | 6 |
| 7 #include "src/accessors.h" | 7 #include "src/accessors.h" |
| 8 #include "src/api.h" | 8 #include "src/api.h" |
| 9 #include "src/base/bits.h" | 9 #include "src/base/bits.h" |
| 10 #include "src/base/once.h" | 10 #include "src/base/once.h" |
| (...skipping 1347 matching lines...) |
| 1358 void PromotionQueue::Initialize() { | 1358 void PromotionQueue::Initialize() { |
| 1359 // Assumes that a NewSpacePage exactly fits a number of promotion queue | 1359 // Assumes that a NewSpacePage exactly fits a number of promotion queue |
| 1360 // entries (where each is a pair of intptr_t). This allows us to simplify | 1360 // entries (where each is a pair of intptr_t). This allows us to simplify |
| 1361 // the test for when to switch pages. | 1361 // the test for when to switch pages. |
| 1362 DCHECK((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize) == | 1362 DCHECK((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize) == |
| 1363 0); | 1363 0); |
| 1364 limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart()); | 1364 limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart()); |
| 1365 front_ = rear_ = | 1365 front_ = rear_ = |
| 1366 reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd()); | 1366 reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd()); |
| 1367 emergency_stack_ = NULL; | 1367 emergency_stack_ = NULL; |
| 1368 guard_ = false; | |
| 1369 } | 1368 } |
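The DCHECK above is what makes the queue's page-switch test cheap: each promotion queue entry is a pair of intptr_t, and the assert guarantees that the usable body of a page holds a whole number of such pairs, so the downward-growing rear_ pointer can only ever coincide exactly with a page boundary. A stand-alone sketch of the arithmetic, using hypothetical stand-in constants (the real values are Page::kPageSize, MemoryChunk::kBodyOffset and kPointerSize in V8's headers):

    #include <cstdio>

    int main() {
      // Hypothetical stand-ins for the real V8 constants.
      const long kPageSize    = 1L << 20;          // assumed 1 MB page
      const long kBodyOffset  = 64;                // assumed chunk-header size
      const long kPointerSize = sizeof(void*);     // 8 on x64
      const long kEntrySize   = 2 * kPointerSize;  // one (size, object) pair

      // A zero remainder means rear_ lands on page boundaries exactly, so
      // the "time to switch pages" test is a single pointer comparison.
      std::printf("entries per page: %ld, remainder: %ld\n",
                  (kPageSize - kBodyOffset) / kEntrySize,
                  (kPageSize - kBodyOffset) % kEntrySize);
      return 0;
    }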
| 1370 | 1369 |
| 1371 | 1370 |
| 1372 void PromotionQueue::RelocateQueueHead() { | 1371 void PromotionQueue::RelocateQueueHead() { |
| 1373 DCHECK(emergency_stack_ == NULL); | 1372 DCHECK(emergency_stack_ == NULL); |
| 1374 | 1373 |
| 1375 Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_)); | 1374 Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_)); |
| 1376 intptr_t* head_start = rear_; | 1375 intptr_t* head_start = rear_; |
| 1377 intptr_t* head_end = Min(front_, reinterpret_cast<intptr_t*>(p->area_end())); | 1376 intptr_t* head_end = Min(front_, reinterpret_cast<intptr_t*>(p->area_end())); |
| 1378 | 1377 |
| (...skipping 577 matching lines...) |
| 1956 DCHECK(alignment == kDoubleAlignment); | 1955 DCHECK(alignment == kDoubleAlignment); |
| 1957 allocation_size += kPointerSize; | 1956 allocation_size += kPointerSize; |
| 1958 } | 1957 } |
| 1959 | 1958 |
| 1960 DCHECK(heap->AllowedToBeMigrated(object, NEW_SPACE)); | 1959 DCHECK(heap->AllowedToBeMigrated(object, NEW_SPACE)); |
| 1961 AllocationResult allocation = | 1960 AllocationResult allocation = |
| 1962 heap->new_space()->AllocateRaw(allocation_size); | 1961 heap->new_space()->AllocateRaw(allocation_size); |
| 1963 | 1962 |
| 1964 HeapObject* target = NULL; // Initialization to please compiler. | 1963 HeapObject* target = NULL; // Initialization to please compiler. |
| 1965 if (allocation.To(&target)) { | 1964 if (allocation.To(&target)) { |
| 1965 // Order is important here: Set the promotion limit before storing a |
| 1966 // filler for double alignment or migrating the object. Otherwise we |
| 1967 // may end up overwriting promotion queue entries when we migrate the |
| 1968 // object. |
| 1969 heap->promotion_queue()->SetNewLimit(heap->new_space()->top()); |
| 1970 |
| 1966 if (alignment != kObjectAlignment) { | 1971 if (alignment != kObjectAlignment) { |
| 1967 target = EnsureDoubleAligned(heap, target, allocation_size); | 1972 target = EnsureDoubleAligned(heap, target, allocation_size); |
| 1968 } | 1973 } |
| 1969 | 1974 |
| 1970 // Order is important here: Set the promotion limit before migrating | |
| 1971 // the object. Otherwise we may end up overwriting promotion queue | |
| 1972 // entries when we migrate the object. | |
| 1973 heap->promotion_queue()->SetNewLimit(heap->new_space()->top()); | |
| 1974 | |
| 1975 // Order is important: slot might be inside of the target if target | 1975 // Order is important: slot might be inside of the target if target |
| 1976 // was allocated over a dead object and slot comes from the store | 1976 // was allocated over a dead object and slot comes from the store |
| 1977 // buffer. | 1977 // buffer. |
| 1978 *slot = target; | 1978 *slot = target; |
| 1979 MigrateObject(heap, object, target, object_size); | 1979 MigrateObject(heap, object, target, object_size); |
| 1980 | 1980 |
| 1981 heap->IncrementSemiSpaceCopiedObjectSize(object_size); | 1981 heap->IncrementSemiSpaceCopiedObjectSize(object_size); |
| 1982 return true; | 1982 return true; |
| 1983 } | 1983 } |
| 1984 return false; | 1984 return false; |
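This hunk is the substance of the change: SetNewLimit() now runs as soon as the allocation succeeds, before EnsureDoubleAligned() stores its filler, rather than after the alignment fix-up as on the left. The promotion queue borrows the unused tail of to-space, so any store beyond the limit the queue last saw, even a one-word filler, can land on live queue entries. A stand-alone model of that hazard, with hypothetical simplified names (Scavenger, Push, SetNewLimit here are illustrative stand-ins, not the V8 API):

    #include <cstdio>
    #include <vector>

    // Simplified model: the queue grows down from the end of the same
    // array that allocation fills from the bottom up.
    struct Scavenger {
      static const int kSpaceSize = 16;
      int values[kSpaceSize] = {};  // to-space "memory"
      int top = 0;                  // allocation top, grows upward
      int rear = kSpaceSize;        // queue rear, grows downward
      std::vector<int> emergency;   // safe overflow storage

      void Push(int entry) { values[--rear] = entry; }

      // Must run before anything is written at or above the old top: if
      // the new top reaches into the queue, evacuate the threatened
      // entries first.
      void SetNewLimit(int new_top) {
        while (rear < new_top) emergency.push_back(values[rear++]);
      }

      int Allocate(int size) { int result = top; top += size; return result; }
    };

    int main() {
      Scavenger s;
      s.Push(42);                // one queued entry at the end of the space
      int obj = s.Allocate(16);  // the allocation now overlaps that entry
      s.SetNewLimit(s.top);      // evacuate BEFORE writing the object body
      for (int i = obj; i < s.top; ++i) s.values[i] = 7;
      std::printf("saved entry: %d\n", s.emergency.back());
    }

With the old ordering, a store issued between Allocate() and SetNewLimit() would have hit values[15] while the queue entry was still live there.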
| (...skipping 4141 matching lines...) |
| 6126 static_cast<int>(object_sizes_last_time_[index])); | 6126 static_cast<int>(object_sizes_last_time_[index])); |
| 6127 CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT) | 6127 CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT) |
| 6128 #undef ADJUST_LAST_TIME_OBJECT_COUNT | 6128 #undef ADJUST_LAST_TIME_OBJECT_COUNT |
| 6129 | 6129 |
| 6130 MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_)); | 6130 MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_)); |
| 6131 MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_)); | 6131 MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_)); |
| 6132 ClearObjectStats(); | 6132 ClearObjectStats(); |
| 6133 } | 6133 } |
| 6134 } | 6134 } |
| 6135 } // namespace v8::internal | 6135 } // namespace v8::internal |
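The last hunk only renumbers lines; the checkpoint logic is unchanged. The visible tail copies the live counters into the *_last_time_ arrays and then clears them, the classic snapshot-and-delta pattern. A compressed sketch of that pattern, with hypothetical names (Checkpoint and kCounters are illustrative, not V8's interface):

    #include <cstdio>
    #include <cstring>
    #include <cstddef>

    const std::size_t kCounters = 4;
    long counts[kCounters];            // bumped while the VM runs
    long counts_last_time[kCounters];  // snapshot from the last checkpoint

    void Checkpoint() {
      // Publish deltas, refresh the snapshot, reset the live counters:
      // the same sequence as the MemCopy + ClearObjectStats() calls above.
      for (std::size_t i = 0; i < kCounters; ++i)
        std::printf("counter %zu: %+ld\n", i, counts[i] - counts_last_time[i]);
      std::memcpy(counts_last_time, counts, sizeof(counts));
      std::memset(counts, 0, sizeof(counts));
    }

    int main() {
      counts[0] = 3;
      Checkpoint();  // counter 0 reports +3, the rest report +0
    }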