# Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/heap/heap.h" | 5 #include "src/heap/heap.h" |
| 6 | 6 |
| 7 #include <unordered_map> | 7 #include <unordered_map> |
| 8 #include <unordered_set> | 8 #include <unordered_set> |
| 9 | 9 |
| 10 #include "src/accessors.h" | 10 #include "src/accessors.h" |
| (...skipping 39 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 50 #include "src/snapshot/snapshot.h" | 50 #include "src/snapshot/snapshot.h" |
| 51 #include "src/tracing/trace-event.h" | 51 #include "src/tracing/trace-event.h" |
| 52 #include "src/utils.h" | 52 #include "src/utils.h" |
| 53 #include "src/v8.h" | 53 #include "src/v8.h" |
| 54 #include "src/v8threads.h" | 54 #include "src/v8threads.h" |
| 55 #include "src/vm-state-inl.h" | 55 #include "src/vm-state-inl.h" |
| 56 | 56 |
| 57 namespace v8 { | 57 namespace v8 { |
| 58 namespace internal { | 58 namespace internal { |
| 59 | 59 |
| 60 | |
| 61 struct Heap::StrongRootsList { | 60 struct Heap::StrongRootsList { |
| 62 Object** start; | 61 Object** start; |
| 63 Object** end; | 62 Object** end; |
| 64 StrongRootsList* next; | 63 StrongRootsList* next; |
| 65 }; | 64 }; |
| 66 | 65 |
| 67 class IdleScavengeObserver : public AllocationObserver { | 66 class IdleScavengeObserver : public AllocationObserver { |
| 68 public: | 67 public: |
| 69 IdleScavengeObserver(Heap& heap, intptr_t step_size) | 68 IdleScavengeObserver(Heap& heap, intptr_t step_size) |
| 70 : AllocationObserver(step_size), heap_(heap) {} | 69 : AllocationObserver(step_size), heap_(heap) {} |
| 71 | 70 |
| 72 void Step(int bytes_allocated, Address, size_t) override { | 71 void Step(int bytes_allocated, Address, size_t) override { |
| 73 heap_.ScheduleIdleScavengeIfNeeded(bytes_allocated); | 72 heap_.ScheduleIdleScavengeIfNeeded(bytes_allocated); |
| 74 } | 73 } |
| 75 | 74 |
| 76 private: | 75 private: |
| 77 Heap& heap_; | 76 Heap& heap_; |
| 78 }; | 77 }; |
| 79 | 78 |
| 80 Heap::Heap() | 79 Heap::Heap() |
| 81 : external_memory_(0), | 80 : external_memory_(0), |
| 82 external_memory_limit_(kExternalAllocationSoftLimit), | 81 external_memory_limit_(kExternalAllocationSoftLimit), |
| 83 external_memory_at_last_mark_compact_(0), | 82 external_memory_at_last_mark_compact_(0), |
| 84 isolate_(nullptr), | 83 isolate_(nullptr), |
| 85 code_range_size_(0), | 84 code_range_size_(0), |
| 86 // semispace_size_ should be a power of 2 and old_generation_size_ should | 85 // semispace_size_ should be a power of 2 and old_generation_size_ should |
| 87 // be a multiple of Page::kPageSize. | 86 // be a multiple of Page::kPageSize. |
| 88 max_semi_space_size_(8 * (kPointerSize / 4) * MB), | 87 max_semi_space_size_(8 * (kPointerSize / 4) * MB), |
| 89 initial_semispace_size_(MB), | 88 initial_semispace_size_(Page::kPageSize), |
|
> **Michael Lippautz** — 2017/06/14 07:25:08
> This would not only allow, but also change the min… *[comment truncated in this rendering]*
>
> **Hannes Payer (out of office)** — 2017/06/14 07:57:02
> Absolutely intentional. Done.
| |
| 90 max_old_generation_size_(700ul * (kPointerSize / 4) * MB), | 89 max_old_generation_size_(700ul * (kPointerSize / 4) * MB), |
| 91 initial_max_old_generation_size_(max_old_generation_size_), | 90 initial_max_old_generation_size_(max_old_generation_size_), |
| 92 initial_old_generation_size_(max_old_generation_size_ / | 91 initial_old_generation_size_(max_old_generation_size_ / |
| 93 kInitalOldGenerationLimitFactor), | 92 kInitalOldGenerationLimitFactor), |
| 94 old_generation_size_configured_(false), | 93 old_generation_size_configured_(false), |
| 95 // Variables set based on semispace_size_ and old_generation_size_ in | 94 // Variables set based on semispace_size_ and old_generation_size_ in |
| 96 // ConfigureHeap. | 95 // ConfigureHeap. |
| 97 // Will be 4 * reserved_semispace_size_ to ensure that young | 96 // Will be 4 * reserved_semispace_size_ to ensure that young |
| 98 // generation can be aligned to its size. | 97 // generation can be aligned to its size. |
| 99 maximum_committed_(0), | 98 maximum_committed_(0), |
| (...skipping 5098 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 5198 // We don't do a v->Synchronize call here, because in debug mode that will | 5197 // We don't do a v->Synchronize call here, because in debug mode that will |
| 5199 // output a flag to the snapshot. However at this point the serializer and | 5198 // output a flag to the snapshot. However at this point the serializer and |
| 5200 // deserializer are deliberately a little unsynchronized (see above) so the | 5199 // deserializer are deliberately a little unsynchronized (see above) so the |
| 5201 // checking of the sync flag in the snapshot would fail. | 5200 // checking of the sync flag in the snapshot would fail. |
| 5202 } | 5201 } |
| 5203 | 5202 |
| 5204 | 5203 |
| 5205 // TODO(1236194): Since the heap size is configurable on the command line | 5204 // TODO(1236194): Since the heap size is configurable on the command line |
| 5206 // and through the API, we should gracefully handle the case that the heap | 5205 // and through the API, we should gracefully handle the case that the heap |
| 5207 // size is not big enough to fit all the initial objects. | 5206 // size is not big enough to fit all the initial objects. |
| 5208 bool Heap::ConfigureHeap(size_t max_semi_space_size, size_t max_old_space_size, | 5207 bool Heap::ConfigureHeap(size_t max_semi_space_size_in_kb, |
| 5209 size_t code_range_size) { | 5208 size_t max_old_generation_size_in_mb, |
| 5209 size_t code_range_size_in_mb) { | |
| 5210 if (HasBeenSetUp()) return false; | 5210 if (HasBeenSetUp()) return false; |
| 5211 | 5211 |
| 5212 // Overwrite default configuration. | 5212 // Overwrite default configuration. |
| 5213 if (max_semi_space_size != 0) { | 5213 if (max_semi_space_size_in_kb != 0) { |
| 5214 max_semi_space_size_ = max_semi_space_size * MB; | 5214 max_semi_space_size_ = |
| 5215 ROUND_UP(max_semi_space_size_in_kb * KB, Page::kPageSize); | |
| 5215 } | 5216 } |
| 5216 if (max_old_space_size != 0) { | 5217 if (max_old_generation_size_in_mb != 0) { |
| 5217 max_old_generation_size_ = max_old_space_size * MB; | 5218 max_old_generation_size_ = max_old_generation_size_in_mb * MB; |
| 5218 } | 5219 } |
| 5219 | 5220 |
| 5220 // If max space size flags are specified overwrite the configuration. | 5221 // If max space size flags are specified overwrite the configuration. |
| 5221 if (FLAG_max_semi_space_size > 0) { | 5222 if (FLAG_max_semi_space_size > 0) { |
| 5222 max_semi_space_size_ = static_cast<size_t>(FLAG_max_semi_space_size) * MB; | 5223 max_semi_space_size_ = static_cast<size_t>(FLAG_max_semi_space_size) * MB; |
| 5223 } | 5224 } |
| 5224 if (FLAG_max_old_space_size > 0) { | 5225 if (FLAG_max_old_space_size > 0) { |
| 5225 max_old_generation_size_ = | 5226 max_old_generation_size_ = |
| 5226 static_cast<size_t>(FLAG_max_old_space_size) * MB; | 5227 static_cast<size_t>(FLAG_max_old_space_size) * MB; |
| 5227 } | 5228 } |
| (...skipping 50 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 5278 max_old_generation_size_ / kInitalOldGenerationLimitFactor; | 5279 max_old_generation_size_ / kInitalOldGenerationLimitFactor; |
| 5279 } | 5280 } |
| 5280 old_generation_allocation_limit_ = initial_old_generation_size_; | 5281 old_generation_allocation_limit_ = initial_old_generation_size_; |
| 5281 | 5282 |
| 5282 // We rely on being able to allocate new arrays in paged spaces. | 5283 // We rely on being able to allocate new arrays in paged spaces. |
| 5283 DCHECK(kMaxRegularHeapObjectSize >= | 5284 DCHECK(kMaxRegularHeapObjectSize >= |
| 5284 (JSArray::kSize + | 5285 (JSArray::kSize + |
| 5285 FixedArray::SizeFor(JSArray::kInitialMaxFastElementArray) + | 5286 FixedArray::SizeFor(JSArray::kInitialMaxFastElementArray) + |
| 5286 AllocationMemento::kSize)); | 5287 AllocationMemento::kSize)); |
| 5287 | 5288 |
| 5288 code_range_size_ = code_range_size * MB; | 5289 code_range_size_ = code_range_size_in_mb * MB; |
| 5289 | 5290 |
| 5290 configured_ = true; | 5291 configured_ = true; |
| 5291 return true; | 5292 return true; |
| 5292 } | 5293 } |
| 5293 | 5294 |
| 5294 | 5295 |
| 5295 void Heap::AddToRingBuffer(const char* string) { | 5296 void Heap::AddToRingBuffer(const char* string) { |
| 5296 size_t first_part = | 5297 size_t first_part = |
| 5297 Min(strlen(string), kTraceRingBufferSize - ring_buffer_end_); | 5298 Min(strlen(string), kTraceRingBufferSize - ring_buffer_end_); |
| 5298 memcpy(trace_ring_buffer_ + ring_buffer_end_, string, first_part); | 5299 memcpy(trace_ring_buffer_ + ring_buffer_end_, string, first_part); |
| (...skipping 1321 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 6620 case LO_SPACE: | 6621 case LO_SPACE: |
| 6621 return "LO_SPACE"; | 6622 return "LO_SPACE"; |
| 6622 default: | 6623 default: |
| 6623 UNREACHABLE(); | 6624 UNREACHABLE(); |
| 6624 } | 6625 } |
| 6625 return NULL; | 6626 return NULL; |
| 6626 } | 6627 } |
| 6627 | 6628 |
| 6628 } // namespace internal | 6629 } // namespace internal |
| 6629 } // namespace v8 | 6630 } // namespace v8 |
| OLD | NEW |