| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/v8.h" | 5 #include "src/v8.h" |
| 6 | 6 |
| 7 #include "src/base/bits.h" | 7 #include "src/base/bits.h" |
| 8 #include "src/base/platform/platform.h" | 8 #include "src/base/platform/platform.h" |
| 9 #include "src/full-codegen.h" | 9 #include "src/full-codegen.h" |
| 10 #include "src/heap/mark-compact.h" | 10 #include "src/heap/mark-compact.h" |
| (...skipping 75 matching lines...) |
| 86 | 86 |
| 87 // ----------------------------------------------------------------------------- | 87 // ----------------------------------------------------------------------------- |
| 88 // CodeRange | 88 // CodeRange |
| 89 | 89 |
| 90 | 90 |
| 91 CodeRange::CodeRange(Isolate* isolate) | 91 CodeRange::CodeRange(Isolate* isolate) |
| 92 : isolate_(isolate), | 92 : isolate_(isolate), |
| 93 code_range_(NULL), | 93 code_range_(NULL), |
| 94 free_list_(0), | 94 free_list_(0), |
| 95 allocation_list_(0), | 95 allocation_list_(0), |
| 96 current_allocation_block_index_(0) {} | 96 current_allocation_block_index_(0), |
| 97 emergency_block_() {} |
| 97 | 98 |
| 98 | 99 |
| 99 bool CodeRange::SetUp(size_t requested) { | 100 bool CodeRange::SetUp(size_t requested) { |
| 100 DCHECK(code_range_ == NULL); | 101 DCHECK(code_range_ == NULL); |
| 101 | 102 |
| 102 if (requested == 0) { | 103 if (requested == 0) { |
| 103 // When a target requires the code range feature, we put all code objects | 104 // When a target requires the code range feature, we put all code objects |
| 104 // in a kMaximalCodeRangeSize range of virtual address space, so that | 105 // in a kMaximalCodeRangeSize range of virtual address space, so that |
| 105 // they can call each other with near calls. | 106 // they can call each other with near calls. |
| 106 if (kRequiresCodeRange) { | 107 if (kRequiresCodeRange) { |
| (...skipping 30 matching lines...) |
| 137 return false; | 138 return false; |
| 138 } | 139 } |
| 139 base += kReservedCodeRangePages * base::OS::CommitPageSize(); | 140 base += kReservedCodeRangePages * base::OS::CommitPageSize(); |
| 140 } | 141 } |
| 141 Address aligned_base = RoundUp(base, MemoryChunk::kAlignment); | 142 Address aligned_base = RoundUp(base, MemoryChunk::kAlignment); |
| 142 size_t size = code_range_->size() - (aligned_base - base); | 143 size_t size = code_range_->size() - (aligned_base - base); |
| 143 allocation_list_.Add(FreeBlock(aligned_base, size)); | 144 allocation_list_.Add(FreeBlock(aligned_base, size)); |
| 144 current_allocation_block_index_ = 0; | 145 current_allocation_block_index_ = 0; |
| 145 | 146 |
| 146 LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested)); | 147 LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested)); |
| 148 ReserveEmergencyBlock(); |
| 147 return true; | 149 return true; |
| 148 } | 150 } |
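The aligned_base / size computation in SetUp above leans on the usual power-of-two rounding identity: round the reservation base up to the chunk alignment, then drop the rounding slack from the usable size. A minimal self-contained sketch of that arithmetic, using an illustrative alignment constant rather than the real MemoryChunk::kAlignment:

    // Sketch only: RoundUpSketch mirrors a mask-based RoundUp;
    // kExampleAlignment is an illustrative value, not V8's constant.
    #include <cassert>
    #include <cstdint>

    uintptr_t RoundUpSketch(uintptr_t x, uintptr_t alignment) {
      // Works only for power-of-two alignments.
      return (x + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      const uintptr_t kExampleAlignment = uintptr_t{1} << 20;  // e.g. 1 MB
      uintptr_t base = 0x12345678;
      uintptr_t aligned_base = RoundUpSketch(base, kExampleAlignment);
      assert(aligned_base % kExampleAlignment == 0);
      // The usable size shrinks by exactly the rounding slack, which is
      // always smaller than one alignment unit.
      assert(aligned_base - base < kExampleAlignment);
      return 0;
    }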
| 149 | 151 |
| 150 | 152 |
| 151 int CodeRange::CompareFreeBlockAddress(const FreeBlock* left, | 153 int CodeRange::CompareFreeBlockAddress(const FreeBlock* left, |
| 152 const FreeBlock* right) { | 154 const FreeBlock* right) { |
| 153 // The entire point of CodeRange is that the difference between two | 155 // The entire point of CodeRange is that the difference between two |
| 154 // addresses in the range can be represented as a signed 32-bit int, | 156 // addresses in the range can be represented as a signed 32-bit int, |
| 155 // so the cast is semantically correct. | 157 // so the cast is semantically correct. |
| 156 return static_cast<int>(left->start - right->start); | 158 return static_cast<int>(left->start - right->start); |
| (...skipping 38 matching lines...) |
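The comment in CompareFreeBlockAddress above rests on the whole code range being one reservation capped well below 2 GB, so the difference between any two block starts always fits in a signed 32-bit int. A small sketch of that invariant, with stand-in names rather than the real FreeBlock comparator:

    // Sketch only: shows why the int cast cannot overflow when both addresses
    // come from a single reservation smaller than 2 GB. Names are illustrative
    // stand-ins, not the real FreeBlock/CodeRange types.
    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    int CompareInRange(const uint8_t* left, const uint8_t* right,
                       const uint8_t* base, size_t range_size) {
      assert(range_size < (size_t{1} << 31));  // range smaller than 2 GB
      assert(left >= base && left < base + range_size);
      assert(right >= base && right < base + range_size);
      std::ptrdiff_t diff = left - right;      // |diff| < 2^31 by construction
      return static_cast<int>(diff);           // so this cast is lossless
    }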
| 195 current_allocation_block_index_ = 0; | 197 current_allocation_block_index_ = 0; |
| 196 // Code range is full or too fragmented. | 198 // Code range is full or too fragmented. |
| 197 return false; | 199 return false; |
| 198 } | 200 } |
| 199 | 201 |
| 200 | 202 |
| 201 Address CodeRange::AllocateRawMemory(const size_t requested_size, | 203 Address CodeRange::AllocateRawMemory(const size_t requested_size, |
| 202 const size_t commit_size, | 204 const size_t commit_size, |
| 203 size_t* allocated) { | 205 size_t* allocated) { |
| 204 DCHECK(commit_size <= requested_size); | 206 DCHECK(commit_size <= requested_size); |
| 205 DCHECK(allocation_list_.length() == 0 || | 207 FreeBlock current; |
| 206 current_allocation_block_index_ < allocation_list_.length()); | 208 if (!ReserveBlock(requested_size, ¤t)) { |
| 207 if (allocation_list_.length() == 0 || | 209 *allocated = 0; |
| 208 requested_size > allocation_list_[current_allocation_block_index_].size) { | 210 return NULL; |
| 209 // Find an allocation block large enough. | |
| 210 if (!GetNextAllocationBlock(requested_size)) return NULL; | |
| 211 } | 211 } |
| 212 // Commit the requested memory at the start of the current allocation block. | 212 *allocated = current.size; |
| 213 size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment); | |
| 214 FreeBlock current = allocation_list_[current_allocation_block_index_]; | |
| 215 if (aligned_requested >= (current.size - Page::kPageSize)) { | |
| 216 // Don't leave a small free block, useless for a large object or chunk. | |
| 217 *allocated = current.size; | |
| 218 } else { | |
| 219 *allocated = aligned_requested; | |
| 220 } | |
| 221 DCHECK(*allocated <= current.size); | 213 DCHECK(*allocated <= current.size); |
| 222 DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment)); | 214 DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment)); |
| 223 if (!isolate_->memory_allocator()->CommitExecutableMemory( | 215 if (!isolate_->memory_allocator()->CommitExecutableMemory( |
| 224 code_range_, current.start, commit_size, *allocated)) { | 216 code_range_, current.start, commit_size, *allocated)) { |
| 225 *allocated = 0; | 217 *allocated = 0; |
| 218 ReleaseBlock(¤t); |
| 226 return NULL; | 219 return NULL; |
| 227 } | 220 } |
| 228 allocation_list_[current_allocation_block_index_].start += *allocated; | |
| 229 allocation_list_[current_allocation_block_index_].size -= *allocated; | |
| 230 if (*allocated == current.size) { | |
| 231 // This block is used up, get the next one. | |
| 232 GetNextAllocationBlock(0); | |
| 233 } | |
| 234 return current.start; | 221 return current.start; |
| 235 } | 222 } |
| 236 | 223 |
| 237 | 224 |
| 238 bool CodeRange::CommitRawMemory(Address start, size_t length) { | 225 bool CodeRange::CommitRawMemory(Address start, size_t length) { |
| 239 return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE); | 226 return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE); |
| 240 } | 227 } |
| 241 | 228 |
| 242 | 229 |
| 243 bool CodeRange::UncommitRawMemory(Address start, size_t length) { | 230 bool CodeRange::UncommitRawMemory(Address start, size_t length) { |
| 244 return code_range_->Uncommit(start, length); | 231 return code_range_->Uncommit(start, length); |
| 245 } | 232 } |
| 246 | 233 |
| 247 | 234 |
| 248 void CodeRange::FreeRawMemory(Address address, size_t length) { | 235 void CodeRange::FreeRawMemory(Address address, size_t length) { |
| 249 DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment)); | 236 DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment)); |
| 250 free_list_.Add(FreeBlock(address, length)); | 237 free_list_.Add(FreeBlock(address, length)); |
| 251 code_range_->Uncommit(address, length); | 238 code_range_->Uncommit(address, length); |
| 252 } | 239 } |
| 253 | 240 |
| 254 | 241 |
| 255 void CodeRange::TearDown() { | 242 void CodeRange::TearDown() { |
| 256 delete code_range_; // Frees all memory in the virtual memory range. | 243 delete code_range_; // Frees all memory in the virtual memory range. |
| 257 code_range_ = NULL; | 244 code_range_ = NULL; |
| 258 free_list_.Free(); | 245 free_list_.Free(); |
| 259 allocation_list_.Free(); | 246 allocation_list_.Free(); |
| 260 } | 247 } |
| 261 | 248 |
| 262 | 249 |
| 250 bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) { |
| 251 DCHECK(allocation_list_.length() == 0 || |
| 252 current_allocation_block_index_ < allocation_list_.length()); |
| 253 if (allocation_list_.length() == 0 || |
| 254 requested_size > allocation_list_[current_allocation_block_index_].size) { |
| 255 // Find an allocation block large enough. |
| 256 if (!GetNextAllocationBlock(requested_size)) return false; |
| 257 } |
| 258 // Carve the requested memory off the start of the current allocation block. |
| 259 size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment); |
| 260 *block = allocation_list_[current_allocation_block_index_]; |
| 261 // Don't leave a small free block, useless for a large object or chunk. |
| 262 if (aligned_requested < (block->size - Page::kPageSize)) { |
| 263 block->size = aligned_requested; |
| 264 } |
| 265 DCHECK(IsAddressAligned(block->start, MemoryChunk::kAlignment)); |
| 266 allocation_list_[current_allocation_block_index_].start += block->size; |
| 267 allocation_list_[current_allocation_block_index_].size -= block->size; |
| 268 return true; |
| 269 } |
| 270 |
| 271 |
| 272 void CodeRange::ReleaseBlock(const FreeBlock* block) { free_list_.Add(*block); } |
| 273 |
| 274 |
| 275 void CodeRange::ReserveEmergencyBlock() { |
| 276 const size_t requested_size = MemoryAllocator::CodePageAreaSize(); |
| 277 if (emergency_block_.size == 0) { |
| 278 ReserveBlock(requested_size, &emergency_block_); |
| 279 } else { |
| 280 DCHECK(emergency_block_.size >= requested_size); |
| 281 } |
| 282 } |
| 283 |
| 284 |
| 285 void CodeRange::ReleaseEmergencyBlock() { |
| 286 if (emergency_block_.size != 0) { |
| 287 ReleaseBlock(&emergency_block_); |
| 288 emergency_block_.size = 0; |
| 289 } |
| 290 } |
| 291 |
| 292 |
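Taken together, the new CodeRange helpers split allocation into a reserve step and a commit step (with ReleaseBlock returning the reservation to the free list if the commit fails), and SetUp now parks one code-page-sized emergency block that can be handed back when the collector must be guaranteed room for a code page. A self-contained sketch of that flow, using a simplified vector-backed free list and stand-in names rather than the real V8 classes:

    // Sketch only: CodeRangeSketch models the reserve/commit-or-release flow
    // and the emergency block added in this CL; the free list, page size and
    // commit step are simplified stand-ins, not V8's implementation.
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct FreeBlock {
      FreeBlock() : start(0), size(0) {}
      FreeBlock(uintptr_t s, size_t sz) : start(s), size(sz) {}
      uintptr_t start;
      size_t size;
    };

    class CodeRangeSketch {
     public:
      CodeRangeSketch(uintptr_t base, size_t size) {
        free_blocks_.push_back(FreeBlock(base, size));
        // As in SetUp(): keep one page-sized block aside for emergencies.
        ReserveBlock(kPageSize, &emergency_block_);
      }

      // Two-phase allocation: reserve address space first, then commit pages;
      // on commit failure the reservation goes straight back to the free list.
      uintptr_t AllocateRawMemory(size_t requested, size_t* allocated) {
        FreeBlock current;
        if (!ReserveBlock(requested, &current)) {
          *allocated = 0;
          return 0;
        }
        if (!CommitPages(current.start, current.size)) {
          ReleaseBlock(&current);
          *allocated = 0;
          return 0;
        }
        *allocated = current.size;
        return current.start;
      }

      // Called when the collector needs a guaranteed code page: hand the
      // parked block back so the next ReserveBlock() can find it.
      void ReleaseEmergencyBlock() {
        if (emergency_block_.size != 0) {
          ReleaseBlock(&emergency_block_);
          emergency_block_.size = 0;
        }
      }

     private:
      static constexpr size_t kPageSize = 4096;  // example value only

      bool ReserveBlock(size_t requested, FreeBlock* block) {
        for (size_t i = 0; i < free_blocks_.size(); ++i) {
          FreeBlock& candidate = free_blocks_[i];
          if (candidate.size >= requested) {
            *block = FreeBlock(candidate.start, requested);
            candidate.start += requested;
            candidate.size -= requested;
            return true;
          }
        }
        return false;  // range full or too fragmented
      }

      void ReleaseBlock(const FreeBlock* block) {
        free_blocks_.push_back(*block);
      }

      bool CommitPages(uintptr_t, size_t) { return true; }  // stand-in commit

      std::vector<FreeBlock> free_blocks_;
      FreeBlock emergency_block_;
    };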
| 263 // ----------------------------------------------------------------------------- | 293 // ----------------------------------------------------------------------------- |
| 264 // MemoryAllocator | 294 // MemoryAllocator |
| 265 // | 295 // |
| 266 | 296 |
| 267 MemoryAllocator::MemoryAllocator(Isolate* isolate) | 297 MemoryAllocator::MemoryAllocator(Isolate* isolate) |
| 268 : isolate_(isolate), | 298 : isolate_(isolate), |
| 269 capacity_(0), | 299 capacity_(0), |
| 270 capacity_executable_(0), | 300 capacity_executable_(0), |
| 271 size_(0), | 301 size_(0), |
| 272 size_executable_(0), | 302 size_executable_(0), |
| (...skipping 826 matching lines...) |
| 1099 } else { | 1129 } else { |
| 1100 heap()->QueueMemoryChunkForFree(page); | 1130 heap()->QueueMemoryChunkForFree(page); |
| 1101 } | 1131 } |
| 1102 | 1132 |
| 1103 DCHECK(Capacity() > 0); | 1133 DCHECK(Capacity() > 0); |
| 1104 accounting_stats_.ShrinkSpace(AreaSize()); | 1134 accounting_stats_.ShrinkSpace(AreaSize()); |
| 1105 } | 1135 } |
| 1106 | 1136 |
| 1107 | 1137 |
| 1108 void PagedSpace::CreateEmergencyMemory() { | 1138 void PagedSpace::CreateEmergencyMemory() { |
| 1139 if (identity() == CODE_SPACE) { |
| 1140 // Make the emergency block available to the allocator. |
| 1141 CodeRange* code_range = heap()->isolate()->code_range(); |
| 1142 if (code_range != NULL && code_range->valid()) { |
| 1143 code_range->ReleaseEmergencyBlock(); |
| 1144 } |
| 1145 DCHECK(MemoryAllocator::CodePageAreaSize() == AreaSize()); |
| 1146 } |
| 1109 emergency_memory_ = heap()->isolate()->memory_allocator()->AllocateChunk( | 1147 emergency_memory_ = heap()->isolate()->memory_allocator()->AllocateChunk( |
| 1110 AreaSize(), AreaSize(), executable(), this); | 1148 AreaSize(), AreaSize(), executable(), this); |
| 1111 } | 1149 } |
| 1112 | 1150 |
| 1113 | 1151 |
| 1114 void PagedSpace::FreeEmergencyMemory() { | 1152 void PagedSpace::FreeEmergencyMemory() { |
| 1115 Page* page = static_cast<Page*>(emergency_memory_); | 1153 Page* page = static_cast<Page*>(emergency_memory_); |
| 1116 DCHECK(page->LiveBytes() == 0); | 1154 DCHECK(page->LiveBytes() == 0); |
| 1117 DCHECK(AreaSize() == page->area_size()); | 1155 DCHECK(AreaSize() == page->area_size()); |
| 1118 DCHECK(!free_list_.ContainsPageFreeListItems(page)); | 1156 DCHECK(!free_list_.ContainsPageFreeListItems(page)); |
| (...skipping 2047 matching lines...) |
| 3166 object->ShortPrint(); | 3204 object->ShortPrint(); |
| 3167 PrintF("\n"); | 3205 PrintF("\n"); |
| 3168 } | 3206 } |
| 3169 printf(" --------------------------------------\n"); | 3207 printf(" --------------------------------------\n"); |
| 3170 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 3208 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
| 3171 } | 3209 } |
| 3172 | 3210 |
| 3173 #endif // DEBUG | 3211 #endif // DEBUG |
| 3174 } | 3212 } |
| 3175 } // namespace v8::internal | 3213 } // namespace v8::internal |