| OLD | NEW |
| 1 // Copyright 2006-2010 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2010 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 24 matching lines...) |
| 35 namespace v8 { | 35 namespace v8 { |
| 36 namespace internal { | 36 namespace internal { |
| 37 | 37 |
| 38 // For contiguous spaces, top should be in the space (or at the end) and limit | 38 // For contiguous spaces, top should be in the space (or at the end) and limit |
| 39 // should be the end of the space. | 39 // should be the end of the space. |
| 40 #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \ | 40 #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \ |
| 41 ASSERT((space).low() <= (info).top \ | 41 ASSERT((space).low() <= (info).top \ |
| 42 && (info).top <= (space).high() \ | 42 && (info).top <= (space).high() \ |
| 43 && (info).limit == (space).high()) | 43 && (info).limit == (space).high()) |
| 44 | 44 |
| 45 intptr_t Page::watermark_invalidated_mark_ = 1 << Page::WATERMARK_INVALIDATED; | |
| 46 | |
| 47 // ---------------------------------------------------------------------------- | 45 // ---------------------------------------------------------------------------- |
| 48 // HeapObjectIterator | 46 // HeapObjectIterator |
| 49 | 47 |
| 50 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) { | 48 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) { |
| 51 Initialize(space->bottom(), space->top(), NULL); | 49 Initialize(space->bottom(), space->top(), NULL); |
| 52 } | 50 } |
| 53 | 51 |
| 54 | 52 |
| 55 HeapObjectIterator::HeapObjectIterator(PagedSpace* space, | 53 HeapObjectIterator::HeapObjectIterator(PagedSpace* space, |
| 56 HeapObjectCallback size_func) { | 54 HeapObjectCallback size_func) { |
| (...skipping 85 matching lines...) |
| 142 #endif | 140 #endif |
| 143 stop_page_ = space->last_page_; | 141 stop_page_ = space->last_page_; |
| 144 break; | 142 break; |
| 145 } | 143 } |
| 146 } | 144 } |
| 147 | 145 |
| 148 | 146 |
| 149 // ----------------------------------------------------------------------------- | 147 // ----------------------------------------------------------------------------- |
| 150 // CodeRange | 148 // CodeRange |
| 151 | 149 |
| 152 List<CodeRange::FreeBlock> CodeRange::free_list_(0); | 150 |
| 153 List<CodeRange::FreeBlock> CodeRange::allocation_list_(0); | 151 CodeRange::CodeRange() |
| 154 int CodeRange::current_allocation_block_index_ = 0; | 152 : code_range_(NULL), |
| 155 VirtualMemory* CodeRange::code_range_ = NULL; | 153 free_list_(0), |
| | 154 allocation_list_(0), |
| | 155 current_allocation_block_index_(0), |
| | 156 isolate_(NULL) { |
| | 157 } |
| 156 | 158 |
| 157 | 159 |
| 158 bool CodeRange::Setup(const size_t requested) { | 160 bool CodeRange::Setup(const size_t requested) { |
| 159 ASSERT(code_range_ == NULL); | 161 ASSERT(code_range_ == NULL); |
| 160 | 162 |
| 161 code_range_ = new VirtualMemory(requested); | 163 code_range_ = new VirtualMemory(requested); |
| 162 CHECK(code_range_ != NULL); | 164 CHECK(code_range_ != NULL); |
| 163 if (!code_range_->IsReserved()) { | 165 if (!code_range_->IsReserved()) { |
| 164 delete code_range_; | 166 delete code_range_; |
| 165 code_range_ = NULL; | 167 code_range_ = NULL; |
| 166 return false; | 168 return false; |
| 167 } | 169 } |
| 168 | 170 |
| 169 // We are sure that we have mapped a block of requested addresses. | 171 // We are sure that we have mapped a block of requested addresses. |
| 170 ASSERT(code_range_->size() == requested); | 172 ASSERT(code_range_->size() == requested); |
| 171 LOG(NewEvent("CodeRange", code_range_->address(), requested)); | 173 LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested)); |
| 172 allocation_list_.Add(FreeBlock(code_range_->address(), code_range_->size())); | 174 allocation_list_.Add(FreeBlock(code_range_->address(), code_range_->size())); |
| 173 current_allocation_block_index_ = 0; | 175 current_allocation_block_index_ = 0; |
| 174 return true; | 176 return true; |
| 175 } | 177 } |
| 176 | 178 |
| 177 | 179 |
| 178 int CodeRange::CompareFreeBlockAddress(const FreeBlock* left, | 180 int CodeRange::CompareFreeBlockAddress(const FreeBlock* left, |
| 179 const FreeBlock* right) { | 181 const FreeBlock* right) { |
| 180 // The entire point of CodeRange is that the difference between two | 182 // The entire point of CodeRange is that the difference between two |
| 181 // addresses in the range can be represented as a signed 32-bit int, | 183 // addresses in the range can be represented as a signed 32-bit int, |
| (...skipping 82 matching lines...) |
| 264 delete code_range_; // Frees all memory in the virtual memory range. | 266 delete code_range_; // Frees all memory in the virtual memory range. |
| 265 code_range_ = NULL; | 267 code_range_ = NULL; |
| 266 free_list_.Free(); | 268 free_list_.Free(); |
| 267 allocation_list_.Free(); | 269 allocation_list_.Free(); |
| 268 } | 270 } |
| 269 | 271 |
| 270 | 272 |
| 271 // ----------------------------------------------------------------------------- | 273 // ----------------------------------------------------------------------------- |
| 272 // MemoryAllocator | 274 // MemoryAllocator |
| 273 // | 275 // |
| 274 intptr_t MemoryAllocator::capacity_ = 0; | |
| 275 intptr_t MemoryAllocator::capacity_executable_ = 0; | |
| 276 intptr_t MemoryAllocator::size_ = 0; | |
| 277 intptr_t MemoryAllocator::size_executable_ = 0; | |
| 278 | |
| 279 List<MemoryAllocator::MemoryAllocationCallbackRegistration> | |
| 280 MemoryAllocator::memory_allocation_callbacks_; | |
| 281 | |
| 282 VirtualMemory* MemoryAllocator::initial_chunk_ = NULL; | |
| 283 | 276 |
| 284 // 270 is an estimate based on the static default heap size of a pair of 256K | 277 // 270 is an estimate based on the static default heap size of a pair of 256K |
| 285 // semispaces and a 64M old generation. | 278 // semispaces and a 64M old generation. |
| 286 const int kEstimatedNumberOfChunks = 270; | 279 const int kEstimatedNumberOfChunks = 270; |
| 287 List<MemoryAllocator::ChunkInfo> MemoryAllocator::chunks_( | 280 |
| 288 kEstimatedNumberOfChunks); | 281 |
| 289 List<int> MemoryAllocator::free_chunk_ids_(kEstimatedNumberOfChunks); | 282 MemoryAllocator::MemoryAllocator() |
| 290 int MemoryAllocator::max_nof_chunks_ = 0; | 283 : capacity_(0), |
| 291 int MemoryAllocator::top_ = 0; | 284 capacity_executable_(0), |
| | 285 size_(0), |
| | 286 size_executable_(0), |
| | 287 initial_chunk_(NULL), |
| | 288 chunks_(kEstimatedNumberOfChunks), |
| | 289 free_chunk_ids_(kEstimatedNumberOfChunks), |
| | 290 max_nof_chunks_(0), |
| | 291 top_(0), |
| | 292 isolate_(NULL) { |
| | 293 } |
| 292 | 294 |
| 293 | 295 |
| 294 void MemoryAllocator::Push(int free_chunk_id) { | 296 void MemoryAllocator::Push(int free_chunk_id) { |
| 295 ASSERT(max_nof_chunks_ > 0); | 297 ASSERT(max_nof_chunks_ > 0); |
| 296 ASSERT(top_ < max_nof_chunks_); | 298 ASSERT(top_ < max_nof_chunks_); |
| 297 free_chunk_ids_[top_++] = free_chunk_id; | 299 free_chunk_ids_[top_++] = free_chunk_id; |
| 298 } | 300 } |
| 299 | 301 |
| 300 | 302 |
| 301 int MemoryAllocator::Pop() { | 303 int MemoryAllocator::Pop() { |
| (...skipping 25 matching lines...) |
| 327 ChunkInfo info; // uninitialized element. | 329 ChunkInfo info; // uninitialized element. |
| 328 for (int i = max_nof_chunks_ - 1; i >= 0; i--) { | 330 for (int i = max_nof_chunks_ - 1; i >= 0; i--) { |
| 329 chunks_.Add(info); | 331 chunks_.Add(info); |
| 330 free_chunk_ids_.Add(i); | 332 free_chunk_ids_.Add(i); |
| 331 } | 333 } |
| 332 top_ = max_nof_chunks_; | 334 top_ = max_nof_chunks_; |
| 333 return true; | 335 return true; |
| 334 } | 336 } |
| 335 | 337 |
| 336 | 338 |
| 337 bool MemoryAllocator::SafeIsInAPageChunk(Address addr) { | |
| 338 return InInitialChunk(addr) || InAllocatedChunks(addr); | |
| 339 } | |
| 340 | |
| 341 | |
| 342 void MemoryAllocator::TearDown() { | 339 void MemoryAllocator::TearDown() { |
| 343 for (int i = 0; i < max_nof_chunks_; i++) { | 340 for (int i = 0; i < max_nof_chunks_; i++) { |
| 344 if (chunks_[i].address() != NULL) DeleteChunk(i); | 341 if (chunks_[i].address() != NULL) DeleteChunk(i); |
| 345 } | 342 } |
| 346 chunks_.Clear(); | 343 chunks_.Clear(); |
| 347 free_chunk_ids_.Clear(); | 344 free_chunk_ids_.Clear(); |
| 348 | 345 |
| 349 if (initial_chunk_ != NULL) { | 346 if (initial_chunk_ != NULL) { |
| 350 LOG(DeleteEvent("InitialChunk", initial_chunk_->address())); | 347 LOG(isolate_, DeleteEvent("InitialChunk", initial_chunk_->address())); |
| 351 delete initial_chunk_; | 348 delete initial_chunk_; |
| 352 initial_chunk_ = NULL; | 349 initial_chunk_ = NULL; |
| 353 } | 350 } |
| 354 | 351 |
| 355 FreeChunkTables(&chunk_table_[0], | |
| 356 kChunkTableTopLevelEntries, | |
| 357 kChunkTableLevels); | |
| 358 | |
| 359 ASSERT(top_ == max_nof_chunks_); // all chunks are free | 352 ASSERT(top_ == max_nof_chunks_); // all chunks are free |
| 360 top_ = 0; | 353 top_ = 0; |
| 361 capacity_ = 0; | 354 capacity_ = 0; |
| 362 capacity_executable_ = 0; | 355 capacity_executable_ = 0; |
| 363 size_ = 0; | 356 size_ = 0; |
| 364 max_nof_chunks_ = 0; | 357 max_nof_chunks_ = 0; |
| 365 } | 358 } |
| 366 | 359 |
| 367 | 360 |
| 368 void MemoryAllocator::FreeChunkTables(uintptr_t* array, int len, int level) { | |
| 369 for (int i = 0; i < len; i++) { | |
| 370 if (array[i] != kUnusedChunkTableEntry) { | |
| 371 uintptr_t* subarray = reinterpret_cast<uintptr_t*>(array[i]); | |
| 372 if (level > 1) { | |
| 373 array[i] = kUnusedChunkTableEntry; | |
| 374 FreeChunkTables(subarray, 1 << kChunkTableBitsPerLevel, level - 1); | |
| 375 } else { | |
| 376 array[i] = kUnusedChunkTableEntry; | |
| 377 } | |
| 378 delete[] subarray; | |
| 379 } | |
| 380 } | |
| 381 } | |
| 382 | |
| 383 | |
| 384 void* MemoryAllocator::AllocateRawMemory(const size_t requested, | 361 void* MemoryAllocator::AllocateRawMemory(const size_t requested, |
| 385 size_t* allocated, | 362 size_t* allocated, |
| 386 Executability executable) { | 363 Executability executable) { |
| 387 if (size_ + static_cast<size_t>(requested) > static_cast<size_t>(capacity_)) { | 364 if (size_ + static_cast<size_t>(requested) > static_cast<size_t>(capacity_)) { |
| 388 return NULL; | 365 return NULL; |
| 389 } | 366 } |
| 390 | 367 |
| 391 void* mem; | 368 void* mem; |
| 392 if (executable == EXECUTABLE) { | 369 if (executable == EXECUTABLE) { |
| 393 // Check executable memory limit. | 370 // Check executable memory limit. |
| 394 if (size_executable_ + requested > | 371 if (size_executable_ + requested > |
| 395 static_cast<size_t>(capacity_executable_)) { | 372 static_cast<size_t>(capacity_executable_)) { |
| 396 LOG(StringEvent("MemoryAllocator::AllocateRawMemory", | 373 LOG(isolate_, |
| | 374 StringEvent("MemoryAllocator::AllocateRawMemory", |
| 397 "V8 Executable Allocation capacity exceeded")); | 375 "V8 Executable Allocation capacity exceeded")); |
| 398 return NULL; | 376 return NULL; |
| 399 } | 377 } |
| 400 // Allocate executable memory either from code range or from the | 378 // Allocate executable memory either from code range or from the |
| 401 // OS. | 379 // OS. |
| 402 if (CodeRange::exists()) { | 380 if (isolate_->code_range()->exists()) { |
| 403 mem = CodeRange::AllocateRawMemory(requested, allocated); | 381 mem = isolate_->code_range()->AllocateRawMemory(requested, allocated); |
| 404 } else { | 382 } else { |
| 405 mem = OS::Allocate(requested, allocated, true); | 383 mem = OS::Allocate(requested, allocated, true); |
| 406 } | 384 } |
| 407 // Update executable memory size. | 385 // Update executable memory size. |
| 408 size_executable_ += static_cast<int>(*allocated); | 386 size_executable_ += static_cast<int>(*allocated); |
| 409 } else { | 387 } else { |
| 410 mem = OS::Allocate(requested, allocated, false); | 388 mem = OS::Allocate(requested, allocated, false); |
| 411 } | 389 } |
| 412 int alloced = static_cast<int>(*allocated); | 390 int alloced = static_cast<int>(*allocated); |
| 413 size_ += alloced; | 391 size_ += alloced; |
| 414 | 392 |
| 415 #ifdef DEBUG | 393 #ifdef DEBUG |
| 416 ZapBlock(reinterpret_cast<Address>(mem), alloced); | 394 ZapBlock(reinterpret_cast<Address>(mem), alloced); |
| 417 #endif | 395 #endif |
| 418 Counters::memory_allocated.Increment(alloced); | 396 COUNTERS->memory_allocated()->Increment(alloced); |
| 419 return mem; | 397 return mem; |
| 420 } | 398 } |
| 421 | 399 |
| 422 | 400 |
| 423 void MemoryAllocator::FreeRawMemory(void* mem, | 401 void MemoryAllocator::FreeRawMemory(void* mem, |
| 424 size_t length, | 402 size_t length, |
| 425 Executability executable) { | 403 Executability executable) { |
| 426 #ifdef DEBUG | 404 #ifdef DEBUG |
| 427 ZapBlock(reinterpret_cast<Address>(mem), length); | 405 ZapBlock(reinterpret_cast<Address>(mem), length); |
| 428 #endif | 406 #endif |
| 429 if (CodeRange::contains(static_cast<Address>(mem))) { | 407 if (isolate_->code_range()->contains(static_cast<Address>(mem))) { |
| 430 CodeRange::FreeRawMemory(mem, length); | 408 isolate_->code_range()->FreeRawMemory(mem, length); |
| 431 } else { | 409 } else { |
| 432 OS::Free(mem, length); | 410 OS::Free(mem, length); |
| 433 } | 411 } |
| 434 Counters::memory_allocated.Decrement(static_cast<int>(length)); | 412 COUNTERS->memory_allocated()->Decrement(static_cast<int>(length)); |
| 435 size_ -= static_cast<int>(length); | 413 size_ -= static_cast<int>(length); |
| 436 if (executable == EXECUTABLE) size_executable_ -= static_cast<int>(length); | 414 if (executable == EXECUTABLE) size_executable_ -= static_cast<int>(length); |
| 437 | 415 |
| 438 ASSERT(size_ >= 0); | 416 ASSERT(size_ >= 0); |
| 439 ASSERT(size_executable_ >= 0); | 417 ASSERT(size_executable_ >= 0); |
| 440 } | 418 } |
| 441 | 419 |
| 442 | 420 |
| 443 void MemoryAllocator::PerformAllocationCallback(ObjectSpace space, | 421 void MemoryAllocator::PerformAllocationCallback(ObjectSpace space, |
| 444 AllocationAction action, | 422 AllocationAction action, |
| (...skipping 46 matching lines...) |
| 491 initial_chunk_ = new VirtualMemory(requested); | 469 initial_chunk_ = new VirtualMemory(requested); |
| 492 CHECK(initial_chunk_ != NULL); | 470 CHECK(initial_chunk_ != NULL); |
| 493 if (!initial_chunk_->IsReserved()) { | 471 if (!initial_chunk_->IsReserved()) { |
| 494 delete initial_chunk_; | 472 delete initial_chunk_; |
| 495 initial_chunk_ = NULL; | 473 initial_chunk_ = NULL; |
| 496 return NULL; | 474 return NULL; |
| 497 } | 475 } |
| 498 | 476 |
| 499 // We are sure that we have mapped a block of requested addresses. | 477 // We are sure that we have mapped a block of requested addresses. |
| 500 ASSERT(initial_chunk_->size() == requested); | 478 ASSERT(initial_chunk_->size() == requested); |
| 501 LOG(NewEvent("InitialChunk", initial_chunk_->address(), requested)); | 479 LOG(isolate_, |
| | 480 NewEvent("InitialChunk", initial_chunk_->address(), requested)); |
| 502 size_ += static_cast<int>(requested); | 481 size_ += static_cast<int>(requested); |
| 503 return initial_chunk_->address(); | 482 return initial_chunk_->address(); |
| 504 } | 483 } |
| 505 | 484 |
| 506 | 485 |
| 507 static int PagesInChunk(Address start, size_t size) { | 486 static int PagesInChunk(Address start, size_t size) { |
| 508 // The first page starts on the first page-aligned address from start onward | 487 // The first page starts on the first page-aligned address from start onward |
| 509 // and the last page ends on the last page-aligned address before | 488 // and the last page ends on the last page-aligned address before |
| 510 // start+size. Page::kPageSize is a power of two so we can divide by | 489 // start+size. Page::kPageSize is a power of two so we can divide by |
| 511 // shifting. | 490 // shifting. |
| 512 return static_cast<int>((RoundDown(start + size, Page::kPageSize) | 491 return static_cast<int>((RoundDown(start + size, Page::kPageSize) |
| 513 - RoundUp(start, Page::kPageSize)) >> kPageSizeBits); | 492 - RoundUp(start, Page::kPageSize)) >> kPageSizeBits); |
| 514 } | 493 } |
| 515 | 494 |
| 516 | 495 |
| 517 Page* MemoryAllocator::AllocatePages(int requested_pages, | 496 Page* MemoryAllocator::AllocatePages(int requested_pages, |
| 518 int* allocated_pages, | 497 int* allocated_pages, |
| 519 PagedSpace* owner) { | 498 PagedSpace* owner) { |
| 520 if (requested_pages <= 0) return Page::FromAddress(NULL); | 499 if (requested_pages <= 0) return Page::FromAddress(NULL); |
| 521 size_t chunk_size = requested_pages * Page::kPageSize; | 500 size_t chunk_size = requested_pages * Page::kPageSize; |
| 522 | 501 |
| 523 void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable()); | 502 void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable()); |
| 524 if (chunk == NULL) return Page::FromAddress(NULL); | 503 if (chunk == NULL) return Page::FromAddress(NULL); |
| 525 LOG(NewEvent("PagedChunk", chunk, chunk_size)); | 504 LOG(isolate_, NewEvent("PagedChunk", chunk, chunk_size)); |
| 526 | 505 |
| 527 *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size); | 506 *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size); |
| 528 // We may 'lose' a page due to alignment. | 507 // We may 'lose' a page due to alignment. |
| 529 ASSERT(*allocated_pages >= kPagesPerChunk - 1); | 508 ASSERT(*allocated_pages >= kPagesPerChunk - 1); |
| 530 if (*allocated_pages == 0) { | 509 if (*allocated_pages == 0) { |
| 531 FreeRawMemory(chunk, chunk_size, owner->executable()); | 510 FreeRawMemory(chunk, chunk_size, owner->executable()); |
| 532 LOG(DeleteEvent("PagedChunk", chunk)); | 511 LOG(isolate_, DeleteEvent("PagedChunk", chunk)); |
| 533 return Page::FromAddress(NULL); | 512 return Page::FromAddress(NULL); |
| 534 } | 513 } |
| 535 | 514 |
| 536 int chunk_id = Pop(); | 515 int chunk_id = Pop(); |
| 537 chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner); | 516 chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner); |
| 538 | 517 |
| 539 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity()); | 518 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity()); |
| 540 PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size); | 519 PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size); |
| 541 Page* new_pages = InitializePagesInChunk(chunk_id, *allocated_pages, owner); | 520 Page* new_pages = InitializePagesInChunk(chunk_id, *allocated_pages, owner); |
| 542 | 521 |
| 543 AddToAllocatedChunks(static_cast<Address>(chunk), chunk_size); | |
| 544 | |
| 545 return new_pages; | 522 return new_pages; |
| 546 } | 523 } |
| 547 | 524 |
| 548 | 525 |
| 549 Page* MemoryAllocator::CommitPages(Address start, size_t size, | 526 Page* MemoryAllocator::CommitPages(Address start, size_t size, |
| 550 PagedSpace* owner, int* num_pages) { | 527 PagedSpace* owner, int* num_pages) { |
| 551 ASSERT(start != NULL); | 528 ASSERT(start != NULL); |
| 552 *num_pages = PagesInChunk(start, size); | 529 *num_pages = PagesInChunk(start, size); |
| 553 ASSERT(*num_pages > 0); | 530 ASSERT(*num_pages > 0); |
| 554 ASSERT(initial_chunk_ != NULL); | 531 ASSERT(initial_chunk_ != NULL); |
| 555 ASSERT(InInitialChunk(start)); | 532 ASSERT(InInitialChunk(start)); |
| 556 ASSERT(InInitialChunk(start + size - 1)); | 533 ASSERT(InInitialChunk(start + size - 1)); |
| 557 if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) { | 534 if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) { |
| 558 return Page::FromAddress(NULL); | 535 return Page::FromAddress(NULL); |
| 559 } | 536 } |
| 560 #ifdef DEBUG | 537 #ifdef DEBUG |
| 561 ZapBlock(start, size); | 538 ZapBlock(start, size); |
| 562 #endif | 539 #endif |
| 563 Counters::memory_allocated.Increment(static_cast<int>(size)); | 540 COUNTERS->memory_allocated()->Increment(static_cast<int>(size)); |
| 564 | 541 |
| 565 // So long as we correctly overestimated the number of chunks we should not | 542 // So long as we correctly overestimated the number of chunks we should not |
| 566 // run out of chunk ids. | 543 // run out of chunk ids. |
| 567 CHECK(!OutOfChunkIds()); | 544 CHECK(!OutOfChunkIds()); |
| 568 int chunk_id = Pop(); | 545 int chunk_id = Pop(); |
| 569 chunks_[chunk_id].init(start, size, owner); | 546 chunks_[chunk_id].init(start, size, owner); |
| 570 return InitializePagesInChunk(chunk_id, *num_pages, owner); | 547 return InitializePagesInChunk(chunk_id, *num_pages, owner); |
| 571 } | 548 } |
| 572 | 549 |
| 573 | 550 |
| 574 bool MemoryAllocator::CommitBlock(Address start, | 551 bool MemoryAllocator::CommitBlock(Address start, |
| 575 size_t size, | 552 size_t size, |
| 576 Executability executable) { | 553 Executability executable) { |
| 577 ASSERT(start != NULL); | 554 ASSERT(start != NULL); |
| 578 ASSERT(size > 0); | 555 ASSERT(size > 0); |
| 579 ASSERT(initial_chunk_ != NULL); | 556 ASSERT(initial_chunk_ != NULL); |
| 580 ASSERT(InInitialChunk(start)); | 557 ASSERT(InInitialChunk(start)); |
| 581 ASSERT(InInitialChunk(start + size - 1)); | 558 ASSERT(InInitialChunk(start + size - 1)); |
| 582 | 559 |
| 583 if (!initial_chunk_->Commit(start, size, executable)) return false; | 560 if (!initial_chunk_->Commit(start, size, executable)) return false; |
| 584 #ifdef DEBUG | 561 #ifdef DEBUG |
| 585 ZapBlock(start, size); | 562 ZapBlock(start, size); |
| 586 #endif | 563 #endif |
| 587 Counters::memory_allocated.Increment(static_cast<int>(size)); | 564 COUNTERS->memory_allocated()->Increment(static_cast<int>(size)); |
| 588 return true; | 565 return true; |
| 589 } | 566 } |
| 590 | 567 |
| 591 | 568 |
| 592 bool MemoryAllocator::UncommitBlock(Address start, size_t size) { | 569 bool MemoryAllocator::UncommitBlock(Address start, size_t size) { |
| 593 ASSERT(start != NULL); | 570 ASSERT(start != NULL); |
| 594 ASSERT(size > 0); | 571 ASSERT(size > 0); |
| 595 ASSERT(initial_chunk_ != NULL); | 572 ASSERT(initial_chunk_ != NULL); |
| 596 ASSERT(InInitialChunk(start)); | 573 ASSERT(InInitialChunk(start)); |
| 597 ASSERT(InInitialChunk(start + size - 1)); | 574 ASSERT(InInitialChunk(start + size - 1)); |
| 598 | 575 |
| 599 if (!initial_chunk_->Uncommit(start, size)) return false; | 576 if (!initial_chunk_->Uncommit(start, size)) return false; |
| 600 Counters::memory_allocated.Decrement(static_cast<int>(size)); | 577 COUNTERS->memory_allocated()->Decrement(static_cast<int>(size)); |
| 601 return true; | 578 return true; |
| 602 } | 579 } |
| 603 | 580 |
| 604 | 581 |
| 605 void MemoryAllocator::ZapBlock(Address start, size_t size) { | 582 void MemoryAllocator::ZapBlock(Address start, size_t size) { |
| 606 for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) { | 583 for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) { |
| 607 Memory::Address_at(start + s) = kZapValue; | 584 Memory::Address_at(start + s) = kZapValue; |
| 608 } | 585 } |
| 609 } | 586 } |
| 610 | 587 |
| (...skipping 10 matching lines...) |
| 621 #ifdef DEBUG | 598 #ifdef DEBUG |
| 622 size_t chunk_size = chunks_[chunk_id].size(); | 599 size_t chunk_size = chunks_[chunk_id].size(); |
| 623 Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize); | 600 Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize); |
| 624 ASSERT(pages_in_chunk <= | 601 ASSERT(pages_in_chunk <= |
| 625 ((OffsetFrom(high) - OffsetFrom(low)) / Page::kPageSize)); | 602 ((OffsetFrom(high) - OffsetFrom(low)) / Page::kPageSize)); |
| 626 #endif | 603 #endif |
| 627 | 604 |
| 628 Address page_addr = low; | 605 Address page_addr = low; |
| 629 for (int i = 0; i < pages_in_chunk; i++) { | 606 for (int i = 0; i < pages_in_chunk; i++) { |
| 630 Page* p = Page::FromAddress(page_addr); | 607 Page* p = Page::FromAddress(page_addr); |
| | 608 p->heap_ = owner->heap(); |
| 631 p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id; | 609 p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id; |
| 632 p->InvalidateWatermark(true); | 610 p->InvalidateWatermark(true); |
| 633 p->SetIsLargeObjectPage(false); | 611 p->SetIsLargeObjectPage(false); |
| 634 p->SetAllocationWatermark(p->ObjectAreaStart()); | 612 p->SetAllocationWatermark(p->ObjectAreaStart()); |
| 635 p->SetCachedAllocationWatermark(p->ObjectAreaStart()); | 613 p->SetCachedAllocationWatermark(p->ObjectAreaStart()); |
| 636 page_addr += Page::kPageSize; | 614 page_addr += Page::kPageSize; |
| 637 } | 615 } |
| 638 | 616 |
| 639 // Set the next page of the last page to 0. | 617 // Set the next page of the last page to 0. |
| 640 Page* last_page = Page::FromAddress(page_addr - Page::kPageSize); | 618 Page* last_page = Page::FromAddress(page_addr - Page::kPageSize); |
| (...skipping 49 matching lines...) |
| 690 | 668 |
| 691 ChunkInfo& c = chunks_[chunk_id]; | 669 ChunkInfo& c = chunks_[chunk_id]; |
| 692 | 670 |
| 693 // We cannot free a chunk contained in the initial chunk because it was not | 671 // We cannot free a chunk contained in the initial chunk because it was not |
| 694 // allocated with AllocateRawMemory. Instead we uncommit the virtual | 672 // allocated with AllocateRawMemory. Instead we uncommit the virtual |
| 695 // memory. | 673 // memory. |
| 696 if (InInitialChunk(c.address())) { | 674 if (InInitialChunk(c.address())) { |
| 697 // TODO(1240712): VirtualMemory::Uncommit has a return value which | 675 // TODO(1240712): VirtualMemory::Uncommit has a return value which |
| 698 // is ignored here. | 676 // is ignored here. |
| 699 initial_chunk_->Uncommit(c.address(), c.size()); | 677 initial_chunk_->Uncommit(c.address(), c.size()); |
| 700 Counters::memory_allocated.Decrement(static_cast<int>(c.size())); | 678 COUNTERS->memory_allocated()->Decrement(static_cast<int>(c.size())); |
| 701 } else { | 679 } else { |
| 702 RemoveFromAllocatedChunks(c.address(), c.size()); | 680 LOG(isolate_, DeleteEvent("PagedChunk", c.address())); |
| 703 LOG(DeleteEvent("PagedChunk", c.address())); | 681 ObjectSpace space = static_cast<ObjectSpace>(1 << c.owner_identity()); |
| 704 ObjectSpace space = static_cast<ObjectSpace>(1 << c.owner()->identity()); | |
| 705 size_t size = c.size(); | 682 size_t size = c.size(); |
| 706 FreeRawMemory(c.address(), size, c.executable()); | 683 FreeRawMemory(c.address(), size, c.executable()); |
| 707 PerformAllocationCallback(space, kAllocationActionFree, size); | 684 PerformAllocationCallback(space, kAllocationActionFree, size); |
| 708 } | 685 } |
| 709 c.init(NULL, 0, NULL); | 686 c.init(NULL, 0, NULL); |
| 710 Push(chunk_id); | 687 Push(chunk_id); |
| 711 } | 688 } |
| 712 | 689 |
| 713 | 690 |
| 714 Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) { | 691 Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) { |
| (...skipping 91 matching lines...) |
| 806 last_page->opaque_header = OffsetFrom(0) | chunk_id; | 783 last_page->opaque_header = OffsetFrom(0) | chunk_id; |
| 807 | 784 |
| 808 if (last_page->WasInUseBeforeMC()) { | 785 if (last_page->WasInUseBeforeMC()) { |
| 809 *last_page_in_use = last_page; | 786 *last_page_in_use = last_page; |
| 810 } | 787 } |
| 811 | 788 |
| 812 return last_page; | 789 return last_page; |
| 813 } | 790 } |
| 814 | 791 |
| 815 | 792 |
| 816 void MemoryAllocator::AddToAllocatedChunks(Address addr, intptr_t size) { | |
| 817 ASSERT(size == kChunkSize); | |
| 818 uintptr_t int_address = reinterpret_cast<uintptr_t>(addr); | |
| 819 AddChunkUsingAddress(int_address, int_address); | |
| 820 AddChunkUsingAddress(int_address, int_address + size - 1); | |
| 821 } | |
| 822 | |
| 823 | |
| 824 void MemoryAllocator::AddChunkUsingAddress(uintptr_t chunk_start, | |
| 825 uintptr_t chunk_index_base) { | |
| 826 uintptr_t* fine_grained = AllocatedChunksFinder( | |
| 827 chunk_table_, | |
| 828 chunk_index_base, | |
| 829 kChunkSizeLog2 + (kChunkTableLevels - 1) * kChunkTableBitsPerLevel, | |
| 830 kCreateTablesAsNeeded); | |
| 831 int index = FineGrainedIndexForAddress(chunk_index_base); | |
| 832 if (fine_grained[index] != kUnusedChunkTableEntry) index++; | |
| 833 ASSERT(fine_grained[index] == kUnusedChunkTableEntry); | |
| 834 fine_grained[index] = chunk_start; | |
| 835 } | |
| 836 | |
| 837 | |
| 838 void MemoryAllocator::RemoveFromAllocatedChunks(Address addr, intptr_t size) { | |
| 839 ASSERT(size == kChunkSize); | |
| 840 uintptr_t int_address = reinterpret_cast<uintptr_t>(addr); | |
| 841 RemoveChunkFoundUsingAddress(int_address, int_address); | |
| 842 RemoveChunkFoundUsingAddress(int_address, int_address + size - 1); | |
| 843 } | |
| 844 | |
| 845 | |
| 846 void MemoryAllocator::RemoveChunkFoundUsingAddress( | |
| 847 uintptr_t chunk_start, | |
| 848 uintptr_t chunk_index_base) { | |
| 849 uintptr_t* fine_grained = AllocatedChunksFinder( | |
| 850 chunk_table_, | |
| 851 chunk_index_base, | |
| 852 kChunkSizeLog2 + (kChunkTableLevels - 1) * kChunkTableBitsPerLevel, | |
| 853 kDontCreateTables); | |
| 854 // Can't remove an entry that's not there. | |
| 855 ASSERT(fine_grained != kUnusedChunkTableEntry); | |
| 856 int index = FineGrainedIndexForAddress(chunk_index_base); | |
| 857 ASSERT(fine_grained[index] != kUnusedChunkTableEntry); | |
| 858 if (fine_grained[index] != chunk_start) { | |
| 859 index++; | |
| 860 ASSERT(fine_grained[index] == chunk_start); | |
| 861 fine_grained[index] = kUnusedChunkTableEntry; | |
| 862 } else { | |
| 863 // If only one of the entries is used it must be the first, since | |
| 864 // InAllocatedChunks relies on that. Move things around so that this is | |
| 865 // the case. | |
| 866 fine_grained[index] = fine_grained[index + 1]; | |
| 867 fine_grained[index + 1] = kUnusedChunkTableEntry; | |
| 868 } | |
| 869 } | |
| 870 | |
| 871 | |
| 872 bool MemoryAllocator::InAllocatedChunks(Address addr) { | |
| 873 uintptr_t int_address = reinterpret_cast<uintptr_t>(addr); | |
| 874 uintptr_t* fine_grained = AllocatedChunksFinder( | |
| 875 chunk_table_, | |
| 876 int_address, | |
| 877 kChunkSizeLog2 + (kChunkTableLevels - 1) * kChunkTableBitsPerLevel, | |
| 878 kDontCreateTables); | |
| 879 if (fine_grained == NULL) return false; | |
| 880 int index = FineGrainedIndexForAddress(int_address); | |
| 881 if (fine_grained[index] == kUnusedChunkTableEntry) return false; | |
| 882 uintptr_t entry = fine_grained[index]; | |
| 883 if (entry <= int_address && entry + kChunkSize > int_address) return true; | |
| 884 index++; | |
| 885 if (fine_grained[index] == kUnusedChunkTableEntry) return false; | |
| 886 entry = fine_grained[index]; | |
| 887 if (entry <= int_address && entry + kChunkSize > int_address) return true; | |
| 888 return false; | |
| 889 } | |
| 890 | |
| 891 | |
| 892 uintptr_t* MemoryAllocator::AllocatedChunksFinder( | |
| 893 uintptr_t* table, | |
| 894 uintptr_t address, | |
| 895 int bit_position, | |
| 896 CreateTables create_as_needed) { | |
| 897 if (bit_position == kChunkSizeLog2) { | |
| 898 return table; | |
| 899 } | |
| 900 ASSERT(bit_position >= kChunkSizeLog2 + kChunkTableBitsPerLevel); | |
| 901 int index = | |
| 902 ((address >> bit_position) & | |
| 903 ((V8_INTPTR_C(1) << kChunkTableBitsPerLevel) - 1)); | |
| 904 uintptr_t more_fine_grained_address = | |
| 905 address & ((V8_INTPTR_C(1) << bit_position) - 1); | |
| 906 ASSERT((table == chunk_table_ && index < kChunkTableTopLevelEntries) || | |
| 907 (table != chunk_table_ && index < 1 << kChunkTableBitsPerLevel)); | |
| 908 uintptr_t* more_fine_grained_table = | |
| 909 reinterpret_cast<uintptr_t*>(table[index]); | |
| 910 if (more_fine_grained_table == kUnusedChunkTableEntry) { | |
| 911 if (create_as_needed == kDontCreateTables) return NULL; | |
| 912 int words_needed = 1 << kChunkTableBitsPerLevel; | |
| 913 if (bit_position == kChunkTableBitsPerLevel + kChunkSizeLog2) { | |
| 914 words_needed = | |
| 915 (1 << kChunkTableBitsPerLevel) * kChunkTableFineGrainedWordsPerEntry; | |
| 916 } | |
| 917 more_fine_grained_table = new uintptr_t[words_needed]; | |
| 918 for (int i = 0; i < words_needed; i++) { | |
| 919 more_fine_grained_table[i] = kUnusedChunkTableEntry; | |
| 920 } | |
| 921 table[index] = reinterpret_cast<uintptr_t>(more_fine_grained_table); | |
| 922 } | |
| 923 return AllocatedChunksFinder( | |
| 924 more_fine_grained_table, | |
| 925 more_fine_grained_address, | |
| 926 bit_position - kChunkTableBitsPerLevel, | |
| 927 create_as_needed); | |
| 928 } | |
| 929 | |
| 930 | |
| 931 uintptr_t MemoryAllocator::chunk_table_[kChunkTableTopLevelEntries]; | |
| 932 | |
| 933 | |
| 934 // ----------------------------------------------------------------------------- | 793 // ----------------------------------------------------------------------------- |
| 935 // PagedSpace implementation | 794 // PagedSpace implementation |
| 936 | 795 |
| 937 PagedSpace::PagedSpace(intptr_t max_capacity, | 796 PagedSpace::PagedSpace(Heap* heap, |
| | 797 intptr_t max_capacity, |
| 938 AllocationSpace id, | 798 AllocationSpace id, |
| 939 Executability executable) | 799 Executability executable) |
| 940 : Space(id, executable) { | 800 : Space(heap, id, executable) { |
| 941 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) | 801 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) |
| 942 * Page::kObjectAreaSize; | 802 * Page::kObjectAreaSize; |
| 943 accounting_stats_.Clear(); | 803 accounting_stats_.Clear(); |
| 944 | 804 |
| 945 allocation_info_.top = NULL; | 805 allocation_info_.top = NULL; |
| 946 allocation_info_.limit = NULL; | 806 allocation_info_.limit = NULL; |
| 947 | 807 |
| 948 mc_forwarding_info_.top = NULL; | 808 mc_forwarding_info_.top = NULL; |
| 949 mc_forwarding_info_.limit = NULL; | 809 mc_forwarding_info_.limit = NULL; |
| 950 } | 810 } |
| 951 | 811 |
| 952 | 812 |
| 953 bool PagedSpace::Setup(Address start, size_t size) { | 813 bool PagedSpace::Setup(Address start, size_t size) { |
| 954 if (HasBeenSetup()) return false; | 814 if (HasBeenSetup()) return false; |
| 955 | 815 |
| 956 int num_pages = 0; | 816 int num_pages = 0; |
| 957 // Try to use the virtual memory range passed to us. If it is too small to | 817 // Try to use the virtual memory range passed to us. If it is too small to |
| 958 // contain at least one page, ignore it and allocate instead. | 818 // contain at least one page, ignore it and allocate instead. |
| 959 int pages_in_chunk = PagesInChunk(start, size); | 819 int pages_in_chunk = PagesInChunk(start, size); |
| 960 if (pages_in_chunk > 0) { | 820 if (pages_in_chunk > 0) { |
| 961 first_page_ = MemoryAllocator::CommitPages(RoundUp(start, Page::kPageSize), | 821 first_page_ = Isolate::Current()->memory_allocator()->CommitPages( |
| 962 Page::kPageSize * pages_in_chunk, | 822 RoundUp(start, Page::kPageSize), |
| 963 this, &num_pages); | 823 Page::kPageSize * pages_in_chunk, |
| | 824 this, &num_pages); |
| 964 } else { | 825 } else { |
| 965 int requested_pages = | 826 int requested_pages = |
| 966 Min(MemoryAllocator::kPagesPerChunk, | 827 Min(MemoryAllocator::kPagesPerChunk, |
| 967 static_cast<int>(max_capacity_ / Page::kObjectAreaSize)); | 828 static_cast<int>(max_capacity_ / Page::kObjectAreaSize)); |
| 968 first_page_ = | 829 first_page_ = |
| 969 MemoryAllocator::AllocatePages(requested_pages, &num_pages, this); | 830 Isolate::Current()->memory_allocator()->AllocatePages( |
| | 831 requested_pages, &num_pages, this); |
| 970 if (!first_page_->is_valid()) return false; | 832 if (!first_page_->is_valid()) return false; |
| 971 } | 833 } |
| 972 | 834 |
| 973 // We are sure that the first page is valid and that we have at least one | 835 // We are sure that the first page is valid and that we have at least one |
| 974 // page. | 836 // page. |
| 975 ASSERT(first_page_->is_valid()); | 837 ASSERT(first_page_->is_valid()); |
| 976 ASSERT(num_pages > 0); | 838 ASSERT(num_pages > 0); |
| 977 accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize); | 839 accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize); |
| 978 ASSERT(Capacity() <= max_capacity_); | 840 ASSERT(Capacity() <= max_capacity_); |
| 979 | 841 |
| (...skipping 12 matching lines...) |
| 992 return true; | 854 return true; |
| 993 } | 855 } |
| 994 | 856 |
| 995 | 857 |
| 996 bool PagedSpace::HasBeenSetup() { | 858 bool PagedSpace::HasBeenSetup() { |
| 997 return (Capacity() > 0); | 859 return (Capacity() > 0); |
| 998 } | 860 } |
| 999 | 861 |
| 1000 | 862 |
| 1001 void PagedSpace::TearDown() { | 863 void PagedSpace::TearDown() { |
| 1002 MemoryAllocator::FreeAllPages(this); | 864 Isolate::Current()->memory_allocator()->FreeAllPages(this); |
| 1003 first_page_ = NULL; | 865 first_page_ = NULL; |
| 1004 accounting_stats_.Clear(); | 866 accounting_stats_.Clear(); |
| 1005 } | 867 } |
| 1006 | 868 |
| 1007 | 869 |
| 1008 #ifdef ENABLE_HEAP_PROTECTION | 870 #ifdef ENABLE_HEAP_PROTECTION |
| 1009 | 871 |
| 1010 void PagedSpace::Protect() { | 872 void PagedSpace::Protect() { |
| 1011 Page* page = first_page_; | 873 Page* page = first_page_; |
| 1012 while (page->is_valid()) { | 874 while (page->is_valid()) { |
| 1013 MemoryAllocator::ProtectChunkFromPage(page); | 875 Isolate::Current()->memory_allocator()->ProtectChunkFromPage(page); |
| 1014 page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page(); | 876 page = Isolate::Current()->memory_allocator()-> |
| | 877 FindLastPageInSameChunk(page)->next_page(); |
| 1015 } | 878 } |
| 1016 } | 879 } |
| 1017 | 880 |
| 1018 | 881 |
| 1019 void PagedSpace::Unprotect() { | 882 void PagedSpace::Unprotect() { |
| 1020 Page* page = first_page_; | 883 Page* page = first_page_; |
| 1021 while (page->is_valid()) { | 884 while (page->is_valid()) { |
| 1022 MemoryAllocator::UnprotectChunkFromPage(page); | 885 Isolate::Current()->memory_allocator()->UnprotectChunkFromPage(page); |
| 1023 page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page(); | 886 page = Isolate::Current()->memory_allocator()-> |
| | 887 FindLastPageInSameChunk(page)->next_page(); |
| 1024 } | 888 } |
| 1025 } | 889 } |
| 1026 | 890 |
| 1027 #endif | 891 #endif |
| 1028 | 892 |
| 1029 | 893 |
| 1030 void PagedSpace::MarkAllPagesClean() { | 894 void PagedSpace::MarkAllPagesClean() { |
| 1031 PageIterator it(this, PageIterator::ALL_PAGES); | 895 PageIterator it(this, PageIterator::ALL_PAGES); |
| 1032 while (it.has_next()) { | 896 while (it.has_next()) { |
| 1033 it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks); | 897 it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks); |
| 1034 } | 898 } |
| 1035 } | 899 } |
| 1036 | 900 |
| 1037 | 901 |
| 1038 MaybeObject* PagedSpace::FindObject(Address addr) { | 902 MaybeObject* PagedSpace::FindObject(Address addr) { |
| 1039 // Note: this function can only be called before or after mark-compact GC | 903 // Note: this function can only be called before or after mark-compact GC |
| 1040 // because it accesses map pointers. | 904 // because it accesses map pointers. |
| 1041 ASSERT(!MarkCompactCollector::in_use()); | 905 ASSERT(!heap()->mark_compact_collector()->in_use()); |
| 1042 | 906 |
| 1043 if (!Contains(addr)) return Failure::Exception(); | 907 if (!Contains(addr)) return Failure::Exception(); |
| 1044 | 908 |
| 1045 Page* p = Page::FromAddress(addr); | 909 Page* p = Page::FromAddress(addr); |
| 1046 ASSERT(IsUsed(p)); | 910 ASSERT(IsUsed(p)); |
| 1047 Address cur = p->ObjectAreaStart(); | 911 Address cur = p->ObjectAreaStart(); |
| 1048 Address end = p->AllocationTop(); | 912 Address end = p->AllocationTop(); |
| 1049 while (cur < end) { | 913 while (cur < end) { |
| 1050 HeapObject* obj = HeapObject::FromAddress(cur); | 914 HeapObject* obj = HeapObject::FromAddress(cur); |
| 1051 Address next = cur + obj->Size(); | 915 Address next = cur + obj->Size(); |
| (...skipping 99 matching lines...) |
| 1151 ASSERT(last_page->is_valid() && !last_page->next_page()->is_valid()); | 1015 ASSERT(last_page->is_valid() && !last_page->next_page()->is_valid()); |
| 1152 | 1016 |
| 1153 int available_pages = | 1017 int available_pages = |
| 1154 static_cast<int>((max_capacity_ - Capacity()) / Page::kObjectAreaSize); | 1018 static_cast<int>((max_capacity_ - Capacity()) / Page::kObjectAreaSize); |
| 1155 // We don't want to have to handle small chunks near the end so if there are | 1019 // We don't want to have to handle small chunks near the end so if there are |
| 1156 // not kPagesPerChunk pages available without exceeding the max capacity then | 1020 // not kPagesPerChunk pages available without exceeding the max capacity then |
| 1157 // act as if memory has run out. | 1021 // act as if memory has run out. |
| 1158 if (available_pages < MemoryAllocator::kPagesPerChunk) return false; | 1022 if (available_pages < MemoryAllocator::kPagesPerChunk) return false; |
| 1159 | 1023 |
| 1160 int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk); | 1024 int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk); |
| 1161 Page* p = MemoryAllocator::AllocatePages(desired_pages, &desired_pages, this); | 1025 Page* p = heap()->isolate()->memory_allocator()->AllocatePages( |
| | 1026 desired_pages, &desired_pages, this); |
| 1162 if (!p->is_valid()) return false; | 1027 if (!p->is_valid()) return false; |
| 1163 | 1028 |
| 1164 accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize); | 1029 accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize); |
| 1165 ASSERT(Capacity() <= max_capacity_); | 1030 ASSERT(Capacity() <= max_capacity_); |
| 1166 | 1031 |
| 1167 MemoryAllocator::SetNextPage(last_page, p); | 1032 heap()->isolate()->memory_allocator()->SetNextPage(last_page, p); |
| 1168 | 1033 |
| 1169 // Sequentially clear region marks of new pages and cache the | 1034 // Sequentially clear region marks of new pages and cache the |
| 1170 // new last page in the space. | 1035 // new last page in the space. |
| 1171 while (p->is_valid()) { | 1036 while (p->is_valid()) { |
| 1172 p->SetRegionMarks(Page::kAllRegionsCleanMarks); | 1037 p->SetRegionMarks(Page::kAllRegionsCleanMarks); |
| 1173 last_page_ = p; | 1038 last_page_ = p; |
| 1174 p = p->next_page(); | 1039 p = p->next_page(); |
| 1175 } | 1040 } |
| 1176 | 1041 |
| 1177 return true; | 1042 return true; |
| (...skipping 22 matching lines...) |
| 1200 Page* top_page = AllocationTopPage(); | 1065 Page* top_page = AllocationTopPage(); |
| 1201 ASSERT(top_page->is_valid()); | 1066 ASSERT(top_page->is_valid()); |
| 1202 | 1067 |
| 1203 // Count the number of pages we would like to free. | 1068 // Count the number of pages we would like to free. |
| 1204 int pages_to_free = 0; | 1069 int pages_to_free = 0; |
| 1205 for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) { | 1070 for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) { |
| 1206 pages_to_free++; | 1071 pages_to_free++; |
| 1207 } | 1072 } |
| 1208 | 1073 |
| 1209 // Free pages after top_page. | 1074 // Free pages after top_page. |
| 1210 Page* p = MemoryAllocator::FreePages(top_page->next_page()); | 1075 Page* p = heap()->isolate()->memory_allocator()-> |
| 1211 MemoryAllocator::SetNextPage(top_page, p); | 1076 FreePages(top_page->next_page()); |
| | 1077 heap()->isolate()->memory_allocator()->SetNextPage(top_page, p); |
| 1212 | 1078 |
| 1213 // Find out how many pages we failed to free and update last_page_. | 1079 // Find out how many pages we failed to free and update last_page_. |
| 1214 // Please note pages can only be freed in whole chunks. | 1080 // Please note pages can only be freed in whole chunks. |
| 1215 last_page_ = top_page; | 1081 last_page_ = top_page; |
| 1216 for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) { | 1082 for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) { |
| 1217 pages_to_free--; | 1083 pages_to_free--; |
| 1218 last_page_ = p; | 1084 last_page_ = p; |
| 1219 } | 1085 } |
| 1220 | 1086 |
| 1221 accounting_stats_.ShrinkSpace(pages_to_free * Page::kObjectAreaSize); | 1087 accounting_stats_.ShrinkSpace(pages_to_free * Page::kObjectAreaSize); |
| 1222 ASSERT(Capacity() == CountTotalPages() * Page::kObjectAreaSize); | 1088 ASSERT(Capacity() == CountTotalPages() * Page::kObjectAreaSize); |
| 1223 } | 1089 } |
| 1224 | 1090 |
| 1225 | 1091 |
| 1226 bool PagedSpace::EnsureCapacity(int capacity) { | 1092 bool PagedSpace::EnsureCapacity(int capacity) { |
| 1227 if (Capacity() >= capacity) return true; | 1093 if (Capacity() >= capacity) return true; |
| 1228 | 1094 |
| 1229 // Start from the allocation top and loop to the last page in the space. | 1095 // Start from the allocation top and loop to the last page in the space. |
| 1230 Page* last_page = AllocationTopPage(); | 1096 Page* last_page = AllocationTopPage(); |
| 1231 Page* next_page = last_page->next_page(); | 1097 Page* next_page = last_page->next_page(); |
| 1232 while (next_page->is_valid()) { | 1098 while (next_page->is_valid()) { |
| 1233 last_page = MemoryAllocator::FindLastPageInSameChunk(next_page); | 1099 last_page = heap()->isolate()->memory_allocator()-> |
| | 1100 FindLastPageInSameChunk(next_page); |
| 1234 next_page = last_page->next_page(); | 1101 next_page = last_page->next_page(); |
| 1235 } | 1102 } |
| 1236 | 1103 |
| 1237 // Expand the space until it has the required capacity or expansion fails. | 1104 // Expand the space until it has the required capacity or expansion fails. |
| 1238 do { | 1105 do { |
| 1239 if (!Expand(last_page)) return false; | 1106 if (!Expand(last_page)) return false; |
| 1240 ASSERT(last_page->next_page()->is_valid()); | 1107 ASSERT(last_page->next_page()->is_valid()); |
| 1241 last_page = | 1108 last_page = |
| 1242 MemoryAllocator::FindLastPageInSameChunk(last_page->next_page()); | 1109 heap()->isolate()->memory_allocator()->FindLastPageInSameChunk( |
| | 1110 last_page->next_page()); |
| 1243 } while (Capacity() < capacity); | 1111 } while (Capacity() < capacity); |
| 1244 | 1112 |
| 1245 return true; | 1113 return true; |
| 1246 } | 1114 } |
| 1247 | 1115 |
| 1248 | 1116 |
| 1249 #ifdef DEBUG | 1117 #ifdef DEBUG |
| 1250 void PagedSpace::Print() { } | 1118 void PagedSpace::Print() { } |
| 1251 #endif | 1119 #endif |
| 1252 | 1120 |
| 1253 | 1121 |
| 1254 #ifdef DEBUG | 1122 #ifdef DEBUG |
| 1255 // We do not assume that the PageIterator works, because it depends on the | 1123 // We do not assume that the PageIterator works, because it depends on the |
| 1256 // invariants we are checking during verification. | 1124 // invariants we are checking during verification. |
| 1257 void PagedSpace::Verify(ObjectVisitor* visitor) { | 1125 void PagedSpace::Verify(ObjectVisitor* visitor) { |
| 1258 // The allocation pointer should be valid, and it should be in a page in the | 1126 // The allocation pointer should be valid, and it should be in a page in the |
| 1259 // space. | 1127 // space. |
| 1260 ASSERT(allocation_info_.VerifyPagedAllocation()); | 1128 ASSERT(allocation_info_.VerifyPagedAllocation()); |
| 1261 Page* top_page = Page::FromAllocationTop(allocation_info_.top); | 1129 Page* top_page = Page::FromAllocationTop(allocation_info_.top); |
| 1262 ASSERT(MemoryAllocator::IsPageInSpace(top_page, this)); | 1130 ASSERT(heap()->isolate()->memory_allocator()->IsPageInSpace(top_page, this)); |
| 1263 | 1131 |
| 1264 // Loop over all the pages. | 1132 // Loop over all the pages. |
| 1265 bool above_allocation_top = false; | 1133 bool above_allocation_top = false; |
| 1266 Page* current_page = first_page_; | 1134 Page* current_page = first_page_; |
| 1267 while (current_page->is_valid()) { | 1135 while (current_page->is_valid()) { |
| 1268 if (above_allocation_top) { | 1136 if (above_allocation_top) { |
| 1269 // We don't care what's above the allocation top. | 1137 // We don't care what's above the allocation top. |
| 1270 } else { | 1138 } else { |
| 1271 Address top = current_page->AllocationTop(); | 1139 Address top = current_page->AllocationTop(); |
| 1272 if (current_page == top_page) { | 1140 if (current_page == top_page) { |
| 1273 ASSERT(top == allocation_info_.top); | 1141 ASSERT(top == allocation_info_.top); |
| 1274 // The next page will be above the allocation top. | 1142 // The next page will be above the allocation top. |
| 1275 above_allocation_top = true; | 1143 above_allocation_top = true; |
| 1276 } | 1144 } |
| 1277 | 1145 |
| 1278 // It should be packed with objects from the bottom to the top. | 1146 // It should be packed with objects from the bottom to the top. |
| 1279 Address current = current_page->ObjectAreaStart(); | 1147 Address current = current_page->ObjectAreaStart(); |
| 1280 while (current < top) { | 1148 while (current < top) { |
| 1281 HeapObject* object = HeapObject::FromAddress(current); | 1149 HeapObject* object = HeapObject::FromAddress(current); |
| 1282 | 1150 |
| 1283 // The first word should be a map, and we expect all map pointers to | 1151 // The first word should be a map, and we expect all map pointers to |
| 1284 // be in map space. | 1152 // be in map space. |
| 1285 Map* map = object->map(); | 1153 Map* map = object->map(); |
| 1286 ASSERT(map->IsMap()); | 1154 ASSERT(map->IsMap()); |
| 1287 ASSERT(Heap::map_space()->Contains(map)); | 1155 ASSERT(heap()->map_space()->Contains(map)); |
| 1288 | 1156 |
| 1289 // Perform space-specific object verification. | 1157 // Perform space-specific object verification. |
| 1290 VerifyObject(object); | 1158 VerifyObject(object); |
| 1291 | 1159 |
| 1292 // The object itself should look OK. | 1160 // The object itself should look OK. |
| 1293 object->Verify(); | 1161 object->Verify(); |
| 1294 | 1162 |
| 1295 // All the interior pointers should be contained in the heap and | 1163 // All the interior pointers should be contained in the heap and |
| 1296 // have page regions covering intergenerational references should be | 1164 // have page regions covering intergenerational references should be |
| 1297 // marked dirty. | 1165 // marked dirty. |
| (...skipping 15 matching lines...) Expand all Loading... |
| 1313 | 1181 |
| 1314 // ----------------------------------------------------------------------------- | 1182 // ----------------------------------------------------------------------------- |
| 1315 // NewSpace implementation | 1183 // NewSpace implementation |
| 1316 | 1184 |
| 1317 | 1185 |
| 1318 bool NewSpace::Setup(Address start, int size) { | 1186 bool NewSpace::Setup(Address start, int size) { |
| 1319 // Setup new space based on the preallocated memory block defined by | 1187 // Setup new space based on the preallocated memory block defined by |
| 1320 // start and size. The provided space is divided into two semi-spaces. | 1188 // start and size. The provided space is divided into two semi-spaces. |
| 1321 // To support fast containment testing in the new space, the size of | 1189 // To support fast containment testing in the new space, the size of |
| 1322 // this chunk must be a power of two and it must be aligned to its size. | 1190 // this chunk must be a power of two and it must be aligned to its size. |
| 1323 int initial_semispace_capacity = Heap::InitialSemiSpaceSize(); | 1191 int initial_semispace_capacity = heap()->InitialSemiSpaceSize(); |
| 1324 int maximum_semispace_capacity = Heap::MaxSemiSpaceSize(); | 1192 int maximum_semispace_capacity = heap()->MaxSemiSpaceSize(); |
| 1325 | 1193 |
| 1326 ASSERT(initial_semispace_capacity <= maximum_semispace_capacity); | 1194 ASSERT(initial_semispace_capacity <= maximum_semispace_capacity); |
| 1327 ASSERT(IsPowerOf2(maximum_semispace_capacity)); | 1195 ASSERT(IsPowerOf2(maximum_semispace_capacity)); |
| 1328 | 1196 |
| 1329 // Allocate and setup the histogram arrays if necessary. | 1197 // Allocate and setup the histogram arrays if necessary. |
| 1330 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) | 1198 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) |
| 1331 allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1); | 1199 allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1); |
| 1332 promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1); | 1200 promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1); |
| 1333 | 1201 |
| 1334 #define SET_NAME(name) allocated_histogram_[name].set_name(#name); \ | 1202 #define SET_NAME(name) allocated_histogram_[name].set_name(#name); \ |
| 1335 promoted_histogram_[name].set_name(#name); | 1203 promoted_histogram_[name].set_name(#name); |
| 1336 INSTANCE_TYPE_LIST(SET_NAME) | 1204 INSTANCE_TYPE_LIST(SET_NAME) |
| 1337 #undef SET_NAME | 1205 #undef SET_NAME |
| 1338 #endif | 1206 #endif |
| 1339 | 1207 |
| 1340 ASSERT(size == 2 * Heap::ReservedSemiSpaceSize()); | 1208 ASSERT(size == 2 * heap()->ReservedSemiSpaceSize()); |
| 1341 ASSERT(IsAddressAligned(start, size, 0)); | 1209 ASSERT(IsAddressAligned(start, size, 0)); |
| 1342 | 1210 |
| 1343 if (!to_space_.Setup(start, | 1211 if (!to_space_.Setup(start, |
| 1344 initial_semispace_capacity, | 1212 initial_semispace_capacity, |
| 1345 maximum_semispace_capacity)) { | 1213 maximum_semispace_capacity)) { |
| 1346 return false; | 1214 return false; |
| 1347 } | 1215 } |
| 1348 if (!from_space_.Setup(start + maximum_semispace_capacity, | 1216 if (!from_space_.Setup(start + maximum_semispace_capacity, |
| 1349 initial_semispace_capacity, | 1217 initial_semispace_capacity, |
| 1350 maximum_semispace_capacity)) { | 1218 maximum_semispace_capacity)) { |
| (...skipping 34 matching lines...) |
| 1385 mc_forwarding_info_.limit = NULL; | 1253 mc_forwarding_info_.limit = NULL; |
| 1386 | 1254 |
| 1387 to_space_.TearDown(); | 1255 to_space_.TearDown(); |
| 1388 from_space_.TearDown(); | 1256 from_space_.TearDown(); |
| 1389 } | 1257 } |
| 1390 | 1258 |
| 1391 | 1259 |
| 1392 #ifdef ENABLE_HEAP_PROTECTION | 1260 #ifdef ENABLE_HEAP_PROTECTION |
| 1393 | 1261 |
| 1394 void NewSpace::Protect() { | 1262 void NewSpace::Protect() { |
| 1395 MemoryAllocator::Protect(ToSpaceLow(), Capacity()); | 1263 heap()->isolate()->memory_allocator()->Protect(ToSpaceLow(), Capacity()); |
| 1396 MemoryAllocator::Protect(FromSpaceLow(), Capacity()); | 1264 heap()->isolate()->memory_allocator()->Protect(FromSpaceLow(), Capacity()); |
| 1397 } | 1265 } |
| 1398 | 1266 |
| 1399 | 1267 |
| 1400 void NewSpace::Unprotect() { | 1268 void NewSpace::Unprotect() { |
| 1401 MemoryAllocator::Unprotect(ToSpaceLow(), Capacity(), | 1269 heap()->isolate()->memory_allocator()->Unprotect(ToSpaceLow(), Capacity(), |
| 1402 to_space_.executable()); | 1270 to_space_.executable()); |
| 1403 MemoryAllocator::Unprotect(FromSpaceLow(), Capacity(), | 1271 heap()->isolate()->memory_allocator()->Unprotect(FromSpaceLow(), Capacity(), |
| 1404 from_space_.executable()); | 1272 from_space_.executable()); |
| 1405 } | 1273 } |
| 1406 | 1274 |
| 1407 #endif | 1275 #endif |
| 1408 | 1276 |
| 1409 | 1277 |
| 1410 void NewSpace::Flip() { | 1278 void NewSpace::Flip() { |
| 1411 SemiSpace tmp = from_space_; | 1279 SemiSpace tmp = from_space_; |
| 1412 from_space_ = to_space_; | 1280 from_space_ = to_space_; |
| 1413 to_space_ = tmp; | 1281 to_space_ = tmp; |
| 1414 } | 1282 } |
| (...skipping 73 matching lines...) |
| 1488 // There should be objects packed in from the low address up to the | 1356 // There should be objects packed in from the low address up to the |
| 1489 // allocation pointer. | 1357 // allocation pointer. |
| 1490 Address current = to_space_.low(); | 1358 Address current = to_space_.low(); |
| 1491 while (current < top()) { | 1359 while (current < top()) { |
| 1492 HeapObject* object = HeapObject::FromAddress(current); | 1360 HeapObject* object = HeapObject::FromAddress(current); |
| 1493 | 1361 |
| 1494 // The first word should be a map, and we expect all map pointers to | 1362 // The first word should be a map, and we expect all map pointers to |
| 1495 // be in map space. | 1363 // be in map space. |
| 1496 Map* map = object->map(); | 1364 Map* map = object->map(); |
| 1497 ASSERT(map->IsMap()); | 1365 ASSERT(map->IsMap()); |
| 1498 ASSERT(Heap::map_space()->Contains(map)); | 1366 ASSERT(heap()->map_space()->Contains(map)); |
| 1499 | 1367 |
| 1500 // The object should not be code or a map. | 1368 // The object should not be code or a map. |
| 1501 ASSERT(!object->IsMap()); | 1369 ASSERT(!object->IsMap()); |
| 1502 ASSERT(!object->IsCode()); | 1370 ASSERT(!object->IsCode()); |
| 1503 | 1371 |
| 1504 // The object itself should look OK. | 1372 // The object itself should look OK. |
| 1505 object->Verify(); | 1373 object->Verify(); |
| 1506 | 1374 |
| 1507 // All the interior pointers should be contained in the heap. | 1375 // All the interior pointers should be contained in the heap. |
| 1508 VerifyPointersVisitor visitor; | 1376 VerifyPointersVisitor visitor; |
| 1509 int size = object->Size(); | 1377 int size = object->Size(); |
| 1510 object->IterateBody(map->instance_type(), size, &visitor); | 1378 object->IterateBody(map->instance_type(), size, &visitor); |
| 1511 | 1379 |
| 1512 current += size; | 1380 current += size; |
| 1513 } | 1381 } |
| 1514 | 1382 |
| 1515 // The allocation pointer should not be in the middle of an object. | 1383 // The allocation pointer should not be in the middle of an object. |
| 1516 ASSERT(current == top()); | 1384 ASSERT(current == top()); |
| 1517 } | 1385 } |
| 1518 #endif | 1386 #endif |
| 1519 | 1387 |
| 1520 | 1388 |
| 1521 bool SemiSpace::Commit() { | 1389 bool SemiSpace::Commit() { |
| 1522 ASSERT(!is_committed()); | 1390 ASSERT(!is_committed()); |
| 1523 if (!MemoryAllocator::CommitBlock(start_, capacity_, executable())) { | 1391 if (!heap()->isolate()->memory_allocator()->CommitBlock( |
| 1392 start_, capacity_, executable())) { |
| 1524 return false; | 1393 return false; |
| 1525 } | 1394 } |
| 1526 committed_ = true; | 1395 committed_ = true; |
| 1527 return true; | 1396 return true; |
| 1528 } | 1397 } |
| 1529 | 1398 |
| 1530 | 1399 |
| 1531 bool SemiSpace::Uncommit() { | 1400 bool SemiSpace::Uncommit() { |
| 1532 ASSERT(is_committed()); | 1401 ASSERT(is_committed()); |
| 1533 if (!MemoryAllocator::UncommitBlock(start_, capacity_)) { | 1402 if (!heap()->isolate()->memory_allocator()->UncommitBlock( |
| 1403 start_, capacity_)) { |
| 1534 return false; | 1404 return false; |
| 1535 } | 1405 } |
| 1536 committed_ = false; | 1406 committed_ = false; |
| 1537 return true; | 1407 return true; |
| 1538 } | 1408 } |
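SemiSpace::Commit() and Uncommit() above only flip committed_ after the allocator call succeeds; the CommitBlock/UncommitBlock machinery itself is not in this hunk. As a rough, POSIX-only sketch of the reserve-then-commit idea behind it (assumed behaviour, not V8's implementation):

    #include <sys/mman.h>
    #include <cassert>
    #include <cstddef>

    // Reserve the maximum range up front with no access rights; the address
    // space is claimed but not yet usable.
    static void* Reserve(size_t max_bytes) {
      void* p = mmap(NULL, max_bytes, PROT_NONE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      return p == MAP_FAILED ? NULL : p;
    }

    // Commit an aligned sub-range by making it readable and writable.
    static bool CommitBlock(void* start, size_t bytes) {
      return mprotect(start, bytes, PROT_READ | PROT_WRITE) == 0;
    }

    // Uncommit by dropping access again; the reservation stays in place.
    static bool UncommitBlock(void* start, size_t bytes) {
      return mprotect(start, bytes, PROT_NONE) == 0;
    }

    int main() {
      const size_t kMaximumCapacity = 1 << 20;  // assumed maximum semispace size
      const size_t kInitialCapacity = 1 << 16;  // assumed initial capacity
      void* base = Reserve(kMaximumCapacity);
      assert(base != NULL);
      assert(CommitBlock(base, kInitialCapacity));
      assert(UncommitBlock(base, kInitialCapacity));
      munmap(base, kMaximumCapacity);
      return 0;
    }

The real MemoryAllocator also tracks committed byte counts and executable permissions; the sketch only shows why commit can fail and why the caller checks the result before updating committed_.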
| 1539 | 1409 |
| 1540 | 1410 |
| 1541 // ----------------------------------------------------------------------------- | 1411 // ----------------------------------------------------------------------------- |
| 1542 // SemiSpace implementation | 1412 // SemiSpace implementation |
| 1543 | 1413 |
| (...skipping 25 matching lines...) | |
| 1569 start_ = NULL; | 1439 start_ = NULL; |
| 1570 capacity_ = 0; | 1440 capacity_ = 0; |
| 1571 } | 1441 } |
| 1572 | 1442 |
| 1573 | 1443 |
| 1574 bool SemiSpace::Grow() { | 1444 bool SemiSpace::Grow() { |
| 1575 // Double the semispace size but only up to maximum capacity. | 1445 // Double the semispace size but only up to maximum capacity. |
| 1576 int maximum_extra = maximum_capacity_ - capacity_; | 1446 int maximum_extra = maximum_capacity_ - capacity_; |
| 1577 int extra = Min(RoundUp(capacity_, static_cast<int>(OS::AllocateAlignment())), | 1447 int extra = Min(RoundUp(capacity_, static_cast<int>(OS::AllocateAlignment())), |
| 1578 maximum_extra); | 1448 maximum_extra); |
| 1579 if (!MemoryAllocator::CommitBlock(high(), extra, executable())) { | 1449 if (!heap()->isolate()->memory_allocator()->CommitBlock( |
| 1450 high(), extra, executable())) { |
| 1580 return false; | 1451 return false; |
| 1581 } | 1452 } |
| 1582 capacity_ += extra; | 1453 capacity_ += extra; |
| 1583 return true; | 1454 return true; |
| 1584 } | 1455 } |
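Grow() roughly doubles the committed capacity: the increment is the current capacity rounded up to the OS allocation alignment, clamped so the semispace never exceeds maximum_capacity_. A tiny worked example of that arithmetic (illustrative code, not V8's):

    #include <cassert>

    static int RoundUp(int value, int alignment) {
      return ((value + alignment - 1) / alignment) * alignment;
    }

    // Mirrors extra = Min(RoundUp(capacity_, alignment), maximum_extra).
    static int GrowExtra(int capacity, int maximum_capacity, int alignment) {
      int maximum_extra = maximum_capacity - capacity;
      int extra = RoundUp(capacity, alignment);
      return extra < maximum_extra ? extra : maximum_extra;
    }

    int main() {
      // Plenty of headroom: a 64 KB space grows by another 64 KB.
      assert(GrowExtra(64 * 1024, 512 * 1024, 4096) == 64 * 1024);
      // Near the limit: growth is clamped to the remaining 32 KB.
      assert(GrowExtra(480 * 1024, 512 * 1024, 4096) == 32 * 1024);
      return 0;
    }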
| 1585 | 1456 |
| 1586 | 1457 |
| 1587 bool SemiSpace::GrowTo(int new_capacity) { | 1458 bool SemiSpace::GrowTo(int new_capacity) { |
| 1588 ASSERT(new_capacity <= maximum_capacity_); | 1459 ASSERT(new_capacity <= maximum_capacity_); |
| 1589 ASSERT(new_capacity > capacity_); | 1460 ASSERT(new_capacity > capacity_); |
| 1590 size_t delta = new_capacity - capacity_; | 1461 size_t delta = new_capacity - capacity_; |
| 1591 ASSERT(IsAligned(delta, OS::AllocateAlignment())); | 1462 ASSERT(IsAligned(delta, OS::AllocateAlignment())); |
| 1592 if (!MemoryAllocator::CommitBlock(high(), delta, executable())) { | 1463 if (!heap()->isolate()->memory_allocator()->CommitBlock( |
| 1464 high(), delta, executable())) { |
| 1593 return false; | 1465 return false; |
| 1594 } | 1466 } |
| 1595 capacity_ = new_capacity; | 1467 capacity_ = new_capacity; |
| 1596 return true; | 1468 return true; |
| 1597 } | 1469 } |
| 1598 | 1470 |
| 1599 | 1471 |
| 1600 bool SemiSpace::ShrinkTo(int new_capacity) { | 1472 bool SemiSpace::ShrinkTo(int new_capacity) { |
| 1601 ASSERT(new_capacity >= initial_capacity_); | 1473 ASSERT(new_capacity >= initial_capacity_); |
| 1602 ASSERT(new_capacity < capacity_); | 1474 ASSERT(new_capacity < capacity_); |
| 1603 size_t delta = capacity_ - new_capacity; | 1475 size_t delta = capacity_ - new_capacity; |
| 1604 ASSERT(IsAligned(delta, OS::AllocateAlignment())); | 1476 ASSERT(IsAligned(delta, OS::AllocateAlignment())); |
| 1605 if (!MemoryAllocator::UncommitBlock(high() - delta, delta)) { | 1477 if (!heap()->isolate()->memory_allocator()->UncommitBlock( |
| 1478 high() - delta, delta)) { |
| 1606 return false; | 1479 return false; |
| 1607 } | 1480 } |
| 1608 capacity_ = new_capacity; | 1481 capacity_ = new_capacity; |
| 1609 return true; | 1482 return true; |
| 1610 } | 1483 } |
| 1611 | 1484 |
| 1612 | 1485 |
| 1613 #ifdef DEBUG | 1486 #ifdef DEBUG |
| 1614 void SemiSpace::Print() { } | 1487 void SemiSpace::Print() { } |
| 1615 | 1488 |
| (...skipping 27 matching lines...) | |
| 1643 ASSERT(space->ToSpaceLow() <= end | 1516 ASSERT(space->ToSpaceLow() <= end |
| 1644 && end <= space->ToSpaceHigh()); | 1517 && end <= space->ToSpaceHigh()); |
| 1645 space_ = &space->to_space_; | 1518 space_ = &space->to_space_; |
| 1646 current_ = start; | 1519 current_ = start; |
| 1647 limit_ = end; | 1520 limit_ = end; |
| 1648 size_func_ = size_func; | 1521 size_func_ = size_func; |
| 1649 } | 1522 } |
| 1650 | 1523 |
| 1651 | 1524 |
| 1652 #ifdef DEBUG | 1525 #ifdef DEBUG |
| 1653 // A static array of histogram info for each type. | |
| 1654 static HistogramInfo heap_histograms[LAST_TYPE+1]; | |
| 1655 static JSObject::SpillInformation js_spill_information; | |
| 1656 | |
| 1657 // heap_histograms is shared, always clear it before using it. | 1526 // heap_histograms is shared, always clear it before using it. |
| 1658 static void ClearHistograms() { | 1527 static void ClearHistograms() { |
| 1528 Isolate* isolate = Isolate::Current(); |
| 1659 // We reset the name each time, though it hasn't changed. | 1529 // We reset the name each time, though it hasn't changed. |
| 1660 #define DEF_TYPE_NAME(name) heap_histograms[name].set_name(#name); | 1530 #define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name); |
| 1661 INSTANCE_TYPE_LIST(DEF_TYPE_NAME) | 1531 INSTANCE_TYPE_LIST(DEF_TYPE_NAME) |
| 1662 #undef DEF_TYPE_NAME | 1532 #undef DEF_TYPE_NAME |
| 1663 | 1533 |
| 1664 #define CLEAR_HISTOGRAM(name) heap_histograms[name].clear(); | 1534 #define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear(); |
| 1665 INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM) | 1535 INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM) |
| 1666 #undef CLEAR_HISTOGRAM | 1536 #undef CLEAR_HISTOGRAM |
| 1667 | 1537 |
| 1668 js_spill_information.Clear(); | 1538 isolate->js_spill_information()->Clear(); |
| 1669 } | 1539 } |
| 1670 | 1540 |
| 1671 | 1541 |
| 1672 static int code_kind_statistics[Code::NUMBER_OF_KINDS]; | |
| 1673 | |
| 1674 | |
| 1675 static void ClearCodeKindStatistics() { | 1542 static void ClearCodeKindStatistics() { |
| 1543 Isolate* isolate = Isolate::Current(); |
| 1676 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) { | 1544 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) { |
| 1677 code_kind_statistics[i] = 0; | 1545 isolate->code_kind_statistics()[i] = 0; |
| 1678 } | 1546 } |
| 1679 } | 1547 } |
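The file-level statics deleted above and the new isolate->heap_histograms(), isolate->code_kind_statistics(), and isolate->js_spill_information() accessors are two halves of one refactoring: process-wide debug counters become members of the isolate. A small sketch of that shape, with placeholder types and a made-up type count:

    #include <cstring>

    struct HistogramInfo {
      const char* name;
      int number;
      int bytes;
    };

    const int kNumTypes = 8;  // stand-in for LAST_TYPE + 1

    class Isolate {
     public:
      // Accessor replacing the old file-level static array.
      HistogramInfo* heap_histograms() { return heap_histograms_; }
     private:
      HistogramInfo heap_histograms_[kNumTypes] = {};
    };

    static void ClearHistograms(Isolate* isolate) {
      std::memset(isolate->heap_histograms(), 0,
                  kNumTypes * sizeof(HistogramInfo));
    }

    int main() {
      Isolate a, b;
      a.heap_histograms()[0].number = 3;  // touches only isolate a
      ClearHistograms(&a);
      return a.heap_histograms()[0].number + b.heap_histograms()[0].number;
    }

With the shared statics gone, two isolates collecting --heap-stats at the same time no longer race on the same counters.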
| 1680 | 1548 |
| 1681 | 1549 |
| 1682 static void ReportCodeKindStatistics() { | 1550 static void ReportCodeKindStatistics() { |
| 1551 Isolate* isolate = Isolate::Current(); |
| 1683 const char* table[Code::NUMBER_OF_KINDS] = { NULL }; | 1552 const char* table[Code::NUMBER_OF_KINDS] = { NULL }; |
| 1684 | 1553 |
| 1685 #define CASE(name) \ | 1554 #define CASE(name) \ |
| 1686 case Code::name: table[Code::name] = #name; \ | 1555 case Code::name: table[Code::name] = #name; \ |
| 1687 break | 1556 break |
| 1688 | 1557 |
| 1689 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) { | 1558 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) { |
| 1690 switch (static_cast<Code::Kind>(i)) { | 1559 switch (static_cast<Code::Kind>(i)) { |
| 1691 CASE(FUNCTION); | 1560 CASE(FUNCTION); |
| 1692 CASE(OPTIMIZED_FUNCTION); | 1561 CASE(OPTIMIZED_FUNCTION); |
| (...skipping 10 matching lines...) | |
| 1703 CASE(BINARY_OP_IC); | 1572 CASE(BINARY_OP_IC); |
| 1704 CASE(TYPE_RECORDING_BINARY_OP_IC); | 1573 CASE(TYPE_RECORDING_BINARY_OP_IC); |
| 1705 CASE(COMPARE_IC); | 1574 CASE(COMPARE_IC); |
| 1706 } | 1575 } |
| 1707 } | 1576 } |
| 1708 | 1577 |
| 1709 #undef CASE | 1578 #undef CASE |
| 1710 | 1579 |
| 1711 PrintF("\n Code kind histograms: \n"); | 1580 PrintF("\n Code kind histograms: \n"); |
| 1712 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) { | 1581 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) { |
| 1713 if (code_kind_statistics[i] > 0) { | 1582 if (isolate->code_kind_statistics()[i] > 0) { |
| 1714 PrintF(" %-20s: %10d bytes\n", table[i], code_kind_statistics[i]); | 1583 PrintF(" %-20s: %10d bytes\n", table[i], |
| 1584 isolate->code_kind_statistics()[i]); |
| 1715 } | 1585 } |
| 1716 } | 1586 } |
| 1717 PrintF("\n"); | 1587 PrintF("\n"); |
| 1718 } | 1588 } |
| 1719 | 1589 |
| 1720 | 1590 |
| 1721 static int CollectHistogramInfo(HeapObject* obj) { | 1591 static int CollectHistogramInfo(HeapObject* obj) { |
| 1592 Isolate* isolate = Isolate::Current(); |
| 1722 InstanceType type = obj->map()->instance_type(); | 1593 InstanceType type = obj->map()->instance_type(); |
| 1723 ASSERT(0 <= type && type <= LAST_TYPE); | 1594 ASSERT(0 <= type && type <= LAST_TYPE); |
| 1724 ASSERT(heap_histograms[type].name() != NULL); | 1595 ASSERT(isolate->heap_histograms()[type].name() != NULL); |
| 1725 heap_histograms[type].increment_number(1); | 1596 isolate->heap_histograms()[type].increment_number(1); |
| 1726 heap_histograms[type].increment_bytes(obj->Size()); | 1597 isolate->heap_histograms()[type].increment_bytes(obj->Size()); |
| 1727 | 1598 |
| 1728 if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) { | 1599 if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) { |
| 1729 JSObject::cast(obj)->IncrementSpillStatistics(&js_spill_information); | 1600 JSObject::cast(obj)->IncrementSpillStatistics( |
| 1601 isolate->js_spill_information()); |
| 1730 } | 1602 } |
| 1731 | 1603 |
| 1732 return obj->Size(); | 1604 return obj->Size(); |
| 1733 } | 1605 } |
| 1734 | 1606 |
| 1735 | 1607 |
| 1736 static void ReportHistogram(bool print_spill) { | 1608 static void ReportHistogram(bool print_spill) { |
| 1609 Isolate* isolate = Isolate::Current(); |
| 1737 PrintF("\n Object Histogram:\n"); | 1610 PrintF("\n Object Histogram:\n"); |
| 1738 for (int i = 0; i <= LAST_TYPE; i++) { | 1611 for (int i = 0; i <= LAST_TYPE; i++) { |
| 1739 if (heap_histograms[i].number() > 0) { | 1612 if (isolate->heap_histograms()[i].number() > 0) { |
| 1740 PrintF(" %-34s%10d (%10d bytes)\n", | 1613 PrintF(" %-34s%10d (%10d bytes)\n", |
| 1741 heap_histograms[i].name(), | 1614 isolate->heap_histograms()[i].name(), |
| 1742 heap_histograms[i].number(), | 1615 isolate->heap_histograms()[i].number(), |
| 1743 heap_histograms[i].bytes()); | 1616 isolate->heap_histograms()[i].bytes()); |
| 1744 } | 1617 } |
| 1745 } | 1618 } |
| 1746 PrintF("\n"); | 1619 PrintF("\n"); |
| 1747 | 1620 |
| 1748 // Summarize string types. | 1621 // Summarize string types. |
| 1749 int string_number = 0; | 1622 int string_number = 0; |
| 1750 int string_bytes = 0; | 1623 int string_bytes = 0; |
| 1751 #define INCREMENT(type, size, name, camel_name) \ | 1624 #define INCREMENT(type, size, name, camel_name) \ |
| 1752 string_number += heap_histograms[type].number(); \ | 1625 string_number += isolate->heap_histograms()[type].number(); \ |
| 1753 string_bytes += heap_histograms[type].bytes(); | 1626 string_bytes += isolate->heap_histograms()[type].bytes(); |
| 1754 STRING_TYPE_LIST(INCREMENT) | 1627 STRING_TYPE_LIST(INCREMENT) |
| 1755 #undef INCREMENT | 1628 #undef INCREMENT |
| 1756 if (string_number > 0) { | 1629 if (string_number > 0) { |
| 1757 PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number, | 1630 PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number, |
| 1758 string_bytes); | 1631 string_bytes); |
| 1759 } | 1632 } |
| 1760 | 1633 |
| 1761 if (FLAG_collect_heap_spill_statistics && print_spill) { | 1634 if (FLAG_collect_heap_spill_statistics && print_spill) { |
| 1762 js_spill_information.Print(); | 1635 isolate->js_spill_information()->Print(); |
| 1763 } | 1636 } |
| 1764 } | 1637 } |
| 1765 #endif // DEBUG | 1638 #endif // DEBUG |
| 1766 | 1639 |
| 1767 | 1640 |
| 1768 // Support for statistics gathering for --heap-stats and --log-gc. | 1641 // Support for statistics gathering for --heap-stats and --log-gc. |
| 1769 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) | 1642 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) |
| 1770 void NewSpace::ClearHistograms() { | 1643 void NewSpace::ClearHistograms() { |
| 1771 for (int i = 0; i <= LAST_TYPE; i++) { | 1644 for (int i = 0; i <= LAST_TYPE; i++) { |
| 1772 allocated_histogram_[i].clear(); | 1645 allocated_histogram_[i].clear(); |
| 1773 promoted_histogram_[i].clear(); | 1646 promoted_histogram_[i].clear(); |
| 1774 } | 1647 } |
| 1775 } | 1648 } |
| 1776 | 1649 |
| 1777 // Because the copying collector does not touch garbage objects, we iterate | 1650 // Because the copying collector does not touch garbage objects, we iterate |
| 1778 // the new space before a collection to get a histogram of allocated objects. | 1651 // the new space before a collection to get a histogram of allocated objects. |
| 1779 // This only happens (1) when compiled with DEBUG and the --heap-stats flag is | 1652 // This only happens (1) when compiled with DEBUG and the --heap-stats flag is |
| 1780 // set, or when compiled with ENABLE_LOGGING_AND_PROFILING and the --log-gc | 1653 // set, or when compiled with ENABLE_LOGGING_AND_PROFILING and the --log-gc |
| 1781 // flag is set. | 1654 // flag is set. |
| 1782 void NewSpace::CollectStatistics() { | 1655 void NewSpace::CollectStatistics() { |
| 1783 ClearHistograms(); | 1656 ClearHistograms(); |
| 1784 SemiSpaceIterator it(this); | 1657 SemiSpaceIterator it(this); |
| 1785 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) | 1658 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) |
| 1786 RecordAllocation(obj); | 1659 RecordAllocation(obj); |
| 1787 } | 1660 } |
| 1788 | 1661 |
| 1789 | 1662 |
| 1790 #ifdef ENABLE_LOGGING_AND_PROFILING | 1663 #ifdef ENABLE_LOGGING_AND_PROFILING |
| 1791 static void DoReportStatistics(HistogramInfo* info, const char* description) { | 1664 static void DoReportStatistics(Isolate* isolate, |
| 1792 LOG(HeapSampleBeginEvent("NewSpace", description)); | 1665 HistogramInfo* info, const char* description) { |
| 1666 LOG(isolate, HeapSampleBeginEvent("NewSpace", description)); |
| 1793 // Lump all the string types together. | 1667 // Lump all the string types together. |
| 1794 int string_number = 0; | 1668 int string_number = 0; |
| 1795 int string_bytes = 0; | 1669 int string_bytes = 0; |
| 1796 #define INCREMENT(type, size, name, camel_name) \ | 1670 #define INCREMENT(type, size, name, camel_name) \ |
| 1797 string_number += info[type].number(); \ | 1671 string_number += info[type].number(); \ |
| 1798 string_bytes += info[type].bytes(); | 1672 string_bytes += info[type].bytes(); |
| 1799 STRING_TYPE_LIST(INCREMENT) | 1673 STRING_TYPE_LIST(INCREMENT) |
| 1800 #undef INCREMENT | 1674 #undef INCREMENT |
| 1801 if (string_number > 0) { | 1675 if (string_number > 0) { |
| 1802 LOG(HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes)); | 1676 LOG(isolate, |
| 1677 HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes)); |
| 1803 } | 1678 } |
| 1804 | 1679 |
| 1805 // Then do the other types. | 1680 // Then do the other types. |
| 1806 for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) { | 1681 for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) { |
| 1807 if (info[i].number() > 0) { | 1682 if (info[i].number() > 0) { |
| 1808 LOG(HeapSampleItemEvent(info[i].name(), info[i].number(), | 1683 LOG(isolate, |
| 1684 HeapSampleItemEvent(info[i].name(), info[i].number(), |
| 1809 info[i].bytes())); | 1685 info[i].bytes())); |
| 1810 } | 1686 } |
| 1811 } | 1687 } |
| 1812 LOG(HeapSampleEndEvent("NewSpace", description)); | 1688 LOG(isolate, HeapSampleEndEvent("NewSpace", description)); |
| 1813 } | 1689 } |
| 1814 #endif // ENABLE_LOGGING_AND_PROFILING | 1690 #endif // ENABLE_LOGGING_AND_PROFILING |
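Every LOG(...) in this hunk gains an explicit isolate argument, matching the DoReportStatistics signature change above. A hypothetical sketch of why the macro wants it, with a stand-in Logger (the real macro and Logger live in log.h and do more):

    #include <cstdio>

    class Logger {
     public:
      bool is_logging() const { return true; }
      void HeapSampleBeginEvent(const char* space, const char* kind) {
        std::printf("heap-sample-begin,\"%s\",\"%s\"\n", space, kind);
      }
    };

    class Isolate {
     public:
      Logger* logger() { return &logger_; }
     private:
      Logger logger_;
    };

    // Old shape (roughly): LOG(Call) went through a global logger.
    // New shape: the call site says whose logger should record the event.
    #define LOG(isolate, Call)                      \
      do {                                          \
        Logger* logger = (isolate)->logger();       \
        if (logger->is_logging()) logger->Call;     \
      } while (false)

    int main() {
      Isolate isolate;
      LOG(&isolate, HeapSampleBeginEvent("NewSpace", "allocated"));
      return 0;
    }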
| 1815 | 1691 |
| 1816 | 1692 |
| 1817 void NewSpace::ReportStatistics() { | 1693 void NewSpace::ReportStatistics() { |
| 1818 #ifdef DEBUG | 1694 #ifdef DEBUG |
| 1819 if (FLAG_heap_stats) { | 1695 if (FLAG_heap_stats) { |
| 1820 float pct = static_cast<float>(Available()) / Capacity(); | 1696 float pct = static_cast<float>(Available()) / Capacity(); |
| 1821 PrintF(" capacity: %" V8_PTR_PREFIX "d" | 1697 PrintF(" capacity: %" V8_PTR_PREFIX "d" |
| 1822 ", available: %" V8_PTR_PREFIX "d, %%%d\n", | 1698 ", available: %" V8_PTR_PREFIX "d, %%%d\n", |
| 1823 Capacity(), Available(), static_cast<int>(pct*100)); | 1699 Capacity(), Available(), static_cast<int>(pct*100)); |
| 1824 PrintF("\n Object Histogram:\n"); | 1700 PrintF("\n Object Histogram:\n"); |
| 1825 for (int i = 0; i <= LAST_TYPE; i++) { | 1701 for (int i = 0; i <= LAST_TYPE; i++) { |
| 1826 if (allocated_histogram_[i].number() > 0) { | 1702 if (allocated_histogram_[i].number() > 0) { |
| 1827 PrintF(" %-34s%10d (%10d bytes)\n", | 1703 PrintF(" %-34s%10d (%10d bytes)\n", |
| 1828 allocated_histogram_[i].name(), | 1704 allocated_histogram_[i].name(), |
| 1829 allocated_histogram_[i].number(), | 1705 allocated_histogram_[i].number(), |
| 1830 allocated_histogram_[i].bytes()); | 1706 allocated_histogram_[i].bytes()); |
| 1831 } | 1707 } |
| 1832 } | 1708 } |
| 1833 PrintF("\n"); | 1709 PrintF("\n"); |
| 1834 } | 1710 } |
| 1835 #endif // DEBUG | 1711 #endif // DEBUG |
| 1836 | 1712 |
| 1837 #ifdef ENABLE_LOGGING_AND_PROFILING | 1713 #ifdef ENABLE_LOGGING_AND_PROFILING |
| 1838 if (FLAG_log_gc) { | 1714 if (FLAG_log_gc) { |
| 1839 DoReportStatistics(allocated_histogram_, "allocated"); | 1715 Isolate* isolate = ISOLATE; |
| 1840 DoReportStatistics(promoted_histogram_, "promoted"); | 1716 DoReportStatistics(isolate, allocated_histogram_, "allocated"); |
| 1717 DoReportStatistics(isolate, promoted_histogram_, "promoted"); |
| 1841 } | 1718 } |
| 1842 #endif // ENABLE_LOGGING_AND_PROFILING | 1719 #endif // ENABLE_LOGGING_AND_PROFILING |
| 1843 } | 1720 } |
| 1844 | 1721 |
| 1845 | 1722 |
| 1846 void NewSpace::RecordAllocation(HeapObject* obj) { | 1723 void NewSpace::RecordAllocation(HeapObject* obj) { |
| 1847 InstanceType type = obj->map()->instance_type(); | 1724 InstanceType type = obj->map()->instance_type(); |
| 1848 ASSERT(0 <= type && type <= LAST_TYPE); | 1725 ASSERT(0 <= type && type <= LAST_TYPE); |
| 1849 allocated_histogram_[type].increment_number(1); | 1726 allocated_histogram_[type].increment_number(1); |
| 1850 allocated_histogram_[type].increment_bytes(obj->Size()); | 1727 allocated_histogram_[type].increment_bytes(obj->Size()); |
| (...skipping 17 matching lines...) | |
| 1868 ASSERT(IsAligned(size_in_bytes, kPointerSize)); | 1745 ASSERT(IsAligned(size_in_bytes, kPointerSize)); |
| 1869 | 1746 |
| 1870 // We write a map and possibly size information to the block. If the block | 1747 // We write a map and possibly size information to the block. If the block |
| 1871 // is big enough to be a ByteArray with at least one extra word (the next | 1748 // is big enough to be a ByteArray with at least one extra word (the next |
| 1872 // pointer), we set its map to be the byte array map and its size to an | 1749 // pointer), we set its map to be the byte array map and its size to an |
| 1873 // appropriate array length for the desired size from HeapObject::Size(). | 1750 // appropriate array length for the desired size from HeapObject::Size(). |
| 1874 // If the block is too small (e.g., one or two words) to hold both a size | 1751 // If the block is too small (e.g., one or two words) to hold both a size |
| 1875 // field and a next pointer, we give it a filler map that gives it the | 1752 // field and a next pointer, we give it a filler map that gives it the |
| 1876 // correct size. | 1753 // correct size. |
| 1877 if (size_in_bytes > ByteArray::kHeaderSize) { | 1754 if (size_in_bytes > ByteArray::kHeaderSize) { |
| 1878 set_map(Heap::raw_unchecked_byte_array_map()); | 1755 set_map(HEAP->raw_unchecked_byte_array_map()); |
| 1879 // Can't use ByteArray::cast because it fails during deserialization. | 1756 // Can't use ByteArray::cast because it fails during deserialization. |
| 1880 ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this); | 1757 ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this); |
| 1881 this_as_byte_array->set_length(ByteArray::LengthFor(size_in_bytes)); | 1758 this_as_byte_array->set_length(ByteArray::LengthFor(size_in_bytes)); |
| 1882 } else if (size_in_bytes == kPointerSize) { | 1759 } else if (size_in_bytes == kPointerSize) { |
| 1883 set_map(Heap::raw_unchecked_one_pointer_filler_map()); | 1760 set_map(HEAP->raw_unchecked_one_pointer_filler_map()); |
| 1884 } else if (size_in_bytes == 2 * kPointerSize) { | 1761 } else if (size_in_bytes == 2 * kPointerSize) { |
| 1885 set_map(Heap::raw_unchecked_two_pointer_filler_map()); | 1762 set_map(HEAP->raw_unchecked_two_pointer_filler_map()); |
| 1886 } else { | 1763 } else { |
| 1887 UNREACHABLE(); | 1764 UNREACHABLE(); |
| 1888 } | 1765 } |
| 1889 // We would like to ASSERT(Size() == size_in_bytes) but this would fail during | 1766 // We would like to ASSERT(Size() == size_in_bytes) but this would fail during |
| 1890 // deserialization because the byte array map is not done yet. | 1767 // deserialization because the byte array map is not done yet. |
| 1891 } | 1768 } |
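The comment above explains the free-block encoding: a hole big enough for a length word plus a next pointer is disguised as a ByteArray, while one- and two-word holes get the one- or two-pointer filler maps, so heap iteration can step over dead space. (The Heap:: to HEAP-> change in the same lines is the usual static-to-isolate substitution; HEAP is shorthand for the current isolate's heap.) A simplified classifier for the three cases, using assumed sizes rather than the real ByteArray layout:

    #include <cassert>

    const int kPointerSize = sizeof(void*);
    const int kByteArrayHeaderSize = 2 * kPointerSize;  // assumed: map + length

    enum FreeBlockKind { kByteArrayBlock, kOnePointerFiller, kTwoPointerFiller };

    static FreeBlockKind ClassifyFreeBlock(int size_in_bytes) {
      if (size_in_bytes > kByteArrayHeaderSize) return kByteArrayBlock;
      if (size_in_bytes == kPointerSize) return kOnePointerFiller;
      assert(size_in_bytes == 2 * kPointerSize);  // UNREACHABLE() otherwise
      return kTwoPointerFiller;
    }

    int main() {
      assert(ClassifyFreeBlock(kPointerSize) == kOnePointerFiller);
      assert(ClassifyFreeBlock(2 * kPointerSize) == kTwoPointerFiller);
      assert(ClassifyFreeBlock(64) == kByteArrayBlock);
      return 0;
    }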
| 1892 | 1769 |
| 1893 | 1770 |
| 1894 Address FreeListNode::next() { | 1771 Address FreeListNode::next() { |
| 1895 ASSERT(IsFreeListNode(this)); | 1772 ASSERT(IsFreeListNode(this)); |
| 1896 if (map() == Heap::raw_unchecked_byte_array_map()) { | 1773 if (map() == HEAP->raw_unchecked_byte_array_map()) { |
| 1897 ASSERT(Size() >= kNextOffset + kPointerSize); | 1774 ASSERT(Size() >= kNextOffset + kPointerSize); |
| 1898 return Memory::Address_at(address() + kNextOffset); | 1775 return Memory::Address_at(address() + kNextOffset); |
| 1899 } else { | 1776 } else { |
| 1900 return Memory::Address_at(address() + kPointerSize); | 1777 return Memory::Address_at(address() + kPointerSize); |
| 1901 } | 1778 } |
| 1902 } | 1779 } |
| 1903 | 1780 |
| 1904 | 1781 |
| 1905 void FreeListNode::set_next(Address next) { | 1782 void FreeListNode::set_next(Address next) { |
| 1906 ASSERT(IsFreeListNode(this)); | 1783 ASSERT(IsFreeListNode(this)); |
| 1907 if (map() == Heap::raw_unchecked_byte_array_map()) { | 1784 if (map() == HEAP->raw_unchecked_byte_array_map()) { |
| 1908 ASSERT(Size() >= kNextOffset + kPointerSize); | 1785 ASSERT(Size() >= kNextOffset + kPointerSize); |
| 1909 Memory::Address_at(address() + kNextOffset) = next; | 1786 Memory::Address_at(address() + kNextOffset) = next; |
| 1910 } else { | 1787 } else { |
| 1911 Memory::Address_at(address() + kPointerSize) = next; | 1788 Memory::Address_at(address() + kPointerSize) = next; |
| 1912 } | 1789 } |
| 1913 } | 1790 } |
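next() and set_next() keep the free-list link inside the dead block itself: at kNextOffset when the block is dressed up as a ByteArray, and immediately after the map word for the filler case. A minimal sketch of threading a pointer through raw block memory (offsets are illustrative, not V8's):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    const int kPointerSize = sizeof(void*);

    // Write the next-block pointer into the freed block at a given offset.
    static void SetNext(uint8_t* block, int offset, uint8_t* next) {
      std::memcpy(block + offset, &next, kPointerSize);
    }

    static uint8_t* GetNext(uint8_t* block, int offset) {
      uint8_t* next;
      std::memcpy(block + offset, &next, kPointerSize);
      return next;
    }

    int main() {
      uint8_t block_a[64];
      uint8_t block_b[64];
      // Larger blocks keep the link past the (assumed) two-word header;
      // tiny fillers would keep it right after the map word instead.
      SetNext(block_a, 2 * kPointerSize, block_b);
      assert(GetNext(block_a, 2 * kPointerSize) == block_b);
      return 0;
    }

Storing the link in the freed memory means the free list needs no side allocations of its own.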
| 1914 | 1791 |
| 1915 | 1792 |
| 1916 OldSpaceFreeList::OldSpaceFreeList(AllocationSpace owner) : owner_(owner) { | 1793 OldSpaceFreeList::OldSpaceFreeList(AllocationSpace owner) : owner_(owner) { |
| 1917 Reset(); | 1794 Reset(); |
| (...skipping 20 matching lines...) | |
| 1938 cur = i; | 1815 cur = i; |
| 1939 } | 1816 } |
| 1940 } | 1817 } |
| 1941 free_[cur].next_size_ = kEnd; | 1818 free_[cur].next_size_ = kEnd; |
| 1942 needs_rebuild_ = false; | 1819 needs_rebuild_ = false; |
| 1943 } | 1820 } |
| 1944 | 1821 |
| 1945 | 1822 |
| 1946 int OldSpaceFreeList::Free(Address start, int size_in_bytes) { | 1823 int OldSpaceFreeList::Free(Address start, int size_in_bytes) { |
| 1947 #ifdef DEBUG | 1824 #ifdef DEBUG |
| 1948 MemoryAllocator::ZapBlock(start, size_in_bytes); | 1825 Isolate::Current()->memory_allocator()->ZapBlock(start, size_in_bytes); |
| 1949 #endif | 1826 #endif |
| 1950 FreeListNode* node = FreeListNode::FromAddress(start); | 1827 FreeListNode* node = FreeListNode::FromAddress(start); |
| 1951 node->set_size(size_in_bytes); | 1828 node->set_size(size_in_bytes); |
| 1952 | 1829 |
| 1953 // We don't use the freelists in compacting mode. This makes it more like a | 1830 // We don't use the freelists in compacting mode. This makes it more like a |
| 1954 // GC that only has mark-sweep-compact and doesn't have a mark-sweep | 1831 // GC that only has mark-sweep-compact and doesn't have a mark-sweep |
| 1955 // collector. | 1832 // collector. |
| 1956 if (FLAG_always_compact) { | 1833 if (FLAG_always_compact) { |
| 1957 return size_in_bytes; | 1834 return size_in_bytes; |
| 1958 } | 1835 } |
| (...skipping 123 matching lines...) | |
| 2082 | 1959 |
| 2083 | 1960 |
| 2084 void FixedSizeFreeList::Reset() { | 1961 void FixedSizeFreeList::Reset() { |
| 2085 available_ = 0; | 1962 available_ = 0; |
| 2086 head_ = tail_ = NULL; | 1963 head_ = tail_ = NULL; |
| 2087 } | 1964 } |
| 2088 | 1965 |
| 2089 | 1966 |
| 2090 void FixedSizeFreeList::Free(Address start) { | 1967 void FixedSizeFreeList::Free(Address start) { |
| 2091 #ifdef DEBUG | 1968 #ifdef DEBUG |
| 2092 MemoryAllocator::ZapBlock(start, object_size_); | 1969 Isolate::Current()->memory_allocator()->ZapBlock(start, object_size_); |
| 2093 #endif | 1970 #endif |
| 2094 // We only use the freelists with mark-sweep. | 1971 // We only use the freelists with mark-sweep. |
| 2095 ASSERT(!MarkCompactCollector::IsCompacting()); | 1972 ASSERT(!HEAP->mark_compact_collector()->IsCompacting()); |
| 2096 FreeListNode* node = FreeListNode::FromAddress(start); | 1973 FreeListNode* node = FreeListNode::FromAddress(start); |
| 2097 node->set_size(object_size_); | 1974 node->set_size(object_size_); |
| 2098 node->set_next(NULL); | 1975 node->set_next(NULL); |
| 2099 if (head_ == NULL) { | 1976 if (head_ == NULL) { |
| 2100 tail_ = head_ = node->address(); | 1977 tail_ = head_ = node->address(); |
| 2101 } else { | 1978 } else { |
| 2102 FreeListNode::FromAddress(tail_)->set_next(node->address()); | 1979 FreeListNode::FromAddress(tail_)->set_next(node->address()); |
| 2103 tail_ = node->address(); | 1980 tail_ = node->address(); |
| 2104 } | 1981 } |
| 2105 available_ += object_size_; | 1982 available_ += object_size_; |
| (...skipping 106 matching lines...) | |
| 2212 } | 2089 } |
| 2213 | 2090 |
| 2214 Page* first = NULL; | 2091 Page* first = NULL; |
| 2215 | 2092 |
| 2216 // Remove pages from the list. | 2093 // Remove pages from the list. |
| 2217 if (prev == NULL) { | 2094 if (prev == NULL) { |
| 2218 first = first_page_; | 2095 first = first_page_; |
| 2219 first_page_ = last->next_page(); | 2096 first_page_ = last->next_page(); |
| 2220 } else { | 2097 } else { |
| 2221 first = prev->next_page(); | 2098 first = prev->next_page(); |
| 2222 MemoryAllocator::SetNextPage(prev, last->next_page()); | 2099 heap()->isolate()->memory_allocator()->SetNextPage( |
| 2100 prev, last->next_page()); |
| 2223 } | 2101 } |
| 2224 | 2102 |
| 2225 // Attach it after the last page. | 2103 // Attach it after the last page. |
| 2226 MemoryAllocator::SetNextPage(last_page_, first); | 2104 heap()->isolate()->memory_allocator()->SetNextPage(last_page_, first); |
| 2227 last_page_ = last; | 2105 last_page_ = last; |
| 2228 MemoryAllocator::SetNextPage(last, NULL); | 2106 heap()->isolate()->memory_allocator()->SetNextPage(last, NULL); |
| 2229 | 2107 |
| 2230 // Clean them up. | 2108 // Clean them up. |
| 2231 do { | 2109 do { |
| 2232 first->InvalidateWatermark(true); | 2110 first->InvalidateWatermark(true); |
| 2233 first->SetAllocationWatermark(first->ObjectAreaStart()); | 2111 first->SetAllocationWatermark(first->ObjectAreaStart()); |
| 2234 first->SetCachedAllocationWatermark(first->ObjectAreaStart()); | 2112 first->SetCachedAllocationWatermark(first->ObjectAreaStart()); |
| 2235 first->SetRegionMarks(Page::kAllRegionsCleanMarks); | 2113 first->SetRegionMarks(Page::kAllRegionsCleanMarks); |
| 2236 first = first->next_page(); | 2114 first = first->next_page(); |
| 2237 } while (first != NULL); | 2115 } while (first != NULL); |
| 2238 | 2116 |
| (...skipping 18 matching lines...) | |
| 2257 if (p == last_in_use) { | 2135 if (p == last_in_use) { |
| 2258 // We passed a page containing allocation top. All consequent | 2136 // We passed a page containing allocation top. All consequent |
| 2259 // pages are not used. | 2137 // pages are not used. |
| 2260 in_use = false; | 2138 in_use = false; |
| 2261 } | 2139 } |
| 2262 } | 2140 } |
| 2263 | 2141 |
| 2264 if (page_list_is_chunk_ordered_) return; | 2142 if (page_list_is_chunk_ordered_) return; |
| 2265 | 2143 |
| 2266 Page* new_last_in_use = Page::FromAddress(NULL); | 2144 Page* new_last_in_use = Page::FromAddress(NULL); |
| 2267 MemoryAllocator::RelinkPageListInChunkOrder(this, | 2145 heap()->isolate()->memory_allocator()->RelinkPageListInChunkOrder( |
| 2268 &first_page_, | 2146 this, &first_page_, &last_page_, &new_last_in_use); |
| 2269 &last_page_, | |
| 2270 &new_last_in_use); | |
| 2271 ASSERT(new_last_in_use->is_valid()); | 2147 ASSERT(new_last_in_use->is_valid()); |
| 2272 | 2148 |
| 2273 if (new_last_in_use != last_in_use) { | 2149 if (new_last_in_use != last_in_use) { |
| 2274 // Current allocation top points to a page which is now in the middle | 2150 // Current allocation top points to a page which is now in the middle |
| 2275 // of page list. We should move allocation top forward to the new last | 2151 // of page list. We should move allocation top forward to the new last |
| 2276 // used page so various object iterators will continue to work properly. | 2152 // used page so various object iterators will continue to work properly. |
| 2277 int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) - | 2153 int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) - |
| 2278 last_in_use->AllocationTop()); | 2154 last_in_use->AllocationTop()); |
| 2279 | 2155 |
| 2280 last_in_use->SetAllocationWatermark(last_in_use->AllocationTop()); | 2156 last_in_use->SetAllocationWatermark(last_in_use->AllocationTop()); |
| 2281 if (size_in_bytes > 0) { | 2157 if (size_in_bytes > 0) { |
| 2282 Address start = last_in_use->AllocationTop(); | 2158 Address start = last_in_use->AllocationTop(); |
| 2283 if (deallocate_blocks) { | 2159 if (deallocate_blocks) { |
| 2284 accounting_stats_.AllocateBytes(size_in_bytes); | 2160 accounting_stats_.AllocateBytes(size_in_bytes); |
| 2285 DeallocateBlock(start, size_in_bytes, add_to_freelist); | 2161 DeallocateBlock(start, size_in_bytes, add_to_freelist); |
| 2286 } else { | 2162 } else { |
| 2287 Heap::CreateFillerObjectAt(start, size_in_bytes); | 2163 heap()->CreateFillerObjectAt(start, size_in_bytes); |
| 2288 } | 2164 } |
| 2289 } | 2165 } |
| 2290 | 2166 |
| 2291 // New last in use page was in the middle of the list before | 2167 // New last in use page was in the middle of the list before |
| 2292 // sorting so it is full. | 2168 // sorting so it is full. |
| 2293 SetTop(new_last_in_use->AllocationTop()); | 2169 SetTop(new_last_in_use->AllocationTop()); |
| 2294 | 2170 |
| 2295 ASSERT(AllocationTopPage() == new_last_in_use); | 2171 ASSERT(AllocationTopPage() == new_last_in_use); |
| 2296 ASSERT(AllocationTopPage()->WasInUseBeforeMC()); | 2172 ASSERT(AllocationTopPage()->WasInUseBeforeMC()); |
| 2297 } | 2173 } |
| 2298 | 2174 |
| 2299 PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE); | 2175 PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE); |
| 2300 while (pages_in_use_iterator.has_next()) { | 2176 while (pages_in_use_iterator.has_next()) { |
| 2301 Page* p = pages_in_use_iterator.next(); | 2177 Page* p = pages_in_use_iterator.next(); |
| 2302 if (!p->WasInUseBeforeMC()) { | 2178 if (!p->WasInUseBeforeMC()) { |
| 2303 // Empty page is in the middle of a sequence of used pages. | 2179 // Empty page is in the middle of a sequence of used pages. |
| 2304 // Allocate it as a whole and deallocate immediately. | 2180 // Allocate it as a whole and deallocate immediately. |
| 2305 int size_in_bytes = static_cast<int>(PageAllocationLimit(p) - | 2181 int size_in_bytes = static_cast<int>(PageAllocationLimit(p) - |
| 2306 p->ObjectAreaStart()); | 2182 p->ObjectAreaStart()); |
| 2307 | 2183 |
| 2308 p->SetAllocationWatermark(p->ObjectAreaStart()); | 2184 p->SetAllocationWatermark(p->ObjectAreaStart()); |
| 2309 Address start = p->ObjectAreaStart(); | 2185 Address start = p->ObjectAreaStart(); |
| 2310 if (deallocate_blocks) { | 2186 if (deallocate_blocks) { |
| 2311 accounting_stats_.AllocateBytes(size_in_bytes); | 2187 accounting_stats_.AllocateBytes(size_in_bytes); |
| 2312 DeallocateBlock(start, size_in_bytes, add_to_freelist); | 2188 DeallocateBlock(start, size_in_bytes, add_to_freelist); |
| 2313 } else { | 2189 } else { |
| 2314 Heap::CreateFillerObjectAt(start, size_in_bytes); | 2190 heap()->CreateFillerObjectAt(start, size_in_bytes); |
| 2315 } | 2191 } |
| 2316 } | 2192 } |
| 2317 } | 2193 } |
| 2318 | 2194 |
| 2319 page_list_is_chunk_ordered_ = true; | 2195 page_list_is_chunk_ordered_ = true; |
| 2320 } | 2196 } |
| 2321 | 2197 |
| 2322 | 2198 |
| 2323 void PagedSpace::PrepareForMarkCompact(bool will_compact) { | 2199 void PagedSpace::PrepareForMarkCompact(bool will_compact) { |
| 2324 if (will_compact) { | 2200 if (will_compact) { |
| 2325 RelinkPageListInChunkOrder(false); | 2201 RelinkPageListInChunkOrder(false); |
| 2326 } | 2202 } |
| 2327 } | 2203 } |
| 2328 | 2204 |
| 2329 | 2205 |
| 2330 bool PagedSpace::ReserveSpace(int bytes) { | 2206 bool PagedSpace::ReserveSpace(int bytes) { |
| 2331 Address limit = allocation_info_.limit; | 2207 Address limit = allocation_info_.limit; |
| 2332 Address top = allocation_info_.top; | 2208 Address top = allocation_info_.top; |
| 2333 if (limit - top >= bytes) return true; | 2209 if (limit - top >= bytes) return true; |
| 2334 | 2210 |
| 2335 // There wasn't enough space in the current page. Lets put the rest | 2211 // There wasn't enough space in the current page. Lets put the rest |
| 2336 // of the page on the free list and start a fresh page. | 2212 // of the page on the free list and start a fresh page. |
| 2337 PutRestOfCurrentPageOnFreeList(TopPageOf(allocation_info_)); | 2213 PutRestOfCurrentPageOnFreeList(TopPageOf(allocation_info_)); |
| 2338 | 2214 |
| 2339 Page* reserved_page = TopPageOf(allocation_info_); | 2215 Page* reserved_page = TopPageOf(allocation_info_); |
| 2340 int bytes_left_to_reserve = bytes; | 2216 int bytes_left_to_reserve = bytes; |
| 2341 while (bytes_left_to_reserve > 0) { | 2217 while (bytes_left_to_reserve > 0) { |
| 2342 if (!reserved_page->next_page()->is_valid()) { | 2218 if (!reserved_page->next_page()->is_valid()) { |
| 2343 if (Heap::OldGenerationAllocationLimitReached()) return false; | 2219 if (heap()->OldGenerationAllocationLimitReached()) return false; |
| 2344 Expand(reserved_page); | 2220 Expand(reserved_page); |
| 2345 } | 2221 } |
| 2346 bytes_left_to_reserve -= Page::kPageSize; | 2222 bytes_left_to_reserve -= Page::kPageSize; |
| 2347 reserved_page = reserved_page->next_page(); | 2223 reserved_page = reserved_page->next_page(); |
| 2348 if (!reserved_page->is_valid()) return false; | 2224 if (!reserved_page->is_valid()) return false; |
| 2349 } | 2225 } |
| 2350 ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid()); | 2226 ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid()); |
| 2351 TopPageOf(allocation_info_)->next_page()->InvalidateWatermark(true); | 2227 TopPageOf(allocation_info_)->next_page()->InvalidateWatermark(true); |
| 2352 SetAllocationInfo(&allocation_info_, | 2228 SetAllocationInfo(&allocation_info_, |
| 2353 TopPageOf(allocation_info_)->next_page()); | 2229 TopPageOf(allocation_info_)->next_page()); |
| 2354 return true; | 2230 return true; |
| 2355 } | 2231 } |
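ReserveSpace() walks whole pages forward from the allocation top, expanding the space when it runs out of linked pages, and reports failure only when the old-generation limit blocks expansion. A compressed model of that loop with placeholder types (not the real Page list):

    #include <cassert>

    const int kPageSize = 8 * 1024;  // stand-in for Page::kPageSize

    struct Space {
      int pages_after_top;        // pages already linked after the current one
      bool allocation_limit_hit;  // models OldGenerationAllocationLimitReached()

      bool Expand() {
        if (allocation_limit_hit) return false;
        ++pages_after_top;
        return true;
      }

      bool ReserveSpace(int bytes) {
        int pages_walked = 0;
        while (bytes > 0) {
          // No next page yet: try to grow the space by one page.
          if (pages_walked == pages_after_top && !Expand()) return false;
          ++pages_walked;
          bytes -= kPageSize;
        }
        return true;
      }
    };

    int main() {
      Space roomy = {1, false};
      assert(roomy.ReserveSpace(3 * kPageSize));  // expands by two more pages
      Space capped = {0, true};
      assert(!capped.ReserveSpace(kPageSize));    // limit reached, cannot expand
      return 0;
    }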
| 2356 | 2232 |
| 2357 | 2233 |
| 2358 // You have to call this last, since the implementation from PagedSpace | 2234 // You have to call this last, since the implementation from PagedSpace |
| 2359 // doesn't know that memory was 'promised' to large object space. | 2235 // doesn't know that memory was 'promised' to large object space. |
| 2360 bool LargeObjectSpace::ReserveSpace(int bytes) { | 2236 bool LargeObjectSpace::ReserveSpace(int bytes) { |
| 2361 return Heap::OldGenerationSpaceAvailable() >= bytes; | 2237 return heap()->OldGenerationSpaceAvailable() >= bytes; |
| 2362 } | 2238 } |
| 2363 | 2239 |
| 2364 | 2240 |
| 2365 // Slow case for normal allocation. Try in order: (1) allocate in the next | 2241 // Slow case for normal allocation. Try in order: (1) allocate in the next |
| 2366 // page in the space, (2) allocate off the space's free list, (3) expand the | 2242 // page in the space, (2) allocate off the space's free list, (3) expand the |
| 2367 // space, (4) fail. | 2243 // space, (4) fail. |
| 2368 HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) { | 2244 HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) { |
| 2369 // Linear allocation in this space has failed. If there is another page | 2245 // Linear allocation in this space has failed. If there is another page |
| 2370 // in the space, move to that page and allocate there. This allocation | 2246 // in the space, move to that page and allocate there. This allocation |
| 2371 // should succeed (size_in_bytes should not be greater than a page's | 2247 // should succeed (size_in_bytes should not be greater than a page's |
| 2372 // object area size). | 2248 // object area size). |
| 2373 Page* current_page = TopPageOf(allocation_info_); | 2249 Page* current_page = TopPageOf(allocation_info_); |
| 2374 if (current_page->next_page()->is_valid()) { | 2250 if (current_page->next_page()->is_valid()) { |
| 2375 return AllocateInNextPage(current_page, size_in_bytes); | 2251 return AllocateInNextPage(current_page, size_in_bytes); |
| 2376 } | 2252 } |
| 2377 | 2253 |
| 2378 // There is no next page in this space. Try free list allocation unless that | 2254 // There is no next page in this space. Try free list allocation unless that |
| 2379 // is currently forbidden. | 2255 // is currently forbidden. |
| 2380 if (!Heap::linear_allocation()) { | 2256 if (!heap()->linear_allocation()) { |
| 2381 int wasted_bytes; | 2257 int wasted_bytes; |
| 2382 Object* result; | 2258 Object* result; |
| 2383 MaybeObject* maybe = free_list_.Allocate(size_in_bytes, &wasted_bytes); | 2259 MaybeObject* maybe = free_list_.Allocate(size_in_bytes, &wasted_bytes); |
| 2384 accounting_stats_.WasteBytes(wasted_bytes); | 2260 accounting_stats_.WasteBytes(wasted_bytes); |
| 2385 if (maybe->ToObject(&result)) { | 2261 if (maybe->ToObject(&result)) { |
| 2386 accounting_stats_.AllocateBytes(size_in_bytes); | 2262 accounting_stats_.AllocateBytes(size_in_bytes); |
| 2387 | 2263 |
| 2388 HeapObject* obj = HeapObject::cast(result); | 2264 HeapObject* obj = HeapObject::cast(result); |
| 2389 Page* p = Page::FromAddress(obj->address()); | 2265 Page* p = Page::FromAddress(obj->address()); |
| 2390 | 2266 |
| 2391 if (obj->address() >= p->AllocationWatermark()) { | 2267 if (obj->address() >= p->AllocationWatermark()) { |
| 2392 // There should be no hole between the allocation watermark | 2268 // There should be no hole between the allocation watermark |
| 2393 // and allocated object address. | 2269 // and allocated object address. |
| 2394 // Memory above the allocation watermark was not swept and | 2270 // Memory above the allocation watermark was not swept and |
| 2395 // might contain garbage pointers to new space. | 2271 // might contain garbage pointers to new space. |
| 2396 ASSERT(obj->address() == p->AllocationWatermark()); | 2272 ASSERT(obj->address() == p->AllocationWatermark()); |
| 2397 p->SetAllocationWatermark(obj->address() + size_in_bytes); | 2273 p->SetAllocationWatermark(obj->address() + size_in_bytes); |
| 2398 } | 2274 } |
| 2399 | 2275 |
| 2400 return obj; | 2276 return obj; |
| 2401 } | 2277 } |
| 2402 } | 2278 } |
| 2403 | 2279 |
| 2404 // Free list allocation failed and there is no next page. Fail if we have | 2280 // Free list allocation failed and there is no next page. Fail if we have |
| 2405 // hit the old generation size limit that should cause a garbage | 2281 // hit the old generation size limit that should cause a garbage |
| 2406 // collection. | 2282 // collection. |
| 2407 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) { | 2283 if (!heap()->always_allocate() && |
| 2284 heap()->OldGenerationAllocationLimitReached()) { |
| 2408 return NULL; | 2285 return NULL; |
| 2409 } | 2286 } |
| 2410 | 2287 |
| 2411 // Try to expand the space and allocate in the new next page. | 2288 // Try to expand the space and allocate in the new next page. |
| 2412 ASSERT(!current_page->next_page()->is_valid()); | 2289 ASSERT(!current_page->next_page()->is_valid()); |
| 2413 if (Expand(current_page)) { | 2290 if (Expand(current_page)) { |
| 2414 return AllocateInNextPage(current_page, size_in_bytes); | 2291 return AllocateInNextPage(current_page, size_in_bytes); |
| 2415 } | 2292 } |
| 2416 | 2293 |
| 2417 // Finally, fail. | 2294 // Finally, fail. |
| (...skipping 42 matching lines...) | |
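For reference, the four-step slow path described in the comment before SlowAllocateRaw, reduced to its control flow. The booleans are hypothetical stand-ins for the conditions tested in the code above (linear_allocation and always_allocate are folded in), not real APIs:

    #include <cassert>

    struct SlowPathState {
      bool has_next_page;      // a valid next page exists
      bool free_list_hit;      // the free list can satisfy the request
      bool gc_limit_reached;   // old-generation limit says a GC is due
      bool can_expand;         // Expand() would succeed
    };

    enum Outcome { kNextPage, kFreeList, kAfterExpand, kFail };

    static Outcome SlowAllocate(const SlowPathState& s) {
      if (s.has_next_page) return kNextPage;   // (1) move to the next page
      if (s.free_list_hit) return kFreeList;   // (2) reuse a free block
      if (s.gc_limit_reached) return kFail;    // give the GC a chance first
      if (s.can_expand) return kAfterExpand;   // (3) grow the space
      return kFail;                            // (4) out of options
    }

    int main() {
      assert(SlowAllocate({true, false, false, false}) == kNextPage);
      assert(SlowAllocate({false, true, false, false}) == kFreeList);
      assert(SlowAllocate({false, false, true, true}) == kFail);
      assert(SlowAllocate({false, false, false, true}) == kAfterExpand);
      return 0;
    }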
| 2460 | 2337 |
| 2461 | 2338 |
| 2462 void OldSpace::DeallocateBlock(Address start, | 2339 void OldSpace::DeallocateBlock(Address start, |
| 2463 int size_in_bytes, | 2340 int size_in_bytes, |
| 2464 bool add_to_freelist) { | 2341 bool add_to_freelist) { |
| 2465 Free(start, size_in_bytes, add_to_freelist); | 2342 Free(start, size_in_bytes, add_to_freelist); |
| 2466 } | 2343 } |
| 2467 | 2344 |
| 2468 | 2345 |
| 2469 #ifdef DEBUG | 2346 #ifdef DEBUG |
| 2470 struct CommentStatistic { | |
| 2471 const char* comment; | |
| 2472 int size; | |
| 2473 int count; | |
| 2474 void Clear() { | |
| 2475 comment = NULL; | |
| 2476 size = 0; | |
| 2477 count = 0; | |
| 2478 } | |
| 2479 }; | |
| 2480 | |
| 2481 | |
| 2482 // must be small, since an iteration is used for lookup | |
| 2483 const int kMaxComments = 64; | |
| 2484 static CommentStatistic comments_statistics[kMaxComments+1]; | |
| 2485 | |
| 2486 | |
| 2487 void PagedSpace::ReportCodeStatistics() { | 2347 void PagedSpace::ReportCodeStatistics() { |
| 2348 Isolate* isolate = Isolate::Current(); |
| 2349 CommentStatistic* comments_statistics = |
| 2350 isolate->paged_space_comments_statistics(); |
| 2488 ReportCodeKindStatistics(); | 2351 ReportCodeKindStatistics(); |
| 2489 PrintF("Code comment statistics (\" [ comment-txt : size/ " | 2352 PrintF("Code comment statistics (\" [ comment-txt : size/ " |
| 2490 "count (average)\"):\n"); | 2353 "count (average)\"):\n"); |
| 2491 for (int i = 0; i <= kMaxComments; i++) { | 2354 for (int i = 0; i <= CommentStatistic::kMaxComments; i++) { |
| 2492 const CommentStatistic& cs = comments_statistics[i]; | 2355 const CommentStatistic& cs = comments_statistics[i]; |
| 2493 if (cs.size > 0) { | 2356 if (cs.size > 0) { |
| 2494 PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count, | 2357 PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count, |
| 2495 cs.size/cs.count); | 2358 cs.size/cs.count); |
| 2496 } | 2359 } |
| 2497 } | 2360 } |
| 2498 PrintF("\n"); | 2361 PrintF("\n"); |
| 2499 } | 2362 } |
| 2500 | 2363 |
| 2501 | 2364 |
| 2502 void PagedSpace::ResetCodeStatistics() { | 2365 void PagedSpace::ResetCodeStatistics() { |
| 2366 Isolate* isolate = Isolate::Current(); |
| 2367 CommentStatistic* comments_statistics = |
| 2368 isolate->paged_space_comments_statistics(); |
| 2503 ClearCodeKindStatistics(); | 2369 ClearCodeKindStatistics(); |
| 2504 for (int i = 0; i < kMaxComments; i++) comments_statistics[i].Clear(); | 2370 for (int i = 0; i < CommentStatistic::kMaxComments; i++) { |
| 2505 comments_statistics[kMaxComments].comment = "Unknown"; | 2371 comments_statistics[i].Clear(); |
| 2506 comments_statistics[kMaxComments].size = 0; | 2372 } |
| 2507 comments_statistics[kMaxComments].count = 0; | 2373 comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown"; |
| 2374 comments_statistics[CommentStatistic::kMaxComments].size = 0; |
| 2375 comments_statistics[CommentStatistic::kMaxComments].count = 0; |
| 2508 } | 2376 } |
| 2509 | 2377 |
| 2510 | 2378 |
| 2511 // Adds comment to 'comment_statistics' table. Performance OK sa long as | 2379 // Adds comment to 'comment_statistics' table. Performance OK as long as |
| 2512 // 'kMaxComments' is small | 2380 // 'kMaxComments' is small |
| 2513 static void EnterComment(const char* comment, int delta) { | 2381 static void EnterComment(Isolate* isolate, const char* comment, int delta) { |
| 2382 CommentStatistic* comments_statistics = |
| 2383 isolate->paged_space_comments_statistics(); |
| 2514 // Do not count empty comments | 2384 // Do not count empty comments |
| 2515 if (delta <= 0) return; | 2385 if (delta <= 0) return; |
| 2516 CommentStatistic* cs = &comments_statistics[kMaxComments]; | 2386 CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments]; |
| 2517 // Search for a free or matching entry in 'comments_statistics': 'cs' | 2387 // Search for a free or matching entry in 'comments_statistics': 'cs' |
| 2518 // points to result. | 2388 // points to result. |
| 2519 for (int i = 0; i < kMaxComments; i++) { | 2389 for (int i = 0; i < CommentStatistic::kMaxComments; i++) { |
| 2520 if (comments_statistics[i].comment == NULL) { | 2390 if (comments_statistics[i].comment == NULL) { |
| 2521 cs = &comments_statistics[i]; | 2391 cs = &comments_statistics[i]; |
| 2522 cs->comment = comment; | 2392 cs->comment = comment; |
| 2523 break; | 2393 break; |
| 2524 } else if (strcmp(comments_statistics[i].comment, comment) == 0) { | 2394 } else if (strcmp(comments_statistics[i].comment, comment) == 0) { |
| 2525 cs = &comments_statistics[i]; | 2395 cs = &comments_statistics[i]; |
| 2526 break; | 2396 break; |
| 2527 } | 2397 } |
| 2528 } | 2398 } |
| 2529 // Update entry for 'comment' | 2399 // Update entry for 'comment' |
| 2530 cs->size += delta; | 2400 cs->size += delta; |
| 2531 cs->count += 1; | 2401 cs->count += 1; |
| 2532 } | 2402 } |
| 2533 | 2403 |
| 2534 | 2404 |
| 2535 // Call for each nested comment start (start marked with '[ xxx', end marked | 2405 // Call for each nested comment start (start marked with '[ xxx', end marked |
| 2536 // with ']'. RelocIterator 'it' must point to a comment reloc info. | 2406 // with ']'. RelocIterator 'it' must point to a comment reloc info. |
| 2537 static void CollectCommentStatistics(RelocIterator* it) { | 2407 static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) { |
| 2538 ASSERT(!it->done()); | 2408 ASSERT(!it->done()); |
| 2539 ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT); | 2409 ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT); |
| 2540 const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data()); | 2410 const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data()); |
| 2541 if (tmp[0] != '[') { | 2411 if (tmp[0] != '[') { |
| 2542 // Not a nested comment; skip | 2412 // Not a nested comment; skip |
| 2543 return; | 2413 return; |
| 2544 } | 2414 } |
| 2545 | 2415 |
| 2546 // Search for end of nested comment or a new nested comment | 2416 // Search for end of nested comment or a new nested comment |
| 2547 const char* const comment_txt = | 2417 const char* const comment_txt = |
| 2548 reinterpret_cast<const char*>(it->rinfo()->data()); | 2418 reinterpret_cast<const char*>(it->rinfo()->data()); |
| 2549 const byte* prev_pc = it->rinfo()->pc(); | 2419 const byte* prev_pc = it->rinfo()->pc(); |
| 2550 int flat_delta = 0; | 2420 int flat_delta = 0; |
| 2551 it->next(); | 2421 it->next(); |
| 2552 while (true) { | 2422 while (true) { |
| 2553 // All nested comments must be terminated properly, and therefore exit | 2423 // All nested comments must be terminated properly, and therefore exit |
| 2554 // from loop. | 2424 // from loop. |
| 2555 ASSERT(!it->done()); | 2425 ASSERT(!it->done()); |
| 2556 if (it->rinfo()->rmode() == RelocInfo::COMMENT) { | 2426 if (it->rinfo()->rmode() == RelocInfo::COMMENT) { |
| 2557 const char* const txt = | 2427 const char* const txt = |
| 2558 reinterpret_cast<const char*>(it->rinfo()->data()); | 2428 reinterpret_cast<const char*>(it->rinfo()->data()); |
| 2559 flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc); | 2429 flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc); |
| 2560 if (txt[0] == ']') break; // End of nested comment | 2430 if (txt[0] == ']') break; // End of nested comment |
| 2561 // A new comment | 2431 // A new comment |
| 2562 CollectCommentStatistics(it); | 2432 CollectCommentStatistics(isolate, it); |
| 2563 // Skip code that was covered with previous comment | 2433 // Skip code that was covered with previous comment |
| 2564 prev_pc = it->rinfo()->pc(); | 2434 prev_pc = it->rinfo()->pc(); |
| 2565 } | 2435 } |
| 2566 it->next(); | 2436 it->next(); |
| 2567 } | 2437 } |
| 2568 EnterComment(comment_txt, flat_delta); | 2438 EnterComment(isolate, comment_txt, flat_delta); |
| 2569 } | 2439 } |
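CollectCommentStatistics() recurses whenever it meets a nested "[ ..." marker, so code covered by an inner comment is charged to that inner entry and excluded from the outer one's flat delta. A self-contained model of the same accounting over a flat list of (offset, text) markers instead of RelocInfo:

    #include <cstdio>
    #include <vector>

    struct Marker { int pc; const char* text; };  // comment marker at a code offset

    static void Enter(const char* name, int delta) {
      if (delta > 0) std::printf("%s: %d bytes\n", name, delta);
    }

    // `i` indexes a "[ ..." marker; returns the index of its matching "]".
    static size_t Collect(const std::vector<Marker>& m, size_t i) {
      const char* name = m[i].text;
      int prev_pc = m[i].pc;
      int flat = 0;
      for (size_t j = i + 1; j < m.size(); j++) {
        flat += m[j].pc - prev_pc;
        if (m[j].text[0] == ']') { Enter(name, flat); return j; }
        j = Collect(m, j);   // nested comment: its span is charged separately
        prev_pc = m[j].pc;
      }
      return m.size();       // unterminated comment; the real code ASSERTs
    }

    int main() {
      std::vector<Marker> m = {
          {0, "[ outer"}, {10, "[ inner"}, {14, "]"}, {20, "]"}};
      Collect(m, 0);  // prints "[ inner: 4 bytes" then "[ outer: 16 bytes"
      return 0;
    }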
| 2570 | 2440 |
| 2571 | 2441 |
| 2572 // Collects code size statistics: | 2442 // Collects code size statistics: |
| 2573 // - by code kind | 2443 // - by code kind |
| 2574 // - by code comment | 2444 // - by code comment |
| 2575 void PagedSpace::CollectCodeStatistics() { | 2445 void PagedSpace::CollectCodeStatistics() { |
| 2446 Isolate* isolate = heap()->isolate(); |
| 2576 HeapObjectIterator obj_it(this); | 2447 HeapObjectIterator obj_it(this); |
| 2577 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) { | 2448 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) { |
| 2578 if (obj->IsCode()) { | 2449 if (obj->IsCode()) { |
| 2579 Code* code = Code::cast(obj); | 2450 Code* code = Code::cast(obj); |
| 2580 code_kind_statistics[code->kind()] += code->Size(); | 2451 isolate->code_kind_statistics()[code->kind()] += code->Size(); |
| 2581 RelocIterator it(code); | 2452 RelocIterator it(code); |
| 2582 int delta = 0; | 2453 int delta = 0; |
| 2583 const byte* prev_pc = code->instruction_start(); | 2454 const byte* prev_pc = code->instruction_start(); |
| 2584 while (!it.done()) { | 2455 while (!it.done()) { |
| 2585 if (it.rinfo()->rmode() == RelocInfo::COMMENT) { | 2456 if (it.rinfo()->rmode() == RelocInfo::COMMENT) { |
| 2586 delta += static_cast<int>(it.rinfo()->pc() - prev_pc); | 2457 delta += static_cast<int>(it.rinfo()->pc() - prev_pc); |
| 2587 CollectCommentStatistics(&it); | 2458 CollectCommentStatistics(isolate, &it); |
| 2588 prev_pc = it.rinfo()->pc(); | 2459 prev_pc = it.rinfo()->pc(); |
| 2589 } | 2460 } |
| 2590 it.next(); | 2461 it.next(); |
| 2591 } | 2462 } |
| 2592 | 2463 |
| 2593 ASSERT(code->instruction_start() <= prev_pc && | 2464 ASSERT(code->instruction_start() <= prev_pc && |
| 2594 prev_pc <= code->instruction_end()); | 2465 prev_pc <= code->instruction_end()); |
| 2595 delta += static_cast<int>(code->instruction_end() - prev_pc); | 2466 delta += static_cast<int>(code->instruction_end() - prev_pc); |
| 2596 EnterComment("NoComment", delta); | 2467 EnterComment(isolate, "NoComment", delta); |
| 2597 } | 2468 } |
| 2598 } | 2469 } |
| 2599 } | 2470 } |
| 2600 | 2471 |
| 2601 | 2472 |
| 2602 void OldSpace::ReportStatistics() { | 2473 void OldSpace::ReportStatistics() { |
| 2603 int pct = static_cast<int>(Available() * 100 / Capacity()); | 2474 int pct = static_cast<int>(Available() * 100 / Capacity()); |
| 2604 PrintF(" capacity: %" V8_PTR_PREFIX "d" | 2475 PrintF(" capacity: %" V8_PTR_PREFIX "d" |
| 2605 ", waste: %" V8_PTR_PREFIX "d" | 2476 ", waste: %" V8_PTR_PREFIX "d" |
| 2606 ", available: %" V8_PTR_PREFIX "d, %%%d\n", | 2477 ", available: %" V8_PTR_PREFIX "d, %%%d\n", |
| (...skipping 73 matching lines...) | |
| 2680 // in the space, move to that page and allocate there. This allocation | 2551 // in the space, move to that page and allocate there. This allocation |
| 2681 // should succeed. | 2552 // should succeed. |
| 2682 Page* current_page = TopPageOf(allocation_info_); | 2553 Page* current_page = TopPageOf(allocation_info_); |
| 2683 if (current_page->next_page()->is_valid()) { | 2554 if (current_page->next_page()->is_valid()) { |
| 2684 return AllocateInNextPage(current_page, size_in_bytes); | 2555 return AllocateInNextPage(current_page, size_in_bytes); |
| 2685 } | 2556 } |
| 2686 | 2557 |
| 2687 // There is no next page in this space. Try free list allocation unless | 2558 // There is no next page in this space. Try free list allocation unless |
| 2688 // that is currently forbidden. The fixed space free list implicitly assumes | 2559 // that is currently forbidden. The fixed space free list implicitly assumes |
| 2689 // that all free blocks are of the fixed size. | 2560 // that all free blocks are of the fixed size. |
| 2690 if (!Heap::linear_allocation()) { | 2561 if (!heap()->linear_allocation()) { |
| 2691 Object* result; | 2562 Object* result; |
| 2692 MaybeObject* maybe = free_list_.Allocate(); | 2563 MaybeObject* maybe = free_list_.Allocate(); |
| 2693 if (maybe->ToObject(&result)) { | 2564 if (maybe->ToObject(&result)) { |
| 2694 accounting_stats_.AllocateBytes(size_in_bytes); | 2565 accounting_stats_.AllocateBytes(size_in_bytes); |
| 2695 HeapObject* obj = HeapObject::cast(result); | 2566 HeapObject* obj = HeapObject::cast(result); |
| 2696 Page* p = Page::FromAddress(obj->address()); | 2567 Page* p = Page::FromAddress(obj->address()); |
| 2697 | 2568 |
| 2698 if (obj->address() >= p->AllocationWatermark()) { | 2569 if (obj->address() >= p->AllocationWatermark()) { |
| 2699 // There should be no hole between the allocation watermark | 2570 // There should be no hole between the allocation watermark |
| 2700 // and allocated object address. | 2571 // and allocated object address. |
| 2701 // Memory above the allocation watermark was not swept and | 2572 // Memory above the allocation watermark was not swept and |
| 2702 // might contain garbage pointers to new space. | 2573 // might contain garbage pointers to new space. |
| 2703 ASSERT(obj->address() == p->AllocationWatermark()); | 2574 ASSERT(obj->address() == p->AllocationWatermark()); |
| 2704 p->SetAllocationWatermark(obj->address() + size_in_bytes); | 2575 p->SetAllocationWatermark(obj->address() + size_in_bytes); |
| 2705 } | 2576 } |
| 2706 | 2577 |
| 2707 return obj; | 2578 return obj; |
| 2708 } | 2579 } |
| 2709 } | 2580 } |
| 2710 | 2581 |
| 2711 // Free list allocation failed and there is no next page. Fail if we have | 2582 // Free list allocation failed and there is no next page. Fail if we have |
| 2712 // hit the old generation size limit that should cause a garbage | 2583 // hit the old generation size limit that should cause a garbage |
| 2713 // collection. | 2584 // collection. |
| 2714 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) { | 2585 if (!heap()->always_allocate() && |
| 2586 heap()->OldGenerationAllocationLimitReached()) { |
| 2715 return NULL; | 2587 return NULL; |
| 2716 } | 2588 } |
| 2717 | 2589 |
| 2718 // Try to expand the space and allocate in the new next page. | 2590 // Try to expand the space and allocate in the new next page. |
| 2719 ASSERT(!current_page->next_page()->is_valid()); | 2591 ASSERT(!current_page->next_page()->is_valid()); |
| 2720 if (Expand(current_page)) { | 2592 if (Expand(current_page)) { |
| 2721 return AllocateInNextPage(current_page, size_in_bytes); | 2593 return AllocateInNextPage(current_page, size_in_bytes); |
| 2722 } | 2594 } |
| 2723 | 2595 |
| 2724 // Finally, fail. | 2596 // Finally, fail. |
| (...skipping 81 matching lines...) |
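The free-list branch above depends on the page allocation watermark: everything below it has been swept, everything above it may still hold stale pointers into new space, so a free-list hit at or above the watermark must sit exactly on it and then advance it. A minimal standalone sketch of that invariant (PageSketch is a hypothetical type, not the V8 Page class):

#include <cassert>
#include <cstddef>
#include <cstdint>

// Hypothetical stand-in for a page's watermark bookkeeping. A free-list hit
// at or above the watermark must land exactly on it (no unswept hole), and
// the watermark then moves past the newly allocated object.
struct PageSketch {
  uintptr_t allocation_watermark;

  void BumpWatermarkFor(uintptr_t object_address, size_t size_in_bytes) {
    if (object_address >= allocation_watermark) {
      assert(object_address == allocation_watermark);
      allocation_watermark = object_address + size_in_bytes;
    }
  }
};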
| 2806 #endif | 2678 #endif |
| 2807 | 2679 |
| 2808 | 2680 |
| 2809 // ----------------------------------------------------------------------------- | 2681 // ----------------------------------------------------------------------------- |
| 2810 // GlobalPropertyCellSpace implementation | 2682 // GlobalPropertyCellSpace implementation |
| 2811 | 2683 |
| 2812 #ifdef DEBUG | 2684 #ifdef DEBUG |
| 2813 void CellSpace::VerifyObject(HeapObject* object) { | 2685 void CellSpace::VerifyObject(HeapObject* object) { |
| 2814 // The object should be a global object property cell or a free-list node. | 2686 // The object should be a global object property cell or a free-list node. |
| 2815 ASSERT(object->IsJSGlobalPropertyCell() || | 2687 ASSERT(object->IsJSGlobalPropertyCell() || |
| 2816 object->map() == Heap::two_pointer_filler_map()); | 2688 object->map() == heap()->two_pointer_filler_map()); |
| 2817 } | 2689 } |
| 2818 #endif | 2690 #endif |
| 2819 | 2691 |
| 2820 | 2692 |
| 2821 // ----------------------------------------------------------------------------- | 2693 // ----------------------------------------------------------------------------- |
| 2822 // LargeObjectIterator | 2694 // LargeObjectIterator |
| 2823 | 2695 |
| 2824 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) { | 2696 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) { |
| 2825 current_ = space->first_chunk_; | 2697 current_ = space->first_chunk_; |
| 2826 size_func_ = NULL; | 2698 size_func_ = NULL; |
| (...skipping 16 matching lines...) |
| 2843 } | 2715 } |
| 2844 | 2716 |
| 2845 | 2717 |
| 2846 // ----------------------------------------------------------------------------- | 2718 // ----------------------------------------------------------------------------- |
| 2847 // LargeObjectChunk | 2719 // LargeObjectChunk |
| 2848 | 2720 |
| 2849 LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes, | 2721 LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes, |
| 2850 Executability executable) { | 2722 Executability executable) { |
| 2851 size_t requested = ChunkSizeFor(size_in_bytes); | 2723 size_t requested = ChunkSizeFor(size_in_bytes); |
| 2852 size_t size; | 2724 size_t size; |
| 2853 void* mem = MemoryAllocator::AllocateRawMemory(requested, &size, executable); | 2725 Isolate* isolate = Isolate::Current(); |
| 2726 void* mem = isolate->memory_allocator()->AllocateRawMemory( |
| 2727 requested, &size, executable); |
| 2854 if (mem == NULL) return NULL; | 2728 if (mem == NULL) return NULL; |
| 2855 | 2729 |
| 2856 // The start of the chunk may be overlaid with a page so we have to | 2730 // The start of the chunk may be overlaid with a page so we have to |
| 2857 // make sure that the page flags fit in the size field. | 2731 // make sure that the page flags fit in the size field. |
| 2858 ASSERT((size & Page::kPageFlagMask) == 0); | 2732 ASSERT((size & Page::kPageFlagMask) == 0); |
| 2859 | 2733 |
| 2860 LOG(NewEvent("LargeObjectChunk", mem, size)); | 2734 LOG(isolate, NewEvent("LargeObjectChunk", mem, size)); |
| 2861 if (size < requested) { | 2735 if (size < requested) { |
| 2862 MemoryAllocator::FreeRawMemory(mem, size, executable); | 2736 isolate->memory_allocator()->FreeRawMemory( |
| 2863 LOG(DeleteEvent("LargeObjectChunk", mem)); | 2737 mem, size, executable); |
| 2738 LOG(isolate, DeleteEvent("LargeObjectChunk", mem)); |
| 2864 return NULL; | 2739 return NULL; |
| 2865 } | 2740 } |
| 2866 | 2741 |
| 2867 ObjectSpace space = (executable == EXECUTABLE) | 2742 ObjectSpace space = (executable == EXECUTABLE) |
| 2868 ? kObjectSpaceCodeSpace | 2743 ? kObjectSpaceCodeSpace |
| 2869 : kObjectSpaceLoSpace; | 2744 : kObjectSpaceLoSpace; |
| 2870 MemoryAllocator::PerformAllocationCallback( | 2745 isolate->memory_allocator()->PerformAllocationCallback( |
| 2871 space, kAllocationActionAllocate, size); | 2746 space, kAllocationActionAllocate, size); |
| 2872 | 2747 |
| 2873 LargeObjectChunk* chunk = reinterpret_cast<LargeObjectChunk*>(mem); | 2748 LargeObjectChunk* chunk = reinterpret_cast<LargeObjectChunk*>(mem); |
| 2874 chunk->size_ = size; | 2749 chunk->size_ = size; |
| 2750 Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize)); |
| 2751 page->heap_ = Isolate::Current()->heap(); |
| 2875 return chunk; | 2752 return chunk; |
| 2876 } | 2753 } |
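A compact sketch of the allocate-then-verify pattern used in LargeObjectChunk::New above: if the raw allocation comes back smaller than requested, it is released immediately and chunk creation fails. malloc/free stand in for the raw memory allocator; the real code additionally logs the event and notifies allocation callbacks:

#include <cstddef>
#include <cstdlib>

// Stand-in raw allocator: reports how many bytes were actually reserved.
void* AllocateRaw(size_t requested, size_t* actual) {
  void* mem = std::malloc(requested);
  *actual = (mem != nullptr) ? requested : 0;
  return mem;
}

// Fails cleanly on a short allocation instead of using a too-small block.
void* NewChunkSketch(size_t requested) {
  size_t size = 0;
  void* mem = AllocateRaw(requested, &size);
  if (mem == nullptr) return nullptr;
  if (size < requested) {
    std::free(mem);      // roll back before reporting failure
    return nullptr;
  }
  return mem;            // caller treats the block as chunk header + payload
}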
| 2877 | 2754 |
| 2878 | 2755 |
| 2879 int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) { | 2756 int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) { |
| 2880 int os_alignment = static_cast<int>(OS::AllocateAlignment()); | 2757 int os_alignment = static_cast<int>(OS::AllocateAlignment()); |
| 2881 if (os_alignment < Page::kPageSize) { | 2758 if (os_alignment < Page::kPageSize) { |
| 2882 size_in_bytes += (Page::kPageSize - os_alignment); | 2759 size_in_bytes += (Page::kPageSize - os_alignment); |
| 2883 } | 2760 } |
| 2884 return size_in_bytes + Page::kObjectStartOffset; | 2761 return size_in_bytes + Page::kObjectStartOffset; |
| 2885 } | 2762 } |
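The sizing rule in ChunkSizeFor, in isolation: when the OS allocation alignment is weaker than the page alignment, the request is padded by the difference so a page-aligned start can always be found inside the returned block, and the per-page object start offset is added for the header area. The constants below are illustrative, not the real Page values:

#include <cstddef>

constexpr size_t kPageSizeExample = 8 * 1024;        // example page size
constexpr size_t kObjectStartOffsetExample = 256;    // example header offset

size_t ChunkSizeForExample(size_t size_in_bytes, size_t os_alignment) {
  if (os_alignment < kPageSizeExample) {
    // Worst case: the block starts just past a page boundary and almost a
    // full page is lost to alignment, so reserve that slack up front.
    size_in_bytes += kPageSizeExample - os_alignment;
  }
  return size_in_bytes + kObjectStartOffsetExample;
}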
| 2886 | 2763 |
| 2887 // ----------------------------------------------------------------------------- | 2764 // ----------------------------------------------------------------------------- |
| 2888 // LargeObjectSpace | 2765 // LargeObjectSpace |
| 2889 | 2766 |
| 2890 LargeObjectSpace::LargeObjectSpace(AllocationSpace id) | 2767 LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id) |
| 2891 : Space(id, NOT_EXECUTABLE), // Managed on a per-allocation basis | 2768 : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis |
| 2892 first_chunk_(NULL), | 2769 first_chunk_(NULL), |
| 2893 size_(0), | 2770 size_(0), |
| 2894 page_count_(0), | 2771 page_count_(0), |
| 2895 objects_size_(0) {} | 2772 objects_size_(0) {} |
| 2896 | 2773 |
| 2897 | 2774 |
| 2898 bool LargeObjectSpace::Setup() { | 2775 bool LargeObjectSpace::Setup() { |
| 2899 first_chunk_ = NULL; | 2776 first_chunk_ = NULL; |
| 2900 size_ = 0; | 2777 size_ = 0; |
| 2901 page_count_ = 0; | 2778 page_count_ = 0; |
| 2902 objects_size_ = 0; | 2779 objects_size_ = 0; |
| 2903 return true; | 2780 return true; |
| 2904 } | 2781 } |
| 2905 | 2782 |
| 2906 | 2783 |
| 2907 void LargeObjectSpace::TearDown() { | 2784 void LargeObjectSpace::TearDown() { |
| 2908 while (first_chunk_ != NULL) { | 2785 while (first_chunk_ != NULL) { |
| 2909 LargeObjectChunk* chunk = first_chunk_; | 2786 LargeObjectChunk* chunk = first_chunk_; |
| 2910 first_chunk_ = first_chunk_->next(); | 2787 first_chunk_ = first_chunk_->next(); |
| 2911 LOG(DeleteEvent("LargeObjectChunk", chunk->address())); | 2788 LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", chunk->address())); |
| 2912 Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize)); | 2789 Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize)); |
| 2913 Executability executable = | 2790 Executability executable = |
| 2914 page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE; | 2791 page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE; |
| 2915 ObjectSpace space = kObjectSpaceLoSpace; | 2792 ObjectSpace space = kObjectSpaceLoSpace; |
| 2916 if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace; | 2793 if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace; |
| 2917 size_t size = chunk->size(); | 2794 size_t size = chunk->size(); |
| 2918 MemoryAllocator::FreeRawMemory(chunk->address(), size, executable); | 2795 heap()->isolate()->memory_allocator()->FreeRawMemory(chunk->address(), |
| 2919 MemoryAllocator::PerformAllocationCallback( | 2796 size, |
| 2797 executable); |
| 2798 heap()->isolate()->memory_allocator()->PerformAllocationCallback( |
| 2920 space, kAllocationActionFree, size); | 2799 space, kAllocationActionFree, size); |
| 2921 } | 2800 } |
| 2922 | 2801 |
| 2923 size_ = 0; | 2802 size_ = 0; |
| 2924 page_count_ = 0; | 2803 page_count_ = 0; |
| 2925 objects_size_ = 0; | 2804 objects_size_ = 0; |
| 2926 } | 2805 } |
| 2927 | 2806 |
| 2928 | 2807 |
| 2929 #ifdef ENABLE_HEAP_PROTECTION | 2808 #ifdef ENABLE_HEAP_PROTECTION |
| 2930 | 2809 |
| 2931 void LargeObjectSpace::Protect() { | 2810 void LargeObjectSpace::Protect() { |
| 2932 LargeObjectChunk* chunk = first_chunk_; | 2811 LargeObjectChunk* chunk = first_chunk_; |
| 2933 while (chunk != NULL) { | 2812 while (chunk != NULL) { |
| 2934 MemoryAllocator::Protect(chunk->address(), chunk->size()); | 2813 heap()->isolate()->memory_allocator()->Protect(chunk->address(), |
| 2814 chunk->size()); |
| 2935 chunk = chunk->next(); | 2815 chunk = chunk->next(); |
| 2936 } | 2816 } |
| 2937 } | 2817 } |
| 2938 | 2818 |
| 2939 | 2819 |
| 2940 void LargeObjectSpace::Unprotect() { | 2820 void LargeObjectSpace::Unprotect() { |
| 2941 LargeObjectChunk* chunk = first_chunk_; | 2821 LargeObjectChunk* chunk = first_chunk_; |
| 2942 while (chunk != NULL) { | 2822 while (chunk != NULL) { |
| 2943 bool is_code = chunk->GetObject()->IsCode(); | 2823 bool is_code = chunk->GetObject()->IsCode(); |
| 2944 MemoryAllocator::Unprotect(chunk->address(), chunk->size(), | 2824 heap()->isolate()->memory_allocator()->Unprotect(chunk->address(), |
| 2945 is_code ? EXECUTABLE : NOT_EXECUTABLE); | 2825 chunk->size(), is_code ? EXECUTABLE : NOT_EXECUTABLE); |
| 2946 chunk = chunk->next(); | 2826 chunk = chunk->next(); |
| 2947 } | 2827 } |
| 2948 } | 2828 } |
| 2949 | 2829 |
| 2950 #endif | 2830 #endif |
| 2951 | 2831 |
| 2952 | 2832 |
| 2953 MaybeObject* LargeObjectSpace::AllocateRawInternal(int requested_size, | 2833 MaybeObject* LargeObjectSpace::AllocateRawInternal(int requested_size, |
| 2954 int object_size, | 2834 int object_size, |
| 2955 Executability executable) { | 2835 Executability executable) { |
| 2956 ASSERT(0 < object_size && object_size <= requested_size); | 2836 ASSERT(0 < object_size && object_size <= requested_size); |
| 2957 | 2837 |
| 2958 // Check if we want to force a GC before growing the old space further. | 2838 // Check if we want to force a GC before growing the old space further. |
| 2959 // If so, fail the allocation. | 2839 // If so, fail the allocation. |
| 2960 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) { | 2840 if (!heap()->always_allocate() && |
| 2841 heap()->OldGenerationAllocationLimitReached()) { |
| 2961 return Failure::RetryAfterGC(identity()); | 2842 return Failure::RetryAfterGC(identity()); |
| 2962 } | 2843 } |
| 2963 | 2844 |
| 2964 LargeObjectChunk* chunk = LargeObjectChunk::New(requested_size, executable); | 2845 LargeObjectChunk* chunk = LargeObjectChunk::New(requested_size, executable); |
| 2965 if (chunk == NULL) { | 2846 if (chunk == NULL) { |
| 2966 return Failure::RetryAfterGC(identity()); | 2847 return Failure::RetryAfterGC(identity()); |
| 2967 } | 2848 } |
| 2968 | 2849 |
| 2969 size_ += static_cast<int>(chunk->size()); | 2850 size_ += static_cast<int>(chunk->size()); |
| 2970 objects_size_ += requested_size; | 2851 objects_size_ += requested_size; |
| (...skipping 84 matching lines...) |
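The guard at the top of AllocateRawInternal, as one standalone sketch: unless allocation is forced, reaching the old-generation limit (or being refused memory by the OS) fails the request so the caller can run a GC and retry. The types and the limit test are illustrative, not the V8 Failure/MaybeObject machinery:

#include <cstddef>
#include <new>

struct LargeAllocationSketch {
  bool retry_after_gc;   // caller should collect garbage and try again
  void* memory;          // valid only when retry_after_gc is false
};

LargeAllocationSketch AllocateLargeSketch(size_t requested,
                                          bool always_allocate,
                                          size_t old_gen_size,
                                          size_t old_gen_limit) {
  if (!always_allocate && old_gen_size >= old_gen_limit) {
    return {true, nullptr};                       // force a GC first
  }
  void* mem = ::operator new(requested, std::nothrow);
  if (mem == nullptr) {
    return {true, nullptr};                       // OS refusal: also retry
  }
  return {false, mem};
}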
| 3055 // regions (modulo 32). So we treat a large page as a sequence of | 2936 // regions (modulo 32). So we treat a large page as a sequence of |
| 3056 // normal pages of size Page::kPageSize having same dirty marks | 2937 // normal pages of size Page::kPageSize having same dirty marks |
| 3057 // and subsequently iterate dirty regions on each of these pages. | 2938 // and subsequently iterate dirty regions on each of these pages. |
| 3058 Address start = object->address(); | 2939 Address start = object->address(); |
| 3059 Address end = page->ObjectAreaEnd(); | 2940 Address end = page->ObjectAreaEnd(); |
| 3060 Address object_end = start + object->Size(); | 2941 Address object_end = start + object->Size(); |
| 3061 | 2942 |
| 3062 // Iterate regions of the first normal page covering object. | 2943 // Iterate regions of the first normal page covering object. |
| 3063 uint32_t first_region_number = page->GetRegionNumberForAddress(start); | 2944 uint32_t first_region_number = page->GetRegionNumberForAddress(start); |
| 3064 newmarks |= | 2945 newmarks |= |
| 3065 Heap::IterateDirtyRegions(marks >> first_region_number, | 2946 heap()->IterateDirtyRegions(marks >> first_region_number, |
| 3066 start, | 2947 start, |
| 3067 end, | 2948 end, |
| 3068 &Heap::IteratePointersInDirtyRegion, | 2949 &Heap::IteratePointersInDirtyRegion, |
| 3069 copy_object) << first_region_number; | 2950 copy_object) << first_region_number; |
| 3070 | 2951 |
| 3071 start = end; | 2952 start = end; |
| 3072 end = start + Page::kPageSize; | 2953 end = start + Page::kPageSize; |
| 3073 while (end <= object_end) { | 2954 while (end <= object_end) { |
| 3074 // Iterate next 32 regions. | 2955 // Iterate next 32 regions. |
| 3075 newmarks |= | 2956 newmarks |= |
| 3076 Heap::IterateDirtyRegions(marks, | 2957 heap()->IterateDirtyRegions(marks, |
| 3077 start, | 2958 start, |
| 3078 end, | 2959 end, |
| 3079 &Heap::IteratePointersInDirtyRegion, | 2960 &Heap::IteratePointersInDirtyRegion, |
| 3080 copy_object); | 2961 copy_object); |
| 3081 start = end; | 2962 start = end; |
| 3082 end = start + Page::kPageSize; | 2963 end = start + Page::kPageSize; |
| 3083 } | 2964 } |
| 3084 | 2965 |
| 3085 if (start != object_end) { | 2966 if (start != object_end) { |
| 3086 // Iterate the last piece of an object which is less than | 2967 // Iterate the last piece of an object which is less than |
| 3087 // Page::kPageSize. | 2968 // Page::kPageSize. |
| 3088 newmarks |= | 2969 newmarks |= |
| 3089 Heap::IterateDirtyRegions(marks, | 2970 heap()->IterateDirtyRegions(marks, |
| 3090 start, | 2971 start, |
| 3091 object_end, | 2972 object_end, |
| 3092 &Heap::IteratePointersInDirtyRegion, | 2973 &Heap::IteratePointersInDirtyRegion, |
| 3093 copy_object); | 2974 copy_object); |
| 3094 } | 2975 } |
| 3095 | 2976 |
| 3096 page->SetRegionMarks(newmarks); | 2977 page->SetRegionMarks(newmarks); |
| 3097 } | 2978 } |
| 3098 } | 2979 } |
| 3099 } | 2980 } |
| 3100 } | 2981 } |
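The slicing loop above, reduced to its shape: a single 32-bit mark word covers the whole large object, which is visited one page-sized slice at a time; the first slice is shifted so the object's starting region lines up with bit 0, and any tail shorter than a page is handled last. The visitor callback and the first-slice boundary are simplified stand-ins, not the Heap::IterateDirtyRegions API:

#include <cstddef>
#include <cstdint>

// Hypothetical visitor: returns the dirty marks that should remain set for
// the half-open range [start, end).
using VisitDirtyRegions = uint32_t (*)(uint32_t marks, uintptr_t start,
                                       uintptr_t end);

uint32_t IterateLargeObjectRegions(uint32_t marks,
                                   uintptr_t object_start,
                                   uintptr_t first_slice_end,
                                   uintptr_t object_end,
                                   uintptr_t page_size,
                                   uint32_t first_region_number,
                                   VisitDirtyRegions visit) {
  uint32_t newmarks = 0;

  // First slice: shift so the object's first region becomes bit 0, then
  // shift the surviving marks back into place.
  newmarks |= visit(marks >> first_region_number, object_start,
                    first_slice_end) << first_region_number;

  uintptr_t start = first_slice_end;
  uintptr_t end = start + page_size;
  while (end <= object_end) {           // full page-sized slices
    newmarks |= visit(marks, start, end);
    start = end;
    end = start + page_size;
  }
  if (start != object_end) {            // trailing partial slice
    newmarks |= visit(marks, start, object_end);
  }
  return newmarks;
}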
| 3101 | 2982 |
| 3102 | 2983 |
| 3103 void LargeObjectSpace::FreeUnmarkedObjects() { | 2984 void LargeObjectSpace::FreeUnmarkedObjects() { |
| 3104 LargeObjectChunk* previous = NULL; | 2985 LargeObjectChunk* previous = NULL; |
| 3105 LargeObjectChunk* current = first_chunk_; | 2986 LargeObjectChunk* current = first_chunk_; |
| 3106 while (current != NULL) { | 2987 while (current != NULL) { |
| 3107 HeapObject* object = current->GetObject(); | 2988 HeapObject* object = current->GetObject(); |
| 3108 if (object->IsMarked()) { | 2989 if (object->IsMarked()) { |
| 3109 object->ClearMark(); | 2990 object->ClearMark(); |
| 3110 MarkCompactCollector::tracer()->decrement_marked_count(); | 2991 heap()->mark_compact_collector()->tracer()->decrement_marked_count(); |
| 3111 previous = current; | 2992 previous = current; |
| 3112 current = current->next(); | 2993 current = current->next(); |
| 3113 } else { | 2994 } else { |
| 3114 Page* page = Page::FromAddress(RoundUp(current->address(), | 2995 Page* page = Page::FromAddress(RoundUp(current->address(), |
| 3115 Page::kPageSize)); | 2996 Page::kPageSize)); |
| 3116 Executability executable = | 2997 Executability executable = |
| 3117 page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE; | 2998 page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE; |
| 3118 Address chunk_address = current->address(); | 2999 Address chunk_address = current->address(); |
| 3119 size_t chunk_size = current->size(); | 3000 size_t chunk_size = current->size(); |
| 3120 | 3001 |
| 3121 // Cut the chunk out from the chunk list. | 3002 // Cut the chunk out from the chunk list. |
| 3122 current = current->next(); | 3003 current = current->next(); |
| 3123 if (previous == NULL) { | 3004 if (previous == NULL) { |
| 3124 first_chunk_ = current; | 3005 first_chunk_ = current; |
| 3125 } else { | 3006 } else { |
| 3126 previous->set_next(current); | 3007 previous->set_next(current); |
| 3127 } | 3008 } |
| 3128 | 3009 |
| 3129 // Free the chunk. | 3010 // Free the chunk. |
| 3130 MarkCompactCollector::ReportDeleteIfNeeded(object); | 3011 heap()->mark_compact_collector()->ReportDeleteIfNeeded(object); |
| 3131 LiveObjectList::ProcessNonLive(object); | 3012 LiveObjectList::ProcessNonLive(object); |
| 3132 | 3013 |
| 3133 size_ -= static_cast<int>(chunk_size); | 3014 size_ -= static_cast<int>(chunk_size); |
| 3134 objects_size_ -= object->Size(); | 3015 objects_size_ -= object->Size(); |
| 3135 page_count_--; | 3016 page_count_--; |
| 3136 ObjectSpace space = kObjectSpaceLoSpace; | 3017 ObjectSpace space = kObjectSpaceLoSpace; |
| 3137 if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace; | 3018 if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace; |
| 3138 MemoryAllocator::FreeRawMemory(chunk_address, chunk_size, executable); | 3019 heap()->isolate()->memory_allocator()->FreeRawMemory(chunk_address, |
| 3139 MemoryAllocator::PerformAllocationCallback(space, kAllocationActionFree, | 3020 chunk_size, |
| 3140 size_); | 3021 executable); |
| 3141 LOG(DeleteEvent("LargeObjectChunk", chunk_address)); | 3022 heap()->isolate()->memory_allocator()->PerformAllocationCallback( |
| 3023 space, kAllocationActionFree, size_); |
| 3024 LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", chunk_address)); |
| 3142 } | 3025 } |
| 3143 } | 3026 } |
| 3144 } | 3027 } |
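The sweep in FreeUnmarkedObjects is a classic single-pass removal from a singly linked list: marked chunks survive with their mark cleared, unmarked chunks are unlinked and their memory released. A generic sketch of that pattern (Node is a hypothetical type, not LargeObjectChunk, and delete stands in for freeing the chunk's raw memory):

// Returns the new head of the list after sweeping.
struct Node {
  bool marked;
  Node* next;
};

Node* SweepList(Node* first) {
  Node* previous = nullptr;
  Node* current = first;
  while (current != nullptr) {
    if (current->marked) {
      current->marked = false;      // clear the mark, keep the node
      previous = current;
      current = current->next;
    } else {
      Node* dead = current;
      current = current->next;      // advance before unlinking
      if (previous == nullptr) {
        first = current;            // removing the head
      } else {
        previous->next = current;
      }
      delete dead;                  // stand-in for releasing the chunk
    }
  }
  return first;
}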
| 3145 | 3028 |
| 3146 | 3029 |
| 3147 bool LargeObjectSpace::Contains(HeapObject* object) { | 3030 bool LargeObjectSpace::Contains(HeapObject* object) { |
| 3148 Address address = object->address(); | 3031 Address address = object->address(); |
| 3149 if (Heap::new_space()->Contains(address)) { | 3032 if (heap()->new_space()->Contains(address)) { |
| 3150 return false; | 3033 return false; |
| 3151 } | 3034 } |
| 3152 Page* page = Page::FromAddress(address); | 3035 Page* page = Page::FromAddress(address); |
| 3153 | 3036 |
| 3154 SLOW_ASSERT(!page->IsLargeObjectPage() | 3037 SLOW_ASSERT(!page->IsLargeObjectPage() |
| 3155 || !FindObject(address)->IsFailure()); | 3038 || !FindObject(address)->IsFailure()); |
| 3156 | 3039 |
| 3157 return page->IsLargeObjectPage(); | 3040 return page->IsLargeObjectPage(); |
| 3158 } | 3041 } |
| 3159 | 3042 |
| 3160 | 3043 |
| 3161 #ifdef DEBUG | 3044 #ifdef DEBUG |
| 3162 // We do not assume that the large object iterator works, because it depends | 3045 // We do not assume that the large object iterator works, because it depends |
| 3163 // on the invariants we are checking during verification. | 3046 // on the invariants we are checking during verification. |
| 3164 void LargeObjectSpace::Verify() { | 3047 void LargeObjectSpace::Verify() { |
| 3165 for (LargeObjectChunk* chunk = first_chunk_; | 3048 for (LargeObjectChunk* chunk = first_chunk_; |
| 3166 chunk != NULL; | 3049 chunk != NULL; |
| 3167 chunk = chunk->next()) { | 3050 chunk = chunk->next()) { |
| 3168 // Each chunk contains an object that starts at the large object page's | 3051 // Each chunk contains an object that starts at the large object page's |
| 3169 // object area start. | 3052 // object area start. |
| 3170 HeapObject* object = chunk->GetObject(); | 3053 HeapObject* object = chunk->GetObject(); |
| 3171 Page* page = Page::FromAddress(object->address()); | 3054 Page* page = Page::FromAddress(object->address()); |
| 3172 ASSERT(object->address() == page->ObjectAreaStart()); | 3055 ASSERT(object->address() == page->ObjectAreaStart()); |
| 3173 | 3056 |
| 3174 // The first word should be a map, and we expect all map pointers to be | 3057 // The first word should be a map, and we expect all map pointers to be |
| 3175 // in map space. | 3058 // in map space. |
| 3176 Map* map = object->map(); | 3059 Map* map = object->map(); |
| 3177 ASSERT(map->IsMap()); | 3060 ASSERT(map->IsMap()); |
| 3178 ASSERT(Heap::map_space()->Contains(map)); | 3061 ASSERT(heap()->map_space()->Contains(map)); |
| 3179 | 3062 |
| 3180 // We have only code, sequential strings, external strings | 3063 // We have only code, sequential strings, external strings |
| 3181 // (sequential strings that have been morphed into external | 3064 // (sequential strings that have been morphed into external |
| 3182 // strings), fixed arrays, and byte arrays in large object space. | 3065 // strings), fixed arrays, and byte arrays in large object space. |
| 3183 ASSERT(object->IsCode() || object->IsSeqString() || | 3066 ASSERT(object->IsCode() || object->IsSeqString() || |
| 3184 object->IsExternalString() || object->IsFixedArray() || | 3067 object->IsExternalString() || object->IsFixedArray() || |
| 3185 object->IsByteArray()); | 3068 object->IsByteArray()); |
| 3186 | 3069 |
| 3187 // The object itself should look OK. | 3070 // The object itself should look OK. |
| 3188 object->Verify(); | 3071 object->Verify(); |
| 3189 | 3072 |
| 3190 // Byte arrays and strings don't have interior pointers. | 3073 // Byte arrays and strings don't have interior pointers. |
| 3191 if (object->IsCode()) { | 3074 if (object->IsCode()) { |
| 3192 VerifyPointersVisitor code_visitor; | 3075 VerifyPointersVisitor code_visitor; |
| 3193 object->IterateBody(map->instance_type(), | 3076 object->IterateBody(map->instance_type(), |
| 3194 object->Size(), | 3077 object->Size(), |
| 3195 &code_visitor); | 3078 &code_visitor); |
| 3196 } else if (object->IsFixedArray()) { | 3079 } else if (object->IsFixedArray()) { |
| 3197 // We loop over fixed arrays ourselves, rather than using the visitor, | 3080 // We loop over fixed arrays ourselves, rather than using the visitor, |
| 3198 // because the visitor doesn't support the start/offset iteration | 3081 // because the visitor doesn't support the start/offset iteration |
| 3199 // needed for IsRegionDirty. | 3082 // needed for IsRegionDirty. |
| 3200 FixedArray* array = FixedArray::cast(object); | 3083 FixedArray* array = FixedArray::cast(object); |
| 3201 for (int j = 0; j < array->length(); j++) { | 3084 for (int j = 0; j < array->length(); j++) { |
| 3202 Object* element = array->get(j); | 3085 Object* element = array->get(j); |
| 3203 if (element->IsHeapObject()) { | 3086 if (element->IsHeapObject()) { |
| 3204 HeapObject* element_object = HeapObject::cast(element); | 3087 HeapObject* element_object = HeapObject::cast(element); |
| 3205 ASSERT(Heap::Contains(element_object)); | 3088 ASSERT(heap()->Contains(element_object)); |
| 3206 ASSERT(element_object->map()->IsMap()); | 3089 ASSERT(element_object->map()->IsMap()); |
| 3207 if (Heap::InNewSpace(element_object)) { | 3090 if (heap()->InNewSpace(element_object)) { |
| 3208 Address array_addr = object->address(); | 3091 Address array_addr = object->address(); |
| 3209 Address element_addr = array_addr + FixedArray::kHeaderSize + | 3092 Address element_addr = array_addr + FixedArray::kHeaderSize + |
| 3210 j * kPointerSize; | 3093 j * kPointerSize; |
| 3211 | 3094 |
| 3212 ASSERT(Page::FromAddress(array_addr)->IsRegionDirty(element_addr)); | 3095 ASSERT(Page::FromAddress(array_addr)->IsRegionDirty(element_addr)); |
| 3213 } | 3096 } |
| 3214 } | 3097 } |
| 3215 } | 3098 } |
| 3216 } | 3099 } |
| 3217 } | 3100 } |
| (...skipping 18 matching lines...) |
| 3236 CollectHistogramInfo(obj); | 3119 CollectHistogramInfo(obj); |
| 3237 } | 3120 } |
| 3238 | 3121 |
| 3239 PrintF(" number of objects %d, " | 3122 PrintF(" number of objects %d, " |
| 3240 "size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_); | 3123 "size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_); |
| 3241 if (num_objects > 0) ReportHistogram(false); | 3124 if (num_objects > 0) ReportHistogram(false); |
| 3242 } | 3125 } |
| 3243 | 3126 |
| 3244 | 3127 |
| 3245 void LargeObjectSpace::CollectCodeStatistics() { | 3128 void LargeObjectSpace::CollectCodeStatistics() { |
| 3129 Isolate* isolate = heap()->isolate(); |
| 3246 LargeObjectIterator obj_it(this); | 3130 LargeObjectIterator obj_it(this); |
| 3247 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) { | 3131 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) { |
| 3248 if (obj->IsCode()) { | 3132 if (obj->IsCode()) { |
| 3249 Code* code = Code::cast(obj); | 3133 Code* code = Code::cast(obj); |
| 3250 code_kind_statistics[code->kind()] += code->Size(); | 3134 isolate->code_kind_statistics()[code->kind()] += code->Size(); |
| 3251 } | 3135 } |
| 3252 } | 3136 } |
| 3253 } | 3137 } |
| 3254 #endif // DEBUG | 3138 #endif // DEBUG |
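What CollectCodeStatistics accumulates, as a standalone sketch: total code size bucketed by kind. The record type and the map container are illustrative; the real code indexes a per-isolate array by Code::Kind:

#include <cstddef>
#include <map>
#include <string>
#include <vector>

struct CodeRecordSketch {
  std::string kind;   // e.g. "FUNCTION", "STUB" (illustrative labels)
  size_t size;        // bytes occupied by the code object
};

std::map<std::string, size_t> CollectCodeKindStatistics(
    const std::vector<CodeRecordSketch>& code_objects) {
  std::map<std::string, size_t> totals;
  for (const CodeRecordSketch& rec : code_objects) {
    totals[rec.kind] += rec.size;   // same accumulation as code_kind_statistics
  }
  return totals;
}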
| 3255 | 3139 |
| 3256 } } // namespace v8::internal | 3140 } } // namespace v8::internal |