| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 380 matching lines...) |
| 391 } | 391 } |
| 392 | 392 |
| 393 | 393 |
| 394 NewSpacePage* NewSpacePage::Initialize(Heap* heap, Address start) { | 394 NewSpacePage* NewSpacePage::Initialize(Heap* heap, Address start) { |
| 395 MemoryChunk* chunk = MemoryChunk::Initialize(heap, | 395 MemoryChunk* chunk = MemoryChunk::Initialize(heap, |
| 396 start, | 396 start, |
| 397 Page::kPageSize, | 397 Page::kPageSize, |
| 398 NOT_EXECUTABLE, | 398 NOT_EXECUTABLE, |
| 399 heap->new_space()); | 399 heap->new_space()); |
| 400 chunk->initialize_scan_on_scavenge(true); | 400 chunk->initialize_scan_on_scavenge(true); |
| | 401 heap->incremental_marking()->SetNewSpacePageFlags(chunk); |
| 401 return static_cast<NewSpacePage*>(chunk); | 402 return static_cast<NewSpacePage*>(chunk); |
| 402 } | 403 } |
| 403 | 404 |
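The added call on new line 401 lets the incremental marker stamp each freshly initialized new-space page with its current marking state, presumably so pages created while marking is in progress are treated correctly by the write barrier. A minimal sketch of such a hook, under stated assumptions: IsMarking(), ClearFlag(), and the POINTERS_TO_HERE_ARE_INTERESTING flag name do not appear in this diff and are illustrative only (SetFlag() does appear in MemoryChunk::Initialize below).

    // Hypothetical sketch, not the actual V8 implementation.
    void IncrementalMarking::SetNewSpacePageFlags(MemoryChunk* chunk) {
      if (IsMarking()) {
        // While incremental marking runs, stores into freshly created
        // new-space pages must be visible to the write barrier.
        chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
      } else {
        chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
      }
    }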
| 404 | 405 |
| 405 MemoryChunk* MemoryChunk::Initialize(Heap* heap, | 406 MemoryChunk* MemoryChunk::Initialize(Heap* heap, |
| 406 Address base, | 407 Address base, |
| 407 size_t size, | 408 size_t size, |
| 408 Executability executable, | 409 Executability executable, |
| 409 Space* owner) { | 410 Space* owner) { |
| 410 MemoryChunk* chunk = FromAddress(base); | 411 MemoryChunk* chunk = FromAddress(base); |
| 411 | 412 |
| 412 ASSERT(base == chunk->address()); | 413 ASSERT(base == chunk->address()); |
| 413 | 414 |
| 414 chunk->heap_ = heap; | 415 chunk->heap_ = heap; |
| 415 chunk->size_ = size; | 416 chunk->size_ = size; |
| 416 chunk->flags_ = 0; | 417 chunk->flags_ = 0; |
| 417 chunk->set_owner(owner); | 418 chunk->set_owner(owner); |
| 418 chunk->markbits()->Clear(); | 419 chunk->markbits()->Clear(); |
| 419 chunk->initialize_scan_on_scavenge(false); | 420 chunk->initialize_scan_on_scavenge(false); |
| 420 ASSERT(OFFSET_OF(MemoryChunk, scan_on_scavenge_) == kScanOnScavengeOffset); | |
| 421 ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset); | 421 ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset); |
| 422 | 422 |
| 423 if (executable == EXECUTABLE) chunk->SetFlag(IS_EXECUTABLE); | 423 if (executable == EXECUTABLE) chunk->SetFlag(IS_EXECUTABLE); |
| 424 | 424 |
| 425 if (owner == heap->old_data_space()) chunk->SetFlag(CONTAINS_ONLY_DATA); | 425 if (owner == heap->old_data_space()) chunk->SetFlag(CONTAINS_ONLY_DATA); |
| 426 | 426 |
| 427 return chunk; | 427 return chunk; |
| 428 } | 428 } |
| 429 | 429 |
| 430 | 430 |
| (...skipping 492 matching lines...) |
| 923 from_space_.executable()); | 923 from_space_.executable()); |
| 924 } | 924 } |
| 925 | 925 |
| 926 #endif | 926 #endif |
| 927 | 927 |
| 928 | 928 |
| 929 void NewSpace::Flip() { | 929 void NewSpace::Flip() { |
| 930 SemiSpace tmp = from_space_; | 930 SemiSpace tmp = from_space_; |
| 931 from_space_ = to_space_; | 931 from_space_ = to_space_; |
| 932 to_space_ = tmp; | 932 to_space_ = tmp; |
| | 933 |
| | 934 NewSpacePage* old_active_page = from_space_.current_page(); |
| | 935 NewSpacePage* new_active_page = to_space_.current_page(); |
| | 936 new_active_page->CopyFlagsFrom(old_active_page); |
| 933 } | 937 } |
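After swapping the semispaces, the flip now copies page flags from the previously active page onto the page that becomes active, so per-page state such as the scan-on-scavenge and incremental-marking bits survives the role change. A minimal sketch of what CopyFlagsFrom could look like, assuming flags_ is the bitfield initialized in MemoryChunk::Initialize above and kCopyOnFlipFlagsMask is a hypothetical constant selecting the flags worth preserving:

    // Hypothetical sketch, not the actual V8 implementation.
    void NewSpacePage::CopyFlagsFrom(NewSpacePage* other) {
      // Keep this page's own bits outside the mask, then merge in the
      // preserved subset from the previously active page.
      flags_ = (flags_ & ~kCopyOnFlipFlagsMask) |
               (other->flags_ & kCopyOnFlipFlagsMask);
    }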
| 934 | 938 |
| 935 | 939 |
| 936 void NewSpace::Grow() { | 940 void NewSpace::Grow() { |
| 937 ASSERT(Capacity() < MaximumCapacity()); | 941 ASSERT(Capacity() < MaximumCapacity()); |
| 938 if (to_space_.Grow()) { | 942 if (to_space_.Grow()) { |
| 939 // Only grow from space if we managed to grow to space. | 943 // Only grow from space if we managed to grow to space. |
| 940 if (!from_space_.Grow()) { | 944 if (!from_space_.Grow()) { |
| 941 // If we managed to grow to space but couldn't grow from space, | 945 // If we managed to grow to space but couldn't grow from space, |
| 942 // attempt to shrink to space. | 946 // attempt to shrink to space. |
| (...skipping 1320 matching lines...) |
| 2263 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) { | 2267 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) { |
| 2264 if (obj->IsCode()) { | 2268 if (obj->IsCode()) { |
| 2265 Code* code = Code::cast(obj); | 2269 Code* code = Code::cast(obj); |
| 2266 isolate->code_kind_statistics()[code->kind()] += code->Size(); | 2270 isolate->code_kind_statistics()[code->kind()] += code->Size(); |
| 2267 } | 2271 } |
| 2268 } | 2272 } |
| 2269 } | 2273 } |
| 2270 #endif // DEBUG | 2274 #endif // DEBUG |
| 2271 | 2275 |
| 2272 } } // namespace v8::internal | 2276 } } // namespace v8::internal |