| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 176 matching lines...) |
| 187 if (!scan_on_scavenge()) heap_->increment_scan_on_scavenge_pages(); | 187 if (!scan_on_scavenge()) heap_->increment_scan_on_scavenge_pages(); |
| 188 SetFlag(SCAN_ON_SCAVENGE); | 188 SetFlag(SCAN_ON_SCAVENGE); |
| 189 } else { | 189 } else { |
| 190 if (scan_on_scavenge()) heap_->decrement_scan_on_scavenge_pages(); | 190 if (scan_on_scavenge()) heap_->decrement_scan_on_scavenge_pages(); |
| 191 ClearFlag(SCAN_ON_SCAVENGE); | 191 ClearFlag(SCAN_ON_SCAVENGE); |
| 192 } | 192 } |
| 193 heap_->incremental_marking()->SetOldSpacePageFlags(this); | 193 heap_->incremental_marking()->SetOldSpacePageFlags(this); |
| 194 } | 194 } |
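The hunk above (the setter's name sits in the elided lines) keeps the heap-wide count of scan-on-scavenge pages in sync with the per-page flag: the counter moves only when the flag actually changes state. A minimal, self-contained sketch of that invariant, using illustrative names rather than V8's:

```cpp
// Hedged sketch: setting the flag to the value it already has must leave the
// heap-wide page counter untouched. Class and method names are assumptions.
class FakeHeap {
 public:
  FakeHeap() : scan_on_scavenge_pages_(0) {}
  void increment_scan_on_scavenge_pages() { ++scan_on_scavenge_pages_; }
  void decrement_scan_on_scavenge_pages() { --scan_on_scavenge_pages_; }
  int scan_on_scavenge_pages() const { return scan_on_scavenge_pages_; }
 private:
  int scan_on_scavenge_pages_;
};

class FakePage {
 public:
  explicit FakePage(FakeHeap* heap) : heap_(heap), scan_on_scavenge_(false) {}
  void set_scan_on_scavenge(bool scan) {
    if (scan) {
      if (!scan_on_scavenge_) heap_->increment_scan_on_scavenge_pages();
    } else {
      if (scan_on_scavenge_) heap_->decrement_scan_on_scavenge_pages();
    }
    scan_on_scavenge_ = scan;
  }
 private:
  FakeHeap* heap_;
  bool scan_on_scavenge_;
};
```

The real setter additionally calls heap_->incremental_marking()->SetOldSpacePageFlags(this) (line 193), which the sketch omits.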
| 195 | 195 |
| 196 | 196 |
| 197 MemoryChunk* MemoryChunk::FromAnyPointerAddress(Address addr) { | 197 MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) { |
| 198 MemoryChunk* maybe = reinterpret_cast<MemoryChunk*>( | 198 MemoryChunk* maybe = reinterpret_cast<MemoryChunk*>( |
| 199 OffsetFrom(addr) & ~Page::kPageAlignmentMask); | 199 OffsetFrom(addr) & ~Page::kPageAlignmentMask); |
| 200 if (maybe->owner() != NULL) return maybe; | 200 if (maybe->owner() != NULL) return maybe; |
| 201 LargeObjectIterator iterator(HEAP->lo_space()); | 201 LargeObjectIterator iterator(heap->lo_space()); |
| 202 for (HeapObject* o = iterator.Next(); o != NULL; o = iterator.Next()) { | 202 for (HeapObject* o = iterator.Next(); o != NULL; o = iterator.Next()) { |
| 203 // Fixed arrays are the only pointer-containing objects in large object | 203 // Fixed arrays are the only pointer-containing objects in large object |
| 204 // space. | 204 // space. |
| 205 if (o->IsFixedArray()) { | 205 if (o->IsFixedArray()) { |
| 206 MemoryChunk* chunk = MemoryChunk::FromAddress(o->address()); | 206 MemoryChunk* chunk = MemoryChunk::FromAddress(o->address()); |
| 207 if (chunk->Contains(addr)) { | 207 if (chunk->Contains(addr)) { |
| 208 return chunk; | 208 return chunk; |
| 209 } | 209 } |
| 210 } | 210 } |
| 211 } | 211 } |
| (...skipping 96 matching lines...) |
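The signature change at line 197 is the substantive part of this hunk: FromAnyPointerAddress now receives the Heap* explicitly and walks heap->lo_space(), rather than going through the global HEAP macro. A short sketch of what an updated call site might look like (the surrounding function is hypothetical, for illustration only):

```cpp
// Hypothetical caller: the only real point is that a Heap* must now be passed
// in explicitly instead of the callee reaching for the HEAP macro.
static void VisitCandidateSlot(Heap* heap, Address slot) {
  MemoryChunk* chunk = MemoryChunk::FromAnyPointerAddress(heap, slot);
  if (chunk->scan_on_scavenge()) {
    // ... record or process the slot ...
  }
}
```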
| 308 | 308 |
| 309 // ----------------------------------------------------------------------------- | 309 // ----------------------------------------------------------------------------- |
| 310 // NewSpace | 310 // NewSpace |
| 311 | 311 |
| 312 | 312 |
| 313 MaybeObject* NewSpace::AllocateRaw(int size_in_bytes) { | 313 MaybeObject* NewSpace::AllocateRaw(int size_in_bytes) { |
| 314 Address old_top = allocation_info_.top; | 314 Address old_top = allocation_info_.top; |
| 315 #ifdef DEBUG | 315 #ifdef DEBUG |
| 316 // If we are stressing compaction we waste some memory in new space | 316 // If we are stressing compaction we waste some memory in new space |
| 317 // in order to get more frequent GCs. | 317 // in order to get more frequent GCs. |
| 318 if (FLAG_stress_compaction && !HEAP->linear_allocation()) { | 318 if (FLAG_stress_compaction && !heap()->linear_allocation()) { |
| 319 if (allocation_info_.limit - old_top >= size_in_bytes * 4) { | 319 if (allocation_info_.limit - old_top >= size_in_bytes * 4) { |
| 320 int filler_size = size_in_bytes * 4; | 320 int filler_size = size_in_bytes * 4; |
| 321 for (int i = 0; i < filler_size; i += kPointerSize) { | 321 for (int i = 0; i < filler_size; i += kPointerSize) { |
| 322 *(reinterpret_cast<Object**>(old_top + i)) = | 322 *(reinterpret_cast<Object**>(old_top + i)) = |
| 323 HEAP->one_pointer_filler_map(); | 323 heap()->one_pointer_filler_map(); |
| 324 } | 324 } |
| 325 old_top += filler_size; | 325 old_top += filler_size; |
| 326 allocation_info_.top += filler_size; | 326 allocation_info_.top += filler_size; |
| 327 } | 327 } |
| 328 } | 328 } |
| 329 #endif | 329 #endif |
| 330 | 330 |
| 331 if (allocation_info_.limit - old_top < size_in_bytes) { | 331 if (allocation_info_.limit - old_top < size_in_bytes) { |
| 332 return SlowAllocateRaw(size_in_bytes); | 332 return SlowAllocateRaw(size_in_bytes); |
| 333 } | 333 } |
| (...skipping 21 matching lines...) |
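Two things are visible in the NewSpace::AllocateRaw hunk above: the same HEAP-to-heap() substitution made elsewhere in the patch (the space carries its own heap, so the accessor replaces the global macro), and the bump-pointer fast path, where a request succeeds only if it fits between top and limit and otherwise falls through to SlowAllocateRaw. A minimal, self-contained sketch of that allocation pattern, with illustrative names rather than V8's:

```cpp
#include <stdint.h>
#include <cstddef>

typedef uint8_t* Address;

// Illustrative linear allocation area: top is the next free byte, limit is
// the exclusive end of the area.
struct AllocationArea {
  Address top;
  Address limit;
};

// Bump-pointer fast path: reserve size_in_bytes by advancing top, or return
// NULL so the caller can take a slow path (SlowAllocateRaw in the real code).
static Address BumpAllocate(AllocationArea* area, int size_in_bytes) {
  if (area->limit - area->top < size_in_bytes) return NULL;
  Address result = area->top;
  area->top += size_in_bytes;
  return result;
}
```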
| 355 Map* map = object->map(); | 355 Map* map = object->map(); |
| 356 Heap* heap = object->GetHeap(); | 356 Heap* heap = object->GetHeap(); |
| 357 return map == heap->raw_unchecked_free_space_map() | 357 return map == heap->raw_unchecked_free_space_map() |
| 358 || map == heap->raw_unchecked_one_pointer_filler_map() | 358 || map == heap->raw_unchecked_one_pointer_filler_map() |
| 359 || map == heap->raw_unchecked_two_pointer_filler_map(); | 359 || map == heap->raw_unchecked_two_pointer_filler_map(); |
| 360 } | 360 } |
| 361 | 361 |
| 362 } } // namespace v8::internal | 362 } } // namespace v8::internal |
| 363 | 363 |
| 364 #endif // V8_SPACES_INL_H_ | 364 #endif // V8_SPACES_INL_H_ |