// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_SPACES_INL_H_
#define V8_SPACES_INL_H_

#include "src/heap-profiler.h"
#include "src/isolate.h"
#include "src/spaces.h"
#include "src/v8memory.h"

namespace v8 {
namespace internal {


// -----------------------------------------------------------------------------
// Bitmap

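// Zeroes every mark-bit cell on the chunk and resets its live-byte count,
// so the chunk appears to contain no live objects until it is re-marked.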
void Bitmap::Clear(MemoryChunk* chunk) {
  Bitmap* bitmap = chunk->markbits();
  for (int i = 0; i < bitmap->CellsCount(); i++) bitmap->cells()[i] = 0;
  chunk->ResetLiveBytes();
}


// -----------------------------------------------------------------------------
// PageIterator


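// Walks the circular page list of a paged space, starting at the page after
// the anchor and stopping when the anchor is reached again.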
PageIterator::PageIterator(PagedSpace* space)
    : space_(space),
      prev_page_(&space->anchor_),
      next_page_(prev_page_->next_page()) { }


bool PageIterator::has_next() {
  return next_page_ != &space_->anchor_;
}


Page* PageIterator::next() {
  DCHECK(has_next());
  prev_page_ = next_page_;
  next_page_ = next_page_->next_page();
  return prev_page_;
}


// -----------------------------------------------------------------------------
// NewSpacePageIterator


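// The three constructors cover the common iteration ranges: all pages of a
// new space's to-space, all pages of a single semispace, and an explicit
// [start, limit) address range, which must lie within one semispace.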
NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
    : prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
      next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
      last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) { }

NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
    : prev_page_(space->anchor()),
      next_page_(prev_page_->next_page()),
      last_page_(prev_page_->prev_page()) { }

NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
    : prev_page_(NewSpacePage::FromAddress(start)->prev_page()),
      next_page_(NewSpacePage::FromAddress(start)),
      last_page_(NewSpacePage::FromLimit(limit)) {
  SemiSpace::AssertValidRange(start, limit);
}


bool NewSpacePageIterator::has_next() {
  return prev_page_ != last_page_;
}


NewSpacePage* NewSpacePageIterator::next() {
  DCHECK(has_next());
  prev_page_ = next_page_;
  next_page_ = next_page_->next_page();
  return prev_page_;
}


// -----------------------------------------------------------------------------
// HeapObjectIterator
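// Scans forward from cur_addr_ and returns the next non-filler object on the
// current page, or NULL when the page is exhausted. The unused gap between
// the space's top and limit contains no objects and is skipped.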
HeapObject* HeapObjectIterator::FromCurrentPage() {
  while (cur_addr_ != cur_end_) {
    if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
      cur_addr_ = space_->limit();
      continue;
    }
    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
    int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
    cur_addr_ += obj_size;
    DCHECK(cur_addr_ <= cur_end_);
    if (!obj->IsFiller()) {
      DCHECK_OBJECT_SIZE(obj_size);
      return obj;
    }
  }
  return NULL;
}


// -----------------------------------------------------------------------------
// MemoryAllocator

#ifdef ENABLE_HEAP_PROTECTION

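// Thin wrappers over the platform memory-protection primitives, compiled in
// only when heap protection is enabled.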
void MemoryAllocator::Protect(Address start, size_t size) {
  base::OS::Protect(start, size);
}


void MemoryAllocator::Unprotect(Address start,
                                size_t size,
                                Executability executable) {
  base::OS::Unprotect(start, size, executable);
}


void MemoryAllocator::ProtectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  base::OS::Protect(chunks_[id].address(), chunks_[id].size());
}


void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  base::OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
                      chunks_[id].owner()->executable() == EXECUTABLE);
}

#endif


// -----------------------------------------------------------------------------
// PagedSpace
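// Turns a freshly committed MemoryChunk into a Page owned by the given paged
// space: the page's usable area is added to the space's capacity and handed
// to the free list, and incremental-marking page flags are set up.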
Page* Page::Initialize(Heap* heap,
                       MemoryChunk* chunk,
                       Executability executable,
                       PagedSpace* owner) {
  Page* page = reinterpret_cast<Page*>(chunk);
  DCHECK(page->area_size() <= kMaxRegularHeapObjectSize);
  DCHECK(chunk->owner() == owner);
  owner->IncreaseCapacity(page->area_size());
  owner->Free(page->area_start(), page->area_size());

  heap->incremental_marking()->SetOldSpacePageFlags(chunk);

  return page;
}


bool PagedSpace::Contains(Address addr) {
  Page* p = Page::FromAddress(addr);
  if (!p->is_valid()) return false;
  return p->owner() == this;
}


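// Sets or clears the SCAN_ON_SCAVENGE flag, keeping the heap's global count
// of scan-on-scavenge pages in sync with the per-chunk flag.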
void MemoryChunk::set_scan_on_scavenge(bool scan) {
  if (scan) {
    if (!scan_on_scavenge()) heap_->increment_scan_on_scavenge_pages();
    SetFlag(SCAN_ON_SCAVENGE);
  } else {
    if (scan_on_scavenge()) heap_->decrement_scan_on_scavenge_pages();
    ClearFlag(SCAN_ON_SCAVENGE);
  }
  heap_->incremental_marking()->SetOldSpacePageFlags(this);
}


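// Maps an arbitrary interior address back to its MemoryChunk. For regular
// pages this is alignment arithmetic; an aligned-down candidate with no
// owner must belong to a large object, so the large object space is searched
// instead.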
MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) {
  MemoryChunk* maybe = reinterpret_cast<MemoryChunk*>(
      OffsetFrom(addr) & ~Page::kPageAlignmentMask);
  if (maybe->owner() != NULL) return maybe;
  LargeObjectIterator iterator(heap->lo_space());
  for (HeapObject* o = iterator.Next(); o != NULL; o = iterator.Next()) {
    // Fixed arrays are the only pointer-containing objects in large object
    // space.
    if (o->IsFixedArray()) {
      MemoryChunk* chunk = MemoryChunk::FromAddress(o->address());
      if (chunk->Contains(addr)) {
        return chunk;
      }
    }
  }
  UNREACHABLE();
  return NULL;
}


void MemoryChunk::UpdateHighWaterMark(Address mark) {
  if (mark == NULL) return;
  // Need to subtract one from the mark because when a chunk is full the
  // top points to the next address after the chunk, which effectively belongs
  // to another chunk. See the comment to Page::FromAllocationTop.
  MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
  int new_mark = static_cast<int>(mark - chunk->address());
  if (new_mark > chunk->high_water_mark_) {
    chunk->high_water_mark_ = new_mark;
  }
}


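// Iterates the chunks of the pointer-containing spaces: old pointer space
// first, then map space, then large object space.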
PointerChunkIterator::PointerChunkIterator(Heap* heap)
    : state_(kOldPointerState),
      old_pointer_iterator_(heap->old_pointer_space()),
      map_iterator_(heap->map_space()),
      lo_iterator_(heap->lo_space()) { }


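// Typed wrappers around the MemoryChunk list links. Pages linked into a
// space's list always share that space as their owner, which the DCHECKs
// below verify.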
Page* Page::next_page() {
  DCHECK(next_chunk()->owner() == owner());
  return static_cast<Page*>(next_chunk());
}


Page* Page::prev_page() {
  DCHECK(prev_chunk()->owner() == owner());
  return static_cast<Page*>(prev_chunk());
}


void Page::set_next_page(Page* page) {
  DCHECK(page->owner() == owner());
  set_next_chunk(page);
}


void Page::set_prev_page(Page* page) {
  DCHECK(page->owner() == owner());
  set_prev_chunk(page);
}


// Try linear allocation in the page of alloc_info's allocation top. Does
// not contain slow case logic (e.g. move to the next page or try free list
// allocation) so it can be used by all the allocation functions and for all
// the paged spaces.
HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
  Address current_top = allocation_info_.top();
  Address new_top = current_top + size_in_bytes;
  if (new_top > allocation_info_.limit()) return NULL;

  allocation_info_.set_top(new_top);
  return HeapObject::FromAddress(current_top);
}


// Raw allocation: try the linear allocation area first, then the free list,
// and finally the slow path. If all three fail, signal the caller to retry
// after a GC in this space.
AllocationResult PagedSpace::AllocateRaw(int size_in_bytes) {
  HeapObject* object = AllocateLinearly(size_in_bytes);

  if (object == NULL) {
    object = free_list_.Allocate(size_in_bytes);
    if (object == NULL) {
      object = SlowAllocateRaw(size_in_bytes);
    }
  }

  if (object != NULL) {
    if (identity() == CODE_SPACE) {
      SkipList::Update(object->address(), size_in_bytes);
    }
    return object;
  }

  return AllocationResult::Retry(identity());
}


// -----------------------------------------------------------------------------
// NewSpace


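// Bump-pointer allocation in to-space: take size_in_bytes from the linear
// allocation area, or fall back to the slow path when the request does not
// fit below the current limit.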
AllocationResult NewSpace::AllocateRaw(int size_in_bytes) {
  Address old_top = allocation_info_.top();

  if (allocation_info_.limit() - old_top < size_in_bytes) {
    return SlowAllocateRaw(size_in_bytes);
  }

  HeapObject* obj = HeapObject::FromAddress(old_top);
  allocation_info_.set_top(allocation_info_.top() + size_in_bytes);
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  return obj;
}


LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
  heap->incremental_marking()->SetOldSpacePageFlags(chunk);
  return static_cast<LargePage*>(chunk);
}


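// The space can grow by whatever the memory allocator has left, discounted
// to a usable object size via ObjectSizeFor.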
intptr_t LargeObjectSpace::Available() {
  return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available());
}


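// A heap object is a free-list node iff its map is the free-space map or one
// of the one/two-pointer filler maps; the raw (unchecked) root accessors
// avoid map checks on objects that are already dead.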
bool FreeListNode::IsFreeListNode(HeapObject* object) {
  Map* map = object->map();
  Heap* heap = object->GetHeap();
  return map == heap->raw_unchecked_free_space_map()
      || map == heap->raw_unchecked_one_pointer_filler_map()
      || map == heap->raw_unchecked_two_pointer_filler_map();
}

} }  // namespace v8::internal

#endif  // V8_SPACES_INL_H_