Chromium Code Reviews

Side by Side Diff: third_party/WebKit/Source/platform/heap/HeapPage.cpp

Issue 2819123002: Replace ASSERT_NOT_REACHED, and RELEASE_ASSERT in platform/heap (Closed)
Patch Set: fix build error | Created 3 years, 8 months ago
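Note on the change pattern in this patch set: WTF's RELEASE_ASSERT and Chromium's CHECK (from base/logging.h) are both enabled in release builds, so swapping one for the other keeps the crash-on-failure behavior while converging on the Chromium-standard macro. A minimal contrast, assuming a Chromium build environment (the function name is hypothetical):

    #include "base/logging.h"

    void Example(bool invariant_holds) {
      CHECK(invariant_holds);   // Always compiled in; crashes the process on failure.
      DCHECK(invariant_holds);  // Debug-only counterpart; compiled out unless DCHECK_IS_ON().
    }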
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 228 matching lines...)
239 for (BasePage* page = first_unswept_page_; page; page = page->Next()) 239 for (BasePage* page = first_unswept_page_; page; page = page->Next())
240 page->PoisonUnmarkedObjects(); 240 page->PoisonUnmarkedObjects();
241 } 241 }
242 #endif 242 #endif
243 243
244 Address BaseArena::LazySweep(size_t allocation_size, size_t gc_info_index) { 244 Address BaseArena::LazySweep(size_t allocation_size, size_t gc_info_index) {
245 // If there are no pages to be swept, return immediately. 245 // If there are no pages to be swept, return immediately.
246 if (!first_unswept_page_) 246 if (!first_unswept_page_)
247 return nullptr; 247 return nullptr;
248 248
249 RELEASE_ASSERT(GetThreadState()->IsSweepingInProgress()); 249 CHECK(GetThreadState()->IsSweepingInProgress());
250 250
251 // lazySweepPages() can be called recursively if finalizers invoked in 251 // lazySweepPages() can be called recursively if finalizers invoked in
252 // page->sweep() allocate memory and the allocation triggers 252 // page->sweep() allocate memory and the allocation triggers
253 // lazySweepPages(). This check prevents the sweeping from being executed 253 // lazySweepPages(). This check prevents the sweeping from being executed
254 // recursively. 254 // recursively.
255 if (GetThreadState()->SweepForbidden()) 255 if (GetThreadState()->SweepForbidden())
256 return nullptr; 256 return nullptr;
257 257
258 TRACE_EVENT0("blink_gc", "BaseArena::lazySweepPages"); 258 TRACE_EVENT0("blink_gc", "BaseArena::lazySweepPages");
259 ThreadState::SweepForbiddenScope sweep_forbidden(GetThreadState()); 259 ThreadState::SweepForbiddenScope sweep_forbidden(GetThreadState());
(...skipping 22 matching lines...)
282 } 282 }
283 } 283 }
284 284
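The comment above lazySweepPages() describes a re-entrancy hazard: finalizers run during page->sweep() may allocate, and allocation can call back into lazy sweeping. The SweepForbidden() early return plus the RAII SweepForbiddenScope form a standard re-entrancy guard. A minimal sketch of that pattern outside Blink; all names here are hypothetical:

    #include <cassert>

    struct SweepState {
      bool sweep_forbidden = false;
    };

    class SweepForbiddenScope {
     public:
      explicit SweepForbiddenScope(SweepState* state) : state_(state) {
        assert(!state_->sweep_forbidden);
        state_->sweep_forbidden = true;  // Block nested sweeps for this scope.
      }
      ~SweepForbiddenScope() { state_->sweep_forbidden = false; }

     private:
      SweepState* state_;
    };

    void* LazySweep(SweepState* state) {
      if (state->sweep_forbidden)
        return nullptr;                  // Re-entrant call: refuse to sweep.
      SweepForbiddenScope scope(state);  // Any allocation below that re-enters
                                         // LazySweep() now bails out above.
      // ... sweep pages, possibly running finalizers that allocate ...
      return nullptr;
    }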
285 bool BaseArena::LazySweepWithDeadline(double deadline_seconds) { 285 bool BaseArena::LazySweepWithDeadline(double deadline_seconds) {
286 // It might be heavy to call 286 // It might be heavy to call
287 // Platform::current()->monotonicallyIncreasingTimeSeconds() per page (i.e., 287 // Platform::current()->monotonicallyIncreasingTimeSeconds() per page (i.e.,
288 // 128 KB sweep or one LargeObject sweep), so we check the deadline per 10 288 // 128 KB sweep or one LargeObject sweep), so we check the deadline per 10
289 // pages. 289 // pages.
290 static const int kDeadlineCheckInterval = 10; 290 static const int kDeadlineCheckInterval = 10;
291 291
292 RELEASE_ASSERT(GetThreadState()->IsSweepingInProgress()); 292 CHECK(GetThreadState()->IsSweepingInProgress());
293 ASSERT(GetThreadState()->SweepForbidden()); 293 ASSERT(GetThreadState()->SweepForbidden());
294 ASSERT(!GetThreadState()->IsMainThread() || 294 ASSERT(!GetThreadState()->IsMainThread() ||
295 ScriptForbiddenScope::IsScriptForbidden()); 295 ScriptForbiddenScope::IsScriptForbidden());
296 296
297 NormalPageArena* normal_arena = nullptr; 297 NormalPageArena* normal_arena = nullptr;
298 if (first_unswept_page_ && !first_unswept_page_->IsLargeObjectPage()) { 298 if (first_unswept_page_ && !first_unswept_page_->IsLargeObjectPage()) {
299 // Mark this NormalPageArena as being lazily swept. 299 // Mark this NormalPageArena as being lazily swept.
300 NormalPage* normal_page = 300 NormalPage* normal_page =
301 reinterpret_cast<NormalPage*>(first_unswept_page_); 301 reinterpret_cast<NormalPage*>(first_unswept_page_);
302 normal_arena = normal_page->ArenaForNormalPage(); 302 normal_arena = normal_page->ArenaForNormalPage();
(...skipping 13 matching lines...)
316 } 316 }
317 page_count++; 317 page_count++;
318 } 318 }
319 ThreadHeap::ReportMemoryUsageForTracing(); 319 ThreadHeap::ReportMemoryUsageForTracing();
320 if (normal_arena) 320 if (normal_arena)
321 normal_arena->SetIsLazySweeping(false); 321 normal_arena->SetIsLazySweeping(false);
322 return true; 322 return true;
323 } 323 }
324 324
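The kDeadlineCheckInterval comment above captures a common amortization: reading the clock once per page (each page being roughly 128 KB of sweeping work) costs more than it saves, so the deadline is consulted every tenth page. A standalone sketch of the same loop shape, with hypothetical names and std::chrono in place of Blink's Platform clock:

    #include <chrono>

    bool SweepPagesWithDeadline(int total_pages,
                                std::chrono::steady_clock::time_point deadline) {
      static const int kDeadlineCheckInterval = 10;
      int page_count = 1;
      for (int i = 0; i < total_pages; ++i) {
        // ... sweep one page ...
        if (page_count % kDeadlineCheckInterval == 0 &&
            std::chrono::steady_clock::now() > deadline) {
          return false;  // Out of budget; resume sweeping on a later slice.
        }
        page_count++;
      }
      return true;  // All pages swept within the deadline.
    }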
325 void BaseArena::CompleteSweep() { 325 void BaseArena::CompleteSweep() {
326 RELEASE_ASSERT(GetThreadState()->IsSweepingInProgress()); 326 CHECK(GetThreadState()->IsSweepingInProgress());
327 ASSERT(GetThreadState()->SweepForbidden()); 327 ASSERT(GetThreadState()->SweepForbidden());
328 ASSERT(!GetThreadState()->IsMainThread() || 328 ASSERT(!GetThreadState()->IsMainThread() ||
329 ScriptForbiddenScope::IsScriptForbidden()); 329 ScriptForbiddenScope::IsScriptForbidden());
330 330
331 while (first_unswept_page_) { 331 while (first_unswept_page_) {
332 SweepUnsweptPage(); 332 SweepUnsweptPage();
333 } 333 }
334 ThreadHeap::ReportMemoryUsageForTracing(); 334 ThreadHeap::ReportMemoryUsageForTracing();
335 } 335 }
336 336
(...skipping 278 matching lines...)
615 for (size_t i = 0; i < kBlinkPagesPerRegion; ++i) { 615 for (size_t i = 0; i < kBlinkPagesPerRegion; ++i) {
616 PageMemory* memory = PageMemory::SetupPageMemoryInRegion( 616 PageMemory* memory = PageMemory::SetupPageMemoryInRegion(
617 region, i * kBlinkPageSize, BlinkPagePayloadSize()); 617 region, i * kBlinkPageSize, BlinkPagePayloadSize());
618 // Take the first possible page ensuring that this thread actually 618 // Take the first possible page ensuring that this thread actually
619 // gets a page and add the rest to the page pool. 619 // gets a page and add the rest to the page pool.
620 if (!page_memory) { 620 if (!page_memory) {
621 bool result = memory->Commit(); 621 bool result = memory->Commit();
622 // If you hit the ASSERT, it will mean that you're hitting 622 // If you hit the ASSERT, it will mean that you're hitting
623 // the limit of the number of mmapped regions OS can support 623 // the limit of the number of mmapped regions OS can support
624 // (e.g., /proc/sys/vm/max_map_count in Linux). 624 // (e.g., /proc/sys/vm/max_map_count in Linux).
625 RELEASE_ASSERT(result); 625 CHECK(result);
626 page_memory = memory; 626 page_memory = memory;
627 } else { 627 } else {
628 GetThreadState()->Heap().GetFreePagePool()->Add(ArenaIndex(), memory); 628 GetThreadState()->Heap().GetFreePagePool()->Add(ArenaIndex(), memory);
629 } 629 }
630 } 630 }
631 } 631 }
632 NormalPage* page = 632 NormalPage* page =
633 new (page_memory->WritableStart()) NormalPage(page_memory, this); 633 new (page_memory->WritableStart()) NormalPage(page_memory, this);
634 page->Link(&first_page_); 634 page->Link(&first_page_);
635 635
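The loop above carves a reserved region into kBlinkPagesPerRegion pages, commits only the page this thread needs right now, and parks the rest in the shared free-page pool. A minimal sketch of that take-one-pool-the-rest shape; PageMemory here is a hypothetical stand-in and Commit() is stubbed:

    #include <vector>

    struct PageMemory {
      bool Commit() { return true; }  // Stub; the real code commits virtual memory,
                                      // and can fail when the process exceeds the
                                      // OS mmap limit (/proc/sys/vm/max_map_count).
    };

    PageMemory* TakeFirstPageAndPoolRest(std::vector<PageMemory*>& region_pages,
                                         std::vector<PageMemory*>& free_pool) {
      PageMemory* taken = nullptr;
      for (PageMemory* memory : region_pages) {
        if (!taken) {
          if (!memory->Commit())
            return nullptr;             // Sketch only; the real code CHECKs instead.
          taken = memory;               // First committed page serves this thread.
        } else {
          free_pool.push_back(memory);  // Remaining pages serve later requests.
        }
      }
      return taken;
    }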
(...skipping 270 matching lines...)
906 GetThreadState()->CompleteSweep(); 906 GetThreadState()->CompleteSweep();
907 907
908 // 7. Check if we should trigger a GC. 908 // 7. Check if we should trigger a GC.
909 GetThreadState()->ScheduleGCIfNeeded(); 909 GetThreadState()->ScheduleGCIfNeeded();
910 910
911 // 8. Add a new page to this heap. 911 // 8. Add a new page to this heap.
912 AllocatePage(); 912 AllocatePage();
913 913
914 // 9. Try to allocate from a free list. This allocation must succeed. 914 // 9. Try to allocate from a free list. This allocation must succeed.
915 result = AllocateFromFreeList(allocation_size, gc_info_index); 915 result = AllocateFromFreeList(allocation_size, gc_info_index);
916 RELEASE_ASSERT(result); 916 CHECK(result);
917 return result; 917 return result;
918 } 918 }
919 919
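Steps 6 through 9 above are the tail of the slow allocation path: finish sweeping, give the GC a chance, add a fresh page, then retry the free list. After a brand-new page has been added, the retry cannot legitimately fail, which is why the result is CHECKed rather than handled. A self-contained sketch of that ladder with hypothetical stubs:

    #include <cassert>
    #include <cstddef>

    using Address = char*;

    static char g_fresh_page[128 * 1024];  // Stand-in for a newly added page.

    void CompleteSweep() {}       // Finish lazy sweeping; may release free blocks.
    void ScheduleGCIfNeeded() {}  // Let the GC run if heap growth warrants it.
    void AllocatePage() {}        // Guarantee at least one fresh, empty page.
    Address AllocateFromFreeList(size_t size) {
      return size <= sizeof(g_fresh_page) ? g_fresh_page : nullptr;
    }

    Address OutOfLineAllocateTail(size_t allocation_size) {
      CompleteSweep();
      ScheduleGCIfNeeded();
      AllocatePage();
      Address result = AllocateFromFreeList(allocation_size);
      assert(result);  // Must succeed: a fresh page was just added.
      return result;
    }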
920 Address NormalPageArena::AllocateFromFreeList(size_t allocation_size, 920 Address NormalPageArena::AllocateFromFreeList(size_t allocation_size,
921 size_t gc_info_index) { 921 size_t gc_info_index) {
922 // Try reusing a block from the largest bin. The underlying reasoning 922 // Try reusing a block from the largest bin. The underlying reasoning
923 // being that we want to amortize this slow allocation call by carving 923 // being that we want to amortize this slow allocation call by carving
924 // off as a large a free block as possible in one go; a block that will 924 // off as a large a free block as possible in one go; a block that will
925 // service this block and let following allocations be serviced quickly 925 // service this block and let following allocations be serviced quickly
926 // by bump allocation. 926 // by bump allocation.
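The comment above states the policy: take from the largest bin, so that one slow free-list hit yields a block big enough for subsequent allocations to proceed by cheap bump allocation. A minimal sketch of a segregated free list with that largest-bin-first policy; the bin layout and all names are hypothetical:

    #include <cstddef>
    #include <vector>

    struct FreeBlock {
      char* start;
      size_t size;
    };

    class SegregatedFreeList {
     public:
      static const int kBinCount = 32;  // Bin i holds blocks of size >= 2^i.

      // Search bins from largest to smallest and carve off the biggest
      // available block; the caller bump-allocates from its remainder.
      bool TakeLargest(size_t allocation_size, FreeBlock* out) {
        for (int bin = kBinCount - 1; bin >= 0; --bin) {
          if (bins_[bin].empty() || bins_[bin].back().size < allocation_size)
            continue;
          *out = bins_[bin].back();
          bins_[bin].pop_back();
          return true;
        }
        return false;  // Nothing big enough; fall back to the slow path.
      }

     private:
      std::vector<FreeBlock> bins_[kBinCount];
    };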
(...skipping 894 matching lines...)
1821 1821
1822 has_entries_ = true; 1822 has_entries_ = true;
1823 size_t index = GetHash(address); 1823 size_t index = GetHash(address);
1824 ASSERT(!(index & 1)); 1824 ASSERT(!(index & 1));
1825 Address cache_page = RoundToBlinkPageStart(address); 1825 Address cache_page = RoundToBlinkPageStart(address);
1826 entries_[index + 1] = entries_[index]; 1826 entries_[index + 1] = entries_[index];
1827 entries_[index] = cache_page; 1827 entries_[index] = cache_page;
1828 } 1828 }
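The insertion above implements a two-slot bucket: the hash always yields an even index (hence the ASSERT(!(index & 1))), entries_[index] and entries_[index + 1] form one bucket, a new page takes the first slot, the previous occupant is demoted to the second, and the oldest entry is evicted. A standalone sketch of that two-way set-associative scheme; the sizes, the shift, and all names are hypothetical (the 17-bit shift assumes 128 KB pages):

    #include <cstddef>
    #include <cstdint>

    class TwoWayPageCache {
     public:
      static const size_t kNumEntries = 128;  // Two slots per bucket.

      void Add(uintptr_t page) {
        size_t index = Hash(page);
        entries_[index + 1] = entries_[index];  // Demote the previous entry.
        entries_[index] = page;                 // Newest entry takes slot 0.
      }

      bool Lookup(uintptr_t page) const {
        size_t index = Hash(page);
        return entries_[index] == page || entries_[index + 1] == page;
      }

     private:
      // Map a page address to an even index, giving kNumEntries / 2 buckets.
      static size_t Hash(uintptr_t page) {
        return (page >> 17) % (kNumEntries / 2) * 2;
      }

      uintptr_t entries_[kNumEntries] = {};
    };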
1829 1829
1830 } // namespace blink 1830 } // namespace blink
