Chromium Code Reviews

Side by Side Diff: src/spaces.cc

Issue 7535004: Merge bleeding edge up to 8774 into the GC branch. (Closed) Base URL: http://v8.googlecode.com/svn/branches/experimental/gc/
Patch Set: Created 9 years, 4 months ago
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 674 matching lines...)
685 PageIterator iterator(this); 685 PageIterator iterator(this);
686 while (iterator.has_next()) { 686 while (iterator.has_next()) {
687 heap()->isolate()->memory_allocator()->Free(iterator.next()); 687 heap()->isolate()->memory_allocator()->Free(iterator.next());
688 } 688 }
689 anchor_.set_next_page(&anchor_); 689 anchor_.set_next_page(&anchor_);
690 anchor_.set_prev_page(&anchor_); 690 anchor_.set_prev_page(&anchor_);
691 accounting_stats_.Clear(); 691 accounting_stats_.Clear();
692 } 692 }
693 693
694 694
695 #ifdef ENABLE_HEAP_PROTECTION
696
697 void PagedSpace::Protect() {
698 Page* page = first_page_;
699 while (page->is_valid()) {
700 Isolate::Current()->memory_allocator()->ProtectChunkFromPage(page);
701 page = Isolate::Current()->memory_allocator()->
702 FindLastPageInSameChunk(page)->next_page();
703 }
704 }
705
706
707 void PagedSpace::Unprotect() {
708 Page* page = first_page_;
709 while (page->is_valid()) {
710 Isolate::Current()->memory_allocator()->UnprotectChunkFromPage(page);
711 page = Isolate::Current()->memory_allocator()->
712 FindLastPageInSameChunk(page)->next_page();
713 }
714 }
715
716 #endif
717
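The ENABLE_HEAP_PROTECTION block removed above walked the space page by page and asked the memory allocator to protect or unprotect whole chunks. On POSIX/Linux systems that kind of protection ultimately reduces to mprotect; the standalone sketch below illustrates only the protect/unprotect idea, it is not V8's MemoryAllocator API, and the mmap/page-size plumbing is an assumption made for the example.

    #include <sys/mman.h>   // mmap, mprotect
    #include <unistd.h>     // sysconf
    #include <cstdio>
    #include <cstdlib>

    int main() {
      const size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
      // Reserve one OS page of readable/writable memory as a stand-in chunk.
      void* chunk = mmap(NULL, page, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (chunk == MAP_FAILED) return EXIT_FAILURE;

      // "Protect": make the chunk inaccessible, as a heap-protection pass would.
      if (mprotect(chunk, page, PROT_NONE) != 0) return EXIT_FAILURE;

      // "Unprotect": restore access before the mutator touches the chunk again.
      if (mprotect(chunk, page, PROT_READ | PROT_WRITE) != 0) return EXIT_FAILURE;

      munmap(chunk, page);
      std::printf("protected and unprotected one %zu-byte page\n", page);
      return EXIT_SUCCESS;
    }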
718
719 MaybeObject* PagedSpace::FindObject(Address addr) { 695 MaybeObject* PagedSpace::FindObject(Address addr) {
720 // Note: this function can only be called on precisely swept spaces. 696 // Note: this function can only be called on precisely swept spaces.
721 ASSERT(!heap()->mark_compact_collector()->in_use()); 697 ASSERT(!heap()->mark_compact_collector()->in_use());
722 698
723 if (!Contains(addr)) return Failure::Exception(); 699 if (!Contains(addr)) return Failure::Exception();
724 700
725 Page* p = Page::FromAddress(addr); 701 Page* p = Page::FromAddress(addr);
726 HeapObjectIterator it(p, NULL); 702 HeapObjectIterator it(p, NULL);
727 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { 703 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
728 Address cur = obj->address(); 704 Address cur = obj->address();
(...skipping 132 matching lines...)
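The FindObject fragment above relies on the page being precisely swept, so live objects can be visited in address order and compared against the target address. A generic sketch of that kind of linear cover search follows; the FakeObject layout is invented for illustration and is not V8's HeapObjectIterator.

    #include <cstddef>
    #include <vector>

    // Stand-in for a precisely swept page: objects of known sizes laid out
    // back to back, with no garbage or free gaps between them.
    struct FakeObject { size_t size; };

    // Returns the index of the object whose [start, start + size) range covers
    // `addr`, or -1 if the address is past the last allocated object. This
    // mirrors the FindObject loop: walk objects in address order and compare.
    int FindCovering(const std::vector<FakeObject>& page, size_t page_start,
                     size_t addr) {
      size_t cur = page_start;
      for (size_t i = 0; i < page.size(); ++i) {
        size_t next = cur + page[i].size;
        if (addr >= cur && addr < next) return static_cast<int>(i);
        cur = next;
      }
      return -1;
    }

    int main() {
      std::vector<FakeObject> page = {{16}, {32}, {8}};
      // 0x1014 falls inside the second object, which spans [0x1010, 0x1030).
      return FindCovering(page, 0x1000, 0x1014) == 1 ? 0 : 1;
    }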
861 if (base == NULL) return false; 837 if (base == NULL) return false;
862 838
863 chunk_base_ = base; 839 chunk_base_ = base;
864 chunk_size_ = static_cast<uintptr_t>(size); 840 chunk_size_ = static_cast<uintptr_t>(size);
865 LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_)); 841 LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));
866 842
867 ASSERT(initial_semispace_capacity <= maximum_semispace_capacity); 843 ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
868 ASSERT(IsPowerOf2(maximum_semispace_capacity)); 844 ASSERT(IsPowerOf2(maximum_semispace_capacity));
869 845
870 // Allocate and setup the histogram arrays if necessary. 846 // Allocate and setup the histogram arrays if necessary.
871 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
872 allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1); 847 allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
873 promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1); 848 promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
874 849
875 #define SET_NAME(name) allocated_histogram_[name].set_name(#name); \ 850 #define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
876 promoted_histogram_[name].set_name(#name); 851 promoted_histogram_[name].set_name(#name);
877 INSTANCE_TYPE_LIST(SET_NAME) 852 INSTANCE_TYPE_LIST(SET_NAME)
878 #undef SET_NAME 853 #undef SET_NAME
879 #endif
880 854
881 ASSERT(maximum_semispace_capacity == heap()->ReservedSemiSpaceSize()); 855 ASSERT(maximum_semispace_capacity == heap()->ReservedSemiSpaceSize());
882 ASSERT(static_cast<intptr_t>(chunk_size_) >= 856 ASSERT(static_cast<intptr_t>(chunk_size_) >=
883 2 * heap()->ReservedSemiSpaceSize()); 857 2 * heap()->ReservedSemiSpaceSize());
884 ASSERT(IsAddressAligned(chunk_base_, 2 * maximum_semispace_capacity, 0)); 858 ASSERT(IsAddressAligned(chunk_base_, 2 * maximum_semispace_capacity, 0));
885 859
886 if (!to_space_.Setup(chunk_base_, 860 if (!to_space_.Setup(chunk_base_,
887 initial_semispace_capacity, 861 initial_semispace_capacity,
888 maximum_semispace_capacity)) { 862 maximum_semispace_capacity)) {
889 return false; 863 return false;
890 } 864 }
891 if (!from_space_.Setup(chunk_base_ + maximum_semispace_capacity, 865 if (!from_space_.Setup(chunk_base_ + maximum_semispace_capacity,
892 initial_semispace_capacity, 866 initial_semispace_capacity,
893 maximum_semispace_capacity)) { 867 maximum_semispace_capacity)) {
894 return false; 868 return false;
895 } 869 }
896 870
897 start_ = chunk_base_; 871 start_ = chunk_base_;
898 address_mask_ = ~(2 * maximum_semispace_capacity - 1); 872 address_mask_ = ~(2 * maximum_semispace_capacity - 1);
899 object_mask_ = address_mask_ | kHeapObjectTagMask; 873 object_mask_ = address_mask_ | kHeapObjectTagMask;
900 object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag; 874 object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;
901 875
902 ResetAllocationInfo(); 876 ResetAllocationInfo();
903 877
904 return true; 878 return true;
905 } 879 }
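Setup ends by deriving address_mask_ and object_mask_ from the semispace capacity; because the chunk is asserted to be aligned to twice a power-of-two capacity, testing whether an address lies in new space needs only a mask and a compare. A minimal sketch of that arithmetic with made-up sizes (not V8's constants):

    #include <cassert>
    #include <cstdint>

    int main() {
      // Assume a 1 MB semispace and a chunk base aligned to 2 * capacity,
      // mirroring ASSERT(IsAddressAligned(chunk_base_, 2 * capacity, 0)) above.
      const uintptr_t capacity = 1u << 20;
      const uintptr_t start = 4 * 2 * capacity;            // some aligned base
      const uintptr_t address_mask = ~(2 * capacity - 1);  // clears offset bits

      // Any address inside [start, start + 2 * capacity) maps back to start.
      uintptr_t inside = start + capacity + 123;
      uintptr_t outside = start + 2 * capacity + 123;
      assert((inside & address_mask) == start);
      assert((outside & address_mask) != start);
      return 0;
    }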
906 880
907 881
908 void NewSpace::TearDown() { 882 void NewSpace::TearDown() {
909 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
910 if (allocated_histogram_) { 883 if (allocated_histogram_) {
911 DeleteArray(allocated_histogram_); 884 DeleteArray(allocated_histogram_);
912 allocated_histogram_ = NULL; 885 allocated_histogram_ = NULL;
913 } 886 }
914 if (promoted_histogram_) { 887 if (promoted_histogram_) {
915 DeleteArray(promoted_histogram_); 888 DeleteArray(promoted_histogram_);
916 promoted_histogram_ = NULL; 889 promoted_histogram_ = NULL;
917 } 890 }
918 #endif
919 891
920 start_ = NULL; 892 start_ = NULL;
921 allocation_info_.top = NULL; 893 allocation_info_.top = NULL;
922 allocation_info_.limit = NULL; 894 allocation_info_.limit = NULL;
923 895
924 to_space_.TearDown(); 896 to_space_.TearDown();
925 from_space_.TearDown(); 897 from_space_.TearDown();
926 898
927 LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_)); 899 LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));
928 heap()->isolate()->memory_allocator()->FreeMemory( 900 heap()->isolate()->memory_allocator()->FreeMemory(
929 chunk_base_, 901 chunk_base_,
930 static_cast<size_t>(chunk_size_), 902 static_cast<size_t>(chunk_size_),
931 NOT_EXECUTABLE); 903 NOT_EXECUTABLE);
932 chunk_base_ = NULL; 904 chunk_base_ = NULL;
933 chunk_size_ = 0; 905 chunk_size_ = 0;
934 } 906 }
935 907
936 908
937 #ifdef ENABLE_HEAP_PROTECTION
938
939 void NewSpace::Protect() {
940 heap()->isolate()->memory_allocator()->Protect(ToSpaceStart(), Capacity());
941 heap()->isolate()->memory_allocator()->Protect(FromSpaceStart(), Capacity());
942 }
943
944
945 void NewSpace::Unprotect() {
946 heap()->isolate()->memory_allocator()->Unprotect(ToSpaceStart(), Capacity(),
947 to_space_.executable());
948 heap()->isolate()->memory_allocator()->Unprotect(FromSpaceStart(), Capacity(),
949 from_space_.executable());
950 }
951
952 #endif
953
954
955 void NewSpace::Flip() { 909 void NewSpace::Flip() {
956 SemiSpace::Swap(&from_space_, &to_space_); 910 SemiSpace::Swap(&from_space_, &to_space_);
957 } 911 }
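Flip only swaps the two SemiSpace objects; in a copying collector that swap is what exchanges the roles of from-space and to-space between cycles. A schematic, self-contained sketch of the idea (the SemiSpaceSketch type and addresses are invented for illustration, not V8's SemiSpace class):

    #include <cstdint>
    #include <utility>

    // Minimal stand-in for a semispace: a base address and a bump pointer.
    struct SemiSpaceSketch {
      uintptr_t start;
      uintptr_t top;  // next free byte
    };

    // Exchanging the two descriptors swaps the roles of the spaces; which
    // direction objects are then copied depends on the collector's phase.
    void Flip(SemiSpaceSketch* from, SemiSpaceSketch* to) {
      std::swap(*from, *to);
    }

    int main() {
      SemiSpaceSketch to_space{0x100000, 0x100000};
      SemiSpaceSketch from_space{0x200000, 0x240000};
      Flip(&from_space, &to_space);
      // After the flip, to-space is backed by what used to be from-space.
      return to_space.start == 0x200000 ? 0 : 1;
    }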
958 912
959 913
960 void NewSpace::Grow() { 914 void NewSpace::Grow() {
961 ASSERT(Capacity() < MaximumCapacity()); 915 ASSERT(Capacity() < MaximumCapacity());
962 if (to_space_.Grow()) { 916 if (to_space_.Grow()) {
963 // Only grow from space if we managed to grow to space. 917 // Only grow from space if we managed to grow to space.
964 if (!from_space_.Grow()) { 918 if (!from_space_.Grow()) {
(...skipping 481 matching lines...)
1446 CASE(BUILTIN); 1400 CASE(BUILTIN);
1447 CASE(LOAD_IC); 1401 CASE(LOAD_IC);
1448 CASE(KEYED_LOAD_IC); 1402 CASE(KEYED_LOAD_IC);
1449 CASE(STORE_IC); 1403 CASE(STORE_IC);
1450 CASE(KEYED_STORE_IC); 1404 CASE(KEYED_STORE_IC);
1451 CASE(CALL_IC); 1405 CASE(CALL_IC);
1452 CASE(KEYED_CALL_IC); 1406 CASE(KEYED_CALL_IC);
1453 CASE(UNARY_OP_IC); 1407 CASE(UNARY_OP_IC);
1454 CASE(BINARY_OP_IC); 1408 CASE(BINARY_OP_IC);
1455 CASE(COMPARE_IC); 1409 CASE(COMPARE_IC);
1410 CASE(TO_BOOLEAN_IC);
1456 } 1411 }
1457 } 1412 }
1458 1413
1459 #undef CASE 1414 #undef CASE
1460 1415
1461 PrintF("\n Code kind histograms: \n"); 1416 PrintF("\n Code kind histograms: \n");
1462 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) { 1417 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1463 if (isolate->code_kind_statistics()[i] > 0) { 1418 if (isolate->code_kind_statistics()[i] > 0) {
1464 PrintF(" %-20s: %10d bytes\n", table[i], 1419 PrintF(" %-20s: %10d bytes\n", table[i],
1465 isolate->code_kind_statistics()[i]); 1420 isolate->code_kind_statistics()[i]);
(...skipping 47 matching lines...)
1513 } 1468 }
1514 1469
1515 if (FLAG_collect_heap_spill_statistics && print_spill) { 1470 if (FLAG_collect_heap_spill_statistics && print_spill) {
1516 isolate->js_spill_information()->Print(); 1471 isolate->js_spill_information()->Print();
1517 } 1472 }
1518 } 1473 }
1519 #endif // DEBUG 1474 #endif // DEBUG
1520 1475
1521 1476
1522 // Support for statistics gathering for --heap-stats and --log-gc. 1477 // Support for statistics gathering for --heap-stats and --log-gc.
1523 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1524 void NewSpace::ClearHistograms() { 1478 void NewSpace::ClearHistograms() {
1525 for (int i = 0; i <= LAST_TYPE; i++) { 1479 for (int i = 0; i <= LAST_TYPE; i++) {
1526 allocated_histogram_[i].clear(); 1480 allocated_histogram_[i].clear();
1527 promoted_histogram_[i].clear(); 1481 promoted_histogram_[i].clear();
1528 } 1482 }
1529 } 1483 }
1530 1484
1531 // Because the copying collector does not touch garbage objects, we iterate 1485 // Because the copying collector does not touch garbage objects, we iterate
1532 // the new space before a collection to get a histogram of allocated objects. 1486 // the new space before a collection to get a histogram of allocated objects.
1533 // This only happens (1) when compiled with DEBUG and the --heap-stats flag is 1487 // This only happens when --log-gc flag is set.
1534 // set, or when compiled with ENABLE_LOGGING_AND_PROFILING and the --log-gc
1535 // flag is set.
1536 void NewSpace::CollectStatistics() { 1488 void NewSpace::CollectStatistics() {
1537 ClearHistograms(); 1489 ClearHistograms();
1538 SemiSpaceIterator it(this); 1490 SemiSpaceIterator it(this);
1539 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) 1491 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
1540 RecordAllocation(obj); 1492 RecordAllocation(obj);
1541 } 1493 }
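CollectStatistics clears the per-type histograms and then walks the semispace, crediting each live object to its instance type. A compact sketch of the same clear-then-record pattern, with an invented Bucket type standing in for HistogramInfo:

    #include <cstdio>

    // A HistogramInfo-style bucket: how many objects of a given type were seen
    // and how many bytes they occupied (cf. increment_number/increment_bytes).
    struct Bucket {
      int number = 0;
      int bytes = 0;
      void clear() { number = 0; bytes = 0; }
      void record(int size) { ++number; bytes += size; }
    };

    int main() {
      const int kTypes = 3;                                   // stand-in for LAST_TYPE + 1
      Bucket histogram[kTypes];
      for (int i = 0; i < kTypes; ++i) histogram[i].clear();  // ClearHistograms()

      // Pretend we iterated a semispace and saw these (type, size) pairs.
      int sample[][2] = {{0, 16}, {2, 48}, {0, 24}};
      for (auto& s : sample) histogram[s[0]].record(s[1]);    // RecordAllocation()

      for (int i = 0; i < kTypes; ++i) {
        if (histogram[i].number > 0)
          std::printf("type %d: %d objects, %d bytes\n", i,
                      histogram[i].number, histogram[i].bytes);
      }
      return 0;
    }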
1542 1494
1543 1495
1544 #ifdef ENABLE_LOGGING_AND_PROFILING
1545 static void DoReportStatistics(Isolate* isolate, 1496 static void DoReportStatistics(Isolate* isolate,
1546 HistogramInfo* info, const char* description) { 1497 HistogramInfo* info, const char* description) {
1547 LOG(isolate, HeapSampleBeginEvent("NewSpace", description)); 1498 LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
1548 // Lump all the string types together. 1499 // Lump all the string types together.
1549 int string_number = 0; 1500 int string_number = 0;
1550 int string_bytes = 0; 1501 int string_bytes = 0;
1551 #define INCREMENT(type, size, name, camel_name) \ 1502 #define INCREMENT(type, size, name, camel_name) \
1552 string_number += info[type].number(); \ 1503 string_number += info[type].number(); \
1553 string_bytes += info[type].bytes(); 1504 string_bytes += info[type].bytes();
1554 STRING_TYPE_LIST(INCREMENT) 1505 STRING_TYPE_LIST(INCREMENT)
1555 #undef INCREMENT 1506 #undef INCREMENT
1556 if (string_number > 0) { 1507 if (string_number > 0) {
1557 LOG(isolate, 1508 LOG(isolate,
1558 HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes)); 1509 HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
1559 } 1510 }
1560 1511
1561 // Then do the other types. 1512 // Then do the other types.
1562 for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) { 1513 for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
1563 if (info[i].number() > 0) { 1514 if (info[i].number() > 0) {
1564 LOG(isolate, 1515 LOG(isolate,
1565 HeapSampleItemEvent(info[i].name(), info[i].number(), 1516 HeapSampleItemEvent(info[i].name(), info[i].number(),
1566 info[i].bytes())); 1517 info[i].bytes()));
1567 } 1518 }
1568 } 1519 }
1569 LOG(isolate, HeapSampleEndEvent("NewSpace", description)); 1520 LOG(isolate, HeapSampleEndEvent("NewSpace", description));
1570 } 1521 }
1571 #endif // ENABLE_LOGGING_AND_PROFILING
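DoReportStatistics and the SET_NAME block earlier both expand a single type list (STRING_TYPE_LIST, INSTANCE_TYPE_LIST) with different per-entry macros, the classic X-macro pattern. A self-contained sketch with an invented three-entry list, showing how one list can generate both an enum and a parallel name table, analogous to how SET_NAME names both histogram arrays from one list:

    #include <cstdio>

    // The list is defined once and expanded with different macros below.
    #define DEMO_TYPE_LIST(V) \
      V(SEQ_STRING)           \
      V(CONS_STRING)          \
      V(FIXED_ARRAY)

    enum DemoType {
    #define DEFINE_ENUM(name) name,
      DEMO_TYPE_LIST(DEFINE_ENUM)
    #undef DEFINE_ENUM
      DEMO_TYPE_COUNT
    };

    static const char* demo_names[DEMO_TYPE_COUNT] = {
    #define DEFINE_NAME(name) #name,
      DEMO_TYPE_LIST(DEFINE_NAME)
    #undef DEFINE_NAME
    };

    int main() {
      // One list, two expansions: an enum for indexing and a parallel name table.
      for (int i = 0; i < DEMO_TYPE_COUNT; ++i)
        std::printf("%d: %s\n", i, demo_names[i]);
      return 0;
    }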
1572 1522
1573 1523
1574 void NewSpace::ReportStatistics() { 1524 void NewSpace::ReportStatistics() {
1575 #ifdef DEBUG 1525 #ifdef DEBUG
1576 if (FLAG_heap_stats) { 1526 if (FLAG_heap_stats) {
1577 float pct = static_cast<float>(Available()) / Capacity(); 1527 float pct = static_cast<float>(Available()) / Capacity();
1578 PrintF(" capacity: %" V8_PTR_PREFIX "d" 1528 PrintF(" capacity: %" V8_PTR_PREFIX "d"
1579 ", available: %" V8_PTR_PREFIX "d, %%%d\n", 1529 ", available: %" V8_PTR_PREFIX "d, %%%d\n",
1580 Capacity(), Available(), static_cast<int>(pct*100)); 1530 Capacity(), Available(), static_cast<int>(pct*100));
1581 PrintF("\n Object Histogram:\n"); 1531 PrintF("\n Object Histogram:\n");
1582 for (int i = 0; i <= LAST_TYPE; i++) { 1532 for (int i = 0; i <= LAST_TYPE; i++) {
1583 if (allocated_histogram_[i].number() > 0) { 1533 if (allocated_histogram_[i].number() > 0) {
1584 PrintF(" %-34s%10d (%10d bytes)\n", 1534 PrintF(" %-34s%10d (%10d bytes)\n",
1585 allocated_histogram_[i].name(), 1535 allocated_histogram_[i].name(),
1586 allocated_histogram_[i].number(), 1536 allocated_histogram_[i].number(),
1587 allocated_histogram_[i].bytes()); 1537 allocated_histogram_[i].bytes());
1588 } 1538 }
1589 } 1539 }
1590 PrintF("\n"); 1540 PrintF("\n");
1591 } 1541 }
1592 #endif // DEBUG 1542 #endif // DEBUG
1593 1543
1594 #ifdef ENABLE_LOGGING_AND_PROFILING
1595 if (FLAG_log_gc) { 1544 if (FLAG_log_gc) {
1596 Isolate* isolate = ISOLATE; 1545 Isolate* isolate = ISOLATE;
1597 DoReportStatistics(isolate, allocated_histogram_, "allocated"); 1546 DoReportStatistics(isolate, allocated_histogram_, "allocated");
1598 DoReportStatistics(isolate, promoted_histogram_, "promoted"); 1547 DoReportStatistics(isolate, promoted_histogram_, "promoted");
1599 } 1548 }
1600 #endif // ENABLE_LOGGING_AND_PROFILING
1601 } 1549 }
1602 1550
1603 1551
1604 void NewSpace::RecordAllocation(HeapObject* obj) { 1552 void NewSpace::RecordAllocation(HeapObject* obj) {
1605 InstanceType type = obj->map()->instance_type(); 1553 InstanceType type = obj->map()->instance_type();
1606 ASSERT(0 <= type && type <= LAST_TYPE); 1554 ASSERT(0 <= type && type <= LAST_TYPE);
1607 allocated_histogram_[type].increment_number(1); 1555 allocated_histogram_[type].increment_number(1);
1608 allocated_histogram_[type].increment_bytes(obj->Size()); 1556 allocated_histogram_[type].increment_bytes(obj->Size());
1609 } 1557 }
1610 1558
1611 1559
1612 void NewSpace::RecordPromotion(HeapObject* obj) { 1560 void NewSpace::RecordPromotion(HeapObject* obj) {
1613 InstanceType type = obj->map()->instance_type(); 1561 InstanceType type = obj->map()->instance_type();
1614 ASSERT(0 <= type && type <= LAST_TYPE); 1562 ASSERT(0 <= type && type <= LAST_TYPE);
1615 promoted_histogram_[type].increment_number(1); 1563 promoted_histogram_[type].increment_number(1);
1616 promoted_histogram_[type].increment_bytes(obj->Size()); 1564 promoted_histogram_[type].increment_bytes(obj->Size());
1617 } 1565 }
1618 #endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1619 1566
1620 // ----------------------------------------------------------------------------- 1567 // -----------------------------------------------------------------------------
1621 // Free lists for old object spaces implementation 1568 // Free lists for old object spaces implementation
1622 1569
1623 void FreeListNode::set_size(Heap* heap, int size_in_bytes) { 1570 void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
1624 ASSERT(size_in_bytes > 0); 1571 ASSERT(size_in_bytes > 0);
1625 ASSERT(IsAligned(size_in_bytes, kPointerSize)); 1572 ASSERT(IsAligned(size_in_bytes, kPointerSize));
1626 1573
1627 // We write a map and possibly size information to the block. If the block 1574 // We write a map and possibly size information to the block. If the block
1628 // is big enough to be a FreeSpace with at least one extra word (the next 1575 // is big enough to be a FreeSpace with at least one extra word (the next
(...skipping 692 matching lines...)
2321 page_count_ = 0; 2268 page_count_ = 0;
2322 objects_size_ = 0; 2269 objects_size_ = 0;
2323 return true; 2270 return true;
2324 } 2271 }
2325 2272
2326 2273
2327 void LargeObjectSpace::TearDown() { 2274 void LargeObjectSpace::TearDown() {
2328 while (first_page_ != NULL) { 2275 while (first_page_ != NULL) {
2329 LargePage* page = first_page_; 2276 LargePage* page = first_page_;
2330 first_page_ = first_page_->next_page(); 2277 first_page_ = first_page_->next_page();
2278 LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
2331 2279
2280 ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
2281 heap()->isolate()->memory_allocator()->PerformAllocationCallback(
2282 space, kAllocationActionFree, page->size());
2332 heap()->isolate()->memory_allocator()->Free(page); 2283 heap()->isolate()->memory_allocator()->Free(page);
2333 } 2284 }
2334 2285
2335 size_ = 0; 2286 size_ = 0;
2336 page_count_ = 0; 2287 page_count_ = 0;
2337 objects_size_ = 0; 2288 objects_size_ = 0;
2338 } 2289 }
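The new lines in TearDown log a DeleteEvent and notify memory allocation callbacks before each large page is freed, using 1 << identity() to turn the space index into its ObjectSpace flag. A rough, generic sketch of that observer pattern follows; the flag names and callback registry here are invented for illustration, not V8's API.

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Space flags as single bits, so (1 << identity) maps a space index to its
    // flag, as in the TearDown change above. Names are invented stand-ins.
    enum SpaceFlag { kNewSpaceFlag = 1 << 0, kOldSpaceFlag = 1 << 1, kLoSpaceFlag = 1 << 2 };
    enum AllocationAction { kActionAllocate, kActionFree };

    typedef void (*MemoryCallback)(int space_flag, AllocationAction action, size_t size);

    static std::vector<MemoryCallback> callbacks;

    // Rough analogue of notifying allocation callbacks: tell every registered
    // observer that `size` bytes in `space_flag` were allocated or freed.
    void NotifyCallbacks(int space_flag, AllocationAction action, size_t size) {
      for (size_t i = 0; i < callbacks.size(); ++i)
        callbacks[i](space_flag, action, size);
    }

    void LogFree(int space_flag, AllocationAction action, size_t size) {
      if (action == kActionFree)
        std::printf("freed %zu bytes in space flag %d\n", size, space_flag);
    }

    int main() {
      callbacks.push_back(LogFree);
      int lo_space_identity = 2;                 // pretend identity() == 2
      int flag = 1 << lo_space_identity;         // == kLoSpaceFlag
      NotifyCallbacks(flag, kActionFree, 4096);  // one page being torn down
      return 0;
    }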
2339 2290
2340 2291
2341 #ifdef ENABLE_HEAP_PROTECTION
2342
2343 void LargeObjectSpace::Protect() {
2344 LargeObjectChunk* chunk = first_chunk_;
2345 while (chunk != NULL) {
2346 heap()->isolate()->memory_allocator()->Protect(chunk->address(),
2347 chunk->size());
2348 chunk = chunk->next();
2349 }
2350 }
2351
2352
2353 void LargeObjectSpace::Unprotect() {
2354 LargeObjectChunk* chunk = first_chunk_;
2355 while (chunk != NULL) {
2356 bool is_code = chunk->GetObject()->IsCode();
2357 heap()->isolate()->memory_allocator()->Unprotect(chunk->address(),
2358 chunk->size(), is_code ? EXECUTABLE : NOT_EXECUTABLE);
2359 chunk = chunk->next();
2360 }
2361 }
2362
2363 #endif
2364
2365 MaybeObject* LargeObjectSpace::AllocateRawInternal(int object_size, 2292 MaybeObject* LargeObjectSpace::AllocateRawInternal(int object_size,
2366 Executability executable) { 2293 Executability executable) {
2367 // Check if we want to force a GC before growing the old space further. 2294 // Check if we want to force a GC before growing the old space further.
2368 // If so, fail the allocation. 2295 // If so, fail the allocation.
2369 if (!heap()->always_allocate() && 2296 if (!heap()->always_allocate() &&
2370 heap()->OldGenerationAllocationLimitReached()) { 2297 heap()->OldGenerationAllocationLimitReached()) {
2371 return Failure::RetryAfterGC(identity()); 2298 return Failure::RetryAfterGC(identity());
2372 } 2299 }
2373 2300
2374 // TODO(gc) isolates merge 2301 // TODO(gc) isolates merge
(...skipping 195 matching lines...)
2570 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) { 2497 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
2571 if (obj->IsCode()) { 2498 if (obj->IsCode()) {
2572 Code* code = Code::cast(obj); 2499 Code* code = Code::cast(obj);
2573 isolate->code_kind_statistics()[code->kind()] += code->Size(); 2500 isolate->code_kind_statistics()[code->kind()] += code->Size();
2574 } 2501 }
2575 } 2502 }
2576 } 2503 }
2577 #endif // DEBUG 2504 #endif // DEBUG
2578 2505
2579 } } // namespace v8::internal 2506 } } // namespace v8::internal