Chromium Code Reviews

Diff: src/heap.h

Issue 430503007: Rename ASSERT* to DCHECK*. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: REBASE and fixes Created 6 years, 4 months ago
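A note on semantics: the rename is mechanical, and DCHECK*, like the old ASSERT*, is a debug-only check that compiles away in release builds. As a rough illustration (a minimal, self-contained sketch, not V8's actual macro definition), a DCHECK-style macro looks like this:

// Minimal sketch of a debug-only check macro; illustration only.
#include <cstdio>
#include <cstdlib>

#ifdef DEBUG
#define DCHECK(condition)                                          \
  do {                                                             \
    if (!(condition)) {                                            \
      std::fprintf(stderr, "%s:%d: Check failed: %s.\n", __FILE__, \
                   __LINE__, #condition);                          \
      std::abort();                                                \
    }                                                              \
  } while (false)
#else
#define DCHECK(condition) ((void)0)
#endif

int main() {
  int* emergency_stack = nullptr;
  DCHECK(emergency_stack == nullptr);  // Checked only when DEBUG is defined.
  return 0;
}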
  1  // Copyright 2012 the V8 project authors. All rights reserved.
  2  // Use of this source code is governed by a BSD-style license that can be
  3  // found in the LICENSE file.
  4
  5  #ifndef V8_HEAP_H_
  6  #define V8_HEAP_H_
  7
  8  #include <cmath>
  9
 10  #include "src/allocation.h"
(...skipping 372 matching lines...)
 383    explicit PromotionQueue(Heap* heap)
 384        : front_(NULL),
 385          rear_(NULL),
 386          limit_(NULL),
 387          emergency_stack_(0),
 388          heap_(heap) { }
 389
 390    void Initialize();
 391
 392    void Destroy() {
-393      ASSERT(is_empty());
+393      DCHECK(is_empty());
 394      delete emergency_stack_;
 395      emergency_stack_ = NULL;
 396    }
 397
 398    inline void ActivateGuardIfOnTheSamePage();
 399
 400    Page* GetHeadPage() {
 401      return Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
 402    }
 403
 404    void SetNewLimit(Address limit) {
 405      if (!guard_) {
 406        return;
 407      }
 408
-409      ASSERT(GetHeadPage() == Page::FromAllocationTop(limit));
+409      DCHECK(GetHeadPage() == Page::FromAllocationTop(limit));
 410      limit_ = reinterpret_cast<intptr_t*>(limit);
 411
 412      if (limit_ <= rear_) {
 413        return;
 414      }
 415
 416      RelocateQueueHead();
 417    }
 418
 419    bool IsBelowPromotionQueue(Address to_space_top) {
 420      // If the given to-space top pointer and the head of the promotion queue
 421      // are not on the same page, then the to-space objects are below the
 422      // promotion queue.
 423      if (GetHeadPage() != Page::FromAddress(to_space_top)) {
 424        return true;
 425      }
 426      // If the to space top pointer is smaller or equal than the promotion
 427      // queue head, then the to-space objects are below the promotion queue.
 428      return reinterpret_cast<intptr_t*>(to_space_top) <= rear_;
 429    }
 430
 431    bool is_empty() {
 432      return (front_ == rear_) &&
 433             (emergency_stack_ == NULL || emergency_stack_->length() == 0);
 434    }
 435
 436    inline void insert(HeapObject* target, int size);
 437
 438    void remove(HeapObject** target, int* size) {
-439      ASSERT(!is_empty());
+439      DCHECK(!is_empty());
 440      if (front_ == rear_) {
 441        Entry e = emergency_stack_->RemoveLast();
 442        *target = e.obj_;
 443        *size = e.size_;
 444        return;
 445      }
 446
 447      if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(front_))) {
 448        NewSpacePage* front_page =
 449            NewSpacePage::FromAddress(reinterpret_cast<Address>(front_));
-450        ASSERT(!front_page->prev_page()->is_anchor());
+450        DCHECK(!front_page->prev_page()->is_anchor());
 451        front_ =
 452            reinterpret_cast<intptr_t*>(front_page->prev_page()->area_end());
 453      }
 454      *target = reinterpret_cast<HeapObject*>(*(--front_));
 455      *size = static_cast<int>(*(--front_));
 456      // Assert no underflow.
 457      SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_),
 458                                  reinterpret_cast<Address>(front_));
 459    }
 460
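An aside on the remove() logic above, for readers unfamiliar with the queue layout: the reads imply that each entry occupies two machine words, the object pointer above its size, and that both the push and pop cursors move toward lower addresses. The following toy model is an illustration only (invented names, a std::vector standing in for to-space memory, no page wrapping and no emergency stack), not V8's PromotionQueue:

#include <cassert>
#include <cstddef>
#include <stdint.h>
#include <vector>

class ToyPromotionQueue {
 public:
  explicit ToyPromotionQueue(size_t capacity_words)
      : slots_(capacity_words),
        front_(slots_.data() + capacity_words),
        rear_(slots_.data() + capacity_words) {}

  void insert(void* target, int size) {
    assert(rear_ - slots_.data() >= 2);               // No overflow in the toy.
    *(--rear_) = reinterpret_cast<intptr_t>(target);  // Higher word: the object.
    *(--rear_) = size;                                // Lower word: its size.
  }

  void remove(void** target, int* size) {
    assert(front_ != rear_);                          // Queue must not be empty.
    *target = reinterpret_cast<void*>(*(--front_));   // Same read order as above.
    *size = static_cast<int>(*(--front_));
  }

 private:
  std::vector<intptr_t> slots_;  // Stands in for the unused tail of to-space.
  intptr_t* front_;              // Oldest entry; remove() walks this downward.
  intptr_t* rear_;               // Newest entry; insert() walks this downward.
};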
(...skipping 690 matching lines...)
 1151    static inline void CopyBlock(Address dst, Address src, int byte_size);
 1152
 1153    // Optimized version of memmove for blocks with pointer size aligned sizes and
 1154    // pointer size aligned addresses.
 1155    static inline void MoveBlock(Address dst, Address src, int byte_size);
 1156
 1157    // Check new space expansion criteria and expand semispaces if it was hit.
 1158    void CheckNewSpaceExpansionCriteria();
 1159
 1160    inline void IncrementPromotedObjectsSize(int object_size) {
-1161      ASSERT(object_size > 0);
+1161      DCHECK(object_size > 0);
 1162      promoted_objects_size_ += object_size;
 1163    }
 1164
 1165    inline void IncrementSemiSpaceCopiedObjectSize(int object_size) {
-1166      ASSERT(object_size > 0);
+1166      DCHECK(object_size > 0);
 1167      semi_space_copied_object_size_ += object_size;
 1168    }
 1169
 1170    inline void IncrementNodesDiedInNewSpace() {
 1171      nodes_died_in_new_space_++;
 1172    }
 1173
 1174    inline void IncrementNodesCopiedInNewSpace() {
 1175      nodes_copied_in_new_space_++;
 1176    }
 1177
 1178    inline void IncrementNodesPromoted() {
 1179      nodes_promoted_++;
 1180    }
 1181
 1182    inline void IncrementYoungSurvivorsCounter(int survived) {
-1183      ASSERT(survived >= 0);
+1183      DCHECK(survived >= 0);
 1184      survived_since_last_expansion_ += survived;
 1185    }
 1186
 1187    inline bool NextGCIsLikelyToBeFull() {
 1188      if (FLAG_gc_global) return true;
 1189
 1190      if (FLAG_stress_compaction && (gc_count_ & 1) != 0) return true;
 1191
 1192      intptr_t adjusted_allocation_limit =
 1193          old_generation_allocation_limit_ - new_space_.Capacity();
(...skipping 95 matching lines...)
 1289
 1290    // Completely clear the Instanceof cache (to stop it keeping objects alive
 1291    // around a GC).
 1292    inline void CompletelyClearInstanceofCache();
 1293
 1294    // The roots that have an index less than this are always in old space.
 1295    static const int kOldSpaceRoots = 0x20;
 1296
 1297    uint32_t HashSeed() {
 1298      uint32_t seed = static_cast<uint32_t>(hash_seed()->value());
-1299      ASSERT(FLAG_randomize_hashes || seed == 0);
+1299      DCHECK(FLAG_randomize_hashes || seed == 0);
 1300      return seed;
 1301    }
 1302
 1303    void SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
-1304      ASSERT(arguments_adaptor_deopt_pc_offset() == Smi::FromInt(0));
+1304      DCHECK(arguments_adaptor_deopt_pc_offset() == Smi::FromInt(0));
 1305      set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
 1306    }
 1307
 1308    void SetConstructStubDeoptPCOffset(int pc_offset) {
-1309      ASSERT(construct_stub_deopt_pc_offset() == Smi::FromInt(0));
+1309      DCHECK(construct_stub_deopt_pc_offset() == Smi::FromInt(0));
 1310      set_construct_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
 1311    }
 1312
 1313    void SetGetterStubDeoptPCOffset(int pc_offset) {
-1314      ASSERT(getter_stub_deopt_pc_offset() == Smi::FromInt(0));
+1314      DCHECK(getter_stub_deopt_pc_offset() == Smi::FromInt(0));
 1315      set_getter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
 1316    }
 1317
 1318    void SetSetterStubDeoptPCOffset(int pc_offset) {
-1319      ASSERT(setter_stub_deopt_pc_offset() == Smi::FromInt(0));
+1319      DCHECK(setter_stub_deopt_pc_offset() == Smi::FromInt(0));
 1320      set_setter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
 1321    }
 1322
 1323    // For post mortem debugging.
 1324    void RememberUnmappedPage(Address page, bool compacted);
 1325
 1326    // Global inline caching age: it is incremented on some GCs after context
 1327    // disposal. We use it to flush inline caches.
 1328    int global_ic_age() {
 1329      return global_ic_age_;
(...skipping 25 matching lines...)
 1355    enum {
 1356      FIRST_CODE_KIND_SUB_TYPE = LAST_TYPE + 1,
 1357      FIRST_FIXED_ARRAY_SUB_TYPE =
 1358          FIRST_CODE_KIND_SUB_TYPE + Code::NUMBER_OF_KINDS,
 1359      FIRST_CODE_AGE_SUB_TYPE =
 1360          FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1,
 1361      OBJECT_STATS_COUNT = FIRST_CODE_AGE_SUB_TYPE + Code::kCodeAgeCount + 1
 1362    };
 1363
 1364    void RecordObjectStats(InstanceType type, size_t size) {
-1365      ASSERT(type <= LAST_TYPE);
+1365      DCHECK(type <= LAST_TYPE);
 1366      object_counts_[type]++;
 1367      object_sizes_[type] += size;
 1368    }
 1369
 1370    void RecordCodeSubTypeStats(int code_sub_type, int code_age, size_t size) {
 1371      int code_sub_type_index = FIRST_CODE_KIND_SUB_TYPE + code_sub_type;
 1372      int code_age_index =
 1373          FIRST_CODE_AGE_SUB_TYPE + code_age - Code::kFirstCodeAge;
-1374      ASSERT(code_sub_type_index >= FIRST_CODE_KIND_SUB_TYPE &&
+1374      DCHECK(code_sub_type_index >= FIRST_CODE_KIND_SUB_TYPE &&
 1375             code_sub_type_index < FIRST_CODE_AGE_SUB_TYPE);
-1376      ASSERT(code_age_index >= FIRST_CODE_AGE_SUB_TYPE &&
+1376      DCHECK(code_age_index >= FIRST_CODE_AGE_SUB_TYPE &&
 1377             code_age_index < OBJECT_STATS_COUNT);
 1378      object_counts_[code_sub_type_index]++;
 1379      object_sizes_[code_sub_type_index] += size;
 1380      object_counts_[code_age_index]++;
 1381      object_sizes_[code_age_index] += size;
 1382    }
 1383
 1384    void RecordFixedArraySubTypeStats(int array_sub_type, size_t size) {
-1385      ASSERT(array_sub_type <= LAST_FIXED_ARRAY_SUB_TYPE);
+1385      DCHECK(array_sub_type <= LAST_FIXED_ARRAY_SUB_TYPE);
 1386      object_counts_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type]++;
 1387      object_sizes_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type] += size;
 1388    }
 1389
 1390    void CheckpointObjectStats();
 1391
 1392    // We don't use a LockGuard here since we want to lock the heap
 1393    // only when FLAG_concurrent_recompilation is true.
 1394    class RelocationLock {
 1395     public:
(...skipping 162 matching lines...)
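For readers following the object-stats bookkeeping in the previous chunk: the Record* helpers all index one flat pair of arrays (object_counts_/object_sizes_) whose regions are laid out by that enum. Below is a self-contained sketch of the index arithmetic; the k* constants are made-up placeholders standing in for LAST_TYPE, Code::NUMBER_OF_KINDS, LAST_FIXED_ARRAY_SUB_TYPE and Code::kCodeAgeCount, and only the arithmetic mirrors the header.

#include <cstdio>

// Placeholder values chosen only for illustration; the real constants come
// from the InstanceType and Code definitions elsewhere in V8.
const int kLastType = 5;               // Stands in for LAST_TYPE.
const int kNumCodeKinds = 3;           // Stands in for Code::NUMBER_OF_KINDS.
const int kLastFixedArraySubType = 2;  // Stands in for LAST_FIXED_ARRAY_SUB_TYPE.
const int kCodeAgeCount = 4;           // Stands in for Code::kCodeAgeCount.

// Same layout as the enum in heap.h: one flat index space, four regions.
const int kFirstCodeKindSubType = kLastType + 1;
const int kFirstFixedArraySubType = kFirstCodeKindSubType + kNumCodeKinds;
const int kFirstCodeAgeSubType =
    kFirstFixedArraySubType + kLastFixedArraySubType + 1;
const int kObjectStatsCount = kFirstCodeAgeSubType + kCodeAgeCount + 1;

int main() {
  std::printf("instance types:        [0, %d]\n", kLastType);
  std::printf("code kind sub types:   [%d, %d)\n", kFirstCodeKindSubType,
              kFirstFixedArraySubType);
  std::printf("fixed array sub types: [%d, %d)\n", kFirstFixedArraySubType,
              kFirstCodeAgeSubType);
  std::printf("code age sub types:    [%d, %d)\n", kFirstCodeAgeSubType,
              kObjectStatsCount);
  return 0;
}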
 1558    int remembered_unmapped_pages_index_;
 1559    Address remembered_unmapped_pages_[kRememberedUnmappedPages];
 1560
 1561    // Total length of the strings we failed to flatten since the last GC.
 1562    int unflattened_strings_length_;
 1563
 1564  #define ROOT_ACCESSOR(type, name, camel_name)                                 \
 1565    inline void set_##name(type* value) {                                       \
 1566      /* The deserializer makes use of the fact that these common roots are */  \
 1567      /* never in new space and never on a page that is being compacted. */     \
-1568      ASSERT(k##camel_name##RootIndex >= kOldSpaceRoots || !InNewSpace(value)); \
+1568      DCHECK(k##camel_name##RootIndex >= kOldSpaceRoots || !InNewSpace(value)); \
 1569      roots_[k##camel_name##RootIndex] = value;                                 \
 1570    }
 1571    ROOT_LIST(ROOT_ACCESSOR)
 1572  #undef ROOT_ACCESSOR
 1573
 1574  #ifdef DEBUG
 1575    // If the --gc-interval flag is set to a positive value, this
 1576    // variable holds the value indicating the number of allocations
 1577    // remain until the next failure and garbage collection.
 1578    int allocation_timeout_;
(...skipping 89 matching lines...)
 1668      bool pass_isolate_;
 1669    };
 1670    List<GCEpilogueCallbackPair> gc_epilogue_callbacks_;
 1671
 1672    // Support for computing object sizes during GC.
 1673    HeapObjectCallback gc_safe_size_of_old_object_;
 1674    static int GcSafeSizeOfOldObject(HeapObject* object);
 1675
 1676    // Update the GC state. Called from the mark-compact collector.
 1677    void MarkMapPointersAsEncoded(bool encoded) {
-1678      ASSERT(!encoded);
+1678      DCHECK(!encoded);
 1679      gc_safe_size_of_old_object_ = &GcSafeSizeOfOldObject;
 1680    }
 1681
 1682    // Code that should be run before and after each GC. Includes some
 1683    // reporting/verification activities when compiled with DEBUG set.
 1684    void GarbageCollectionPrologue();
 1685    void GarbageCollectionEpilogue();
 1686
 1687    // Pretenuring decisions are made based on feedback collected during new
 1688    // space evacuation. Note that between feedback collection and calling this
(...skipping 30 matching lines...)
 1719        GarbageCollector collector,
 1720        const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
 1721
 1722    inline void UpdateOldSpaceLimits();
 1723
 1724    // Selects the proper allocation space depending on the given object
 1725    // size, pretenuring decision, and preferred old-space.
 1726    static AllocationSpace SelectSpace(int object_size,
 1727                                       AllocationSpace preferred_old_space,
 1728                                       PretenureFlag pretenure) {
-1729      ASSERT(preferred_old_space == OLD_POINTER_SPACE ||
+1729      DCHECK(preferred_old_space == OLD_POINTER_SPACE ||
 1730             preferred_old_space == OLD_DATA_SPACE);
 1731      if (object_size > Page::kMaxRegularHeapObjectSize) return LO_SPACE;
 1732      return (pretenure == TENURED) ? preferred_old_space : NEW_SPACE;
 1733    }
 1734
 1735    // Allocate an uninitialized object. The memory is non-executable if the
 1736    // hardware and OS allow. This is the single choke-point for allocations
 1737    // performed by the runtime and should not be bypassed (to extend this to
 1738    // inlined allocations, use the Heap::DisableInlineAllocation() support).
 1739    MUST_USE_RESULT inline AllocationResult AllocateRaw(
(...skipping 303 matching lines...)
 2043
 2044      int heap_size_mb = static_cast<int>(SizeOfObjects() / MB);
 2045      return heap_size_mb / kMbPerMs;
 2046    }
 2047
 2048    void AdvanceIdleIncrementalMarking(intptr_t step_size);
 2049
 2050    void ClearObjectStats(bool clear_last_time_stats = false);
 2051
 2052    void set_weak_object_to_code_table(Object* value) {
-2053      ASSERT(!InNewSpace(value));
+2053      DCHECK(!InNewSpace(value));
 2054      weak_object_to_code_table_ = value;
 2055    }
 2056
 2057    Object** weak_object_to_code_table_address() {
 2058      return &weak_object_to_code_table_;
 2059    }
 2060
 2061    inline void UpdateAllocationsHash(HeapObject* object);
 2062    inline void UpdateAllocationsHash(uint32_t value);
 2063    inline void PrintAlloctionsHash();
(...skipping 366 matching lines...)
 2430    int Lookup(Map* source, Name* name) {
 2431      if (!name->IsUniqueName()) return kAbsent;
 2432      int index = Hash(source, name);
 2433      Key& key = keys_[index];
 2434      if ((key.source == source) && (key.name == name)) return results_[index];
 2435      return kAbsent;
 2436    }
 2437
 2438    // Update an element in the cache.
 2439    void Update(Map* source, Name* name, int result) {
-2440      ASSERT(result != kAbsent);
+2440      DCHECK(result != kAbsent);
 2441      if (name->IsUniqueName()) {
 2442        int index = Hash(source, name);
 2443        Key& key = keys_[index];
 2444        key.source = source;
 2445        key.name = name;
 2446        results_[index] = result;
 2447      }
 2448    }
 2449
 2450    // Clear the cache.
(...skipping 84 matching lines...)
 2535  // JavaScript execution.
 2536  class IntrusiveMarking {
 2537   public:
 2538    static bool IsMarked(HeapObject* object) {
 2539      return (object->map_word().ToRawValue() & kNotMarkedBit) == 0;
 2540    }
 2541
 2542    static void ClearMark(HeapObject* object) {
 2543      uintptr_t map_word = object->map_word().ToRawValue();
 2544      object->set_map_word(MapWord::FromRawValue(map_word | kNotMarkedBit));
-2545      ASSERT(!IsMarked(object));
+2545      DCHECK(!IsMarked(object));
 2546    }
 2547
 2548    static void SetMark(HeapObject* object) {
 2549      uintptr_t map_word = object->map_word().ToRawValue();
 2550      object->set_map_word(MapWord::FromRawValue(map_word & ~kNotMarkedBit));
-2551      ASSERT(IsMarked(object));
+2551      DCHECK(IsMarked(object));
 2552    }
 2553
 2554    static Map* MapOfMarkedObject(HeapObject* object) {
 2555      uintptr_t map_word = object->map_word().ToRawValue();
 2556      return MapWord::FromRawValue(map_word | kNotMarkedBit).ToMap();
 2557    }
 2558
 2559    static int SizeOfMarkedObject(HeapObject* object) {
 2560      return object->SizeFromMap(MapOfMarkedObject(object));
 2561    }
(...skipping 59 matching lines...)
 2621    DisallowHeapAllocation no_allocation;  // i.e. no gc allowed.
 2622
 2623   private:
 2624    DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer);
 2625  };
 2626  #endif  // DEBUG
 2627
 2628  } }  // namespace v8::internal
 2629
 2630  #endif  // V8_HEAP_H_