Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 12 matching lines...) | |
| 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 27 | 27 |
| 28 #ifndef V8_HEAP_INL_H_ | 28 #ifndef V8_HEAP_INL_H_ |
| 29 #define V8_HEAP_INL_H_ | 29 #define V8_HEAP_INL_H_ |
| 30 | 30 |
| 31 #include "heap.h" | 31 #include "heap.h" |
| 32 #include "objects.h" | 32 #include "objects.h" |
| 33 #include "isolate.h" | |
| 33 #include "v8-counters.h" | 34 #include "v8-counters.h" |
| 34 #include "store-buffer.h" | 35 #include "store-buffer.h" |
| 35 #include "store-buffer-inl.h" | 36 #include "store-buffer-inl.h" |
| 36 | 37 |
| 37 namespace v8 { | 38 namespace v8 { |
| 38 namespace internal { | 39 namespace internal { |
| 39 | 40 |
| | 41 void PromotionQueue::insert(HeapObject* target, int size) { |
| | 42 *(--rear_) = reinterpret_cast<intptr_t>(target); |
| | 43 *(--rear_) = size; |
| | 44 // Assert no overflow into live objects. |
| | 45 ASSERT(reinterpret_cast<Address>(rear_) >= HEAP->new_space()->top()); |
| | 46 } |
| | 47 |
| | 48 |
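The new PromotionQueue::insert grows the queue downward from the top of to-space, pre-decrementing rear_ once per word, so the ASSERT checks that the queue has not collided with objects being allocated upward. A minimal sketch of the matching pop, assuming a front_ cursor that also walks downward (the real accessor lives in heap.h and is not part of this hunk):

```cpp
// Sketch only: dequeues the oldest pair in the same two-word layout
// insert() uses (object pointer written first, then size).
void PromotionQueue::remove(HeapObject** target, int* size) {
  *target = reinterpret_cast<HeapObject*>(*(--front_));
  *size = static_cast<int>(*(--front_));
  ASSERT(front_ >= rear_);  // Read cursor must not pass the write cursor.
}
```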
| 40 int Heap::MaxObjectSizeInPagedSpace() { | 49 int Heap::MaxObjectSizeInPagedSpace() { |
| 41 return Page::kMaxHeapObjectSize; | 50 return Page::kMaxHeapObjectSize; |
| 42 } | 51 } |
| 43 | 52 |
| 44 | 53 |
| 45 MaybeObject* Heap::AllocateStringFromUtf8(Vector<const char> str, | 54 MaybeObject* Heap::AllocateStringFromUtf8(Vector<const char> str, |
| 46 PretenureFlag pretenure) { | 55 PretenureFlag pretenure) { |
| 47 // Check for ASCII first since this is the common case. | 56 // Check for ASCII first since this is the common case. |
| 48 if (String::IsAscii(str.start(), str.length())) { | 57 if (String::IsAscii(str.start(), str.length())) { |
| 49 // If the string is ASCII, we do not need to convert the characters | 58 // If the string is ASCII, we do not need to convert the characters |
| (...skipping 91 matching lines...) | |
| 141 ASSERT(space != NEW_SPACE || | 150 ASSERT(space != NEW_SPACE || |
| 142 retry_space == OLD_POINTER_SPACE || | 151 retry_space == OLD_POINTER_SPACE || |
| 143 retry_space == OLD_DATA_SPACE || | 152 retry_space == OLD_DATA_SPACE || |
| 144 retry_space == LO_SPACE); | 153 retry_space == LO_SPACE); |
| 145 #ifdef DEBUG | 154 #ifdef DEBUG |
| 146 if (FLAG_gc_interval >= 0 && | 155 if (FLAG_gc_interval >= 0 && |
| 147 !disallow_allocation_failure_ && | 156 !disallow_allocation_failure_ && |
| 148 Heap::allocation_timeout_-- <= 0) { | 157 Heap::allocation_timeout_-- <= 0) { |
| 149 return Failure::RetryAfterGC(space); | 158 return Failure::RetryAfterGC(space); |
| 150 } | 159 } |
| 151 Counters::objs_since_last_full.Increment(); | 160 isolate_->counters()->objs_since_last_full()->Increment(); |
| 152 Counters::objs_since_last_young.Increment(); | 161 isolate_->counters()->objs_since_last_young()->Increment(); |
| 153 #endif | 162 #endif |
| 154 MaybeObject* result; | 163 MaybeObject* result; |
| 155 if (NEW_SPACE == space) { | 164 if (NEW_SPACE == space) { |
| 156 result = new_space_.AllocateRaw(size_in_bytes); | 165 result = new_space_.AllocateRaw(size_in_bytes); |
| 157 if (always_allocate() && result->IsFailure()) { | 166 if (always_allocate() && result->IsFailure()) { |
| 158 space = retry_space; | 167 space = retry_space; |
| 159 } else { | 168 } else { |
| 160 return result; | 169 return result; |
| 161 } | 170 } |
| 162 } | 171 } |
| (...skipping 46 matching lines...) | |
| 209 (*resource_addr)->Dispose(); | 218 (*resource_addr)->Dispose(); |
| 210 } | 219 } |
| 211 | 220 |
| 212 // Clear the resource pointer in the string. | 221 // Clear the resource pointer in the string. |
| 213 *resource_addr = NULL; | 222 *resource_addr = NULL; |
| 214 } | 223 } |
| 215 | 224 |
| 216 | 225 |
| 217 MaybeObject* Heap::AllocateRawMap() { | 226 MaybeObject* Heap::AllocateRawMap() { |
| 218 #ifdef DEBUG | 227 #ifdef DEBUG |
| 219 Counters::objs_since_last_full.Increment(); | 228 isolate_->counters()->objs_since_last_full()->Increment(); |
| 220 Counters::objs_since_last_young.Increment(); | 229 isolate_->counters()->objs_since_last_young()->Increment(); |
| 221 #endif | 230 #endif |
| 222 MaybeObject* result = map_space_->AllocateRaw(Map::kSize); | 231 MaybeObject* result = map_space_->AllocateRaw(Map::kSize); |
| 223 if (result->IsFailure()) old_gen_exhausted_ = true; | 232 if (result->IsFailure()) old_gen_exhausted_ = true; |
| 224 #ifdef DEBUG | 233 #ifdef DEBUG |
| 225 if (!result->IsFailure()) { | 234 if (!result->IsFailure()) { |
| 226 // Maps have their own alignment. | 235 // Maps have their own alignment. |
| 227 CHECK((reinterpret_cast<intptr_t>(result) & kMapAlignmentMask) == | 236 CHECK((reinterpret_cast<intptr_t>(result) & kMapAlignmentMask) == |
| 228 static_cast<intptr_t>(kHeapObjectTag)); | 237 static_cast<intptr_t>(kHeapObjectTag)); |
| 229 } | 238 } |
| 230 #endif | 239 #endif |
| 231 return result; | 240 return result; |
| 232 } | 241 } |
| 233 | 242 |
| 234 | 243 |
| 235 MaybeObject* Heap::AllocateRawCell() { | 244 MaybeObject* Heap::AllocateRawCell() { |
| 236 #ifdef DEBUG | 245 #ifdef DEBUG |
| 237 Counters::objs_since_last_full.Increment(); | 246 isolate_->counters()->objs_since_last_full()->Increment(); |
| 238 Counters::objs_since_last_young.Increment(); | 247 isolate_->counters()->objs_since_last_young()->Increment(); |
| 239 #endif | 248 #endif |
| 240 MaybeObject* result = cell_space_->AllocateRaw(JSGlobalPropertyCell::kSize); | 249 MaybeObject* result = cell_space_->AllocateRaw(JSGlobalPropertyCell::kSize); |
| 241 if (result->IsFailure()) old_gen_exhausted_ = true; | 250 if (result->IsFailure()) old_gen_exhausted_ = true; |
| 242 return result; | 251 return result; |
| 243 } | 252 } |
| 244 | 253 |
| 245 | 254 |
| 246 bool Heap::InNewSpace(Object* object) { | 255 bool Heap::InNewSpace(Object* object) { |
| 247 bool result = new_space_.Contains(object); | 256 bool result = new_space_.Contains(object); |
| 248 ASSERT(!result || // Either not in new space | 257 ASSERT(!result || // Either not in new space |
| (...skipping 12 matching lines...) | |
| 261 return new_space_.FromSpaceContains(object); | 270 return new_space_.FromSpaceContains(object); |
| 262 } | 271 } |
| 263 | 272 |
| 264 | 273 |
| 265 bool Heap::InToSpace(Object* object) { | 274 bool Heap::InToSpace(Object* object) { |
| 266 return new_space_.ToSpaceContains(object); | 275 return new_space_.ToSpaceContains(object); |
| 267 } | 276 } |
| 268 | 277 |
| 269 | 278 |
| 270 bool Heap::OldGenerationAllocationLimitReached() { | 279 bool Heap::OldGenerationAllocationLimitReached() { |
| 271 if (!IncrementalMarking::IsStopped()) return false; | 280 if (!incremental_marking()->IsStopped()) return false; |
| 272 return OldGenerationSpaceAvailable() < 0; | 281 return OldGenerationSpaceAvailable() < 0; |
| 273 } | 282 } |
| 274 | 283 |
| 275 | 284 |
| 276 bool Heap::ShouldBePromoted(Address old_address, int object_size) { | 285 bool Heap::ShouldBePromoted(Address old_address, int object_size) { |
| 277 // An object should be promoted if: | 286 // An object should be promoted if: |
| 278 // - the object has survived a scavenge operation or | 287 // - the object has survived a scavenge operation or |
| 279 // - to space is already 25% full. | 288 // - to space is already 25% full. |
| 280 return old_address < new_space_.age_mark() | 289 return old_address < new_space_.age_mark() |
| 281 || (new_space_.Size() + object_size) >= (new_space_.Capacity() >> 2); | 290 || (new_space_.Size() + object_size) >= (new_space_.Capacity() >> 2); |
| 282 } | 291 } |
| 283 | 292 |
| 284 | 293 |
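For concreteness on the 25% rule: with a to-space Capacity() of 2 MB, Capacity() >> 2 is 512 KB, so once new_space_.Size() plus the candidate object's size reaches 512 KB, every further scavenged object is promoted regardless of whether it is older than the age mark.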
| 285 void Heap::RecordWrite(Address address, int offset) { | 294 void Heap::RecordWrite(Address address, int offset) { |
| 286 if (!InNewSpace(address)) StoreBuffer::Mark(address + offset); | 295 if (!InNewSpace(address)) store_buffer_.Mark(address + offset); |
| 287 } | 296 } |
| 288 | 297 |
| 289 | 298 |
| 290 void Heap::RecordWrites(Address address, int start, int len) { | 299 void Heap::RecordWrites(Address address, int start, int len) { |
| 291 if (!InNewSpace(address)) { | 300 if (!InNewSpace(address)) { |
| 292 for (int i = 0; i < len; i++) { | 301 for (int i = 0; i < len; i++) { |
| 293 StoreBuffer::Mark(address + start + i * kPointerSize); | 302 store_buffer_.Mark(address + start + i * kPointerSize); |
| 294 } | 303 } |
| 295 } | 304 } |
| 296 } | 305 } |
| 297 | 306 |
| 298 | 307 |
| 299 OldSpace* Heap::TargetSpace(HeapObject* object) { | 308 OldSpace* Heap::TargetSpace(HeapObject* object) { |
| 300 InstanceType type = object->map()->instance_type(); | 309 InstanceType type = object->map()->instance_type(); |
| 301 AllocationSpace space = TargetSpaceId(type); | 310 AllocationSpace space = TargetSpaceId(type); |
| 302 return (space == OLD_POINTER_SPACE) | 311 return (space == OLD_POINTER_SPACE) |
| 303 ? old_pointer_space_ | 312 ? old_pointer_space_ |
| (...skipping 37 matching lines...) | |
| 341 void Heap::CopyBlockToOldSpaceAndUpdateWriteBarrier(Address dst, | 350 void Heap::CopyBlockToOldSpaceAndUpdateWriteBarrier(Address dst, |
| 342 Address src, | 351 Address src, |
| 343 int byte_size) { | 352 int byte_size) { |
| 344 ASSERT(IsAligned(byte_size, kPointerSize)); | 353 ASSERT(IsAligned(byte_size, kPointerSize)); |
| 345 | 354 |
| 346 for (int remaining = byte_size / kPointerSize; | 355 for (int remaining = byte_size / kPointerSize; |
| 347 remaining > 0; | 356 remaining > 0; |
| 348 remaining--) { | 357 remaining--) { |
| 349 Memory::Object_at(dst) = Memory::Object_at(src); | 358 Memory::Object_at(dst) = Memory::Object_at(src); |
| 350 | 359 |
| 351 if (Heap::InNewSpace(Memory::Object_at(dst))) { | 360 if (InNewSpace(Memory::Object_at(dst))) { |
| 352 StoreBuffer::Mark(dst); | 361 store_buffer_.Mark(dst); |
| 353 } | 362 } |
| 354 | 363 |
| 355 dst += kPointerSize; | 364 dst += kPointerSize; |
| 356 src += kPointerSize; | 365 src += kPointerSize; |
| 357 } | 366 } |
| 358 } | 367 } |
| 359 | 368 |
| 360 | 369 |
| 361 void Heap::MoveBlock(Address dst, Address src, int byte_size) { | 370 void Heap::MoveBlock(Address dst, Address src, int byte_size) { |
| 362 ASSERT(IsAligned(byte_size, kPointerSize)); | 371 ASSERT(IsAligned(byte_size, kPointerSize)); |
| (...skipping 11 matching lines...) | |
| 374 | 383 |
| 375 while (src_slot != end_slot) { | 384 while (src_slot != end_slot) { |
| 376 *dst_slot++ = *src_slot++; | 385 *dst_slot++ = *src_slot++; |
| 377 } | 386 } |
| 378 } else { | 387 } else { |
| 379 memmove(dst, src, byte_size); | 388 memmove(dst, src, byte_size); |
| 380 } | 389 } |
| 381 } | 390 } |
| 382 | 391 |
| 383 | 392 |
| | 393 void Heap::ScavengePointer(HeapObject** p) { |
| | 394 ScavengeObject(p, *p); |
| | 395 } |
| | 396 |
| | 397 |
| 384 void Heap::ScavengeObject(HeapObject** p, HeapObject* object) { | 398 void Heap::ScavengeObject(HeapObject** p, HeapObject* object) { |
| 385 ASSERT(InFromSpace(object)); | 399 ASSERT(HEAP->InFromSpace(object)); |
| 386 | 400 |
| 387 // We use the first word (where the map pointer usually is) of a heap | 401 // We use the first word (where the map pointer usually is) of a heap |
| 388 // object to record the forwarding pointer. A forwarding pointer can | 402 // object to record the forwarding pointer. A forwarding pointer can |
| 389 // point to an old space, the code space, or the to space of the new | 403 // point to an old space, the code space, or the to space of the new |
| 390 // generation. | 404 // generation. |
| 391 MapWord first_word = object->map_word(); | 405 MapWord first_word = object->map_word(); |
| 392 | 406 |
| 393 // If the first word is a forwarding address, the object has already been | 407 // If the first word is a forwarding address, the object has already been |
| 394 // copied. | 408 // copied. |
| 395 if (first_word.IsForwardingAddress()) { | 409 if (first_word.IsForwardingAddress()) { |
| 396 HeapObject* dest = first_word.ToForwardingAddress(); | 410 HeapObject* dest = first_word.ToForwardingAddress(); |
| 397 ASSERT(InFromSpace(*p)); | 411 ASSERT(HEAP->InFromSpace(*p)); |
| 398 *p = dest; | 412 *p = dest; |
| 399 Address slot = reinterpret_cast<Address>(p); | 413 Address slot = reinterpret_cast<Address>(p); |
| 400 if (Heap::InNewSpace(dest) && !Heap::InNewSpace(slot)) { | 414 if (HEAP->InNewSpace(dest) && !HEAP->InNewSpace(slot)) { |
| 401 ASSERT(InToSpace(dest)); | 415 ASSERT(HEAP->InToSpace(dest)); |
| 402 StoreBuffer::EnterDirectlyIntoStoreBuffer(slot); | 416 HEAP->store_buffer_.EnterDirectlyIntoStoreBuffer(slot); |
| 403 } | 417 } |
| 404 return; | 418 return; |
| 405 } | 419 } |
| 406 | 420 |
| 407 // Call the slow part of scavenge object. | 421 // Call the slow part of scavenge object. |
| 408 return ScavengeObjectSlow(p, object); | 422 return ScavengeObjectSlow(p, object); |
| 409 } | 423 } |
| 410 | 424 |
| 411 | 425 |
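The fast path above only reads forwarding pointers; they are installed by the copying slow path in heap.cc (Heap::ScavengeObjectSlow and the migration code, not shown in this hunk). A hedged sketch of that write side, using the MapWord helpers the fast path relies on; the function name is invented for illustration:

```cpp
// Illustrative sketch, modeled on heap.cc's migration code: copy the
// object's body, then overwrite the first word of the old copy with a
// forwarding MapWord so ScavengeObject() above can redirect every
// remaining reference to the new location.
static void MigrateObjectSketch(Heap* heap,
                                HeapObject* source,
                                HeapObject* target,
                                int size) {
  heap->CopyBlock(target->address(), source->address(), size);
  source->set_map_word(MapWord::FromForwardingAddress(target));
}
```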
| 412 bool Heap::CollectGarbage(AllocationSpace space) { | 426 bool Heap::CollectGarbage(AllocationSpace space) { |
| (...skipping 41 matching lines...) | |
| 454 } | 468 } |
| 455 } | 469 } |
| 456 ASSERT(amount_of_external_allocated_memory_ >= 0); | 470 ASSERT(amount_of_external_allocated_memory_ >= 0); |
| 457 return amount_of_external_allocated_memory_; | 471 return amount_of_external_allocated_memory_; |
| 458 } | 472 } |
| 459 | 473 |
| 460 | 474 |
| 461 void Heap::SetLastScriptId(Object* last_script_id) { | 475 void Heap::SetLastScriptId(Object* last_script_id) { |
| 462 roots_[kLastScriptIdRootIndex] = last_script_id; | 476 roots_[kLastScriptIdRootIndex] = last_script_id; |
| 463 } | 477 } |
| 464 | 478 |
Erik Corry, 2011/04/20 20:07:40: 2 blank lines
Vyacheslav Egorov (Chromium), 2011/04/24 11:24:08: Done.
| | 479 Isolate* Heap::isolate() { |
| | 480 return reinterpret_cast<Isolate*>(reinterpret_cast<intptr_t>(this) - |
| | 481 reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(4)->heap()) + 4); |
| | 482 } |
| | 483 |
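The new Heap::isolate() recovers the owning Isolate from a Heap that is embedded in it by value: `reinterpret_cast<Isolate*>(4)->heap()` pretends an Isolate sits at address 4 and asks where its heap member would be, yielding 4 plus the member's offset; subtracting that and adding the 4 back leaves `this` minus the offset. An equivalent sketch of the same arithmetic with the offset named:

```cpp
// Sketch: identical computation, made explicit.  offsetof() itself is
// avoided because Isolate is not a standard-layout type.
Isolate* Heap::isolate() {
  const size_t heap_offset =
      reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(4)->heap()) - 4;
  return reinterpret_cast<Isolate*>(
      reinterpret_cast<intptr_t>(this) - heap_offset);
}
```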
| 465 | 484 |
| 466 #ifdef DEBUG | 485 #ifdef DEBUG |
| 467 #define GC_GREEDY_CHECK() \ | 486 #define GC_GREEDY_CHECK() \ |
| 468 if (FLAG_gc_greedy) v8::internal::Heap::GarbageCollectionGreedyCheck() | 487 if (FLAG_gc_greedy) HEAP->GarbageCollectionGreedyCheck() |
| 469 #else | 488 #else |
| 470 #define GC_GREEDY_CHECK() { } | 489 #define GC_GREEDY_CHECK() { } |
| 471 #endif | 490 #endif |
| 472 | 491 |
| 473 | 492 |
| 474 // Calls the FUNCTION_CALL function and retries it up to three times | 493 // Calls the FUNCTION_CALL function and retries it up to three times |
| 475 // to guarantee that any allocations performed during the call will | 494 // to guarantee that any allocations performed during the call will |
| 476 // succeed if there's enough memory. | 495 // succeed if there's enough memory. |
| 477 | 496 |
| 478 // Warning: Do not use the identifiers __object__, __maybe_object__ or | 497 // Warning: Do not use the identifiers __object__, __maybe_object__ or |
| 479 // __scope__ in a call to this macro. | 498 // __scope__ in a call to this macro. |
| 480 | 499 |
| 481 #define CALL_AND_RETRY(FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY) \ | 500 #define CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY)\ |
| 482 do { \ | 501 do { \ |
| 483 GC_GREEDY_CHECK(); \ | 502 GC_GREEDY_CHECK(); \ |
| 484 MaybeObject* __maybe_object__ = FUNCTION_CALL; \ | 503 MaybeObject* __maybe_object__ = FUNCTION_CALL; \ |
| 485 Object* __object__ = NULL; \ | 504 Object* __object__ = NULL; \ |
| 486 if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \ | 505 if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \ |
| 487 if (__maybe_object__->IsOutOfMemory()) { \ | 506 if (__maybe_object__->IsOutOfMemory()) { \ |
| 488 v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_0", true);\ | 507 v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_0", true);\ |
| 489 } \ | 508 } \ |
| 490 if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY; \ | 509 if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY; \ |
| 491 Heap::CollectGarbage( \ | 510 ISOLATE->heap()->CollectGarbage(Failure::cast(__maybe_object__)-> \ |
| 492 Failure::cast(__maybe_object__)->allocation_space()); \ | 511 allocation_space()); \ |
| 493 __maybe_object__ = FUNCTION_CALL; \ | 512 __maybe_object__ = FUNCTION_CALL; \ |
| 494 if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \ | 513 if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \ |
| 495 if (__maybe_object__->IsOutOfMemory()) { \ | 514 if (__maybe_object__->IsOutOfMemory()) { \ |
| 496 v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_1", true);\ | 515 v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_1", true);\ |
| 497 } \ | 516 } \ |
| 498 if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY; \ | 517 if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY; \ |
| 499 Counters::gc_last_resort_from_handles.Increment(); \ | 518 ISOLATE->counters()->gc_last_resort_from_handles()->Increment(); \ |
| 500 Heap::CollectAllAvailableGarbage(); \ | 519 ISOLATE->heap()->CollectAllAvailableGarbage(); \ |
| 501 { \ | 520 { \ |
| 502 AlwaysAllocateScope __scope__; \ | 521 AlwaysAllocateScope __scope__; \ |
| 503 __maybe_object__ = FUNCTION_CALL; \ | 522 __maybe_object__ = FUNCTION_CALL; \ |
| 504 } \ | 523 } \ |
| 505 if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \ | 524 if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \ |
| 506 if (__maybe_object__->IsOutOfMemory() || \ | 525 if (__maybe_object__->IsOutOfMemory() || \ |
| 507 __maybe_object__->IsRetryAfterGC()) { \ | 526 __maybe_object__->IsRetryAfterGC()) { \ |
| 508 /* TODO(1181417): Fix this. */ \ | 527 /* TODO(1181417): Fix this. */ \ |
| 509 v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_2", true);\ | 528 v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_2", true);\ |
| 510 } \ | 529 } \ |
| 511 RETURN_EMPTY; \ | 530 RETURN_EMPTY; \ |
| 512 } while (false) | 531 } while (false) |
| 513 | 532 |
| 514 | 533 |
| 515 #define CALL_HEAP_FUNCTION(FUNCTION_CALL, TYPE) \ | 534 // TODO(isolates): cache isolate: either accept as a parameter or |
| 516 CALL_AND_RETRY(FUNCTION_CALL, \ | 535 // set to some known symbol (__CUR_ISOLATE__?) |
| 517 return Handle<TYPE>(TYPE::cast(__object__)), \ | 536 #define CALL_HEAP_FUNCTION(ISOLATE, FUNCTION_CALL, TYPE) \ |
| | 537 CALL_AND_RETRY(ISOLATE, \ |
| | 538 FUNCTION_CALL, \ |
| | 539 return Handle<TYPE>(TYPE::cast(__object__), ISOLATE), \ |
| 518 return Handle<TYPE>()) | 540 return Handle<TYPE>()) |
| 519 | 541 |
| 520 | 542 |
| 521 #define CALL_HEAP_FUNCTION_VOID(FUNCTION_CALL) \ | 543 #define CALL_HEAP_FUNCTION_VOID(ISOLATE, FUNCTION_CALL) \ |
| 522 CALL_AND_RETRY(FUNCTION_CALL, return, return) | 544 CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, return, return) |
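To see the new ISOLATE parameter in use, here is a hypothetical caller in the style of the Factory methods that are the macro's usual clients (the function name is invented for illustration):

```cpp
// Hypothetical usage sketch: a Failure from the allocator triggers up
// to two collections inside CALL_AND_RETRY before a fatal OOM; on
// success the raw result is wrapped in a Handle<String> on the isolate.
Handle<String> NewStringExample(Isolate* isolate,
                                Vector<const char> str,
                                PretenureFlag pretenure) {
  CALL_HEAP_FUNCTION(isolate,
                     isolate->heap()->AllocateStringFromUtf8(str, pretenure),
                     String);
}
```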
| 523 | 545 |
| 524 | 546 |
| 525 #ifdef DEBUG | 547 #ifdef DEBUG |
| 526 | 548 |
| 527 inline bool Heap::allow_allocation(bool new_state) { | 549 inline bool Heap::allow_allocation(bool new_state) { |
| 528 bool old = allocation_allowed_; | 550 bool old = allocation_allowed_; |
| 529 allocation_allowed_ = new_state; | 551 allocation_allowed_ = new_state; |
| 530 return old; | 552 return old; |
| 531 } | 553 } |
| 532 | 554 |
| 533 #endif | 555 #endif |
| 534 | 556 |
| 535 | 557 |
| 536 void ExternalStringTable::AddString(String* string) { | 558 void ExternalStringTable::AddString(String* string) { |
| 537 ASSERT(string->IsExternalString()); | 559 ASSERT(string->IsExternalString()); |
| 538 if (Heap::InNewSpace(string)) { | 560 if (heap_->InNewSpace(string)) { |
| 539 new_space_strings_.Add(string); | 561 new_space_strings_.Add(string); |
| 540 } else { | 562 } else { |
| 541 old_space_strings_.Add(string); | 563 old_space_strings_.Add(string); |
| 542 } | 564 } |
| 543 } | 565 } |
| 544 | 566 |
| 545 | 567 |
| 546 void ExternalStringTable::Iterate(ObjectVisitor* v) { | 568 void ExternalStringTable::Iterate(ObjectVisitor* v) { |
| 547 if (!new_space_strings_.is_empty()) { | 569 if (!new_space_strings_.is_empty()) { |
| 548 Object** start = &new_space_strings_[0]; | 570 Object** start = &new_space_strings_[0]; |
| 549 v->VisitPointers(start, start + new_space_strings_.length()); | 571 v->VisitPointers(start, start + new_space_strings_.length()); |
| 550 } | 572 } |
| 551 if (!old_space_strings_.is_empty()) { | 573 if (!old_space_strings_.is_empty()) { |
| 552 Object** start = &old_space_strings_[0]; | 574 Object** start = &old_space_strings_[0]; |
| 553 v->VisitPointers(start, start + old_space_strings_.length()); | 575 v->VisitPointers(start, start + old_space_strings_.length()); |
| 554 } | 576 } |
| 555 } | 577 } |
| 556 | 578 |
| 557 | 579 |
| 558 // Verify() is inline to avoid ifdef-s around its calls in release | 580 // Verify() is inline to avoid ifdef-s around its calls in release |
| 559 // mode. | 581 // mode. |
| 560 void ExternalStringTable::Verify() { | 582 void ExternalStringTable::Verify() { |
| 561 #ifdef DEBUG | 583 #ifdef DEBUG |
| 562 for (int i = 0; i < new_space_strings_.length(); ++i) { | 584 for (int i = 0; i < new_space_strings_.length(); ++i) { |
| 563 ASSERT(Heap::InNewSpace(new_space_strings_[i])); | 585 ASSERT(heap_->InNewSpace(new_space_strings_[i])); |
| 564 ASSERT(new_space_strings_[i] != Heap::raw_unchecked_null_value()); | 586 ASSERT(new_space_strings_[i] != HEAP->raw_unchecked_null_value()); |
| 565 } | 587 } |
| 566 for (int i = 0; i < old_space_strings_.length(); ++i) { | 588 for (int i = 0; i < old_space_strings_.length(); ++i) { |
| 567 ASSERT(!Heap::InNewSpace(old_space_strings_[i])); | 589 ASSERT(!heap_->InNewSpace(old_space_strings_[i])); |
| 568 ASSERT(old_space_strings_[i] != Heap::raw_unchecked_null_value()); | 590 ASSERT(old_space_strings_[i] != HEAP->raw_unchecked_null_value()); |
| 569 } | 591 } |
| 570 #endif | 592 #endif |
| 571 } | 593 } |
| 572 | 594 |
| 573 | 595 |
| 574 void ExternalStringTable::AddOldString(String* string) { | 596 void ExternalStringTable::AddOldString(String* string) { |
| 575 ASSERT(string->IsExternalString()); | 597 ASSERT(string->IsExternalString()); |
| 576 ASSERT(!Heap::InNewSpace(string)); | 598 ASSERT(!heap_->InNewSpace(string)); |
| 577 old_space_strings_.Add(string); | 599 old_space_strings_.Add(string); |
| 578 } | 600 } |
| 579 | 601 |
| 580 | 602 |
| 581 void ExternalStringTable::ShrinkNewStrings(int position) { | 603 void ExternalStringTable::ShrinkNewStrings(int position) { |
| 582 new_space_strings_.Rewind(position); | 604 new_space_strings_.Rewind(position); |
| 583 Verify(); | 605 Verify(); |
| 584 } | 606 } |
| 585 | 607 |
| | 608 |
| | 609 void Heap::ClearInstanceofCache() { |
| | 610 set_instanceof_cache_function(the_hole_value()); |
| | 611 } |
| | 612 |
| | 613 |
| | 614 Object* Heap::ToBoolean(bool condition) { |
| | 615 return condition ? true_value() : false_value(); |
| | 616 } |
| | 617 |
| | 618 |
| | 619 void Heap::CompletelyClearInstanceofCache() { |
| | 620 set_instanceof_cache_map(the_hole_value()); |
| | 621 set_instanceof_cache_function(the_hole_value()); |
| | 622 } |
| | 623 |
| | 624 |
| | 625 MaybeObject* TranscendentalCache::Get(Type type, double input) { |
| | 626 SubCache* cache = caches_[type]; |
| | 627 if (cache == NULL) { |
| | 628 caches_[type] = cache = new SubCache(type); |
| | 629 } |
| | 630 return cache->Get(input); |
| | 631 } |
| | 632 |
| | 633 |
| | 634 Address TranscendentalCache::cache_array_address() { |
| | 635 return reinterpret_cast<Address>(caches_); |
| | 636 } |
| | 637 |
| | 638 |
| | 639 double TranscendentalCache::SubCache::Calculate(double input) { |
| | 640 switch (type_) { |
| | 641 case ACOS: |
| | 642 return acos(input); |
| | 643 case ASIN: |
| | 644 return asin(input); |
| | 645 case ATAN: |
| | 646 return atan(input); |
| | 647 case COS: |
| | 648 return cos(input); |
| | 649 case EXP: |
| | 650 return exp(input); |
| | 651 case LOG: |
| | 652 return log(input); |
| | 653 case SIN: |
| | 654 return sin(input); |
| | 655 case TAN: |
| | 656 return tan(input); |
| | 657 default: |
| | 658 return 0.0; // Never happens. |
| | 659 } |
| | 660 } |
| | 661 |
| | 662 |
| | 663 MaybeObject* TranscendentalCache::SubCache::Get(double input) { |
| | 664 Converter c; |
| | 665 c.dbl = input; |
| | 666 int hash = Hash(c); |
| | 667 Element e = elements_[hash]; |
| | 668 if (e.in[0] == c.integers[0] && |
| | 669 e.in[1] == c.integers[1]) { |
| | 670 ASSERT(e.output != NULL); |
| | 671 isolate_->counters()->transcendental_cache_hit()->Increment(); |
| | 672 return e.output; |
| | 673 } |
| | 674 double answer = Calculate(input); |
| | 675 isolate_->counters()->transcendental_cache_miss()->Increment(); |
| | 676 Object* heap_number; |
| | 677 { MaybeObject* maybe_heap_number = |
| | 678 isolate_->heap()->AllocateHeapNumber(answer); |
| | 679 if (!maybe_heap_number->ToObject(&heap_number)) return maybe_heap_number; |
| | 680 } |
| | 681 elements_[hash].in[0] = c.integers[0]; |
| | 682 elements_[hash].in[1] = c.integers[1]; |
| | 683 elements_[hash].output = heap_number; |
| | 684 return heap_number; |
| | 685 } |
| | 686 |
| | 687 |
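SubCache::Get keys the cache on the double's exact 64-bit pattern: Converter (a union defined in heap.h, outside this hunk) reinterprets the input as two 32-bit halves, Hash mixes them into an index, and both halves are stored in the element so a hit is verified bit-for-bit, which also keeps -0.0 and NaN payloads distinct. A simplified sketch of that keying scheme; the size constant and mixing steps here are illustrative stand-ins, not V8's actual ones:

```cpp
#include <stdint.h>

static const int kCacheSizeSketch = 512;  // must be a power of two

// Reinterpret the double's bit pattern as two 32-bit halves.
union ConverterSketch {
  double dbl;
  uint32_t integers[2];
};

// Fold both halves into an index; masking with size-1 relies on the
// power-of-two table size.
static int HashSketch(const ConverterSketch& c) {
  uint32_t hash = c.integers[0] ^ c.integers[1];
  return static_cast<int>(hash & (kCacheSizeSketch - 1));
}
```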
| | 688 Heap* _inline_get_heap_() { |
| | 689 return HEAP; |
| | 690 } |
| | 691 |
| | 692 |
| 586 } } // namespace v8::internal | 693 } } // namespace v8::internal |
| 587 | 694 |
| 588 #endif // V8_HEAP_INL_H_ | 695 #endif // V8_HEAP_INL_H_ |