| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 613 matching lines...) |
| 624 ASSERT(!SpaceIsPaged(space_index) || | 624 ASSERT(!SpaceIsPaged(space_index) || |
| 625 size <= Page::kPageSize - Page::kObjectStartOffset); | 625 size <= Page::kPageSize - Page::kObjectStartOffset); |
| 626 MaybeObject* maybe_new_allocation; | 626 MaybeObject* maybe_new_allocation; |
| 627 if (space_index == NEW_SPACE) { | 627 if (space_index == NEW_SPACE) { |
| 628 maybe_new_allocation = | 628 maybe_new_allocation = |
| 629 reinterpret_cast<NewSpace*>(space)->AllocateRaw(size); | 629 reinterpret_cast<NewSpace*>(space)->AllocateRaw(size); |
| 630 } else { | 630 } else { |
| 631 maybe_new_allocation = | 631 maybe_new_allocation = |
| 632 reinterpret_cast<PagedSpace*>(space)->AllocateRaw(size); | 632 reinterpret_cast<PagedSpace*>(space)->AllocateRaw(size); |
| 633 } | 633 } |
| | 634 ASSERT(!maybe_new_allocation->IsFailure()); |
| 634 Object* new_allocation = maybe_new_allocation->ToObjectUnchecked(); | 635 Object* new_allocation = maybe_new_allocation->ToObjectUnchecked(); |
| 635 HeapObject* new_object = HeapObject::cast(new_allocation); | 636 HeapObject* new_object = HeapObject::cast(new_allocation); |
| 636 address = new_object->address(); | 637 address = new_object->address(); |
| 637 high_water_[space_index] = address + size; | 638 high_water_[space_index] = address + size; |
| 638 } else { | 639 } else { |
| 639 ASSERT(SpaceIsLarge(space_index)); | 640 ASSERT(SpaceIsLarge(space_index)); |
| 640 LargeObjectSpace* lo_space = reinterpret_cast<LargeObjectSpace*>(space); | 641 LargeObjectSpace* lo_space = reinterpret_cast<LargeObjectSpace*>(space); |
| 641 Object* new_allocation; | 642 Object* new_allocation; |
| 642 if (space_index == kLargeData) { | 643 if (space_index == kLargeData) { |
| 643 new_allocation = lo_space->AllocateRawData(size)->ToObjectUnchecked(); | 644 new_allocation = lo_space->AllocateRawData(size)->ToObjectUnchecked(); |
| (...skipping 40 matching lines...) |
| 684 ASSERT(SpaceIsPaged(space)); | 685 ASSERT(SpaceIsPaged(space)); |
| 685 int page_of_pointee = offset >> kPageSizeBits; | 686 int page_of_pointee = offset >> kPageSizeBits; |
| 686 Address object_address = pages_[space][page_of_pointee] + | 687 Address object_address = pages_[space][page_of_pointee] + |
| 687 (offset & Page::kPageAlignmentMask); | 688 (offset & Page::kPageAlignmentMask); |
| 688 return HeapObject::FromAddress(object_address); | 689 return HeapObject::FromAddress(object_address); |
| 689 } | 690 } |
| 690 | 691 |
| 691 | 692 |
| 692 void Deserializer::Deserialize() { | 693 void Deserializer::Deserialize() { |
| 693 isolate_ = Isolate::Current(); | 694 isolate_ = Isolate::Current(); |
| | 695 ASSERT(isolate_ != NULL); |
| 694 // Don't GC while deserializing - just expand the heap. | 696 // Don't GC while deserializing - just expand the heap. |
| 695 Address* store_buffer_top = | 697 Address* store_buffer_top = |
| 696 reinterpret_cast<Address*>(isolate_->heap()->store_buffer_top()); | 698 reinterpret_cast<Address*>(isolate_->heap()->store_buffer_top()); |
| 697 AlwaysAllocateScope always_allocate; | 699 AlwaysAllocateScope always_allocate; |
| 698 // Don't use the free lists while deserializing. | 700 // Don't use the free lists while deserializing. |
| 699 LinearAllocationScope allocate_linearly; | 701 LinearAllocationScope allocate_linearly; |
| 700 // No active threads. | 702 // No active threads. |
| 701 ASSERT_EQ(NULL, isolate_->thread_manager()->FirstThreadStateInUse()); | 703 ASSERT_EQ(NULL, isolate_->thread_manager()->FirstThreadStateInUse()); |
| 702 // No active handles. | 704 // No active handles. |
| 703 ASSERT(isolate_->handle_scope_implementer()->blocks()->is_empty()); | 705 ASSERT(isolate_->handle_scope_implementer()->blocks()->is_empty()); |
| (...skipping 897 matching lines...) |
| 1601 fullness_[space] = RoundUp(fullness_[space], Page::kPageSize); | 1603 fullness_[space] = RoundUp(fullness_[space], Page::kPageSize); |
| 1602 } | 1604 } |
| 1603 } | 1605 } |
| 1604 int allocation_address = fullness_[space]; | 1606 int allocation_address = fullness_[space]; |
| 1605 fullness_[space] = allocation_address + size; | 1607 fullness_[space] = allocation_address + size; |
| 1606 return allocation_address; | 1608 return allocation_address; |
| 1607 } | 1609 } |
| 1608 | 1610 |
| 1609 | 1611 |
| 1610 } } // namespace v8::internal | 1612 } } // namespace v8::internal |
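
The NEW-side GetAddressFromStart (lines 686-689 above) turns a space-relative offset back into an absolute object address: the high bits of the offset select the page, and the low bits give the position inside that page. Below is a minimal standalone sketch of that arithmetic; the kPageSizeBits value, the stub pages_ table, and main() are illustrative assumptions for this sketch, not V8's real definitions.

```cpp
// Minimal sketch of the page/offset decoding used by GetAddressFromStart above.
// kPageSizeBits and the pages_ table are stand-ins, not V8's real definitions;
// this only illustrates the address arithmetic.
#include <cstdint>
#include <cstdio>

typedef uintptr_t Address;

static const int kPageSizeBits = 13;              // assumed: 8 KB pages
static const int kPageSize = 1 << kPageSizeBits;
static const int kPageAlignmentMask = kPageSize - 1;

// Stub "page table": base address of each page allocated for one space.
static Address pages_[4];

// Decode a space-relative offset into an absolute object address:
// the high bits select the page, the low bits are the offset within it.
Address GetAddressFromStart(int offset) {
  int page_of_pointee = offset >> kPageSizeBits;
  return pages_[page_of_pointee] + (offset & kPageAlignmentMask);
}

int main() {
  pages_[0] = 0x10000;
  pages_[1] = 0x40000;                            // pages need not be contiguous
  std::printf("%#lx\n",
              (unsigned long)GetAddressFromStart(kPageSize + 0x20));
  // Prints 0x40020: page 1's base plus the in-page offset 0x20.
  return 0;
}
```

The point of the indirection is that consecutive offsets within one space may map to non-contiguous pages in memory, which is why the deserializer records each page's base address in pages_ as it allocates.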