Chromium Code Reviews

Issue 335009: New snapshot framework.  Doesn't work on ARM yet (code targets...  (Closed)
Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/

| OLD | NEW |
|---|---|
| 1 // Copyright 2006-2008 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2008 the V8 project authors. All rights reserved. | 
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without | 
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are | 
| 4 // met: | 4 // met: | 
| 5 // | 5 // | 
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright | 
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. | 
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above | 
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following | 
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided | 
| (...skipping 1399 matching lines...) | |
| 1410 int length = reader_.GetInt(); | 1410 int length = reader_.GetInt(); | 
| 1411 ASSERT(length <= kMaxTagLength); | 1411 ASSERT(length <= kMaxTagLength); | 
| 1412 reader_.GetBytes(reinterpret_cast<Address>(buf), length); | 1412 reader_.GetBytes(reinterpret_cast<Address>(buf), length); | 
| 1413 ASSERT_EQ(strlen(tag), length); | 1413 ASSERT_EQ(strlen(tag), length); | 
| 1414 ASSERT(strncmp(tag, buf, length) == 0); | 1414 ASSERT(strncmp(tag, buf, length) == 0); | 
| 1415 } | 1415 } | 
| 1416 } | 1416 } | 
| 1417 #endif | 1417 #endif | 
| 1418 | 1418 | 
| 1419 | 1419 | 
| 1420 class NoGlobalHandlesChecker : public ObjectVisitor { | |
| 1421 public: | |
| 1422 virtual void VisitPointers(Object** start, Object** end) { | |
| 1423 ASSERT(false); | |
| 1424 } | |
| 1425 }; | |
| 1426 | |
| 1427 | |
| 1420 void Deserializer::Deserialize() { | 1428 void Deserializer::Deserialize() { | 
| 1429 // No global handles. | |
| 1430 NoGlobalHandlesChecker checker; | |
| 1431 GlobalHandles::IterateRoots(&checker); | |
| 1421 // No active threads. | 1432 // No active threads. | 
| 1422 ASSERT_EQ(NULL, ThreadState::FirstInUse()); | 1433 ASSERT_EQ(NULL, ThreadState::FirstInUse()); | 
| 1423 // No active handles. | 1434 // No active handles. | 
| 1424 ASSERT(HandleScopeImplementer::instance()->blocks()->is_empty()); | 1435 ASSERT(HandleScopeImplementer::instance()->blocks()->is_empty()); | 
| 1425 reference_decoder_ = new ExternalReferenceDecoder(); | 1436 reference_decoder_ = new ExternalReferenceDecoder(); | 
| 1426 // By setting linear allocation only, we forbid the use of free list | 1437 // By setting linear allocation only, we forbid the use of free list | 
| 1427 // allocation which is not predicted by SimulatedAddress. | 1438 // allocation which is not predicted by SimulatedAddress. | 
| 1428 GetHeader(); | 1439 GetHeader(); | 
| 1429 Heap::IterateRoots(this); | 1440 Heap::IterateRoots(this); | 
| 1430 GetContextStack(); | 1441 GetContextStack(); | 
| 1442 // Any global handles that have been set up by deserialization are leaked | |
| 1443 // since no one is keeping track of them. | |
| 1444 GlobalHandles::TearDown(); | |

Mads Ager (chromium) 2009/10/26 11:14:05:
Shouldn't we be checking that there are no global …

Erik Corry 2009/10/27 09:49:58:
No.  Even if there are global handles we want to d…

| 1431 } | 1445 } | 
| 1432 | 1446 | 
| 1433 | 1447 | 
| 1434 void Deserializer::VisitPointers(Object** start, Object** end) { | 1448 void Deserializer::VisitPointers(Object** start, Object** end) { | 
| 1435 bool root = root_; | 1449 bool root = root_; | 
| 1436 root_ = false; | 1450 root_ = false; | 
| 1437 for (Object** p = start; p < end; ++p) { | 1451 for (Object** p = start; p < end; ++p) { | 
| 1438 if (root) { | 1452 if (root) { | 
| 1439 roots_++; | 1453 roots_++; | 
| 1440 // Read the next object or pointer from the stream | 1454 // Read the next object or pointer from the stream | 
| (...skipping 292 matching lines...) | |
| 1733 ConcatReversed(&large_objects_, new_objects); | 1747 ConcatReversed(&large_objects_, new_objects); | 
| 1734 ASSERT(index < large_objects_.length()); | 1748 ASSERT(index < large_objects_.length()); | 
| 1735 } | 1749 } | 
| 1736 return large_objects_[index]; // s.page_offset() is ignored. | 1750 return large_objects_[index]; // s.page_offset() is ignored. | 
| 1737 } | 1751 } | 
| 1738 UNREACHABLE(); | 1752 UNREACHABLE(); | 
| 1739 return NULL; | 1753 return NULL; | 
| 1740 } | 1754 } | 
| 1741 | 1755 | 
| 1742 | 1756 | 
| 1757 Deserializer2::Deserializer2(SnapshotByteSource* source) | |
| 1758 : source_(source), | |

Mads Ager (chromium) 2009/10/26 11:14:05:
Four space indent.

| 1759 external_reference_decoder_(NULL) { | |
| 1760 for (int i = 0; i <= LAST_SPACE; i++) { | |
| 1761 fullness_[i] = 0; | |
| 1762 } | |
| 1763 } | |
| 1764 | |
| 1765 | |
| 1766 // This routine both allocates a new object, and also keeps | |
| 1767 // track of where objects have been allocated so that we can | |
| 1768 // fix back references when deserializing. | |
| 1769 Address Deserializer2::Allocate(int space_index, int size) { | |
| 1770 HeapObject* new_object; | |
| 1771 int old_fullness = CurrentAllocationAddress(space_index); | |
| 1772 // When we start a new page we need to record its location. | |
| 1773 bool record_page = (old_fullness == 0); | |
| 1774 if (SpaceIsPaged(space_index)) { | |
| 1775 PagedSpace* space; | |
| 1776 switch (space_index) { | |
| 1777 case OLD_DATA_SPACE: space = Heap::old_data_space(); break; | |
| 1778 case OLD_POINTER_SPACE: space = Heap::old_pointer_space(); break; | |
| 1779 case MAP_SPACE: space = Heap::map_space(); break; | |
| 1780 case CODE_SPACE: space = Heap::code_space(); break; | |
| 1781 case CELL_SPACE: space = Heap::cell_space(); break; | |
| 1782 default: UNREACHABLE(); space = NULL; break; | |
| 1783 } | |
| 1784 ASSERT(size <= Page::kPageSize - Page::kObjectStartOffset); | |
| 1785 int current_page = old_fullness >> Page::kPageSizeBits; | |
| 1786 int new_fullness = old_fullness + size; | |
| 1787 int new_page = new_fullness >> Page::kPageSizeBits; | |
| 1788 // What is our new position within the current page. | |
| 1789 int intra_page_offset = new_fullness - current_page * Page::kPageSize; | |
| 1790 if (intra_page_offset > Page::kPageSize - Page::kObjectStartOffset) { | |
| 1791 // This object will not fit in a page and we have to move to the next. | |
| 1792 new_page = (old_fullness >> Page::kPageSizeBits) + 1; | |

Mads Ager (chromium) 2009/10/26 11:14:05:
current_page + 1?

| 1793 old_fullness = new_page << Page::kPageSizeBits; | |
| 1794 new_fullness = old_fullness + size; | |
| 1795 record_page = true; | |
| 1796 } | |
| 1797 fullness_[space_index] = new_fullness; | |
| 1798 Object* new_allocation = space->AllocateRaw(size); | |
| 1799 new_object = HeapObject::cast(new_allocation); | |
| 1800 ASSERT(!new_object->IsFailure()); | |
| 1801 ASSERT((reinterpret_cast<intptr_t>(new_object->address()) & | |
| 1802 Page::kPageAlignmentMask) == | |

Mads Ager (chromium) 2009/10/26 11:14:05:
Funky indentation.  I would do:
ASSERT((reinterpr…

| 1803 (old_fullness & Page::kPageAlignmentMask) + | |
| 1804 Page::kObjectStartOffset); | |
| 1805 } else if (SpaceIsLarge(space_index)) { | |
| 1806 ASSERT(size > Page::kPageSize - Page::kObjectStartOffset); | |
| 1807 fullness_[LO_SPACE]++; | |
| 1808 LargeObjectSpace* lo_space = Heap::lo_space(); | |
| 1809 Object* new_allocation; | |
| 1810 if (space_index == kLargeData) { | |
| 1811 new_allocation = lo_space->AllocateRaw(size); | |
| 1812 } else if (space_index == kLargeFixedArray) { | |
| 1813 new_allocation = lo_space->AllocateRawFixedArray(size); | |
| 1814 } else { | |
| 1815 ASSERT(space_index == kLargeCode); | |
| 1816 new_allocation = lo_space->AllocateRawCode(size); | |
| 1817 } | |
| 1818 ASSERT(!new_allocation->IsFailure()); | |
| 1819 new_object = HeapObject::cast(new_allocation); | |
| 1820 record_page = true; | |
| 1821 // The page recording below records all large objects in the same space. | |
| 1822 space_index = LO_SPACE; | |
| 1823 } else { | |
| 1824 ASSERT(space_index == NEW_SPACE); | |
| 1825 Object* new_allocation = Heap::new_space()->AllocateRaw(size); | |
| 1826 fullness_[space_index] += size; | |
| 1827 ASSERT(!new_allocation->IsFailure()); | |
| 1828 new_object = HeapObject::cast(new_allocation); | |
| 1829 } | |
| 1830 Address address = new_object->address(); | |
| 1831 if (record_page) { | |
| 1832 pages_[space_index].Add(address); | |
| 1833 } | |
| 1834 return address; | |
| 1835 } | |
| 1836 | |
| 1837 | |
| 1838 // This returns the address of an object that has been described in the | |
| 1839 // snapshot as being offset bytes back in a particular space. | |
| 1840 HeapObject* Deserializer2::GetAddress(int space) { | |
| 1841 int offset = source_->GetInt(); | |
| 1842 if (SpaceIsLarge(space)) { | |
| 1843 // Large spaces have one object per 'page'. | |
| 1844 return HeapObject::FromAddress( | |
| 1845 pages_[LO_SPACE][fullness_[LO_SPACE] - offset]); | |
| 1846 } | |
| 1847 offset <<= kObjectAlignmentBits; | |
| 1848 if (space == NEW_SPACE) { | |
| 1849 // New space has only one 'page' - numbered 0. | |
| 1850 return HeapObject::FromAddress( | |
| 1851 pages_[space][0] + fullness_[space] - offset); | |
| 1852 } | |
| 1853 ASSERT(SpaceIsPaged(space)); | |
| 1854 int virtual_address = fullness_[space] - offset; | |
| 1855 int page_of_pointee = (virtual_address) >> Page::kPageSizeBits; | |
| 1856 Address object_address = pages_[space][page_of_pointee] + | |
| 1857 (virtual_address & Page::kPageAlignmentMask); | |
| 1858 return HeapObject::FromAddress(object_address); | |
| 1859 } | |
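
The decode above is easiest to follow with concrete numbers: `pages_[space]` acts as a translation table from simulated page numbers to real page start addresses, the shift recovers the page number, and the mask recovers the offset within it. A small self-contained sketch, assuming 13-bit (8 KB) pages; the real constants are `Page::kPageSizeBits` and `Page::kPageAlignmentMask`:

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

typedef uintptr_t Address;
const int kPageSizeBits = 13;  // assumed 8 KB pages, for illustration only
const int kPageAlignmentMask = (1 << kPageSizeBits) - 1;

// pages[i] is the real start address of simulated page i in one space,
// recorded by Allocate each time it crosses into a fresh page.
Address Translate(const std::vector<Address>& pages, int virtual_address) {
  int page_of_pointee = virtual_address >> kPageSizeBits;
  int offset_in_page = virtual_address & kPageAlignmentMask;
  return pages[page_of_pointee] + offset_in_page;
}

int main() {
  std::vector<Address> pages;
  pages.push_back(0x100000);  // hypothetical real page 0
  pages.push_back(0x340000);  // hypothetical real page 1, not contiguous
  // Virtual address 10000 = page 1 (10000 >> 13), offset 1808 (10000 & 8191).
  assert(Translate(pages, 10000) == 0x340000 + 1808);
  return 0;
}
```
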
| 1860 | |
| 1861 | |
| 1862 void Deserializer2::Deserialize() { | |
| 1863 // Don't GC while deserializing - just expand the heap. | |
| 1864 AlwaysAllocateScope always_allocate; | |
| 1865 // Don't use the free lists while deserializing. | |
| 1866 LinearAllocationScope allocate_linearly; | |
| 1867 // No active threads. | |
| 1868 ASSERT_EQ(NULL, ThreadState::FirstInUse()); | |
| 1869 // No active handles. | |
| 1870 ASSERT(HandleScopeImplementer::instance()->blocks()->is_empty()); | |
| 1871 ASSERT(external_reference_decoder_ == NULL); | |
| 1872 external_reference_decoder_ = new ExternalReferenceDecoder(); | |
| 1873 Heap::IterateRoots(this); | |
| 1874 ASSERT(source_->AtEOF()); | |
| 1875 delete external_reference_decoder_; | |
| 1876 external_reference_decoder_ = NULL; | |
| 1877 } | |
| 1878 | |
| 1879 | |
| 1880 // This is called on the roots. It is the driver of the deserialization | |
| 1881 // process. | |
| 1882 void Deserializer2::VisitPointers(Object** start, Object** end) { | |
| 1883 for (Object** current = start; current < end; current++) { | |
| 1884 DataType data = static_cast<DataType>(source_->Get()); | |
| 1885 if (data == SMI_SERIALIZATION) { | |
| 1886 *current = Smi::FromInt(source_->GetInt() - kSmiBias); | |
| 1887 } else if (data == BACKREF_SERIALIZATION) { | |
| 1888 int space = source_->Get(); | |
| 1889 *current = GetAddress(space); | |
| 1890 } else { | |
| 1891 ASSERT(data == OBJECT_SERIALIZATION); | |
| 1892 ReadObject(current); | |
| 1893 } | |
| 1894 } | |
| 1895 } | |
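
Together with `ReadObject` below, this driver fixes a small grammar for the snapshot stream. An informal reconstruction from the two dispatch loops, purely as a reading aid (tag names as in the patch):

```cpp
// Informal stream grammar, reconstructed from VisitPointers and ReadObject.
//
//   root-item ::= SMI_SERIALIZATION <biased int>
//               | BACKREF_SERIALIZATION <space> <offset>
//               | OBJECT_SERIALIZATION object
//   object    ::= <space> <size in words> item*       (until size is filled)
//   item      ::= root-item
//               | RAW_DATA_SERIALIZATION <length> <byte>*
//               | CODE_OBJECT_SERIALIZATION object
//               | CODE_BACKREF_SERIALIZATION <space> <offset>
//               | EXTERNAL_REFERENCE_SERIALIZATION <reference id>
//
// The first item of every object is its map, serialized as a tagged reference.
```
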
| 1896 | |
| 1897 | |
| 1898 // This routine writes the new object into the pointer provided and then | |
| 1899 // returns a true of the new object was in young space and false otherwise. | |

Mads Ager (chromium) 2009/10/26 11:14:05:
returns true if the new ...

| 1900 // The reason for this strange interface is that otherwise the object is | |
| 1901 // written very late, which means the ByteArray map is not set up by the | |
| 1902 // time we need to use it to mark the space at the end of a page free (by | |
| 1903 // making it into a byte array). | |
| 1904 bool Deserializer2::ReadObject(Object** write_back) { | |
| 1905 int space = source_->Get(); | |
| 1906 int size = source_->GetInt() << kObjectAlignmentBits; | |
| 1907 Address address = Allocate(space, size); | |
| 1908 *write_back = HeapObject::FromAddress(address); | |
| 1909 Object** current = reinterpret_cast<Object**>(address); | |
| 1910 Object** limit = current + (size >> kPointerSizeLog2); | |
| 1911 while (current < limit) { | |
| 1912 DataType data = static_cast<DataType>(source_->Get()); | |
| 1913 switch (data) { | |
| 1914 case SMI_SERIALIZATION: | |
| 1915 *current++ = Smi::FromInt(source_->GetInt() - kSmiBias); | |
| 1916 break; | |
| 1917 case RAW_DATA_SERIALIZATION: { | |
| 1918 int size = source_->GetInt(); | |
| 1919 byte* raw_data_out = reinterpret_cast<byte*>(current); | |
| 1920 for (int j = 0; j < size; j++) { | |
| 1921 *raw_data_out++ = source_->Get(); | |
| 1922 } | |
| 1923 current = reinterpret_cast<Object**>(raw_data_out); | |
| 1924 break; | |
| 1925 } | |
| 1926 case OBJECT_SERIALIZATION: { | |
| 1927 // Recurse to unpack an object that is forward-referenced from here. | |
| 1928 bool in_new_space = ReadObject(current); | |
| 1929 if (in_new_space && space != NEW_SPACE) { | |
| 1930 Heap::RecordWrite(address, | |
| 1931 reinterpret_cast<Address>(current) - address); | |
| 1932 } | |
| 1933 current++; | |
| 1934 break; | |
| 1935 } | |
| 1936 case CODE_OBJECT_SERIALIZATION: { | |
| 1937 Object* new_code_object = NULL; | |
| 1938 ReadObject(&new_code_object); | |
| 1939 Code* code_object = reinterpret_cast<Code*>(new_code_object); | |
| 1940 // Setting a branch/call to another code object from code. | |
| 1941 Address location_of_branch_data = reinterpret_cast<Address>(current); | |
| 1942 Assembler::set_target_address_at(location_of_branch_data, | |
| 1943 code_object->instruction_start()); | |
| 1944 location_of_branch_data += Assembler::kCallTargetSize; | |
| 1945 current = reinterpret_cast<Object**>(location_of_branch_data); | |
| 1946 break; | |
| 1947 } | |
| 1948 case BACKREF_SERIALIZATION: { | |
| 1949 // Write a backreference to an object we unpacked earlier. | |
| 1950 int backref_space = source_->Get(); | |
| 1951 if (backref_space == NEW_SPACE && space != NEW_SPACE) { | |
| 1952 Heap::RecordWrite(address, | |
| 1953 reinterpret_cast<Address>(current) - address); | |
| 1954 } | |
| 1955 *current++ = GetAddress(backref_space); | |
| 1956 break; | |
| 1957 } | |
| 1958 case CODE_BACKREF_SERIALIZATION: { | |
| 1959 int backref_space = source_->Get(); | |
| 1960 // Can't use Code::cast because heap is not set up yet and assertions | |
| 1961 // will fail. | |
| 1962 Code* code_object = reinterpret_cast<Code*>(GetAddress(backref_space)); | |
| 1963 // Setting a branch/call to previously decoded code object from code. | |
| 1964 Address location_of_branch_data = reinterpret_cast<Address>(current); | |
| 1965 Assembler::set_target_address_at(location_of_branch_data, | |
| 1966 code_object->instruction_start()); | |
| 1967 location_of_branch_data += Assembler::kCallTargetSize; | |
| 1968 current = reinterpret_cast<Object**>(location_of_branch_data); | |
| 1969 break; | |
| 1970 } | |
| 1971 case EXTERNAL_REFERENCE_SERIALIZATION: { | |
| 1972 int reference_id = source_->GetInt(); | |
| 1973 Address address = external_reference_decoder_->Decode(reference_id); | |
| 1974 *current++ = reinterpret_cast<Object*>(address); | |
| 1975 break; | |
| 1976 } | |
| 1977 default: | |
| 1978 UNREACHABLE(); | |
| 1979 } | |
| 1980 } | |
| 1981 ASSERT(current == limit); | |
| 1982 return space == NEW_SPACE; | |
| 1983 } | |
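
The odd-looking boolean return exists to drive the `Heap::RecordWrite` calls above: deserialization writes pointers with raw stores that bypass the write barrier, so every old-to-new edge has to be entered into the remembered set by hand. A toy model of that rule, with stand-in types (`RecordWrite` here is a placeholder for the real `Heap::RecordWrite`):

```cpp
#include <cstdint>
#include <set>
#include <utility>

// Stand-in types; in V8 these are Address, Object** and the remembered set.
typedef uintptr_t Address;
struct Object;
static std::set<std::pair<Address, int> > remembered_set;

// Hypothetical stand-in for Heap::RecordWrite(address, offset).
static void RecordWrite(Address host, int offset) {
  remembered_set.insert(std::make_pair(host, offset));
}

// The rule ReadObject implements: a raw store of a new-space value into an
// old-space host must be recorded, because the store skipped the barrier.
static void StoreDuringDeserialization(Address host, Object** slot,
                                       Object* value, bool host_is_new,
                                       bool value_is_new) {
  *slot = value;  // raw store, no write barrier
  if (value_is_new && !host_is_new) {
    RecordWrite(host,
                static_cast<int>(reinterpret_cast<Address>(slot) - host));
  }
}
```
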
| 1984 | |
| 1985 #ifdef DEBUG | |
| 1986 | |
| 1987 void Deserializer2::Synchronize(const char* tag) { | |
| 1988 int data = source_->Get(); | |
| 1989 // If this assert fails then that indicates that you have a mismatch between | |
| 1990 // the number of GC roots when serializing and deserializing. | |
| 1991 ASSERT(data == SYNCHRONIZE); | |
| 1992 do { | |
| 1993 int character = source_->Get(); | |
| 1994 if (character == 0) break; | |
| 1995 if (FLAG_debug_serialization) { | |
| 1996 PrintF("%c", character); | |
| 1997 } | |
| 1998 } while (true); | |
| 1999 if (FLAG_debug_serialization) { | |
| 2000 PrintF("\n"); | |
| 2001 } | |
| 2002 } | |
| 2003 | |
| 2004 | |
| 2005 void Serializer2::Synchronize(const char* tag) { | |
| 2006 sink_->Put(SYNCHRONIZE, tag); | |
| 2007 int character; | |
| 2008 do { | |
| 2009 character = *tag++; | |
| 2010 sink_->Put(character, "tagcharacter"); | |
| 2011 } while (character != 0); | |
| 2012 } | |
| 2013 | |
| 2014 #endif | |
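
These two functions form a debug-only handshake: the serializer interleaves a SYNCHRONIZE byte plus a NUL-terminated tag into the stream at each root, and the deserializer checks for it, so a mismatch in the number or order of GC roots is caught at the first out-of-step tag instead of as silent heap corruption. A toy model of the round trip (a `std::vector` stands in for the sink/source; the marker value is illustrative, not V8's):

```cpp
#include <cassert>
#include <vector>

const unsigned char SYNCHRONIZE = 0xFF;  // assumed marker byte

// Serializer side: marker byte, then the tag including its trailing NUL.
void PutSync(std::vector<unsigned char>* sink, const char* tag) {
  sink->push_back(SYNCHRONIZE);
  do {
    sink->push_back(static_cast<unsigned char>(*tag));
  } while (*tag++ != 0);
}

// Deserializer side: assert the marker, skip the tag, return the new cursor.
// The assert is what fires when the two sides visit different roots.
size_t GetSync(const std::vector<unsigned char>& source, size_t pos) {
  assert(source[pos] == SYNCHRONIZE);
  while (source[++pos] != 0) { /* skip tag characters */ }
  return pos + 1;
}
```
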
| 2015 | |
| 2016 Serializer2::Serializer2(SnapshotByteSink* sink) | |
| 2017 : sink_(sink), | |

Mads Ager (chromium) 2009/10/26 11:14:05:
Four space indent.

| 2018 current_root_index_(0), | |
| 2019 external_reference_encoder_(NULL) { | |
| 2020 for (int i = 0; i <= LAST_SPACE; i++) { | |
| 2021 fullness_[i] = 0; | |
| 2022 } | |
| 2023 } | |
| 2024 | |
| 2025 | |
| 2026 void Serializer2::Serialize() { | |
| 2027 // No active threads. | |
| 2028 CHECK_EQ(NULL, ThreadState::FirstInUse()); | |
| 2029 // No active or weak handles. | |
| 2030 CHECK(HandleScopeImplementer::instance()->blocks()->is_empty()); | |
| 2031 CHECK_EQ(0, GlobalHandles::NumberOfWeakHandles()); | |
| 2032 ASSERT(external_reference_encoder_ == NULL); | |
| 2033 external_reference_encoder_ = new ExternalReferenceEncoder(); | |
| 2034 Heap::IterateRoots(this); | |
| 2035 delete external_reference_encoder_; | |
| 2036 external_reference_encoder_ = NULL; | |
| 2037 } | |
| 2038 | |
| 2039 | |
| 2040 void Serializer2::VisitPointers(Object** start, Object** end) { | |
| 2041 for (Object** current = start; current < end; current++) { | |
| 2042 SerializeObject(*current, TAGGED_REPRESENTATION); | |
| 2043 } | |
| 2044 } | |
| 2045 | |
| 2046 | |
| 2047 void Serializer2::SerializeObject( | |
| 2048 Object* o, | |
| 2049 ReferenceRepresentation reference_representation) { | |
| 2050 if (o->IsHeapObject()) { | |
| 2051 HeapObject* heap_object = HeapObject::cast(o); | |
| 2052 MapWord map_word = heap_object->map_word(); | |
| 2053 if (map_word.IsSerializationAddress()) { | |
| 2054 int space = SpaceOfAlreadySerializedObject(heap_object); | |
| 2055 int offset = | |
| 2056 CurrentAllocationAddress(space) - map_word.ToSerializationAddress(); | |
| 2057 // If we are actually dealing with real offsets (and not a numbering of | |
| 2058 // all objects) then we should shift out the bits that are always 0. | |
| 2059 if (!SpaceIsLarge(space)) offset >>= kObjectAlignmentBits; | |
| 2060 if (reference_representation == CODE_TARGET_REPRESENTATION) { | |
| 2061 sink_->Put(CODE_BACKREF_SERIALIZATION, "BackRefCodeSerialization"); | |
| 2062 } else { | |
| 2063 ASSERT(reference_representation == TAGGED_REPRESENTATION); | |
| 2064 sink_->Put(BACKREF_SERIALIZATION, "BackRefSerialization"); | |
| 2065 } | |
| 2066 sink_->Put(space, "space"); | |
| 2067 sink_->PutInt(offset, "offset"); | |
| 2068 } else { | |
| 2069 // Object has not yet been serialized. Serialize it here. | |
| 2070 ObjectSerializer serializer(this, | |
| 2071 heap_object, | |
| 2072 sink_, | |
| 2073 reference_representation); | |
| 2074 serializer.Serialize(); | |
| 2075 } | |
| 2076 } else { | |
| 2077 // Serialize a Smi. | |
| 2078 unsigned int value = Smi::cast(o)->value() + kSmiBias; | |
| 2079 sink_->Put(SMI_SERIALIZATION, "SmiSerialization"); | |
| 2080 sink_->PutInt(value, "smi"); | |
| 2081 } | |
| 2082 } | |
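
The `kSmiBias` addition in the Smi branch shifts negative Smi values into the unsigned range before `PutInt`, and the deserializer's SMI_SERIALIZATION case subtracts the same bias. A round-trip sketch; the bias value here is illustrative, since the real `kSmiBias` is defined outside this hunk:

```cpp
#include <cassert>

const unsigned int kSmiBias = 0x40000000u;  // illustrative value only

unsigned int EncodeSmi(int value) {
  return static_cast<unsigned int>(value) + kSmiBias;  // wraps for negatives
}

int DecodeSmi(unsigned int encoded) {
  return static_cast<int>(encoded - kSmiBias);
}

int main() {
  assert(DecodeSmi(EncodeSmi(-7)) == -7);
  assert(DecodeSmi(EncodeSmi(12345)) == 12345);
  return 0;
}
```
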
| 2083 | |
| 2084 | |
| 2085 void Serializer2::ObjectSerializer::Serialize() { | |
| 2086 int space = Serializer2::SpaceOfObject(object_); | |
| 2087 int size = object_->Size(); | |
| 2088 | |
| 2089 if (reference_representation_ == TAGGED_REPRESENTATION) { | |
| 2090 sink_->Put(OBJECT_SERIALIZATION, "ObjectSerialization"); | |
| 2091 } else { | |
| 2092 ASSERT(reference_representation_ == CODE_TARGET_REPRESENTATION); | |
| 2093 sink_->Put(CODE_OBJECT_SERIALIZATION, "ObjectSerialization"); | |
| 2094 } | |
| 2095 sink_->Put(space, "space"); | |
| 2096 sink_->PutInt(size >> kObjectAlignmentBits, "Size in words"); | |
| 2097 | |
| 2098 // Get the map before overwriting it. | |
| 2099 Map* map = object_->map(); | |
| 2100 // Mark this object as already serialized. | |
| 2101 object_->set_map_word( | |
| 2102 MapWord::FromSerializationAddress(serializer_->Allocate(space, size))); | |
| 2103 | |
| 2104 // Serialize the map (first word of the object). | |
| 2105 serializer_->SerializeObject(map, TAGGED_REPRESENTATION); | |
| 2106 | |
| 2107 // Serialize the rest of the object. | |
| 2108 ASSERT(bytes_processed_so_far_ == 0); | |
| 2109 bytes_processed_so_far_ = kPointerSize; | |
| 2110 object_->IterateBody(map->instance_type(), size, this); | |
| 2111 OutputRawData(object_->address() + size); | |
| 2112 } | |
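
`Serialize()` relies on a classic forwarding trick: the first word of every heap object is its map pointer, and once an object has been serialized that word is overwritten with its simulated allocation address. `SerializeObject` (above) then recognizes revisited objects via `map_word.IsSerializationAddress()` and emits a back reference instead of a second copy. A stripped-down sketch with stand-in types; the low-bit tag encoding is an assumption, and V8's actual `MapWord` encoding may differ:

```cpp
#include <cstdint>

// Stand-in object: first word is either a map pointer or an encoded address.
struct FakeObject {
  uintptr_t map_word;
};

const uintptr_t kSerializedTag = 1;  // assumed low-bit tag

bool IsSerializationAddress(uintptr_t w) { return (w & kSerializedTag) != 0; }
uintptr_t FromSerializationAddress(uintptr_t a) {
  return (a << 1) | kSerializedTag;
}
uintptr_t ToSerializationAddress(uintptr_t w) { return w >> 1; }

// First visit: stash the simulated address and emit the full object.
// Later visits: the tagged word reveals it, so emit only a back reference.
void SerializeOnce(FakeObject* obj, uintptr_t simulated_address) {
  if (IsSerializationAddress(obj->map_word)) {
    uintptr_t earlier = ToSerializationAddress(obj->map_word);
    (void)earlier;  // ... emit BACKREF_SERIALIZATION with an offset
  } else {
    obj->map_word = FromSerializationAddress(simulated_address);
    // ... emit OBJECT_SERIALIZATION and walk the body
  }
}
```
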
| 2113 | |
| 2114 | |
| 2115 void Serializer2::ObjectSerializer::VisitPointers(Object** start, | |
| 2116 Object** end) { | |
| 2117 Address pointers_start = reinterpret_cast<Address>(start); | |
| 2118 OutputRawData(pointers_start); | |
| 2119 | |
| 2120 for (Object** current = start; current < end; current++) { | |
| 2121 serializer_->SerializeObject(*current, TAGGED_REPRESENTATION); | |
| 2122 } | |
| 2123 bytes_processed_so_far_ += (end - start) * kPointerSize; | |
| 2124 } | |
| 2125 | |
| 2126 | |
| 2127 void Serializer2::ObjectSerializer::VisitExternalReferences(Address* start, | |
| 2128 Address* end) { | |
| 2129 Address references_start = reinterpret_cast<Address>(start); | |
| 2130 OutputRawData(references_start); | |
| 2131 | |
| 2132 for (Address* current = start; current < end; current++) { | |
| 2133 sink_->Put(EXTERNAL_REFERENCE_SERIALIZATION, "External reference"); | |
| 2134 int reference_id = serializer_->EncodeExternalReference(*current); | |
| 2135 sink_->PutInt(reference_id, "reference id"); | |
| 2136 } | |
| 2137 bytes_processed_so_far_ += (end - start) * kPointerSize; | |
| 2138 } | |
| 2139 | |
| 2140 | |
| 2141 void Serializer2::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) { | |
| 2142 ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode())); | |
| 2143 Address target_start = rinfo->target_address_address(); | |
| 2144 OutputRawData(target_start); | |
| 2145 Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address()); | |
| 2146 serializer_->SerializeObject(target, CODE_TARGET_REPRESENTATION); | |
| 2147 bytes_processed_so_far_ += Assembler::kCallTargetSize; | |
| 2148 } | |
| 2149 | |
| 2150 | |
| 2151 void Serializer2::ObjectSerializer::OutputRawData(Address up_to) { | |
| 2152 Address object_start = object_->address(); | |
| 2153 int up_to_offset = up_to - object_start; | |
| 2154 int skipped = up_to_offset - bytes_processed_so_far_; | |
| 2155 ASSERT(skipped >= 0); | |
| 2156 if (skipped != 0) { | |
| 2157 sink_->Put(RAW_DATA_SERIALIZATION, "raw data"); | |
| 2158 sink_->PutInt(skipped, "length"); | |
| 2159 for (int i = 0; i < skipped; i++) { | |
| 2160 unsigned int data = object_start[bytes_processed_so_far_ + i]; | |
| 2161 sink_->Put(data, "byte"); | |
| 2162 } | |
| 2163 } | |
| 2164 bytes_processed_so_far_ += skipped; | |
| 2165 } | |
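
`OutputRawData` is what lets the body visitors above emit records only at pointer fields: any bytes between the last record and the next field are flushed verbatim as a RAW_DATA_SERIALIZATION span, so the stream is an exact interleaving of raw bytes and tagged records. A toy version of the bookkeeping (a `std::vector` stands in for the sink; the single length byte stands in for the length the real code writes with `PutInt`, and the tag value is assumed):

```cpp
#include <vector>

const unsigned char RAW_DATA_SERIALIZATION = 0x30;  // assumed tag value

// Flush the untagged bytes between *processed and up_to_offset verbatim.
void OutputRawData(const unsigned char* object_start, int up_to_offset,
                   int* processed, std::vector<unsigned char>* sink) {
  int skipped = up_to_offset - *processed;
  if (skipped > 0) {
    sink->push_back(RAW_DATA_SERIALIZATION);
    sink->push_back(static_cast<unsigned char>(skipped));  // real code: PutInt
    for (int i = 0; i < skipped; i++) {
      sink->push_back(object_start[*processed + i]);
    }
  }
  *processed = up_to_offset;
}
```
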
| 2166 | |
| 2167 | |
| 2168 int Serializer2::SpaceOfObject(HeapObject* object) { | |
| 2169 for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) { | |
| 2170 AllocationSpace s = static_cast<AllocationSpace>(i); | |
| 2171 if (Heap::InSpace(object, s)) { | |
| 2172 if (i == LO_SPACE) { | |
| 2173 if (object->IsCode()) { | |
| 2174 return kLargeCode; | |
| 2175 } else if (object->IsFixedArray()) { | |
| 2176 return kLargeFixedArray; | |
| 2177 } else { | |
| 2178 return kLargeData; | |
| 2179 } | |
| 2180 } | |
| 2181 return i; | |
| 2182 } | |
| 2183 } | |
| 2184 UNREACHABLE(); | |
| 2185 return 0; | |
| 2186 } | |
| 2187 | |
| 2188 | |
| 2189 int Serializer2::SpaceOfAlreadySerializedObject(HeapObject* object) { | |
| 2190 for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) { | |
| 2191 AllocationSpace s = static_cast<AllocationSpace>(i); | |
| 2192 if (Heap::InSpace(object, s)) { | |
| 2193 return i; | |
| 2194 } | |
| 2195 } | |
| 2196 UNREACHABLE(); | |
| 2197 return 0; | |
| 2198 } | |
| 2199 | |
| 2200 | |
| 2201 int Serializer2::Allocate(int space, int size) { | |
| 2202 ASSERT(space >= 0 && space < kNumberOfSpaces); | |
| 2203 if (SpaceIsLarge(space)) { | |
| 2204 // In large object space we merely number the objects instead of trying to | |
| 2205 // determine some sort of address. | |
| 2206 return fullness_[LO_SPACE]++; | |
| 2207 } | |
| 2208 if (SpaceIsPaged(space)) { | |
| 2209 // Paged spaces are a little special. We encode their addresses as if the | |
| 2210 // pages were all contiguous and each page were filled up in the range | |
| 2211 // 0 - Page::kObjectAreaSize. In practice the pages may not be contiguous | |
| 2212 // and allocation does not start at offset 0 in the page, but this scheme | |
| 2213 // means the deserializer can get the page number quickly by shifting the | |
| 2214 // serialized address. | |
| 2215 ASSERT(IsPowerOf2(Page::kPageSize)); | |
| 2216 int used_in_this_page = (fullness_[space] & (Page::kPageSize - 1)); | |
| 2217 ASSERT(size <= Page::kObjectAreaSize); | |
| 2218 if (used_in_this_page + size > Page::kObjectAreaSize) { | |
| 2219 fullness_[space] = RoundUp(fullness_[space], Page::kPageSize); | |
| 2220 } | |
| 2221 } | |
| 2222 int allocation_address = fullness_[space]; | |
| 2223 fullness_[space] = allocation_address + size; | |
| 2224 return allocation_address; | |
| 2225 } | |
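
The comment block in `Allocate` states the key invariant of the whole scheme: addresses are encoded as if every page were full and contiguous, so the deserializer recovers a page number with one shift (as `GetAddress` does above), and an object that would straddle the usable area is bumped to the start of the next page on both sides. A small runnable check of the round-up step, with an assumed page size; the real constants are `Page::kPageSize` and `Page::kObjectAreaSize`:

```cpp
#include <cassert>

const int kPageSize = 1 << 13;               // assumed 8 KB pages
const int kObjectAreaSize = kPageSize - 32;  // assumed usable area per page

int RoundUp(int value, int alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

int main() {
  int fullness = 8000;  // near the end of page 0
  int size = 512;       // would overflow the usable area of page 0
  if ((fullness & (kPageSize - 1)) + size > kObjectAreaSize) {
    fullness = RoundUp(fullness, kPageSize);  // skip to page 1
  }
  assert(fullness == 8192);
  assert((fullness >> 13) == 1);  // the deserializer sees page 1
  return 0;
}
```
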
| 2226 | |
| 2227 | |
| 1743 } } // namespace v8::internal | 2228 } } // namespace v8::internal | 