| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 300 matching lines...) |
| 311 "StoreBuffer::StoreBufferOverflow"); | 311 "StoreBuffer::StoreBufferOverflow"); |
| 312 Add(ExternalReference:: | 312 Add(ExternalReference:: |
| 313 incremental_evacuation_record_write_function(isolate).address(), | 313 incremental_evacuation_record_write_function(isolate).address(), |
| 314 RUNTIME_ENTRY, | 314 RUNTIME_ENTRY, |
| 315 7, | 315 7, |
| 316 "IncrementalMarking::RecordWrite"); | 316 "IncrementalMarking::RecordWrite"); |
| 317 | 317 |
| 318 | 318 |
| 319 | 319 |
| 320 // Miscellaneous | 320 // Miscellaneous |
| 321 Add(ExternalReference::roots_address(isolate).address(), | 321 Add(ExternalReference::roots_array_start(isolate).address(), |
| 322 UNCLASSIFIED, | 322 UNCLASSIFIED, |
| 323 3, | 323 3, |
| 324 "Heap::roots_address()"); | 324 "Heap::roots_array_start()"); |
| 325 Add(ExternalReference::address_of_stack_limit(isolate).address(), | 325 Add(ExternalReference::address_of_stack_limit(isolate).address(), |
| 326 UNCLASSIFIED, | 326 UNCLASSIFIED, |
| 327 4, | 327 4, |
| 328 "StackGuard::address_of_jslimit()"); | 328 "StackGuard::address_of_jslimit()"); |
| 329 Add(ExternalReference::address_of_real_stack_limit(isolate).address(), | 329 Add(ExternalReference::address_of_real_stack_limit(isolate).address(), |
| 330 UNCLASSIFIED, | 330 UNCLASSIFIED, |
| 331 5, | 331 5, |
| 332 "StackGuard::address_of_real_jslimit()"); | 332 "StackGuard::address_of_real_jslimit()"); |
| 333 #ifndef V8_INTERPRETED_REGEXP | 333 #ifndef V8_INTERPRETED_REGEXP |
| 334 Add(ExternalReference::address_of_regexp_stack_limit(isolate).address(), | 334 Add(ExternalReference::address_of_regexp_stack_limit(isolate).address(), |
| (...skipping 148 matching lines...) |
| 483 42, | 483 42, |
| 484 "power_double_int_function"); | 484 "power_double_int_function"); |
| 485 Add(ExternalReference::store_buffer_top(isolate).address(), | 485 Add(ExternalReference::store_buffer_top(isolate).address(), |
| 486 UNCLASSIFIED, | 486 UNCLASSIFIED, |
| 487 43, | 487 43, |
| 488 "store_buffer_top"); | 488 "store_buffer_top"); |
| 489 Add(ExternalReference::address_of_canonical_non_hole_nan().address(), | 489 Add(ExternalReference::address_of_canonical_non_hole_nan().address(), |
| 490 UNCLASSIFIED, | 490 UNCLASSIFIED, |
| 491 44, | 491 44, |
| 492 "canonical_nan"); | 492 "canonical_nan"); |
| 493 Add(ExternalReference::address_of_the_hole_nan().address(), |
| 494 UNCLASSIFIED, |
| 495 45, |
| 496 "the_hole_nan"); |
| 493 } | 497 } |
| 494 | 498 |
| 495 | 499 |
| 496 ExternalReferenceEncoder::ExternalReferenceEncoder() | 500 ExternalReferenceEncoder::ExternalReferenceEncoder() |
| 497 : encodings_(Match), | 501 : encodings_(Match), |
| 498 isolate_(Isolate::Current()) { | 502 isolate_(Isolate::Current()) { |
| 499 ExternalReferenceTable* external_references = | 503 ExternalReferenceTable* external_references = |
| 500 ExternalReferenceTable::instance(isolate_); | 504 ExternalReferenceTable::instance(isolate_); |
| 501 for (int i = 0; i < external_references->size(); ++i) { | 505 for (int i = 0; i < external_references->size(); ++i) { |
| 502 Put(external_references->address(i), i); | 506 Put(external_references->address(i), i); |
| (...skipping 243 matching lines...) |
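For readers new to this code: the encoder built above is a one-time map from external addresses to table indices, and decoding is the inverse array lookup. A minimal standalone sketch (the stand-in table and names are invented for illustration, not V8 API):

    #include <cstdio>
    #include <map>

    int main() {
      // Stand-ins for ExternalReferenceTable entries.
      const void* table[] = {"ref_a", "ref_b", "ref_c"};
      std::map<const void*, int> encodings;
      for (int i = 0; i < 3; i++) encodings[table[i]] = i;  // Put(address(i), i)
      int id = encodings.at(table[1]);                      // Encode(address)
      const void* decoded = table[id];                      // Decode(id)
      std::printf("id %d round-trips: %s\n", id,
                  decoded == table[1] ? "yes" : "no");
      return 0;
    }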
| 746 dest_space = isolate->heap()->lo_space(); \ | 750 dest_space = isolate->heap()->lo_space(); \ |
| 747 } | 751 } |
| 748 | 752 |
| 749 | 753 |
| 750 static const int kUnknownOffsetFromStart = -1; | 754 static const int kUnknownOffsetFromStart = -1; |
| 751 | 755 |
| 752 | 756 |
| 753 void Deserializer::ReadChunk(Object** current, | 757 void Deserializer::ReadChunk(Object** current, |
| 754 Object** limit, | 758 Object** limit, |
| 755 int source_space, | 759 int source_space, |
| 756 Address address) { | 760 Address current_object_address) { |
| 757 Isolate* const isolate = isolate_; | 761 Isolate* const isolate = isolate_; |
| 762 bool write_barrier_needed = (current_object_address != NULL && |
| 763 source_space != NEW_SPACE && |
| 764 source_space != CELL_SPACE && |
| 765 source_space != CODE_SPACE && |
| 766 source_space != OLD_DATA_SPACE); |
| 758 while (current < limit) { | 767 while (current < limit) { |
| 759 int data = source_->Get(); | 768 int data = source_->Get(); |
| 760 switch (data) { | 769 switch (data) { |
| 761 #define CASE_STATEMENT(where, how, within, space_number) \ | 770 #define CASE_STATEMENT(where, how, within, space_number) \ |
| 762 case where + how + within + space_number: \ | 771 case where + how + within + space_number: \ |
| 763 ASSERT((where & ~kPointedToMask) == 0); \ | 772 ASSERT((where & ~kPointedToMask) == 0); \ |
| 764 ASSERT((how & ~kHowToCodeMask) == 0); \ | 773 ASSERT((how & ~kHowToCodeMask) == 0); \ |
| 765 ASSERT((within & ~kWhereToPointMask) == 0); \ | 774 ASSERT((within & ~kWhereToPointMask) == 0); \ |
| 766 ASSERT((space_number & ~kSpaceMask) == 0); | 775 ASSERT((space_number & ~kSpaceMask) == 0); |
| 767 | 776 |
| 768 #define CASE_BODY(where, how, within, space_number_if_any, offset_from_start) \ | 777 #define CASE_BODY(where, how, within, space_number_if_any, offset_from_start) \ |
| 769 { \ | 778 { \ |
| 770 bool emit_write_barrier = false; \ | 779 bool emit_write_barrier = false; \ |
| 771 bool current_was_incremented = false; \ | 780 bool current_was_incremented = false; \ |
| 772 int space_number = space_number_if_any == kAnyOldSpace ? \ | 781 int space_number = space_number_if_any == kAnyOldSpace ? \ |
| 773 (data & kSpaceMask) : space_number_if_any; \ | 782 (data & kSpaceMask) : space_number_if_any; \ |
| 774 if (where == kNewObject && how == kPlain && within == kStartOfObject) {\ | 783 if (where == kNewObject && how == kPlain && within == kStartOfObject) {\ |
| 775 ASSIGN_DEST_SPACE(space_number) \ | 784 ASSIGN_DEST_SPACE(space_number) \ |
| 776 ReadObject(space_number, dest_space, current); \ | 785 ReadObject(space_number, dest_space, current); \ |
| 777 emit_write_barrier = (space_number == NEW_SPACE && \ | 786 emit_write_barrier = (space_number == NEW_SPACE); \ |
| 778 source_space != NEW_SPACE && \ | |
| 779 source_space != CELL_SPACE); \ | |
| 780 } else { \ | 787 } else { \ |
| 781 Object* new_object = NULL; /* May not be a real Object pointer. */ \ | 788 Object* new_object = NULL; /* May not be a real Object pointer. */ \ |
| 782 if (where == kNewObject) { \ | 789 if (where == kNewObject) { \ |
| 783 ASSIGN_DEST_SPACE(space_number) \ | 790 ASSIGN_DEST_SPACE(space_number) \ |
| 784 ReadObject(space_number, dest_space, &new_object); \ | 791 ReadObject(space_number, dest_space, &new_object); \ |
| 785 } else if (where == kRootArray) { \ | 792 } else if (where == kRootArray) { \ |
| 786 int root_id = source_->GetInt(); \ | 793 int root_id = source_->GetInt(); \ |
| 787 new_object = isolate->heap()->roots_address()[root_id]; \ | 794 new_object = isolate->heap()->roots_array_start()[root_id]; \ |
| 795 emit_write_barrier = isolate->heap()->InNewSpace(new_object); \ |
| 788 } else if (where == kPartialSnapshotCache) { \ | 796 } else if (where == kPartialSnapshotCache) { \ |
| 789 int cache_index = source_->GetInt(); \ | 797 int cache_index = source_->GetInt(); \ |
| 790 new_object = isolate->serialize_partial_snapshot_cache() \ | 798 new_object = isolate->serialize_partial_snapshot_cache() \ |
| 791 [cache_index]; \ | 799 [cache_index]; \ |
| 800 emit_write_barrier = isolate->heap()->InNewSpace(new_object); \ |
| 792 } else if (where == kExternalReference) { \ | 801 } else if (where == kExternalReference) { \ |
| 793 int reference_id = source_->GetInt(); \ | 802 int reference_id = source_->GetInt(); \ |
| 794 Address address = external_reference_decoder_-> \ | 803 Address address = external_reference_decoder_-> \ |
| 795 Decode(reference_id); \ | 804 Decode(reference_id); \ |
| 796 new_object = reinterpret_cast<Object*>(address); \ | 805 new_object = reinterpret_cast<Object*>(address); \ |
| 797 } else if (where == kBackref) { \ | 806 } else if (where == kBackref) { \ |
| 798 emit_write_barrier = (space_number == NEW_SPACE && \ | 807 emit_write_barrier = (space_number == NEW_SPACE); \ |
| 799 source_space != NEW_SPACE && \ | |
| 800 source_space != CELL_SPACE); \ | |
| 801 new_object = GetAddressFromEnd(data & kSpaceMask); \ | 808 new_object = GetAddressFromEnd(data & kSpaceMask); \ |
| 802 } else { \ | 809 } else { \ |
| 803 ASSERT(where == kFromStart); \ | 810 ASSERT(where == kFromStart); \ |
| 804 if (offset_from_start == kUnknownOffsetFromStart) { \ | 811 if (offset_from_start == kUnknownOffsetFromStart) { \ |
| 805 emit_write_barrier = (space_number == NEW_SPACE && \ | 812 emit_write_barrier = (space_number == NEW_SPACE); \ |
| 806 source_space != NEW_SPACE && \ | |
| 807 source_space != CELL_SPACE); \ | |
| 808 new_object = GetAddressFromStart(data & kSpaceMask); \ | 813 new_object = GetAddressFromStart(data & kSpaceMask); \ |
| 809 } else { \ | 814 } else { \ |
| 810 Address object_address = pages_[space_number][0] + \ | 815 Address object_address = pages_[space_number][0] + \ |
| 811 (offset_from_start << kObjectAlignmentBits); \ | 816 (offset_from_start << kObjectAlignmentBits); \ |
| 812 new_object = HeapObject::FromAddress(object_address); \ | 817 new_object = HeapObject::FromAddress(object_address); \ |
| 813 } \ | 818 } \ |
| 814 } \ | 819 } \ |
| 815 if (within == kFirstInstruction) { \ | 820 if (within == kFirstInstruction) { \ |
| 816 Code* new_code_object = reinterpret_cast<Code*>(new_object); \ | 821 Code* new_code_object = reinterpret_cast<Code*>(new_object); \ |
| 817 new_object = reinterpret_cast<Object*>( \ | 822 new_object = reinterpret_cast<Object*>( \ |
| 818 new_code_object->instruction_start()); \ | 823 new_code_object->instruction_start()); \ |
| 819 } \ | 824 } \ |
| 820 if (how == kFromCode) { \ | 825 if (how == kFromCode) { \ |
| 821 Address location_of_branch_data = \ | 826 Address location_of_branch_data = \ |
| 822 reinterpret_cast<Address>(current); \ | 827 reinterpret_cast<Address>(current); \ |
| 823 Assembler::set_target_at(location_of_branch_data, \ | 828 Assembler::set_target_at(location_of_branch_data, \ |
| 824 reinterpret_cast<Address>(new_object)); \ | 829 reinterpret_cast<Address>(new_object)); \ |
| 825 if (within == kFirstInstruction) { \ | 830 if (within == kFirstInstruction) { \ |
| 826 location_of_branch_data += Assembler::kCallTargetSize; \ | 831 location_of_branch_data += Assembler::kCallTargetSize; \ |
| 827 current = reinterpret_cast<Object**>(location_of_branch_data); \ | 832 current = reinterpret_cast<Object**>(location_of_branch_data); \ |
| 828 current_was_incremented = true; \ | 833 current_was_incremented = true; \ |
| 829 } \ | 834 } \ |
| 830 } else { \ | 835 } else { \ |
| 831 *current = new_object; \ | 836 *current = new_object; \ |
| 832 } \ | 837 } \ |
| 833 } \ | 838 } \ |
| 834 if (emit_write_barrier) { \ | 839 if (emit_write_barrier && write_barrier_needed) { \ |
| 835 isolate->heap()->RecordWrite(address, static_cast<int>( \ | 840 Address current_address = reinterpret_cast<Address>(current); \ |
| 836 reinterpret_cast<Address>(current) - address)); \ | 841 isolate->heap()->RecordWrite( \ |
| 842 current_object_address, \ |
| 843 static_cast<int>(current_address - current_object_address)); \ |
| 837 } \ | 844 } \ |
| 838 if (!current_was_incremented) { \ | 845 if (!current_was_incremented) { \ |
| 839 current++; /* Increment current if it wasn't done above. */ \ | 846 current++; \ |
| 840 } \ | 847 } \ |
| 841 break; \ | 848 break; \ |
| 842 } \ | 849 } \ |
| 843 | 850 |
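For orientation, the CASE_STATEMENT/CASE_BODY pair relies on each serialized opcode being a sum of disjoint bit fields, so the space number can be masked back out of the byte. A toy sketch of that packing (field values assumed, not V8's real constants):

    #include <cassert>

    const int kSpaceMask = 0x07;                                // assumed: low 3 bits
    const int kNewObject = 0x00, kBackref = 0x08;               // "where" field
    const int kPlain = 0x00, kFromCode = 0x40;                  // "how" field
    const int kStartOfObject = 0x00, kFirstInstruction = 0x20;  // "within" field

    int main() {
      const int CODE_SPACE = 3;
      // One case label, e.g. kBackref + kPlain + kStartOfObject + CODE_SPACE:
      int data = kBackref + kPlain + kStartOfObject + CODE_SPACE;
      // Disjoint fields make + and | interchangeable, and the mask recovers
      // the space exactly as "data & kSpaceMask" does in CASE_BODY.
      assert(data == (kBackref | kPlain | kStartOfObject | CODE_SPACE));
      assert((data & kSpaceMask) == CODE_SPACE);
      return 0;
    }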
| 844 // This generates a case and a body for each space. The large object spaces are | 851 // This generates a case and a body for each space. The large object spaces are |
| 845 // very rare in snapshots so they are grouped in one body. | 852 // very rare in snapshots so they are grouped in one body. |
| 846 #define ONE_PER_SPACE(where, how, within) \ | 853 #define ONE_PER_SPACE(where, how, within) \ |
| 847 CASE_STATEMENT(where, how, within, NEW_SPACE) \ | 854 CASE_STATEMENT(where, how, within, NEW_SPACE) \ |
| 848 CASE_BODY(where, how, within, NEW_SPACE, kUnknownOffsetFromStart) \ | 855 CASE_BODY(where, how, within, NEW_SPACE, kUnknownOffsetFromStart) \ |
| 849 CASE_STATEMENT(where, how, within, OLD_DATA_SPACE) \ | 856 CASE_STATEMENT(where, how, within, OLD_DATA_SPACE) \ |
| (...skipping 26 matching lines...) |
| 876 CASE_STATEMENT(where, how, within, kLargeCode) \ | 883 CASE_STATEMENT(where, how, within, kLargeCode) \ |
| 877 CASE_STATEMENT(where, how, within, kLargeFixedArray) \ | 884 CASE_STATEMENT(where, how, within, kLargeFixedArray) \ |
| 878 CASE_BODY(where, how, within, kAnyOldSpace, kUnknownOffsetFromStart) | 885 CASE_BODY(where, how, within, kAnyOldSpace, kUnknownOffsetFromStart) |
| 879 | 886 |
| 880 #define ONE_PER_CODE_SPACE(where, how, within) \ | 887 #define ONE_PER_CODE_SPACE(where, how, within) \ |
| 881 CASE_STATEMENT(where, how, within, CODE_SPACE) \ | 888 CASE_STATEMENT(where, how, within, CODE_SPACE) \ |
| 882 CASE_BODY(where, how, within, CODE_SPACE, kUnknownOffsetFromStart) \ | 889 CASE_BODY(where, how, within, CODE_SPACE, kUnknownOffsetFromStart) \ |
| 883 CASE_STATEMENT(where, how, within, kLargeCode) \ | 890 CASE_STATEMENT(where, how, within, kLargeCode) \ |
| 884 CASE_BODY(where, how, within, kLargeCode, kUnknownOffsetFromStart) | 891 CASE_BODY(where, how, within, kLargeCode, kUnknownOffsetFromStart) |
| 885 | 892 |
| 886 #define EMIT_COMMON_REFERENCE_PATTERNS(pseudo_space_number, \ | 893 #define FOUR_CASES(byte_code) \ |
| 887 space_number, \ | 894 case byte_code: \ |
| 888 offset_from_start) \ | 895 case byte_code + 1: \ |
| 889 CASE_STATEMENT(kFromStart, kPlain, kStartOfObject, pseudo_space_number) \ | 896 case byte_code + 2: \ |
| 890 CASE_BODY(kFromStart, kPlain, kStartOfObject, space_number, offset_from_start) | 897 case byte_code + 3: |
| 898 |
| 899 #define SIXTEEN_CASES(byte_code) \ |
| 900 FOUR_CASES(byte_code) \ |
| 901 FOUR_CASES(byte_code + 4) \ |
| 902 FOUR_CASES(byte_code + 8) \ |
| 903 FOUR_CASES(byte_code + 12) |
| 891 | 904 |
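The new FOUR_CASES/SIXTEEN_CASES helpers simply generate runs of consecutive case labels, letting one switch body claim a contiguous opcode range. A self-contained sketch (kLowBase is an assumed opcode value):

    #include <cstdio>

    #define FOUR_CASES(byte_code) \
      case byte_code:             \
      case byte_code + 1:         \
      case byte_code + 2:         \
      case byte_code + 3:

    #define SIXTEEN_CASES(byte_code) \
      FOUR_CASES(byte_code)          \
      FOUR_CASES(byte_code + 4)      \
      FOUR_CASES(byte_code + 8)      \
      FOUR_CASES(byte_code + 12)

    const int kLowBase = 0x20;  // assumed base opcode

    int Classify(int data) {
      switch (data) {
        SIXTEEN_CASES(kLowBase)
          return data - kLowBase;  // recover the index from the opcode
        default:
          return -1;
      }
    }

    int main() {
      std::printf("opcode 0x25 -> index %d\n", Classify(0x25));  // prints 5
      return 0;
    }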
| 892 // We generate 15 cases and bodies that process special tags that combine | 905 // We generate 15 cases and bodies that process special tags that combine |
| 893 // the raw data tag and the length into one byte. | 906 // the raw data tag and the length into one byte. |
| 894 #define RAW_CASE(index, size) \ | 907 #define RAW_CASE(index, size) \ |
| 895 case kRawData + index: { \ | 908 case kRawData + index: { \ |
| 896 byte* raw_data_out = reinterpret_cast<byte*>(current); \ | 909 byte* raw_data_out = reinterpret_cast<byte*>(current); \ |
| 897 source_->CopyRaw(raw_data_out, size); \ | 910 source_->CopyRaw(raw_data_out, size); \ |
| 898 current = reinterpret_cast<Object**>(raw_data_out + size); \ | 911 current = reinterpret_cast<Object**>(raw_data_out + size); \ |
| 899 break; \ | 912 break; \ |
| 900 } | 913 } |
| 901 COMMON_RAW_LENGTHS(RAW_CASE) | 914 COMMON_RAW_LENGTHS(RAW_CASE) |
| 902 #undef RAW_CASE | 915 #undef RAW_CASE |
| 903 | 916 |
| 904 // Deserialize a chunk of raw data that doesn't have one of the popular | 917 // Deserialize a chunk of raw data that doesn't have one of the popular |
| 905 // lengths. | 918 // lengths. |
| 906 case kRawData: { | 919 case kRawData: { |
| 907 int size = source_->GetInt(); | 920 int size = source_->GetInt(); |
| 908 byte* raw_data_out = reinterpret_cast<byte*>(current); | 921 byte* raw_data_out = reinterpret_cast<byte*>(current); |
| 909 source_->CopyRaw(raw_data_out, size); | 922 source_->CopyRaw(raw_data_out, size); |
| 910 current = reinterpret_cast<Object**>(raw_data_out + size); | 923 current = reinterpret_cast<Object**>(raw_data_out + size); |
| 911 break; | 924 break; |
| 912 } | 925 } |
| 913 | 926 |
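So raw data has two encodings: the fifteen popular sizes fold the length into the opcode itself (kRawData + index), and everything else pays for an explicit varint length. A sketch of the decode-side split, with an assumed opcode base and size table:

    #include <cstdio>

    const int kRawData = 0x30;                       // assumed opcode base
    const int kPopularSizes[] = {4, 8, 12, 16, 20};  // assumed size table

    int NextLengthFromStream() { return 24; }  // stand-in for source_->GetInt()

    int main() {
      for (int data : {kRawData, kRawData + 2}) {
        int index = data - kRawData;
        int size = (index == 0) ? NextLengthFromStream()     // generic form
                                : kPopularSizes[index - 1];  // folded-in form
        std::printf("opcode 0x%02x -> %d bytes\n", data, size);
      }
      return 0;
    }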
| 927 SIXTEEN_CASES(kRootArrayLowConstants) |
| 928 SIXTEEN_CASES(kRootArrayHighConstants) { |
| 929 int root_id = RootArrayConstantFromByteCode(data); |
| 930 Object* object = isolate->heap()->roots_array_start()[root_id]; |
| 931 ASSERT(!isolate->heap()->InNewSpace(object)); |
| 932 *current++ = object; |
| 933 break; |
| 934 } |
| 935 |
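On the decode side a constant root therefore costs exactly one byte, with the root index recovered arithmetically from the opcode. A sketch of that recovery (bases and split widths assumed):

    #include <cstdio>

    const int kLowBase = 0x20, kHighBase = 0x60, kNumLow = 32;  // assumed values

    int RootArrayConstantFromOpcode(int data) {
      return data >= kHighBase ? kNumLow + (data - kHighBase)
                               : data - kLowBase;
    }

    int main() {
      std::printf("0x22 -> root %d, 0x61 -> root %d\n",
                  RootArrayConstantFromOpcode(0x22),   // 2
                  RootArrayConstantFromOpcode(0x61));  // 33
      return 0;
    }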
| 936 case kRepeat: { |
| 937 int repeats = source_->GetInt(); |
| 938 Object* object = current[-1]; |
| 939 ASSERT(!isolate->heap()->InNewSpace(object)); |
| 940 for (int i = 0; i < repeats; i++) current[i] = object; |
| 941 current += repeats; |
| 942 break; |
| 943 } |
| 944 |
| 945 STATIC_ASSERT(kRootArrayNumberOfConstantEncodings == |
| 946 Heap::kOldSpaceRoots); |
| 947 STATIC_ASSERT(kMaxRepeats == 12); |
| 948 FOUR_CASES(kConstantRepeat) |
| 949 FOUR_CASES(kConstantRepeat + 4) |
| 950 FOUR_CASES(kConstantRepeat + 8) { |
| 951 int repeats = RepeatsForCode(data); |
| 952 Object* object = current[-1]; |
| 953 ASSERT(!isolate->heap()->InNewSpace(object)); |
| 954 for (int i = 0; i < repeats; i++) current[i] = object; |
| 955 current += repeats; |
| 956 break; |
| 957 } |
| 958 |
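Both repeat forms fan the previously written word out over the next N slots; kConstantRepeat merely folds small counts into the opcode. A standalone sketch of the fan-out:

    #include <cstdio>

    // Fans current[-1] out over the next 'repeats' slots, as the kRepeat and
    // kConstantRepeat cases above do with real heap pointers.
    void FanOut(const void** current, int repeats) {
      const void* object = current[-1];
      for (int i = 0; i < repeats; i++) current[i] = object;
    }

    int main() {
      static const int marker = 0;
      const void* slots[8] = {};
      slots[0] = &marker;    // pretend slot 0 was just deserialized
      FanOut(&slots[1], 5);  // one kRepeat with repeats == 5
      std::printf("slots[1..5] all match slots[0]? %s\n",
                  slots[5] == slots[0] ? "yes" : "no");
      return 0;
    }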
| 914 // Deserialize a new object and write a pointer to it to the current | 959 // Deserialize a new object and write a pointer to it to the current |
| 915 // object. | 960 // object. |
| 916 ONE_PER_SPACE(kNewObject, kPlain, kStartOfObject) | 961 ONE_PER_SPACE(kNewObject, kPlain, kStartOfObject) |
| 917 // Support for direct instruction pointers in functions | 962 // Support for direct instruction pointers in functions |
| 918 ONE_PER_CODE_SPACE(kNewObject, kPlain, kFirstInstruction) | 963 ONE_PER_CODE_SPACE(kNewObject, kPlain, kFirstInstruction) |
| 919 // Deserialize a new code object and write a pointer to its first | 964 // Deserialize a new code object and write a pointer to its first |
| 920 // instruction to the current code object. | 965 // instruction to the current code object. |
| 921 ONE_PER_SPACE(kNewObject, kFromCode, kFirstInstruction) | 966 ONE_PER_SPACE(kNewObject, kFromCode, kFirstInstruction) |
| 922 // Find a recently deserialized object using its offset from the current | 967 // Find a recently deserialized object using its offset from the current |
| 923 // allocation point and write a pointer to it to the current object. | 968 // allocation point and write a pointer to it to the current object. |
| 924 ALL_SPACES(kBackref, kPlain, kStartOfObject) | 969 ALL_SPACES(kBackref, kPlain, kStartOfObject) |
| 925 // Find a recently deserialized code object using its offset from the | 970 // Find a recently deserialized code object using its offset from the |
| 926 // current allocation point and write a pointer to its first instruction | 971 // current allocation point and write a pointer to its first instruction |
| 927 // to the current code object or the instruction pointer in a function | 972 // to the current code object or the instruction pointer in a function |
| 928 // object. | 973 // object. |
| 929 ALL_SPACES(kBackref, kFromCode, kFirstInstruction) | 974 ALL_SPACES(kBackref, kFromCode, kFirstInstruction) |
| 930 ALL_SPACES(kBackref, kPlain, kFirstInstruction) | 975 ALL_SPACES(kBackref, kPlain, kFirstInstruction) |
| 931 // Find an already deserialized object using its offset from the start | 976 // Find an already deserialized object using its offset from the start |
| 932 // and write a pointer to it to the current object. | 977 // and write a pointer to it to the current object. |
| 933 ALL_SPACES(kFromStart, kPlain, kStartOfObject) | 978 ALL_SPACES(kFromStart, kPlain, kStartOfObject) |
| 934 ALL_SPACES(kFromStart, kPlain, kFirstInstruction) | 979 ALL_SPACES(kFromStart, kPlain, kFirstInstruction) |
| 935 // Find an already deserialized code object using its offset from the | 980 // Find an already deserialized code object using its offset from the |
| 936 // start and write a pointer to its first instruction to the current code | 981 // start and write a pointer to its first instruction to the current code |
| 937 // object. | 982 // object. |
| 938 ALL_SPACES(kFromStart, kFromCode, kFirstInstruction) | 983 ALL_SPACES(kFromStart, kFromCode, kFirstInstruction) |
| 939 // Find an already deserialized object at one of the predetermined popular | |
| 940 // offsets from the start and write a pointer to it in the current object. | |
| 941 COMMON_REFERENCE_PATTERNS(EMIT_COMMON_REFERENCE_PATTERNS) | |
| 942 // Find an object in the roots array and write a pointer to it to the | 984 // Find an object in the roots array and write a pointer to it to the |
| 943 // current object. | 985 // current object. |
| 944 CASE_STATEMENT(kRootArray, kPlain, kStartOfObject, 0) | 986 CASE_STATEMENT(kRootArray, kPlain, kStartOfObject, 0) |
| 945 CASE_BODY(kRootArray, kPlain, kStartOfObject, 0, kUnknownOffsetFromStart) | 987 CASE_BODY(kRootArray, kPlain, kStartOfObject, 0, kUnknownOffsetFromStart) |
| 946 // Find an object in the partial snapshots cache and write a pointer to it | 988 // Find an object in the partial snapshots cache and write a pointer to it |
| 947 // to the current object. | 989 // to the current object. |
| 948 CASE_STATEMENT(kPartialSnapshotCache, kPlain, kStartOfObject, 0) | 990 CASE_STATEMENT(kPartialSnapshotCache, kPlain, kStartOfObject, 0) |
| 949 CASE_BODY(kPartialSnapshotCache, | 991 CASE_BODY(kPartialSnapshotCache, |
| 950 kPlain, | 992 kPlain, |
| 951 kStartOfObject, | 993 kStartOfObject, |
| (...skipping 21 matching lines...) |
| 973 CASE_BODY(kExternalReference, | 1015 CASE_BODY(kExternalReference, |
| 974 kFromCode, | 1016 kFromCode, |
| 975 kStartOfObject, | 1017 kStartOfObject, |
| 976 0, | 1018 0, |
| 977 kUnknownOffsetFromStart) | 1019 kUnknownOffsetFromStart) |
| 978 | 1020 |
| 979 #undef CASE_STATEMENT | 1021 #undef CASE_STATEMENT |
| 980 #undef CASE_BODY | 1022 #undef CASE_BODY |
| 981 #undef ONE_PER_SPACE | 1023 #undef ONE_PER_SPACE |
| 982 #undef ALL_SPACES | 1024 #undef ALL_SPACES |
| 983 #undef EMIT_COMMON_REFERENCE_PATTERNS | |
| 984 #undef ASSIGN_DEST_SPACE | 1025 #undef ASSIGN_DEST_SPACE |
| 985 | 1026 |
| 986 case kNewPage: { | 1027 case kNewPage: { |
| 987 int space = source_->Get(); | 1028 int space = source_->Get(); |
| 988 pages_[space].Add(last_object_address_); | 1029 pages_[space].Add(last_object_address_); |
| 989 if (space == CODE_SPACE) { | 1030 if (space == CODE_SPACE) { |
| 990 CPU::FlushICache(last_object_address_, Page::kPageSize); | 1031 CPU::FlushICache(last_object_address_, Page::kPageSize); |
| 991 } | 1032 } |
| 992 break; | 1033 break; |
| 993 } | 1034 } |
| (...skipping 66 matching lines...) |
| 1060 sink_->PutSection(character, "TagCharacter"); | 1101 sink_->PutSection(character, "TagCharacter"); |
| 1061 } while (character != 0); | 1102 } while (character != 0); |
| 1062 } | 1103 } |
| 1063 | 1104 |
| 1064 #endif | 1105 #endif |
| 1065 | 1106 |
| 1066 Serializer::Serializer(SnapshotByteSink* sink) | 1107 Serializer::Serializer(SnapshotByteSink* sink) |
| 1067 : sink_(sink), | 1108 : sink_(sink), |
| 1068 current_root_index_(0), | 1109 current_root_index_(0), |
| 1069 external_reference_encoder_(new ExternalReferenceEncoder), | 1110 external_reference_encoder_(new ExternalReferenceEncoder), |
| 1070 large_object_total_(0) { | 1111 large_object_total_(0), |
| 1112 root_index_wave_front_(0) { |
| 1071 // The serializer is meant to be used only to generate initial heap images | 1113 // The serializer is meant to be used only to generate initial heap images |
| 1072 // from a context in which there is only one isolate. | 1114 // from a context in which there is only one isolate. |
| 1073 ASSERT(Isolate::Current()->IsDefaultIsolate()); | 1115 ASSERT(Isolate::Current()->IsDefaultIsolate()); |
| 1074 for (int i = 0; i <= LAST_SPACE; i++) { | 1116 for (int i = 0; i <= LAST_SPACE; i++) { |
| 1075 fullness_[i] = 0; | 1117 fullness_[i] = 0; |
| 1076 } | 1118 } |
| 1077 } | 1119 } |
| 1078 | 1120 |
| 1079 | 1121 |
| 1080 Serializer::~Serializer() { | 1122 Serializer::~Serializer() { |
| (...skipping 36 matching lines...) |
| 1117 } | 1159 } |
| 1118 isolate->set_serialize_partial_snapshot_cache_length( | 1160 isolate->set_serialize_partial_snapshot_cache_length( |
| 1119 Isolate::kPartialSnapshotCacheCapacity); | 1161 Isolate::kPartialSnapshotCacheCapacity); |
| 1120 } | 1162 } |
| 1121 | 1163 |
| 1122 | 1164 |
| 1123 void Serializer::VisitPointers(Object** start, Object** end) { | 1165 void Serializer::VisitPointers(Object** start, Object** end) { |
| 1124 Isolate* isolate = Isolate::Current(); | 1166 Isolate* isolate = Isolate::Current(); |
| 1125 | 1167 |
| 1126 for (Object** current = start; current < end; current++) { | 1168 for (Object** current = start; current < end; current++) { |
| 1169 if (start == isolate->heap()->roots_array_start()) { |
| 1170 root_index_wave_front_ = |
| 1171 Max(root_index_wave_front_, static_cast<intptr_t>(current - start)); |
| 1172 } |
| 1127 if (reinterpret_cast<Address>(current) == | 1173 if (reinterpret_cast<Address>(current) == |
| 1128 isolate->heap()->store_buffer()->TopAddress()) { | 1174 isolate->heap()->store_buffer()->TopAddress()) { |
| 1129 sink_->Put(kSkip, "Skip"); | 1175 sink_->Put(kSkip, "Skip"); |
| 1130 } else if ((*current)->IsSmi()) { | 1176 } else if ((*current)->IsSmi()) { |
| 1131 sink_->Put(kRawData, "RawData"); | 1177 sink_->Put(kRawData, "RawData"); |
| 1132 sink_->PutInt(kPointerSize, "length"); | 1178 sink_->PutInt(kPointerSize, "length"); |
| 1133 for (int i = 0; i < kPointerSize; i++) { | 1179 for (int i = 0; i < kPointerSize; i++) { |
| 1134 sink_->Put(reinterpret_cast<byte*>(current)[i], "Byte"); | 1180 sink_->Put(reinterpret_cast<byte*>(current)[i], "Byte"); |
| 1135 } | 1181 } |
| 1136 } else { | 1182 } else { |
| (...skipping 47 matching lines...) |
| 1184 startup_serializer_->VisitPointer( | 1230 startup_serializer_->VisitPointer( |
| 1185 &isolate->serialize_partial_snapshot_cache()[length]); | 1231 &isolate->serialize_partial_snapshot_cache()[length]); |
| 1186 // We don't recurse from the startup snapshot generator into the partial | 1232 // We don't recurse from the startup snapshot generator into the partial |
| 1187 // snapshot generator. | 1233 // snapshot generator. |
| 1188 ASSERT(length == isolate->serialize_partial_snapshot_cache_length()); | 1234 ASSERT(length == isolate->serialize_partial_snapshot_cache_length()); |
| 1189 isolate->set_serialize_partial_snapshot_cache_length(length + 1); | 1235 isolate->set_serialize_partial_snapshot_cache_length(length + 1); |
| 1190 return length; | 1236 return length; |
| 1191 } | 1237 } |
| 1192 | 1238 |
| 1193 | 1239 |
| 1194 int PartialSerializer::RootIndex(HeapObject* heap_object) { | 1240 int Serializer::RootIndex(HeapObject* heap_object) { |
| 1195 for (int i = 0; i < Heap::kRootListLength; i++) { | 1241 Heap* heap = HEAP; |
| 1196 Object* root = HEAP->roots_address()[i]; | 1242 if (heap->InNewSpace(heap_object)) return kInvalidRootIndex; |
| 1197 if (root == heap_object) return i; | 1243 for (int i = 0; i < root_index_wave_front_; i++) { |
| 1244 Object* root = heap->roots_array_start()[i]; |
| 1245 if (!root->IsSmi() && root == heap_object) return i; |
| 1198 } | 1246 } |
| 1199 return kInvalidRootIndex; | 1247 return kInvalidRootIndex; |
| 1200 } | 1248 } |
| 1201 | 1249 |
| 1202 | 1250 |
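The wave front bounds the linear probe: only root slots the serializer has already visited are candidates, so a hit can never name a root written later in the array. A sketch of the bounded scan (names local to this sketch):

    #include <cstdio>

    const void* roots[] = {"undefined", "null", "true", "false"};
    int root_index_wave_front = 2;  // only the first two roots visited so far

    int RootIndexOf(const void* object) {
      for (int i = 0; i < root_index_wave_front; i++)
        if (roots[i] == object) return i;
      return -1;  // kInvalidRootIndex
    }

    int main() {
      std::printf("null -> %d, true -> %d\n",
                  RootIndexOf(roots[1]),   // 1: behind the wave front
                  RootIndexOf(roots[2]));  // -1: not yet visited
      return 0;
    }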
| 1203 // Encode the location of an already deserialized object in order to write its | 1251 // Encode the location of an already deserialized object in order to write its |
| 1204 // location into a later object. We can encode the location as an offset from | 1252 // location into a later object. We can encode the location as an offset from |
| 1205 // the start of the deserialized objects or as an offset backwards from the | 1253 // the start of the deserialized objects or as an offset backwards from the |
| 1206 // current allocation pointer. | 1254 // current allocation pointer. |
| 1207 void Serializer::SerializeReferenceToPreviousObject( | 1255 void Serializer::SerializeReferenceToPreviousObject( |
| (...skipping 15 matching lines...) |
| 1223 // For new space it is always simple to encode back from current allocation. | 1271 // For new space it is always simple to encode back from current allocation. |
| 1224 if (offset < address) { | 1272 if (offset < address) { |
| 1225 from_start = false; | 1273 from_start = false; |
| 1226 address = offset; | 1274 address = offset; |
| 1227 } | 1275 } |
| 1228 } | 1276 } |
| 1229 // If we are actually dealing with real offsets (and not a numbering of | 1277 // If we are actually dealing with real offsets (and not a numbering of |
| 1230 // all objects) then we should shift out the bits that are always 0. | 1278 // all objects) then we should shift out the bits that are always 0. |
| 1231 if (!SpaceIsLarge(space)) address >>= kObjectAlignmentBits; | 1279 if (!SpaceIsLarge(space)) address >>= kObjectAlignmentBits; |
| 1232 if (from_start) { | 1280 if (from_start) { |
| 1233 #define COMMON_REFS_CASE(pseudo_space, actual_space, offset) \ | 1281 sink_->Put(kFromStart + how_to_code + where_to_point + space, "RefSer"); |
| 1234 if (space == actual_space && address == offset && \ | 1282 sink_->PutInt(address, "address"); |
| 1235 how_to_code == kPlain && where_to_point == kStartOfObject) { \ | |
| 1236 sink_->Put(kFromStart + how_to_code + where_to_point + \ | |
| 1237 pseudo_space, "RefSer"); \ | |
| 1238 } else /* NOLINT */ | |
| 1239 COMMON_REFERENCE_PATTERNS(COMMON_REFS_CASE) | |
| 1240 #undef COMMON_REFS_CASE | |
| 1241 { /* NOLINT */ | |
| 1242 sink_->Put(kFromStart + how_to_code + where_to_point + space, "RefSer"); | |
| 1243 sink_->PutInt(address, "address"); | |
| 1244 } | |
| 1245 } else { | 1283 } else { |
| 1246 sink_->Put(kBackref + how_to_code + where_to_point + space, "BackRefSer"); | 1284 sink_->Put(kBackref + how_to_code + where_to_point + space, "BackRefSer"); |
| 1247 sink_->PutInt(address, "address"); | 1285 sink_->PutInt(address, "address"); |
| 1248 } | 1286 } |
| 1249 } | 1287 } |
| 1250 | 1288 |
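The from_start/backref choice above is purely about encoding size: take whichever of (offset from the space start) and (offset back from the allocation point) is numerically smaller, then shift out the alignment bits. A sketch of that decision with made-up offsets:

    #include <cstdio>

    int main() {
      const int kObjectAlignmentBits = 2;  // assumed alignment
      int space_fullness = 0x5000;         // current allocation offset
      int object_offset = 0x4F80;          // object sits near the top

      int backref = space_fullness - object_offset;
      bool use_from_start = object_offset < backref;
      int payload =
          (use_from_start ? object_offset : backref) >> kObjectAlignmentBits;
      std::printf("%s, payload %d\n",
                  use_from_start ? "kFromStart" : "kBackref", payload);
      return 0;
    }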
| 1251 | 1289 |
| 1252 void StartupSerializer::SerializeObject( | 1290 void StartupSerializer::SerializeObject( |
| 1253 Object* o, | 1291 Object* o, |
| 1254 HowToCode how_to_code, | 1292 HowToCode how_to_code, |
| 1255 WhereToPoint where_to_point) { | 1293 WhereToPoint where_to_point) { |
| 1256 CHECK(o->IsHeapObject()); | 1294 CHECK(o->IsHeapObject()); |
| 1257 HeapObject* heap_object = HeapObject::cast(o); | 1295 HeapObject* heap_object = HeapObject::cast(o); |
| 1258 | 1296 |
| 1297 int root_index; |
| 1298 if ((root_index = RootIndex(heap_object)) != kInvalidRootIndex) { |
| 1299 PutRoot(root_index, heap_object, how_to_code, where_to_point); |
| 1300 return; |
| 1301 } |
| 1302 |
| 1259 if (address_mapper_.IsMapped(heap_object)) { | 1303 if (address_mapper_.IsMapped(heap_object)) { |
| 1260 int space = SpaceOfAlreadySerializedObject(heap_object); | 1304 int space = SpaceOfAlreadySerializedObject(heap_object); |
| 1261 int address = address_mapper_.MappedTo(heap_object); | 1305 int address = address_mapper_.MappedTo(heap_object); |
| 1262 SerializeReferenceToPreviousObject(space, | 1306 SerializeReferenceToPreviousObject(space, |
| 1263 address, | 1307 address, |
| 1264 how_to_code, | 1308 how_to_code, |
| 1265 where_to_point); | 1309 where_to_point); |
| 1266 } else { | 1310 } else { |
| 1267 // Object has not yet been serialized. Serialize it here. | 1311 // Object has not yet been serialized. Serialize it here. |
| 1268 ObjectSerializer object_serializer(this, | 1312 ObjectSerializer object_serializer(this, |
| (...skipping 10 matching lines...) Expand all Loading... |
| 1279 for (int i = Isolate::Current()->serialize_partial_snapshot_cache_length(); | 1323 for (int i = Isolate::Current()->serialize_partial_snapshot_cache_length(); |
| 1280 i < Isolate::kPartialSnapshotCacheCapacity; | 1324 i < Isolate::kPartialSnapshotCacheCapacity; |
| 1281 i++) { | 1325 i++) { |
| 1282 sink_->Put(kRootArray + kPlain + kStartOfObject, "RootSerialization"); | 1326 sink_->Put(kRootArray + kPlain + kStartOfObject, "RootSerialization"); |
| 1283 sink_->PutInt(Heap::kUndefinedValueRootIndex, "root_index"); | 1327 sink_->PutInt(Heap::kUndefinedValueRootIndex, "root_index"); |
| 1284 } | 1328 } |
| 1285 HEAP->IterateWeakRoots(this, VISIT_ALL); | 1329 HEAP->IterateWeakRoots(this, VISIT_ALL); |
| 1286 } | 1330 } |
| 1287 | 1331 |
| 1288 | 1332 |
| 1333 void Serializer::PutRoot(int root_index, |
| 1334 HeapObject* object, |
| 1335 SerializerDeserializer::HowToCode how_to_code, |
| 1336 SerializerDeserializer::WhereToPoint where_to_point) { |
| 1337 if (how_to_code == kPlain && |
| 1338 where_to_point == kStartOfObject && |
| 1339 root_index < kRootArrayNumberOfConstantEncodings && |
| 1340 !HEAP->InNewSpace(object)) { |
| 1341 if (root_index < kRootArrayNumberOfLowConstantEncodings) { |
| 1342 sink_->Put(kRootArrayLowConstants + root_index, "RootLoConstant"); |
| 1343 } else { |
| 1344 sink_->Put(kRootArrayHighConstants + root_index - |
| 1345 kRootArrayNumberOfLowConstantEncodings, |
| 1346 "RootHiConstant"); |
| 1347 } |
| 1348 } else { |
| 1349 sink_->Put(kRootArray + how_to_code + where_to_point, "RootSerialization"); |
| 1350 sink_->PutInt(root_index, "root_index"); |
| 1351 } |
| 1352 } |
| 1353 |
| 1354 |
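PutRoot's fast path in one compact model: constant roots below the threshold get a single opcode byte, split across a low and a high range; everything else falls back to kRootArray plus a varint index. A sketch mirroring the decode sketch earlier (bases and widths assumed):

    #include <cstdio>

    const int kLowBase = 0x20, kHighBase = 0x60;  // assumed opcode bases
    const int kNumLow = 32, kNumConstants = 64;   // assumed encoding split

    // Returns the one-byte encoding, or -1 when the generic form is needed.
    int EncodeRootConstant(int root_index) {
      if (root_index >= kNumConstants) return -1;
      return root_index < kNumLow ? kLowBase + root_index
                                  : kHighBase + (root_index - kNumLow);
    }

    int main() {
      std::printf("root 2 -> 0x%02x, root 33 -> 0x%02x, root 80 -> %d\n",
                  EncodeRootConstant(2),    // 0x22
                  EncodeRootConstant(33),   // 0x61
                  EncodeRootConstant(80));  // -1: kRootArray + PutInt instead
      return 0;
    }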
| 1289 void PartialSerializer::SerializeObject( | 1355 void PartialSerializer::SerializeObject( |
| 1290 Object* o, | 1356 Object* o, |
| 1291 HowToCode how_to_code, | 1357 HowToCode how_to_code, |
| 1292 WhereToPoint where_to_point) { | 1358 WhereToPoint where_to_point) { |
| 1293 CHECK(o->IsHeapObject()); | 1359 CHECK(o->IsHeapObject()); |
| 1294 HeapObject* heap_object = HeapObject::cast(o); | 1360 HeapObject* heap_object = HeapObject::cast(o); |
| 1295 | 1361 |
| 1296 int root_index; | 1362 int root_index; |
| 1297 if ((root_index = RootIndex(heap_object)) != kInvalidRootIndex) { | 1363 if ((root_index = RootIndex(heap_object)) != kInvalidRootIndex) { |
| 1298 sink_->Put(kRootArray + how_to_code + where_to_point, "RootSerialization"); | 1364 PutRoot(root_index, heap_object, how_to_code, where_to_point); |
| 1299 sink_->PutInt(root_index, "root_index"); | |
| 1300 return; | 1365 return; |
| 1301 } | 1366 } |
| 1302 | 1367 |
| 1303 if (ShouldBeInThePartialSnapshotCache(heap_object)) { | 1368 if (ShouldBeInThePartialSnapshotCache(heap_object)) { |
| 1304 int cache_index = PartialSnapshotCacheIndex(heap_object); | 1369 int cache_index = PartialSnapshotCacheIndex(heap_object); |
| 1305 sink_->Put(kPartialSnapshotCache + how_to_code + where_to_point, | 1370 sink_->Put(kPartialSnapshotCache + how_to_code + where_to_point, |
| 1306 "PartialSnapshotCache"); | 1371 "PartialSnapshotCache"); |
| 1307 sink_->PutInt(cache_index, "partial_snapshot_cache_index"); | 1372 sink_->PutInt(cache_index, "partial_snapshot_cache_index"); |
| 1308 return; | 1373 return; |
| 1309 } | 1374 } |
| (...skipping 57 matching lines...) |
| 1367 | 1432 |
| 1368 | 1433 |
| 1369 void Serializer::ObjectSerializer::VisitPointers(Object** start, | 1434 void Serializer::ObjectSerializer::VisitPointers(Object** start, |
| 1370 Object** end) { | 1435 Object** end) { |
| 1371 Object** current = start; | 1436 Object** current = start; |
| 1372 while (current < end) { | 1437 while (current < end) { |
| 1373 while (current < end && (*current)->IsSmi()) current++; | 1438 while (current < end && (*current)->IsSmi()) current++; |
| 1374 if (current < end) OutputRawData(reinterpret_cast<Address>(current)); | 1439 if (current < end) OutputRawData(reinterpret_cast<Address>(current)); |
| 1375 | 1440 |
| 1376 while (current < end && !(*current)->IsSmi()) { | 1441 while (current < end && !(*current)->IsSmi()) { |
| 1377 serializer_->SerializeObject(*current, kPlain, kStartOfObject); | 1442 HeapObject* current_contents = HeapObject::cast(*current); |
| 1378 bytes_processed_so_far_ += kPointerSize; | 1443 int root_index = serializer_->RootIndex(current_contents); |
| 1379 current++; | 1444 // Repeats are not subject to the write barrier so there are only some |
| 1445 // objects that can be used in a repeat encoding. These are the early |
| 1446 // ones in the root array that are never in new space. |
| 1447 if (current != start && |
| 1448 root_index != kInvalidRootIndex && |
| 1449 root_index < kRootArrayNumberOfConstantEncodings && |
| 1450 current_contents == current[-1]) { |
| 1451 ASSERT(!HEAP->InNewSpace(current_contents)); |
| 1452 int repeat_count = 1; |
| 1453 while (current < end - 1 && current[repeat_count] == current_contents) { |
| 1454 repeat_count++; |
| 1455 } |
| 1456 current += repeat_count; |
| 1457 bytes_processed_so_far_ += repeat_count * kPointerSize; |
| 1458 if (repeat_count > kMaxRepeats) { |
| 1459 sink_->Put(kRepeat, "SerializeRepeats"); |
| 1460 sink_->PutInt(repeat_count, "SerializeRepeats"); |
| 1461 } else { |
| 1462 sink_->Put(CodeForRepeats(repeat_count), "SerializeRepeats"); |
| 1463 } |
| 1464 } else { |
| 1465 serializer_->SerializeObject(current_contents, kPlain, kStartOfObject); |
| 1466 bytes_processed_so_far_ += kPointerSize; |
| 1467 current++; |
| 1468 } |
| 1380 } | 1469 } |
| 1381 } | 1470 } |
| 1382 } | 1471 } |
| 1383 | 1472 |
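The serializer side of the repeat optimization is a plain run-length scan over the pointer slice, restricted in the real code to old-space constant roots so the skipped write barriers stay safe. A minimal scan sketch:

    #include <cstdio>

    // Walks [start, end) and reports each run as (value, length), the way
    // VisitPointers above turns runs into kRepeat/kConstantRepeat codes.
    void EmitRuns(const int* start, const int* end) {
      for (const int* current = start; current < end;) {
        int value = *current;
        int count = 1;
        while (current + count < end && current[count] == value) count++;
        std::printf("value %d x %d\n", value, count);
        current += count;
      }
    }

    int main() {
      int slots[] = {7, 7, 7, 7, 2, 9, 9};
      EmitRuns(slots, slots + 7);  // 7 x 4, 2 x 1, 9 x 2
      return 0;
    }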
| 1384 | 1473 |
| 1385 void Serializer::ObjectSerializer::VisitExternalReferences(Address* start, | 1474 void Serializer::ObjectSerializer::VisitExternalReferences(Address* start, |
| 1386 Address* end) { | 1475 Address* end) { |
| 1387 Address references_start = reinterpret_cast<Address>(start); | 1476 Address references_start = reinterpret_cast<Address>(start); |
| 1388 OutputRawData(references_start); | 1477 OutputRawData(references_start); |
| 1389 | 1478 |
| (...skipping 163 matching lines...) |
| 1553 fullness_[space] = RoundUp(fullness_[space], Page::kPageSize); | 1642 fullness_[space] = RoundUp(fullness_[space], Page::kPageSize); |
| 1554 } | 1643 } |
| 1555 } | 1644 } |
| 1556 int allocation_address = fullness_[space]; | 1645 int allocation_address = fullness_[space]; |
| 1557 fullness_[space] = allocation_address + size; | 1646 fullness_[space] = allocation_address + size; |
| 1558 return allocation_address; | 1647 return allocation_address; |
| 1559 } | 1648 } |
| 1560 | 1649 |
| 1561 | 1650 |
| 1562 } } // namespace v8::internal | 1651 } } // namespace v8::internal |