OLD | NEW |
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 55 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
66 to_entry_ = &snapshot->entries()[to_index_]; | 66 to_entry_ = &snapshot->entries()[to_index_]; |
67 } | 67 } |
68 | 68 |
69 | 69 |
70 const int HeapEntry::kNoEntry = -1; | 70 const int HeapEntry::kNoEntry = -1; |
71 | 71 |
72 HeapEntry::HeapEntry(HeapSnapshot* snapshot, | 72 HeapEntry::HeapEntry(HeapSnapshot* snapshot, |
73 Type type, | 73 Type type, |
74 const char* name, | 74 const char* name, |
75 SnapshotObjectId id, | 75 SnapshotObjectId id, |
76 size_t self_size) | 76 size_t self_size, |
| 77 unsigned trace_node_id) |
77 : type_(type), | 78 : type_(type), |
78 children_count_(0), | 79 children_count_(0), |
79 children_index_(-1), | 80 children_index_(-1), |
80 self_size_(self_size), | 81 self_size_(self_size), |
| 82 snapshot_(snapshot), |
| 83 name_(name), |
81 id_(id), | 84 id_(id), |
82 snapshot_(snapshot), | 85 trace_node_id_(trace_node_id) { } |
83 name_(name) { } | |
84 | 86 |
85 | 87 |
86 void HeapEntry::SetNamedReference(HeapGraphEdge::Type type, | 88 void HeapEntry::SetNamedReference(HeapGraphEdge::Type type, |
87 const char* name, | 89 const char* name, |
88 HeapEntry* entry) { | 90 HeapEntry* entry) { |
89 HeapGraphEdge edge(type, name, this->index(), entry->index()); | 91 HeapGraphEdge edge(type, name, this->index(), entry->index()); |
90 snapshot_->edges().Add(edge); | 92 snapshot_->edges().Add(edge); |
91 ++children_count_; | 93 ++children_count_; |
92 } | 94 } |
93 | 95 |
(...skipping 88 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
182 | 184 |
183 | 185 |
184 // It is very important to keep objects that form a heap snapshot | 186 // It is very important to keep objects that form a heap snapshot |
185 // as small as possible. | 187 // as small as possible. |
186 namespace { // Avoid littering the global namespace. | 188 namespace { // Avoid littering the global namespace. |
187 | 189 |
188 template <size_t ptr_size> struct SnapshotSizeConstants; | 190 template <size_t ptr_size> struct SnapshotSizeConstants; |
189 | 191 |
190 template <> struct SnapshotSizeConstants<4> { | 192 template <> struct SnapshotSizeConstants<4> { |
191 static const int kExpectedHeapGraphEdgeSize = 12; | 193 static const int kExpectedHeapGraphEdgeSize = 12; |
192 static const int kExpectedHeapEntrySize = 24; | 194 static const int kExpectedHeapEntrySize = 28; |
193 }; | 195 }; |
194 | 196 |
195 template <> struct SnapshotSizeConstants<8> { | 197 template <> struct SnapshotSizeConstants<8> { |
196 static const int kExpectedHeapGraphEdgeSize = 24; | 198 static const int kExpectedHeapGraphEdgeSize = 24; |
197 static const int kExpectedHeapEntrySize = 40; | 199 static const int kExpectedHeapEntrySize = 40; |
198 }; | 200 }; |
199 | 201 |
200 } // namespace | 202 } // namespace |
201 | 203 |
202 | 204 |
(...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
236 max_snapshot_js_object_id_ = profiler_->heap_object_map()->last_assigned_id(); | 238 max_snapshot_js_object_id_ = profiler_->heap_object_map()->last_assigned_id(); |
237 } | 239 } |
238 | 240 |
239 | 241 |
240 HeapEntry* HeapSnapshot::AddRootEntry() { | 242 HeapEntry* HeapSnapshot::AddRootEntry() { |
241 ASSERT(root_index_ == HeapEntry::kNoEntry); | 243 ASSERT(root_index_ == HeapEntry::kNoEntry); |
242 ASSERT(entries_.is_empty()); // Root entry must be the first one. | 244 ASSERT(entries_.is_empty()); // Root entry must be the first one. |
243 HeapEntry* entry = AddEntry(HeapEntry::kSynthetic, | 245 HeapEntry* entry = AddEntry(HeapEntry::kSynthetic, |
244 "", | 246 "", |
245 HeapObjectsMap::kInternalRootObjectId, | 247 HeapObjectsMap::kInternalRootObjectId, |
| 248 0, |
246 0); | 249 0); |
247 root_index_ = entry->index(); | 250 root_index_ = entry->index(); |
248 ASSERT(root_index_ == 0); | 251 ASSERT(root_index_ == 0); |
249 return entry; | 252 return entry; |
250 } | 253 } |
251 | 254 |
252 | 255 |
253 HeapEntry* HeapSnapshot::AddGcRootsEntry() { | 256 HeapEntry* HeapSnapshot::AddGcRootsEntry() { |
254 ASSERT(gc_roots_index_ == HeapEntry::kNoEntry); | 257 ASSERT(gc_roots_index_ == HeapEntry::kNoEntry); |
255 HeapEntry* entry = AddEntry(HeapEntry::kSynthetic, | 258 HeapEntry* entry = AddEntry(HeapEntry::kSynthetic, |
256 "(GC roots)", | 259 "(GC roots)", |
257 HeapObjectsMap::kGcRootsObjectId, | 260 HeapObjectsMap::kGcRootsObjectId, |
| 261 0, |
258 0); | 262 0); |
259 gc_roots_index_ = entry->index(); | 263 gc_roots_index_ = entry->index(); |
260 return entry; | 264 return entry; |
261 } | 265 } |
262 | 266 |
263 | 267 |
264 HeapEntry* HeapSnapshot::AddGcSubrootEntry(int tag) { | 268 HeapEntry* HeapSnapshot::AddGcSubrootEntry(int tag) { |
265 ASSERT(gc_subroot_indexes_[tag] == HeapEntry::kNoEntry); | 269 ASSERT(gc_subroot_indexes_[tag] == HeapEntry::kNoEntry); |
266 ASSERT(0 <= tag && tag < VisitorSynchronization::kNumberOfSyncTags); | 270 ASSERT(0 <= tag && tag < VisitorSynchronization::kNumberOfSyncTags); |
267 HeapEntry* entry = AddEntry( | 271 HeapEntry* entry = AddEntry( |
268 HeapEntry::kSynthetic, | 272 HeapEntry::kSynthetic, |
269 VisitorSynchronization::kTagNames[tag], | 273 VisitorSynchronization::kTagNames[tag], |
270 HeapObjectsMap::GetNthGcSubrootId(tag), | 274 HeapObjectsMap::GetNthGcSubrootId(tag), |
| 275 0, |
271 0); | 276 0); |
272 gc_subroot_indexes_[tag] = entry->index(); | 277 gc_subroot_indexes_[tag] = entry->index(); |
273 return entry; | 278 return entry; |
274 } | 279 } |
275 | 280 |
276 | 281 |
277 HeapEntry* HeapSnapshot::AddEntry(HeapEntry::Type type, | 282 HeapEntry* HeapSnapshot::AddEntry(HeapEntry::Type type, |
278 const char* name, | 283 const char* name, |
279 SnapshotObjectId id, | 284 SnapshotObjectId id, |
280 size_t size) { | 285 size_t size, |
281 HeapEntry entry(this, type, name, id, size); | 286 unsigned trace_node_id) { |
| 287 HeapEntry entry(this, type, name, id, size, trace_node_id); |
282 entries_.Add(entry); | 288 entries_.Add(entry); |
283 return &entries_.last(); | 289 return &entries_.last(); |
284 } | 290 } |
285 | 291 |
286 | 292 |
287 void HeapSnapshot::FillChildren() { | 293 void HeapSnapshot::FillChildren() { |
288 ASSERT(children().is_empty()); | 294 ASSERT(children().is_empty()); |
289 children().Allocate(edges().length()); | 295 children().Allocate(edges().length()); |
290 int children_index = 0; | 296 int children_index = 0; |
291 for (int i = 0; i < entries().length(); ++i) { | 297 for (int i = 0; i < entries().length(); ++i) { |
(...skipping 91 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
383 // When we do lookup in HashMap we see no difference between two cases: | 389 // When we do lookup in HashMap we see no difference between two cases: |
384 // it has an entry with NULL as the value or it has created | 390 // it has an entry with NULL as the value or it has created |
385 // a new entry on the fly with NULL as the default value. | 391 // a new entry on the fly with NULL as the default value. |
386 // With such dummy element we have a guarantee that all entries_map_ entries | 392 // With such dummy element we have a guarantee that all entries_map_ entries |
387 // will have the value field greater than 0. | 393 // will have the value field greater than 0. |
388 // This fact is used in the MoveObject method. | 394 // This fact is used in the MoveObject method. |
389 entries_.Add(EntryInfo(0, NULL, 0)); | 395 entries_.Add(EntryInfo(0, NULL, 0)); |
390 } | 396 } |
391 | 397 |
392 | 398 |
393 void HeapObjectsMap::MoveObject(Address from, Address to, int object_size) { | 399 bool HeapObjectsMap::MoveObject(Address from, Address to, int object_size) { |
394 ASSERT(to != NULL); | 400 ASSERT(to != NULL); |
395 ASSERT(from != NULL); | 401 ASSERT(from != NULL); |
396 if (from == to) return; | 402 if (from == to) return false; |
397 void* from_value = entries_map_.Remove(from, ComputePointerHash(from)); | 403 void* from_value = entries_map_.Remove(from, ComputePointerHash(from)); |
398 if (from_value == NULL) { | 404 if (from_value == NULL) { |
399 // It may occur that some untracked object moves to an address X and there | 405 // It may occur that some untracked object moves to an address X and there |
400 // is a tracked object at that address. In this case we should remove the | 406 // is a tracked object at that address. In this case we should remove the |
401 // entry as we know that the object has died. | 407 // entry as we know that the object has died. |
402 void* to_value = entries_map_.Remove(to, ComputePointerHash(to)); | 408 void* to_value = entries_map_.Remove(to, ComputePointerHash(to)); |
403 if (to_value != NULL) { | 409 if (to_value != NULL) { |
404 int to_entry_info_index = | 410 int to_entry_info_index = |
405 static_cast<int>(reinterpret_cast<intptr_t>(to_value)); | 411 static_cast<int>(reinterpret_cast<intptr_t>(to_value)); |
406 entries_.at(to_entry_info_index).addr = NULL; | 412 entries_.at(to_entry_info_index).addr = NULL; |
(...skipping 20 matching lines...) Expand all Loading... |
427 if (FLAG_heap_profiler_trace_objects) { | 433 if (FLAG_heap_profiler_trace_objects) { |
428 PrintF("Move object from %p to %p old size %6d new size %6d\n", | 434 PrintF("Move object from %p to %p old size %6d new size %6d\n", |
429 from, | 435 from, |
430 to, | 436 to, |
431 entries_.at(from_entry_info_index).size, | 437 entries_.at(from_entry_info_index).size, |
432 object_size); | 438 object_size); |
433 } | 439 } |
434 entries_.at(from_entry_info_index).size = object_size; | 440 entries_.at(from_entry_info_index).size = object_size; |
435 to_entry->value = from_value; | 441 to_entry->value = from_value; |
436 } | 442 } |
| 443 return from_value != NULL; |
437 } | 444 } |
438 | 445 |
439 | 446 |
440 void HeapObjectsMap::UpdateObjectSize(Address addr, int size) { | 447 void HeapObjectsMap::UpdateObjectSize(Address addr, int size) { |
441 FindOrAddEntry(addr, size, false); | 448 FindOrAddEntry(addr, size, false); |
442 } | 449 } |
443 | 450 |
444 | 451 |
445 SnapshotObjectId HeapObjectsMap::FindEntry(Address addr) { | 452 SnapshotObjectId HeapObjectsMap::FindEntry(Address addr) { |
446 HashMap::Entry* entry = entries_map_.Lookup(addr, ComputePointerHash(addr), | 453 HashMap::Entry* entry = entries_map_.Lookup(addr, ComputePointerHash(addr), |
(...skipping 456 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
903 return AddEntry(object->address(), type, name, object->Size()); | 910 return AddEntry(object->address(), type, name, object->Size()); |
904 } | 911 } |
905 | 912 |
906 | 913 |
907 HeapEntry* V8HeapExplorer::AddEntry(Address address, | 914 HeapEntry* V8HeapExplorer::AddEntry(Address address, |
908 HeapEntry::Type type, | 915 HeapEntry::Type type, |
909 const char* name, | 916 const char* name, |
910 size_t size) { | 917 size_t size) { |
911 SnapshotObjectId object_id = heap_object_map_->FindOrAddEntry( | 918 SnapshotObjectId object_id = heap_object_map_->FindOrAddEntry( |
912 address, static_cast<unsigned int>(size)); | 919 address, static_cast<unsigned int>(size)); |
913 return snapshot_->AddEntry(type, name, object_id, size); | 920 unsigned trace_node_id = 0; |
| 921 if (AllocationTracker* allocation_tracker = |
| 922 snapshot_->profiler()->allocation_tracker()) { |
| 923 trace_node_id = |
| 924 allocation_tracker->address_to_trace()->GetTraceNodeId(address); |
| 925 } |
| 926 return snapshot_->AddEntry(type, name, object_id, size, trace_node_id); |
914 } | 927 } |
915 | 928 |
916 | 929 |
917 class GcSubrootsEnumerator : public ObjectVisitor { | 930 class GcSubrootsEnumerator : public ObjectVisitor { |
918 public: | 931 public: |
919 GcSubrootsEnumerator( | 932 GcSubrootsEnumerator( |
920 SnapshotFillerInterface* filler, V8HeapExplorer* explorer) | 933 SnapshotFillerInterface* filler, V8HeapExplorer* explorer) |
921 : filler_(filler), | 934 : filler_(filler), |
922 explorer_(explorer), | 935 explorer_(explorer), |
923 previous_object_count_(0), | 936 previous_object_count_(0), |
(...skipping 1212 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2136 intptr_t elements = info->GetElementCount(); | 2149 intptr_t elements = info->GetElementCount(); |
2137 intptr_t size = info->GetSizeInBytes(); | 2150 intptr_t size = info->GetSizeInBytes(); |
2138 const char* name = elements != -1 | 2151 const char* name = elements != -1 |
2139 ? names_->GetFormatted( | 2152 ? names_->GetFormatted( |
2140 "%s / %" V8_PTR_PREFIX "d entries", info->GetLabel(), elements) | 2153 "%s / %" V8_PTR_PREFIX "d entries", info->GetLabel(), elements) |
2141 : names_->GetCopy(info->GetLabel()); | 2154 : names_->GetCopy(info->GetLabel()); |
2142 return snapshot_->AddEntry( | 2155 return snapshot_->AddEntry( |
2143 entries_type_, | 2156 entries_type_, |
2144 name, | 2157 name, |
2145 heap_object_map_->GenerateId(info), | 2158 heap_object_map_->GenerateId(info), |
2146 size != -1 ? static_cast<int>(size) : 0); | 2159 size != -1 ? static_cast<int>(size) : 0, |
| 2160 0); |
2147 } | 2161 } |
2148 | 2162 |
2149 | 2163 |
2150 NativeObjectsExplorer::NativeObjectsExplorer( | 2164 NativeObjectsExplorer::NativeObjectsExplorer( |
2151 HeapSnapshot* snapshot, | 2165 HeapSnapshot* snapshot, |
2152 SnapshottingProgressReportingInterface* progress) | 2166 SnapshottingProgressReportingInterface* progress) |
2153 : isolate_(snapshot->profiler()->heap_object_map()->heap()->isolate()), | 2167 : isolate_(snapshot->profiler()->heap_object_map()->heap()->isolate()), |
2154 snapshot_(snapshot), | 2168 snapshot_(snapshot), |
2155 names_(snapshot_->profiler()->names()), | 2169 names_(snapshot_->profiler()->names()), |
2156 progress_(progress), | 2170 progress_(progress), |
(...skipping 478 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2635 v8::OutputStream* stream_; | 2649 v8::OutputStream* stream_; |
2636 int chunk_size_; | 2650 int chunk_size_; |
2637 ScopedVector<char> chunk_; | 2651 ScopedVector<char> chunk_; |
2638 int chunk_pos_; | 2652 int chunk_pos_; |
2639 bool aborted_; | 2653 bool aborted_; |
2640 }; | 2654 }; |
2641 | 2655 |
2642 | 2656 |
2643 // type, name|index, to_node. | 2657 // type, name|index, to_node. |
2644 const int HeapSnapshotJSONSerializer::kEdgeFieldsCount = 3; | 2658 const int HeapSnapshotJSONSerializer::kEdgeFieldsCount = 3; |
2645 // type, name, id, self_size, children_index. | 2659 // type, name, id, self_size, edge_count, trace_node_id. |
2646 const int HeapSnapshotJSONSerializer::kNodeFieldsCount = 5; | 2660 const int HeapSnapshotJSONSerializer::kNodeFieldsCount = 6; |
2647 | 2661 |
2648 void HeapSnapshotJSONSerializer::Serialize(v8::OutputStream* stream) { | 2662 void HeapSnapshotJSONSerializer::Serialize(v8::OutputStream* stream) { |
2649 if (AllocationTracker* allocation_tracker = | 2663 if (AllocationTracker* allocation_tracker = |
2650 snapshot_->profiler()->allocation_tracker()) { | 2664 snapshot_->profiler()->allocation_tracker()) { |
2651 allocation_tracker->PrepareForSerialization(); | 2665 allocation_tracker->PrepareForSerialization(); |
2652 } | 2666 } |
2653 ASSERT(writer_ == NULL); | 2667 ASSERT(writer_ == NULL); |
2654 writer_ = new OutputStreamWriter(stream); | 2668 writer_ = new OutputStreamWriter(stream); |
2655 SerializeImpl(); | 2669 SerializeImpl(); |
2656 delete writer_; | 2670 delete writer_; |
(...skipping 119 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2776 edges[i - 1]->from()->index() <= edges[i]->from()->index()); | 2790 edges[i - 1]->from()->index() <= edges[i]->from()->index()); |
2777 SerializeEdge(edges[i], i == 0); | 2791 SerializeEdge(edges[i], i == 0); |
2778 if (writer_->aborted()) return; | 2792 if (writer_->aborted()) return; |
2779 } | 2793 } |
2780 } | 2794 } |
2781 | 2795 |
2782 | 2796 |
2783 void HeapSnapshotJSONSerializer::SerializeNode(HeapEntry* entry) { | 2797 void HeapSnapshotJSONSerializer::SerializeNode(HeapEntry* entry) { |
2784 // The buffer needs space for 4 unsigned ints, 1 size_t, 5 commas, \n and \0 | 2798 // The buffer needs space for 5 unsigned ints, 1 size_t, 6 commas, \n and \0 |
2785 static const int kBufferSize = | 2799 static const int kBufferSize = |
2786 4 * MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned // NOLINT | 2800 5 * MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned // NOLINT |
2787 + MaxDecimalDigitsIn<sizeof(size_t)>::kUnsigned // NOLINT | 2801 + MaxDecimalDigitsIn<sizeof(size_t)>::kUnsigned // NOLINT |
2788 + 5 + 1 + 1; | 2802 + 6 + 1 + 1; |
2789 EmbeddedVector<char, kBufferSize> buffer; | 2803 EmbeddedVector<char, kBufferSize> buffer; |
2790 int buffer_pos = 0; | 2804 int buffer_pos = 0; |
2791 if (entry_index(entry) != 0) { | 2805 if (entry_index(entry) != 0) { |
2792 buffer[buffer_pos++] = ','; | 2806 buffer[buffer_pos++] = ','; |
2793 } | 2807 } |
2794 buffer_pos = utoa(entry->type(), buffer, buffer_pos); | 2808 buffer_pos = utoa(entry->type(), buffer, buffer_pos); |
2795 buffer[buffer_pos++] = ','; | 2809 buffer[buffer_pos++] = ','; |
2796 buffer_pos = utoa(GetStringId(entry->name()), buffer, buffer_pos); | 2810 buffer_pos = utoa(GetStringId(entry->name()), buffer, buffer_pos); |
2797 buffer[buffer_pos++] = ','; | 2811 buffer[buffer_pos++] = ','; |
2798 buffer_pos = utoa(entry->id(), buffer, buffer_pos); | 2812 buffer_pos = utoa(entry->id(), buffer, buffer_pos); |
2799 buffer[buffer_pos++] = ','; | 2813 buffer[buffer_pos++] = ','; |
2800 buffer_pos = utoa(entry->self_size(), buffer, buffer_pos); | 2814 buffer_pos = utoa(entry->self_size(), buffer, buffer_pos); |
2801 buffer[buffer_pos++] = ','; | 2815 buffer[buffer_pos++] = ','; |
2802 buffer_pos = utoa(entry->children_count(), buffer, buffer_pos); | 2816 buffer_pos = utoa(entry->children_count(), buffer, buffer_pos); |
| 2817 buffer[buffer_pos++] = ','; |
| 2818 buffer_pos = utoa(entry->trace_node_id(), buffer, buffer_pos); |
2803 buffer[buffer_pos++] = '\n'; | 2819 buffer[buffer_pos++] = '\n'; |
2804 buffer[buffer_pos++] = '\0'; | 2820 buffer[buffer_pos++] = '\0'; |
2805 writer_->AddString(buffer.start()); | 2821 writer_->AddString(buffer.start()); |
2806 } | 2822 } |
2807 | 2823 |
2808 | 2824 |
2809 void HeapSnapshotJSONSerializer::SerializeNodes() { | 2825 void HeapSnapshotJSONSerializer::SerializeNodes() { |
2810 List<HeapEntry>& entries = snapshot_->entries(); | 2826 List<HeapEntry>& entries = snapshot_->entries(); |
2811 for (int i = 0; i < entries.length(); ++i) { | 2827 for (int i = 0; i < entries.length(); ++i) { |
2812 SerializeNode(&entries[i]); | 2828 SerializeNode(&entries[i]); |
(...skipping 13 matching lines...) Expand all Loading... |
2826 // We use a set of macros to improve readability. | 2842 // We use a set of macros to improve readability. |
2827 #define JSON_A(s) "[" s "]" | 2843 #define JSON_A(s) "[" s "]" |
2828 #define JSON_O(s) "{" s "}" | 2844 #define JSON_O(s) "{" s "}" |
2829 #define JSON_S(s) "\"" s "\"" | 2845 #define JSON_S(s) "\"" s "\"" |
2830 writer_->AddString(JSON_O( | 2846 writer_->AddString(JSON_O( |
2831 JSON_S("node_fields") ":" JSON_A( | 2847 JSON_S("node_fields") ":" JSON_A( |
2832 JSON_S("type") "," | 2848 JSON_S("type") "," |
2833 JSON_S("name") "," | 2849 JSON_S("name") "," |
2834 JSON_S("id") "," | 2850 JSON_S("id") "," |
2835 JSON_S("self_size") "," | 2851 JSON_S("self_size") "," |
2836 JSON_S("edge_count")) "," | 2852 JSON_S("edge_count") "," |
| 2853 JSON_S("trace_node_id")) "," |
2837 JSON_S("node_types") ":" JSON_A( | 2854 JSON_S("node_types") ":" JSON_A( |
2838 JSON_A( | 2855 JSON_A( |
2839 JSON_S("hidden") "," | 2856 JSON_S("hidden") "," |
2840 JSON_S("array") "," | 2857 JSON_S("array") "," |
2841 JSON_S("string") "," | 2858 JSON_S("string") "," |
2842 JSON_S("object") "," | 2859 JSON_S("object") "," |
2843 JSON_S("code") "," | 2860 JSON_S("code") "," |
2844 JSON_S("closure") "," | 2861 JSON_S("closure") "," |
2845 JSON_S("regexp") "," | 2862 JSON_S("regexp") "," |
2846 JSON_S("number") "," | 2863 JSON_S("number") "," |
(...skipping 214 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3061 writer_->AddString("\"<dummy>\""); | 3078 writer_->AddString("\"<dummy>\""); |
3062 for (int i = 1; i < sorted_strings.length(); ++i) { | 3079 for (int i = 1; i < sorted_strings.length(); ++i) { |
3063 writer_->AddCharacter(','); | 3080 writer_->AddCharacter(','); |
3064 SerializeString(sorted_strings[i]); | 3081 SerializeString(sorted_strings[i]); |
3065 if (writer_->aborted()) return; | 3082 if (writer_->aborted()) return; |
3066 } | 3083 } |
3067 } | 3084 } |
3068 | 3085 |
3069 | 3086 |
3070 } } // namespace v8::internal | 3087 } } // namespace v8::internal |
OLD | NEW |