Chromium Code Reviews

Side by Side Diff: src/allocation-tracker.cc

Issue 59583003: Revert "Record allocation stack traces". (Closed)
Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: REBASE, created 7 years, 1 month ago
1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 #include "v8.h"
29
30 #include "allocation-tracker.h"
31
32 #include "heap-snapshot-generator.h"
33 #include "frames-inl.h"
34
35 namespace v8 {
36 namespace internal {
37
38 AllocationTraceNode::AllocationTraceNode(
39 AllocationTraceTree* tree, SnapshotObjectId shared_function_info_id)
40 : tree_(tree),
41 function_id_(shared_function_info_id),
42 total_size_(0),
43 allocation_count_(0),
44 id_(tree->next_node_id()) {
45 }
46
47
48 AllocationTraceNode::~AllocationTraceNode() {
49 }
50
51
52 AllocationTraceNode* AllocationTraceNode::FindChild(SnapshotObjectId id) {
53 for (int i = 0; i < children_.length(); i++) {
54 AllocationTraceNode* node = children_[i];
55 if (node->function_id() == id) return node;
56 }
57 return NULL;
58 }
59
60
61 AllocationTraceNode* AllocationTraceNode::FindOrAddChild(SnapshotObjectId id) {
62 AllocationTraceNode* child = FindChild(id);
63 if (child == NULL) {
64 child = new AllocationTraceNode(tree_, id);
65 children_.Add(child);
66 }
67 return child;
68 }
69
70
71 void AllocationTraceNode::AddAllocation(unsigned size) {
72 total_size_ += size;
73 ++allocation_count_;
74 }
75
76
77 void AllocationTraceNode::Print(int indent, AllocationTracker* tracker) {
78 OS::Print("%10u %10u %*c", total_size_, allocation_count_, indent, ' ');
79 if (tracker != NULL) {
80 const char* name = "<unknown function>";
81 if (function_id_ != 0) {
82 AllocationTracker::FunctionInfo* info =
83 tracker->GetFunctionInfo(function_id_);
84 if (info != NULL) {
85 name = info->name;
86 }
87 }
88 OS::Print("%s #%u", name, id_);
89 } else {
90 OS::Print("%u #%u", function_id_, id_);
91 }
92 OS::Print("\n");
93 indent += 2;
94 for (int i = 0; i < children_.length(); i++) {
95 children_[i]->Print(indent, tracker);
96 }
97 }
98
99
100 AllocationTraceTree::AllocationTraceTree()
101 : next_node_id_(1),
102 root_(this, 0) {
103 }
104
105
106 AllocationTraceTree::~AllocationTraceTree() {
107 }
108
109
110 AllocationTraceNode* AllocationTraceTree::AddPathFromEnd(
111 const Vector<SnapshotObjectId>& path) {
112 AllocationTraceNode* node = root();
113 for (SnapshotObjectId* entry = path.start() + path.length() - 1;
114 entry != path.start() - 1;
115 --entry) {
116 node = node->FindOrAddChild(*entry);
117 }
118 return node;
119 }
120
121
122 void AllocationTraceTree::Print(AllocationTracker* tracker) {
123 OS::Print("[AllocationTraceTree:]\n");
124 OS::Print("Total size | Allocation count | Function id | id\n");
125 root()->Print(0, tracker);
126 }
127
128 void AllocationTracker::DeleteUnresolvedLocation(
129 UnresolvedLocation** location) {
130 delete *location;
131 }
132
133
134 AllocationTracker::FunctionInfo::FunctionInfo()
135 : name(""),
136 script_name(""),
137 script_id(0),
138 line(-1),
139 column(-1) {
140 }
141
142
143 static bool AddressesMatch(void* key1, void* key2) {
144 return key1 == key2;
145 }
146
147
148 AllocationTracker::AllocationTracker(
149 HeapObjectsMap* ids, StringsStorage* names)
150 : ids_(ids),
151 names_(names),
152 id_to_function_info_(AddressesMatch) {
153 }
154
155
156 AllocationTracker::~AllocationTracker() {
157 unresolved_locations_.Iterate(DeleteUnresolvedLocation);
158 }
159
160
161 void AllocationTracker::PrepareForSerialization() {
162 List<UnresolvedLocation*> copy(unresolved_locations_.length());
163 copy.AddAll(unresolved_locations_);
164 unresolved_locations_.Clear();
165 for (int i = 0; i < copy.length(); i++) {
166 copy[i]->Resolve();
167 delete copy[i];
168 }
169 }
170
171
172 void AllocationTracker::NewObjectEvent(Address addr, int size) {
173 DisallowHeapAllocation no_allocation;
174 Heap* heap = ids_->heap();
175
176 // Mark the new block as FreeSpace to make sure the heap is iterable
177 // while we are capturing stack trace.
178 FreeListNode::FromAddress(addr)->set_size(heap, size);
179 ASSERT_EQ(HeapObject::FromAddress(addr)->Size(), size);
180 ASSERT(FreeListNode::IsFreeListNode(HeapObject::FromAddress(addr)));
181
182 Isolate* isolate = heap->isolate();
183 int length = 0;
184 StackTraceFrameIterator it(isolate);
185 while (!it.done() && length < kMaxAllocationTraceLength) {
186 JavaScriptFrame* frame = it.frame();
187 SharedFunctionInfo* shared = frame->function()->shared();
188 SnapshotObjectId id = ids_->FindEntry(shared->address());
189 allocation_trace_buffer_[length++] = id;
190 AddFunctionInfo(shared, id);
191 it.Advance();
192 }
193 AllocationTraceNode* top_node = trace_tree_.AddPathFromEnd(
194 Vector<SnapshotObjectId>(allocation_trace_buffer_, length));
195 top_node->AddAllocation(size);
196 }
197
198
199 static uint32_t SnapshotObjectIdHash(SnapshotObjectId id) {
200 return ComputeIntegerHash(static_cast<uint32_t>(id),
201 v8::internal::kZeroHashSeed);
202 }
203
204
205 AllocationTracker::FunctionInfo* AllocationTracker::GetFunctionInfo(
206 SnapshotObjectId id) {
207 HashMap::Entry* entry = id_to_function_info_.Lookup(
208 reinterpret_cast<void*>(id), SnapshotObjectIdHash(id), false);
209 if (entry == NULL) {
210 return NULL;
211 }
212 return reinterpret_cast<FunctionInfo*>(entry->value);
213 }
214
215
216 void AllocationTracker::AddFunctionInfo(SharedFunctionInfo* shared,
217 SnapshotObjectId id) {
218 HashMap::Entry* entry = id_to_function_info_.Lookup(
219 reinterpret_cast<void*>(id), SnapshotObjectIdHash(id), true);
220 if (entry->value == NULL) {
221 FunctionInfo* info = new FunctionInfo();
222 info->name = names_->GetFunctionName(shared->DebugName());
223 if (shared->script()->IsScript()) {
224 Script* script = Script::cast(shared->script());
225 if (script->name()->IsName()) {
226 Name* name = Name::cast(script->name());
227 info->script_name = names_->GetName(name);
228 }
229 info->script_id = script->id()->value();
230 // Converting start offset into line and column may cause heap
231 // allocations so we postpone them until snapshot serialization.
232 unresolved_locations_.Add(new UnresolvedLocation(
233 script,
234 shared->start_position(),
235 info));
236 }
237 entry->value = info;
238 }
239 }
240
241
242 AllocationTracker::UnresolvedLocation::UnresolvedLocation(
243 Script* script, int start, FunctionInfo* info)
244 : start_position_(start),
245 info_(info) {
246 script_ = Handle<Script>::cast(
247 script->GetIsolate()->global_handles()->Create(script));
248 GlobalHandles::MakeWeak(
249 reinterpret_cast<Object**>(script_.location()),
250 this, &HandleWeakScript);
251 }
252
253
254 AllocationTracker::UnresolvedLocation::~UnresolvedLocation() {
255 if (!script_.is_null()) {
256 script_->GetIsolate()->global_handles()->Destroy(
257 reinterpret_cast<Object**>(script_.location()));
258 }
259 }
260
261
262 void AllocationTracker::UnresolvedLocation::Resolve() {
263 if (script_.is_null()) return;
264 info_->line = GetScriptLineNumber(script_, start_position_);
265 info_->column = GetScriptColumnNumber(script_, start_position_);
266 }
267
268
269 void AllocationTracker::UnresolvedLocation::HandleWeakScript(
270 v8::Isolate* isolate,
271 v8::Persistent<v8::Value>* obj,
272 void* data) {
273 UnresolvedLocation* location = reinterpret_cast<UnresolvedLocation*>(data);
274 location->script_ = Handle<Script>::null();
275 obj->Dispose();
276 }
277
278
279 } } // namespace v8::internal
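
For readers skimming the reverted code above, the following is a minimal standalone sketch of the trie accumulation that AllocationTraceTree::AddPathFromEnd and AllocationTraceNode::AddAllocation implement: each recorded stack trace is a list of function ids, innermost frame first, and the allocation size is charged to the node reached by walking the path from its outer end. This is not part of the CL and not V8 API; the TraceNode type and the sample ids are illustrative assumptions only.

// Minimal sketch of the allocation-trace trie (illustrative only, not V8 code).
#include <cstdio>
#include <memory>
#include <vector>

struct TraceNode {
  unsigned function_id;
  unsigned total_size = 0;
  unsigned allocation_count = 0;
  std::vector<std::unique_ptr<TraceNode>> children;

  explicit TraceNode(unsigned id) : function_id(id) {}

  // Linear scan over children, as in AllocationTraceNode::FindOrAddChild.
  TraceNode* FindOrAddChild(unsigned id) {
    for (auto& child : children) {
      if (child->function_id == id) return child.get();
    }
    children.push_back(std::make_unique<TraceNode>(id));
    return children.back().get();
  }

  // Sizes are charged only to the leaf node reached by the full path.
  void AddAllocation(unsigned size) {
    total_size += size;
    ++allocation_count;
  }

  void Print(int indent) const {
    std::printf("%10u %10u %*c#%u\n", total_size, allocation_count,
                indent + 1, ' ', function_id);
    for (const auto& child : children) child->Print(indent + 2);
  }
};

// Mirrors AddPathFromEnd: the path is recorded innermost frame first, so the
// tree is built by walking the path from its outer (last) entry inward.
TraceNode* AddPathFromEnd(TraceNode* root, const std::vector<unsigned>& path) {
  TraceNode* node = root;
  for (auto it = path.rbegin(); it != path.rend(); ++it) {
    node = node->FindOrAddChild(*it);
  }
  return node;
}

int main() {
  TraceNode root(0);
  // Two allocations from the same call path (function ids, innermost first)
  // and one from a sibling path that shares the outer frames.
  AddPathFromEnd(&root, {3, 2, 1})->AddAllocation(32);
  AddPathFromEnd(&root, {3, 2, 1})->AddAllocation(16);
  AddPathFromEnd(&root, {4, 2, 1})->AddAllocation(8);
  std::printf("Total size | Allocation count | Function id\n");
  root.Print(0);
  return 0;
}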
