Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(1)

Side by Side Diff: src/profile-generator.cc

Issue 1356223004: Move heap and CPU profilers into a dedicated directory. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: rebaseline Created 5 years, 2 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « src/profile-generator.h ('k') | src/profile-generator-inl.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
(Empty)
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/profile-generator.h"
6
7 #include "src/compiler.h"
8 #include "src/debug/debug.h"
9 #include "src/deoptimizer.h"
10 #include "src/global-handles.h"
11 #include "src/profile-generator-inl.h"
12 #include "src/sampler.h"
13 #include "src/scopeinfo.h"
14 #include "src/splay-tree-inl.h"
15 #include "src/unicode.h"
16
17 namespace v8 {
18 namespace internal {
19
20
// JITLineInfoTable maps pc offsets within generated code to 1-based
// source line numbers. Nothing to do on construction/destruction; the
// std::map member manages its own storage.
JITLineInfoTable::JITLineInfoTable() {}


JITLineInfoTable::~JITLineInfoTable() {}
25
26
27 void JITLineInfoTable::SetPosition(int pc_offset, int line) {
28 DCHECK(pc_offset >= 0);
29 DCHECK(line > 0); // The 1-based number of the source line.
30 if (GetSourceLineNumber(pc_offset) != line) {
31 pc_offset_map_.insert(std::make_pair(pc_offset, line));
32 }
33 }
34
35
36 int JITLineInfoTable::GetSourceLineNumber(int pc_offset) const {
37 PcOffsetMap::const_iterator it = pc_offset_map_.lower_bound(pc_offset);
38 if (it == pc_offset_map_.end()) {
39 if (pc_offset_map_.empty()) return v8::CpuProfileNode::kNoLineNumberInfo;
40 return (--pc_offset_map_.end())->second;
41 }
42 return it->second;
43 }
44
45
// Shared sentinel strings. Note they are compared by pointer identity
// elsewhere (e.g. ProfileNode::Print checks kEmptyBailoutReason), so
// these exact objects must be used, not equal-valued copies.
const char* const CodeEntry::kEmptyNamePrefix = "";
const char* const CodeEntry::kEmptyResourceName = "";
const char* const CodeEntry::kEmptyBailoutReason = "";
const char* const CodeEntry::kNoDeoptReason = "";
50
51
CodeEntry::~CodeEntry() {
  // The entry owns its no-frame ranges list and its JIT line info table.
  delete no_frame_ranges_;
  delete line_info_;
}
56
57
58 uint32_t CodeEntry::GetHash() const {
59 uint32_t hash = ComputeIntegerHash(tag(), v8::internal::kZeroHashSeed);
60 if (script_id_ != v8::UnboundScript::kNoScriptId) {
61 hash ^= ComputeIntegerHash(static_cast<uint32_t>(script_id_),
62 v8::internal::kZeroHashSeed);
63 hash ^= ComputeIntegerHash(static_cast<uint32_t>(position_),
64 v8::internal::kZeroHashSeed);
65 } else {
66 hash ^= ComputeIntegerHash(
67 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_prefix_)),
68 v8::internal::kZeroHashSeed);
69 hash ^= ComputeIntegerHash(
70 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_)),
71 v8::internal::kZeroHashSeed);
72 hash ^= ComputeIntegerHash(
73 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(resource_name_)),
74 v8::internal::kZeroHashSeed);
75 hash ^= ComputeIntegerHash(line_number_, v8::internal::kZeroHashSeed);
76 }
77 return hash;
78 }
79
80
81 bool CodeEntry::IsSameFunctionAs(CodeEntry* entry) const {
82 if (this == entry) return true;
83 if (script_id_ != v8::UnboundScript::kNoScriptId) {
84 return script_id_ == entry->script_id_ && position_ == entry->position_;
85 }
86 return name_prefix_ == entry->name_prefix_ && name_ == entry->name_ &&
87 resource_name_ == entry->resource_name_ &&
88 line_number_ == entry->line_number_;
89 }
90
91
92 void CodeEntry::SetBuiltinId(Builtins::Name id) {
93 bit_field_ = TagField::update(bit_field_, Logger::BUILTIN_TAG);
94 bit_field_ = BuiltinIdField::update(bit_field_, id);
95 }
96
97
98 int CodeEntry::GetSourceLine(int pc_offset) const {
99 if (line_info_ && !line_info_->empty()) {
100 return line_info_->GetSourceLineNumber(pc_offset);
101 }
102 return v8::CpuProfileNode::kNoLineNumberInfo;
103 }
104
105
// Copies script-related data from the shared function info into this
// entry: script id, start position and the current bailout reason.
void CodeEntry::FillFunctionInfo(SharedFunctionInfo* shared) {
  // Functions without an actual Script object are skipped.
  if (!shared->script()->IsScript()) return;
  Script* script = Script::cast(shared->script());
  set_script_id(script->id());
  set_position(shared->start_position());
  set_bailout_reason(GetBailoutReason(shared->disable_optimization_reason()));
}
113
114
// Builds the deopt information reported for this entry: the deopt
// reason plus a stack of (script id, source position) frames leading
// to the deopt point, following the inlining chain when present.
CpuProfileDeoptInfo CodeEntry::GetDeoptInfo() {
  DCHECK(has_deopt_info());

  CpuProfileDeoptInfo info;
  info.deopt_reason = deopt_reason_;
  if (inlined_function_infos_.empty()) {
    // No inlining: a single frame at the deopt position suffices.
    info.stack.push_back(CpuProfileDeoptFrame(
        {script_id_, position_ + deopt_position_.position()}));
    return info;
  }
  // Copy the only branch from the inlining tree where the deopt happened.
  SourcePosition position = deopt_position_;
  int inlining_id = InlinedFunctionInfo::kNoParentId;
  // Find the inlined function whose deopt pc offsets (kept sorted, so
  // binary_search applies) contain this entry's pc offset.
  for (size_t i = 0; i < inlined_function_infos_.size(); ++i) {
    InlinedFunctionInfo& current_info = inlined_function_infos_.at(i);
    if (std::binary_search(current_info.deopt_pc_offsets.begin(),
                           current_info.deopt_pc_offsets.end(), pc_offset_)) {
      inlining_id = static_cast<int>(i);
      break;
    }
  }
  // Walk up the inlining chain via parent_id, emitting one frame per level.
  while (inlining_id != InlinedFunctionInfo::kNoParentId) {
    InlinedFunctionInfo& inlined_info = inlined_function_infos_.at(inlining_id);
    info.stack.push_back(
        CpuProfileDeoptFrame({inlined_info.script_id,
                              inlined_info.start_position + position.raw()}));
    position = inlined_info.inline_position;
    inlining_id = inlined_info.parent_id;
  }
  return info;
}
146
147
// Moves the pending deopt info from |entry| into this node's list and
// clears it on the entry so it is reported only once.
void ProfileNode::CollectDeoptInfo(CodeEntry* entry) {
  deopt_infos_.push_back(entry->GetDeoptInfo());
  entry->clear_deopt_info();
}
152
153
154 ProfileNode* ProfileNode::FindChild(CodeEntry* entry) {
155 HashMap::Entry* map_entry = children_.Lookup(entry, CodeEntryHash(entry));
156 return map_entry != NULL ?
157 reinterpret_cast<ProfileNode*>(map_entry->value) : NULL;
158 }
159
160
161 ProfileNode* ProfileNode::FindOrAddChild(CodeEntry* entry) {
162 HashMap::Entry* map_entry =
163 children_.LookupOrInsert(entry, CodeEntryHash(entry));
164 ProfileNode* node = reinterpret_cast<ProfileNode*>(map_entry->value);
165 if (node == NULL) {
166 // New node added.
167 node = new ProfileNode(tree_, entry);
168 map_entry->value = node;
169 children_list_.Add(node);
170 }
171 return node;
172 }
173
174
void ProfileNode::IncrementLineTicks(int src_line) {
  if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) return;
  // Increment a hit counter of a certain source line.
  // Add a new source line if not found.
  // The line number doubles as both the map key and its hash value.
  HashMap::Entry* e =
      line_ticks_.LookupOrInsert(reinterpret_cast<void*>(src_line), src_line);
  DCHECK(e);
  // The tick count is packed directly into the value pointer.
  e->value = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(e->value) + 1);
}
184
185
186 bool ProfileNode::GetLineTicks(v8::CpuProfileNode::LineTick* entries,
187 unsigned int length) const {
188 if (entries == NULL || length == 0) return false;
189
190 unsigned line_count = line_ticks_.occupancy();
191
192 if (line_count == 0) return true;
193 if (length < line_count) return false;
194
195 v8::CpuProfileNode::LineTick* entry = entries;
196
197 for (HashMap::Entry* p = line_ticks_.Start(); p != NULL;
198 p = line_ticks_.Next(p), entry++) {
199 entry->line =
200 static_cast<unsigned int>(reinterpret_cast<uintptr_t>(p->key));
201 entry->hit_count =
202 static_cast<unsigned int>(reinterpret_cast<uintptr_t>(p->value));
203 }
204
205 return true;
206 }
207
208
// Dumps this node and, recursively, its children to stdout for
// debugging: self ticks, name, script id and node id, followed by any
// deopt and bailout details; children are indented two extra columns.
void ProfileNode::Print(int indent) {
  base::OS::Print("%5u %*s %s%s %d #%d", self_ticks_, indent, "",
                  entry_->name_prefix(), entry_->name(), entry_->script_id(),
                  id());
  if (entry_->resource_name()[0] != '\0')
    base::OS::Print(" %s:%d", entry_->resource_name(), entry_->line_number());
  base::OS::Print("\n");
  for (size_t i = 0; i < deopt_infos_.size(); ++i) {
    CpuProfileDeoptInfo& info = deopt_infos_[i];
    base::OS::Print(
        "%*s;;; deopted at script_id: %d position: %d with reason '%s'.\n",
        indent + 10, "", info.stack[0].script_id, info.stack[0].position,
        info.deopt_reason);
    // Frames beyond the first describe the inlining chain.
    for (size_t index = 1; index < info.stack.size(); ++index) {
      base::OS::Print("%*s;;; Inline point: script_id %d position: %d.\n",
                      indent + 10, "", info.stack[index].script_id,
                      info.stack[index].position);
    }
  }
  const char* bailout_reason = entry_->bailout_reason();
  // Pointer comparisons: the sentinel reason strings are shared objects.
  if (bailout_reason != GetBailoutReason(BailoutReason::kNoReason) &&
      bailout_reason != CodeEntry::kEmptyBailoutReason) {
    base::OS::Print("%*s bailed out due to '%s'\n", indent + 10, "",
                    bailout_reason);
  }
  for (HashMap::Entry* p = children_.Start();
       p != NULL;
       p = children_.Next(p)) {
    reinterpret_cast<ProfileNode*>(p->value)->Print(indent + 2);
  }
}
240
241
// Traversal callback for ProfileTree::TraverseDepthFirst that deletes
// every node after its children have been visited. The post-order
// guarantee makes this safe: children are freed before their parent.
class DeleteNodesCallback {
 public:
  void BeforeTraversingChild(ProfileNode*, ProfileNode*) { }

  // Called post-order: all children are already deleted at this point.
  void AfterAllChildrenTraversed(ProfileNode* node) {
    delete node;
  }

  void AfterChildTraversed(ProfileNode*, ProfileNode*) { }
};
252
253
// The tree starts with a synthetic "(root)" entry/node under which all
// sampled paths are added. Node and function ids are handed out
// starting at 1; function_ids_ hashes code entries via CodeEntriesMatch.
ProfileTree::ProfileTree()
    : root_entry_(Logger::FUNCTION_TAG, "(root)"),
      next_node_id_(1),
      root_(new ProfileNode(this, &root_entry_)),
      next_function_id_(1),
      function_ids_(ProfileNode::CodeEntriesMatch) {}
260
261
ProfileTree::~ProfileTree() {
  // Delete all nodes post-order (children before their parents).
  DeleteNodesCallback cb;
  TraverseDepthFirst(&cb);
}
266
267
268 unsigned ProfileTree::GetFunctionId(const ProfileNode* node) {
269 CodeEntry* code_entry = node->entry();
270 HashMap::Entry* entry =
271 function_ids_.LookupOrInsert(code_entry, code_entry->GetHash());
272 if (!entry->value) {
273 entry->value = reinterpret_cast<void*>(next_function_id_++);
274 }
275 return static_cast<unsigned>(reinterpret_cast<uintptr_t>(entry->value));
276 }
277
278
279 ProfileNode* ProfileTree::AddPathFromEnd(const Vector<CodeEntry*>& path,
280 int src_line) {
281 ProfileNode* node = root_;
282 CodeEntry* last_entry = NULL;
283 for (CodeEntry** entry = path.start() + path.length() - 1;
284 entry != path.start() - 1;
285 --entry) {
286 if (*entry != NULL) {
287 node = node->FindOrAddChild(*entry);
288 last_entry = *entry;
289 }
290 }
291 if (last_entry && last_entry->has_deopt_info()) {
292 node->CollectDeoptInfo(last_entry);
293 }
294 node->IncrementSelfTicks();
295 if (src_line != v8::CpuProfileNode::kNoLineNumberInfo) {
296 node->IncrementLineTicks(src_line);
297 }
298 return node;
299 }
300
301
// Simple source/destination node pair.
// NOTE(review): not referenced anywhere in the visible part of this
// file — possibly left over from an earlier tree-cloning routine;
// verify before removing.
struct NodesPair {
  NodesPair(ProfileNode* src, ProfileNode* dst)
      : src(src), dst(dst) { }
  ProfileNode* src;
  ProfileNode* dst;
};
308
309
// Cursor over one node's children; TraverseDepthFirst keeps one
// Position per tree level on an explicit stack instead of recursing.
class Position {
 public:
  explicit Position(ProfileNode* node)
      : node(node), child_idx_(0) { }
  // The child the cursor currently points at (valid only when
  // has_current_child() is true).
  INLINE(ProfileNode* current_child()) {
    return node->children()->at(child_idx_);
  }
  INLINE(bool has_current_child()) {
    return child_idx_ < node->children()->length();
  }
  INLINE(void next_child()) { ++child_idx_; }

  ProfileNode* node;
 private:
  int child_idx_;
};
326
327
// Non-recursive implementation of a depth-first post-order tree traversal.
// The callback sees BeforeTraversingChild/AfterChildTraversed around each
// child and AfterAllChildrenTraversed once a node's subtree is complete.
template <typename Callback>
void ProfileTree::TraverseDepthFirst(Callback* callback) {
  List<Position> stack(10);
  stack.Add(Position(root_));
  while (stack.length() > 0) {
    Position& current = stack.last();
    if (current.has_current_child()) {
      // Descend into the next unvisited child.
      callback->BeforeTraversingChild(current.node, current.current_child());
      stack.Add(Position(current.current_child()));
    } else {
      // All children done: report the node and advance the parent's cursor.
      callback->AfterAllChildrenTraversed(current.node);
      if (stack.length() > 1) {
        Position& parent = stack[stack.length() - 2];
        callback->AfterChildTraversed(parent.node, current.node);
        parent.next_child();
      }
      // Remove child from the stack.
      stack.RemoveLast();
    }
  }
}
350
351
// A profile records its start time immediately on creation; individual
// samples/timestamps are kept only when |record_samples| is true.
CpuProfile::CpuProfile(const char* title, bool record_samples)
    : title_(title),
      record_samples_(record_samples),
      start_time_(base::TimeTicks::HighResolutionNow()) {
}
357
358
359 void CpuProfile::AddPath(base::TimeTicks timestamp,
360 const Vector<CodeEntry*>& path, int src_line) {
361 ProfileNode* top_frame_node = top_down_.AddPathFromEnd(path, src_line);
362 if (record_samples_) {
363 timestamps_.Add(timestamp);
364 samples_.Add(top_frame_node);
365 }
366 }
367
368
// Despite the name, this currently only records the profile's end time.
void CpuProfile::CalculateTotalTicksAndSamplingRate() {
  end_time_ = base::TimeTicks::HighResolutionNow();
}
372
373
// Debug dump of the whole top-down call tree to stdout.
void CpuProfile::Print() {
  base::OS::Print("[Top down]:\n");
  top_down_.Print();
}
378
379
CodeMap::~CodeMap() {}


// Sentinel "no key" value for the code tree configuration.
const CodeMap::CodeTreeConfig::Key CodeMap::CodeTreeConfig::kNoKey = NULL;
384
385
// Registers a code object occupying [addr, addr + size). Entries
// overlapping that range are evicted first so the map stays consistent
// when code space is reused.
void CodeMap::AddCode(Address addr, CodeEntry* entry, unsigned size) {
  DeleteAllCoveredCode(addr, addr + size);
  CodeTree::Locator locator;
  tree_.Insert(addr, &locator);
  locator.set_value(CodeEntryInfo(entry, size));
}
392
393
// Removes every map entry whose code range intersects [start, end).
void CodeMap::DeleteAllCoveredCode(Address start, Address end) {
  List<Address> to_delete;
  // Walk backwards from the last address of the range, collecting the
  // start addresses of all overlapping entries.
  Address addr = end - 1;
  while (addr >= start) {
    CodeTree::Locator locator;
    if (!tree_.FindGreatestLessThan(addr, &locator)) break;
    // Entry [start2, end2) overlaps [start, end) -> schedule removal.
    Address start2 = locator.key(), end2 = start2 + locator.value().size;
    if (start2 < end && start < end2) to_delete.Add(start2);
    addr = start2 - 1;
  }
  // Remove after the scan so the tree isn't mutated while iterating.
  for (int i = 0; i < to_delete.length(); ++i) tree_.Remove(to_delete[i]);
}
406
407
408 CodeEntry* CodeMap::FindEntry(Address addr) {
409 CodeTree::Locator locator;
410 if (tree_.FindGreatestLessThan(addr, &locator)) {
411 // locator.key() <= addr. Need to check that addr is within entry.
412 const CodeEntryInfo& entry = locator.value();
413 if (addr < (locator.key() + entry.size)) {
414 return entry.entry;
415 }
416 }
417 return NULL;
418 }
419
420
// Relocates the code entry registered at |from| to address |to|,
// keeping its size. No-op if the addresses match or |from| is unknown.
void CodeMap::MoveCode(Address from, Address to) {
  if (from == to) return;
  CodeTree::Locator locator;
  if (!tree_.Find(from, &locator)) return;
  CodeEntryInfo entry = locator.value();
  tree_.Remove(from);
  // AddCode also evicts anything already covering the target range.
  AddCode(to, entry.entry, entry.size);
}
429
430
// Prints one code map entry: start address, size and entry name.
void CodeMap::CodeTreePrinter::Call(
    const Address& key, const CodeMap::CodeEntryInfo& value) {
  base::OS::Print("%p %5d %s\n", key, value.size, value.entry->name());
}
435
436
// Debug dump of all registered code ranges to stdout.
void CodeMap::Print() {
  CodeTreePrinter printer;
  tree_.ForEach(&printer);
}
441
442
// The semaphore is initialized to 1 and used as a binary mutex (via
// Wait/Signal) protecting current_profiles_ below.
CpuProfilesCollection::CpuProfilesCollection(Heap* heap)
    : function_and_resource_names_(heap),
      current_profiles_semaphore_(1) {
}
447
448
// List<>::Iterate callbacks that free the pointed-to objects; used by
// ~CpuProfilesCollection below.
static void DeleteCodeEntry(CodeEntry** entry_ptr) {
  delete *entry_ptr;
}


static void DeleteCpuProfile(CpuProfile** profile_ptr) {
  delete *profile_ptr;
}
457
458
CpuProfilesCollection::~CpuProfilesCollection() {
  // The collection owns every profile (finished or in progress) and
  // every code entry it handed out via NewCodeEntry.
  finished_profiles_.Iterate(DeleteCpuProfile);
  current_profiles_.Iterate(DeleteCpuProfile);
  code_entries_.Iterate(DeleteCodeEntry);
}
464
465
466 bool CpuProfilesCollection::StartProfiling(const char* title,
467 bool record_samples) {
468 current_profiles_semaphore_.Wait();
469 if (current_profiles_.length() >= kMaxSimultaneousProfiles) {
470 current_profiles_semaphore_.Signal();
471 return false;
472 }
473 for (int i = 0; i < current_profiles_.length(); ++i) {
474 if (strcmp(current_profiles_[i]->title(), title) == 0) {
475 // Ignore attempts to start profile with the same title...
476 current_profiles_semaphore_.Signal();
477 // ... though return true to force it collect a sample.
478 return true;
479 }
480 }
481 current_profiles_.Add(new CpuProfile(title, record_samples));
482 current_profiles_semaphore_.Signal();
483 return true;
484 }
485
486
487 CpuProfile* CpuProfilesCollection::StopProfiling(const char* title) {
488 const int title_len = StrLength(title);
489 CpuProfile* profile = NULL;
490 current_profiles_semaphore_.Wait();
491 for (int i = current_profiles_.length() - 1; i >= 0; --i) {
492 if (title_len == 0 || strcmp(current_profiles_[i]->title(), title) == 0) {
493 profile = current_profiles_.Remove(i);
494 break;
495 }
496 }
497 current_profiles_semaphore_.Signal();
498
499 if (profile == NULL) return NULL;
500 profile->CalculateTotalTicksAndSamplingRate();
501 finished_profiles_.Add(profile);
502 return profile;
503 }
504
505
506 bool CpuProfilesCollection::IsLastProfile(const char* title) {
507 // Called from VM thread, and only it can mutate the list,
508 // so no locking is needed here.
509 if (current_profiles_.length() != 1) return false;
510 return StrLength(title) == 0
511 || strcmp(current_profiles_[0]->title(), title) == 0;
512 }
513
514
// Detaches a completed profile from the finished list. The profile is
// required to be present; a miss indicates a caller bug (UNREACHABLE).
void CpuProfilesCollection::RemoveProfile(CpuProfile* profile) {
  // Called from VM thread for a completed profile.
  for (int i = 0; i < finished_profiles_.length(); i++) {
    if (profile == finished_profiles_[i]) {
      finished_profiles_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}
525
526
// Records one sampled stack (plus its resolved source line) into every
// profile currently being collected.
void CpuProfilesCollection::AddPathToCurrentProfiles(
    base::TimeTicks timestamp, const Vector<CodeEntry*>& path, int src_line) {
  // As starting / stopping profiles is rare relatively to this
  // method, we don't bother minimizing the duration of lock holding,
  // e.g. copying contents of the list to a local vector.
  current_profiles_semaphore_.Wait();
  for (int i = 0; i < current_profiles_.length(); ++i) {
    current_profiles_[i]->AddPath(timestamp, path, src_line);
  }
  current_profiles_semaphore_.Signal();
}
538
539
540 CodeEntry* CpuProfilesCollection::NewCodeEntry(
541 Logger::LogEventsAndTags tag, const char* name, const char* name_prefix,
542 const char* resource_name, int line_number, int column_number,
543 JITLineInfoTable* line_info, Address instruction_start) {
544 CodeEntry* code_entry =
545 new CodeEntry(tag, name, name_prefix, resource_name, line_number,
546 column_number, line_info, instruction_start);
547 code_entries_.Add(code_entry);
548 return code_entry;
549 }
550
551
// Names of the synthetic code entries used for samples that cannot be
// attributed to real JS code (see EntryForVMState / RecordTickSample).
const char* const ProfileGenerator::kProgramEntryName =
    "(program)";
const char* const ProfileGenerator::kIdleEntryName =
    "(idle)";
const char* const ProfileGenerator::kGarbageCollectorEntryName =
    "(garbage collector)";
const char* const ProfileGenerator::kUnresolvedFunctionName =
    "(unresolved function)";
560
561
// Pre-creates the synthetic code entries; like any other entry they
// are owned by |profiles| (freed in ~CpuProfilesCollection).
ProfileGenerator::ProfileGenerator(CpuProfilesCollection* profiles)
    : profiles_(profiles),
      program_entry_(
          profiles->NewCodeEntry(Logger::FUNCTION_TAG, kProgramEntryName)),
      idle_entry_(
          profiles->NewCodeEntry(Logger::FUNCTION_TAG, kIdleEntryName)),
      gc_entry_(
          profiles->NewCodeEntry(Logger::BUILTIN_TAG,
                                 kGarbageCollectorEntryName)),
      unresolved_entry_(
          profiles->NewCodeEntry(Logger::FUNCTION_TAG,
                                 kUnresolvedFunctionName)) {
}
575
576
// Symbolizes a raw TickSample into a path of code entries and feeds it
// into all active profiles, together with the source line of the
// innermost frame for which line information could be resolved.
void ProfileGenerator::RecordTickSample(const TickSample& sample) {
  // Allocate space for stack frames + pc + function + vm-state.
  ScopedVector<CodeEntry*> entries(sample.frames_count + 3);
  // As actual number of decoded code entries may vary, initialize
  // entries vector with NULL values.
  CodeEntry** entry = entries.start();
  memset(entry, 0, entries.length() * sizeof(*entry));

  // The ProfileNode knows nothing about all versions of generated code for
  // the same JS function. The line number information associated with
  // the latest version of generated code is used to find a source line number
  // for a JS function. Then, the detected source line is passed to
  // ProfileNode to increase the tick count for this source line.
  int src_line = v8::CpuProfileNode::kNoLineNumberInfo;
  bool src_line_not_found = true;

  if (sample.pc != NULL) {
    if (sample.has_external_callback && sample.state == EXTERNAL &&
        sample.top_frame_type == StackFrame::EXIT) {
      // Don't use PC when in external callback code, as it can point
      // inside callback's code, and we will erroneously report
      // that a callback calls itself.
      *entry++ = code_map_.FindEntry(sample.external_callback);
    } else {
      CodeEntry* pc_entry = code_map_.FindEntry(sample.pc);
      // If there is no pc_entry we're likely in native code.
      // Find out, if top of stack was pointing inside a JS function
      // meaning that we have encountered a frameless invocation.
      if (!pc_entry && (sample.top_frame_type == StackFrame::JAVA_SCRIPT ||
                        sample.top_frame_type == StackFrame::OPTIMIZED)) {
        pc_entry = code_map_.FindEntry(sample.tos);
      }
      // If pc is in the function code before it set up stack frame or after the
      // frame was destroyed SafeStackFrameIterator incorrectly thinks that
      // ebp contains return address of the current function and skips caller's
      // frame. Check for this case and just skip such samples.
      if (pc_entry) {
        List<OffsetRange>* ranges = pc_entry->no_frame_ranges();
        int pc_offset =
            static_cast<int>(sample.pc - pc_entry->instruction_start());
        if (ranges) {
          for (int i = 0; i < ranges->length(); i++) {
            OffsetRange& range = ranges->at(i);
            if (range.from <= pc_offset && pc_offset < range.to) {
              // Sample taken in a frameless region: drop it entirely.
              return;
            }
          }
        }
        // Resolve the line for the pc; fall back to the entry's own line.
        src_line = pc_entry->GetSourceLine(pc_offset);
        if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) {
          src_line = pc_entry->line_number();
        }
        src_line_not_found = false;
        *entry++ = pc_entry;

        if (pc_entry->builtin_id() == Builtins::kFunctionCall ||
            pc_entry->builtin_id() == Builtins::kFunctionApply) {
          // When current function is FunctionCall or FunctionApply builtin the
          // top frame is either frame of the calling JS function or internal
          // frame. In the latter case we know the caller for sure but in the
          // former case we don't so we simply replace the frame with
          // 'unresolved' entry.
          if (sample.top_frame_type == StackFrame::JAVA_SCRIPT) {
            *entry++ = unresolved_entry_;
          }
        }
      }
    }

    // Symbolize the remaining stack addresses.
    for (const Address* stack_pos = sample.stack,
                      *stack_end = stack_pos + sample.frames_count;
         stack_pos != stack_end;
         ++stack_pos) {
      *entry = code_map_.FindEntry(*stack_pos);

      // Skip unresolved frames (e.g. internal frame) and get source line of
      // the first JS caller.
      if (src_line_not_found && *entry) {
        int pc_offset =
            static_cast<int>(*stack_pos - (*entry)->instruction_start());
        src_line = (*entry)->GetSourceLine(pc_offset);
        if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) {
          src_line = (*entry)->line_number();
        }
        src_line_not_found = false;
      }

      entry++;
    }
  }

  if (FLAG_prof_browser_mode) {
    bool no_symbolized_entries = true;
    for (CodeEntry** e = entries.start(); e != entry; ++e) {
      if (*e != NULL) {
        no_symbolized_entries = false;
        break;
      }
    }
    // If no frames were symbolized, put the VM state entry in.
    if (no_symbolized_entries) {
      *entry++ = EntryForVMState(sample.state);
    }
  }

  profiles_->AddPathToCurrentProfiles(sample.timestamp, entries, src_line);
}
684
685
686 CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
687 switch (tag) {
688 case GC:
689 return gc_entry_;
690 case JS:
691 case COMPILER:
692 // DOM events handlers are reported as OTHER / EXTERNAL entries.
693 // To avoid confusing people, let's put all these entries into
694 // one bucket.
695 case OTHER:
696 case EXTERNAL:
697 return program_entry_;
698 case IDLE:
699 return idle_entry_;
700 default: return NULL;
701 }
702 }
703
704 } // namespace internal
705 } // namespace v8
OLDNEW
« no previous file with comments | « src/profile-generator.h ('k') | src/profile-generator-inl.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698