| OLD | NEW |
| 1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 13 matching lines...) |
| 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 27 | 27 |
| 28 #include "v8.h" | 28 #include "v8.h" |
| 29 | 29 |
| 30 #include "cpu-profiler-inl.h" | 30 #include "cpu-profiler-inl.h" |
| 31 | 31 |
| 32 #ifdef ENABLE_LOGGING_AND_PROFILING | 32 #ifdef ENABLE_LOGGING_AND_PROFILING |
| 33 | 33 |
| 34 #include "frames-inl.h" |
| 34 #include "log-inl.h" | 35 #include "log-inl.h" |
| 35 | 36 |
| 36 #include "../include/v8-profiler.h" | 37 #include "../include/v8-profiler.h" |
| 37 | 38 |
| 38 namespace v8 { | 39 namespace v8 { |
| 39 namespace internal { | 40 namespace internal { |
| 40 | 41 |
| 41 static const int kEventsBufferSize = 256*KB; | 42 static const int kEventsBufferSize = 256*KB; |
| 42 static const int kTickSamplesBufferChunkSize = 64*KB; | 43 static const int kTickSamplesBufferChunkSize = 64*KB; |
| 43 static const int kTickSamplesBufferChunksCount = 16; | 44 static const int kTickSamplesBufferChunksCount = 16; |
| 44 | 45 |
| 45 | 46 |
| 46 ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator) | 47 ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator) |
| 47 : generator_(generator), | 48 : generator_(generator), |
| 48 running_(false), | 49 running_(false), |
| 49 ticks_buffer_(sizeof(TickSampleEventRecord), | 50 ticks_buffer_(sizeof(TickSampleEventRecord), |
| 50 kTickSamplesBufferChunkSize, | 51 kTickSamplesBufferChunkSize, |
| 51 kTickSamplesBufferChunksCount), | 52 kTickSamplesBufferChunksCount), |
| 52 enqueue_order_(0) { } | 53 enqueue_order_(0) { |
| 54 } |
| 53 | 55 |
| 54 | 56 |
| 55 void ProfilerEventsProcessor::CallbackCreateEvent(Logger::LogEventsAndTags tag, | 57 void ProfilerEventsProcessor::CallbackCreateEvent(Logger::LogEventsAndTags tag, |
| 56 const char* prefix, | 58 const char* prefix, |
| 57 String* name, | 59 String* name, |
| 58 Address start) { | 60 Address start) { |
| 59 if (FilterOutCodeCreateEvent(tag)) return; | 61 if (FilterOutCodeCreateEvent(tag)) return; |
| 60 CodeEventsContainer evt_rec; | 62 CodeEventsContainer evt_rec; |
| 61 CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_; | 63 CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_; |
| 62 rec->type = CodeEventRecord::CODE_CREATION; | 64 rec->type = CodeEventRecord::CODE_CREATION; |
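
For scale, the constants earlier in this hunk size the processor's buffers: a 256 KB code-events buffer, and a ticks buffer of 16 chunks of 64 KB each. A back-of-envelope capacity check; the 256-byte record size is an illustrative assumption, the real figure is `sizeof(TickSampleEventRecord)` and varies by platform:

```cpp
// Capacity estimate for the ticks buffer, from the constants above.
// kRecordSize is an assumed illustrative value, not the real sizeof.
#include <cstdio>

int main() {
  const int kChunkSize = 64 * 1024;  // kTickSamplesBufferChunkSize
  const int kChunksCount = 16;       // kTickSamplesBufferChunksCount
  const int kRecordSize = 256;       // assumption, see note above
  std::printf("ticks buffer: %d KB total, roughly %d samples\n",
              kChunkSize * kChunksCount / 1024,
              (kChunkSize / kRecordSize) * kChunksCount);
  return 0;
}
```
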
| (...skipping 111 matching lines...) |
| 174 CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_; | 176 CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_; |
| 175 rec->type = CodeEventRecord::CODE_CREATION; | 177 rec->type = CodeEventRecord::CODE_CREATION; |
| 176 rec->order = ++enqueue_order_; | 178 rec->order = ++enqueue_order_; |
| 177 rec->start = start; | 179 rec->start = start; |
| 178 rec->entry = generator_->NewCodeEntry(tag, prefix, name); | 180 rec->entry = generator_->NewCodeEntry(tag, prefix, name); |
| 179 rec->size = size; | 181 rec->size = size; |
| 180 events_buffer_.Enqueue(evt_rec); | 182 events_buffer_.Enqueue(evt_rec); |
| 181 } | 183 } |
| 182 | 184 |
| 183 | 185 |
| 186 void ProfilerEventsProcessor::AddCurrentStack() { |
| 187 TickSampleEventRecord record; |
| 188 TickSample* sample = &record.sample; |
| 189 sample->state = VMState::current_state(); |
| 190 sample->pc = reinterpret_cast<Address>(sample); // Not NULL. |
| 191 sample->frames_count = 0; |
| 192 for (StackTraceFrameIterator it; |
| 193 !it.done() && sample->frames_count < TickSample::kMaxFramesCount; |
| 194 it.Advance()) { |
| 195 JavaScriptFrame* frame = it.frame(); |
| 196 sample->stack[sample->frames_count++] = |
| 197 reinterpret_cast<Address>(frame->function()); |
| 198 } |
| 199 record.order = enqueue_order_; |
| 200 ticks_from_vm_buffer_.Enqueue(record); |
| 201 } |
| 202 |
| 203 |
| 184 bool ProfilerEventsProcessor::ProcessCodeEvent(unsigned* dequeue_order) { | 204 bool ProfilerEventsProcessor::ProcessCodeEvent(unsigned* dequeue_order) { |
| 185 if (!events_buffer_.IsEmpty()) { | 205 if (!events_buffer_.IsEmpty()) { |
| 186 CodeEventsContainer record; | 206 CodeEventsContainer record; |
| 187 events_buffer_.Dequeue(&record); | 207 events_buffer_.Dequeue(&record); |
| 188 switch (record.generic.type) { | 208 switch (record.generic.type) { |
| 189 #define PROFILER_TYPE_CASE(type, clss) \ | 209 #define PROFILER_TYPE_CASE(type, clss) \ |
| 190 case CodeEventRecord::type: \ | 210 case CodeEventRecord::type: \ |
| 191 record.clss##_.UpdateCodeMap(generator_->code_map()); \ | 211 record.clss##_.UpdateCodeMap(generator_->code_map()); \ |
| 192 break; | 212 break; |
| 193 | 213 |
| 194 CODE_EVENTS_TYPE_LIST(PROFILER_TYPE_CASE) | 214 CODE_EVENTS_TYPE_LIST(PROFILER_TYPE_CASE) |
| 195 | 215 |
| 196 #undef PROFILER_TYPE_CASE | 216 #undef PROFILER_TYPE_CASE |
| 197 default: return true; // Skip record. | 217 default: return true; // Skip record. |
| 198 } | 218 } |
| 199 *dequeue_order = record.generic.order; | 219 *dequeue_order = record.generic.order; |
| 200 return true; | 220 return true; |
| 201 } | 221 } |
| 202 return false; | 222 return false; |
| 203 } | 223 } |
| 204 | 224 |
| 205 | 225 |
| 206 bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) { | 226 bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) { |
| 207 while (true) { | 227 while (true) { |
| 228 if (!ticks_from_vm_buffer_.IsEmpty() |
| 229 && ticks_from_vm_buffer_.Peek()->order == dequeue_order) { |
| 230 TickSampleEventRecord record; |
| 231 ticks_from_vm_buffer_.Dequeue(&record); |
| 232 generator_->RecordTickSample(record.sample); |
| 233 } |
| 234 |
| 208 const TickSampleEventRecord* rec = | 235 const TickSampleEventRecord* rec = |
| 209 TickSampleEventRecord::cast(ticks_buffer_.StartDequeue()); | 236 TickSampleEventRecord::cast(ticks_buffer_.StartDequeue()); |
| 210 if (rec == NULL) return false; | 237 if (rec == NULL) return !ticks_from_vm_buffer_.IsEmpty(); |
| 211 if (rec->order == dequeue_order) { | 238 if (rec->order == dequeue_order) { |
| 212 generator_->RecordTickSample(rec->sample); | 239 generator_->RecordTickSample(rec->sample); |
| 213 ticks_buffer_.FinishDequeue(); | 240 ticks_buffer_.FinishDequeue(); |
| 214 } else { | 241 } else { |
| 215 return true; | 242 return true; |
| 216 } | 243 } |
| 217 } | 244 } |
| 218 } | 245 } |
| 219 | 246 |
| 220 | 247 |
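
The substance of this hunk is the new VM-thread sample path: `AddCurrentStack()` walks the current JavaScript stack with `StackTraceFrameIterator` (hence the new `frames-inl.h` include), tags the record with the current `enqueue_order_`, and pushes it onto `ticks_from_vm_buffer_`; `ProcessTicks()` then merges that queue with the sampler's ticks buffer by order, and its "nothing to dequeue" result becomes `!ticks_from_vm_buffer_.IsEmpty()` so pending VM-thread samples still count as work. A minimal sketch of that merge, with simplified stand-in types in place of `TickSampleEventRecord` and the real queues:

```cpp
// Sketch of the two-queue merge performed by the patched ProcessTicks().
// Record and std::deque are simplified stand-ins, not the V8 classes.
#include <cstdio>
#include <deque>

struct Record {
  unsigned order;  // enqueue_order_ captured when the record was queued
  int sample;      // stands in for TickSampleEventRecord::sample
};

// Processes records tagged with `dequeue_order`, draining the VM-thread
// queue ahead of the sampler queue on each pass. Returns true while
// records from later epochs (or pending VM-thread samples) remain.
bool ProcessTicks(std::deque<Record>* from_vm,
                  std::deque<Record>* from_sampler,
                  unsigned dequeue_order) {
  while (true) {
    if (!from_vm->empty() && from_vm->front().order == dequeue_order) {
      std::printf("vm-thread sample %d\n", from_vm->front().sample);
      from_vm->pop_front();
    }
    if (from_sampler->empty()) {
      // Mirrors the patched "return !ticks_from_vm_buffer_.IsEmpty()".
      return !from_vm->empty();
    }
    if (from_sampler->front().order != dequeue_order) return true;
    std::printf("sampler tick %d\n", from_sampler->front().sample);
    from_sampler->pop_front();
  }
}

int main() {
  std::deque<Record> vm = {{1, 100}};
  std::deque<Record> sampler = {{1, 7}, {2, 8}};
  // Drains both order-1 records, then stops at the order-2 sampler tick.
  bool more = ProcessTicks(&vm, &sampler, 1);
  std::printf("later records remain: %s\n", more ? "yes" : "no");
  return 0;
}
```
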
| (...skipping 188 matching lines...) |
| 409 CpuProfiler::~CpuProfiler() { | 436 CpuProfiler::~CpuProfiler() { |
| 410 delete token_enumerator_; | 437 delete token_enumerator_; |
| 411 delete profiles_; | 438 delete profiles_; |
| 412 } | 439 } |
| 413 | 440 |
| 414 | 441 |
| 415 void CpuProfiler::StartCollectingProfile(const char* title) { | 442 void CpuProfiler::StartCollectingProfile(const char* title) { |
| 416 if (profiles_->StartProfiling(title, next_profile_uid_++)) { | 443 if (profiles_->StartProfiling(title, next_profile_uid_++)) { |
| 417 StartProcessorIfNotStarted(); | 444 StartProcessorIfNotStarted(); |
| 418 } | 445 } |
| 446 processor_->AddCurrentStack(); |
| 419 } | 447 } |
| 420 | 448 |
| 421 | 449 |
| 422 void CpuProfiler::StartCollectingProfile(String* title) { | 450 void CpuProfiler::StartCollectingProfile(String* title) { |
| 423 if (profiles_->StartProfiling(title, next_profile_uid_++)) { | 451 StartCollectingProfile(profiles_->GetName(title)); |
| 424 StartProcessorIfNotStarted(); | |
| 425 } | |
| 426 } | 452 } |
| 427 | 453 |
| 428 | 454 |
| 429 void CpuProfiler::StartProcessorIfNotStarted() { | 455 void CpuProfiler::StartProcessorIfNotStarted() { |
| 430 if (processor_ == NULL) { | 456 if (processor_ == NULL) { |
| 431 // Disable logging when using the new implementation. | 457 // Disable logging when using the new implementation. |
| 432 saved_logging_nesting_ = Logger::logging_nesting_; | 458 saved_logging_nesting_ = Logger::logging_nesting_; |
| 433 Logger::logging_nesting_ = 0; | 459 Logger::logging_nesting_ = 0; |
| 434 generator_ = new ProfileGenerator(profiles_); | 460 generator_ = new ProfileGenerator(profiles_); |
| 435 processor_ = new ProfilerEventsProcessor(generator_); | 461 processor_ = new ProfilerEventsProcessor(generator_); |
| 436 processor_->Start(); | 462 processor_->Start(); |
| 437 // Enable stack sampling. | |
| 438 // It is important to have it started prior to logging, see issue 683: | |
| 439 // http://code.google.com/p/v8/issues/detail?id=683 | |
| 440 reinterpret_cast<Sampler*>(Logger::ticker_)->Start(); | |
| 441 // Enumerate stuff we already have in the heap. | 463 // Enumerate stuff we already have in the heap. |
| 442 if (Heap::HasBeenSetup()) { | 464 if (Heap::HasBeenSetup()) { |
| 443 Logger::LogCodeObjects(); | 465 Logger::LogCodeObjects(); |
| 444 Logger::LogCompiledFunctions(); | 466 Logger::LogCompiledFunctions(); |
| 445 Logger::LogFunctionObjects(); | 467 Logger::LogFunctionObjects(); |
| 446 Logger::LogAccessorCallbacks(); | 468 Logger::LogAccessorCallbacks(); |
| 447 } | 469 } |
| 470 // Enable stack sampling. |
| 471 reinterpret_cast<Sampler*>(Logger::ticker_)->Start(); |
| 448 } | 472 } |
| 449 } | 473 } |
| 450 | 474 |
| 451 | 475 |
| 452 CpuProfile* CpuProfiler::StopCollectingProfile(const char* title) { | 476 CpuProfile* CpuProfiler::StopCollectingProfile(const char* title) { |
| 453 const double actual_sampling_rate = generator_->actual_sampling_rate(); | 477 const double actual_sampling_rate = generator_->actual_sampling_rate(); |
| 454 StopProcessorIfLastProfile(); | 478 StopProcessorIfLastProfile(); |
| 455 CpuProfile* result = profiles_->StopProfiling(CodeEntry::kNoSecurityToken, | 479 CpuProfile* result = profiles_->StopProfiling(CodeEntry::kNoSecurityToken, |
| 456 title, | 480 title, |
| 457 actual_sampling_rate); | 481 actual_sampling_rate); |
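
Two related changes close out this hunk: `StartCollectingProfile(String*)` now delegates to the `const char*` overload via `profiles_->GetName(title)`, so the new `processor_->AddCurrentStack()` call runs on both entry points, and `Sampler::Start()` moves from before the code-object enumeration to after it (the old comment citing issue 683 is dropped). The reorder presumably lets pre-existing code land in the code map before the first ticks arrive; a schematic sketch of that sequence, with hypothetical stand-in classes rather than the real `Logger`/`Sampler`:

```cpp
// Startup-order sketch: enumerate existing code, then start sampling.
// Profiler and Sampler here are illustrative stand-ins.
#include <cstdio>

struct Sampler {
  void Start() { std::printf("stack sampling enabled\n"); }
};

struct Profiler {
  Sampler sampler_;
  bool started_ = false;

  void EnumerateExistingCode() {
    // Stands in for Logger::LogCodeObjects(), LogCompiledFunctions(),
    // LogFunctionObjects() and LogAccessorCallbacks().
    std::printf("heap code objects enumerated\n");
  }

  void StartProcessorIfNotStarted() {
    if (started_) return;
    started_ = true;
    EnumerateExistingCode();  // fill the code map first...
    sampler_.Start();         // ...then let ticks start flowing
  }
};

int main() {
  Profiler profiler;
  profiler.StartProcessorIfNotStarted();
  profiler.StartProcessorIfNotStarted();  // second call is a no-op
  return 0;
}
```
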
| (...skipping 45 matching lines...) |
| 503 void CpuProfiler::TearDown() { | 527 void CpuProfiler::TearDown() { |
| 504 #ifdef ENABLE_LOGGING_AND_PROFILING | 528 #ifdef ENABLE_LOGGING_AND_PROFILING |
| 505 if (singleton_ != NULL) { | 529 if (singleton_ != NULL) { |
| 506 delete singleton_; | 530 delete singleton_; |
| 507 } | 531 } |
| 508 singleton_ = NULL; | 532 singleton_ = NULL; |
| 509 #endif | 533 #endif |
| 510 } | 534 } |
| 511 | 535 |
| 512 } } // namespace v8::internal | 536 } } // namespace v8::internal |