Chromium Code Reviews

Side by Side Diff: src/libsampler/v8-sampler.cc

Issue 1922303002: Create libsampler as V8 sampler library. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 4 years, 7 months ago
1 // Copyright 2013 the V8 project authors. All rights reserved. 1 // Copyright 2016 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/profiler/sampler.h" 5 #include "src/libsampler/v8-sampler.h"
6 6
7 #if V8_OS_POSIX && !V8_OS_CYGWIN 7 #if V8_OS_POSIX && !V8_OS_CYGWIN
8 8
9 #define USE_SIGNALS 9 #define USE_SIGNALS
10 10
11 #include <errno.h> 11 #include <errno.h>
12 #include <pthread.h> 12 #include <pthread.h>
13 #include <signal.h> 13 #include <signal.h>
14 #include <sys/time.h> 14 #include <sys/time.h>
15 15
(...skipping 21 matching lines...)
37 #endif 37 #endif
38 38
39 #elif V8_OS_WIN || V8_OS_CYGWIN 39 #elif V8_OS_WIN || V8_OS_CYGWIN
40 40
41 #include "src/base/win32-headers.h" 41 #include "src/base/win32-headers.h"
42 42
43 #endif 43 #endif
44 44
45 #include "src/atomic-utils.h" 45 #include "src/atomic-utils.h"
46 #include "src/base/platform/platform.h" 46 #include "src/base/platform/platform.h"
47 #include "src/flags.h" 47 #include "src/hashmap.h"
48 #include "src/frames-inl.h" 48 #include "src/isolate.h"
49 #include "src/log.h"
50 #include "src/profiler/cpu-profiler-inl.h"
51 #include "src/simulator.h"
52 #include "src/v8threads.h"
53 #include "src/vm-state-inl.h"
54 49
55 50
56 #if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T) 51 #if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T)
57 52
58 // Not all versions of Android's C library provide ucontext_t. 53 // Not all versions of Android's C library provide ucontext_t.
59 // Detect this and provide custom but compatible definitions. Note that these 54 // Detect this and provide custom but compatible definitions. Note that these
60 // follow the GLibc naming convention to access register values from 55 // follow the GLibc naming convention to access register values from
61 // mcontext_t. 56 // mcontext_t.
62 // 57 //
63 // See http://code.google.com/p/android/issues/detail?id=34784 58 // See http://code.google.com/p/android/issues/detail?id=34784
(...skipping 87 matching lines...)
151 mcontext_t uc_mcontext; 146 mcontext_t uc_mcontext;
152 // Other fields are not used by V8, don't define them here. 147 // Other fields are not used by V8, don't define them here.
153 } ucontext_t; 148 } ucontext_t;
154 enum { REG_RBP = 10, REG_RSP = 15, REG_RIP = 16 }; 149 enum { REG_RBP = 10, REG_RSP = 15, REG_RIP = 16 };
155 #endif 150 #endif
156 151
157 #endif // V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T) 152 #endif // V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T)
158 153
159 154
160 namespace v8 { 155 namespace v8 {
161 namespace internal { 156 namespace sampler {
162 157
163 namespace { 158 namespace {
164 159
165 class PlatformDataCommon : public Malloced { 160 class PlatformDataCommon : public i::Malloced {
166 public: 161 public:
167 PlatformDataCommon() : profiled_thread_id_(ThreadId::Current()) {} 162 PlatformDataCommon() : profiled_thread_id_(i::ThreadId::Current()) {}
168 ThreadId profiled_thread_id() { return profiled_thread_id_; } 163 i::ThreadId profiled_thread_id() { return profiled_thread_id_; }
169 164
170 protected: 165 protected:
171 ~PlatformDataCommon() {} 166 ~PlatformDataCommon() {}
172 167
173 private: 168 private:
174 ThreadId profiled_thread_id_; 169 i::ThreadId profiled_thread_id_;
175 }; 170 };
176 171
177 172
178 bool IsSamePage(byte* ptr1, byte* ptr2) { 173 #if defined(USE_SIGNALS)
179 const uint32_t kPageSize = 4096; 174 typedef internal::List<Sampler*> SamplerList;
180 uintptr_t mask = ~static_cast<uintptr_t>(kPageSize - 1);
181 return (reinterpret_cast<uintptr_t>(ptr1) & mask) ==
182 (reinterpret_cast<uintptr_t>(ptr2) & mask);
183 }
184 175
185
186 // Check if the code at specified address could potentially be a
187 // frame setup code.
188 bool IsNoFrameRegion(Address address) {
189 struct Pattern {
190 int bytes_count;
191 byte bytes[8];
192 int offsets[4];
193 };
194 byte* pc = reinterpret_cast<byte*>(address);
195 static Pattern patterns[] = {
196 #if V8_HOST_ARCH_IA32
197 // push %ebp
198 // mov %esp,%ebp
199 {3, {0x55, 0x89, 0xe5}, {0, 1, -1}},
200 // pop %ebp
201 // ret N
202 {2, {0x5d, 0xc2}, {0, 1, -1}},
203 // pop %ebp
204 // ret
205 {2, {0x5d, 0xc3}, {0, 1, -1}},
206 #elif V8_HOST_ARCH_X64
207 // pushq %rbp
208 // movq %rsp,%rbp
209 {4, {0x55, 0x48, 0x89, 0xe5}, {0, 1, -1}},
210 // popq %rbp
211 // ret N
212 {2, {0x5d, 0xc2}, {0, 1, -1}},
213 // popq %rbp
214 // ret
215 {2, {0x5d, 0xc3}, {0, 1, -1}},
216 #endif
217 {0, {}, {}}
218 };
219 for (Pattern* pattern = patterns; pattern->bytes_count; ++pattern) {
220 for (int* offset_ptr = pattern->offsets; *offset_ptr != -1; ++offset_ptr) {
221 int offset = *offset_ptr;
222 if (!offset || IsSamePage(pc, pc - offset)) {
223 MSAN_MEMORY_IS_INITIALIZED(pc - offset, pattern->bytes_count);
224 if (!memcmp(pc - offset, pattern->bytes, pattern->bytes_count))
225 return true;
226 } else {
227 // It is not safe to examine bytes on another page as it might not be
228 // allocated thus causing a SEGFAULT.
229 // Check the pattern part that's on the same page and
230 // pessimistically assume it could be the entire pattern match.
231 MSAN_MEMORY_IS_INITIALIZED(pc, pattern->bytes_count - offset);
232 if (!memcmp(pc, pattern->bytes + offset, pattern->bytes_count - offset))
233 return true;
234 }
235 }
236 }
237 return false;
238 }
239
240 typedef List<Sampler*> SamplerList;
241
242 #if defined(USE_SIGNALS)
243 class AtomicGuard { 176 class AtomicGuard {
244 public: 177 public:
245 explicit AtomicGuard(AtomicValue<int>* atomic, bool is_block = true) 178 explicit AtomicGuard(i::AtomicValue<int>* atomic, bool is_block = true)
246 : atomic_(atomic), 179 : atomic_(atomic),
247 is_success_(false) { 180 is_success_(false) {
248 do { 181 do {
249 // Use Acquire_Load to gain mutual exclusion. 182 // Use Acquire_Load to gain mutual exclusion.
250 USE(atomic_->Value()); 183 USE(atomic_->Value());
251 is_success_ = atomic_->TrySetValue(0, 1); 184 is_success_ = atomic_->TrySetValue(0, 1);
252 } while (is_block && !is_success_); 185 } while (is_block && !is_success_);
253 } 186 }
254 187
255 bool is_success() { return is_success_; } 188 bool is_success() { return is_success_; }
256 189
257 ~AtomicGuard() { 190 ~AtomicGuard() {
258 if (is_success_) { 191 if (is_success_) {
259 atomic_->SetValue(0); 192 atomic_->SetValue(0);
260 } 193 }
261 atomic_ = NULL; 194 atomic_ = NULL;
262 } 195 }
263 196
264 private: 197 private:
265 AtomicValue<int>* atomic_; 198 i::AtomicValue<int>* atomic_;
266 bool is_success_; 199 bool is_success_;
267 }; 200 };
268 201
269 202
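For orientation, here is a compilable sketch of the guard idiom used in the AtomicGuard class above, with std::atomic<int> standing in for V8's i::AtomicValue<int> (an assumption made only so the example is self-contained). The blocking form spins until it owns the flag; the non-blocking form, used later in the signal handler, bails out if another thread holds it.

```cpp
#include <atomic>

class AtomicGuardSketch {
 public:
  explicit AtomicGuardSketch(std::atomic<int>* atomic, bool is_blocking = true)
      : atomic_(atomic), is_success_(false) {
    do {
      int expected = 0;
      // Equivalent of atomic_->TrySetValue(0, 1): take the flag only if free.
      is_success_ = atomic_->compare_exchange_strong(expected, 1);
    } while (is_blocking && !is_success_);
  }

  bool is_success() const { return is_success_; }

  ~AtomicGuardSketch() {
    if (is_success_) atomic_->store(0);  // release only if we actually acquired
  }

 private:
  std::atomic<int>* atomic_;
  bool is_success_;
};

std::atomic<int> g_samplers_access{0};

void MutatingCaller() {
  AtomicGuardSketch guard(&g_samplers_access);  // blocking: spins until acquired
  // ... mutate the shared sampler map ...
}

void SignalSafeCaller() {
  AtomicGuardSketch guard(&g_samplers_access, false);  // never blocks in a handler
  if (!guard.is_success()) return;  // someone else holds the flag: skip sampling
  // ... read the shared sampler map ...
}
```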
270 // Returns key for hash map. 203 // Returns key for hash map.
271 void* ThreadKey(pthread_t thread_id) { 204 void* ThreadKey(pthread_t thread_id) {
272 return reinterpret_cast<void*>(thread_id); 205 return reinterpret_cast<void*>(thread_id);
273 } 206 }
274 207
275 208
(...skipping 13 matching lines...)
289 222
290 class Sampler::PlatformData : public PlatformDataCommon { 223 class Sampler::PlatformData : public PlatformDataCommon {
291 public: 224 public:
292 PlatformData() : vm_tid_(pthread_self()) {} 225 PlatformData() : vm_tid_(pthread_self()) {}
293 pthread_t vm_tid() const { return vm_tid_; } 226 pthread_t vm_tid() const { return vm_tid_; }
294 227
295 private: 228 private:
296 pthread_t vm_tid_; 229 pthread_t vm_tid_;
297 }; 230 };
298 231
232
233 class SamplerManager {
234 public:
235 static void AddSampler(Sampler* sampler) {
236 AtomicGuard atomic_guard(&samplers_access_counter_);
237 DCHECK(sampler->IsActive());
238 // Add sampler into map if needed.
239 pthread_t thread_id = sampler->platform_data()->vm_tid();
240 i::HashMap::Entry *entry =
241 thread_id_to_samplers_.Pointer()->LookupOrInsert(ThreadKey(thread_id),
242 ThreadHash(thread_id));
243 if (entry->value == NULL) {
244 SamplerList* samplers = new SamplerList();
245 samplers->Add(sampler);
246 entry->value = samplers;
247 } else {
248 SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value);
249 if (!samplers->Contains(sampler)) {
250 samplers->Add(sampler);
251 }
252 }
253 }
254
255 static void RemoveSampler(Sampler* sampler) {
256 AtomicGuard atomic_guard(&samplers_access_counter_);
257 DCHECK(sampler->IsActive());
258 // Remove sampler from map.
259 pthread_t thread_id = sampler->platform_data()->vm_tid();
260 void* thread_key = ThreadKey(thread_id);
261 uint32_t thread_hash = ThreadHash(thread_id);
262 i::HashMap::Entry* entry =
263 thread_id_to_samplers_.Get().Lookup(thread_key, thread_hash);
264 DCHECK(entry != NULL);
265 SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value);
266 samplers->RemoveElement(sampler);
267 if (samplers->is_empty()) {
268 thread_id_to_samplers_.Pointer()->Remove(thread_key, thread_hash);
269 delete samplers;
270 }
271 }
272
273 private:
274 struct HashMapCreateTrait {
275 static void Construct(internal::HashMap* allocated_ptr) {
276 new (allocated_ptr) internal::HashMap(internal::HashMap::PointersMatch);
277 }
278 };
279 friend class SignalHandler;
280 static base::LazyInstance<internal::HashMap, HashMapCreateTrait>::type
281 thread_id_to_samplers_;
282 static i::AtomicValue<int> samplers_access_counter_;
283 };
284
285
286 base::LazyInstance<i::HashMap, SamplerManager::HashMapCreateTrait>::type
287 SamplerManager::thread_id_to_samplers_ = LAZY_INSTANCE_INITIALIZER;
288 i::AtomicValue<int> SamplerManager::samplers_access_counter_(0);
289
290
299 #elif V8_OS_WIN || V8_OS_CYGWIN 291 #elif V8_OS_WIN || V8_OS_CYGWIN
300 292
301 // ---------------------------------------------------------------------------- 293 // ----------------------------------------------------------------------------
302 // Win32 profiler support. On Cygwin we use the same sampler implementation as 294 // Win32 profiler support. On Cygwin we use the same sampler implementation as
303 // on Win32. 295 // on Win32.
304 296
305 class Sampler::PlatformData : public PlatformDataCommon { 297 class Sampler::PlatformData : public PlatformDataCommon {
306 public: 298 public:
307 // Get a handle to the calling thread. This is the thread that we are 299 // Get a handle to the calling thread. This is the thread that we are
308 // going to profile. We need to make a copy of the handle because we are 300 // going to profile. We need to make a copy of the handle because we are
(...skipping 12 matching lines...)
321 CloseHandle(profiled_thread_); 313 CloseHandle(profiled_thread_);
322 profiled_thread_ = NULL; 314 profiled_thread_ = NULL;
323 } 315 }
324 } 316 }
325 317
326 HANDLE profiled_thread() { return profiled_thread_; } 318 HANDLE profiled_thread() { return profiled_thread_; }
327 319
328 private: 320 private:
329 HANDLE profiled_thread_; 321 HANDLE profiled_thread_;
330 }; 322 };
331 #endif 323 #endif // USE_SIGNALS
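The "(...skipping 12 matching lines...)" above hides the Win32 PlatformData constructor. Since GetCurrentThread() returns only a pseudo-handle, the conventional pattern, and almost certainly what the elided code does, is to duplicate it into a real handle that the sampler thread can later pass to SuspendThread/GetThreadContext/ResumeThread. A hedged sketch, not the literal patch contents:

```cpp
#include <windows.h>

// Duplicate the calling thread's pseudo-handle into a real handle usable
// from another thread for suspend/resume and context queries.
HANDLE DuplicateCurrentThreadHandle() {
  HANDLE profiled_thread = NULL;
  if (!DuplicateHandle(GetCurrentProcess(), GetCurrentThread(),
                       GetCurrentProcess(), &profiled_thread,
                       THREAD_GET_CONTEXT | THREAD_SUSPEND_RESUME |
                           THREAD_QUERY_INFORMATION,
                       FALSE, 0)) {
    return NULL;  // caller must tolerate a missing handle
  }
  return profiled_thread;
}
```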
332
333
334 #if defined(USE_SIMULATOR)
335 bool SimulatorHelper::FillRegisters(Isolate* isolate,
336 v8::RegisterState* state) {
337 Simulator *simulator = isolate->thread_local_top()->simulator_;
338 // Check if there is active simulator.
339 if (simulator == NULL) return false;
340 #if V8_TARGET_ARCH_ARM
341 if (!simulator->has_bad_pc()) {
342 state->pc = reinterpret_cast<Address>(simulator->get_pc());
343 }
344 state->sp = reinterpret_cast<Address>(simulator->get_register(Simulator::sp));
345 state->fp = reinterpret_cast<Address>(simulator->get_register(
346 Simulator::r11));
347 #elif V8_TARGET_ARCH_ARM64
348 state->pc = reinterpret_cast<Address>(simulator->pc());
349 state->sp = reinterpret_cast<Address>(simulator->sp());
350 state->fp = reinterpret_cast<Address>(simulator->fp());
351 #elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
352 if (!simulator->has_bad_pc()) {
353 state->pc = reinterpret_cast<Address>(simulator->get_pc());
354 }
355 state->sp = reinterpret_cast<Address>(simulator->get_register(Simulator::sp));
356 state->fp = reinterpret_cast<Address>(simulator->get_register(Simulator::fp));
357 #elif V8_TARGET_ARCH_PPC
358 if (!simulator->has_bad_pc()) {
359 state->pc = reinterpret_cast<Address>(simulator->get_pc());
360 }
361 state->sp = reinterpret_cast<Address>(simulator->get_register(Simulator::sp));
362 state->fp = reinterpret_cast<Address>(simulator->get_register(Simulator::fp));
363 #elif V8_TARGET_ARCH_S390
364 if (!simulator->has_bad_pc()) {
365 state->pc = reinterpret_cast<Address>(simulator->get_pc());
366 }
367 state->sp = reinterpret_cast<Address>(simulator->get_register(Simulator::sp));
368 state->fp = reinterpret_cast<Address>(simulator->get_register(Simulator::fp));
369 #endif
370 if (state->sp == 0 || state->fp == 0) {
 371 // It is possible that the simulator is interrupted while it is updating
372 // the sp or fp register. ARM64 simulator does this in two steps:
373 // first setting it to zero and then setting it to the new value.
374 // Bailout if sp/fp doesn't contain the new value.
375 //
376 // FIXME: The above doesn't really solve the issue.
377 // If a 64-bit target is executed on a 32-bit host even the final
378 // write is non-atomic, so it might obtain a half of the result.
379 // Moreover as long as the register set code uses memcpy (as of now),
380 // it is not guaranteed to be atomic even when both host and target
381 // are of same bitness.
382 return false;
383 }
384 return true;
385 }
386 #endif // USE_SIMULATOR
387 324
388 325
389 #if defined(USE_SIGNALS) 326 #if defined(USE_SIGNALS)
390 327 class SignalHandler : public i::AllStatic {
391 class SignalHandler : public AllStatic {
392 public: 328 public:
393 static void SetUp() { if (!mutex_) mutex_ = new base::Mutex(); } 329 static void SetUp() { if (!mutex_) mutex_ = new base::Mutex(); }
394 static void TearDown() { delete mutex_; mutex_ = NULL; } 330 static void TearDown() { delete mutex_; mutex_ = NULL; }
395 331
396 static void IncreaseSamplerCount() { 332 static void IncreaseSamplerCount() {
397 base::LockGuard<base::Mutex> lock_guard(mutex_); 333 base::LockGuard<base::Mutex> lock_guard(mutex_);
398 if (++client_count_ == 1) Install(); 334 if (++client_count_ == 1) Install();
399 } 335 }
400 336
401 static void DecreaseSamplerCount() { 337 static void DecreaseSamplerCount() {
402 base::LockGuard<base::Mutex> lock_guard(mutex_); 338 base::LockGuard<base::Mutex> lock_guard(mutex_);
403 if (--client_count_ == 0) Restore(); 339 if (--client_count_ == 0) Restore();
404 } 340 }
405 341
406 static bool Installed() { 342 static bool Installed() {
407 return signal_handler_installed_; 343 return signal_handler_installed_;
408 } 344 }
409 345
410 #if !V8_OS_NACL
411 static void CollectSample(void* context, Sampler* sampler);
412 #endif
413
414 private: 346 private:
415 static void Install() { 347 static void Install() {
416 #if !V8_OS_NACL 348 #if !V8_OS_NACL
417 struct sigaction sa; 349 struct sigaction sa;
418 sa.sa_sigaction = &HandleProfilerSignal; 350 sa.sa_sigaction = &HandleProfilerSignal;
419 sigemptyset(&sa.sa_mask); 351 sigemptyset(&sa.sa_mask);
420 #if V8_OS_QNX 352 #if V8_OS_QNX
421 sa.sa_flags = SA_SIGINFO; 353 sa.sa_flags = SA_SIGINFO;
422 #else 354 #else
423 sa.sa_flags = SA_RESTART | SA_SIGINFO; 355 sa.sa_flags = SA_RESTART | SA_SIGINFO;
424 #endif 356 #endif
425 signal_handler_installed_ = 357 signal_handler_installed_ =
426 (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0); 358 (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
427 #endif 359 #endif // !V8_OS_NACL
428 } 360 }
429 361
430 static void Restore() { 362 static void Restore() {
431 #if !V8_OS_NACL 363 #if !V8_OS_NACL
432 if (signal_handler_installed_) { 364 if (signal_handler_installed_) {
433 sigaction(SIGPROF, &old_signal_handler_, 0); 365 sigaction(SIGPROF, &old_signal_handler_, 0);
434 signal_handler_installed_ = false; 366 signal_handler_installed_ = false;
435 } 367 }
436 #endif 368 #endif
437 } 369 }
438 370
439 #if !V8_OS_NACL 371 #if !V8_OS_NACL
372 static void FillRegisterState(void* context, RegisterState& regs);
440 static void HandleProfilerSignal(int signal, siginfo_t* info, void* context); 373 static void HandleProfilerSignal(int signal, siginfo_t* info, void* context);
441 #endif 374 #endif
442 // Protects the process wide state below. 375 // Protects the process wide state below.
443 static base::Mutex* mutex_; 376 static base::Mutex* mutex_;
444 static int client_count_; 377 static int client_count_;
445 static bool signal_handler_installed_; 378 static bool signal_handler_installed_;
446 static struct sigaction old_signal_handler_; 379 static struct sigaction old_signal_handler_;
447 }; 380 };
448 381
449 382
450 base::Mutex* SignalHandler::mutex_ = NULL; 383 base::Mutex* SignalHandler::mutex_ = NULL;
451 int SignalHandler::client_count_ = 0; 384 int SignalHandler::client_count_ = 0;
452 struct sigaction SignalHandler::old_signal_handler_; 385 struct sigaction SignalHandler::old_signal_handler_;
453 bool SignalHandler::signal_handler_installed_ = false; 386 bool SignalHandler::signal_handler_installed_ = false;
454 387
455 388
456 // As Native Client does not support signal handling, profiling is disabled. 389 // As Native Client does not support signal handling, profiling is disabled.
457 #if !V8_OS_NACL 390 #if !V8_OS_NACL
458 void SignalHandler::CollectSample(void* context, Sampler* sampler) { 391 void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
459 if (sampler == NULL || (!sampler->IsProfiling() && 392 void* context) {
460 !sampler->IsRegistered())) { 393 USE(info);
394 if (signal != SIGPROF) return;
395 AtomicGuard atomic_guard(&SamplerManager::samplers_access_counter_, false);
396 if (!atomic_guard.is_success()) return;
397 pthread_t thread_id = pthread_self();
398 i::HashMap::Entry* entry =
399 SamplerManager::thread_id_to_samplers_.Pointer()->Lookup(
400 ThreadKey(thread_id), ThreadHash(thread_id));
401 if (entry == NULL)
461 return; 402 return;
403 SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value);
404 for (int i = 0; i < samplers->length(); ++i) {
405 Sampler* sampler = samplers->at(i);
406 if (sampler == NULL || !sampler->IsProfiling()) {
407 return;
408 }
409 Isolate* isolate = sampler->isolate();
410
411 // We require a fully initialized and entered isolate.
412 if (isolate == NULL || !isolate->IsInUse()) return;
413
414 if (v8::Locker::IsActive() && !Locker::IsLocked(isolate)) {
415 return;
416 }
417
418 v8::RegisterState state;
419 SignalHandler::FillRegisterState(context, state);
alph 2016/05/04 00:20:18 still it is possible to move this out of the loop.
lpy 2016/05/04 00:56:09 Done.
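The reviewer's point is that the register state depends only on the signal context, not on the individual sampler, so it can be filled once before iterating over the samplers registered for this thread. A minimal sketch of the suggested shape (stub types stand in for the real V8 classes, and the reference parameter mirrors this patch set; the next comment thread asks for a pointer instead):

```cpp
#include <vector>

struct RegisterState { void* pc = nullptr; void* sp = nullptr; void* fp = nullptr; };

struct StubSampler {
  bool IsProfiling() const { return true; }
  void SampleStack(const RegisterState&) {}
};

// Stand-in for SignalHandler::FillRegisterState; the real one decodes ucontext_t.
void FillRegisterState(void* /*context*/, RegisterState& state) {
  state.pc = state.sp = state.fp = nullptr;
}

// Fill the registers once, then hand the same state to every sampler.
void HandleProfilerSignalSketch(void* context, std::vector<StubSampler*>& samplers) {
  RegisterState state;
  FillRegisterState(context, state);  // hoisted out of the loop
  for (StubSampler* sampler : samplers) {
    if (sampler == nullptr || !sampler->IsProfiling()) continue;
    sampler->SampleStack(state);
  }
}
```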
420
421 sampler->SampleStack(state);
462 } 422 }
463 Isolate* isolate = sampler->isolate(); 423 }
464 424
465 // We require a fully initialized and entered isolate. 425 void SignalHandler::FillRegisterState(void* context, RegisterState& state) {
alph 2016/05/04 00:20:18 please use pointer for out parameters.
lpy 2016/05/04 00:56:09 Done.
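V8 style prefers pointers over non-const references for out parameters, so the fix (marked Done but not shown in this patch set) presumably changes the signature along these lines; the shape below is the conventional one, not a quote of the landed code:

```cpp
struct RegisterState { void* pc; void* sp; void* fp; };

// Pointer out parameter: the mutation is visible at the call site.
void FillRegisterState(void* /*context*/, RegisterState* state) {
  state->pc = nullptr;  // the real code reads pc/sp/fp from the ucontext_t
  state->sp = nullptr;
  state->fp = nullptr;
}

void CallSite(void* context) {
  RegisterState state;
  FillRegisterState(context, &state);  // "&state" flags the out parameter
}
```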
466 if (isolate == NULL || !isolate->IsInUse()) return;
467
468 if (v8::Locker::IsActive() &&
469 !isolate->thread_manager()->IsLockedByCurrentThread()) {
470 return;
471 }
472
473 v8::RegisterState state;
474
475 #if defined(USE_SIMULATOR)
476 if (!SimulatorHelper::FillRegisters(isolate, &state)) return;
477 #else
478 // Extracting the sample from the context is extremely machine dependent. 426 // Extracting the sample from the context is extremely machine dependent.
479 ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context); 427 ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
480 #if !(V8_OS_OPENBSD || (V8_OS_LINUX && (V8_HOST_ARCH_PPC || V8_HOST_ARCH_S390))) 428 #if !(V8_OS_OPENBSD || (V8_OS_LINUX && (V8_HOST_ARCH_PPC || V8_HOST_ARCH_S390)))
481 mcontext_t& mcontext = ucontext->uc_mcontext; 429 mcontext_t& mcontext = ucontext->uc_mcontext;
482 #endif 430 #endif
483 #if V8_OS_LINUX 431 #if V8_OS_LINUX
484 #if V8_HOST_ARCH_IA32 432 #if V8_HOST_ARCH_IA32
485 state.pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]); 433 state.pc = reinterpret_cast<void*>(mcontext.gregs[REG_EIP]);
486 state.sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]); 434 state.sp = reinterpret_cast<void*>(mcontext.gregs[REG_ESP]);
487 state.fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]); 435 state.fp = reinterpret_cast<void*>(mcontext.gregs[REG_EBP]);
488 #elif V8_HOST_ARCH_X64 436 #elif V8_HOST_ARCH_X64
489 state.pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]); 437 state.pc = reinterpret_cast<void*>(mcontext.gregs[REG_RIP]);
490 state.sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]); 438 state.sp = reinterpret_cast<void*>(mcontext.gregs[REG_RSP]);
491 state.fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]); 439 state.fp = reinterpret_cast<void*>(mcontext.gregs[REG_RBP]);
492 #elif V8_HOST_ARCH_ARM 440 #elif V8_HOST_ARCH_ARM
493 #if V8_LIBC_GLIBC && !V8_GLIBC_PREREQ(2, 4) 441 #if V8_LIBC_GLIBC && !V8_GLIBC_PREREQ(2, 4)
494 // Old GLibc ARM versions used a gregs[] array to access the register 442 // Old GLibc ARM versions used a gregs[] array to access the register
495 // values from mcontext_t. 443 // values from mcontext_t.
496 state.pc = reinterpret_cast<Address>(mcontext.gregs[R15]); 444 state.pc = reinterpret_cast<void*>(mcontext.gregs[R15]);
497 state.sp = reinterpret_cast<Address>(mcontext.gregs[R13]); 445 state.sp = reinterpret_cast<void*>(mcontext.gregs[R13]);
498 state.fp = reinterpret_cast<Address>(mcontext.gregs[R11]); 446 state.fp = reinterpret_cast<void*>(mcontext.gregs[R11]);
499 #else 447 #else
500 state.pc = reinterpret_cast<Address>(mcontext.arm_pc); 448 state.pc = reinterpret_cast<void*>(mcontext.arm_pc);
501 state.sp = reinterpret_cast<Address>(mcontext.arm_sp); 449 state.sp = reinterpret_cast<void*>(mcontext.arm_sp);
502 state.fp = reinterpret_cast<Address>(mcontext.arm_fp); 450 state.fp = reinterpret_cast<void*>(mcontext.arm_fp);
503 #endif // V8_LIBC_GLIBC && !V8_GLIBC_PREREQ(2, 4) 451 #endif // V8_LIBC_GLIBC && !V8_GLIBC_PREREQ(2, 4)
504 #elif V8_HOST_ARCH_ARM64 452 #elif V8_HOST_ARCH_ARM64
505 state.pc = reinterpret_cast<Address>(mcontext.pc); 453 state.pc = reinterpret_cast<void*>(mcontext.pc);
506 state.sp = reinterpret_cast<Address>(mcontext.sp); 454 state.sp = reinterpret_cast<void*>(mcontext.sp);
507 // FP is an alias for x29. 455 // FP is an alias for x29.
508 state.fp = reinterpret_cast<Address>(mcontext.regs[29]); 456 state->fp = reinterpret_cast<void*>(mcontext.regs[29]);
alph 2016/05/04 00:20:18 doesn't look like it compiles.
lpy 2016/05/04 00:56:09 Done.
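The compile error the reviewer flags is the lone `state->fp` in a function whose parameter is `RegisterState& state`; the neighbouring lines use `state.`. Marked Done, so the landed code presumably makes the accesses consistent, as in this sketch with stand-in types:

```cpp
struct RegisterState { void* pc; void* sp; void* fp; };
struct Arm64MContextStub { unsigned long pc; unsigned long sp; unsigned long regs[31]; };

void FillArm64(RegisterState& state, const Arm64MContextStub& mcontext) {
  state.pc = reinterpret_cast<void*>(mcontext.pc);
  state.sp = reinterpret_cast<void*>(mcontext.sp);
  // FP is an alias for x29; "state.fp", not "state->fp", since state is a reference.
  state.fp = reinterpret_cast<void*>(mcontext.regs[29]);
}
```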
509 #elif V8_HOST_ARCH_MIPS 457 #elif V8_HOST_ARCH_MIPS
510 state.pc = reinterpret_cast<Address>(mcontext.pc); 458 state.pc = reinterpret_cast<void*>(mcontext.pc);
511 state.sp = reinterpret_cast<Address>(mcontext.gregs[29]); 459 state.sp = reinterpret_cast<void*>(mcontext.gregs[29]);
512 state.fp = reinterpret_cast<Address>(mcontext.gregs[30]); 460 state.fp = reinterpret_cast<void*>(mcontext.gregs[30]);
513 #elif V8_HOST_ARCH_MIPS64 461 #elif V8_HOST_ARCH_MIPS64
514 state.pc = reinterpret_cast<Address>(mcontext.pc); 462 state.pc = reinterpret_cast<void*>(mcontext.pc);
515 state.sp = reinterpret_cast<Address>(mcontext.gregs[29]); 463 state.sp = reinterpret_cast<void*>(mcontext.gregs[29]);
516 state.fp = reinterpret_cast<Address>(mcontext.gregs[30]); 464 state.fp = reinterpret_cast<void*>(mcontext.gregs[30]);
517 #elif V8_HOST_ARCH_PPC 465 #elif V8_HOST_ARCH_PPC
518 state.pc = reinterpret_cast<Address>(ucontext->uc_mcontext.regs->nip); 466 state.pc = reinterpret_cast<void*>(ucontext->uc_mcontext.regs->nip);
519 state.sp = reinterpret_cast<Address>(ucontext->uc_mcontext.regs->gpr[PT_R1]); 467 state.sp =
520 state.fp = reinterpret_cast<Address>(ucontext->uc_mcontext.regs->gpr[PT_R31]); 468 reinterpret_cast<void*>(ucontext->uc_mcontext.regs->gpr[PT_R1]);
469 state.fp =
470 reinterpret_cast<void*>(ucontext->uc_mcontext.regs->gpr[PT_R31]);
521 #elif V8_HOST_ARCH_S390 471 #elif V8_HOST_ARCH_S390
522 #if V8_TARGET_ARCH_32_BIT 472 #if V8_TARGET_ARCH_32_BIT
523 // 31-bit target will have bit 0 (MSB) of the PSW set to denote addressing 473 // 31-bit target will have bit 0 (MSB) of the PSW set to denote addressing
524 // mode. This bit needs to be masked out to resolve actual address. 474 // mode. This bit needs to be masked out to resolve actual address.
525 state.pc = 475 state.pc =
526 reinterpret_cast<Address>(ucontext->uc_mcontext.psw.addr & 0x7FFFFFFF); 476 reinterpret_cast<void*>(ucontext->uc_mcontext.psw.addr & 0x7FFFFFFF);
527 #else 477 #else
528 state.pc = reinterpret_cast<Address>(ucontext->uc_mcontext.psw.addr); 478 state.pc = reinterpret_cast<void*>(ucontext->uc_mcontext.psw.addr);
529 #endif // V8_TARGET_ARCH_32_BIT 479 #endif // V8_TARGET_ARCH_32_BIT
530 state.sp = reinterpret_cast<Address>(ucontext->uc_mcontext.gregs[15]); 480 state.sp = reinterpret_cast<void*>(ucontext->uc_mcontext.gregs[15]);
531 state.fp = reinterpret_cast<Address>(ucontext->uc_mcontext.gregs[11]); 481 state.fp = reinterpret_cast<void*>(ucontext->uc_mcontext.gregs[11]);
532 #endif // V8_HOST_ARCH_* 482 #endif // V8_HOST_ARCH_*
533 #elif V8_OS_MACOSX 483 #elif V8_OS_MACOSX
534 #if V8_HOST_ARCH_X64 484 #if V8_HOST_ARCH_X64
535 #if __DARWIN_UNIX03 485 #if __DARWIN_UNIX03
536 state.pc = reinterpret_cast<Address>(mcontext->__ss.__rip); 486 state.pc = reinterpret_cast<void*>(mcontext->__ss.__rip);
537 state.sp = reinterpret_cast<Address>(mcontext->__ss.__rsp); 487 state.sp = reinterpret_cast<void*>(mcontext->__ss.__rsp);
538 state.fp = reinterpret_cast<Address>(mcontext->__ss.__rbp); 488 state.fp = reinterpret_cast<void*>(mcontext->__ss.__rbp);
539 #else // !__DARWIN_UNIX03 489 #else // !__DARWIN_UNIX03
540 state.pc = reinterpret_cast<Address>(mcontext->ss.rip); 490 state->pc = reinterpret_cast<void*>(mcontext->ss.rip);
541 state.sp = reinterpret_cast<Address>(mcontext->ss.rsp); 491 state->sp = reinterpret_cast<void*>(mcontext->ss.rsp);
542 state.fp = reinterpret_cast<Address>(mcontext->ss.rbp); 492 state->fp = reinterpret_cast<void*>(mcontext->ss.rbp);
543 #endif // __DARWIN_UNIX03 493 #endif // __DARWIN_UNIX03
544 #elif V8_HOST_ARCH_IA32 494 #elif V8_HOST_ARCH_IA32
545 #if __DARWIN_UNIX03 495 #if __DARWIN_UNIX03
546 state.pc = reinterpret_cast<Address>(mcontext->__ss.__eip); 496 state.pc = reinterpret_cast<void*>(mcontext->__ss.__eip);
547 state.sp = reinterpret_cast<Address>(mcontext->__ss.__esp); 497 state.sp = reinterpret_cast<void*>(mcontext->__ss.__esp);
548 state.fp = reinterpret_cast<Address>(mcontext->__ss.__ebp); 498 state.fp = reinterpret_cast<void*>(mcontext->__ss.__ebp);
549 #else // !__DARWIN_UNIX03 499 #else // !__DARWIN_UNIX03
550 state.pc = reinterpret_cast<Address>(mcontext->ss.eip); 500 state.pc = reinterpret_cast<void*>(mcontext->ss.eip);
551 state.sp = reinterpret_cast<Address>(mcontext->ss.esp); 501 state.sp = reinterpret_cast<void*>(mcontext->ss.esp);
552 state.fp = reinterpret_cast<Address>(mcontext->ss.ebp); 502 state.fp = reinterpret_cast<void*>(mcontext->ss.ebp);
553 #endif // __DARWIN_UNIX03 503 #endif // __DARWIN_UNIX03
554 #endif // V8_HOST_ARCH_IA32 504 #endif // V8_HOST_ARCH_IA32
555 #elif V8_OS_FREEBSD 505 #elif V8_OS_FREEBSD
556 #if V8_HOST_ARCH_IA32 506 #if V8_HOST_ARCH_IA32
557 state.pc = reinterpret_cast<Address>(mcontext.mc_eip); 507 state.pc = reinterpret_cast<void*>(mcontext.mc_eip);
558 state.sp = reinterpret_cast<Address>(mcontext.mc_esp); 508 state.sp = reinterpret_cast<void*>(mcontext.mc_esp);
559 state.fp = reinterpret_cast<Address>(mcontext.mc_ebp); 509 state.fp = reinterpret_cast<void*>(mcontext.mc_ebp);
560 #elif V8_HOST_ARCH_X64 510 #elif V8_HOST_ARCH_X64
561 state.pc = reinterpret_cast<Address>(mcontext.mc_rip); 511 state.pc = reinterpret_cast<void*>(mcontext.mc_rip);
562 state.sp = reinterpret_cast<Address>(mcontext.mc_rsp); 512 state.sp = reinterpret_cast<void*>(mcontext.mc_rsp);
563 state.fp = reinterpret_cast<Address>(mcontext.mc_rbp); 513 state.fp = reinterpret_cast<void*>(mcontext.mc_rbp);
564 #elif V8_HOST_ARCH_ARM 514 #elif V8_HOST_ARCH_ARM
565 state.pc = reinterpret_cast<Address>(mcontext.mc_r15); 515 state.pc = reinterpret_cast<void*>(mcontext.mc_r15);
566 state.sp = reinterpret_cast<Address>(mcontext.mc_r13); 516 state.sp = reinterpret_cast<void*>(mcontext.mc_r13);
567 state.fp = reinterpret_cast<Address>(mcontext.mc_r11); 517 state.fp = reinterpret_cast<void*>(mcontext.mc_r11);
568 #endif // V8_HOST_ARCH_* 518 #endif // V8_HOST_ARCH_*
569 #elif V8_OS_NETBSD 519 #elif V8_OS_NETBSD
570 #if V8_HOST_ARCH_IA32 520 #if V8_HOST_ARCH_IA32
571 state.pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_EIP]); 521 state.pc = reinterpret_cast<void*>(mcontext.__gregs[_REG_EIP]);
572 state.sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_ESP]); 522 state.sp = reinterpret_cast<void*>(mcontext.__gregs[_REG_ESP]);
573 state.fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_EBP]); 523 state.fp = reinterpret_cast<void*>(mcontext.__gregs[_REG_EBP]);
574 #elif V8_HOST_ARCH_X64 524 #elif V8_HOST_ARCH_X64
575 state.pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_RIP]); 525 state.pc = reinterpret_cast<void*>(mcontext.__gregs[_REG_RIP]);
576 state.sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RSP]); 526 state.sp = reinterpret_cast<void*>(mcontext.__gregs[_REG_RSP]);
577 state.fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RBP]); 527 state.fp = reinterpret_cast<void*>(mcontext.__gregs[_REG_RBP]);
578 #endif // V8_HOST_ARCH_* 528 #endif // V8_HOST_ARCH_*
579 #elif V8_OS_OPENBSD 529 #elif V8_OS_OPENBSD
580 #if V8_HOST_ARCH_IA32 530 #if V8_HOST_ARCH_IA32
581 state.pc = reinterpret_cast<Address>(ucontext->sc_eip); 531 state.pc = reinterpret_cast<void*>(ucontext->sc_eip);
582 state.sp = reinterpret_cast<Address>(ucontext->sc_esp); 532 state.sp = reinterpret_cast<void*>(ucontext->sc_esp);
583 state.fp = reinterpret_cast<Address>(ucontext->sc_ebp); 533 state.fp = reinterpret_cast<void*>(ucontext->sc_ebp);
584 #elif V8_HOST_ARCH_X64 534 #elif V8_HOST_ARCH_X64
585 state.pc = reinterpret_cast<Address>(ucontext->sc_rip); 535 state.pc = reinterpret_cast<void*>(ucontext->sc_rip);
586 state.sp = reinterpret_cast<Address>(ucontext->sc_rsp); 536 state.sp = reinterpret_cast<void*>(ucontext->sc_rsp);
587 state.fp = reinterpret_cast<Address>(ucontext->sc_rbp); 537 state.fp = reinterpret_cast<void*>(ucontext->sc_rbp);
588 #endif // V8_HOST_ARCH_* 538 #endif // V8_HOST_ARCH_*
589 #elif V8_OS_SOLARIS 539 #elif V8_OS_SOLARIS
590 state.pc = reinterpret_cast<Address>(mcontext.gregs[REG_PC]); 540 state.pc = reinterpret_cast<void*>(mcontext.gregs[REG_PC]);
591 state.sp = reinterpret_cast<Address>(mcontext.gregs[REG_SP]); 541 state.sp = reinterpret_cast<void*>(mcontext.gregs[REG_SP]);
592 state.fp = reinterpret_cast<Address>(mcontext.gregs[REG_FP]); 542 state.fp = reinterpret_cast<void*>(mcontext.gregs[REG_FP]);
593 #elif V8_OS_QNX 543 #elif V8_OS_QNX
594 #if V8_HOST_ARCH_IA32 544 #if V8_HOST_ARCH_IA32
595 state.pc = reinterpret_cast<Address>(mcontext.cpu.eip); 545 state.pc = reinterpret_cast<void*>(mcontext.cpu.eip);
596 state.sp = reinterpret_cast<Address>(mcontext.cpu.esp); 546 state.sp = reinterpret_cast<void*>(mcontext.cpu.esp);
597 state.fp = reinterpret_cast<Address>(mcontext.cpu.ebp); 547 state.fp = reinterpret_cast<void*>(mcontext.cpu.ebp);
598 #elif V8_HOST_ARCH_ARM 548 #elif V8_HOST_ARCH_ARM
599 state.pc = reinterpret_cast<Address>(mcontext.cpu.gpr[ARM_REG_PC]); 549 state.pc = reinterpret_cast<void*>(mcontext.cpu.gpr[ARM_REG_PC]);
600 state.sp = reinterpret_cast<Address>(mcontext.cpu.gpr[ARM_REG_SP]); 550 state.sp = reinterpret_cast<void*>(mcontext.cpu.gpr[ARM_REG_SP]);
601 state.fp = reinterpret_cast<Address>(mcontext.cpu.gpr[ARM_REG_FP]); 551 state.fp = reinterpret_cast<void*>(mcontext.cpu.gpr[ARM_REG_FP]);
602 #endif // V8_HOST_ARCH_* 552 #endif // V8_HOST_ARCH_*
603 #elif V8_OS_AIX 553 #elif V8_OS_AIX
604 state.pc = reinterpret_cast<Address>(mcontext.jmp_context.iar); 554 state.pc = reinterpret_cast<void*>(mcontext.jmp_context.iar);
605 state.sp = reinterpret_cast<Address>(mcontext.jmp_context.gpr[1]); 555 state.sp = reinterpret_cast<void*>(mcontext.jmp_context.gpr[1]);
606 state.fp = reinterpret_cast<Address>(mcontext.jmp_context.gpr[31]); 556 state.fp = reinterpret_cast<void*>(mcontext.jmp_context.gpr[31]);
607 #endif // V8_OS_AIX 557 #endif // V8_OS_AIX
608 #endif // USE_SIMULATOR
609 sampler->SampleStack(state);
610 } 558 }
611 #endif // V8_OS_NACL 559
560 #endif // !V8_OS_NACL
612 561
613 #endif // USE_SIGNALS 562 #endif // USE_SIGNALS
614 563
615 564
616 class SamplerThread : public base::Thread {
617 public:
618 static const int kSamplerThreadStackSize = 64 * KB;
619
620 explicit SamplerThread(int interval)
621 : Thread(base::Thread::Options("SamplerThread", kSamplerThreadStackSize)),
622 interval_(interval) {}
623
624 static void SetUp() { if (!mutex_) mutex_ = new base::Mutex(); }
625 static void TearDown() { delete mutex_; mutex_ = NULL; }
626
627 static void AddActiveSampler(Sampler* sampler) {
628 bool need_to_start = false;
629 base::LockGuard<base::Mutex> lock_guard(mutex_);
630 if (instance_ == NULL) {
631 // Start a thread that will send SIGPROF signal to VM threads,
632 // when CPU profiling will be enabled.
633 instance_ = new SamplerThread(sampler->interval());
634 need_to_start = true;
635 }
636
637 DCHECK(sampler->IsActive());
638 DCHECK(instance_->interval_ == sampler->interval());
639
640 #if defined(USE_SIGNALS)
641 AddSampler(sampler);
642 #else
643 DCHECK(!instance_->active_samplers_.Contains(sampler));
644 instance_->active_samplers_.Add(sampler);
645 #endif // USE_SIGNALS
646
647 if (need_to_start) instance_->StartSynchronously();
648 }
649
650 static void RemoveSampler(Sampler* sampler) {
651 SamplerThread* instance_to_remove = NULL;
652 {
653 base::LockGuard<base::Mutex> lock_guard(mutex_);
654
655 DCHECK(sampler->IsActive() || sampler->IsRegistered());
656 #if defined(USE_SIGNALS)
657 {
658 AtomicGuard atomic_guard(&sampler_list_access_counter_);
659 // Remove sampler from map.
660 pthread_t thread_id = sampler->platform_data()->vm_tid();
661 void* thread_key = ThreadKey(thread_id);
662 uint32_t thread_hash = ThreadHash(thread_id);
663 HashMap::Entry* entry =
664 thread_id_to_samplers_.Get().Lookup(thread_key, thread_hash);
665 DCHECK(entry != NULL);
666 SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value);
667 samplers->RemoveElement(sampler);
668 if (samplers->is_empty()) {
669 thread_id_to_samplers_.Pointer()->Remove(thread_key, thread_hash);
670 delete samplers;
671 }
672 if (thread_id_to_samplers_.Get().occupancy() == 0) {
673 instance_to_remove = instance_;
674 instance_ = NULL;
675 }
676 }
677 #else
678 bool removed = instance_->active_samplers_.RemoveElement(sampler);
679 DCHECK(removed);
680 USE(removed);
681
682 // We cannot delete the instance immediately as we need to Join() the
683 // thread but we are holding mutex_ and the thread may try to acquire it.
684 if (instance_->active_samplers_.is_empty()) {
685 instance_to_remove = instance_;
686 instance_ = NULL;
687 }
688 #endif // USE_SIGNALS
689 }
690
691 if (!instance_to_remove) return;
692 instance_to_remove->Join();
693 delete instance_to_remove;
694 }
695
696 // Unlike AddActiveSampler, this method only adds a sampler,
697 // but won't start the sampler thread.
698 static void RegisterSampler(Sampler* sampler) {
699 base::LockGuard<base::Mutex> lock_guard(mutex_);
700 #if defined(USE_SIGNALS)
701 AddSampler(sampler);
702 #endif // USE_SIGNALS
703 }
704
705 // Implement Thread::Run().
706 virtual void Run() {
707 while (true) {
708 {
709 base::LockGuard<base::Mutex> lock_guard(mutex_);
710 #if defined(USE_SIGNALS)
711 if (thread_id_to_samplers_.Get().occupancy() == 0) break;
712 if (SignalHandler::Installed()) {
713 for (HashMap::Entry *p = thread_id_to_samplers_.Get().Start();
714 p != NULL; p = thread_id_to_samplers_.Get().Next(p)) {
715 pthread_t thread_id = reinterpret_cast<pthread_t>(p->key);
716 pthread_kill(thread_id, SIGPROF);
717 }
718 }
719 #else
720 if (active_samplers_.is_empty()) break;
721 // When CPU profiling is enabled both JavaScript and C++ code is
722 // profiled. We must not suspend.
723 for (int i = 0; i < active_samplers_.length(); ++i) {
724 Sampler* sampler = active_samplers_.at(i);
725 if (!sampler->IsProfiling()) continue;
726 sampler->DoSample();
727 }
728 #endif // USE_SIGNALS
729 }
730 base::OS::Sleep(base::TimeDelta::FromMilliseconds(interval_));
731 }
732 }
733
734 private:
735 // Protects the process wide state below.
736 static base::Mutex* mutex_;
737 static SamplerThread* instance_;
738
739 const int interval_;
740
741 #if defined(USE_SIGNALS)
742 struct HashMapCreateTrait {
743 static void Construct(HashMap* allocated_ptr) {
744 new (allocated_ptr) HashMap(HashMap::PointersMatch);
745 }
746 };
747 friend class SignalHandler;
748 static base::LazyInstance<HashMap, HashMapCreateTrait>::type
749 thread_id_to_samplers_;
750 static AtomicValue<int> sampler_list_access_counter_;
751 static void AddSampler(Sampler* sampler) {
752 AtomicGuard atomic_guard(&sampler_list_access_counter_);
753 // Add sampler into map if needed.
754 pthread_t thread_id = sampler->platform_data()->vm_tid();
755 HashMap::Entry *entry =
756 thread_id_to_samplers_.Pointer()->LookupOrInsert(ThreadKey(thread_id),
757 ThreadHash(thread_id));
758 if (entry->value == NULL) {
759 SamplerList* samplers = new SamplerList();
760 samplers->Add(sampler);
761 entry->value = samplers;
762 } else {
763 SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value);
764 if (!samplers->Contains(sampler)) {
765 samplers->Add(sampler);
766 }
767 }
768 }
769 #else
770 SamplerList active_samplers_;
771 #endif // USE_SIGNALS
772
773 DISALLOW_COPY_AND_ASSIGN(SamplerThread);
774 };
775
776
777 base::Mutex* SamplerThread::mutex_ = NULL;
778 SamplerThread* SamplerThread::instance_ = NULL;
779 #if defined(USE_SIGNALS)
780 base::LazyInstance<HashMap, SamplerThread::HashMapCreateTrait>::type
781 SamplerThread::thread_id_to_samplers_ = LAZY_INSTANCE_INITIALIZER;
782 AtomicValue<int> SamplerThread::sampler_list_access_counter_(0);
783
784 // As Native Client does not support signal handling, profiling is disabled.
785 #if !V8_OS_NACL
786 void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
787 void* context) {
788 USE(info);
789 if (signal != SIGPROF) return;
790 AtomicGuard atomic_guard(&SamplerThread::sampler_list_access_counter_, false);
791 if (!atomic_guard.is_success()) return;
792 pthread_t thread_id = pthread_self();
793 HashMap::Entry* entry =
794 SamplerThread::thread_id_to_samplers_.Pointer()->Lookup(
795 ThreadKey(thread_id), ThreadHash(thread_id));
796 if (entry == NULL)
797 return;
798 SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value);
799 for (int i = 0; i < samplers->length(); ++i) {
800 Sampler* sampler = samplers->at(i);
801 CollectSample(context, sampler);
802 }
803 }
804 #endif // !V8_OS_NACL
 805 #endif // USE_SIGNALS
806
807
808 //
809 // StackTracer implementation
810 //
811 DISABLE_ASAN void TickSample::Init(Isolate* isolate,
812 const v8::RegisterState& regs,
813 RecordCEntryFrame record_c_entry_frame,
814 bool update_stats) {
815 timestamp = base::TimeTicks::HighResolutionNow();
816 pc = reinterpret_cast<Address>(regs.pc);
817 state = isolate->current_vm_state();
818 this->update_stats = update_stats;
819
820 // Avoid collecting traces while doing GC.
821 if (state == GC) return;
822
823 Address js_entry_sp = isolate->js_entry_sp();
824 if (js_entry_sp == 0) return; // Not executing JS now.
825
826 if (pc && IsNoFrameRegion(pc)) {
827 // Can't collect stack. Mark the sample as spoiled.
828 timestamp = base::TimeTicks();
829 pc = 0;
830 return;
831 }
832
833 ExternalCallbackScope* scope = isolate->external_callback_scope();
834 Address handler = Isolate::handler(isolate->thread_local_top());
835 // If there is a handler on top of the external callback scope then
 836 // we have already entered JavaScript again and the external callback
837 // is not the top function.
838 if (scope && scope->scope_address() < handler) {
839 external_callback_entry = *scope->callback_entrypoint_address();
840 has_external_callback = true;
841 } else {
842 // sp register may point at an arbitrary place in memory, make
843 // sure MSAN doesn't complain about it.
844 MSAN_MEMORY_IS_INITIALIZED(regs.sp, sizeof(Address));
845 // Sample potential return address value for frameless invocation of
846 // stubs (we'll figure out later, if this value makes sense).
847 tos = Memory::Address_at(reinterpret_cast<Address>(regs.sp));
848 has_external_callback = false;
849 }
850
851 SafeStackFrameIterator it(isolate, reinterpret_cast<Address>(regs.fp),
852 reinterpret_cast<Address>(regs.sp), js_entry_sp);
853 top_frame_type = it.top_frame_type();
854
855 SampleInfo info;
856 GetStackSample(isolate, regs, record_c_entry_frame,
857 reinterpret_cast<void**>(&stack[0]), kMaxFramesCount, &info);
858 frames_count = static_cast<unsigned>(info.frames_count);
859 if (!frames_count) {
860 // It is executing JS but failed to collect a stack trace.
861 // Mark the sample as spoiled.
862 timestamp = base::TimeTicks();
863 pc = 0;
864 }
865 }
866
867
868 void TickSample::GetStackSample(Isolate* isolate, const v8::RegisterState& regs,
869 RecordCEntryFrame record_c_entry_frame,
870 void** frames, size_t frames_limit,
871 v8::SampleInfo* sample_info) {
872 sample_info->frames_count = 0;
873 sample_info->vm_state = isolate->current_vm_state();
874 if (sample_info->vm_state == GC) return;
875
876 Address js_entry_sp = isolate->js_entry_sp();
877 if (js_entry_sp == 0) return; // Not executing JS now.
878
879 SafeStackFrameIterator it(isolate, reinterpret_cast<Address>(regs.fp),
880 reinterpret_cast<Address>(regs.sp), js_entry_sp);
881 size_t i = 0;
882 if (record_c_entry_frame == kIncludeCEntryFrame && !it.done() &&
883 it.top_frame_type() == StackFrame::EXIT) {
884 frames[i++] = isolate->c_function();
885 }
886 while (!it.done() && i < frames_limit) {
887 if (it.frame()->is_interpreted()) {
888 // For interpreted frames use the bytecode array pointer as the pc.
889 InterpretedFrame* frame = static_cast<InterpretedFrame*>(it.frame());
890 // Since the sampler can interrupt execution at any point the
891 // bytecode_array might be garbage, so don't dereference it.
892 Address bytecode_array =
893 reinterpret_cast<Address>(frame->GetBytecodeArray()) - kHeapObjectTag;
894 frames[i++] = bytecode_array + BytecodeArray::kHeaderSize +
895 frame->GetBytecodeOffset();
896 } else {
897 frames[i++] = it.frame()->pc();
898 }
899 it.Advance();
900 }
901 sample_info->frames_count = i;
902 }
903
904
905 void Sampler::SetUp() { 565 void Sampler::SetUp() {
906 #if defined(USE_SIGNALS) 566 #if defined(USE_SIGNALS)
907 SignalHandler::SetUp(); 567 SignalHandler::SetUp();
908 #endif 568 #endif
909 SamplerThread::SetUp();
910 } 569 }
911 570
912 571
913 void Sampler::TearDown() { 572 void Sampler::TearDown() {
914 SamplerThread::TearDown();
915 #if defined(USE_SIGNALS) 573 #if defined(USE_SIGNALS)
916 SignalHandler::TearDown(); 574 SignalHandler::TearDown();
917 #endif 575 #endif
918 } 576 }
919 577
920 Sampler::Sampler(Isolate* isolate, int interval) 578 Sampler::Sampler(Isolate* isolate)
921 : isolate_(isolate), 579 : is_counting_samples_(false),
922 interval_(interval), 580 js_sample_count_(0),
581 external_sample_count_(0),
582 isolate_(isolate),
923 profiling_(false), 583 profiling_(false),
924 has_processing_thread_(false), 584 active_(false) {
925 active_(false),
926 registered_(false),
927 is_counting_samples_(false),
928 js_sample_count_(0),
929 external_sample_count_(0) {
930 data_ = new PlatformData; 585 data_ = new PlatformData;
931 } 586 }
932 587
933 Sampler::~Sampler() { 588 Sampler::~Sampler() {
934 DCHECK(!IsActive()); 589 DCHECK(!IsActive());
935 if (IsRegistered()) {
936 SamplerThread::RemoveSampler(this);
937 }
938 delete data_; 590 delete data_;
939 } 591 }
940 592
941 void Sampler::Start() { 593 void Sampler::Start() {
942 DCHECK(!IsActive()); 594 DCHECK(!IsActive());
943 SetActive(true); 595 SetActive(true);
944 SamplerThread::AddActiveSampler(this); 596 SamplerManager::AddSampler(this);
945 } 597 }
946 598
947 599
948 void Sampler::Stop() { 600 void Sampler::Stop() {
601 SamplerManager::RemoveSampler(this);
949 DCHECK(IsActive()); 602 DCHECK(IsActive());
950 SamplerThread::RemoveSampler(this);
951 SetActive(false); 603 SetActive(false);
952 SetRegistered(false);
953 } 604 }
954 605
955 606
956 void Sampler::IncreaseProfilingDepth() { 607 void Sampler::IncreaseProfilingDepth() {
957 base::NoBarrier_AtomicIncrement(&profiling_, 1); 608 base::NoBarrier_AtomicIncrement(&profiling_, 1);
958 #if defined(USE_SIGNALS) 609 #if defined(USE_SIGNALS)
959 SignalHandler::IncreaseSamplerCount(); 610 SignalHandler::IncreaseSamplerCount();
960 #endif 611 #endif
961 } 612 }
962 613
963 614
964 void Sampler::DecreaseProfilingDepth() { 615 void Sampler::DecreaseProfilingDepth() {
965 #if defined(USE_SIGNALS) 616 #if defined(USE_SIGNALS)
966 SignalHandler::DecreaseSamplerCount(); 617 SignalHandler::DecreaseSamplerCount();
967 #endif 618 #endif
968 base::NoBarrier_AtomicIncrement(&profiling_, -1); 619 base::NoBarrier_AtomicIncrement(&profiling_, -1);
969 } 620 }
970 621
971 622
972 void Sampler::SampleStack(const v8::RegisterState& state) {
973 TickSample* sample = isolate_->cpu_profiler()->StartTickSample();
974 TickSample sample_obj;
975 if (sample == NULL) sample = &sample_obj;
976 sample->Init(isolate_, state, TickSample::kIncludeCEntryFrame, true);
977 if (is_counting_samples_ && !sample->timestamp.IsNull()) {
978 if (sample->state == JS) ++js_sample_count_;
979 if (sample->state == EXTERNAL) ++external_sample_count_;
980 }
981 Tick(sample);
982 if (sample != &sample_obj) {
983 isolate_->cpu_profiler()->FinishTickSample();
984 }
985 }
986
987
988 #if defined(USE_SIGNALS) 623 #if defined(USE_SIGNALS)
989 624
990 void Sampler::DoSample() { 625 void Sampler::DoSample() {
991 if (!SignalHandler::Installed()) return; 626 if (!SignalHandler::Installed()) return;
992 if (!IsActive() && !IsRegistered()) {
993 SamplerThread::RegisterSampler(this);
994 SetRegistered(true);
995 }
996 pthread_kill(platform_data()->vm_tid(), SIGPROF); 627 pthread_kill(platform_data()->vm_tid(), SIGPROF);
997 } 628 }
998 629
999 #elif V8_OS_WIN || V8_OS_CYGWIN 630 #elif V8_OS_WIN || V8_OS_CYGWIN
1000 631
1001 void Sampler::DoSample() { 632 void Sampler::DoSample() {
1002 HANDLE profiled_thread = platform_data()->profiled_thread(); 633 HANDLE profiled_thread = platform_data()->profiled_thread();
1003 if (profiled_thread == NULL) return; 634 if (profiled_thread == NULL) return;
1004 635
1005 const DWORD kSuspendFailed = static_cast<DWORD>(-1); 636 const DWORD kSuspendFailed = static_cast<DWORD>(-1);
1006 if (SuspendThread(profiled_thread) == kSuspendFailed) return; 637 if (SuspendThread(profiled_thread) == kSuspendFailed) return;
1007 638
1008 // Context used for sampling the register state of the profiled thread. 639 // Context used for sampling the register state of the profiled thread.
1009 CONTEXT context; 640 CONTEXT context;
1010 memset(&context, 0, sizeof(context)); 641 memset(&context, 0, sizeof(context));
1011 context.ContextFlags = CONTEXT_FULL; 642 context.ContextFlags = CONTEXT_FULL;
1012 if (GetThreadContext(profiled_thread, &context) != 0) { 643 if (GetThreadContext(profiled_thread, &context) != 0) {
1013 v8::RegisterState state; 644 v8::RegisterState state;
1014 #if defined(USE_SIMULATOR) 645 #if V8_HOST_ARCH_X64
1015 if (!SimulatorHelper::FillRegisters(isolate(), &state)) { 646 state.pc = reinterpret_cast<void*>(context.Rip);
1016 ResumeThread(profiled_thread); 647 state.sp = reinterpret_cast<void*>(context.Rsp);
1017 return; 648 state.fp = reinterpret_cast<void*>(context.Rbp);
1018 }
1019 #else 649 #else
1020 #if V8_HOST_ARCH_X64 650 state.pc = reinterpret_cast<void*>(context.Eip);
1021 state.pc = reinterpret_cast<Address>(context.Rip); 651 state.sp = reinterpret_cast<void*>(context.Esp);
1022 state.sp = reinterpret_cast<Address>(context.Rsp); 652 state.fp = reinterpret_cast<void*>(context.Ebp);
1023 state.fp = reinterpret_cast<Address>(context.Rbp);
1024 #else
1025 state.pc = reinterpret_cast<Address>(context.Eip);
1026 state.sp = reinterpret_cast<Address>(context.Esp);
1027 state.fp = reinterpret_cast<Address>(context.Ebp);
1028 #endif 653 #endif
1029 #endif // USE_SIMULATOR
1030 SampleStack(state); 654 SampleStack(state);
1031 } 655 }
1032 ResumeThread(profiled_thread); 656 ResumeThread(profiled_thread);
1033 } 657 }
1034 658
1035 #endif // USE_SIGNALS 659 #endif // USE_SIGNALS
1036 660
1037 661 } // namespace sampler
1038 } // namespace internal
1039 } // namespace v8 662 } // namespace v8
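To tie the lifecycle methods together, here is a hedged sketch of a client of the new v8::sampler::Sampler. The `override` on SampleStack, the v8::Isolate* constructor argument, and the call order are inferences from this diff (and from the old src/profiler/sampler.h it replaces), not guarantees about the final header:

```cpp
#include "include/v8.h"
#include "src/libsampler/v8-sampler.h"

// Hypothetical client: counts how many samples were delivered.
class CountingSampler : public v8::sampler::Sampler {
 public:
  explicit CountingSampler(v8::Isolate* isolate) : Sampler(isolate) {}

  // Called from the SIGPROF handler (POSIX) or the suspending thread (Windows)
  // with the interrupted thread's pc/sp/fp.
  void SampleStack(const v8::RegisterState& /*state*/) override { ++sample_count_; }

  int sample_count() const { return sample_count_; }

 private:
  int sample_count_ = 0;
};

void SampleOnce(v8::Isolate* isolate) {
  v8::sampler::Sampler::SetUp();      // allocates the SignalHandler mutex
  CountingSampler sampler(isolate);
  sampler.Start();                    // marks active, registers in SamplerManager
  sampler.IncreaseProfilingDepth();   // first increment installs the SIGPROF handler
  sampler.DoSample();                 // POSIX: pthread_kill(vm_tid, SIGPROF)
  sampler.DecreaseProfilingDepth();
  sampler.Stop();
  v8::sampler::Sampler::TearDown();
}
```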