Chromium Code Reviews

Side by Side Diff: src/libsampler/v8-sampler.cc

Issue 1922303002: Create libsampler as V8 sampler library. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Fix TSAN failure (created 4 years, 7 months ago)
1 // Copyright 2013 the V8 project authors. All rights reserved. 1 // Copyright 2016 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/profiler/sampler.h" 5 #include "src/libsampler/v8-sampler.h"
6 6
7 #if V8_OS_POSIX && !V8_OS_CYGWIN 7 #if V8_OS_POSIX && !V8_OS_CYGWIN
8 8
9 #define USE_SIGNALS 9 #define USE_SIGNALS
10 10
11 #include <errno.h> 11 #include <errno.h>
12 #include <pthread.h> 12 #include <pthread.h>
13 #include <signal.h> 13 #include <signal.h>
14 #include <sys/time.h> 14 #include <sys/time.h>
15 15
(...skipping 19 matching lines...)
35 !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT) 35 !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
36 #include <asm/sigcontext.h> // NOLINT 36 #include <asm/sigcontext.h> // NOLINT
37 #endif 37 #endif
38 38
39 #elif V8_OS_WIN || V8_OS_CYGWIN 39 #elif V8_OS_WIN || V8_OS_CYGWIN
40 40
41 #include "src/base/win32-headers.h" 41 #include "src/base/win32-headers.h"
42 42
43 #endif 43 #endif
44 44
45 #include <algorithm>
46 #include <vector>
47 #include <map>
48
45 #include "src/base/atomic-utils.h" 49 #include "src/base/atomic-utils.h"
46 #include "src/base/platform/platform.h" 50 #include "src/base/platform/platform.h"
47 #include "src/profiler/cpu-profiler-inl.h" 51 #include "src/libsampler/hashmap.h"
48 #include "src/profiler/tick-sample.h"
49 #include "src/simulator.h"
50 #include "src/v8threads.h"
51 52
52 53
53 #if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T) 54 #if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T)
54 55
55 // Not all versions of Android's C library provide ucontext_t. 56 // Not all versions of Android's C library provide ucontext_t.
56 // Detect this and provide custom but compatible definitions. Note that these 57 // Detect this and provide custom but compatible definitions. Note that these
57 // follow the GLibc naming convention to access register values from 58 // follow the GLibc naming convention to access register values from
58 // mcontext_t. 59 // mcontext_t.
59 // 60 //
60 // See http://code.google.com/p/android/issues/detail?id=34784 61 // See http://code.google.com/p/android/issues/detail?id=34784
(...skipping 87 matching lines...)
148 mcontext_t uc_mcontext; 149 mcontext_t uc_mcontext;
149 // Other fields are not used by V8, don't define them here. 150 // Other fields are not used by V8, don't define them here.
150 } ucontext_t; 151 } ucontext_t;
151 enum { REG_RBP = 10, REG_RSP = 15, REG_RIP = 16 }; 152 enum { REG_RBP = 10, REG_RSP = 15, REG_RIP = 16 };
152 #endif 153 #endif
153 154
154 #endif // V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T) 155 #endif // V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T)
155 156
156 157
157 namespace v8 { 158 namespace v8 {
158 namespace internal { 159 namespace sampler {
159 160
160 namespace { 161 namespace {
161 162
162 class PlatformDataCommon : public Malloced { 163 #if defined(USE_SIGNALS)
163 public: 164 typedef std::vector<Sampler*> SamplerList;
164 PlatformDataCommon() : profiled_thread_id_(ThreadId::Current()) {} 165 typedef SamplerList::iterator SamplerListIterator;
165 ThreadId profiled_thread_id() { return profiled_thread_id_; }
166 166
167 protected:
168 ~PlatformDataCommon() {}
169
170 private:
171 ThreadId profiled_thread_id_;
172 };
173
174
175 typedef List<Sampler*> SamplerList;
176
177 #if defined(USE_SIGNALS)
178 class AtomicGuard { 167 class AtomicGuard {
179 public: 168 public:
180 explicit AtomicGuard(base::AtomicValue<int>* atomic, bool is_block = true) 169 explicit AtomicGuard(base::AtomicValue<int>* atomic, bool is_block = true)
181 : atomic_(atomic), 170 : atomic_(atomic),
182 is_success_(false) { 171 is_success_(false) {
183 do { 172 do {
184 // Use Acquire_Load to gain mutual exclusion. 173 // Use Acquire_Load to gain mutual exclusion.
185 USE(atomic_->Value()); 174 USE(atomic_->Value());
186 is_success_ = atomic_->TrySetValue(0, 1); 175 is_success_ = atomic_->TrySetValue(0, 1);
187 } while (is_block && !is_success_); 176 } while (is_block && !is_success_);
(...skipping 21 matching lines...)
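The is_block flag on AtomicGuard's constructor is what the rest of this file leans on: AddSampler() and RemoveSampler() below take the guard in blocking mode, while the SIGPROF handler takes it with is_block == false and bails out if the counter is already held, since spinning inside a signal handler could deadlock against the interrupted thread. A minimal usage sketch, illustrative only; the releasing destructor and the is_success() accessor sit in the elided lines above:

    // Illustrative usage of AtomicGuard, not patch code.
    base::AtomicValue<int> counter(0);

    void RegistrationPath() {
      AtomicGuard guard(&counter);         // spins until it owns the counter
      // ... mutate the thread -> samplers map ...
    }                                      // counter released by ~AtomicGuard()

    void SignalPath() {
      AtomicGuard guard(&counter, false);  // single TrySetValue(0, 1) attempt
      if (!guard.is_success()) return;     // owner is busy; drop this sample
      // ... read the thread -> samplers map ...
    }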
209 198
210 199
211 // Returns hash value for hash map. 200 // Returns hash value for hash map.
212 uint32_t ThreadHash(pthread_t thread_id) { 201 uint32_t ThreadHash(pthread_t thread_id) {
213 #if V8_OS_MACOSX 202 #if V8_OS_MACOSX
214 return static_cast<uint32_t>(reinterpret_cast<intptr_t>(thread_id)); 203 return static_cast<uint32_t>(reinterpret_cast<intptr_t>(thread_id));
215 #else 204 #else
216 return static_cast<uint32_t>(thread_id); 205 return static_cast<uint32_t>(thread_id);
217 #endif 206 #endif
218 } 207 }
208
219 #endif // USE_SIGNALS 209 #endif // USE_SIGNALS
220 210
221 } // namespace 211 } // namespace
222 212
223 #if defined(USE_SIGNALS) 213 #if defined(USE_SIGNALS)
224 214
225 class Sampler::PlatformData : public PlatformDataCommon { 215 class Sampler::PlatformData {
226 public: 216 public:
227 PlatformData() : vm_tid_(pthread_self()) {} 217 PlatformData() : vm_tid_(pthread_self()) {}
228 pthread_t vm_tid() const { return vm_tid_; } 218 pthread_t vm_tid() const { return vm_tid_; }
229 219
230 private: 220 private:
231 pthread_t vm_tid_; 221 pthread_t vm_tid_;
232 }; 222 };
233 223
224
225 class SamplerManager {
226 public:
227 static void AddSampler(Sampler* sampler) {
228 AtomicGuard atomic_guard(&samplers_access_counter_);
229 DCHECK(sampler->IsActive() || !sampler->IsRegistered());
230 // Add sampler into map if needed.
231 pthread_t thread_id = sampler->platform_data()->vm_tid();
232 HashMap::Entry* entry =
233 sampler_map_.Pointer()->LookupOrInsert(ThreadKey(thread_id),
234 ThreadHash(thread_id));
235 DCHECK(entry != NULL);
236 if (entry->value == NULL) {
237 SamplerList* samplers = new SamplerList();
238 samplers->push_back(sampler);
239 entry->value = samplers;
240 } else {
241 SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value);
242 bool exists = false;
243 for (SamplerListIterator iter = samplers->begin();
244 iter != samplers->end(); ++iter) {
245 if (*iter == sampler) {
246 exists = true;
247 break;
248 }
249 }
250 if (!exists) {
251 samplers->push_back(sampler);
252 }
253 }
254 }
255
256 static void RemoveSampler(Sampler* sampler) {
257 AtomicGuard atomic_guard(&samplers_access_counter_);
258 DCHECK(sampler->IsActive() || sampler->IsRegistered());
259 // Remove sampler from map.
260 pthread_t thread_id = sampler->platform_data()->vm_tid();
261 void* thread_key = ThreadKey(thread_id);
262 uint32_t thread_hash = ThreadHash(thread_id);
263 HashMap::Entry* entry = sampler_map_.Get().Lookup(thread_key, thread_hash);
264 DCHECK(entry != NULL);
265 SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value);
266 for (SamplerListIterator iter = samplers->begin(); iter != samplers->end();
267 ++iter) {
268 if (*iter == sampler) {
269 samplers->erase(iter);
270 break;
271 }
272 }
273 if (samplers->empty()) {
274 sampler_map_.Pointer()->Remove(thread_key, thread_hash);
275 delete samplers;
276 }
277 }
278
279 private:
280 struct HashMapCreateTrait {
281 static void Construct(HashMap* allocated_ptr) {
282 new (allocated_ptr) HashMap(HashMap::PointersMatch);
283 }
284 };
285 friend class SignalHandler;
286 static base::LazyInstance<HashMap, HashMapCreateTrait>::type
287 sampler_map_;
288 static base::AtomicValue<int> samplers_access_counter_;
289 };
290
291 base::LazyInstance<HashMap, SamplerManager::HashMapCreateTrait>::type
292 SamplerManager::sampler_map_ = LAZY_INSTANCE_INITIALIZER;
293 base::AtomicValue<int> SamplerManager::samplers_access_counter_(0);
294
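SamplerManager keeps a single process-wide table that maps a pthread_t to the SamplerList (a std::vector<Sampler*>) of all samplers registered on that thread, guarded by samplers_access_counter_. For orientation only, the same association expressed with standard containers; the patch itself uses the self-contained HashMap from src/libsampler/hashmap.h together with the AtomicGuard above:

    // Orientation sketch, not patch code. Relies on the <map>, <vector>,
    // <algorithm> and <pthread.h> includes already present in this file.
    std::map<pthread_t, std::vector<Sampler*>> thread_to_samplers;

    void AddSamplerSketch(Sampler* sampler) {
      // The real code wraps this in AtomicGuard(&samplers_access_counter_).
      pthread_t tid = sampler->platform_data()->vm_tid();
      std::vector<Sampler*>& samplers = thread_to_samplers[tid];
      if (std::find(samplers.begin(), samplers.end(), sampler) ==
          samplers.end()) {
        samplers.push_back(sampler);
      }
    }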
295
234 #elif V8_OS_WIN || V8_OS_CYGWIN 296 #elif V8_OS_WIN || V8_OS_CYGWIN
235 297
236 // ---------------------------------------------------------------------------- 298 // ----------------------------------------------------------------------------
237 // Win32 profiler support. On Cygwin we use the same sampler implementation as 299 // Win32 profiler support. On Cygwin we use the same sampler implementation as
238 // on Win32. 300 // on Win32.
239 301
240 class Sampler::PlatformData : public PlatformDataCommon { 302 class Sampler::PlatformData {
241 public: 303 public:
242 // Get a handle to the calling thread. This is the thread that we are 304 // Get a handle to the calling thread. This is the thread that we are
243 // going to profile. We need to make a copy of the handle because we are 305 // going to profile. We need to make a copy of the handle because we are
244 // going to use it in the sampler thread. Using GetThreadHandle() will 306 // going to use it in the sampler thread. Using GetThreadHandle() will
245 // not work in this case. We're using OpenThread because DuplicateHandle 307 // not work in this case. We're using OpenThread because DuplicateHandle
246 // for some reason doesn't work in Chrome's sandbox. 308 // for some reason doesn't work in Chrome's sandbox.
247 PlatformData() 309 PlatformData()
248 : profiled_thread_(OpenThread(THREAD_GET_CONTEXT | 310 : profiled_thread_(OpenThread(THREAD_GET_CONTEXT |
249 THREAD_SUSPEND_RESUME | 311 THREAD_SUSPEND_RESUME |
250 THREAD_QUERY_INFORMATION, 312 THREAD_QUERY_INFORMATION,
251 false, 313 false,
252 GetCurrentThreadId())) {} 314 GetCurrentThreadId())) {}
253 315
254 ~PlatformData() { 316 ~PlatformData() {
255 if (profiled_thread_ != NULL) { 317 if (profiled_thread_ != NULL) {
256 CloseHandle(profiled_thread_); 318 CloseHandle(profiled_thread_);
257 profiled_thread_ = NULL; 319 profiled_thread_ = NULL;
258 } 320 }
259 } 321 }
260 322
261 HANDLE profiled_thread() { return profiled_thread_; } 323 HANDLE profiled_thread() { return profiled_thread_; }
262 324
263 private: 325 private:
264 HANDLE profiled_thread_; 326 HANDLE profiled_thread_;
265 }; 327 };
266 #endif 328 #endif // USE_SIGNALS
267 329
268 330
269 #if defined(USE_SIGNALS) 331 #if defined(USE_SIGNALS)
270 332 class SignalHandler {
271 class SignalHandler : public AllStatic {
272 public: 333 public:
273 static void SetUp() { if (!mutex_) mutex_ = new base::Mutex(); } 334 static void SetUp() { if (!mutex_) mutex_ = new base::Mutex(); }
274 static void TearDown() { delete mutex_; mutex_ = NULL; } 335 static void TearDown() { delete mutex_; mutex_ = NULL; }
275 336
276 static void IncreaseSamplerCount() { 337 static void IncreaseSamplerCount() {
277 base::LockGuard<base::Mutex> lock_guard(mutex_); 338 base::LockGuard<base::Mutex> lock_guard(mutex_);
278 if (++client_count_ == 1) Install(); 339 if (++client_count_ == 1) Install();
279 } 340 }
280 341
281 static void DecreaseSamplerCount() { 342 static void DecreaseSamplerCount() {
282 base::LockGuard<base::Mutex> lock_guard(mutex_); 343 base::LockGuard<base::Mutex> lock_guard(mutex_);
283 if (--client_count_ == 0) Restore(); 344 if (--client_count_ == 0) Restore();
284 } 345 }
285 346
286 static bool Installed() { 347 static bool Installed() {
348 base::LockGuard<base::Mutex> lock_guard(mutex_);
287 return signal_handler_installed_; 349 return signal_handler_installed_;
288 } 350 }
289 351
290 #if !V8_OS_NACL
291 static void CollectSample(void* context, Sampler* sampler);
292 #endif
293
294 private: 352 private:
295 static void Install() { 353 static void Install() {
296 #if !V8_OS_NACL 354 #if !V8_OS_NACL
297 struct sigaction sa; 355 struct sigaction sa;
298 sa.sa_sigaction = &HandleProfilerSignal; 356 sa.sa_sigaction = &HandleProfilerSignal;
299 sigemptyset(&sa.sa_mask); 357 sigemptyset(&sa.sa_mask);
300 #if V8_OS_QNX 358 #if V8_OS_QNX
301 sa.sa_flags = SA_SIGINFO; 359 sa.sa_flags = SA_SIGINFO;
302 #else 360 #else
303 sa.sa_flags = SA_RESTART | SA_SIGINFO; 361 sa.sa_flags = SA_RESTART | SA_SIGINFO;
304 #endif 362 #endif
305 signal_handler_installed_ = 363 signal_handler_installed_ =
306 (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0); 364 (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
307 #endif 365 #endif // !V8_OS_NACL
308 } 366 }
309 367
310 static void Restore() { 368 static void Restore() {
311 #if !V8_OS_NACL 369 #if !V8_OS_NACL
312 if (signal_handler_installed_) { 370 if (signal_handler_installed_) {
313 sigaction(SIGPROF, &old_signal_handler_, 0); 371 sigaction(SIGPROF, &old_signal_handler_, 0);
314 signal_handler_installed_ = false; 372 signal_handler_installed_ = false;
315 } 373 }
316 #endif 374 #endif
317 } 375 }
318 376
319 #if !V8_OS_NACL 377 #if !V8_OS_NACL
378 static void FillRegisterState(void* context, RegisterState* regs);
320 static void HandleProfilerSignal(int signal, siginfo_t* info, void* context); 379 static void HandleProfilerSignal(int signal, siginfo_t* info, void* context);
321 #endif 380 #endif
322 // Protects the process wide state below. 381 // Protects the process wide state below.
323 static base::Mutex* mutex_; 382 static base::Mutex* mutex_;
324 static int client_count_; 383 static int client_count_;
325 static bool signal_handler_installed_; 384 static bool signal_handler_installed_;
326 static struct sigaction old_signal_handler_; 385 static struct sigaction old_signal_handler_;
327 }; 386 };
328 387
329 388
330 base::Mutex* SignalHandler::mutex_ = NULL; 389 base::Mutex* SignalHandler::mutex_ = NULL;
331 int SignalHandler::client_count_ = 0; 390 int SignalHandler::client_count_ = 0;
332 struct sigaction SignalHandler::old_signal_handler_; 391 struct sigaction SignalHandler::old_signal_handler_;
333 bool SignalHandler::signal_handler_installed_ = false; 392 bool SignalHandler::signal_handler_installed_ = false;
334 393
335 394
336 // As Native Client does not support signal handling, profiling is disabled. 395 // As Native Client does not support signal handling, profiling is disabled.
337 #if !V8_OS_NACL 396 #if !V8_OS_NACL
338 void SignalHandler::CollectSample(void* context, Sampler* sampler) { 397 void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
339 if (sampler == NULL || (!sampler->IsProfiling() && 398 void* context) {
340 !sampler->IsRegistered())) { 399 USE(info);
341 return; 400 if (signal != SIGPROF) return;
342 } 401 AtomicGuard atomic_guard(&SamplerManager::samplers_access_counter_, false);
343 Isolate* isolate = sampler->isolate(); 402 if (!atomic_guard.is_success()) return;
344 403 pthread_t thread_id = pthread_self();
345 // We require a fully initialized and entered isolate. 404 HashMap::Entry* entry =
346 if (isolate == NULL || !isolate->IsInUse()) return; 405 SamplerManager::sampler_map_.Pointer()->Lookup(ThreadKey(thread_id),
347 406 ThreadHash(thread_id));
348 if (v8::Locker::IsActive() && 407 if (entry == NULL) return;
349 !isolate->thread_manager()->IsLockedByCurrentThread()) { 408 SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value);
350 return;
351 }
352 409
353 v8::RegisterState state; 410 v8::RegisterState state;
411 FillRegisterState(context, &state);
354 412
355 #if defined(USE_SIMULATOR) 413 for (int i = 0; i < samplers->size(); ++i) {
356 if (!SimulatorHelper::FillRegisters(isolate, &state)) return; 414 Sampler* sampler = (*samplers)[i];
357 #else 415 Isolate* isolate = sampler->isolate();
416
417 // We require a fully initialized and entered isolate.
418 if (isolate == NULL || !isolate->IsInUse()) return;
419
420 if (v8::Locker::IsActive() && !Locker::IsLocked(isolate)) return;
421
422 sampler->SampleStack(state);
423 }
424 }
425
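End to end, the signal path is: a controlling thread calls sampler->DoSample() (defined near the bottom of this file), which raises SIGPROF on the sampled thread with pthread_kill; the handler above then runs on that thread, captures its registers, and hands them to every Sampler registered for pthread_self(). A condensed, illustrative trace:

    // Illustrative call flow, not patch code.
    //   controlling thread                  sampled thread
    //   ------------------                  --------------
    //   sampler->DoSample()
    //     pthread_kill(vm_tid, SIGPROF) --> HandleProfilerSignal(SIGPROF, info, ucontext)
    //                                         FillRegisterState(ucontext, &state)
    //                                         for each Sampler on this thread:
    //                                           sampler->SampleStack(state)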
426 void SignalHandler::FillRegisterState(void* context, RegisterState* state) {
358 // Extracting the sample from the context is extremely machine dependent. 427 // Extracting the sample from the context is extremely machine dependent.
359 ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context); 428 ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
360 #if !(V8_OS_OPENBSD || (V8_OS_LINUX && (V8_HOST_ARCH_PPC || V8_HOST_ARCH_S390))) 429 #if !(V8_OS_OPENBSD || (V8_OS_LINUX && (V8_HOST_ARCH_PPC || V8_HOST_ARCH_S390)))
361 mcontext_t& mcontext = ucontext->uc_mcontext; 430 mcontext_t& mcontext = ucontext->uc_mcontext;
362 #endif 431 #endif
363 #if V8_OS_LINUX 432 #if V8_OS_LINUX
364 #if V8_HOST_ARCH_IA32 433 #if V8_HOST_ARCH_IA32
365 state.pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]); 434 state->pc = reinterpret_cast<void*>(mcontext.gregs[REG_EIP]);
366 state.sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]); 435 state->sp = reinterpret_cast<void*>(mcontext.gregs[REG_ESP]);
367 state.fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]); 436 state->fp = reinterpret_cast<void*>(mcontext.gregs[REG_EBP]);
368 #elif V8_HOST_ARCH_X64 437 #elif V8_HOST_ARCH_X64
369 state.pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]); 438 state->pc = reinterpret_cast<void*>(mcontext.gregs[REG_RIP]);
370 state.sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]); 439 state->sp = reinterpret_cast<void*>(mcontext.gregs[REG_RSP]);
371 state.fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]); 440 state->fp = reinterpret_cast<void*>(mcontext.gregs[REG_RBP]);
372 #elif V8_HOST_ARCH_ARM 441 #elif V8_HOST_ARCH_ARM
373 #if V8_LIBC_GLIBC && !V8_GLIBC_PREREQ(2, 4) 442 #if V8_LIBC_GLIBC && !V8_GLIBC_PREREQ(2, 4)
374 // Old GLibc ARM versions used a gregs[] array to access the register 443 // Old GLibc ARM versions used a gregs[] array to access the register
375 // values from mcontext_t. 444 // values from mcontext_t.
376 state.pc = reinterpret_cast<Address>(mcontext.gregs[R15]); 445 state->pc = reinterpret_cast<void*>(mcontext.gregs[R15]);
377 state.sp = reinterpret_cast<Address>(mcontext.gregs[R13]); 446 state->sp = reinterpret_cast<void*>(mcontext.gregs[R13]);
378 state.fp = reinterpret_cast<Address>(mcontext.gregs[R11]); 447 state->fp = reinterpret_cast<void*>(mcontext.gregs[R11]);
379 #else 448 #else
380 state.pc = reinterpret_cast<Address>(mcontext.arm_pc); 449 state->pc = reinterpret_cast<void*>(mcontext.arm_pc);
381 state.sp = reinterpret_cast<Address>(mcontext.arm_sp); 450 state->sp = reinterpret_cast<void*>(mcontext.arm_sp);
382 state.fp = reinterpret_cast<Address>(mcontext.arm_fp); 451 state->fp = reinterpret_cast<void*>(mcontext.arm_fp);
383 #endif // V8_LIBC_GLIBC && !V8_GLIBC_PREREQ(2, 4) 452 #endif // V8_LIBC_GLIBC && !V8_GLIBC_PREREQ(2, 4)
384 #elif V8_HOST_ARCH_ARM64 453 #elif V8_HOST_ARCH_ARM64
385 state.pc = reinterpret_cast<Address>(mcontext.pc); 454 state->pc = reinterpret_cast<void*>(mcontext.pc);
386 state.sp = reinterpret_cast<Address>(mcontext.sp); 455 state->sp = reinterpret_cast<void*>(mcontext.sp);
387 // FP is an alias for x29. 456 // FP is an alias for x29.
388 state.fp = reinterpret_cast<Address>(mcontext.regs[29]); 457 state->fp = reinterpret_cast<void*>(mcontext.regs[29]);
389 #elif V8_HOST_ARCH_MIPS 458 #elif V8_HOST_ARCH_MIPS
390 state.pc = reinterpret_cast<Address>(mcontext.pc); 459 state->pc = reinterpret_cast<void*>(mcontext.pc);
391 state.sp = reinterpret_cast<Address>(mcontext.gregs[29]); 460 state->sp = reinterpret_cast<void*>(mcontext.gregs[29]);
392 state.fp = reinterpret_cast<Address>(mcontext.gregs[30]); 461 state->fp = reinterpret_cast<void*>(mcontext.gregs[30]);
393 #elif V8_HOST_ARCH_MIPS64 462 #elif V8_HOST_ARCH_MIPS64
394 state.pc = reinterpret_cast<Address>(mcontext.pc); 463 state->pc = reinterpret_cast<void*>(mcontext.pc);
395 state.sp = reinterpret_cast<Address>(mcontext.gregs[29]); 464 state->sp = reinterpret_cast<void*>(mcontext.gregs[29]);
396 state.fp = reinterpret_cast<Address>(mcontext.gregs[30]); 465 state->fp = reinterpret_cast<void*>(mcontext.gregs[30]);
397 #elif V8_HOST_ARCH_PPC 466 #elif V8_HOST_ARCH_PPC
398 state.pc = reinterpret_cast<Address>(ucontext->uc_mcontext.regs->nip); 467 state->pc = reinterpret_cast<void*>(ucontext->uc_mcontext.regs->nip);
399 state.sp = reinterpret_cast<Address>(ucontext->uc_mcontext.regs->gpr[PT_R1]); 468 state->sp =
400 state.fp = reinterpret_cast<Address>(ucontext->uc_mcontext.regs->gpr[PT_R31]); 469 reinterpret_cast<void*>(ucontext->uc_mcontext.regs->gpr[PT_R1]);
470 state->fp =
471 reinterpret_cast<void*>(ucontext->uc_mcontext.regs->gpr[PT_R31]);
401 #elif V8_HOST_ARCH_S390 472 #elif V8_HOST_ARCH_S390
402 #if V8_TARGET_ARCH_32_BIT 473 #if V8_TARGET_ARCH_32_BIT
403 // 31-bit target will have bit 0 (MSB) of the PSW set to denote addressing 474 // 31-bit target will have bit 0 (MSB) of the PSW set to denote addressing
404 // mode. This bit needs to be masked out to resolve actual address. 475 // mode. This bit needs to be masked out to resolve actual address.
405 state.pc = 476 state->pc =
406 reinterpret_cast<Address>(ucontext->uc_mcontext.psw.addr & 0x7FFFFFFF); 477 reinterpret_cast<void*>(ucontext->uc_mcontext.psw.addr & 0x7FFFFFFF);
407 #else 478 #else
408 state.pc = reinterpret_cast<Address>(ucontext->uc_mcontext.psw.addr); 479 state->pc = reinterpret_cast<void*>(ucontext->uc_mcontext.psw.addr);
409 #endif // V8_TARGET_ARCH_32_BIT 480 #endif // V8_TARGET_ARCH_32_BIT
410 state.sp = reinterpret_cast<Address>(ucontext->uc_mcontext.gregs[15]); 481 state->sp = reinterpret_cast<void*>(ucontext->uc_mcontext.gregs[15]);
411 state.fp = reinterpret_cast<Address>(ucontext->uc_mcontext.gregs[11]); 482 state->fp = reinterpret_cast<void*>(ucontext->uc_mcontext.gregs[11]);
412 #endif // V8_HOST_ARCH_* 483 #endif // V8_HOST_ARCH_*
413 #elif V8_OS_MACOSX 484 #elif V8_OS_MACOSX
414 #if V8_HOST_ARCH_X64 485 #if V8_HOST_ARCH_X64
415 #if __DARWIN_UNIX03 486 #if __DARWIN_UNIX03
416 state.pc = reinterpret_cast<Address>(mcontext->__ss.__rip); 487 state->pc = reinterpret_cast<void*>(mcontext->__ss.__rip);
417 state.sp = reinterpret_cast<Address>(mcontext->__ss.__rsp); 488 state->sp = reinterpret_cast<void*>(mcontext->__ss.__rsp);
418 state.fp = reinterpret_cast<Address>(mcontext->__ss.__rbp); 489 state->fp = reinterpret_cast<void*>(mcontext->__ss.__rbp);
419 #else // !__DARWIN_UNIX03 490 #else // !__DARWIN_UNIX03
420 state.pc = reinterpret_cast<Address>(mcontext->ss.rip); 491 state->pc = reinterpret_cast<void*>(mcontext->ss.rip);
421 state.sp = reinterpret_cast<Address>(mcontext->ss.rsp); 492 state->sp = reinterpret_cast<void*>(mcontext->ss.rsp);
422 state.fp = reinterpret_cast<Address>(mcontext->ss.rbp); 493 state->fp = reinterpret_cast<void*>(mcontext->ss.rbp);
423 #endif // __DARWIN_UNIX03 494 #endif // __DARWIN_UNIX03
424 #elif V8_HOST_ARCH_IA32 495 #elif V8_HOST_ARCH_IA32
425 #if __DARWIN_UNIX03 496 #if __DARWIN_UNIX03
426 state.pc = reinterpret_cast<Address>(mcontext->__ss.__eip); 497 state->pc = reinterpret_cast<void*>(mcontext->__ss.__eip);
427 state.sp = reinterpret_cast<Address>(mcontext->__ss.__esp); 498 state->sp = reinterpret_cast<void*>(mcontext->__ss.__esp);
428 state.fp = reinterpret_cast<Address>(mcontext->__ss.__ebp); 499 state->fp = reinterpret_cast<void*>(mcontext->__ss.__ebp);
429 #else // !__DARWIN_UNIX03 500 #else // !__DARWIN_UNIX03
430 state.pc = reinterpret_cast<Address>(mcontext->ss.eip); 501 state->pc = reinterpret_cast<void*>(mcontext->ss.eip);
431 state.sp = reinterpret_cast<Address>(mcontext->ss.esp); 502 state->sp = reinterpret_cast<void*>(mcontext->ss.esp);
432 state.fp = reinterpret_cast<Address>(mcontext->ss.ebp); 503 state->fp = reinterpret_cast<void*>(mcontext->ss.ebp);
433 #endif // __DARWIN_UNIX03 504 #endif // __DARWIN_UNIX03
434 #endif // V8_HOST_ARCH_IA32 505 #endif // V8_HOST_ARCH_IA32
435 #elif V8_OS_FREEBSD 506 #elif V8_OS_FREEBSD
436 #if V8_HOST_ARCH_IA32 507 #if V8_HOST_ARCH_IA32
437 state.pc = reinterpret_cast<Address>(mcontext.mc_eip); 508 state->pc = reinterpret_cast<void*>(mcontext.mc_eip);
438 state.sp = reinterpret_cast<Address>(mcontext.mc_esp); 509 state->sp = reinterpret_cast<void*>(mcontext.mc_esp);
439 state.fp = reinterpret_cast<Address>(mcontext.mc_ebp); 510 state->fp = reinterpret_cast<void*>(mcontext.mc_ebp);
440 #elif V8_HOST_ARCH_X64 511 #elif V8_HOST_ARCH_X64
441 state.pc = reinterpret_cast<Address>(mcontext.mc_rip); 512 state->pc = reinterpret_cast<void*>(mcontext.mc_rip);
442 state.sp = reinterpret_cast<Address>(mcontext.mc_rsp); 513 state->sp = reinterpret_cast<void*>(mcontext.mc_rsp);
443 state.fp = reinterpret_cast<Address>(mcontext.mc_rbp); 514 state->fp = reinterpret_cast<void*>(mcontext.mc_rbp);
444 #elif V8_HOST_ARCH_ARM 515 #elif V8_HOST_ARCH_ARM
445 state.pc = reinterpret_cast<Address>(mcontext.mc_r15); 516 state->pc = reinterpret_cast<void*>(mcontext.mc_r15);
446 state.sp = reinterpret_cast<Address>(mcontext.mc_r13); 517 state->sp = reinterpret_cast<void*>(mcontext.mc_r13);
447 state.fp = reinterpret_cast<Address>(mcontext.mc_r11); 518 state->fp = reinterpret_cast<void*>(mcontext.mc_r11);
448 #endif // V8_HOST_ARCH_* 519 #endif // V8_HOST_ARCH_*
449 #elif V8_OS_NETBSD 520 #elif V8_OS_NETBSD
450 #if V8_HOST_ARCH_IA32 521 #if V8_HOST_ARCH_IA32
451 state.pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_EIP]); 522 state->pc = reinterpret_cast<void*>(mcontext.__gregs[_REG_EIP]);
452 state.sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_ESP]); 523 state->sp = reinterpret_cast<void*>(mcontext.__gregs[_REG_ESP]);
453 state.fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_EBP]); 524 state->fp = reinterpret_cast<void*>(mcontext.__gregs[_REG_EBP]);
454 #elif V8_HOST_ARCH_X64 525 #elif V8_HOST_ARCH_X64
455 state.pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_RIP]); 526 state->pc = reinterpret_cast<void*>(mcontext.__gregs[_REG_RIP]);
456 state.sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RSP]); 527 state->sp = reinterpret_cast<void*>(mcontext.__gregs[_REG_RSP]);
457 state.fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RBP]); 528 state->fp = reinterpret_cast<void*>(mcontext.__gregs[_REG_RBP]);
458 #endif // V8_HOST_ARCH_* 529 #endif // V8_HOST_ARCH_*
459 #elif V8_OS_OPENBSD 530 #elif V8_OS_OPENBSD
460 #if V8_HOST_ARCH_IA32 531 #if V8_HOST_ARCH_IA32
461 state.pc = reinterpret_cast<Address>(ucontext->sc_eip); 532 state->pc = reinterpret_cast<void*>(ucontext->sc_eip);
462 state.sp = reinterpret_cast<Address>(ucontext->sc_esp); 533 state->sp = reinterpret_cast<void*>(ucontext->sc_esp);
463 state.fp = reinterpret_cast<Address>(ucontext->sc_ebp); 534 state->fp = reinterpret_cast<void*>(ucontext->sc_ebp);
464 #elif V8_HOST_ARCH_X64 535 #elif V8_HOST_ARCH_X64
465 state.pc = reinterpret_cast<Address>(ucontext->sc_rip); 536 state->pc = reinterpret_cast<void*>(ucontext->sc_rip);
466 state.sp = reinterpret_cast<Address>(ucontext->sc_rsp); 537 state->sp = reinterpret_cast<void*>(ucontext->sc_rsp);
467 state.fp = reinterpret_cast<Address>(ucontext->sc_rbp); 538 state->fp = reinterpret_cast<void*>(ucontext->sc_rbp);
468 #endif // V8_HOST_ARCH_* 539 #endif // V8_HOST_ARCH_*
469 #elif V8_OS_SOLARIS 540 #elif V8_OS_SOLARIS
470 state.pc = reinterpret_cast<Address>(mcontext.gregs[REG_PC]); 541 state->pc = reinterpret_cast<void*>(mcontext.gregs[REG_PC]);
471 state.sp = reinterpret_cast<Address>(mcontext.gregs[REG_SP]); 542 state->sp = reinterpret_cast<void*>(mcontext.gregs[REG_SP]);
472 state.fp = reinterpret_cast<Address>(mcontext.gregs[REG_FP]); 543 state->fp = reinterpret_cast<void*>(mcontext.gregs[REG_FP]);
473 #elif V8_OS_QNX 544 #elif V8_OS_QNX
474 #if V8_HOST_ARCH_IA32 545 #if V8_HOST_ARCH_IA32
475 state.pc = reinterpret_cast<Address>(mcontext.cpu.eip); 546 state->pc = reinterpret_cast<void*>(mcontext.cpu.eip);
476 state.sp = reinterpret_cast<Address>(mcontext.cpu.esp); 547 state->sp = reinterpret_cast<void*>(mcontext.cpu.esp);
477 state.fp = reinterpret_cast<Address>(mcontext.cpu.ebp); 548 state->fp = reinterpret_cast<void*>(mcontext.cpu.ebp);
478 #elif V8_HOST_ARCH_ARM 549 #elif V8_HOST_ARCH_ARM
479 state.pc = reinterpret_cast<Address>(mcontext.cpu.gpr[ARM_REG_PC]); 550 state->pc = reinterpret_cast<void*>(mcontext.cpu.gpr[ARM_REG_PC]);
480 state.sp = reinterpret_cast<Address>(mcontext.cpu.gpr[ARM_REG_SP]); 551 state->sp = reinterpret_cast<void*>(mcontext.cpu.gpr[ARM_REG_SP]);
481 state.fp = reinterpret_cast<Address>(mcontext.cpu.gpr[ARM_REG_FP]); 552 state->fp = reinterpret_cast<void*>(mcontext.cpu.gpr[ARM_REG_FP]);
482 #endif // V8_HOST_ARCH_* 553 #endif // V8_HOST_ARCH_*
483 #elif V8_OS_AIX 554 #elif V8_OS_AIX
484 state.pc = reinterpret_cast<Address>(mcontext.jmp_context.iar); 555 state->pc = reinterpret_cast<void*>(mcontext.jmp_context.iar);
485 state.sp = reinterpret_cast<Address>(mcontext.jmp_context.gpr[1]); 556 state->sp = reinterpret_cast<void*>(mcontext.jmp_context.gpr[1]);
486 state.fp = reinterpret_cast<Address>(mcontext.jmp_context.gpr[31]); 557 state->fp = reinterpret_cast<void*>(mcontext.jmp_context.gpr[31]);
487 #endif // V8_OS_AIX 558 #endif // V8_OS_AIX
488 #endif // USE_SIMULATOR
489 sampler->SampleStack(state);
490 } 559 }
491 #endif // V8_OS_NACL 560
561 #endif // !V8_OS_NACL
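Every branch of FillRegisterState() now writes into the embedder-visible v8::RegisterState instead of casting through the internal Address type, which is why the casts above all target void*. For reference, that struct in include/v8.h is roughly the following (paraphrased; check the header for the authoritative declaration):

    struct RegisterState {
      RegisterState() : pc(nullptr), sp(nullptr), fp(nullptr) {}
      void* pc;  // Instruction pointer.
      void* sp;  // Stack pointer.
      void* fp;  // Frame pointer.
    };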
492 562
493 #endif // USE_SIGNALS 563 #endif // USE_SIGNALS
494 564
495 565
496 class SamplerThread : public base::Thread {
497 public:
498 static const int kSamplerThreadStackSize = 64 * KB;
499
500 explicit SamplerThread(int interval)
501 : Thread(base::Thread::Options("SamplerThread", kSamplerThreadStackSize)),
502 interval_(interval) {}
503
504 static void SetUp() { if (!mutex_) mutex_ = new base::Mutex(); }
505 static void TearDown() { delete mutex_; mutex_ = NULL; }
506
507 static void AddActiveSampler(Sampler* sampler) {
508 bool need_to_start = false;
509 base::LockGuard<base::Mutex> lock_guard(mutex_);
510 if (instance_ == NULL) {
511 // Start a thread that will send SIGPROF signal to VM threads,
512 // when CPU profiling will be enabled.
513 instance_ = new SamplerThread(sampler->interval());
514 need_to_start = true;
515 }
516
517 DCHECK(sampler->IsActive());
518 DCHECK(instance_->interval_ == sampler->interval());
519
520 #if defined(USE_SIGNALS)
521 AddSampler(sampler);
522 #else
523 DCHECK(!instance_->active_samplers_.Contains(sampler));
524 instance_->active_samplers_.Add(sampler);
525 #endif // USE_SIGNALS
526
527 if (need_to_start) instance_->StartSynchronously();
528 }
529
530 static void RemoveSampler(Sampler* sampler) {
531 SamplerThread* instance_to_remove = NULL;
532 {
533 base::LockGuard<base::Mutex> lock_guard(mutex_);
534
535 DCHECK(sampler->IsActive() || sampler->IsRegistered());
536 #if defined(USE_SIGNALS)
537 {
538 AtomicGuard atomic_guard(&sampler_list_access_counter_);
539 // Remove sampler from map.
540 pthread_t thread_id = sampler->platform_data()->vm_tid();
541 void* thread_key = ThreadKey(thread_id);
542 uint32_t thread_hash = ThreadHash(thread_id);
543 HashMap::Entry* entry =
544 thread_id_to_samplers_.Get().Lookup(thread_key, thread_hash);
545 DCHECK(entry != NULL);
546 SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value);
547 samplers->RemoveElement(sampler);
548 if (samplers->is_empty()) {
549 thread_id_to_samplers_.Pointer()->Remove(thread_key, thread_hash);
550 delete samplers;
551 }
552 if (thread_id_to_samplers_.Get().occupancy() == 0) {
553 instance_to_remove = instance_;
554 instance_ = NULL;
555 }
556 }
557 #else
558 bool removed = instance_->active_samplers_.RemoveElement(sampler);
559 DCHECK(removed);
560 USE(removed);
561
562 // We cannot delete the instance immediately as we need to Join() the
563 // thread but we are holding mutex_ and the thread may try to acquire it.
564 if (instance_->active_samplers_.is_empty()) {
565 instance_to_remove = instance_;
566 instance_ = NULL;
567 }
568 #endif // USE_SIGNALS
569 }
570
571 if (!instance_to_remove) return;
572 instance_to_remove->Join();
573 delete instance_to_remove;
574 }
575
576 // Unlike AddActiveSampler, this method only adds a sampler,
577 // but won't start the sampler thread.
578 static void RegisterSampler(Sampler* sampler) {
579 base::LockGuard<base::Mutex> lock_guard(mutex_);
580 #if defined(USE_SIGNALS)
581 AddSampler(sampler);
582 #endif // USE_SIGNALS
583 }
584
585 // Implement Thread::Run().
586 virtual void Run() {
587 while (true) {
588 {
589 base::LockGuard<base::Mutex> lock_guard(mutex_);
590 #if defined(USE_SIGNALS)
591 if (thread_id_to_samplers_.Get().occupancy() == 0) break;
592 if (SignalHandler::Installed()) {
593 for (HashMap::Entry *p = thread_id_to_samplers_.Get().Start();
594 p != NULL; p = thread_id_to_samplers_.Get().Next(p)) {
595 #if V8_OS_AIX && V8_TARGET_ARCH_PPC64
596 // on AIX64, cannot cast (void *) to pthread_t which is
597 // of type unsigned int (4bytes)
598 pthread_t thread_id = reinterpret_cast<intptr_t>(p->key);
599 #else
600 pthread_t thread_id = reinterpret_cast<pthread_t>(p->key);
601 #endif
602 pthread_kill(thread_id, SIGPROF);
603 }
604 }
605 #else
606 if (active_samplers_.is_empty()) break;
607 // When CPU profiling is enabled both JavaScript and C++ code is
608 // profiled. We must not suspend.
609 for (int i = 0; i < active_samplers_.length(); ++i) {
610 Sampler* sampler = active_samplers_.at(i);
611 if (!sampler->IsProfiling()) continue;
612 sampler->DoSample();
613 }
614 #endif // USE_SIGNALS
615 }
616 base::OS::Sleep(base::TimeDelta::FromMilliseconds(interval_));
617 }
618 }
619
620 private:
621 // Protects the process wide state below.
622 static base::Mutex* mutex_;
623 static SamplerThread* instance_;
624
625 const int interval_;
626
627 #if defined(USE_SIGNALS)
628 struct HashMapCreateTrait {
629 static void Construct(HashMap* allocated_ptr) {
630 new (allocated_ptr) HashMap(HashMap::PointersMatch);
631 }
632 };
633 friend class SignalHandler;
634 static base::LazyInstance<HashMap, HashMapCreateTrait>::type
635 thread_id_to_samplers_;
636 static base::AtomicValue<int> sampler_list_access_counter_;
637 static void AddSampler(Sampler* sampler) {
638 AtomicGuard atomic_guard(&sampler_list_access_counter_);
639 // Add sampler into map if needed.
640 pthread_t thread_id = sampler->platform_data()->vm_tid();
641 HashMap::Entry *entry =
642 thread_id_to_samplers_.Pointer()->LookupOrInsert(ThreadKey(thread_id),
643 ThreadHash(thread_id));
644 if (entry->value == NULL) {
645 SamplerList* samplers = new SamplerList();
646 samplers->Add(sampler);
647 entry->value = samplers;
648 } else {
649 SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value);
650 if (!samplers->Contains(sampler)) {
651 samplers->Add(sampler);
652 }
653 }
654 }
655 #else
656 SamplerList active_samplers_;
657 #endif // USE_SIGNALS
658
659 DISALLOW_COPY_AND_ASSIGN(SamplerThread);
660 };
661
662
663 base::Mutex* SamplerThread::mutex_ = NULL;
664 SamplerThread* SamplerThread::instance_ = NULL;
665 #if defined(USE_SIGNALS)
666 base::LazyInstance<HashMap, SamplerThread::HashMapCreateTrait>::type
667 SamplerThread::thread_id_to_samplers_ = LAZY_INSTANCE_INITIALIZER;
668 base::AtomicValue<int> SamplerThread::sampler_list_access_counter_(0);
669
670 // As Native Client does not support signal handling, profiling is disabled.
671 #if !V8_OS_NACL
672 void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
673 void* context) {
674 USE(info);
675 if (signal != SIGPROF) return;
676 AtomicGuard atomic_guard(&SamplerThread::sampler_list_access_counter_, false);
677 if (!atomic_guard.is_success()) return;
678 pthread_t thread_id = pthread_self();
679 HashMap::Entry* entry =
680 SamplerThread::thread_id_to_samplers_.Pointer()->Lookup(
681 ThreadKey(thread_id), ThreadHash(thread_id));
682 if (entry == NULL)
683 return;
684 SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value);
685 for (int i = 0; i < samplers->length(); ++i) {
686 Sampler* sampler = samplers->at(i);
687 CollectSample(context, sampler);
688 }
689 }
690 #endif // !V8_OS_NACL
691 #endif // USE_SIGNALs
692
693
694 void Sampler::SetUp() { 566 void Sampler::SetUp() {
695 #if defined(USE_SIGNALS) 567 #if defined(USE_SIGNALS)
696 SignalHandler::SetUp(); 568 SignalHandler::SetUp();
697 #endif 569 #endif
698 SamplerThread::SetUp();
699 } 570 }
700 571
701 572
702 void Sampler::TearDown() { 573 void Sampler::TearDown() {
703 SamplerThread::TearDown();
704 #if defined(USE_SIGNALS) 574 #if defined(USE_SIGNALS)
705 SignalHandler::TearDown(); 575 SignalHandler::TearDown();
706 #endif 576 #endif
707 } 577 }
708 578
709 Sampler::Sampler(Isolate* isolate, int interval) 579 Sampler::Sampler(Isolate* isolate)
710 : isolate_(isolate), 580 : is_counting_samples_(false),
711 interval_(interval), 581 js_sample_count_(0),
582 external_sample_count_(0),
583 isolate_(isolate),
712 profiling_(false), 584 profiling_(false),
713 has_processing_thread_(false), 585 has_processing_thread_(false),
714 active_(false), 586 active_(false),
715 registered_(false), 587 registered_(false) {
716 is_counting_samples_(false),
717 js_sample_count_(0),
718 external_sample_count_(0) {
719 data_ = new PlatformData; 588 data_ = new PlatformData;
720 } 589 }
721 590
722 Sampler::~Sampler() { 591 Sampler::~Sampler() {
723 DCHECK(!IsActive()); 592 DCHECK(!IsActive());
593 #if defined(USE_SIGNALS)
724 if (IsRegistered()) { 594 if (IsRegistered()) {
725 SamplerThread::RemoveSampler(this); 595 SamplerManager::RemoveSampler(this);
726 } 596 }
597 #endif
727 delete data_; 598 delete data_;
728 } 599 }
729 600
730 void Sampler::Start() { 601 void Sampler::Start() {
731 DCHECK(!IsActive()); 602 DCHECK(!IsActive());
732 SetActive(true); 603 SetActive(true);
733 SamplerThread::AddActiveSampler(this); 604 #if defined(USE_SIGNALS)
605 SamplerManager::AddSampler(this);
606 #endif
734 } 607 }
735 608
736 609
737 void Sampler::Stop() { 610 void Sampler::Stop() {
611 #if defined(USE_SIGNALS)
612 SamplerManager::RemoveSampler(this);
613 #endif
738 DCHECK(IsActive()); 614 DCHECK(IsActive());
739 SamplerThread::RemoveSampler(this);
740 SetActive(false); 615 SetActive(false);
741 SetRegistered(false); 616 SetRegistered(false);
742 } 617 }
743 618
744 619
745 void Sampler::IncreaseProfilingDepth() { 620 void Sampler::IncreaseProfilingDepth() {
746 base::NoBarrier_AtomicIncrement(&profiling_, 1); 621 base::NoBarrier_AtomicIncrement(&profiling_, 1);
747 #if defined(USE_SIGNALS) 622 #if defined(USE_SIGNALS)
748 SignalHandler::IncreaseSamplerCount(); 623 SignalHandler::IncreaseSamplerCount();
749 #endif 624 #endif
750 } 625 }
751 626
752 627
753 void Sampler::DecreaseProfilingDepth() { 628 void Sampler::DecreaseProfilingDepth() {
754 #if defined(USE_SIGNALS) 629 #if defined(USE_SIGNALS)
755 SignalHandler::DecreaseSamplerCount(); 630 SignalHandler::DecreaseSamplerCount();
756 #endif 631 #endif
757 base::NoBarrier_AtomicIncrement(&profiling_, -1); 632 base::NoBarrier_AtomicIncrement(&profiling_, -1);
758 } 633 }
759 634
760 635
761 void Sampler::SampleStack(const v8::RegisterState& state) {
762 TickSample* sample = isolate_->cpu_profiler()->StartTickSample();
763 TickSample sample_obj;
764 if (sample == NULL) sample = &sample_obj;
765 sample->Init(isolate_, state, TickSample::kIncludeCEntryFrame, true);
766 if (is_counting_samples_ && !sample->timestamp.IsNull()) {
767 if (sample->state == JS) ++js_sample_count_;
768 if (sample->state == EXTERNAL) ++external_sample_count_;
769 }
770 Tick(sample);
771 if (sample != &sample_obj) {
772 isolate_->cpu_profiler()->FinishTickSample();
773 }
774 }
775
776
777 #if defined(USE_SIGNALS) 636 #if defined(USE_SIGNALS)
778 637
779 void Sampler::DoSample() { 638 void Sampler::DoSample() {
780 if (!SignalHandler::Installed()) return; 639 if (!SignalHandler::Installed()) return;
781 if (!IsActive() && !IsRegistered()) { 640 if (!IsActive() && !IsRegistered()) {
782 SamplerThread::RegisterSampler(this); 641 SamplerManager::AddSampler(this);
783 SetRegistered(true); 642 SetRegistered(true);
784 } 643 }
785 pthread_kill(platform_data()->vm_tid(), SIGPROF); 644 pthread_kill(platform_data()->vm_tid(), SIGPROF);
786 } 645 }
787 646
788 #elif V8_OS_WIN || V8_OS_CYGWIN 647 #elif V8_OS_WIN || V8_OS_CYGWIN
789 648
790 void Sampler::DoSample() { 649 void Sampler::DoSample() {
791 HANDLE profiled_thread = platform_data()->profiled_thread(); 650 HANDLE profiled_thread = platform_data()->profiled_thread();
792 if (profiled_thread == NULL) return; 651 if (profiled_thread == NULL) return;
793 652
794 const DWORD kSuspendFailed = static_cast<DWORD>(-1); 653 const DWORD kSuspendFailed = static_cast<DWORD>(-1);
795 if (SuspendThread(profiled_thread) == kSuspendFailed) return; 654 if (SuspendThread(profiled_thread) == kSuspendFailed) return;
796 655
797 // Context used for sampling the register state of the profiled thread. 656 // Context used for sampling the register state of the profiled thread.
798 CONTEXT context; 657 CONTEXT context;
799 memset(&context, 0, sizeof(context)); 658 memset(&context, 0, sizeof(context));
800 context.ContextFlags = CONTEXT_FULL; 659 context.ContextFlags = CONTEXT_FULL;
801 if (GetThreadContext(profiled_thread, &context) != 0) { 660 if (GetThreadContext(profiled_thread, &context) != 0) {
802 v8::RegisterState state; 661 v8::RegisterState state;
803 #if defined(USE_SIMULATOR) 662 #if V8_HOST_ARCH_X64
804 if (!SimulatorHelper::FillRegisters(isolate(), &state)) { 663 state.pc = reinterpret_cast<void*>(context.Rip);
805 ResumeThread(profiled_thread); 664 state.sp = reinterpret_cast<void*>(context.Rsp);
806 return; 665 state.fp = reinterpret_cast<void*>(context.Rbp);
807 }
808 #else 666 #else
809 #if V8_HOST_ARCH_X64 667 state.pc = reinterpret_cast<void*>(context.Eip);
810 state.pc = reinterpret_cast<Address>(context.Rip); 668 state.sp = reinterpret_cast<void*>(context.Esp);
811 state.sp = reinterpret_cast<Address>(context.Rsp); 669 state.fp = reinterpret_cast<void*>(context.Ebp);
812 state.fp = reinterpret_cast<Address>(context.Rbp);
813 #else
814 state.pc = reinterpret_cast<Address>(context.Eip);
815 state.sp = reinterpret_cast<Address>(context.Esp);
816 state.fp = reinterpret_cast<Address>(context.Ebp);
817 #endif 670 #endif
818 #endif // USE_SIMULATOR
819 SampleStack(state); 671 SampleStack(state);
820 } 672 }
821 ResumeThread(profiled_thread); 673 ResumeThread(profiled_thread);
822 } 674 }
823 675
824 #endif // USE_SIGNALS 676 #endif // USE_SIGNALS
825 677
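With the TickSample-specific SampleStack() implementation dropped from this file (presumably kept with the CPU profiler outside this library), stack capture becomes a hook for the client of libsampler. A hedged sketch of such a client, assuming v8-sampler.h declares SampleStack() as the virtual override point; the class and function bodies below are illustrative, not taken from the patch:

    // Illustrative client of the new library, not patch code.
    class ProfilerSampler : public v8::sampler::Sampler {
     public:
      explicit ProfilerSampler(v8::Isolate* isolate) : Sampler(isolate) {}

      void SampleStack(const v8::RegisterState& regs) override {
        // Consume regs.pc / regs.sp / regs.fp, e.g. to build a tick sample.
      }
    };

    // Typical lifetime, mirroring Start()/Stop() and the profiling-depth
    // calls defined above:
    //   ProfilerSampler sampler(isolate);
    //   sampler.IncreaseProfilingDepth();  // installs the SIGPROF handler
    //   sampler.Start();
    //   ...
    //   sampler.Stop();
    //   sampler.DecreaseProfilingDepth();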
826 678 } // namespace sampler
827 } // namespace internal
828 } // namespace v8 679 } // namespace v8