| Index: src/isolate.h
|
| diff --git a/src/isolate.h b/src/isolate.h
|
| index f9135a252bc87542d09666e11f0d7f6cb3c536fe..a5d8bf5cf616615d2356c2c0e1b5eb285a6d9038 100644
|
| --- a/src/isolate.h
|
| +++ b/src/isolate.h
|
| @@ -28,8 +28,6 @@
|
| #ifndef V8_ISOLATE_H_
|
| #define V8_ISOLATE_H_
|
|
|
| -#include <set>
|
| -
|
| #include "../include/v8-debug.h"
|
| #include "allocation.h"
|
| #include "assert-scope.h"
|
| @@ -53,12 +51,13 @@ namespace v8 {
|
| namespace internal {
|
|
|
| class Bootstrapper;
|
| +struct CallInterfaceDescriptor;
|
| class CodeGenerator;
|
| class CodeRange;
|
| struct CodeStubInterfaceDescriptor;
|
| -struct CallInterfaceDescriptor;
|
| class CodeTracer;
|
| class CompilationCache;
|
| +class ConsStringIteratorOp;
|
| class ContextSlotCache;
|
| class Counters;
|
| class CpuFeatures;
|
| @@ -75,20 +74,20 @@ class HeapProfiler;
|
| class HStatistics;
|
| class HTracer;
|
| class InlineRuntimeFunctionsTable;
|
| -class NoAllocationStringAllocator;
|
| class InnerPointerToCodeCache;
|
| +class MaterializedObjectStore;
|
| +class NoAllocationStringAllocator;
|
| class RandomNumberGenerator;
|
| class RegExpStack;
|
| class SaveContext;
|
| -class UnicodeCache;
|
| -class ConsStringIteratorOp;
|
| -class ScannerBase;
|
| +class LexerGCHandler;
|
| class StringTracker;
|
| class StubCache;
|
| class SweeperThread;
|
| class ThreadManager;
|
| class ThreadState;
|
| class ThreadVisitor; // Defined in v8threads.h
|
| +class UnicodeCache;
|
| template <StateTag Tag> class VMState;
|
|
|
| // 'void function pointer', used to roundtrip the
|
| @@ -104,6 +103,7 @@ class DebuggerAgent;
|
| #endif
|
|
|
| #if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
|
| + !defined(__aarch64__) && V8_TARGET_ARCH_A64 || \
|
| !defined(__mips__) && V8_TARGET_ARCH_MIPS
|
| class Redirection;
|
| class Simulator;
|
| @@ -209,6 +209,11 @@ class ThreadId {
|
| };
|
|
|
|
|
| +#define FIELD_ACCESSOR(type, name) \
|
| + inline void set_##name(type v) { name##_ = v; } \
|
| + inline type name() const { return name##_; }
|
| +
|
| +
|
| class ThreadLocalTop BASE_EMBEDDED {
|
| public:
|
| // Does early low-level initialization that does not depend on the
|
| @@ -235,14 +240,7 @@ class ThreadLocalTop BASE_EMBEDDED {
|
| // stack, try_catch_handler_address returns a JS stack address that
|
| // corresponds to the place on the JS stack where the C++ handler
|
| // would have been if the stack were not separate.
|
| - inline Address try_catch_handler_address() {
|
| - return try_catch_handler_address_;
|
| - }
|
| -
|
| - // Set the address of the top C++ try catch handler.
|
| - inline void set_try_catch_handler_address(Address address) {
|
| - try_catch_handler_address_ = address;
|
| - }
|
| + FIELD_ACCESSOR(Address, try_catch_handler_address)
|
|
|
| void Free() {
|
| ASSERT(!has_pending_message_);
|
| @@ -362,12 +360,18 @@ typedef List<HeapObject*> DebugObjectCache;
|
| /* AstNode state. */ \
|
| V(int, ast_node_id, 0) \
|
| V(unsigned, ast_node_count, 0) \
|
| - V(bool, microtask_pending, false) \
|
| + V(bool, microtask_pending, false) \
|
| + V(bool, autorun_microtasks, true) \
|
| V(HStatistics*, hstatistics, NULL) \
|
| V(HTracer*, htracer, NULL) \
|
| V(CodeTracer*, code_tracer, NULL) \
|
| ISOLATE_DEBUGGER_INIT_LIST(V)
|
|
|
| +#define THREAD_LOCAL_TOP_ACCESSOR(type, name) \
|
| + inline void set_##name(type v) { thread_local_top_.name##_ = v; } \
|
| + inline type name() const { return thread_local_top_.name##_; }
|
| +
|
| +
|
| class Isolate {
|
| // These forward declarations are required to make the friend declarations in
|
| // PerIsolateThreadData work on some older versions of gcc.
|
| @@ -387,24 +391,23 @@ class Isolate {
|
| stack_limit_(0),
|
| thread_state_(NULL),
|
| #if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
|
| + !defined(__aarch64__) && V8_TARGET_ARCH_A64 || \
|
| !defined(__mips__) && V8_TARGET_ARCH_MIPS
|
| simulator_(NULL),
|
| #endif
|
| next_(NULL),
|
| prev_(NULL) { }
|
| + ~PerIsolateThreadData();
|
| Isolate* isolate() const { return isolate_; }
|
| ThreadId thread_id() const { return thread_id_; }
|
| - void set_stack_limit(uintptr_t value) { stack_limit_ = value; }
|
| - uintptr_t stack_limit() const { return stack_limit_; }
|
| - ThreadState* thread_state() const { return thread_state_; }
|
| - void set_thread_state(ThreadState* value) { thread_state_ = value; }
|
| +
|
| + FIELD_ACCESSOR(uintptr_t, stack_limit)
|
| + FIELD_ACCESSOR(ThreadState*, thread_state)
|
|
|
| #if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
|
| + !defined(__aarch64__) && V8_TARGET_ARCH_A64 || \
|
| !defined(__mips__) && V8_TARGET_ARCH_MIPS
|
| - Simulator* simulator() const { return simulator_; }
|
| - void set_simulator(Simulator* simulator) {
|
| - simulator_ = simulator;
|
| - }
|
| + FIELD_ACCESSOR(Simulator*, simulator)
|
| #endif
|
|
|
| bool Matches(Isolate* isolate, ThreadId thread_id) const {
|
| @@ -418,6 +421,7 @@ class Isolate {
|
| ThreadState* thread_state_;
|
|
|
| #if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
|
| + !defined(__aarch64__) && V8_TARGET_ARCH_A64 || \
|
| !defined(__mips__) && V8_TARGET_ARCH_MIPS
|
| Simulator* simulator_;
|
| #endif
|
| @@ -543,38 +547,35 @@ class Isolate {
|
| }
|
| Context** context_address() { return &thread_local_top_.context_; }
|
|
|
| - SaveContext* save_context() { return thread_local_top_.save_context_; }
|
| - void set_save_context(SaveContext* save) {
|
| - thread_local_top_.save_context_ = save;
|
| - }
|
| + THREAD_LOCAL_TOP_ACCESSOR(SaveContext*, save_context)
|
|
|
| // Access to current thread id.
|
| - ThreadId thread_id() { return thread_local_top_.thread_id_; }
|
| - void set_thread_id(ThreadId id) { thread_local_top_.thread_id_ = id; }
|
| + THREAD_LOCAL_TOP_ACCESSOR(ThreadId, thread_id)
|
|
|
| // Interface to pending exception.
|
| MaybeObject* pending_exception() {
|
| ASSERT(has_pending_exception());
|
| return thread_local_top_.pending_exception_;
|
| }
|
| - bool external_caught_exception() {
|
| - return thread_local_top_.external_caught_exception_;
|
| - }
|
| - void set_external_caught_exception(bool value) {
|
| - thread_local_top_.external_caught_exception_ = value;
|
| - }
|
| +
|
| void set_pending_exception(MaybeObject* exception) {
|
| thread_local_top_.pending_exception_ = exception;
|
| }
|
| +
|
| void clear_pending_exception() {
|
| thread_local_top_.pending_exception_ = heap_.the_hole_value();
|
| }
|
| +
|
| MaybeObject** pending_exception_address() {
|
| return &thread_local_top_.pending_exception_;
|
| }
|
| +
|
| bool has_pending_exception() {
|
| return !thread_local_top_.pending_exception_->IsTheHole();
|
| }
|
| +
|
| + THREAD_LOCAL_TOP_ACCESSOR(bool, external_caught_exception)
|
| +
|
| void clear_pending_message() {
|
| thread_local_top_.has_pending_message_ = false;
|
| thread_local_top_.pending_message_obj_ = heap_.the_hole_value();
|
| @@ -589,12 +590,8 @@ class Isolate {
|
| bool* external_caught_exception_address() {
|
| return &thread_local_top_.external_caught_exception_;
|
| }
|
| - v8::TryCatch* catcher() {
|
| - return thread_local_top_.catcher_;
|
| - }
|
| - void set_catcher(v8::TryCatch* catcher) {
|
| - thread_local_top_.catcher_ = catcher;
|
| - }
|
| +
|
| + THREAD_LOCAL_TOP_ACCESSOR(v8::TryCatch*, catcher)
|
|
|
| MaybeObject** scheduled_exception_address() {
|
| return &thread_local_top_.scheduled_exception_;
|
| @@ -710,12 +707,8 @@ class Isolate {
|
| // Tells whether the current context has experienced an out of memory
|
| // exception.
|
| bool is_out_of_memory();
|
| - bool ignore_out_of_memory() {
|
| - return thread_local_top_.ignore_out_of_memory_;
|
| - }
|
| - void set_ignore_out_of_memory(bool value) {
|
| - thread_local_top_.ignore_out_of_memory_ = value;
|
| - }
|
| +
|
| + THREAD_LOCAL_TOP_ACCESSOR(bool, ignore_out_of_memory)
|
|
|
| void PrintCurrentStackTrace(FILE* out);
|
| void PrintStack(StringStream* accumulator);
|
| @@ -872,6 +865,9 @@ class Isolate {
|
| StubCache* stub_cache() { return stub_cache_; }
|
| DeoptimizerData* deoptimizer_data() { return deoptimizer_data_; }
|
| ThreadLocalTop* thread_local_top() { return &thread_local_top_; }
|
| + MaterializedObjectStore* materialized_object_store() {
|
| + return materialized_object_store_;
|
| + }
|
|
|
| MemoryAllocator* memory_allocator() {
|
| return memory_allocator_;
|
| @@ -937,11 +933,7 @@ class Isolate {
|
|
|
| RuntimeState* runtime_state() { return &runtime_state_; }
|
|
|
| - void set_fp_stubs_generated(bool value) {
|
| - fp_stubs_generated_ = value;
|
| - }
|
| -
|
| - bool fp_stubs_generated() { return fp_stubs_generated_; }
|
| + FIELD_ACCESSOR(bool, fp_stubs_generated)
|
|
|
| Builtins* builtins() { return &builtins_; }
|
|
|
| @@ -993,43 +985,20 @@ class Isolate {
|
| #endif
|
|
|
| #if V8_TARGET_ARCH_ARM && !defined(__arm__) || \
|
| + V8_TARGET_ARCH_A64 && !defined(__aarch64__) || \
|
| V8_TARGET_ARCH_MIPS && !defined(__mips__)
|
| - bool simulator_initialized() { return simulator_initialized_; }
|
| - void set_simulator_initialized(bool initialized) {
|
| - simulator_initialized_ = initialized;
|
| - }
|
| -
|
| - HashMap* simulator_i_cache() { return simulator_i_cache_; }
|
| - void set_simulator_i_cache(HashMap* hash_map) {
|
| - simulator_i_cache_ = hash_map;
|
| - }
|
| -
|
| - Redirection* simulator_redirection() {
|
| - return simulator_redirection_;
|
| - }
|
| - void set_simulator_redirection(Redirection* redirection) {
|
| - simulator_redirection_ = redirection;
|
| - }
|
| + FIELD_ACCESSOR(bool, simulator_initialized)
|
| + FIELD_ACCESSOR(HashMap*, simulator_i_cache)
|
| + FIELD_ACCESSOR(Redirection*, simulator_redirection)
|
| #endif
|
|
|
| Factory* factory() { return reinterpret_cast<Factory*>(this); }
|
|
|
| static const int kJSRegexpStaticOffsetsVectorSize = 128;
|
|
|
| - ExternalCallbackScope* external_callback_scope() {
|
| - return thread_local_top_.external_callback_scope_;
|
| - }
|
| - void set_external_callback_scope(ExternalCallbackScope* scope) {
|
| - thread_local_top_.external_callback_scope_ = scope;
|
| - }
|
| + THREAD_LOCAL_TOP_ACCESSOR(ExternalCallbackScope*, external_callback_scope)
|
|
|
| - StateTag current_vm_state() {
|
| - return thread_local_top_.current_vm_state_;
|
| - }
|
| -
|
| - void set_current_vm_state(StateTag state) {
|
| - thread_local_top_.current_vm_state_ = state;
|
| - }
|
| + THREAD_LOCAL_TOP_ACCESSOR(StateTag, current_vm_state)
|
|
|
| void SetData(uint32_t slot, void* data) {
|
| ASSERT(slot < Internals::kNumIsolateDataSlots);
|
| @@ -1040,12 +1009,7 @@ class Isolate {
|
| return embedder_data_[slot];
|
| }
|
|
|
| - LookupResult* top_lookup_result() {
|
| - return thread_local_top_.top_lookup_result_;
|
| - }
|
| - void SetTopLookupResult(LookupResult* top) {
|
| - thread_local_top_.top_lookup_result_ = top;
|
| - }
|
| + THREAD_LOCAL_TOP_ACCESSOR(LookupResult*, top_lookup_result)
|
|
|
| bool IsDead() { return has_fatal_error_; }
|
| void SignalFatalError() { has_fatal_error_ = true; }
|
| @@ -1079,7 +1043,9 @@ class Isolate {
|
| enum CallDescriptorKey {
|
| KeyedCall,
|
| NamedCall,
|
| + CallHandler,
|
| ArgumentAdaptorCall,
|
| + ApiFunctionCall,
|
| NUMBER_OF_CALL_DESCRIPTORS
|
| };
|
|
|
| @@ -1093,13 +1059,7 @@ class Isolate {
|
| bool IsDeferredHandle(Object** location);
|
| #endif // DEBUG
|
|
|
| - int max_available_threads() const {
|
| - return max_available_threads_;
|
| - }
|
| -
|
| - void set_max_available_threads(int value) {
|
| - max_available_threads_ = value;
|
| - }
|
| + FIELD_ACCESSOR(int, max_available_threads)
|
|
|
| bool concurrent_recompilation_enabled() {
|
| // Thread is only available with flag enabled.
|
| @@ -1150,8 +1110,15 @@ class Isolate {
|
| // Given an address occupied by a live code object, return that object.
|
| Object* FindCodeObject(Address a);
|
|
|
| - void AddScanner(ScannerBase* scanner);
|
| - void RemoveScanner(ScannerBase* scanner);
|
| + LexerGCHandler* lexer_gc_handler() { return lexer_gc_handler_; }
|
| +
|
| + int NextOptimizationId() {
|
| + int id = next_optimization_id_++;
|
| + if (!Smi::IsValid(next_optimization_id_)) {
|
| + next_optimization_id_ = 0;
|
| + }
|
| + return id;
|
| + }
|
|
|
| private:
|
| Isolate();
|
| @@ -1262,9 +1229,6 @@ class Isolate {
|
| // the Error object.
|
| bool IsErrorObject(Handle<Object> obj);
|
|
|
| - static void UpdateScannersAfterGC(v8::Isolate*, GCType, GCCallbackFlags);
|
| - void UpdateScannersAfterGC();
|
| -
|
| Atomic32 id_;
|
| EntryStackItem* entry_stack_;
|
| int stack_trace_nesting_level_;
|
| @@ -1283,6 +1247,7 @@ class Isolate {
|
| StatsTable* stats_table_;
|
| StubCache* stub_cache_;
|
| DeoptimizerData* deoptimizer_data_;
|
| + MaterializedObjectStore* materialized_object_store_;
|
| ThreadLocalTop thread_local_top_;
|
| bool capture_stack_trace_for_uncaught_exceptions_;
|
| int stack_trace_for_uncaught_exceptions_frame_limit_;
|
| @@ -1332,6 +1297,7 @@ class Isolate {
|
| double time_millis_at_init_;
|
|
|
| #if V8_TARGET_ARCH_ARM && !defined(__arm__) || \
|
| + V8_TARGET_ARCH_A64 && !defined(__aarch64__) || \
|
| V8_TARGET_ARCH_MIPS && !defined(__mips__)
|
| bool simulator_initialized_;
|
| HashMap* simulator_i_cache_;
|
| @@ -1386,9 +1352,8 @@ class Isolate {
|
| // Counts deopt points if deopt_every_n_times is enabled.
|
| unsigned int stress_deopt_count_;
|
|
|
| - // Stores information about the ScannerBase objects currently alive, so that
|
| - // we can update the raw string pointers they hold after GC.
|
| - std::set<ScannerBase*> scanners_;
|
| + LexerGCHandler* lexer_gc_handler_;
|
| + int next_optimization_id_;
|
|
|
| friend class ExecutionAccess;
|
| friend class HandleScopeImplementer;
|
| @@ -1409,6 +1374,10 @@ class Isolate {
|
| };
|
|
|
|
|
| +#undef FIELD_ACCESSOR
|
| +#undef THREAD_LOCAL_TOP_ACCESSOR
|
| +
|
| +
|
| // If the GCC version is 4.1.x or 4.2.x an additional field is added to the
|
| // class as a work around for a bug in the generated code found with these
|
| // versions of GCC. See V8 issue 122 for details.
|
| @@ -1497,18 +1466,21 @@ class StackLimitCheck BASE_EMBEDDED {
|
| class PostponeInterruptsScope BASE_EMBEDDED {
|
| public:
|
| explicit PostponeInterruptsScope(Isolate* isolate)
|
| - : stack_guard_(isolate->stack_guard()) {
|
| + : stack_guard_(isolate->stack_guard()), isolate_(isolate) {
|
| + ExecutionAccess access(isolate_);
|
| stack_guard_->thread_local_.postpone_interrupts_nesting_++;
|
| stack_guard_->DisableInterrupts();
|
| }
|
|
|
| ~PostponeInterruptsScope() {
|
| + ExecutionAccess access(isolate_);
|
| if (--stack_guard_->thread_local_.postpone_interrupts_nesting_ == 0) {
|
| stack_guard_->EnableInterrupts();
|
| }
|
| }
|
| private:
|
| StackGuard* stack_guard_;
|
| + Isolate* isolate_;
|
| };
|
|
|
|
|
|
|