OLD | NEW |
(Empty) | |
| 1 // Copyright 2010 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are |
| 4 // met: |
| 5 // |
| 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided |
| 11 // with the distribution. |
| 12 // * Neither the name of Google Inc. nor the names of its |
| 13 // contributors may be used to endorse or promote products derived |
| 14 // from this software without specific prior written permission. |
| 15 // |
| 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 27 |
| 28 #ifndef V8_V8GLOBALS_H_ |
| 29 #define V8_V8GLOBALS_H_ |
| 30 |
| 31 #include "globals.h" |
| 32 |
| 33 namespace v8 { |
| 34 namespace internal { |
| 35 |
| 36 // This file contains constants and global declarations related to the |
| 37 // V8 system. |
| 38 |
| 39 // Mask for the sign bit in a smi. |
| 40 const intptr_t kSmiSignMask = kIntptrSignBit; |
| 41 |
| 42 const int kObjectAlignmentBits = kPointerSizeLog2; |
| 43 const intptr_t kObjectAlignment = 1 << kObjectAlignmentBits; |
| 44 const intptr_t kObjectAlignmentMask = kObjectAlignment - 1; |
| 45 |
| 46 // Desired alignment for pointers. |
| 47 const intptr_t kPointerAlignment = (1 << kPointerSizeLog2); |
| 48 const intptr_t kPointerAlignmentMask = kPointerAlignment - 1; |
| 49 |
| 50 // Desired alignment for maps. |
| 51 #if V8_HOST_ARCH_64_BIT |
| 52 const intptr_t kMapAlignmentBits = kObjectAlignmentBits; |
| 53 #else |
| 54 const intptr_t kMapAlignmentBits = kObjectAlignmentBits + 3; |
| 55 #endif |
| 56 const intptr_t kMapAlignment = (1 << kMapAlignmentBits); |
| 57 const intptr_t kMapAlignmentMask = kMapAlignment - 1; |
| 58 |
| 59 // Desired alignment for generated code is 32 bytes (to improve cache line |
| 60 // utilization). |
| 61 const int kCodeAlignmentBits = 5; |
| 62 const intptr_t kCodeAlignment = 1 << kCodeAlignmentBits; |
| 63 const intptr_t kCodeAlignmentMask = kCodeAlignment - 1; |
| 64 |
| 65 // Tag information for Failure. |
| 66 const int kFailureTag = 3; |
| 67 const int kFailureTagSize = 2; |
| 68 const intptr_t kFailureTagMask = (1 << kFailureTagSize) - 1; |
| 69 |
| 70 |
| 71 // Zap-value: The value used for zapping dead objects. |
| 72 // Should be a recognizable hex value tagged as a heap object pointer. |
| 73 #ifdef V8_HOST_ARCH_64_BIT |
| 74 const Address kZapValue = |
| 75 reinterpret_cast<Address>(V8_UINT64_C(0xdeadbeedbeadbeed)); |
| 76 const Address kHandleZapValue = |
| 77 reinterpret_cast<Address>(V8_UINT64_C(0x1baddead0baddead)); |
| 78 const Address kFromSpaceZapValue = |
| 79 reinterpret_cast<Address>(V8_UINT64_C(0x1beefdad0beefdad)); |
| 80 const uint64_t kDebugZapValue = 0xbadbaddbbadbaddb; |
| 81 #else |
| 82 const Address kZapValue = reinterpret_cast<Address>(0xdeadbeed); |
| 83 const Address kHandleZapValue = reinterpret_cast<Address>(0xbaddead); |
| 84 const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdad); |
| 85 const uint32_t kDebugZapValue = 0xbadbaddb; |
| 86 #endif |
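As an aside on the "tagged as a heap object pointer" remark: V8 heap object pointers carry a 1 in the low tag bit, and the zap constants are chosen to keep that tag so a zapped slot still looks like a pointer. A minimal check of the 32-bit variants, assuming kHeapObjectTag == 1 and a two-bit tag mask as in globals.h (illustration only, not part of the patch):

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t kHeapObjectTag = 1;      // assumption: matches globals.h
  const uintptr_t kHeapObjectTagMask = 3;  // assumption: two tag bits
  const uintptr_t kZapValue = 0xdeadbeed;
  const uintptr_t kHandleZapValue = 0xbaddead;
  const uintptr_t kFromSpaceZapValue = 0xbeefdad;
  assert((kZapValue & kHeapObjectTagMask) == kHeapObjectTag);
  assert((kHandleZapValue & kHeapObjectTagMask) == kHeapObjectTag);
  assert((kFromSpaceZapValue & kHeapObjectTagMask) == kHeapObjectTag);
  return 0;
}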
| 87 |
| 88 |
| 89 // Number of bits to represent the page size for paged spaces. The value of 13 |
| 90 // gives 8K bytes per page. |
| 91 const int kPageSizeBits = 13; |
| 92 |
| 93 // On Intel architecture, cache line size is 64 bytes. |
| 94 // On ARM it may be less (32 bytes), but as far as this constant is |
| 95 // used for aligning data, it doesn't hurt to align on a greater value. |
| 96 const int kProcessorCacheLineSize = 64; |
| 97 |
| 98 // Constants relevant to double precision floating point numbers. |
| 99 |
| 100 // Quiet NaNs have bits 51 to 62 set, possibly the sign bit, and may have |
| 101 // other mantissa bits set (the masks below test only bits 51 to 62). |
| 102 const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51; |
| 103 // If looking only at the top 32 bits, the QNaN mask is bits 19 to 30. |
| 104 const uint32_t kQuietNaNHighBitsMask = 0xfff << (51 - 32); |
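A small sanity check of the two masks, assuming an IEEE-754 host (illustration only): any quiet NaN has the exponent bits (52 to 62) and the quiet bit (51) set, which is exactly what kQuietNaNMask tests, and kQuietNaNHighBitsMask applies the same test to the upper 32 bits alone.

#include <cassert>
#include <cstdint>
#include <cstring>
#include <limits>

int main() {
  const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
  const uint32_t kQuietNaNHighBitsMask = 0xfff << (51 - 32);
  double qnan = std::numeric_limits<double>::quiet_NaN();
  uint64_t bits;
  std::memcpy(&bits, &qnan, sizeof(bits));
  assert((bits & kQuietNaNMask) == kQuietNaNMask);
  // The same test using only the upper 32 bits of the double.
  uint32_t high = static_cast<uint32_t>(bits >> 32);
  assert((high & kQuietNaNHighBitsMask) == kQuietNaNHighBitsMask);
  return 0;
}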
| 105 |
| 106 |
| 107 // ----------------------------------------------------------------------------- |
| 108 // Forward declarations for frequently used classes |
| 109 // (roughly sorted alphabetically) |
| 110 |
| 111 class AccessorInfo; |
| 112 class Allocation; |
| 113 class Arguments; |
| 114 class Assembler; |
| 115 class AssertNoAllocation; |
| 116 class BreakableStatement; |
| 117 class Code; |
| 118 class CodeGenerator; |
| 119 class CodeStub; |
| 120 class Context; |
| 121 class Debug; |
| 122 class Debugger; |
| 123 class DebugInfo; |
| 124 class Descriptor; |
| 125 class DescriptorArray; |
| 126 class Expression; |
| 127 class ExternalReference; |
| 128 class FixedArray; |
| 129 class FunctionEntry; |
| 130 class FunctionLiteral; |
| 131 class FunctionTemplateInfo; |
| 132 class NumberDictionary; |
| 133 class StringDictionary; |
| 134 template <typename T> class Handle; |
| 135 class Heap; |
| 136 class HeapObject; |
| 137 class IC; |
| 138 class InterceptorInfo; |
| 139 class IterationStatement; |
| 140 class JSArray; |
| 141 class JSFunction; |
| 142 class JSObject; |
| 143 class LargeObjectSpace; |
| 144 class LookupResult; |
| 145 class MacroAssembler; |
| 146 class Map; |
| 147 class MapSpace; |
| 148 class MarkCompactCollector; |
| 149 class NewSpace; |
| 150 class NodeVisitor; |
| 151 class Object; |
| 152 class MaybeObject; |
| 153 class OldSpace; |
| 154 class Property; |
| 155 class Proxy; |
| 156 class RegExpNode; |
| 157 struct RegExpCompileData; |
| 158 class RegExpTree; |
| 159 class RegExpCompiler; |
| 160 class RegExpVisitor; |
| 161 class Scope; |
| 162 template<class Allocator = FreeStoreAllocationPolicy> class ScopeInfo; |
| 163 class SerializedScopeInfo; |
| 164 class Script; |
| 165 class Slot; |
| 166 class Smi; |
| 167 template <typename Config, class Allocator = FreeStoreAllocationPolicy> |
| 168 class SplayTree; |
| 169 class Statement; |
| 170 class String; |
| 171 class Struct; |
| 172 class SwitchStatement; |
| 173 class AstVisitor; |
| 174 class Variable; |
| 175 class VariableProxy; |
| 176 class RelocInfo; |
| 177 class Deserializer; |
| 178 class MessageLocation; |
| 179 class ObjectGroup; |
| 180 class TickSample; |
| 181 class VirtualMemory; |
| 182 class Mutex; |
| 183 |
| 184 typedef bool (*WeakSlotCallback)(Object** pointer); |
| 185 |
| 186 // ----------------------------------------------------------------------------- |
| 187 // Miscellaneous |
| 188 |
| 189 // NOTE: SpaceIterator depends on AllocationSpace enumeration values being |
| 190 // consecutive. |
| 191 enum AllocationSpace { |
| 192 NEW_SPACE, // Semispaces collected with copying collector. |
| 193 OLD_POINTER_SPACE, // May contain pointers to new space. |
| 194 OLD_DATA_SPACE, // Must not have pointers to new space. |
| 195 CODE_SPACE, // No pointers to new space, marked executable. |
| 196 MAP_SPACE, // Only and all map objects. |
| 197 CELL_SPACE, // Only and all cell objects. |
| 198 LO_SPACE, // Promoted large objects. |
| 199 |
| 200 FIRST_SPACE = NEW_SPACE, |
| 201 LAST_SPACE = LO_SPACE, |
| 202 FIRST_PAGED_SPACE = OLD_POINTER_SPACE, |
| 203 LAST_PAGED_SPACE = CELL_SPACE |
| 204 }; |
| 205 const int kSpaceTagSize = 3; |
| 206 const int kSpaceTagMask = (1 << kSpaceTagSize) - 1; |
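A quick arithmetic check (illustration only): the seven consecutive AllocationSpace values, NEW_SPACE (0) through LO_SPACE (6), all fit in the 3-bit space tag defined above.

#include <cassert>

int main() {
  const int kSpaceTagSize = 3;
  const int kSpaceTagMask = (1 << kSpaceTagSize) - 1;  // 0x7
  const int LO_SPACE = 6;  // the last space value in the enum above
  assert(LO_SPACE <= kSpaceTagMask);               // every space fits in the tag
  assert((LO_SPACE & kSpaceTagMask) == LO_SPACE);  // tagging round-trips
  return 0;
}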
| 207 |
| 208 |
| 209 // A flag that indicates whether objects should be pretenured when |
| 210 // allocated (allocated directly into the old generation) or not |
| 211 // (allocated in the young generation if the object size and type |
| 212 // allow). |
| 213 enum PretenureFlag { NOT_TENURED, TENURED }; |
| 214 |
| 215 enum GarbageCollector { SCAVENGER, MARK_COMPACTOR }; |
| 216 |
| 217 enum Executability { NOT_EXECUTABLE, EXECUTABLE }; |
| 218 |
| 219 enum VisitMode { VISIT_ALL, VISIT_ALL_IN_SCAVENGE, VISIT_ONLY_STRONG }; |
| 220 |
| 221 // Flag indicating whether code is built into the VM (one of the natives files). |
| 222 enum NativesFlag { NOT_NATIVES_CODE, NATIVES_CODE }; |
| 223 |
| 224 |
| 225 // A CodeDesc describes a buffer holding instructions and relocation |
| 226 // information. The instructions start at the beginning of the buffer |
| 227 // and grow forward, the relocation information starts at the end of |
| 228 // the buffer and grows backward. |
| 229 // |
| 230 // |<--------------- buffer_size ---------------->| |
| 231 // |<-- instr_size -->| |<-- reloc_size -->| |
| 232 // +==================+========+==================+ |
| 233 // | instructions | free | reloc info | |
| 234 // +==================+========+==================+ |
| 235 // ^ |
| 236 // | |
| 237 // buffer |
| 238 |
| 239 struct CodeDesc { |
| 240 byte* buffer; |
| 241 int buffer_size; |
| 242 int instr_size; |
| 243 int reloc_size; |
| 244 Assembler* origin; |
| 245 }; |
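A sketch (not V8 code) of the invariant drawn in the diagram: instructions grow forward from buffer, reloc info ends at buffer + buffer_size, and the two regions never overlap. The field names below mirror CodeDesc, but the struct and checker themselves are hypothetical.

#include <cassert>

struct CodeDescSketch {
  unsigned char* buffer;
  int buffer_size;
  int instr_size;
  int reloc_size;
};

void CheckCodeDescLayout(const CodeDescSketch& desc) {
  unsigned char* instr_end = desc.buffer + desc.instr_size;
  unsigned char* reloc_start = desc.buffer + desc.buffer_size - desc.reloc_size;
  assert(desc.instr_size + desc.reloc_size <= desc.buffer_size);
  assert(instr_end <= reloc_start);  // the "free" gap may be empty, never negative
}

int main() {
  unsigned char storage[64];
  CodeDescSketch desc = { storage, 64, 20, 12 };
  CheckCodeDescLayout(desc);  // instructions in [0, 20), reloc info in [52, 64)
  return 0;
}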
| 246 |
| 247 |
| 248 // Callback function on object slots, used for iterating heap object slots in |
| 249 // HeapObjects, global pointers to heap objects, etc. The callback is |
| 250 // allowed to change the value of the slot. |
| 251 typedef void (*ObjectSlotCallback)(HeapObject** pointer); |
| 252 |
| 253 |
| 254 // Callback function used for iterating objects in heap spaces, |
| 255 // for example, scanning heap objects. |
| 256 typedef int (*HeapObjectCallback)(HeapObject* obj); |
| 257 |
| 258 |
| 259 // Callback function used for checking constraints when copying/relocating |
| 260 // objects. Returns true if an object can be copied/relocated from its |
| 261 // old_addr to a new_addr. |
| 262 typedef bool (*ConstraintCallback)(Address new_addr, Address old_addr); |
| 263 |
| 264 |
| 265 // Callback function on inline caches, used for iterating over inline caches |
| 266 // in compiled code. |
| 267 typedef void (*InlineCacheCallback)(Code* code, Address ic); |
| 268 |
| 269 |
| 270 // State for inline cache call sites. Aliased as IC::State. |
| 271 enum InlineCacheState { |
| 272 // Has never been executed. |
| 273 UNINITIALIZED, |
| 274 // Has been executed but the monomorphic state has been delayed. |
| 275 PREMONOMORPHIC, |
| 276 // Has been executed and only one receiver type has been seen. |
| 277 MONOMORPHIC, |
| 278 // Like MONOMORPHIC but check failed due to prototype. |
| 279 MONOMORPHIC_PROTOTYPE_FAILURE, |
| 280 // Multiple receiver types have been seen. |
| 281 MEGAMORPHIC, |
| 282 // Special states for debug break or step in prepare stubs. |
| 283 DEBUG_BREAK, |
| 284 DEBUG_PREPARE_STEP_IN |
| 285 }; |
| 286 |
| 287 |
| 288 enum InLoopFlag { |
| 289 NOT_IN_LOOP, |
| 290 IN_LOOP |
| 291 }; |
| 292 |
| 293 |
| 294 enum CallFunctionFlags { |
| 295 NO_CALL_FUNCTION_FLAGS = 0, |
| 296 RECEIVER_MIGHT_BE_VALUE = 1 << 0 // Receiver might not be a JSObject. |
| 297 }; |
| 298 |
| 299 |
| 300 enum InlineCacheHolderFlag { |
| 301 OWN_MAP, // For fast properties objects. |
| 302 PROTOTYPE_MAP // For slow properties objects (except GlobalObjects). |
| 303 }; |
| 304 |
| 305 |
| 306 // Type of properties. |
| 307 // Order of properties is significant. |
| 308 // Must fit in the BitField PropertyDetails::TypeField. |
| 309 // A copy of this is in mirror-debugger.js. |
| 310 enum PropertyType { |
| 311 NORMAL = 0, // only in slow mode |
| 312 FIELD = 1, // only in fast mode |
| 313 CONSTANT_FUNCTION = 2, // only in fast mode |
| 314 CALLBACKS = 3, |
| 315 INTERCEPTOR = 4, // only in lookup results, not in descriptors. |
| 316 MAP_TRANSITION = 5, // only in fast mode |
| 317 CONSTANT_TRANSITION = 6, // only in fast mode |
| 318 NULL_DESCRIPTOR = 7, // only in fast mode |
| 319 // All properties before MAP_TRANSITION are real. |
| 320 FIRST_PHANTOM_PROPERTY_TYPE = MAP_TRANSITION, |
| 321 // There are no IC stubs for NULL_DESCRIPTORS. Therefore, |
| 322 // NULL_DESCRIPTOR can be used as the type flag for IC stubs for |
| 323 // nonexistent properties. |
| 324 NONEXISTENT = NULL_DESCRIPTOR |
| 325 }; |
| 326 |
| 327 |
| 328 // Whether to remove map transitions and constant transitions from a |
| 329 // DescriptorArray. |
| 330 enum TransitionFlag { |
| 331 REMOVE_TRANSITIONS, |
| 332 KEEP_TRANSITIONS |
| 333 }; |
| 334 |
| 335 |
| 336 // Union used for fast testing of specific double values. |
| 337 union DoubleRepresentation { |
| 338 double value; |
| 339 int64_t bits; |
| 340 DoubleRepresentation(double x) { value = x; } |
| 341 }; |
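An illustration of the fast-testing use mentioned above: comparing the bit patterns distinguishes values that compare equal as doubles, such as 0.0 and -0.0. The union below mirrors the one in this header (and relies on the same type punning).

#include <cassert>
#include <cstdint>

union DoubleRepresentationSketch {
  double value;
  int64_t bits;
  explicit DoubleRepresentationSketch(double x) { value = x; }
};

int main() {
  DoubleRepresentationSketch plus_zero(0.0);
  DoubleRepresentationSketch minus_zero(-0.0);
  assert(plus_zero.value == minus_zero.value);  // equal as doubles
  assert(plus_zero.bits != minus_zero.bits);    // but the sign bit differs
  return 0;
}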
| 342 |
| 343 |
| 344 // Union used for customized checking of IEEE doubles, inlined within the |
| 345 // v8 runtime rather than going through the underlying platform headers |
| 346 // and libraries. |
| 347 union IeeeDoubleLittleEndianArchType { |
| 348 double d; |
| 349 struct { |
| 350 unsigned int man_low :32; |
| 351 unsigned int man_high :20; |
| 352 unsigned int exp :11; |
| 353 unsigned int sign :1; |
| 354 } bits; |
| 355 }; |
| 356 |
| 357 |
| 358 union IeeeDoubleBigEndianArchType { |
| 359 double d; |
| 360 struct { |
| 361 unsigned int sign :1; |
| 362 unsigned int exp :11; |
| 363 unsigned int man_high :20; |
| 364 unsigned int man_low :32; |
| 365 } bits; |
| 366 }; |
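A sketch of how the little-endian view decomposes a value, assuming an IEEE-754 double and bit-field packing that matches the layout above (which is exactly what these unions rely on):

#include <cassert>

union IeeeDoubleSketch {  // same layout as IeeeDoubleLittleEndianArchType
  double d;
  struct {
    unsigned int man_low  : 32;
    unsigned int man_high : 20;
    unsigned int exp      : 11;
    unsigned int sign     : 1;
  } bits;
};

int main() {
  IeeeDoubleSketch rep;
  rep.d = -2.0;  // sign = 1, biased exponent = 1023 + 1, mantissa = 0
  assert(rep.bits.sign == 1);
  assert(rep.bits.exp == 1024);
  assert(rep.bits.man_high == 0 && rep.bits.man_low == 0);
  return 0;
}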
| 367 |
| 368 |
| 369 // Native accessor callbacks (getter/setter) and their associated data. |
| 370 struct AccessorDescriptor { |
| 371 MaybeObject* (*getter)(Object* object, void* data); |
| 372 MaybeObject* (*setter)(JSObject* object, Object* value, void* data); |
| 373 void* data; |
| 374 }; |
| 375 |
| 376 |
| 377 // Logging and profiling. |
| 378 // A StateTag represents a possible state of the VM. When compiled with |
| 379 // ENABLE_VMSTATE_TRACKING, the logger maintains a stack of these. |
| 380 // Creating a VMState object enters a state by pushing on the stack, and |
| 381 // destroying a VMState object leaves a state by popping the current state |
| 382 // from the stack. |
| 383 |
| 384 #define STATE_TAG_LIST(V) \ |
| 385 V(JS) \ |
| 386 V(GC) \ |
| 387 V(COMPILER) \ |
| 388 V(OTHER) \ |
| 389 V(EXTERNAL) |
| 390 |
| 391 enum StateTag { |
| 392 #define DEF_STATE_TAG(name) name, |
| 393 STATE_TAG_LIST(DEF_STATE_TAG) |
| 394 #undef DEF_STATE_TAG |
| 395 // Pseudo-types. |
| 396 state_tag_count |
| 397 }; |
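The enum above is built with an X-macro so the tag list can be expanded more than once. A self-contained sketch of that pattern; the StateToString helper is hypothetical and only illustrates reusing the same list for a matching name table:

#include <cassert>
#include <cstring>

#define STATE_TAG_LIST_SKETCH(V) V(JS) V(GC) V(COMPILER) V(OTHER) V(EXTERNAL)

enum StateTagSketch {
#define DEF_STATE_TAG(name) name,
  STATE_TAG_LIST_SKETCH(DEF_STATE_TAG)
#undef DEF_STATE_TAG
  state_tag_count_sketch
};

// Expand the same list a second time to get matching strings.
const char* StateToString(StateTagSketch tag) {
  static const char* const kNames[] = {
#define DEF_STATE_NAME(name) #name,
    STATE_TAG_LIST_SKETCH(DEF_STATE_NAME)
#undef DEF_STATE_NAME
  };
  return kNames[tag];
}

int main() {
  assert(state_tag_count_sketch == 5);
  assert(std::strcmp(StateToString(GC), "GC") == 0);
  return 0;
}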
| 398 |
| 399 |
| 400 // ----------------------------------------------------------------------------- |
| 401 // Macros |
| 402 |
| 403 // Testers for tagged values. |
| 404 |
| 405 #define HAS_SMI_TAG(value) \ |
| 406 ((reinterpret_cast<intptr_t>(value) & kSmiTagMask) == kSmiTag) |
| 407 |
| 408 #define HAS_FAILURE_TAG(value) \ |
| 409 ((reinterpret_cast<intptr_t>(value) & kFailureTagMask) == kFailureTag) |
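A worked example of the two testers, using the smi tag constants from globals.h (assumption: kSmiTag == 0, kSmiTagMask == 1) together with the failure tag defined earlier in this header; illustration only:

#include <cassert>
#include <cstdint>

int main() {
  const intptr_t kSmiTag = 0, kSmiTagMask = 1;          // assumed from globals.h
  const intptr_t kFailureTag = 3, kFailureTagMask = 3;  // from this header
  intptr_t smi_like = 42 << 1;                     // smis keep a 0 in the low bit
  intptr_t failure_like = (7 << 2) | kFailureTag;  // failures keep 11 in the low bits
  assert((smi_like & kSmiTagMask) == kSmiTag);
  assert((failure_like & kFailureTagMask) == kFailureTag);
  assert((smi_like & kFailureTagMask) != kFailureTag);
  return 0;
}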
| 410 |
| 411 // OBJECT_POINTER_ALIGN returns the value aligned as a HeapObject pointer |
| 412 #define OBJECT_POINTER_ALIGN(value) \ |
| 413 (((value) + kObjectAlignmentMask) & ~kObjectAlignmentMask) |
| 414 |
| 415 // POINTER_SIZE_ALIGN returns the value aligned as a pointer. |
| 416 #define POINTER_SIZE_ALIGN(value) \ |
| 417 (((value) + kPointerAlignmentMask) & ~kPointerAlignmentMask) |
| 418 |
| 419 // MAP_POINTER_ALIGN returns the value aligned as a map pointer. |
| 420 #define MAP_POINTER_ALIGN(value) \ |
| 421 (((value) + kMapAlignmentMask) & ~kMapAlignmentMask) |
| 422 |
| 423 // CODE_POINTER_ALIGN returns the value aligned as a generated code segment. |
| 424 #define CODE_POINTER_ALIGN(value) \ |
| 425 (((value) + kCodeAlignmentMask) & ~kCodeAlignmentMask) |
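All four macros share the same round-up pattern: add the mask, then clear the low bits. A worked example, assuming a 32-bit host where kObjectAlignment == 4:

#include <cassert>
#include <cstdint>

int main() {
  const intptr_t kObjectAlignment = 4;  // assumption: 32-bit host
  const intptr_t kObjectAlignmentMask = kObjectAlignment - 1;
  intptr_t value = 13;
  intptr_t aligned = (value + kObjectAlignmentMask) & ~kObjectAlignmentMask;
  assert(aligned == 16);  // 13 rounds up to the next multiple of 4
  value = 16;
  aligned = (value + kObjectAlignmentMask) & ~kObjectAlignmentMask;
  assert(aligned == 16);  // already-aligned values are unchanged
  return 0;
}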
| 426 |
| 427 // Support for tracking C++ memory allocation. Insert TRACK_MEMORY("Fisk") |
| 428 // inside a C++ class; new and delete will then be overloaded so that |
| 429 // allocations and deallocations of that class are logged. |
| 430 // This file (v8globals.h) is included before log.h, so we use direct calls to |
| 431 // the Logger rather than the LOG macro. |
| 432 #ifdef DEBUG |
| 433 #define TRACK_MEMORY(name) \ |
| 434 void* operator new(size_t size) { \ |
| 435 void* result = ::operator new(size); \ |
| 436 Logger::NewEvent(name, result, size); \ |
| 437 return result; \ |
| 438 } \ |
| 439 void operator delete(void* object) { \ |
| 440 Logger::DeleteEvent(name, object); \ |
| 441 ::operator delete(object); \ |
| 442 } |
| 443 #else |
| 444 #define TRACK_MEMORY(name) |
| 445 #endif |
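A usage sketch for the macro: its body is pasted into a class, so in DEBUG builds every new and delete of that class is reported. StubLogger and TRACK_MEMORY_SKETCH below are stand-ins for illustration (V8's real Logger interface is assumed, not shown), and "Fisk" is just the example name from the comment.

#include <cstdio>
#include <new>
#include <stddef.h>

struct StubLogger {
  static void NewEvent(const char* name, void* object, size_t size) {
    std::printf("new %s %p (%u bytes)\n", name, object,
                static_cast<unsigned>(size));
  }
  static void DeleteEvent(const char* name, void* object) {
    std::printf("delete %s %p\n", name, object);
  }
};

#define TRACK_MEMORY_SKETCH(name)             \
  void* operator new(size_t size) {           \
    void* result = ::operator new(size);      \
    StubLogger::NewEvent(name, result, size); \
    return result;                            \
  }                                           \
  void operator delete(void* object) {        \
    StubLogger::DeleteEvent(name, object);    \
    ::operator delete(object);                \
  }

class Fisk {
 public:
  TRACK_MEMORY_SKETCH("Fisk")
  int payload;
};

int main() {
  Fisk* f = new Fisk;  // logs the allocation
  delete f;            // logs the deallocation
  return 0;
}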
| 446 |
| 447 |
| 448 // Feature flags bit positions. They are mostly based on the CPUID spec. |
| 449 // (We assign CPUID itself to one of the currently reserved bits -- |
| 450 // feel free to change this if needed.) |
| 451 // On X86/X64, values below 32 are bits in EDX, values of 32 and above are bits in ECX. |
| 452 enum CpuFeature { SSE4_1 = 32 + 19, // x86 |
| 453 SSE3 = 32 + 0, // x86 |
| 454 SSE2 = 26, // x86 |
| 455 CMOV = 15, // x86 |
| 456 RDTSC = 4, // x86 |
| 457 CPUID = 10, // x86 |
| 458 VFP3 = 1, // ARM |
| 459 ARMv7 = 2, // ARM |
| 460 SAHF = 0}; // x86 |
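A sketch (not V8 code) of how these positions map onto the CPUID feature registers: a position below 32 indexes a bit of EDX, and 32 or above indexes a bit of ECX. The edx/ecx values here are made up for illustration.

#include <cassert>
#include <cstdint>

int main() {
  const int SSE2 = 26;      // bit 26 of EDX
  const int SSE3 = 32 + 0;  // bit 0 of ECX
  uint32_t edx = 1u << 26;  // pretend the CPU reports SSE2 only
  uint32_t ecx = 0;
  uint64_t features = (static_cast<uint64_t>(ecx) << 32) | edx;
  assert((features & (static_cast<uint64_t>(1) << SSE2)) != 0);  // SSE2 present
  assert((features & (static_cast<uint64_t>(1) << SSE3)) == 0);  // SSE3 absent
  return 0;
}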
| 461 |
| 462 } } // namespace v8::internal |
| 463 |
| 464 #endif // V8_V8GLOBALS_H_ |
OLD | NEW |