Chromium Code Reviews

Side by Side Diff: src/globals.h

Issue 5188006: Push version 2.5.7 to trunk.... (Closed) Base URL: http://v8.googlecode.com/svn/trunk/
Patch Set: Created 10 years, 1 month ago
1 // Copyright 2006-2009 the V8 project authors. All rights reserved.
1 // Copyright 2010 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 //     * Redistributions of source code must retain the above copyright
7 //       notice, this list of conditions and the following disclaimer.
8 //     * Redistributions in binary form must reproduce the above
9 //       copyright notice, this list of conditions and the following
10 //       disclaimer in the documentation and/or other materials provided
11 //       with the distribution.
(...skipping 174 matching lines...)
186 const int KB = 1024;
187 const int MB = KB * KB;
188 const int GB = KB * KB * KB;
189 const int kMaxInt = 0x7FFFFFFF;
190 const int kMinInt = -kMaxInt - 1;
191
192 const uint32_t kMaxUInt32 = 0xFFFFFFFFu;
193
194 const int kCharSize = sizeof(char); // NOLINT
195 const int kShortSize = sizeof(short); // NOLINT
196 const int kIntSize = sizeof(int); // NOLINT
196 const int kDoubleSize = sizeof(double); // NOLINT
197 const int kIntptrSize = sizeof(intptr_t); // NOLINT
198 // kIntSize and kPointerSize are defined in include/v8.h.
199 const int kPointerSize = sizeof(void*); // NOLINT

200 #if V8_HOST_ARCH_64_BIT
201 const int kPointerSizeLog2 = 3;
202 const intptr_t kIntptrSignBit = V8_INT64_C(0x8000000000000000);
203 const uintptr_t kUintptrAllBitsSet = V8_UINT64_C(0xFFFFFFFFFFFFFFFF);
204 #else
205 const int kPointerSizeLog2 = 2;
206 const intptr_t kIntptrSignBit = 0x80000000;
207 const uintptr_t kUintptrAllBitsSet = 0xFFFFFFFFu;
208 #endif
209
210 // Mask for the sign bit in a smi.
211 const intptr_t kSmiSignMask = kIntptrSignBit;
212
213 const int kObjectAlignmentBits = kPointerSizeLog2;
214 const intptr_t kObjectAlignment = 1 << kObjectAlignmentBits;
215 const intptr_t kObjectAlignmentMask = kObjectAlignment - 1;
216
217 // Desired alignment for pointers.
218 const intptr_t kPointerAlignment = (1 << kPointerSizeLog2);
219 const intptr_t kPointerAlignmentMask = kPointerAlignment - 1;
220
221 // Desired alignment for maps.
222 #if V8_HOST_ARCH_64_BIT
223 const intptr_t kMapAlignmentBits = kObjectAlignmentBits;
224 #else
225 const intptr_t kMapAlignmentBits = kObjectAlignmentBits + 3;
226 #endif
227 const intptr_t kMapAlignment = (1 << kMapAlignmentBits);
228 const intptr_t kMapAlignmentMask = kMapAlignment - 1;
229
230 // Desired alignment for generated code is 32 bytes (to improve cache line
231 // utilization).
232 const int kCodeAlignmentBits = 5;
233 const intptr_t kCodeAlignment = 1 << kCodeAlignmentBits;
234 const intptr_t kCodeAlignmentMask = kCodeAlignment - 1;
235
236 // Tag information for Failure.
237 const int kFailureTag = 3;
238 const int kFailureTagSize = 2;
239 const intptr_t kFailureTagMask = (1 << kFailureTagSize) - 1;
240
241
242 const int kBitsPerByte = 8;
243 const int kBitsPerByteLog2 = 3;
244 const int kBitsPerPointer = kPointerSize * kBitsPerByte;
245 const int kBitsPerInt = kIntSize * kBitsPerByte;
246
247 // IEEE 754 single precision floating point number bit layout.
248 const uint32_t kBinary32SignMask = 0x80000000u;
249 const uint32_t kBinary32ExponentMask = 0x7f800000u;
250 const uint32_t kBinary32MantissaMask = 0x007fffffu;
251 const int kBinary32ExponentBias = 127;
252 const int kBinary32MaxExponent = 0xFE;
253 const int kBinary32MinExponent = 0x01;
254 const int kBinary32MantissaBits = 23;
255 const int kBinary32ExponentShift = 23;
256
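
For illustration only (not part of this file): the kBinary32 masks above pull a float apart into sign, exponent, and mantissa. A minimal sketch, with the constant values repeated locally so it compiles on its own:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    // Values copied from the kBinary32* constants above.
    const uint32_t kSignMask = 0x80000000u;
    const uint32_t kExponentMask = 0x7f800000u;
    const uint32_t kMantissaMask = 0x007fffffu;
    const int kExponentShift = 23;
    const int kExponentBias = 127;

    int main() {
      float f = -6.25f;
      uint32_t bits;
      memcpy(&bits, &f, sizeof(bits));  // Reinterpret the float's bit pattern.
      bool negative = (bits & kSignMask) != 0;
      int exponent =
          static_cast<int>((bits & kExponentMask) >> kExponentShift) - kExponentBias;
      uint32_t mantissa = bits & kMantissaMask;
      printf("negative=%d exponent=%d mantissa=0x%06x\n", negative, exponent, mantissa);
      return 0;  // Prints negative=1 exponent=2 mantissa=0x480000 for -6.25f.
    }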
257 // Zap-value: The value used for zapping dead objects.
258 // Should be a recognizable hex value tagged as a heap object pointer.
259 #ifdef V8_HOST_ARCH_64_BIT
260 const Address kZapValue =
261 reinterpret_cast<Address>(V8_UINT64_C(0xdeadbeedbeadbeed));
262 const Address kHandleZapValue =
263 reinterpret_cast<Address>(V8_UINT64_C(0x1baddead0baddead));
264 const Address kFromSpaceZapValue =
265 reinterpret_cast<Address>(V8_UINT64_C(0x1beefdad0beefdad));
266 const uint64_t kDebugZapValue = 0xbadbaddbbadbaddb;
267 #else
268 const Address kZapValue = reinterpret_cast<Address>(0xdeadbeed);
269 const Address kHandleZapValue = reinterpret_cast<Address>(0xbaddead);
270 const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdad);
271 const uint32_t kDebugZapValue = 0xbadbaddb;
272 #endif
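
For illustration only (the helper below is hypothetical, not in this file): zapping overwrites dead slots with the recognizable pattern so that stale reads stand out in a debugger.

    // Fill a range of slots with the zap value; assumes the Address typedef
    // and kZapValue from this header are in scope.
    static void ZapSlots(Address* start, Address* end) {
      for (Address* current = start; current < end; ++current) {
        *current = kZapValue;  // A later read of a dead slot shows the 0xdeadbeed pattern.
      }
    }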
273
274
275 // Number of bits to represent the page size for paged spaces. The value of 13
276 // gives 8K bytes per page.
277 const int kPageSizeBits = 13;
278
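Worked example (not from the file): 1 << 13 is 8192, so a page is 8 KB, and an address can be rounded down to its page start by masking off the low kPageSizeBits bits. A sketch with the constant restated locally:

    #include <assert.h>
    #include <stdint.h>

    const int kPageSizeBits = 13;                    // Same value as above.
    const intptr_t kPageSize = 1 << kPageSizeBits;   // 8192 bytes.

    // Illustrative helper: round an address-sized integer down to its page start.
    intptr_t RoundDownToPage(intptr_t address) {
      return address & ~(kPageSize - 1);
    }

    int main() {
      assert(kPageSize == 8192);
      assert(RoundDownToPage(3 * kPageSize + 100) == 3 * kPageSize);
      return 0;
    }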
279 // On Intel architecture, cache line size is 64 bytes.
280 // On ARM it may be less (32 bytes), but since this constant is only
281 // used for aligning data, it doesn't hurt to align on a greater value.
282 const int kProcessorCacheLineSize = 64;
283
284 // Constants relevant to double precision floating point numbers.
285
286 // Quiet NaNs have bits 51 to 62 set, possibly the sign bit, and no
287 // other bits set.
288 const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
289 // If looking only at the top 32 bits, the QNaN mask is bits 19 to 30.
290 const uint32_t kQuietNaNHighBitsMask = 0xfff << (51 - 32);
291
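Illustrative sketch (the helper is hypothetical, not part of the header): a double whose bits match kQuietNaNMask in all twelve positions has an all-ones exponent and the top mantissa bit set, i.e. it is a quiet NaN.

    #include <assert.h>
    #include <math.h>
    #include <stdint.h>
    #include <string.h>

    // Same value as kQuietNaNMask above: bits 51..62 set.
    const uint64_t kQuietNaNBits = static_cast<uint64_t>(0xfff) << 51;

    bool HasQuietNaNBits(double value) {
      uint64_t bits;
      memcpy(&bits, &value, sizeof(bits));   // Reinterpret the double's bit pattern.
      return (bits & kQuietNaNBits) == kQuietNaNBits;
    }

    int main() {
      assert(HasQuietNaNBits(NAN));          // NAN expands to a quiet NaN on common toolchains.
      assert(!HasQuietNaNBits(1.0));
      return 0;
    }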
292
293 // -----------------------------------------------------------------------------
294 // Forward declarations for frequently used classes
295 // (sorted alphabetically)
296
297 class AccessorInfo;
298 class Allocation;
299 class Arguments;
300 class Assembler;
301 class AssertNoAllocation;
302 class BreakableStatement;
303 class Code;
304 class CodeGenerator;
305 class CodeStub;
306 class Context;
307 class Debug;
308 class Debugger;
309 class DebugInfo;
310 class Descriptor;
311 class DescriptorArray;
312 class Expression;
313 class ExternalReference;
314 class FixedArray;
315 class FunctionEntry;
316 class FunctionLiteral;
317 class FunctionTemplateInfo;
318 class NumberDictionary;
319 class StringDictionary;
320 class FreeStoreAllocationPolicy;
321 template <typename T> class Handle;
322 class Heap;
323 class HeapObject;
324 class IC;
325 class InterceptorInfo;
326 class IterationStatement;
327 class JSArray;
328 class JSFunction;
329 class JSObject;
330 class LargeObjectSpace;
331 template <typename T, class P = FreeStoreAllocationPolicy> class List;
332 class LookupResult;
333 class MacroAssembler;
334 class Map;
335 class MapSpace;
336 class MarkCompactCollector;
337 class NewSpace;
338 class NodeVisitor;
339 class Object;
340 class MaybeObject;
341 class OldSpace;
342 class Property;
343 class Proxy;
344 class RegExpNode;
345 struct RegExpCompileData;
346 class RegExpTree;
347 class RegExpCompiler;
348 class RegExpVisitor;
349 class Scope;
350 template<class Allocator = FreeStoreAllocationPolicy> class ScopeInfo;
351 class SerializedScopeInfo;
352 class Script;
353 class Slot;
354 class Smi;
355 template <typename Config, class Allocator = FreeStoreAllocationPolicy>
356 class SplayTree;
357 class Statement;
358 class String;
359 class Struct;
360 class SwitchStatement;
361 class AstVisitor;
362 class Variable;
363 class VariableProxy;
364 class RelocInfo;
365 class Deserializer;
366 class MessageLocation;
367 class ObjectGroup;
368 class TickSample;
369 class VirtualMemory;
370 class Mutex;
371
372 typedef bool (*WeakSlotCallback)(Object** pointer);
373
374 // -----------------------------------------------------------------------------
375 // Miscellaneous
376
377 // NOTE: SpaceIterator depends on AllocationSpace enumeration values being
378 // consecutive.
379 enum AllocationSpace {
380 NEW_SPACE, // Semispaces collected with copying collector.
381 OLD_POINTER_SPACE, // May contain pointers to new space.
382 OLD_DATA_SPACE, // Must not have pointers to new space.
383 CODE_SPACE, // No pointers to new space, marked executable.
384 MAP_SPACE, // Only and all map objects.
385 CELL_SPACE, // Only and all cell objects.
386 LO_SPACE, // Promoted large objects.
387
388 FIRST_SPACE = NEW_SPACE,
389 LAST_SPACE = LO_SPACE,
390 FIRST_PAGED_SPACE = OLD_POINTER_SPACE,
391 LAST_PAGED_SPACE = CELL_SPACE
392 };
393 const int kSpaceTagSize = 3;
394 const int kSpaceTagMask = (1 << kSpaceTagSize) - 1;
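Illustrative sketch (these helpers are not in the header): three tag bits are enough to number the seven spaces above, so a space can be packed into and recovered from the low bits of a word.

    // Assumes the AllocationSpace enum and kSpaceTagMask above are in scope.
    static intptr_t EncodeSpace(intptr_t word, AllocationSpace space) {
      return (word & ~static_cast<intptr_t>(kSpaceTagMask)) | space;
    }

    static AllocationSpace DecodeSpace(intptr_t word) {
      return static_cast<AllocationSpace>(word & kSpaceTagMask);
    }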
395
396
397 // A flag that indicates whether objects should be pretenured when
398 // allocated (allocated directly into the old generation) or not
399 // (allocated in the young generation if the object size and type
400 // allows).
401 enum PretenureFlag { NOT_TENURED, TENURED };
402
403 enum GarbageCollector { SCAVENGER, MARK_COMPACTOR };
404
405 enum Executability { NOT_EXECUTABLE, EXECUTABLE };
406
407 enum VisitMode { VISIT_ALL, VISIT_ALL_IN_SCAVENGE, VISIT_ONLY_STRONG };
408
409 // Flag indicating whether code is built into the VM (one of the natives files).
410 enum NativesFlag { NOT_NATIVES_CODE, NATIVES_CODE };
411
412
413 // A CodeDesc describes a buffer holding instructions and relocation
414 // information. The instructions start at the beginning of the buffer
415 // and grow forward, the relocation information starts at the end of
416 // the buffer and grows backward.
417 //
418 // |<--------------- buffer_size ---------------->|
419 // |<-- instr_size -->| |<-- reloc_size -->|
420 // +==================+========+==================+
421 // | instructions | free | reloc info |
422 // +==================+========+==================+
423 // ^
424 // |
425 // buffer
426
427 struct CodeDesc {
428 byte* buffer;
429 int buffer_size;
430 int instr_size;
431 int reloc_size;
432 Assembler* origin;
433 };
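
To make the diagram concrete, a hedged sketch (the helpers below are hypothetical, assuming the CodeDesc just declared): the reloc info occupies the tail of the buffer, and the gap between the two regions is the free space.

    // Start of the relocation info, which grows backward from the end of the buffer.
    static byte* RelocInfoStart(const CodeDesc& desc) {
      return desc.buffer + desc.buffer_size - desc.reloc_size;
    }

    // Unused bytes between the instructions and the relocation info.
    static int FreeSpace(const CodeDesc& desc) {
      return desc.buffer_size - desc.instr_size - desc.reloc_size;
    }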
434
435
436 // Callback function on object slots, used for iterating heap object slots in
437 // HeapObjects, global pointers to heap objects, etc. The callback is
438 // allowed to change the value of the slot.
439 typedef void (*ObjectSlotCallback)(HeapObject** pointer);
440
441
442 // Callback function used for iterating objects in heap spaces,
443 // for example, scanning heap objects.
444 typedef int (*HeapObjectCallback)(HeapObject* obj);
445
446
447 // Callback function used for checking constraints when copying/relocating
448 // objects. Returns true if an object can be copied/relocated from its
449 // old_addr to a new_addr.
450 typedef bool (*ConstraintCallback)(Address new_addr, Address old_addr);
451
452
453 // Callback function on inline caches, used for iterating over inline caches
454 // in compiled code.
455 typedef void (*InlineCacheCallback)(Code* code, Address ic);
456
457
458 // State for inline cache call sites. Aliased as IC::State.
459 enum InlineCacheState {
460 // Has never been executed.
461 UNINITIALIZED,
462 // Has been executed, but the monomorphic state has been delayed.
463 PREMONOMORPHIC,
464 // Has been executed and only one receiver type has been seen.
465 MONOMORPHIC,
466 // Like MONOMORPHIC but check failed due to prototype.
467 MONOMORPHIC_PROTOTYPE_FAILURE,
468 // Multiple receiver types have been seen.
469 MEGAMORPHIC,
470 // Special states for debug break or step in prepare stubs.
471 DEBUG_BREAK,
472 DEBUG_PREPARE_STEP_IN
473 };
474
475
476 enum InLoopFlag {
477 NOT_IN_LOOP,
478 IN_LOOP
479 };
480
481
482 enum CallFunctionFlags {
483 NO_CALL_FUNCTION_FLAGS = 0,
484 RECEIVER_MIGHT_BE_VALUE = 1 << 0 // Receiver might not be a JSObject.
485 };
486
487
488 enum InlineCacheHolderFlag {
489 OWN_MAP, // For fast properties objects.
490 PROTOTYPE_MAP // For slow properties objects (except GlobalObjects).
491 };
492
493
494 // Type of properties.
495 // Order of properties is significant.
496 // Must fit in the BitField PropertyDetails::TypeField.
497 // A copy of this is in mirror-debugger.js.
498 enum PropertyType {
499 NORMAL = 0, // only in slow mode
500 FIELD = 1, // only in fast mode
501 CONSTANT_FUNCTION = 2, // only in fast mode
502 CALLBACKS = 3,
503 INTERCEPTOR = 4, // only in lookup results, not in descriptors.
504 MAP_TRANSITION = 5, // only in fast mode
505 CONSTANT_TRANSITION = 6, // only in fast mode
506 NULL_DESCRIPTOR = 7, // only in fast mode
507 // All properties before MAP_TRANSITION are real.
508 FIRST_PHANTOM_PROPERTY_TYPE = MAP_TRANSITION,
509 // There are no IC stubs for NULL_DESCRIPTORS. Therefore,
510 // NULL_DESCRIPTOR can be used as the type flag for IC stubs for
511 // nonexistent properties.
512 NONEXISTENT = NULL_DESCRIPTOR
513 };
514
515
516 // Whether to remove map transitions and constant transitions from a
517 // DescriptorArray.
518 enum TransitionFlag {
519 REMOVE_TRANSITIONS,
520 KEEP_TRANSITIONS
521 };
522
523
524 // Union used for fast testing of specific double values.
525 union DoubleRepresentation {
526 double value;
527 int64_t bits;
528 DoubleRepresentation(double x) { value = x; }
529 };
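
For illustration (not part of the file): the union supports bit-exact tests that a plain == cannot express, e.g. telling -0.0 apart from +0.0.

    // Sketch: -0.0 == 0.0 compares true, but the two values have different bit patterns.
    static bool IsMinusZero(double value) {
      DoubleRepresentation rep(value);
      DoubleRepresentation minus_zero(-0.0);
      return rep.bits == minus_zero.bits;
    }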
530
531
532 // Union used for customized checking of the IEEE double types
533 // inlined within v8 runtime, rather than going to the underlying
534 // platform headers and libraries
535 union IeeeDoubleLittleEndianArchType {
536 double d;
537 struct {
538 unsigned int man_low :32;
539 unsigned int man_high :20;
540 unsigned int exp :11;
541 unsigned int sign :1;
542 } bits;
543 };
544
545
546 union IeeeDoubleBigEndianArchType {
547 double d;
548 struct {
549 unsigned int sign :1;
550 unsigned int exp :11;
551 unsigned int man_high :20;
552 unsigned int man_low :32;
553 } bits;
554 };
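
Sketch for the little-endian case only (the helper name is hypothetical): the bit-fields expose the sign and the 11-bit biased exponent without any shifting or masking.

    // Returns the biased exponent of a double on a little-endian host.
    static unsigned BiasedExponent(double value) {
      IeeeDoubleLittleEndianArchType rep;
      rep.d = value;
      return rep.bits.exp;   // 1023 for 1.0, 0 for zeros and denormals, 2047 for Inf/NaN.
    }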
555
556
557 // AccessorCallback
558 struct AccessorDescriptor {
559 MaybeObject* (*getter)(Object* object, void* data);
560 MaybeObject* (*setter)(JSObject* object, Object* value, void* data);
561 void* data;
562 };
563
564
565 // Logging and profiling.
566 // A StateTag represents a possible state of the VM. When compiled with
567 // ENABLE_VMSTATE_TRACKING, the logger maintains a stack of these.
568 // Creating a VMState object enters a state by pushing on the stack, and
569 // destroying a VMState object leaves a state by popping the current state
570 // from the stack.
571
572 #define STATE_TAG_LIST(V) \
573 V(JS) \
574 V(GC) \
575 V(COMPILER) \
576 V(OTHER) \
577 V(EXTERNAL)
578
579 enum StateTag {
580 #define DEF_STATE_TAG(name) name,
581 STATE_TAG_LIST(DEF_STATE_TAG)
582 #undef DEF_STATE_TAG
583 // Pseudo-types.
584 state_tag_count
585 };
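
The list macro above can be expanded more than once; purely as an illustration (this table is not part of the header), the same list can also generate printable names that stay in sync with the enum:

    #define DEF_STATE_TAG_NAME(name) #name,
    static const char* const kStateTagNames[] = {
      STATE_TAG_LIST(DEF_STATE_TAG_NAME)
    };
    #undef DEF_STATE_TAG_NAME
    // kStateTagNames[JS] == "JS", kStateTagNames[GC] == "GC", and so on,
    // because both expansions walk STATE_TAG_LIST in the same order.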
586
587
588 // -----------------------------------------------------------------------------
589 // Macros
590
591 // Testers for tagged values.
592
593 #define HAS_SMI_TAG(value) \
594 ((reinterpret_cast<intptr_t>(value) & kSmiTagMask) == kSmiTag)
595
596 #define HAS_FAILURE_TAG(value) \
597 ((reinterpret_cast<intptr_t>(value) & kFailureTagMask) == kFailureTag)
598
599 // OBJECT_POINTER_ALIGN returns the value aligned as a HeapObject pointer
600 #define OBJECT_POINTER_ALIGN(value) \
601 (((value) + kObjectAlignmentMask) & ~kObjectAlignmentMask)
602
603 // POINTER_SIZE_ALIGN returns the value aligned as a pointer.
604 #define POINTER_SIZE_ALIGN(value) \
605 (((value) + kPointerAlignmentMask) & ~kPointerAlignmentMask)
606
607 // MAP_POINTER_ALIGN returns the value aligned as a map pointer.
608 #define MAP_POINTER_ALIGN(value) \
609 (((value) + kMapAlignmentMask) & ~kMapAlignmentMask)
610
611 // CODE_POINTER_ALIGN returns the value aligned as a generated code segment.
612 #define CODE_POINTER_ALIGN(value) \
613 (((value) + kCodeAlignmentMask) & ~kCodeAlignmentMask)
614
615 // The expression OFFSET_OF(type, field) computes the byte-offset
616 // of the specified field relative to the containing type. This
617 // corresponds to 'offsetof' (in stddef.h), except that it doesn't
618 // use 0 or NULL, which causes a problem with the compiler warnings
619 // we have enabled (which is also why 'offsetof' doesn't seem to work).
620 // Here we simply use the non-zero value 4, which seems to work.
621 #define OFFSET_OF(type, field) \
622 (reinterpret_cast<intptr_t>(&(reinterpret_cast<type*>(4)->field)) - 4)
623
624
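Usage sketch (illustrative only, assuming the CodeDesc struct declared earlier in this file):

    // Byte offset of instr_size inside CodeDesc, computed without offsetof.
    const intptr_t kInstrSizeOffset = OFFSET_OF(CodeDesc, instr_size);
    // On a 32-bit build this is 8: buffer (4 bytes) and buffer_size (4 bytes) come first.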
(...skipping 36 matching lines...)
661 // default constructor, copy constructor and operator= functions.
662 //
663 // This should be used in the private: declarations for a class
664 // that wants to prevent anyone from instantiating it. This is
665 // especially useful for classes containing only static methods.
666 #define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
667 TypeName(); \
668 DISALLOW_COPY_AND_ASSIGN(TypeName)
669
670
671 // Support for tracking C++ memory allocation. Insert TRACK_MEMORY("Fisk")
672 // inside a C++ class and new and delete will be overloaded so logging is
673 // performed.
674 // This file (globals.h) is included before log.h, so we use direct calls to
675 // the Logger rather than the LOG macro.
676 #ifdef DEBUG
677 #define TRACK_MEMORY(name) \
678 void* operator new(size_t size) { \
679 void* result = ::operator new(size); \
680 Logger::NewEvent(name, result, size); \
681 return result; \
682 } \
683 void operator delete(void* object) { \
684 Logger::DeleteEvent(name, object); \
685 ::operator delete(object); \
686 }
687 #else
688 #define TRACK_MEMORY(name)
689 #endif
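Usage is a single line in a class body; a minimal sketch (the class below is made up):

    // In DEBUG builds, allocations of this class are reported through Logger.
    class SampleTrackedThing {
     public:
      TRACK_MEMORY("SampleTrackedThing")
    };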
690
691 // Define used to help GCC make better inlining decisions. Don't bother for
692 // debug builds. On GCC 3.4.5, using __attribute__((always_inline)) causes
693 // compilation errors in debug builds.
694 #if defined(__GNUC__) && !defined(DEBUG)
695 #if (__GNUC__ >= 4)
696 #define INLINE(header) inline header __attribute__((always_inline))
697 #define NO_INLINE(header) header __attribute__((noinline))
698 #else
699 #define INLINE(header) inline __attribute__((always_inline)) header
700 #define NO_INLINE(header) __attribute__((noinline)) header
701 #endif
702 #else
703 #define INLINE(header) inline header
704 #define NO_INLINE(header) header
705 #endif
706
707
708 #if defined(__GNUC__) && __GNUC__ >= 4
709 #define MUST_USE_RESULT __attribute__ ((warn_unused_result))
710 #else
711 #define MUST_USE_RESULT
712 #endif
713
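Sketch of how the wrappers attach to declarations (the function names are hypothetical): the whole declaration header goes inside INLINE/NO_INLINE, while MUST_USE_RESULT prefixes it.

    INLINE(int Square(int x));                // Expands to an always_inline declaration on GCC 4+.
    NO_INLINE(void ReportError(const char* message));
    MUST_USE_RESULT bool TryParseInt(const char* str, int* value_out);  // Result must not be ignored.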
305 // -----------------------------------------------------------------------------
306 // Forward declarations for frequently used classes
307 // (sorted alphabetically)
308
309 class FreeStoreAllocationPolicy;
310 template <typename T, class P = FreeStoreAllocationPolicy> class List;
714
715 // Feature flags bit positions. They are mostly based on the CPUID spec.
716 // (We assign CPUID itself to one of the currently reserved bits --
717 // feel free to change this if needed.)
718 // On X86/X64, values below 32 are bits in EDX, values above 32 are bits in ECX.
719 enum CpuFeature { SSE4_1 = 32 + 19, // x86
720 SSE3 = 32 + 0, // x86
721 SSE2 = 26, // x86
722 CMOV = 15, // x86
723 RDTSC = 4, // x86
724 CPUID = 10, // x86
725 VFP3 = 1, // ARM
726 ARMv7 = 2, // ARM
727 SAHF = 0}; // x86
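
Illustrative sketch (the helper is hypothetical): on x86 the enum packs the CPUID register and the bit index into one value, so a feature can be split back into (register, bit).

    // SSE2 == 26 -> bit 26 of EDX; SSE3 == 32 + 0 -> bit 0 of ECX.
    struct X86FeatureBit { bool in_ecx; int bit; };

    static X86FeatureBit DecodeX86Feature(CpuFeature feature) {
      X86FeatureBit result;
      result.in_ecx = feature >= 32;
      result.bit = result.in_ecx ? feature - 32 : feature;
      return result;
    }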
728
729 } } // namespace v8::internal
730
731 #endif // V8_GLOBALS_H_