OLD | NEW |
1 // Copyright (c) 1994-2006 Sun Microsystems Inc. | 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. |
2 // All Rights Reserved. | 2 // All Rights Reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // - Redistributions of source code must retain the above copyright notice, | 8 // - Redistributions of source code must retain the above copyright notice, |
9 // this list of conditions and the following disclaimer. | 9 // this list of conditions and the following disclaimer. |
10 // | 10 // |
(...skipping 61 matching lines...)
72 void set_predictable_code_size(bool value) { predictable_code_size_ = value; } | 72 void set_predictable_code_size(bool value) { predictable_code_size_ = value; } |
73 | 73 |
74 uint64_t enabled_cpu_features() const { return enabled_cpu_features_; } | 74 uint64_t enabled_cpu_features() const { return enabled_cpu_features_; } |
75 void set_enabled_cpu_features(uint64_t features) { | 75 void set_enabled_cpu_features(uint64_t features) { |
76 enabled_cpu_features_ = features; | 76 enabled_cpu_features_ = features; |
77 } | 77 } |
78 bool IsEnabled(CpuFeature f) { | 78 bool IsEnabled(CpuFeature f) { |
79 return (enabled_cpu_features_ & (static_cast<uint64_t>(1) << f)) != 0; | 79 return (enabled_cpu_features_ & (static_cast<uint64_t>(1) << f)) != 0; |
80 } | 80 } |
81 | 81 |
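For context on the IsEnabled accessor above: enabled_cpu_features_ is a plain 64-bit bit set indexed by CpuFeature values. A minimal standalone sketch of the same shift-and-mask pattern, using placeholder feature names rather than V8's real CpuFeature enum:

    #include <cstdint>

    // Placeholder feature names; the real enum is v8::internal::CpuFeature.
    enum CpuFeature { FEATURE_A = 0, FEATURE_B = 1 };

    bool IsEnabled(uint64_t enabled_features, CpuFeature f) {
      // Bit f of the 64-bit word records whether feature f may be used.
      return (enabled_features & (static_cast<uint64_t>(1) << f)) != 0;
    }

    int main() {
      uint64_t features = static_cast<uint64_t>(1) << FEATURE_B;  // only FEATURE_B on
      return (IsEnabled(features, FEATURE_B) && !IsEnabled(features, FEATURE_A)) ? 0 : 1;
    }
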
82 bool is_constant_pool_available() const { | 82 bool is_ool_constant_pool_available() const { |
83 if (FLAG_enable_embedded_constant_pool) { | 83 if (FLAG_enable_ool_constant_pool) { |
84 return constant_pool_available_; | 84 return ool_constant_pool_available_; |
85 } else { | 85 } else { |
86 // Embedded constant pool not supported on this architecture. | 86 // Out-of-line constant pool not supported on this architecture. |
87 UNREACHABLE(); | 87 UNREACHABLE(); |
88 return false; | 88 return false; |
89 } | 89 } |
90 } | 90 } |
91 | 91 |
92 // Overwrite a host NaN with a quiet target NaN. Used by mksnapshot for | 92 // Overwrite a host NaN with a quiet target NaN. Used by mksnapshot for |
93 // cross-snapshotting. | 93 // cross-snapshotting. |
94 static void QuietNaN(HeapObject* nan) { } | 94 static void QuietNaN(HeapObject* nan) { } |
95 | 95 |
96 int pc_offset() const { return static_cast<int>(pc_ - buffer_); } | 96 int pc_offset() const { return static_cast<int>(pc_ - buffer_); } |
97 | 97 |
98 // This function is called when code generation is aborted, so that | 98 // This function is called when code generation is aborted, so that |
99 // the assembler could clean up internal data structures. | 99 // the assembler could clean up internal data structures. |
100 virtual void AbortedCodeGeneration() { } | 100 virtual void AbortedCodeGeneration() { } |
101 | 101 |
102 static const int kMinimalBufferSize = 4*KB; | 102 static const int kMinimalBufferSize = 4*KB; |
103 | 103 |
104 protected: | 104 protected: |
105 // The buffer into which code and relocation info are generated. It could | 105 // The buffer into which code and relocation info are generated. It could |
106 // either be owned by the assembler or be provided externally. | 106 // either be owned by the assembler or be provided externally. |
107 byte* buffer_; | 107 byte* buffer_; |
108 int buffer_size_; | 108 int buffer_size_; |
109 bool own_buffer_; | 109 bool own_buffer_; |
110 | 110 |
111 void set_constant_pool_available(bool available) { | 111 void set_ool_constant_pool_available(bool available) { |
112 if (FLAG_enable_embedded_constant_pool) { | 112 if (FLAG_enable_ool_constant_pool) { |
113 constant_pool_available_ = available; | 113 ool_constant_pool_available_ = available; |
114 } else { | 114 } else { |
115 // Embedded constant pool not supported on this architecture. | 115 // Out-of-line constant pool not supported on this architecture. |
116 UNREACHABLE(); | 116 UNREACHABLE(); |
117 } | 117 } |
118 } | 118 } |
119 | 119 |
120 // The program counter, which points into the buffer above and moves forward. | 120 // The program counter, which points into the buffer above and moves forward. |
121 byte* pc_; | 121 byte* pc_; |
122 | 122 |
123 private: | 123 private: |
124 Isolate* isolate_; | 124 Isolate* isolate_; |
125 int jit_cookie_; | 125 int jit_cookie_; |
126 uint64_t enabled_cpu_features_; | 126 uint64_t enabled_cpu_features_; |
127 bool emit_debug_code_; | 127 bool emit_debug_code_; |
128 bool predictable_code_size_; | 128 bool predictable_code_size_; |
129 bool serializer_enabled_; | 129 bool serializer_enabled_; |
130 | 130 |
131 // Indicates whether the constant pool can be accessed, which is only possible | 131 // Indicates whether the constant pool can be accessed, which is only possible |
132 // if the pp register points to the current code object's constant pool. | 132 // if the pp register points to the current code object's constant pool. |
133 bool constant_pool_available_; | 133 bool ool_constant_pool_available_; |
134 | 134 |
135 // Constant pool. | 135 // Constant pool. |
136 friend class FrameAndConstantPoolScope; | 136 friend class FrameAndConstantPoolScope; |
137 friend class ConstantPoolUnavailableScope; | 137 friend class ConstantPoolUnavailableScope; |
138 }; | 138 }; |
139 | 139 |
140 | 140 |
141 // Avoids emitting debug code during the lifetime of this scope object. | 141 // Avoids emitting debug code during the lifetime of this scope object. |
142 class DontEmitDebugCodeScope BASE_EMBEDDED { | 142 class DontEmitDebugCodeScope BASE_EMBEDDED { |
143 public: | 143 public: |
(...skipping 262 matching lines...)
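DontEmitDebugCodeScope above (its body falls in the skipped lines) is, per its comment, a scope object that suppresses debug code for its lifetime. A minimal RAII sketch of that save-and-restore pattern, with a simplified stand-in assembler rather than V8's real AssemblerBase:

    // Stand-in for the assembler; the real flag lives on AssemblerBase.
    struct FakeAssembler {
      bool emit_debug_code = true;
    };

    class DontEmitDebugCodeScopeSketch {
     public:
      explicit DontEmitDebugCodeScopeSketch(FakeAssembler* assembler)
          : assembler_(assembler), old_value_(assembler->emit_debug_code) {
        assembler_->emit_debug_code = false;  // suppress debug code in this scope
      }
      ~DontEmitDebugCodeScopeSketch() {
        assembler_->emit_debug_code = old_value_;  // restore the previous setting
      }

     private:
      FakeAssembler* assembler_;
      bool old_value_;
    };

    int main() {
      FakeAssembler assm;
      {
        DontEmitDebugCodeScopeSketch scope(&assm);
        // assm.emit_debug_code is false here.
      }
      return assm.emit_debug_code ? 0 : 1;  // restored to true on scope exit
    }
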
406 // Modes <= LAST_COMPACT_ENUM are guaranteed to have compact encoding. | 406 // Modes <= LAST_COMPACT_ENUM are guaranteed to have compact encoding. |
407 LAST_COMPACT_ENUM = CODE_TARGET_WITH_ID, | 407 LAST_COMPACT_ENUM = CODE_TARGET_WITH_ID, |
408 LAST_STANDARD_NONCOMPACT_ENUM = INTERNAL_REFERENCE_ENCODED | 408 LAST_STANDARD_NONCOMPACT_ENUM = INTERNAL_REFERENCE_ENCODED |
409 }; | 409 }; |
410 | 410 |
411 RelocInfo() {} | 411 RelocInfo() {} |
412 | 412 |
413 RelocInfo(byte* pc, Mode rmode, intptr_t data, Code* host) | 413 RelocInfo(byte* pc, Mode rmode, intptr_t data, Code* host) |
414 : pc_(pc), rmode_(rmode), data_(data), host_(host) { | 414 : pc_(pc), rmode_(rmode), data_(data), host_(host) { |
415 } | 415 } |
| 416 RelocInfo(byte* pc, double data64) |
| 417 : pc_(pc), rmode_(NONE64), data64_(data64), host_(NULL) { |
| 418 } |
416 | 419 |
417 static inline bool IsRealRelocMode(Mode mode) { | 420 static inline bool IsRealRelocMode(Mode mode) { |
418 return mode >= FIRST_REAL_RELOC_MODE && | 421 return mode >= FIRST_REAL_RELOC_MODE && |
419 mode <= LAST_REAL_RELOC_MODE; | 422 mode <= LAST_REAL_RELOC_MODE; |
420 } | 423 } |
421 static inline bool IsPseudoRelocMode(Mode mode) { | 424 static inline bool IsPseudoRelocMode(Mode mode) { |
422 DCHECK(!IsRealRelocMode(mode)); | 425 DCHECK(!IsRealRelocMode(mode)); |
423 return mode >= FIRST_PSEUDO_RELOC_MODE && | 426 return mode >= FIRST_PSEUDO_RELOC_MODE && |
424 mode <= LAST_PSEUDO_RELOC_MODE; | 427 mode <= LAST_PSEUDO_RELOC_MODE; |
425 } | 428 } |
(...skipping 51 matching lines...)
477 return mode == DEBUG_BREAK; | 480 return mode == DEBUG_BREAK; |
478 } | 481 } |
479 static inline bool IsNone(Mode mode) { | 482 static inline bool IsNone(Mode mode) { |
480 return mode == NONE32 || mode == NONE64; | 483 return mode == NONE32 || mode == NONE64; |
481 } | 484 } |
482 static inline bool IsCodeAgeSequence(Mode mode) { | 485 static inline bool IsCodeAgeSequence(Mode mode) { |
483 return mode == CODE_AGE_SEQUENCE; | 486 return mode == CODE_AGE_SEQUENCE; |
484 } | 487 } |
485 static inline int ModeMask(Mode mode) { return 1 << mode; } | 488 static inline int ModeMask(Mode mode) { return 1 << mode; } |
486 | 489 |
| 490 // Returns true if the first RelocInfo has the same mode and raw data as the |
| 491 // second one. |
| 492 static inline bool IsEqual(RelocInfo first, RelocInfo second) { |
| 493 return first.rmode() == second.rmode() && |
| 494 (first.rmode() == RelocInfo::NONE64 ? |
| 495 first.raw_data64() == second.raw_data64() : |
| 496 first.data() == second.data()); |
| 497 } |
| 498 |
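The new IsEqual helper compares raw_data64() instead of data64() for NONE64 entries. A value comparison of doubles would misbehave here: NaN never compares equal to itself, and distinct NaN bit patterns would be conflated. A small standalone illustration of the difference, using memcpy where V8 uses bit_cast:

    #include <cmath>
    #include <cstdint>
    #include <cstring>

    // Reinterpret the bits of a double, as bit_cast<uint64_t>(double) does.
    uint64_t RawBits(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      return bits;
    }

    int main() {
      double a = std::nan("");
      double b = a;
      bool value_equal = (a == b);                   // false: NaN != NaN
      bool bits_equal = (RawBits(a) == RawBits(b));  // true: identical bit patterns
      return (!value_equal && bits_equal) ? 0 : 1;
    }

This is presumably also why the private section below stores data64_ in a union alongside data_, so the raw bits of a 64-bit immediate survive untouched.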
487 // Accessors | 499 // Accessors |
488 byte* pc() const { return pc_; } | 500 byte* pc() const { return pc_; } |
489 void set_pc(byte* pc) { pc_ = pc; } | 501 void set_pc(byte* pc) { pc_ = pc; } |
490 Mode rmode() const { return rmode_; } | 502 Mode rmode() const { return rmode_; } |
491 intptr_t data() const { return data_; } | 503 intptr_t data() const { return data_; } |
| 504 double data64() const { return data64_; } |
| 505 uint64_t raw_data64() { return bit_cast<uint64_t>(data64_); } |
492 Code* host() const { return host_; } | 506 Code* host() const { return host_; } |
493 void set_host(Code* host) { host_ = host; } | 507 void set_host(Code* host) { host_ = host; } |
494 | 508 |
495 // Apply a relocation by delta bytes | 509 // Apply a relocation by delta bytes |
496 INLINE(void apply(intptr_t delta, | 510 INLINE(void apply(intptr_t delta, |
497 ICacheFlushMode icache_flush_mode = | 511 ICacheFlushMode icache_flush_mode = |
498 FLUSH_ICACHE_IF_NEEDED)); | 512 FLUSH_ICACHE_IF_NEEDED)); |
499 | 513 |
500 // Is the pointer this relocation info refers to coded like a plain pointer | 514 // Is the pointer this relocation info refers to coded like a plain pointer |
501 // or is it strange in some way (e.g. relative or patched into a series of | 515 // or is it strange in some way (e.g. relative or patched into a series of |
(...skipping 122 matching lines...)
624 (1 << CODE_TARGET_WITH_ID) | kPositionMask | (1 << COMMENT); | 638 (1 << CODE_TARGET_WITH_ID) | kPositionMask | (1 << COMMENT); |
625 static const int kApplyMask; // Modes affected by apply. Depends on arch. | 639 static const int kApplyMask; // Modes affected by apply. Depends on arch. |
626 | 640 |
627 private: | 641 private: |
628 // On ARM, note that pc_ is the address of the constant pool entry | 642 // On ARM, note that pc_ is the address of the constant pool entry |
629 // to be relocated and not the address of the instruction | 643 // to be relocated and not the address of the instruction |
630 // referencing the constant pool entry (except when rmode_ == | 644 // referencing the constant pool entry (except when rmode_ == |
631 // comment). | 645 // comment). |
632 byte* pc_; | 646 byte* pc_; |
633 Mode rmode_; | 647 Mode rmode_; |
634 intptr_t data_; | 648 union { |
| 649 intptr_t data_; |
| 650 double data64_; |
| 651 }; |
635 Code* host_; | 652 Code* host_; |
636 // External-reference pointers are also split across instruction-pairs | 653 // External-reference pointers are also split across instruction-pairs |
637 // on some platforms, but are accessed via indirect pointers. This location | 654 // on some platforms, but are accessed via indirect pointers. This location |
638 // provides a place for that pointer to exist naturally. Its address | 655 // provides a place for that pointer to exist naturally. Its address |
639 // is returned by RelocInfo::target_reference_address(). | 656 // is returned by RelocInfo::target_reference_address(). |
640 Address reconstructed_adr_ptr_; | 657 Address reconstructed_adr_ptr_; |
641 friend class RelocIterator; | 658 friend class RelocIterator; |
642 }; | 659 }; |
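ModeMask and the kApplyMask-style constants above treat Mode values as bit positions, so a set of modes collapses into one int that an iterator can test per mode. A minimal sketch of that filtering pattern with placeholder mode values (not the real RelocInfo::Mode enum):

    // Placeholder mode values; the real enum is RelocInfo::Mode.
    enum Mode { CODE_TARGET = 0, COMMENT = 1, POSITION = 2 };

    int ModeMask(Mode mode) { return 1 << mode; }

    bool MaskContains(int mask, Mode mode) { return (mask & ModeMask(mode)) != 0; }

    int main() {
      // A mask selecting just comments and positions, in the style of kApplyMask.
      int mask = ModeMask(COMMENT) | ModeMask(POSITION);
      return (MaskContains(mask, COMMENT) && !MaskContains(mask, CODE_TARGET)) ? 0 : 1;
    }
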
643 | 660 |
644 | 661 |
(...skipping 502 matching lines...)
1147 | 1164 |
1148 class NullCallWrapper : public CallWrapper { | 1165 class NullCallWrapper : public CallWrapper { |
1149 public: | 1166 public: |
1150 NullCallWrapper() { } | 1167 NullCallWrapper() { } |
1151 virtual ~NullCallWrapper() { } | 1168 virtual ~NullCallWrapper() { } |
1152 virtual void BeforeCall(int call_size) const { } | 1169 virtual void BeforeCall(int call_size) const { } |
1153 virtual void AfterCall() const { } | 1170 virtual void AfterCall() const { } |
1154 }; | 1171 }; |
1155 | 1172 |
1156 | 1173 |
1157 // ----------------------------------------------------------------------------- | |
1158 // Constant pool support | |
1159 | |
1160 class ConstantPoolEntry { | |
1161 public: | |
1162 ConstantPoolEntry() {} | |
1163 ConstantPoolEntry(int position, intptr_t value, bool sharing_ok) | |
1164 : position_(position), | |
1165 merged_index_(sharing_ok ? SHARING_ALLOWED : SHARING_PROHIBITED), | |
1166 value_(value) {} | |
1167 ConstantPoolEntry(int position, double value) | |
1168 : position_(position), merged_index_(SHARING_ALLOWED), value64_(value) {} | |
1169 | |
1170 int position() const { return position_; } | |
1171 bool sharing_ok() const { return merged_index_ != SHARING_PROHIBITED; } | |
1172 bool is_merged() const { return merged_index_ >= 0; } | |
1173 int merged_index(void) const { | |
1174 DCHECK(is_merged()); | |
1175 return merged_index_; | |
1176 } | |
1177 void set_merged_index(int index) { | |
1178 merged_index_ = index; | |
1179 DCHECK(is_merged()); | |
1180 } | |
1181 int offset(void) const { | |
1182 DCHECK(merged_index_ >= 0); | |
1183 return merged_index_; | |
1184 } | |
1185 void set_offset(int offset) { | |
1186 DCHECK(offset >= 0); | |
1187 merged_index_ = offset; | |
1188 } | |
1189 intptr_t value() const { return value_; } | |
1190 uint64_t value64() const { return bit_cast<uint64_t>(value64_); } | |
1191 | |
1192 enum Type { INTPTR, DOUBLE, NUMBER_OF_TYPES }; | |
1193 | |
1194 static int size(Type type) { | |
1195 return (type == INTPTR) ? kPointerSize : kDoubleSize; | |
1196 } | |
1197 | |
1198 enum Access { REGULAR, OVERFLOWED }; | |
1199 | |
1200 private: | |
1201 int position_; | |
1202 int merged_index_; | |
1203 union { | |
1204 intptr_t value_; | |
1205 double value64_; | |
1206 }; | |
1207 enum { SHARING_PROHIBITED = -2, SHARING_ALLOWED = -1 }; | |
1208 }; | |
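In the ConstantPoolEntry class above (removed on the right-hand side), merged_index_ does triple duty: -2 marks sharing as prohibited, -1 marks an unmerged entry that may still be shared, and a non-negative value is the index of the entry it merged with, later reused as the emitted offset. A standalone sketch of that sentinel scheme, simplified and not the V8 class itself:

    #include <cassert>

    class EntrySketch {
     public:
      explicit EntrySketch(bool sharing_ok)
          : merged_index_(sharing_ok ? SHARING_ALLOWED : SHARING_PROHIBITED) {}

      bool sharing_ok() const { return merged_index_ != SHARING_PROHIBITED; }
      bool is_merged() const { return merged_index_ >= 0; }
      void set_merged_index(int index) { merged_index_ = index; }  // index >= 0
      int merged_index() const { assert(is_merged()); return merged_index_; }

     private:
      enum { SHARING_PROHIBITED = -2, SHARING_ALLOWED = -1 };
      int merged_index_;
    };

    int main() {
      EntrySketch shared(true), unique(false);
      assert(shared.sharing_ok() && !shared.is_merged());
      assert(!unique.sharing_ok());
      shared.set_merged_index(3);  // now records the entry it merged with
      return shared.merged_index() == 3 ? 0 : 1;
    }
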
1209 | |
1210 | |
1211 // ----------------------------------------------------------------------------- | |
1212 // Embedded constant pool support | |
1213 | |
1214 class ConstantPoolBuilder BASE_EMBEDDED { | |
1215 public: | |
1216 ConstantPoolBuilder(int ptr_reach_bits, int double_reach_bits); | |
1217 | |
1218 // Add pointer-sized constant to the embedded constant pool | |
1219 ConstantPoolEntry::Access AddEntry(int position, intptr_t value, | |
1220 bool sharing_ok) { | |
1221 ConstantPoolEntry entry(position, value, sharing_ok); | |
1222 return AddEntry(entry, ConstantPoolEntry::INTPTR); | |
1223 } | |
1224 | |
1225 // Add double constant to the embedded constant pool | |
1226 ConstantPoolEntry::Access AddEntry(int position, double value) { | |
1227 ConstantPoolEntry entry(position, value); | |
1228 return AddEntry(entry, ConstantPoolEntry::DOUBLE); | |
1229 } | |
1230 | |
1231 // Previews the access type required for the next new entry to be added. | |
1232 ConstantPoolEntry::Access NextAccess(ConstantPoolEntry::Type type) const; | |
1233 | |
1234 bool IsEmpty() { | |
1235 return info_[ConstantPoolEntry::INTPTR].entries.empty() && | |
1236 info_[ConstantPoolEntry::INTPTR].shared_entries.empty() && | |
1237 info_[ConstantPoolEntry::DOUBLE].entries.empty() && | |
1238 info_[ConstantPoolEntry::DOUBLE].shared_entries.empty(); | |
1239 } | |
1240 | |
1241 // Emit the constant pool. Invoke only after all entries have been | |
1242 // added and all instructions have been emitted. | |
1243 // Returns position of the emitted pool (zero implies no constant pool). | |
1244 int Emit(Assembler* assm); | |
1245 | |
1246 // Returns the label associated with the start of the constant pool. | |
1247 // Linking to this label in the function prologue may provide an | |
1248 // efficient means of constant pool pointer register initialization | |
1249 // on some architectures. | |
1250 inline Label* EmittedPosition() { return &emitted_label_; } | |
1251 | |
1252 private: | |
1253 ConstantPoolEntry::Access AddEntry(ConstantPoolEntry& entry, | |
1254 ConstantPoolEntry::Type type); | |
1255 void EmitSharedEntries(Assembler* assm, ConstantPoolEntry::Type type); | |
1256 void EmitGroup(Assembler* assm, ConstantPoolEntry::Access access, | |
1257 ConstantPoolEntry::Type type); | |
1258 | |
1259 struct PerTypeEntryInfo { | |
1260 PerTypeEntryInfo() : regular_count(0), overflow_start(-1) {} | |
1261 bool overflow() const { | |
1262 return (overflow_start >= 0 && | |
1263 overflow_start < static_cast<int>(entries.size())); | |
1264 } | |
1265 int regular_reach_bits; | |
1266 int regular_count; | |
1267 int overflow_start; | |
1268 std::vector<ConstantPoolEntry> entries; | |
1269 std::vector<ConstantPoolEntry> shared_entries; | |
1270 }; | |
1271 | |
1272 Label emitted_label_; // Records pc_offset of emitted pool | |
1273 PerTypeEntryInfo info_[ConstantPoolEntry::NUMBER_OF_TYPES]; | |
1274 }; | |
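ConstantPoolBuilder above distinguishes REGULAR from OVERFLOWED access and takes per-type reach bits in its constructor. A plausible reading, inferred from the names only and not from the .cc implementation, is that an entry whose pool offset still fits in the addressing reach of a regular load stays REGULAR, and anything beyond that reach goes to the overflow group. A tiny sketch of that decision under that assumption:

    #include <cstdint>

    enum Access { REGULAR, OVERFLOWED };

    // Assumed rule: an offset is REGULAR if it fits a load whose offset field
    // has 'reach_bits' bits; otherwise the entry overflows.
    Access ClassifyOffset(int offset, int reach_bits) {
      int64_t limit = static_cast<int64_t>(1) << reach_bits;
      return (offset >= 0 && offset < limit) ? REGULAR : OVERFLOWED;
    }

    int main() {
      // With a 16-bit reach, offset 70000 no longer fits and overflows.
      return (ClassifyOffset(1024, 16) == REGULAR &&
              ClassifyOffset(70000, 16) == OVERFLOWED) ? 0 : 1;
    }
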
1275 | |
1276 | |
1277 } } // namespace v8::internal | 1174 } } // namespace v8::internal |
1278 | 1175 |
1279 #endif // V8_ASSEMBLER_H_ | 1176 #endif // V8_ASSEMBLER_H_ |