OLD | NEW |
1 // Copyright (c) 1994-2006 Sun Microsystems Inc. | 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. |
2 // All Rights Reserved. | 2 // All Rights Reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions | 5 // modification, are permitted provided that the following conditions |
6 // are met: | 6 // are met: |
7 // | 7 // |
8 // - Redistributions of source code must retain the above copyright notice, | 8 // - Redistributions of source code must retain the above copyright notice, |
9 // this list of conditions and the following disclaimer. | 9 // this list of conditions and the following disclaimer. |
10 // | 10 // |
(...skipping 29 matching lines...) |
40 #ifndef V8_ARM_ASSEMBLER_ARM_H_ | 40 #ifndef V8_ARM_ASSEMBLER_ARM_H_ |
41 #define V8_ARM_ASSEMBLER_ARM_H_ | 41 #define V8_ARM_ASSEMBLER_ARM_H_ |
42 #include <stdio.h> | 42 #include <stdio.h> |
43 #include "assembler.h" | 43 #include "assembler.h" |
44 #include "constants-arm.h" | 44 #include "constants-arm.h" |
45 #include "serialize.h" | 45 #include "serialize.h" |
46 | 46 |
47 namespace v8 { | 47 namespace v8 { |
48 namespace internal { | 48 namespace internal { |
49 | 49 |
| 50 // CpuFeatures keeps track of which features are supported by the target CPU. |
| 51 // Supported features must be enabled by a Scope before use. |
| 52 class CpuFeatures : public AllStatic { |
| 53 public: |
| 54 // Detect features of the target CPU. Set safe defaults if the serializer |
| 55 // is enabled (snapshots must be portable). |
| 56 static void Probe(); |
| 57 |
| 58 // Check whether a feature is supported by the target CPU. |
| 59 static bool IsSupported(CpuFeature f) { |
| 60 ASSERT(initialized_); |
| 61 if (f == VFP3 && !FLAG_enable_vfp3) return false; |
| 62 if (f == VFP2 && !FLAG_enable_vfp2) return false; |
| 63 if (f == SUDIV && !FLAG_enable_sudiv) return false; |
| 64 if (f == UNALIGNED_ACCESSES && !FLAG_enable_unaligned_accesses) { |
| 65 return false; |
| 66 } |
| 67 if (f == VFP32DREGS && !FLAG_enable_32dregs) return false; |
| 68 return (supported_ & (1u << f)) != 0; |
| 69 } |
| 70 |
| 71 #ifdef DEBUG |
| 72 // Check whether a feature is currently enabled. |
| 73 static bool IsEnabled(CpuFeature f) { |
| 74 ASSERT(initialized_); |
| 75 Isolate* isolate = Isolate::UncheckedCurrent(); |
| 76 if (isolate == NULL) { |
| 77 // When no isolate is available, work as if we're running in |
| 78 // release mode. |
| 79 return IsSupported(f); |
| 80 } |
| 81 unsigned enabled = static_cast<unsigned>(isolate->enabled_cpu_features()); |
| 82 return (enabled & (1u << f)) != 0; |
| 83 } |
| 84 #endif |
| 85 |
| 86 // Enable a specified feature within a scope. |
| 87 class Scope BASE_EMBEDDED { |
| 88 #ifdef DEBUG |
| 89 |
| 90 public: |
| 91 explicit Scope(CpuFeature f) { |
| 92 unsigned mask = 1u << f; |
| 93 // VFP2 and ARMv7 are implied by VFP3. |
| 94 if (f == VFP3) mask |= 1u << VFP2 | 1u << ARMv7; |
| 95 ASSERT(CpuFeatures::IsSupported(f)); |
| 96 ASSERT(!Serializer::enabled() || |
| 97 (CpuFeatures::found_by_runtime_probing_ & mask) == 0); |
| 98 isolate_ = Isolate::UncheckedCurrent(); |
| 99 old_enabled_ = 0; |
| 100 if (isolate_ != NULL) { |
| 101 old_enabled_ = static_cast<unsigned>(isolate_->enabled_cpu_features()); |
| 102 isolate_->set_enabled_cpu_features(old_enabled_ | mask); |
| 103 } |
| 104 } |
| 105 ~Scope() { |
| 106 ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_); |
| 107 if (isolate_ != NULL) { |
| 108 isolate_->set_enabled_cpu_features(old_enabled_); |
| 109 } |
| 110 } |
| 111 |
| 112 private: |
| 113 Isolate* isolate_; |
| 114 unsigned old_enabled_; |
| 115 #else |
| 116 |
| 117 public: |
| 118 explicit Scope(CpuFeature f) {} |
| 119 #endif |
| 120 }; |
| 121 |
| 122 class TryForceFeatureScope BASE_EMBEDDED { |
| 123 public: |
| 124 explicit TryForceFeatureScope(CpuFeature f) |
| 125 : old_supported_(CpuFeatures::supported_) { |
| 126 if (CanForce()) { |
| 127 CpuFeatures::supported_ |= (1u << f); |
| 128 } |
| 129 } |
| 130 |
| 131 ~TryForceFeatureScope() { |
| 132 if (CanForce()) { |
| 133 CpuFeatures::supported_ = old_supported_; |
| 134 } |
| 135 } |
| 136 |
| 137 private: |
| 138 static bool CanForce() { |
| 139 // It's only safe to temporarily force support of CPU features |
| 140 // when there's only a single isolate, which is guaranteed when |
| 141 // the serializer is enabled. |
| 142 return Serializer::enabled(); |
| 143 } |
| 144 |
| 145 const unsigned old_supported_; |
| 146 }; |
| 147 |
| 148 private: |
| 149 #ifdef DEBUG |
| 150 static bool initialized_; |
| 151 #endif |
| 152 static unsigned supported_; |
| 153 static unsigned found_by_runtime_probing_; |
| 154 |
| 155 friend class ExternalReference; |
| 156 DISALLOW_COPY_AND_ASSIGN(CpuFeatures); |
| 157 }; |
| 158 |
| 159 |
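A minimal usage sketch of the CpuFeatures::Scope pattern declared above (illustrative, not part of this change; the `__` macro standing for `masm->` and the choice of instruction are assumptions): callers first check IsSupported() and then open a Scope so the DEBUG-only IsEnabled() assertions hold while the feature-dependent instructions are emitted.

    // Hypothetical call site for a VFP3-guarded code path.
    if (CpuFeatures::IsSupported(VFP3)) {
      CpuFeatures::Scope scope(VFP3);   // also enables the implied VFP2/ARMv7 bits
      __ vadd(d0, d1, d2);              // VFP instructions may be emitted here
    } else {
      // fall back to a code path that avoids VFP3
    }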
50 // CPU Registers. | 160 // CPU Registers. |
51 // | 161 // |
52 // 1) We would prefer to use an enum, but enum values are assignment- | 162 // 1) We would prefer to use an enum, but enum values are assignment- |
53 // compatible with int, which has caused code-generation bugs. | 163 // compatible with int, which has caused code-generation bugs. |
54 // | 164 // |
55 // 2) We would prefer to use a class instead of a struct but we don't like | 165 // 2) We would prefer to use a class instead of a struct but we don't like |
56 // the register initialization to depend on the particular initialization | 166 // the register initialization to depend on the particular initialization |
57 // order (which appears to be different on OS X, Linux, and Windows for the | 167 // order (which appears to be different on OS X, Linux, and Windows for the |
58 // installed versions of C++ we tried). Using a struct permits C-style | 168 // installed versions of C++ we tried). Using a struct permits C-style |
59 // "initialization". Also, the Register objects cannot be const as this | 169 // "initialization". Also, the Register objects cannot be const as this |
(...skipping 125 matching lines...) |
185 *m = code_ & 0x1; | 295 *m = code_ & 0x1; |
186 *vm = code_ >> 1; | 296 *vm = code_ >> 1; |
187 } | 297 } |
188 | 298 |
189 int code_; | 299 int code_; |
190 }; | 300 }; |
191 | 301 |
192 | 302 |
193 // Double word VFP register. | 303 // Double word VFP register. |
194 struct DwVfpRegister { | 304 struct DwVfpRegister { |
195 static const int kNumRegisters = 16; | 305 static const int kNumRegisters = 32; |
196 // A few double registers are reserved: one as a scratch register and one to | 306 // A few double registers are reserved: one as a scratch register and one to |
197 // hold 0.0, that does not fit in the immediate field of vmov instructions. | 307 // hold 0.0, that does not fit in the immediate field of vmov instructions. |
198 // d14: 0.0 | 308 // d14: 0.0 |
199 // d15: scratch register. | 309 // d15: scratch register. |
200 static const int kNumReservedRegisters = 2; | 310 static const int kNumReservedRegisters = 2; |
201 static const int kMaxNumAllocatableRegisters = kNumRegisters - | 311 static const int kMaxNumAllocatableRegisters = kNumRegisters - |
202 kNumReservedRegisters; | 312 kNumReservedRegisters; |
203 | 313 |
204 inline static int NumRegisters(); | 314 inline static int NumRegisters(); |
205 inline static int NumAllocatableRegisters(); | 315 inline static int NumAllocatableRegisters(); |
206 inline static int ToAllocationIndex(DwVfpRegister reg); | 316 inline static int ToAllocationIndex(DwVfpRegister reg); |
207 static const char* AllocationIndexToString(int index); | 317 static const char* AllocationIndexToString(int index); |
208 | 318 inline static DwVfpRegister FromAllocationIndex(int index); |
209 static DwVfpRegister FromAllocationIndex(int index) { | |
210 ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters); | |
211 return from_code(index); | |
212 } | |
213 | 319 |
214 static DwVfpRegister from_code(int code) { | 320 static DwVfpRegister from_code(int code) { |
215 DwVfpRegister r = { code }; | 321 DwVfpRegister r = { code }; |
216 return r; | 322 return r; |
217 } | 323 } |
218 | 324 |
219 // Supporting d0 to d15, can be later extended to d31. | 325 bool is_valid() const { |
220 bool is_valid() const { return 0 <= code_ && code_ < 16; } | 326 return 0 <= code_ && code_ < kNumRegisters; |
| 327 } |
221 bool is(DwVfpRegister reg) const { return code_ == reg.code_; } | 328 bool is(DwVfpRegister reg) const { return code_ == reg.code_; } |
222 SwVfpRegister low() const { | 329 SwVfpRegister low() const { |
| 330 ASSERT(code_ < 16); |
223 SwVfpRegister reg; | 331 SwVfpRegister reg; |
224 reg.code_ = code_ * 2; | 332 reg.code_ = code_ * 2; |
225 | 333 |
226 ASSERT(reg.is_valid()); | 334 ASSERT(reg.is_valid()); |
227 return reg; | 335 return reg; |
228 } | 336 } |
229 SwVfpRegister high() const { | 337 SwVfpRegister high() const { |
| 338 ASSERT(code_ < 16); |
230 SwVfpRegister reg; | 339 SwVfpRegister reg; |
231 reg.code_ = (code_ * 2) + 1; | 340 reg.code_ = (code_ * 2) + 1; |
232 | 341 |
233 ASSERT(reg.is_valid()); | 342 ASSERT(reg.is_valid()); |
234 return reg; | 343 return reg; |
235 } | 344 } |
236 int code() const { | 345 int code() const { |
237 ASSERT(is_valid()); | 346 ASSERT(is_valid()); |
238 return code_; | 347 return code_; |
239 } | 348 } |
(...skipping 59 matching lines...) |
299 const DwVfpRegister d6 = { 6 }; | 408 const DwVfpRegister d6 = { 6 }; |
300 const DwVfpRegister d7 = { 7 }; | 409 const DwVfpRegister d7 = { 7 }; |
301 const DwVfpRegister d8 = { 8 }; | 410 const DwVfpRegister d8 = { 8 }; |
302 const DwVfpRegister d9 = { 9 }; | 411 const DwVfpRegister d9 = { 9 }; |
303 const DwVfpRegister d10 = { 10 }; | 412 const DwVfpRegister d10 = { 10 }; |
304 const DwVfpRegister d11 = { 11 }; | 413 const DwVfpRegister d11 = { 11 }; |
305 const DwVfpRegister d12 = { 12 }; | 414 const DwVfpRegister d12 = { 12 }; |
306 const DwVfpRegister d13 = { 13 }; | 415 const DwVfpRegister d13 = { 13 }; |
307 const DwVfpRegister d14 = { 14 }; | 416 const DwVfpRegister d14 = { 14 }; |
308 const DwVfpRegister d15 = { 15 }; | 417 const DwVfpRegister d15 = { 15 }; |
| 418 const DwVfpRegister d16 = { 16 }; |
| 419 const DwVfpRegister d17 = { 17 }; |
| 420 const DwVfpRegister d18 = { 18 }; |
| 421 const DwVfpRegister d19 = { 19 }; |
| 422 const DwVfpRegister d20 = { 20 }; |
| 423 const DwVfpRegister d21 = { 21 }; |
| 424 const DwVfpRegister d22 = { 22 }; |
| 425 const DwVfpRegister d23 = { 23 }; |
| 426 const DwVfpRegister d24 = { 24 }; |
| 427 const DwVfpRegister d25 = { 25 }; |
| 428 const DwVfpRegister d26 = { 26 }; |
| 429 const DwVfpRegister d27 = { 27 }; |
| 430 const DwVfpRegister d28 = { 28 }; |
| 431 const DwVfpRegister d29 = { 29 }; |
| 432 const DwVfpRegister d30 = { 30 }; |
| 433 const DwVfpRegister d31 = { 31 }; |
309 | 434 |
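With kNumRegisters raised to 32 and d16-d31 defined above, the usable register count now depends on the target CPU. A hedged sketch of what the inline NumRegisters()/NumAllocatableRegisters() definitions could look like (the real definitions live elsewhere, e.g. assembler-arm-inl.h, and may differ):

    // Sketch only: the D-register file shrinks back to 16 when the target
    // lacks the VFP32DREGS feature probed by CpuFeatures.
    int DwVfpRegister::NumRegisters() {
      return CpuFeatures::IsSupported(VFP32DREGS) ? 32 : 16;
    }

    int DwVfpRegister::NumAllocatableRegisters() {
      return NumRegisters() - kNumReservedRegisters;
    }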
310 const Register sfpd_lo = { kRegister_r6_Code }; | 435 const Register sfpd_lo = { kRegister_r6_Code }; |
311 const Register sfpd_hi = { kRegister_r7_Code }; | 436 const Register sfpd_hi = { kRegister_r7_Code }; |
312 | 437 |
313 // Aliases for double registers. Defined using #define instead of | 438 // Aliases for double registers. Defined using #define instead of |
314 // "static const DwVfpRegister&" because Clang complains otherwise when a | 439 // "static const DwVfpRegister&" because Clang complains otherwise when a |
315 // compilation unit that includes this header doesn't use the variables. | 440 // compilation unit that includes this header doesn't use the variables. |
316 #define kFirstCalleeSavedDoubleReg d8 | 441 #define kFirstCalleeSavedDoubleReg d8 |
317 #define kLastCalleeSavedDoubleReg d15 | 442 #define kLastCalleeSavedDoubleReg d15 |
318 #define kDoubleRegZero d14 | 443 #define kDoubleRegZero d14 |
(...skipping 159 matching lines...) |
478 Register rn_; // base | 603 Register rn_; // base |
479 Register rm_; // register offset | 604 Register rm_; // register offset |
480 int32_t offset_; // valid if rm_ == no_reg | 605 int32_t offset_; // valid if rm_ == no_reg |
481 ShiftOp shift_op_; | 606 ShiftOp shift_op_; |
482 int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg | 607 int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg |
483 AddrMode am_; // bits P, U, and W | 608 AddrMode am_; // bits P, U, and W |
484 | 609 |
485 friend class Assembler; | 610 friend class Assembler; |
486 }; | 611 }; |
487 | 612 |
488 // CpuFeatures keeps track of which features are supported by the target CPU. | |
489 // Supported features must be enabled by a Scope before use. | |
490 class CpuFeatures : public AllStatic { | |
491 public: | |
492 // Detect features of the target CPU. Set safe defaults if the serializer | |
493 // is enabled (snapshots must be portable). | |
494 static void Probe(); | |
495 | |
496 // Check whether a feature is supported by the target CPU. | |
497 static bool IsSupported(CpuFeature f) { | |
498 ASSERT(initialized_); | |
499 if (f == VFP3 && !FLAG_enable_vfp3) return false; | |
500 if (f == VFP2 && !FLAG_enable_vfp2) return false; | |
501 if (f == SUDIV && !FLAG_enable_sudiv) return false; | |
502 if (f == UNALIGNED_ACCESSES && !FLAG_enable_unaligned_accesses) { | |
503 return false; | |
504 } | |
505 return (supported_ & (1u << f)) != 0; | |
506 } | |
507 | |
508 #ifdef DEBUG | |
509 // Check whether a feature is currently enabled. | |
510 static bool IsEnabled(CpuFeature f) { | |
511 ASSERT(initialized_); | |
512 Isolate* isolate = Isolate::UncheckedCurrent(); | |
513 if (isolate == NULL) { | |
514 // When no isolate is available, work as if we're running in | |
515 // release mode. | |
516 return IsSupported(f); | |
517 } | |
518 unsigned enabled = static_cast<unsigned>(isolate->enabled_cpu_features()); | |
519 return (enabled & (1u << f)) != 0; | |
520 } | |
521 #endif | |
522 | |
523 // Enable a specified feature within a scope. | |
524 class Scope BASE_EMBEDDED { | |
525 #ifdef DEBUG | |
526 | |
527 public: | |
528 explicit Scope(CpuFeature f) { | |
529 unsigned mask = 1u << f; | |
530 // VFP2 and ARMv7 are implied by VFP3. | |
531 if (f == VFP3) mask |= 1u << VFP2 | 1u << ARMv7; | |
532 ASSERT(CpuFeatures::IsSupported(f)); | |
533 ASSERT(!Serializer::enabled() || | |
534 (CpuFeatures::found_by_runtime_probing_ & mask) == 0); | |
535 isolate_ = Isolate::UncheckedCurrent(); | |
536 old_enabled_ = 0; | |
537 if (isolate_ != NULL) { | |
538 old_enabled_ = static_cast<unsigned>(isolate_->enabled_cpu_features()); | |
539 isolate_->set_enabled_cpu_features(old_enabled_ | mask); | |
540 } | |
541 } | |
542 ~Scope() { | |
543 ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_); | |
544 if (isolate_ != NULL) { | |
545 isolate_->set_enabled_cpu_features(old_enabled_); | |
546 } | |
547 } | |
548 | |
549 private: | |
550 Isolate* isolate_; | |
551 unsigned old_enabled_; | |
552 #else | |
553 | |
554 public: | |
555 explicit Scope(CpuFeature f) {} | |
556 #endif | |
557 }; | |
558 | |
559 class TryForceFeatureScope BASE_EMBEDDED { | |
560 public: | |
561 explicit TryForceFeatureScope(CpuFeature f) | |
562 : old_supported_(CpuFeatures::supported_) { | |
563 if (CanForce()) { | |
564 CpuFeatures::supported_ |= (1u << f); | |
565 } | |
566 } | |
567 | |
568 ~TryForceFeatureScope() { | |
569 if (CanForce()) { | |
570 CpuFeatures::supported_ = old_supported_; | |
571 } | |
572 } | |
573 | |
574 private: | |
575 static bool CanForce() { | |
576 // It's only safe to temporarily force support of CPU features | |
577 // when there's only a single isolate, which is guaranteed when | |
578 // the serializer is enabled. | |
579 return Serializer::enabled(); | |
580 } | |
581 | |
582 const unsigned old_supported_; | |
583 }; | |
584 | |
585 private: | |
586 #ifdef DEBUG | |
587 static bool initialized_; | |
588 #endif | |
589 static unsigned supported_; | |
590 static unsigned found_by_runtime_probing_; | |
591 | |
592 DISALLOW_COPY_AND_ASSIGN(CpuFeatures); | |
593 }; | |
594 | |
595 | |
596 extern const Instr kMovLrPc; | 613 extern const Instr kMovLrPc; |
597 extern const Instr kLdrPCMask; | 614 extern const Instr kLdrPCMask; |
598 extern const Instr kLdrPCPattern; | 615 extern const Instr kLdrPCPattern; |
599 extern const Instr kBlxRegMask; | 616 extern const Instr kBlxRegMask; |
600 extern const Instr kBlxRegPattern; | 617 extern const Instr kBlxRegPattern; |
601 extern const Instr kBlxIp; | 618 extern const Instr kBlxIp; |
602 | 619 |
603 extern const Instr kMovMvnMask; | 620 extern const Instr kMovMvnMask; |
604 extern const Instr kMovMvnPattern; | 621 extern const Instr kMovMvnPattern; |
605 extern const Instr kMovMvnFlip; | 622 extern const Instr kMovMvnFlip; |
(...skipping 368 matching lines...) |
974 LFlag l = Short, Condition cond = al); | 991 LFlag l = Short, Condition cond = al); |
975 void ldc(Coprocessor coproc, CRegister crd, Register base, int option, | 992 void ldc(Coprocessor coproc, CRegister crd, Register base, int option, |
976 LFlag l = Short, Condition cond = al); | 993 LFlag l = Short, Condition cond = al); |
977 | 994 |
978 void ldc2(Coprocessor coproc, CRegister crd, const MemOperand& src, | 995 void ldc2(Coprocessor coproc, CRegister crd, const MemOperand& src, |
979 LFlag l = Short); // v5 and above | 996 LFlag l = Short); // v5 and above |
980 void ldc2(Coprocessor coproc, CRegister crd, Register base, int option, | 997 void ldc2(Coprocessor coproc, CRegister crd, Register base, int option, |
981 LFlag l = Short); // v5 and above | 998 LFlag l = Short); // v5 and above |
982 | 999 |
983 // Support for VFP. | 1000 // Support for VFP. |
984 // All these APIs support S0 to S31 and D0 to D15. | 1001 // All these APIs support S0 to S31 and D0 to D31. |
985 // Currently these APIs do not support extended D registers, i.e, D16 to D31. | |
986 // However, some simple modifications can allow | |
987 // these APIs to support D16 to D31. | |
988 | 1002 |
989 void vldr(const DwVfpRegister dst, | 1003 void vldr(const DwVfpRegister dst, |
990 const Register base, | 1004 const Register base, |
991 int offset, | 1005 int offset, |
992 const Condition cond = al); | 1006 const Condition cond = al); |
993 void vldr(const DwVfpRegister dst, | 1007 void vldr(const DwVfpRegister dst, |
994 const MemOperand& src, | 1008 const MemOperand& src, |
995 const Condition cond = al); | 1009 const Condition cond = al); |
996 | 1010 |
997 void vldr(const SwVfpRegister dst, | 1011 void vldr(const SwVfpRegister dst, |
(...skipping 48 matching lines...) |
1046 double imm, | 1060 double imm, |
1047 const Register scratch = no_reg, | 1061 const Register scratch = no_reg, |
1048 const Condition cond = al); | 1062 const Condition cond = al); |
1049 void vmov(const SwVfpRegister dst, | 1063 void vmov(const SwVfpRegister dst, |
1050 const SwVfpRegister src, | 1064 const SwVfpRegister src, |
1051 const Condition cond = al); | 1065 const Condition cond = al); |
1052 void vmov(const DwVfpRegister dst, | 1066 void vmov(const DwVfpRegister dst, |
1053 const DwVfpRegister src, | 1067 const DwVfpRegister src, |
1054 const Condition cond = al); | 1068 const Condition cond = al); |
1055 void vmov(const DwVfpRegister dst, | 1069 void vmov(const DwVfpRegister dst, |
| 1070 int index, |
| 1071 const Register src, |
| 1072 const Condition cond = al); |
| 1073 void vmov(const DwVfpRegister dst, |
1056 const Register src1, | 1074 const Register src1, |
1057 const Register src2, | 1075 const Register src2, |
1058 const Condition cond = al); | 1076 const Condition cond = al); |
1059 void vmov(const Register dst1, | 1077 void vmov(const Register dst1, |
1060 const Register dst2, | 1078 const Register dst2, |
1061 const DwVfpRegister src, | 1079 const DwVfpRegister src, |
1062 const Condition cond = al); | 1080 const Condition cond = al); |
1063 void vmov(const SwVfpRegister dst, | 1081 void vmov(const SwVfpRegister dst, |
1064 const Register src, | 1082 const Register src, |
1065 const Condition cond = al); | 1083 const Condition cond = al); |
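The new indexed vmov overload added above corresponds to the ARM "VMOV Dd[x], Rt" form, writing one 32-bit lane of a D register from a core register. A hypothetical call site (register choices and the `__` macro are illustrative; d16 additionally requires VFP32DREGS support):

    __ vmov(d16, 0, r0);  // low 32-bit lane of d16  <- r0
    __ vmov(d16, 1, r1);  // high 32-bit lane of d16 <- r1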
(...skipping 414 matching lines...) |
1480 public: | 1498 public: |
1481 explicit EnsureSpace(Assembler* assembler) { | 1499 explicit EnsureSpace(Assembler* assembler) { |
1482 assembler->CheckBuffer(); | 1500 assembler->CheckBuffer(); |
1483 } | 1501 } |
1484 }; | 1502 }; |
1485 | 1503 |
1486 | 1504 |
1487 } } // namespace v8::internal | 1505 } } // namespace v8::internal |
1488 | 1506 |
1489 #endif // V8_ARM_ASSEMBLER_ARM_H_ | 1507 #endif // V8_ARM_ASSEMBLER_ARM_H_ |