OLD | NEW |
1 // Copyright (c) 1994-2006 Sun Microsystems Inc. | 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. |
2 // All Rights Reserved. | 2 // All Rights Reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions | 5 // modification, are permitted provided that the following conditions |
6 // are met: | 6 // are met: |
7 // | 7 // |
8 // - Redistributions of source code must retain the above copyright notice, | 8 // - Redistributions of source code must retain the above copyright notice, |
9 // this list of conditions and the following disclaimer. | 9 // this list of conditions and the following disclaimer. |
10 // | 10 // |
(...skipping 265 matching lines...)
276 } else { | 276 } else { |
277 // no relocation needed | 277 // no relocation needed |
278 imm32_ = reinterpret_cast<intptr_t>(obj); | 278 imm32_ = reinterpret_cast<intptr_t>(obj); |
279 rmode_ = RelocInfo::NONE32; | 279 rmode_ = RelocInfo::NONE32; |
280 } | 280 } |
281 } | 281 } |
282 | 282 |
283 | 283 |
284 Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) { | 284 Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) { |
285 ASSERT(is_uint5(shift_imm)); | 285 ASSERT(is_uint5(shift_imm)); |
| 286 ASSERT(shift_op != NO_SHIFT); |
286 | 287 |
287 rm_ = rm; | 288 rm_ = rm; |
288 rs_ = no_reg; | 289 rs_ = no_reg; |
289 shift_op_ = shift_op; | 290 shift_op_ = shift_op; |
290 shift_imm_ = shift_imm & 31; | 291 shift_imm_ = shift_imm & 31; |
291 | 292 |
292 if ((shift_op == ROR) && (shift_imm == 0)) { | 293 if ((shift_op == ROR) && (shift_imm == 0)) { |
293 // ROR #0 is functionally equivalent to LSL #0 and this allows us to encode | 294 // ROR #0 is functionally equivalent to LSL #0 and this allows us to encode |
294 // RRX as ROR #0 (See below). | 295 // RRX as ROR #0 (See below). |
295 shift_op = LSL; | 296 shift_op = LSL; |
296 } else if (shift_op == RRX) { | 297 } else if (shift_op == RRX) { |
297 // encoded as ROR with shift_imm == 0 | 298 // encoded as ROR with shift_imm == 0 |
298 ASSERT(shift_imm == 0); | 299 ASSERT(shift_imm == 0); |
299 shift_op_ = ROR; | 300 shift_op_ = ROR; |
300 shift_imm_ = 0; | 301 shift_imm_ = 0; |
301 } | 302 } |
302 } | 303 } |
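The ROR #0 / RRX remapping in this constructor is easier to follow with the barrel-shifter semantics written out. A minimal standalone sketch (hypothetical names, not code from this file and not the V8 ShiftOp enum), assuming the usual ARM Operand2 immediate-shift rules:

    #include <cstdint>

    // Hypothetical mirror of the five ARM immediate shift kinds.
    enum SketchShiftOp { kLSL, kLSR, kASR, kROR, kRRX };

    // Applies an immediate shift the way the ARM barrel shifter does.
    uint32_t ApplyImmediateShift(uint32_t value, SketchShiftOp op,
                                 unsigned shift_imm, bool carry_in) {
      shift_imm &= 31;
      switch (op) {
        case kLSL:
          return value << shift_imm;
        case kLSR:   // an encoded LSR #0 actually means LSR #32
          return shift_imm == 0 ? 0u : value >> shift_imm;
        case kASR: { // an encoded ASR #0 actually means ASR #32
          unsigned s = (shift_imm == 0) ? 31 : shift_imm;
          // Assumes >> on a negative int32_t is an arithmetic shift, which
          // holds for the compilers and targets V8 builds on.
          return static_cast<uint32_t>(static_cast<int32_t>(value) >> s);
        }
        case kROR:   // rotating by 0 changes nothing, so that slot is free...
          return shift_imm == 0
                     ? value
                     : (value >> shift_imm) | (value << (32 - shift_imm));
        case kRRX:   // ...and the encoding reuses "ROR #0" to mean RRX.
          return (value >> 1) | (carry_in ? 0x80000000u : 0u);
      }
      return value;
    }

Because a rotate by zero is a no-op, the "ROR #0" bit pattern carries no information of its own, and the architecture reuses it for RRX (rotate right by one through the carry flag); the constructor mirrors that by storing RRX as ROR with shift_imm_ == 0.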
303 | 304 |
304 | 305 |
305 Operand::Operand(Register rm, ShiftOp shift_op, Register rs) { | 306 Operand::Operand(Register rm, ShiftOp shift_op, Register rs) { |
306 ASSERT(shift_op != RRX); | 307 ASSERT((shift_op != RRX) && (shift_op != NO_SHIFT)); |
307 rm_ = rm; | 308 rm_ = rm; |
308 rs_ = no_reg; | 309 rs_ = no_reg; |
309 shift_op_ = shift_op; | 310 shift_op_ = shift_op; |
310 rs_ = rs; | 311 rs_ = rs; |
311 } | 312 } |
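For reference, call sites construct these operand flavours roughly as follows. This is an illustrative sketch only, not lines from this CL: it assumes the V8 ARM assembler headers are in scope and that the add/mov overloads taking an Operand exist as used here.

    // Sketch only: emits the three shifted-operand shapes discussed above.
    void EmitShiftedOperandExamples(Assembler* masm) {
      // add r0, r1, r2, LSL #2   (register shifted by an immediate)
      masm->add(r0, r1, Operand(r2, LSL, 2));
      // mov r0, r1, LSL r3       (register shifted by a register; RRX is not
      // available in this form, per the ASSERT above)
      masm->mov(r0, Operand(r1, LSL, r3));
      // mov r0, r1, RRX          (RRX spelled as the shift op with imm 0)
      masm->mov(r0, Operand(r1, RRX, 0));
    }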
312 | 313 |
313 | 314 |
314 MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) { | 315 MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) { |
315 rn_ = rn; | 316 rn_ = rn; |
316 rm_ = no_reg; | 317 rm_ = no_reg; |
(...skipping 622 matching lines...)
939 ASSERT(link >= 0); | 940 ASSERT(link >= 0); |
940 L->link_to(link); | 941 L->link_to(link); |
941 } | 942 } |
942 } | 943 } |
943 | 944 |
944 | 945 |
945 // Low-level code emission routines depending on the addressing mode. | 946 // Low-level code emission routines depending on the addressing mode. |
946 // If this returns true then you have to use the rotate_imm and immed_8 | 947 // If this returns true then you have to use the rotate_imm and immed_8 |
947 // that it returns, because it may have already changed the instruction | 948 // that it returns, because it may have already changed the instruction |
948 // to match them! | 949 // to match them! |
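For context, the encoding fits_shifter() tests for a plain data-processing instruction is ARM's Operand2 immediate: an 8-bit value rotated right by an even amount, 2 * rotate_imm with rotate_imm in [0, 15]. A minimal standalone sketch of that check (hypothetical helper, not code from this file; rot == 0 is special-cased to avoid an out-of-range 32-bit shift):

    #include <cstdint>

    bool EncodableAsOperand2(uint32_t imm32, uint32_t* rot_out, uint32_t* imm8_out) {
      for (uint32_t rot = 0; rot < 16; rot++) {
        // Rotate imm32 left by 2*rot; if the result fits in 8 bits, then
        // imm32 equals that 8-bit value rotated right by 2*rot.
        uint32_t imm8 = (rot == 0)
                            ? imm32
                            : (imm32 << (2 * rot)) | (imm32 >> (32 - 2 * rot));
        if (imm8 <= 0xff) {
          if (rot_out != nullptr) *rot_out = rot;
          if (imm8_out != nullptr) *imm8_out = imm8;
          return true;
        }
      }
      return false;
    }

For example, 0xFF000000 is encodable (rot == 4, imm8 == 0xFF), while 0x00000101 is not for any rotation. 0xFFFFFF00 is not encodable either, but its bitwise complement 0xFF is, which is what the mov/mvn (and cmp/cmn, movw) fallbacks further down exploit by flipping the opcode when the complemented or negated immediate fits.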
949 static bool fits_shifter(uint32_t imm32, | 950 bool fits_shifter(uint32_t imm32, |
950 uint32_t* rotate_imm, | 951 uint32_t* rotate_imm, |
951 uint32_t* immed_8, | 952 uint32_t* immed_8, |
952 Instr* instr) { | 953 Instr* instr) { |
953 // imm32 must be unsigned. | 954 // imm32 must be unsigned. |
954 for (int rot = 0; rot < 16; rot++) { | 955 for (int rot = 0; rot < 16; rot++) { |
955 uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot)); | 956 uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot)); |
956 if ((imm8 <= 0xff)) { | 957 if ((imm8 <= 0xff)) { |
957 *rotate_imm = rot; | 958 if (rotate_imm != NULL) *rotate_imm = rot; |
958 *immed_8 = imm8; | 959 if (immed_8 != NULL) *immed_8 = imm8; |
959 return true; | 960 return true; |
960 } | 961 } |
961 } | 962 } |
962 // If the opcode is one with a complementary version and the complementary | 963 // If the opcode is one with a complementary version and the complementary |
963 // immediate fits, change the opcode. | 964 // immediate fits, change the opcode. |
964 if (instr != NULL) { | 965 if (instr != NULL) { |
965 if ((*instr & kMovMvnMask) == kMovMvnPattern) { | 966 if ((*instr & kMovMvnMask) == kMovMvnPattern) { |
966 if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) { | 967 if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) { |
967 *instr ^= kMovMvnFlip; | 968 *instr ^= kMovMvnFlip; |
968 return true; | 969 return true; |
969 } else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) { | 970 } else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) { |
970 if (CpuFeatures::IsSupported(ARMv7)) { | 971 if (CpuFeatures::IsSupported(ARMv7)) { |
971 if (imm32 < 0x10000) { | 972 if (imm32 < 0x10000) { |
972 *instr ^= kMovwLeaveCCFlip; | 973 *instr ^= kMovwLeaveCCFlip; |
973 *instr |= EncodeMovwImmediate(imm32); | 974 *instr |= EncodeMovwImmediate(imm32); |
974 *rotate_imm = *immed_8 = 0; // Not used for movw. | 975 if (rotate_imm != NULL) *rotate_imm = 0; // Not used for movw. |
| 976 if (immed_8 != NULL) *immed_8 = 0; // Not used for movw. |
975 return true; | 977 return true; |
976 } | 978 } |
977 } | 979 } |
978 } | 980 } |
979 } else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) { | 981 } else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) { |
980 if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) { | 982 if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) { |
981 *instr ^= kCmpCmnFlip; | 983 *instr ^= kCmpCmnFlip; |
982 return true; | 984 return true; |
983 } | 985 } |
984 } else { | 986 } else { |
(...skipping 2677 matching lines...)
3662 ASSERT((index_64bit == count_of_64bit_) && | 3664 ASSERT((index_64bit == count_of_64bit_) && |
3663 (index_code_ptr == (index_64bit + count_of_code_ptr_)) && | 3665 (index_code_ptr == (index_64bit + count_of_code_ptr_)) && |
3664 (index_heap_ptr == (index_code_ptr + count_of_heap_ptr_)) && | 3666 (index_heap_ptr == (index_code_ptr + count_of_heap_ptr_)) && |
3665 (index_32bit == (index_heap_ptr + count_of_32bit_))); | 3667 (index_32bit == (index_heap_ptr + count_of_32bit_))); |
3666 } | 3668 } |
3667 | 3669 |
3668 | 3670 |
3669 } } // namespace v8::internal | 3671 } } // namespace v8::internal |
3670 | 3672 |
3671 #endif // V8_TARGET_ARCH_ARM | 3673 #endif // V8_TARGET_ARCH_ARM |