| OLD | NEW |
| 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. | 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. |
| 2 // All Rights Reserved. | 2 // All Rights Reserved. |
| 3 // | 3 // |
| 4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
| 5 // modification, are permitted provided that the following conditions | 5 // modification, are permitted provided that the following conditions |
| 6 // are met: | 6 // are met: |
| 7 // | 7 // |
| 8 // - Redistributions of source code must retain the above copyright notice, | 8 // - Redistributions of source code must retain the above copyright notice, |
| 9 // this list of conditions and the following disclaimer. | 9 // this list of conditions and the following disclaimer. |
| 10 // | 10 // |
| (...skipping 263 matching lines...) | |
| 274 } else { | 274 } else { |
| 275 // no relocation needed | 275 // no relocation needed |
| 276 imm32_ = reinterpret_cast<intptr_t>(obj); | 276 imm32_ = reinterpret_cast<intptr_t>(obj); |
| 277 rmode_ = RelocInfo::NONE32; | 277 rmode_ = RelocInfo::NONE32; |
| 278 } | 278 } |
| 279 } | 279 } |
| 280 | 280 |
| 281 | 281 |
| 282 Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) { | 282 Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) { |
| 283 ASSERT(is_uint5(shift_imm)); | 283 ASSERT(is_uint5(shift_imm)); |
| 284 ASSERT(shift_op != NO_SHIFT); | |
| 285 | 284 |
| 286 rm_ = rm; | 285 rm_ = rm; |
| 287 rs_ = no_reg; | 286 rs_ = no_reg; |
| 288 shift_op_ = shift_op; | 287 shift_op_ = shift_op; |
| 289 shift_imm_ = shift_imm & 31; | 288 shift_imm_ = shift_imm & 31; |
| 290 | 289 |
| 291 if ((shift_op == ROR) && (shift_imm == 0)) { | 290 if ((shift_op == ROR) && (shift_imm == 0)) { |
| 292 // ROR #0 is functionally equivalent to LSL #0 and this allows us to encode | 291 // ROR #0 is functionally equivalent to LSL #0 and this allows us to encode |
| 293 // RRX as ROR #0 (See below). | 292 // RRX as ROR #0 (See below). |
| 294 shift_op = LSL; | 293 shift_op = LSL; |
| 295 } else if (shift_op == RRX) { | 294 } else if (shift_op == RRX) { |
| 296 // encoded as ROR with shift_imm == 0 | 295 // encoded as ROR with shift_imm == 0 |
| 297 ASSERT(shift_imm == 0); | 296 ASSERT(shift_imm == 0); |
| 298 shift_op_ = ROR; | 297 shift_op_ = ROR; |
| 299 shift_imm_ = 0; | 298 shift_imm_ = 0; |
| 300 } | 299 } |
| 301 } | 300 } |
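For context, here is a minimal standalone sketch (not V8's emitter; the encode_shift_imm helper and the enum values are illustrative) of how an immediate-shift register operand is packed into the low 12 bits of an ARM data-processing instruction, and why RRX has to borrow the ROR-with-zero encoding slot, which is what the constructor above maps it to (and, per its comment, why an explicit ROR #0 is treated as LSL #0 instead):

    // Sketch only: ARM immediate-shift operand layout, for illustration.
    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Shift-type values as encoded in instruction bits 6:5.
    enum Shift { kLSL = 0, kLSR = 1, kASR = 2, kROR = 3 };

    // Pack "rm, <shift> #amount": bits 11:7 = amount, bits 6:5 = type,
    // bit 4 = 0 (immediate shift), bits 3:0 = rm.
    static uint32_t encode_shift_imm(int rm, Shift type, int amount) {
      assert(rm >= 0 && rm <= 15 && amount >= 0 && amount <= 31);
      return (static_cast<uint32_t>(amount) << 7) |
             (static_cast<uint32_t>(type) << 5) |
             static_cast<uint32_t>(rm);
    }

    int main() {
      printf("r1, LSL #2 -> 0x%03x\n", encode_shift_imm(1, kLSL, 2));
      // RRX has no shift-amount field of its own: it occupies the ROR #0
      // encoding slot, so the constructor stores it as ROR with
      // shift_imm_ == 0.
      printf("r1, RRX    -> 0x%03x\n", encode_shift_imm(1, kROR, 0));
      return 0;
    }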
| 302 | 301 |
| 303 | 302 |
| 304 Operand::Operand(Register rm, ShiftOp shift_op, Register rs) { | 303 Operand::Operand(Register rm, ShiftOp shift_op, Register rs) { |
| 305 ASSERT((shift_op != RRX) && (shift_op != NO_SHIFT)); | 304 ASSERT(shift_op != RRX); |
| 306 rm_ = rm; | 305 rm_ = rm; |
| 307 rs_ = no_reg; | 306 rs_ = no_reg; |
| 308 shift_op_ = shift_op; | 307 shift_op_ = shift_op; |
| 309 rs_ = rs; | 308 rs_ = rs; |
| 310 } | 309 } |
| 311 | 310 |
| 312 | 311 |
| 313 MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) { | 312 MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) { |
| 314 rn_ = rn; | 313 rn_ = rn; |
| 315 rm_ = no_reg; | 314 rm_ = no_reg; |
| (...skipping 635 matching lines...) | |
| 951 ASSERT(link >= 0); | 950 ASSERT(link >= 0); |
| 952 L->link_to(link); | 951 L->link_to(link); |
| 953 } | 952 } |
| 954 } | 953 } |
| 955 | 954 |
| 956 | 955 |
| 957 // Low-level code emission routines depending on the addressing mode. | 956 // Low-level code emission routines depending on the addressing mode. |
| 958 // If this returns true then you have to use the rotate_imm and immed_8 | 957 // If this returns true then you have to use the rotate_imm and immed_8 |
| 959 // that it returns, because it may have already changed the instruction | 958 // that it returns, because it may have already changed the instruction |
| 960 // to match them! | 959 // to match them! |
| 961 bool fits_shifter(uint32_t imm32, | 960 static bool fits_shifter(uint32_t imm32, |
| 962 uint32_t* rotate_imm, | 961 uint32_t* rotate_imm, |
| 963 uint32_t* immed_8, | 962 uint32_t* immed_8, |
| 964 Instr* instr) { | 963 Instr* instr) { |
| 965 // imm32 must be unsigned. | 964 // imm32 must be unsigned. |
| 966 for (int rot = 0; rot < 16; rot++) { | 965 for (int rot = 0; rot < 16; rot++) { |
| 967 uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot)); | 966 uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot)); |
| 968 if ((imm8 <= 0xff)) { | 967 if ((imm8 <= 0xff)) { |
| 969 if (rotate_imm != NULL) *rotate_imm = rot; | 968 *rotate_imm = rot; |
| 970 if (immed_8 != NULL) *immed_8 = imm8; | 969 *immed_8 = imm8; |
| 971 return true; | 970 return true; |
| 972 } | 971 } |
| 973 } | 972 } |
| 974 // If the opcode is one with a complementary version and the complementary | 973 // If the opcode is one with a complementary version and the complementary |
| 975 // immediate fits, change the opcode. | 974 // immediate fits, change the opcode. |
| 976 if (instr != NULL) { | 975 if (instr != NULL) { |
| 977 if ((*instr & kMovMvnMask) == kMovMvnPattern) { | 976 if ((*instr & kMovMvnMask) == kMovMvnPattern) { |
| 978 if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) { | 977 if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) { |
| 979 *instr ^= kMovMvnFlip; | 978 *instr ^= kMovMvnFlip; |
| 980 return true; | 979 return true; |
| 981 } else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) { | 980 } else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) { |
| 982 if (CpuFeatures::IsSupported(ARMv7)) { | 981 if (CpuFeatures::IsSupported(ARMv7)) { |
| 983 if (imm32 < 0x10000) { | 982 if (imm32 < 0x10000) { |
| 984 *instr ^= kMovwLeaveCCFlip; | 983 *instr ^= kMovwLeaveCCFlip; |
| 985 *instr |= EncodeMovwImmediate(imm32); | 984 *instr |= EncodeMovwImmediate(imm32); |
| 986 if (rotate_imm != NULL) *rotate_imm = 0; // Not used for movw. | 985 *rotate_imm = *immed_8 = 0; // Not used for movw. |
| 987 if (immed_8 != NULL) *immed_8 = 0; // Not used for movw. | |
| 988 return true; | 986 return true; |
| 989 } | 987 } |
| 990 } | 988 } |
| 991 } | 989 } |
| 992 } else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) { | 990 } else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) { |
| 993 if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) { | 991 if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) { |
| 994 *instr ^= kCmpCmnFlip; | 992 *instr ^= kCmpCmnFlip; |
| 995 return true; | 993 return true; |
| 996 } | 994 } |
| 997 } else { | 995 } else { |
| (...skipping 2678 matching lines...) | |
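For reference, a standalone sketch of the same "can this be a shifter immediate" test that the loop in fits_shifter performs above: an operand encodes if it is an 8-bit value rotated right by an even amount. The function name and the sample values below are illustrative only, and the rot == 0 branch simply avoids the undefined 32-bit shift a naive rotate expression would hit.

    #include <cstdint>
    #include <cstdio>

    // True if imm32 is an 8-bit value rotated right by 2*rot for some
    // rot in [0, 15] -- the test fits_shifter() runs before falling back
    // to the complementary opcode (mvn/cmn) or to movw on ARMv7.
    static bool encodes_as_arm_immediate(uint32_t imm32) {
      for (int rot = 0; rot < 16; rot++) {
        // Rotate left by 2*rot; encodable if the result fits in 8 bits.
        uint32_t imm8 = (rot == 0)
            ? imm32
            : (imm32 << (2 * rot)) | (imm32 >> (32 - 2 * rot));
        if (imm8 <= 0xff) return true;
      }
      return false;
    }

    int main() {
      printf("0x000000ff: %d\n", encodes_as_arm_immediate(0x000000ffu));  // 1
      printf("0xff000000: %d\n", encodes_as_arm_immediate(0xff000000u));  // 1 (0xff ror 8)
      // 0x101 sets bits 0 and 8, too far apart for any even rotation of an
      // 8-bit value, and its complement fails too -- so a mov of 0x101 ends
      // up taking the movw path above (0x101 < 0x10000) on ARMv7.
      printf("0x00000101: %d\n", encodes_as_arm_immediate(0x00000101u));  // 0
      printf("0xfffffefe: %d\n", encodes_as_arm_immediate(0xfffffefeu));  // 0
      return 0;
    }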
| 3676 ASSERT((index_64bit == count_of_64bit_) && | 3674 ASSERT((index_64bit == count_of_64bit_) && |
| 3677 (index_code_ptr == (index_64bit + count_of_code_ptr_)) && | 3675 (index_code_ptr == (index_64bit + count_of_code_ptr_)) && |
| 3678 (index_heap_ptr == (index_code_ptr + count_of_heap_ptr_)) && | 3676 (index_heap_ptr == (index_code_ptr + count_of_heap_ptr_)) && |
| 3679 (index_32bit == (index_heap_ptr + count_of_32bit_))); | 3677 (index_32bit == (index_heap_ptr + count_of_32bit_))); |
| 3680 } | 3678 } |
| 3681 | 3679 |
| 3682 | 3680 |
| 3683 } } // namespace v8::internal | 3681 } } // namespace v8::internal |
| 3684 | 3682 |
| 3685 #endif // V8_TARGET_ARCH_ARM | 3683 #endif // V8_TARGET_ARCH_ARM |