OLD | NEW |
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 312 matching lines...)
323 | 323 |
324 Instr Assembler::thumb32_3reg_lsl(Register rd, | 324 Instr Assembler::thumb32_3reg_lsl(Register rd, |
325 const MemOperand& x) { | 325 const MemOperand& x) { |
326 ASSERT(x.rn_.is_valid() && x.rm_.is_valid());  // Register-offset form: rn and rm both valid | 326 ASSERT(x.rn_.is_valid() && x.rm_.is_valid());  // Register-offset form: rn and rm both valid |
327 uint8_t imm2 = 0; | 327 uint8_t imm2 = 0; |
328 if (x.shift_op_ == LSL && is_uint2(x.shift_imm_)) { | 328 if (x.shift_op_ == LSL && is_uint2(x.shift_imm_)) { |
329 imm2 = x.shift_imm_ & 3; | 329 imm2 = x.shift_imm_ & 3; |
330 return (x.rn_.code()*BH0 | rd.code()*B12 | imm2*B4 | x.rm_.code()); | 330 return (x.rn_.code()*BH0 | rd.code()*B12 | imm2*B4 | x.rm_.code()); |
331 } | 331 } |
332 switch (x.shift_op_) { | 332 switch (x.shift_op_) { |
333 case LSL: // TODO(rkrithiv): call method to encode lsl instruction | 333 case LSL: lsl_thumb(ip, Operand(x.rm_, LSL, x.shift_imm_), LeaveCC, al); |
334 case LSR: // TODO(rkrithiv): call method to encode lsr instruction | 334 break; |
335 case ASR: // TODO(rkrithiv): call method to encode asr instruction | 335 case LSR: lsr_thumb(ip, Operand(x.rm_, LSR, x.shift_imm_), LeaveCC, al); |
| 336 break; |
| 337 case ASR: asr_thumb(ip, Operand(x.rm_, ASR, x.shift_imm_), LeaveCC, al); |
| 338 break; |
336 default: return (x.rn_.code()*BH0 | rd.code()*B12 | x.rm_.code()); | 339 default: return (x.rn_.code()*BH0 | rd.code()*B12 | x.rm_.code()); |
337 } | 340 } |
338 return (x.rn_.code()*BH0 | rd.code()*B12 | ip.code()); | 341 return (x.rn_.code()*BH0 | rd.code()*B12 | ip.code()); |
339 } | 342 } |
340 | 343 |
341 | 344 |
342 Instr Assembler::thumb32_4reg(Register dst, Register src1, Register src2, | 345 Instr Assembler::thumb32_4reg(Register dst, Register src1, Register src2, |
343 Register srcA) { | 346 Register srcA) { |
344 return (src1.code()*BH0 | srcA.code()*B12 | dst.code()*B8 | src2.code()); | 347 return (src1.code()*BH0 | srcA.code()*B12 | dst.code()*B8 | src2.code()); |
345 } | 348 } |
(...skipping 459 matching lines...)
805 void Assembler::strd_imm_t1(Register src1, Register src2, | 808 void Assembler::strd_imm_t1(Register src1, Register src2, |
806 const MemOperand& dst) { | 809 const MemOperand& dst) { |
807 emit32(thumb32_mode6(STRD_32_IMM2, STRD_32_IMM2_OP2) | | 810 emit32(thumb32_mode6(STRD_32_IMM2, STRD_32_IMM2_OP2) | |
808 thumb32_3reg_zero_extend_imm8(src1, src2, dst)); | 811 thumb32_3reg_zero_extend_imm8(src1, src2, dst)); |
809 } | 812 } |
810 | 813 |
811 } } // namespace v8::internal | 814 } } // namespace v8::internal |
812 | 815 |
813 #endif // V8_TARGET_ARCH_ARM | 816 #endif // V8_TARGET_ARCH_ARM |
814 | 817 |
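Note on the encoding helpers in this hunk: thumb32_3reg_lsl packs the register-shifted operand into the instruction word by multiplying each field by a single-bit constant. A minimal standalone sketch of that packing follows; it assumes BH0 == 1 << 16, B12 == 1 << 12 and B4 == 1 << 4 (those values are not visible in this hunk, so treat them as assumptions), which would place Rn in bits 19:16, Rd in bits 15:12, the two-bit LSL amount in bits 5:4 and Rm in bits 3:0. The function name PackThumb32RegLsl is hypothetical and used only for illustration.

#include <cstdint>

// Hypothetical standalone illustration of the field packing done by
// thumb32_3reg_lsl; bit positions are assumptions inferred from the
// BH0/B12/B4 multipliers in the patch, not taken from constants-arm.h.
constexpr uint32_t kBH0 = 1u << 16;  // assumed: Rn field starts at bit 16
constexpr uint32_t kB12 = 1u << 12;  // assumed: Rd field starts at bit 12
constexpr uint32_t kB4  = 1u << 4;   // assumed: imm2 field starts at bit 4

uint32_t PackThumb32RegLsl(uint32_t rn, uint32_t rd,
                           uint32_t imm2, uint32_t rm) {
  // Mirrors: x.rn_.code()*BH0 | rd.code()*B12 | imm2*B4 | x.rm_.code()
  return rn * kBH0 | rd * kB12 | (imm2 & 3) * kB4 | rm;
}

For example, rn = 1, rd = 2, imm2 = 0, rm = 3 would yield 0x12003 under these assumptions.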