| OLD | NEW |
|---|---|
| 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. | 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. | 
| 2 // All Rights Reserved. | 2 // All Rights Reserved. | 
| 3 // | 3 // | 
| 4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without | 
| 5 // modification, are permitted provided that the following conditions | 5 // modification, are permitted provided that the following conditions | 
| 6 // are met: | 6 // are met: | 
| 7 // | 7 // | 
| 8 // - Redistributions of source code must retain the above copyright notice, | 8 // - Redistributions of source code must retain the above copyright notice, | 
| 9 // this list of conditions and the following disclaimer. | 9 // this list of conditions and the following disclaimer. | 
| 10 // | 10 // | 
| (...skipping 993 matching lines...) | (...skipping 993 matching lines...) |
| 1004 // Low-level code emission routines depending on the addressing mode. | 1004 // Low-level code emission routines depending on the addressing mode. | 
| 1005 // If this returns true then you have to use the rotate_imm and immed_8 | 1005 // If this returns true then you have to use the rotate_imm and immed_8 | 
| 1006 // that it returns, because it may have already changed the instruction | 1006 // that it returns, because it may have already changed the instruction | 
| 1007 // to match them! | 1007 // to match them! | 
| 1008 static bool fits_shifter(uint32_t imm32, | 1008 static bool fits_shifter(uint32_t imm32, | 
| 1009                          uint32_t* rotate_imm, | 1009                          uint32_t* rotate_imm, | 
| 1010                          uint32_t* immed_8, | 1010                          uint32_t* immed_8, | 
| 1011                          Instr* instr) { | 1011                          Instr* instr) { | 
| 1012   // imm32 must be unsigned. | 1012   // imm32 must be unsigned. | 
| 1013   for (int rot = 0; rot < 16; rot++) { | 1013   for (int rot = 0; rot < 16; rot++) { | 
| 1014     uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot)); | 1014     uint32_t imm8 = | 
|  | 1015         rot == 0 ? imm32 : (imm32 << 2 * rot) | (imm32 >> (32 - 2 * rot)); | 
| 1015     if ((imm8 <= 0xff)) { | 1016     if ((imm8 <= 0xff)) { | 
| 1016       *rotate_imm = rot; | 1017       *rotate_imm = rot; | 
| 1017       *immed_8 = imm8; | 1018       *immed_8 = imm8; | 
| 1018       return true; | 1019       return true; | 
| 1019     } | 1020     } | 
| 1020   } | 1021   } | 
| 1021   // If the opcode is one with a complementary version and the complementary | 1022   // If the opcode is one with a complementary version and the complementary | 
| 1022   // immediate fits, change the opcode. | 1023   // immediate fits, change the opcode. | 
| 1023   if (instr != NULL) { | 1024   if (instr != NULL) { | 
| 1024     if ((*instr & kMovMvnMask) == kMovMvnPattern) { | 1025     if ((*instr & kMovMvnMask) == kMovMvnPattern) { | 
| (...skipping 2982 matching lines...) | (...skipping 2982 matching lines...) |
| 4007       assm->instr_at_put( | 4008       assm->instr_at_put( | 
| 4008           rinfo.pc(), Assembler::SetLdrRegisterImmediateOffset(instr, offset)); | 4009           rinfo.pc(), Assembler::SetLdrRegisterImmediateOffset(instr, offset)); | 
| 4009     } | 4010     } | 
| 4010   } | 4011   } | 
| 4011 } | 4012 } | 
| 4012 | 4013 | 
| 4013 | 4014 | 
| 4014 } }  // namespace v8::internal | 4015 } }  // namespace v8::internal | 
| 4015 | 4016 | 
| 4016 #endif  // V8_TARGET_ARCH_ARM | 4017 #endif  // V8_TARGET_ARCH_ARM | 
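For context on the `fits_shifter` change above: an ARM data-processing immediate is an 8-bit value rotated right by twice a 4-bit rotate field, so the loop rotates `imm32` left by `2 * rot` and tests whether the result fits in 8 bits. The NEW side special-cases `rot == 0` because the OLD expression `imm32 >> (32 - 2*rot)` shifts a 32-bit value by 32 on that iteration, which is undefined behavior in C++. A minimal standalone sketch of the same check (the function name and driver below are illustrative, not V8's actual code):

```cpp
#include <cstdint>
#include <cstdio>

// Illustrative sketch, not V8's helper: returns true if imm32 can be
// encoded as an ARM shifter-operand immediate, i.e. an 8-bit value
// rotated right by 2 * rot for some rot in 0..15. Rotating imm32 *left*
// by 2 * rot inverts that encoding. The rot == 0 guard avoids
// `imm32 >> 32`, which is undefined behavior for a 32-bit operand.
static bool encodes_as_shifter_imm(uint32_t imm32) {
  for (int rot = 0; rot < 16; rot++) {
    uint32_t imm8 =
        rot == 0 ? imm32 : (imm32 << (2 * rot)) | (imm32 >> (32 - 2 * rot));
    if (imm8 <= 0xff) return true;
  }
  return false;
}

int main() {
  printf("%d\n", encodes_as_shifter_imm(0xff000000u));  // 1: 0xff ror 8
  printf("%d\n", encodes_as_shifter_imm(0x00001234u));  // 0: bits span > 8
  return 0;
}
```

The real `fits_shifter` additionally reports the winning `rotate_imm`/`immed_8` pair through out-parameters and, per its header comment, may rewrite the instruction to a complementary opcode (e.g. mov/mvn) when the complemented immediate fits, so callers must use the values it returns.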