| OLD | NEW | 
|---|
| 1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. | 
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without | 
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are | 
| 4 // met: | 4 // met: | 
| 5 // | 5 // | 
| 6 //     * Redistributions of source code must retain the above copyright | 6 //     * Redistributions of source code must retain the above copyright | 
| 7 //       notice, this list of conditions and the following disclaimer. | 7 //       notice, this list of conditions and the following disclaimer. | 
| 8 //     * Redistributions in binary form must reproduce the above | 8 //     * Redistributions in binary form must reproduce the above | 
| 9 //       copyright notice, this list of conditions and the following | 9 //       copyright notice, this list of conditions and the following | 
| 10 //       disclaimer in the documentation and/or other materials provided | 10 //       disclaimer in the documentation and/or other materials provided | 
| (...skipping 467 matching lines...) Expand 10 before | Expand all | Expand 10 after  Loading... | 
| 478 | 478 | 
| 479 void MacroAssembler::PopSafepointRegistersAndDoubles() { | 479 void MacroAssembler::PopSafepointRegistersAndDoubles() { | 
| 480   for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) { | 480   for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) { | 
| 481     vldr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize); | 481     vldr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize); | 
| 482   } | 482   } | 
| 483   add(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters * | 483   add(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters * | 
| 484                       kDoubleSize)); | 484                       kDoubleSize)); | 
| 485   PopSafepointRegisters(); | 485   PopSafepointRegisters(); | 
| 486 } | 486 } | 
| 487 | 487 | 
|  | 488 void MacroAssembler::StoreToSafepointRegisterSlot(Register reg) { | 
|  | 489   str(reg, SafepointRegisterSlot(reg)); | 
|  | 490 } | 
|  | 491 | 
|  | 492 | 
| 488 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { | 493 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { | 
| 489   // The registers are pushed starting with the highest encoding, | 494   // The registers are pushed starting with the highest encoding, | 
| 490   // which means that lowest encodings are closest to the stack pointer. | 495   // which means that lowest encodings are closest to the stack pointer. | 
| 491   ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters); | 496   ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters); | 
| 492   return reg_code; | 497   return reg_code; | 
| 493 } | 498 } | 
| 494 | 499 | 
| 495 | 500 | 
|  | 501 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) { | 
|  | 502   return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kInstrSize); | 
|  | 503 } | 
|  | 504 | 
|  | 505 | 
| 496 void MacroAssembler::Ldrd(Register dst1, Register dst2, | 506 void MacroAssembler::Ldrd(Register dst1, Register dst2, | 
| 497                           const MemOperand& src, Condition cond) { | 507                           const MemOperand& src, Condition cond) { | 
| 498   ASSERT(src.rm().is(no_reg)); | 508   ASSERT(src.rm().is(no_reg)); | 
| 499   ASSERT(!dst1.is(lr));  // r14. | 509   ASSERT(!dst1.is(lr));  // r14. | 
| 500   ASSERT_EQ(0, dst1.code() % 2); | 510   ASSERT_EQ(0, dst1.code() % 2); | 
| 501   ASSERT_EQ(dst1.code() + 1, dst2.code()); | 511   ASSERT_EQ(dst1.code() + 1, dst2.code()); | 
| 502 | 512 | 
| 503   // Generate two ldr instructions if ldrd is not available. | 513   // Generate two ldr instructions if ldrd is not available. | 
| 504   if (CpuFeatures::IsSupported(ARMv7)) { | 514   if (CpuFeatures::IsSupported(ARMv7)) { | 
| 505     CpuFeatures::Scope scope(ARMv7); | 515     CpuFeatures::Scope scope(ARMv7); | 
| (...skipping 1672 matching lines...) Expand 10 before | Expand all | Expand 10 after  Loading... | 
| 2178   Call(function); | 2188   Call(function); | 
| 2179   int stack_passed_arguments = (num_arguments <= 4) ? 0 : num_arguments - 4; | 2189   int stack_passed_arguments = (num_arguments <= 4) ? 0 : num_arguments - 4; | 
| 2180   if (OS::ActivationFrameAlignment() > kPointerSize) { | 2190   if (OS::ActivationFrameAlignment() > kPointerSize) { | 
| 2181     ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize)); | 2191     ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize)); | 
| 2182   } else { | 2192   } else { | 
| 2183     add(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize))); | 2193     add(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize))); | 
| 2184   } | 2194   } | 
| 2185 } | 2195 } | 
| 2186 | 2196 | 
| 2187 | 2197 | 
void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
                                               Register result) {
  // Computes, in |result|, the address of the constant-pool entry loaded by
  // the pc-relative "ldr rd, [pc, #offset]" instruction found at
  // |ldr_location|.  Clobbers |result| only.
  // Bits [11:0] of an ARM ldr instruction encode the immediate offset.
  const uint32_t kLdrOffsetMask = (1 << 12) - 1;
  // Reading pc on ARM yields the instruction's own address plus 8 (two
  // instructions ahead); this compensates for that bias.  Written here as
  // 2 * kPointerSize, which equals 2 * kInstrSize (== 8) on ARM.
  const int32_t kPCRegOffset = 2 * kPointerSize;
  ldr(result, MemOperand(ldr_location));
  if (FLAG_debug_code) {
    // Check that the instruction is a ldr reg, [pc + offset].
    and_(result, result, Operand(kLdrPCPattern));
    cmp(result, Operand(kLdrPCPattern));
    Check(eq, "The instruction to patch should be a load from pc.");
    // Result was clobbered. Restore it.
    ldr(result, MemOperand(ldr_location));
  }
  // Get the address of the constant: instruction address + encoded offset
  // + pc read-ahead bias.
  // NOTE(review): this assumes the offset is added (U bit set), i.e. the
  // constant pool follows the load — confirm kLdrPCPattern guarantees that.
  and_(result, result, Operand(kLdrOffsetMask));
  add(result, ldr_location, Operand(result));
  add(result, result, Operand(kPCRegOffset));
}
|  | 2216 | 
|  | 2217 | 
| 2188 #ifdef ENABLE_DEBUGGER_SUPPORT | 2218 #ifdef ENABLE_DEBUGGER_SUPPORT | 
| 2189 CodePatcher::CodePatcher(byte* address, int instructions) | 2219 CodePatcher::CodePatcher(byte* address, int instructions) | 
| 2190     : address_(address), | 2220     : address_(address), | 
| 2191       instructions_(instructions), | 2221       instructions_(instructions), | 
| 2192       size_(instructions * Assembler::kInstrSize), | 2222       size_(instructions * Assembler::kInstrSize), | 
| 2193       masm_(address, size_ + Assembler::kGap) { | 2223       masm_(address, size_ + Assembler::kGap) { | 
| 2194   // Create a new macro assembler pointing to the address of the code to patch. | 2224   // Create a new macro assembler pointing to the address of the code to patch. | 
| 2195   // The size is adjusted with kGap on order for the assembler to generate size | 2225   // The size is adjusted with kGap in order for the assembler to generate size | 
| 2196   // bytes of instructions without failing with buffer size constraints. | 2226   // bytes of instructions without failing with buffer size constraints. | 
| 2197   ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); | 2227   ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); | 
| (...skipping 17 matching lines...) Expand all  Loading... | 
| 2215 | 2245 | 
| 2216 void CodePatcher::Emit(Address addr) { | 2246 void CodePatcher::Emit(Address addr) { | 
| 2217   masm()->emit(reinterpret_cast<Instr>(addr)); | 2247   masm()->emit(reinterpret_cast<Instr>(addr)); | 
| 2218 } | 2248 } | 
| 2219 #endif  // ENABLE_DEBUGGER_SUPPORT | 2249 #endif  // ENABLE_DEBUGGER_SUPPORT | 
| 2220 | 2250 | 
| 2221 | 2251 | 
| 2222 } }  // namespace v8::internal | 2252 } }  // namespace v8::internal | 
| 2223 | 2253 | 
| 2224 #endif  // V8_TARGET_ARCH_ARM | 2254 #endif  // V8_TARGET_ARCH_ARM | 
| OLD | NEW | 
|---|