OLD | NEW |
1 // Copyright 2006-2009 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2009 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 115 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
126 Condition cond) { | 126 Condition cond) { |
127 // Set lr for return at current pc + 8. | 127 // Set lr for return at current pc + 8. |
128 mov(lr, Operand(pc), LeaveCC, cond); | 128 mov(lr, Operand(pc), LeaveCC, cond); |
129 // Emit a ldr<cond> pc, [pc + offset of target in constant pool]. | 129 // Emit a ldr<cond> pc, [pc + offset of target in constant pool]. |
130 mov(pc, Operand(target, rmode), LeaveCC, cond); | 130 mov(pc, Operand(target, rmode), LeaveCC, cond); |
131 // If USE_BLX is defined, we could emit a 'mov ip, target', followed by a | 131 // If USE_BLX is defined, we could emit a 'mov ip, target', followed by a |
132 // 'blx ip'; however, the code would not be shorter than the above sequence | 132 // 'blx ip'; however, the code would not be shorter than the above sequence |
133 // and the target address of the call would be referenced by the first | 133 // and the target address of the call would be referenced by the first |
134 // instruction rather than the second one, which would make it harder to patch | 134 // instruction rather than the second one, which would make it harder to patch |
135 // (two instructions before the return address, instead of one). | 135 // (two instructions before the return address, instead of one). |
136 ASSERT(kCallTargetAddressOffset == sizeof(Instr)); | 136 ASSERT(kCallTargetAddressOffset == kInstrSize); |
137 } | 137 } |
138 | 138 |
139 | 139 |
140 void MacroAssembler::Call(byte* target, RelocInfo::Mode rmode, | 140 void MacroAssembler::Call(byte* target, RelocInfo::Mode rmode, |
141 Condition cond) { | 141 Condition cond) { |
142 ASSERT(!RelocInfo::IsCodeTarget(rmode)); | 142 ASSERT(!RelocInfo::IsCodeTarget(rmode)); |
143 Call(reinterpret_cast<intptr_t>(target), rmode, cond); | 143 Call(reinterpret_cast<intptr_t>(target), rmode, cond); |
144 } | 144 } |
145 | 145 |
146 | 146 |
(...skipping 13 matching lines...) Expand all Loading... |
160 #endif | 160 #endif |
161 } | 161 } |
162 | 162 |
163 | 163 |
164 void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) { | 164 void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) { |
165 // Empty the const pool. | 165 // Empty the const pool. |
166 CheckConstPool(true, true); | 166 CheckConstPool(true, true); |
167 add(pc, pc, Operand(index, | 167 add(pc, pc, Operand(index, |
168 LSL, | 168 LSL, |
169 assembler::arm::Instr::kInstrSizeLog2 - kSmiTagSize)); | 169 assembler::arm::Instr::kInstrSizeLog2 - kSmiTagSize)); |
170 BlockConstPoolBefore(pc_offset() + (targets.length() + 1) * sizeof(Instr)); | 170 BlockConstPoolBefore(pc_offset() + (targets.length() + 1) * kInstrSize); |
171 nop(); // Jump table alignment. | 171 nop(); // Jump table alignment. |
172 for (int i = 0; i < targets.length(); i++) { | 172 for (int i = 0; i < targets.length(); i++) { |
173 b(targets[i]); | 173 b(targets[i]); |
174 } | 174 } |
175 } | 175 } |
176 | 176 |
177 | 177 |
178 void MacroAssembler::LoadRoot(Register destination, | 178 void MacroAssembler::LoadRoot(Register destination, |
179 Heap::RootListIndex index, | 179 Heap::RootListIndex index, |
180 Condition cond) { | 180 Condition cond) { |
(...skipping 866 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1047 Jump(code, RelocInfo::CODE_TARGET); | 1047 Jump(code, RelocInfo::CODE_TARGET); |
1048 } | 1048 } |
1049 | 1049 |
1050 if (!resolved) { | 1050 if (!resolved) { |
1051 const char* name = Builtins::GetName(id); | 1051 const char* name = Builtins::GetName(id); |
1052 int argc = Builtins::GetArgumentsCount(id); | 1052 int argc = Builtins::GetArgumentsCount(id); |
1053 uint32_t flags = | 1053 uint32_t flags = |
1054 Bootstrapper::FixupFlagsArgumentsCount::encode(argc) | | 1054 Bootstrapper::FixupFlagsArgumentsCount::encode(argc) | |
1055 Bootstrapper::FixupFlagsIsPCRelative::encode(true) | | 1055 Bootstrapper::FixupFlagsIsPCRelative::encode(true) | |
1056 Bootstrapper::FixupFlagsUseCodeObject::encode(false); | 1056 Bootstrapper::FixupFlagsUseCodeObject::encode(false); |
1057 Unresolved entry = { pc_offset() - sizeof(Instr), flags, name }; | 1057 Unresolved entry = { pc_offset() - kInstrSize, flags, name }; |
1058 unresolved_.Add(entry); | 1058 unresolved_.Add(entry); |
1059 } | 1059 } |
1060 } | 1060 } |
1061 | 1061 |
1062 | 1062 |
1063 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { | 1063 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { |
1064 bool resolved; | 1064 bool resolved; |
1065 Handle<Code> code = ResolveBuiltin(id, &resolved); | 1065 Handle<Code> code = ResolveBuiltin(id, &resolved); |
1066 | 1066 |
1067 mov(target, Operand(code)); | 1067 mov(target, Operand(code)); |
1068 if (!resolved) { | 1068 if (!resolved) { |
1069 const char* name = Builtins::GetName(id); | 1069 const char* name = Builtins::GetName(id); |
1070 int argc = Builtins::GetArgumentsCount(id); | 1070 int argc = Builtins::GetArgumentsCount(id); |
1071 uint32_t flags = | 1071 uint32_t flags = |
1072 Bootstrapper::FixupFlagsArgumentsCount::encode(argc) | | 1072 Bootstrapper::FixupFlagsArgumentsCount::encode(argc) | |
1073 Bootstrapper::FixupFlagsIsPCRelative::encode(true) | | 1073 Bootstrapper::FixupFlagsIsPCRelative::encode(true) | |
1074 Bootstrapper::FixupFlagsUseCodeObject::encode(true); | 1074 Bootstrapper::FixupFlagsUseCodeObject::encode(true); |
1075 Unresolved entry = { pc_offset() - sizeof(Instr), flags, name }; | 1075 Unresolved entry = { pc_offset() - kInstrSize, flags, name }; |
1076 unresolved_.Add(entry); | 1076 unresolved_.Add(entry); |
1077 } | 1077 } |
1078 | 1078 |
1079 add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag)); | 1079 add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag)); |
1080 } | 1080 } |
1081 | 1081 |
1082 | 1082 |
1083 void MacroAssembler::SetCounter(StatsCounter* counter, int value, | 1083 void MacroAssembler::SetCounter(StatsCounter* counter, int value, |
1084 Register scratch1, Register scratch2) { | 1084 Register scratch1, Register scratch2) { |
1085 if (FLAG_native_code_counters && counter->Enabled()) { | 1085 if (FLAG_native_code_counters && counter->Enabled()) { |
(...skipping 60 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1146 #endif | 1146 #endif |
1147 mov(r0, Operand(p0)); | 1147 mov(r0, Operand(p0)); |
1148 push(r0); | 1148 push(r0); |
1149 mov(r0, Operand(Smi::FromInt(p1 - p0))); | 1149 mov(r0, Operand(Smi::FromInt(p1 - p0))); |
1150 push(r0); | 1150 push(r0); |
1151 CallRuntime(Runtime::kAbort, 2); | 1151 CallRuntime(Runtime::kAbort, 2); |
1152 // will not return here | 1152 // will not return here |
1153 } | 1153 } |
1154 | 1154 |
1155 | 1155 |
| 1156 #ifdef ENABLE_DEBUGGER_SUPPORT |
| 1157 CodePatcher::CodePatcher(byte* address, int instructions) |
| 1158 : address_(address), |
| 1159 instructions_(instructions), |
| 1160 size_(instructions * Assembler::kInstrSize), |
| 1161 masm_(address, size_ + Assembler::kGap) { |
| 1162 // Create a new macro assembler pointing to the address of the code to patch. |
 | 1163 // The size is adjusted with kGap in order for the assembler to generate size |
| 1164 // bytes of instructions without failing with buffer size constraints. |
| 1165 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); |
| 1166 } |
| 1167 |
| 1168 |
| 1169 CodePatcher::~CodePatcher() { |
| 1170 // Indicate that code has changed. |
| 1171 CPU::FlushICache(address_, size_); |
| 1172 |
| 1173 // Check that the code was patched as expected. |
| 1174 ASSERT(masm_.pc_ == address_ + size_); |
| 1175 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); |
| 1176 } |
| 1177 |
| 1178 |
| 1179 void CodePatcher::Emit(Instr x) { |
| 1180 masm()->emit(x); |
| 1181 } |
| 1182 |
| 1183 |
| 1184 void CodePatcher::Emit(Address addr) { |
| 1185 masm()->emit(reinterpret_cast<Instr>(addr)); |
| 1186 } |
| 1187 #endif // ENABLE_DEBUGGER_SUPPORT |
| 1188 |
| 1189 |
1156 } } // namespace v8::internal | 1190 } } // namespace v8::internal |
OLD | NEW |