OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #if V8_TARGET_ARCH_ARM | 5 #if V8_TARGET_ARCH_ARM |
6 | 6 |
7 #include "src/codegen.h" | 7 #include "src/codegen.h" |
8 #include "src/debug/debug.h" | 8 #include "src/debug/debug.h" |
9 #include "src/deoptimizer.h" | 9 #include "src/deoptimizer.h" |
10 #include "src/full-codegen/full-codegen.h" | 10 #include "src/full-codegen/full-codegen.h" |
(...skipping 1140 matching lines...)
1151 // This simulates the initial call to bytecode handlers in the interpreter entry | 1151 // This simulates the initial call to bytecode handlers in the interpreter entry |
1152 // trampoline. The return will never actually be taken, but our stack walker | 1152 // trampoline. The return will never actually be taken, but our stack walker |
1153 // uses this address to determine whether a frame is interpreted. | 1153 // uses this address to determine whether a frame is interpreted. |
1154 __ Move(lr, masm->isolate()->builtins()->InterpreterEntryTrampoline()); | 1154 __ Move(lr, masm->isolate()->builtins()->InterpreterEntryTrampoline()); |
1155 | 1155 |
1156 Generate_EnterBytecodeDispatch(masm); | 1156 Generate_EnterBytecodeDispatch(masm); |
1157 } | 1157 } |
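As the comment above notes, the stack walker classifies a frame as interpreted when its saved return address points into the InterpreterEntryTrampoline. A minimal sketch of that check, with illustrative names (the real test lives in V8's frame-walking code, not in this builtin):

  // Sketch only: assumes Code exposes instruction_start()/instruction_size(),
  // and treats any return address inside the trampoline as "interpreted".
  bool LooksLikeInterpretedFrame(Address return_address, Isolate* isolate) {
    Code* trampoline = *isolate->builtins()->InterpreterEntryTrampoline();
    Address begin = trampoline->instruction_start();
    Address end = begin + trampoline->instruction_size();
    return return_address >= begin && return_address < end;
  }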
1158 | 1158 |
1159 | 1159 |
1160 void Builtins::Generate_CompileLazy(MacroAssembler* masm) { | 1160 void Builtins::Generate_CompileLazy(MacroAssembler* masm) { |
| 1161 // ----------- S t a t e ------------- |
| 1162 // -- r3 : new target (preserved for callee) |
| 1163 // -- r1 : target function (preserved for callee) |
| 1164 // ----------------------------------- |
| 1165 // First, look up code; maybe we don't need to compile! |
| 1166 Label gotta_call_runtime, gotta_call_runtime_no_stack; |
| 1167 Label maybe_call_runtime; |
| 1168 Label try_shared; |
| 1169 Label loop_top, loop_bottom; |
| 1170 |
| 1171 Register closure = r1; |
| 1172 Register new_target = r3; |
| 1173 __ push(new_target); |
| 1174 __ push(closure); |
| 1175 |
| 1176 Register map = r0; |
| 1177 Register index = r2; |
| 1178 __ ldr(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); |
| 1179 __ ldr(map, |
| 1180 FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset)); |
| 1181 __ ldr(index, FieldMemOperand(map, FixedArray::kLengthOffset)); |
| 1182 __ cmp(index, Operand(Smi::FromInt(2))); |
| 1183 __ b(lt, &gotta_call_runtime); |
| 1184 |
| 1185 // Find literals. |
| 1186 // r3 : native context |
| 1187 // r2 : length / index |
| 1188 // r0 : optimized code map |
| 1189 // stack[0] : new target |
| 1190 // stack[4] : closure |
| 1191 Register native_context = r3; |
| 1192 __ ldr(native_context, NativeContextMemOperand()); |
| 1193 |
| 1194 __ bind(&loop_top); |
| 1195 Register temp = r1; |
| 1196 Register array_pointer = r5; |
| 1197 |
| 1198 // Does the native context match? |
| 1199 __ add(array_pointer, map, Operand::PointerOffsetFromSmiKey(index)); |
| 1200 __ ldr(temp, FieldMemOperand(array_pointer, |
| 1201 SharedFunctionInfo::OffsetToPreviousContext())); |
| 1202 __ ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset)); |
| 1203 __ cmp(temp, native_context); |
| 1204 __ b(ne, &loop_bottom); |
| 1205 // OSR id set to none? |
| 1206 __ ldr(temp, FieldMemOperand(array_pointer, |
| 1207 SharedFunctionInfo::OffsetToPreviousOsrAstId())); |
| 1208 const int bailout_id = BailoutId::None().ToInt(); |
| 1209 __ cmp(temp, Operand(Smi::FromInt(bailout_id))); |
| 1210 __ b(ne, &loop_bottom); |
| 1211 // Literals available? |
| 1212 __ ldr(temp, FieldMemOperand(array_pointer, |
| 1213 SharedFunctionInfo::OffsetToPreviousLiterals())); |
| 1214 __ ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset)); |
| 1215 __ JumpIfSmi(temp, &gotta_call_runtime); |
| 1216 |
| 1217 // Save the literals in the closure. |
| 1218 __ ldr(r4, MemOperand(sp, 0)); |
| 1219 __ str(temp, FieldMemOperand(r4, JSFunction::kLiteralsOffset)); |
| 1220 __ push(index); |
| 1221 __ RecordWriteField(r4, JSFunction::kLiteralsOffset, temp, index, |
| 1222 kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET, |
| 1223 OMIT_SMI_CHECK); |
| 1224 __ pop(index); |
| 1225 |
| 1226 // Code available? |
| 1227 Register entry = r4; |
| 1228 __ ldr(entry, |
| 1229 FieldMemOperand(array_pointer, |
| 1230 SharedFunctionInfo::OffsetToPreviousCachedCode())); |
| 1231 __ ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset)); |
| 1232 __ JumpIfSmi(entry, &maybe_call_runtime); |
| 1233 |
| 1234 // Found literals and code. Get them into the closure and return. |
| 1235 __ pop(closure); |
| 1236 // Store code entry in the closure. |
| 1237 __ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag)); |
| 1238 |
| 1239 Label install_optimized_code_and_tailcall; |
| 1240 __ bind(&install_optimized_code_and_tailcall); |
| 1241 __ str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset)); |
| 1242 |
| 1243 // Link the closure into the optimized function list. |
| 1244 // r4 : code entry |
| 1245 // r3 : native context |
| 1246 // r1 : closure |
| 1247 __ ldr(r5, |
| 1248 ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST)); |
| 1249 __ str(r5, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset)); |
| 1250 __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, r5, r0, |
| 1251 kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET, |
| 1252 OMIT_SMI_CHECK); |
| 1253 const int function_list_offset = |
| 1254 Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST); |
| 1255 __ str(closure, |
| 1256 ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST)); |
| 1257 // Save closure before the write barrier. |
| 1258 __ mov(r5, closure); |
| 1259 __ RecordWriteContextSlot(native_context, function_list_offset, closure, r0, |
| 1260 kLRHasNotBeenSaved, kDontSaveFPRegs); |
| 1261 __ mov(closure, r5); |
| 1262 __ pop(new_target); |
| 1263 __ Jump(entry); |
| 1264 |
| 1265 __ bind(&loop_bottom); |
| 1266 __ sub(index, index, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength))); |
| 1267 __ cmp(index, Operand(Smi::FromInt(1))); |
| 1268 __ b(gt, &loop_top); |
| 1269 |
| 1270 // We found neither literals nor code. |
| 1271 __ jmp(&gotta_call_runtime); |
| 1272 |
| 1273 __ bind(&maybe_call_runtime); |
| 1274 __ pop(closure); |
| 1275 |
| 1276 // Last possibility. Check the context-free optimized code map entry. |
| 1277 __ ldr(entry, FieldMemOperand(map, FixedArray::kHeaderSize + |
| 1278 SharedFunctionInfo::kSharedCodeIndex)); |
| 1279 __ ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset)); |
| 1280 __ JumpIfSmi(entry, &try_shared); |
| 1281 |
| 1282 // Store code entry in the closure. |
| 1283 __ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag)); |
| 1284 __ jmp(&install_optimized_code_and_tailcall); |
| 1285 |
| 1286 __ bind(&try_shared); |
| 1287 __ pop(new_target); |
| 1288 // Is the full code valid? |
| 1289 __ ldr(entry, |
| 1290 FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); |
| 1291 __ ldr(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset)); |
| 1292 __ ldr(r5, FieldMemOperand(entry, Code::kFlagsOffset)); |
| 1293 __ and_(r5, r5, Operand(Code::KindField::kMask)); |
| 1294 __ mov(r5, Operand(r5, LSR, Code::KindField::kShift)); |
| 1295 __ cmp(r5, Operand(Code::BUILTIN)); |
| 1296 __ b(eq, &gotta_call_runtime_no_stack); |
| 1297 // Yes, install the full code. |
| 1298 __ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag)); |
| 1299 __ str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset)); |
| 1300 __ Jump(entry); |
| 1301 |
| 1302 __ bind(&gotta_call_runtime); |
| 1303 __ pop(closure); |
| 1304 __ pop(new_target); |
| 1305 __ bind(&gotta_call_runtime_no_stack); |
1161 CallRuntimePassFunction(masm, Runtime::kCompileLazy); | 1306 CallRuntimePassFunction(masm, Runtime::kCompileLazy); |
1162 GenerateTailCallToReturnedCode(masm); | 1307 GenerateTailCallToReturnedCode(masm); |
1163 } | 1308 } |
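The added fast path walks the SharedFunctionInfo's optimized code map before giving up and calling the runtime. In rough C++-style pseudocode mirroring the labels above (a sketch only: the Entry*, Install*, and IsClearedWeakCell helpers are illustrative, not V8 API, and stack traffic, register shuffling, and write barriers are elided):

  // Entries are kEntryLength slots wide and are walked back to front; weak
  // cells read back as Smis once cleared. Header slot kSharedCodeIndex holds
  // context-independent code.
  Code* TryCompileLazyFastPath(JSFunction* closure, Context* native_context) {
    FixedArray* map = closure->shared()->optimized_code_map();
    for (int i = map->length(); i > 1; i -= SharedFunctionInfo::kEntryLength) {
      if (EntryContext(map, i) != native_context) continue;      // loop_bottom
      if (EntryOsrAstId(map, i) != BailoutId::None()) continue;  // loop_bottom
      Object* literals = EntryLiterals(map, i);
      if (IsClearedWeakCell(literals)) return nullptr;    // gotta_call_runtime
      SetLiterals(closure, literals);                     // plus write barrier
      Object* code = EntryCachedCode(map, i);
      if (!IsClearedWeakCell(code))                       // fast path: install
        return InstallOptimized(closure, code);           //   cached code
      // maybe_call_runtime: this entry's code cell was cleared, so try the
      // context-free slot in the map header next.
      Object* shared_code = SharedCodeSlot(map);          // kSharedCodeIndex
      if (!IsClearedWeakCell(shared_code))
        return InstallOptimized(closure, shared_code);
      // try_shared: fall back to the SharedFunctionInfo's own code, unless it
      // is still the lazy-compile builtin.
      Code* full = closure->shared()->code();
      if (full->kind() != Code::BUILTIN) return InstallFull(closure, full);
      return nullptr;                                     // gotta_call_runtime
    }
    return nullptr;  // no entry for this context: gotta_call_runtime
  }

Here InstallOptimized stands for storing the code entry in the closure and linking the closure into the native context's OPTIMIZED_FUNCTIONS_LIST (the install_optimized_code_and_tailcall block), while InstallFull only stores the code entry (the try_shared block); a nullptr result corresponds to falling through to CallRuntimePassFunction.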
1164 | 1309 |
1165 | 1310 |
1166 void Builtins::Generate_CompileOptimized(MacroAssembler* masm) { | 1311 void Builtins::Generate_CompileOptimized(MacroAssembler* masm) { |
1167 CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent); | 1312 CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent); |
1168 GenerateTailCallToReturnedCode(masm); | 1313 GenerateTailCallToReturnedCode(masm); |
1169 } | 1314 } |
1170 | 1315 |
(...skipping 1427 matching lines...)
2598 } | 2743 } |
2599 } | 2744 } |
2600 | 2745 |
2601 | 2746 |
2602 #undef __ | 2747 #undef __ |
2603 | 2748 |
2604 } // namespace internal | 2749 } // namespace internal |
2605 } // namespace v8 | 2750 } // namespace v8 |
2606 | 2751 |
2607 #endif // V8_TARGET_ARCH_ARM | 2752 #endif // V8_TARGET_ARCH_ARM |