OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #if V8_TARGET_ARCH_MIPS64 | 5 #if V8_TARGET_ARCH_MIPS64 |
6 | 6 |
7 #include "src/codegen.h" | 7 #include "src/codegen.h" |
8 #include "src/debug/debug.h" | 8 #include "src/debug/debug.h" |
9 #include "src/deoptimizer.h" | 9 #include "src/deoptimizer.h" |
10 #include "src/full-codegen/full-codegen.h" | 10 #include "src/full-codegen/full-codegen.h" |
(...skipping 1121 matching lines...)
1132 // This simulates the initial call to bytecode handlers in the interpreter | 1132 // This simulates the initial call to bytecode handlers in the interpreter
1133 // entry trampoline. The return will never actually be taken, but our stack | 1133 // entry trampoline. The return will never actually be taken, but our stack
1134 // walker uses this address to determine whether a frame is interpreted. | 1134 // walker uses this address to determine whether a frame is interpreted.
1135 __ li(ra, Operand(masm->isolate()->builtins()->InterpreterEntryTrampoline())); | 1135 __ li(ra, Operand(masm->isolate()->builtins()->InterpreterEntryTrampoline())); |
1136 | 1136 |
1137 Generate_EnterBytecodeDispatch(masm); | 1137 Generate_EnterBytecodeDispatch(masm); |
1138 } | 1138 } |
1139 | 1139 |
1140 | 1140 |
1141 void Builtins::Generate_CompileLazy(MacroAssembler* masm) { | 1141 void Builtins::Generate_CompileLazy(MacroAssembler* masm) { |
| 1142 // ----------- S t a t e ------------- |
| 1143 // -- a3 : new target (preserved for callee) |
| 1144 // -- a1 : target function (preserved for callee) |
| 1145 // ----------------------------------- |
| 1146 // First, look up code; maybe we don't need to compile!
| 1147 Label gotta_call_runtime, gotta_call_runtime_no_stack; |
| 1148 Label maybe_call_runtime; |
| 1149 Label try_shared; |
| 1150 Label loop_top, loop_bottom; |
| 1151 |
| 1152 Register closure = a1; |
| 1153 Register new_target = a3; |
| 1154 __ push(new_target); |
| 1155 __ push(closure); |
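| // (a1/a3 are saved here because the search below reuses them as
| // scratch: temp aliases a1 and native_context aliases a3. Both are
| // restored from the stack before any tail call.)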
| 1156 |
| 1157 Register map = a0; |
| 1158 Register index = a2; |
| 1159 __ ld(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); |
| 1160 __ ld(map, FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset)); |
| 1161 __ ld(index, FieldMemOperand(map, FixedArray::kLengthOffset)); |
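| // The length is a Smi, so the Smi comparison below works directly;
| // a length below 2 means the map holds no cached entries.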
| 1162 __ Branch(&gotta_call_runtime, lt, index, Operand(Smi::FromInt(2))); |
| 1163 |
| 1164 // Find literals. |
| 1165 // a3 : native context |
| 1166 // a2 : length / index |
| 1167 // a0 : optimized code map |
| 1168 // stack[0] : closure
| 1169 // stack[8] : new target
| 1170 Register native_context = a3; |
| 1171 __ ld(native_context, NativeContextMemOperand()); |
| 1172 |
| 1173 __ bind(&loop_top); |
| 1174 Register temp = a1; |
| 1175 Register array_pointer = a5; |
| 1176 |
| 1177 // Does the native context match? |
| 1178 __ SmiScale(at, index, kPointerSizeLog2); |
| 1179 __ Daddu(array_pointer, map, Operand(at)); |
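| // array_pointer now points one-past the current entry; the
| // OffsetToPrevious* accessors below address entry fields relative
| // to this end pointer (hence the "Previous" naming).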
| 1180 __ ld(temp, FieldMemOperand(array_pointer, |
| 1181 SharedFunctionInfo::OffsetToPreviousContext())); |
| 1182 __ ld(temp, FieldMemOperand(temp, WeakCell::kValueOffset)); |
| 1183 __ Branch(&loop_bottom, ne, temp, Operand(native_context)); |
| 1184 // OSR id set to none? |
| 1185 __ ld(temp, FieldMemOperand(array_pointer, |
| 1186 SharedFunctionInfo::OffsetToPreviousOsrAstId())); |
| 1187 const int bailout_id = BailoutId::None().ToInt(); |
| 1188 __ Branch(&loop_bottom, ne, temp, Operand(Smi::FromInt(bailout_id))); |
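| // Only entries cached for regular compilation (OSR ast id None)
| // can be installed here; OSR entries are skipped.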
| 1189 // Literals available? |
| 1190 __ ld(temp, FieldMemOperand(array_pointer, |
| 1191 SharedFunctionInfo::OffsetToPreviousLiterals())); |
| 1192 __ ld(temp, FieldMemOperand(temp, WeakCell::kValueOffset)); |
| 1193 __ JumpIfSmi(temp, &gotta_call_runtime); |
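| // A cleared WeakCell holds a Smi (zero) as its value, so JumpIfSmi
| // detects dead cache entries.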
| 1194 |
| 1195 // Save the literals in the closure. |
| 1196 __ ld(a4, MemOperand(sp, 0)); |
| 1197 __ sd(temp, FieldMemOperand(a4, JSFunction::kLiteralsOffset)); |
| 1198 __ push(index); |
| 1199 __ RecordWriteField(a4, JSFunction::kLiteralsOffset, temp, index, |
| 1200 kRAHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET, |
| 1201 OMIT_SMI_CHECK); |
| 1202 __ pop(index); |
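| // index doubles as the write-barrier scratch register above, which
| // is why it is saved and restored around the RecordWriteField call.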
| 1203 |
| 1204 // Code available? |
| 1205 Register entry = a4; |
| 1206 __ ld(entry, |
| 1207 FieldMemOperand(array_pointer, |
| 1208 SharedFunctionInfo::OffsetToPreviousCachedCode())); |
| 1209 __ ld(entry, FieldMemOperand(entry, WeakCell::kValueOffset)); |
| 1210 __ JumpIfSmi(entry, &maybe_call_runtime); |
| 1211 |
| 1212 // Found literals and code. Get them into the closure and return. |
| 1213 __ pop(closure); |
| 1214 // Store code entry in the closure. |
| 1215 __ Daddu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag)); |
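| // The Daddu converts the Code heap-object pointer into the raw
| // entry address: skip the header and drop the heap-object tag.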
| 1216 |
| 1217 Label install_optimized_code_and_tailcall; |
| 1218 __ bind(&install_optimized_code_and_tailcall); |
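| // Also entered from the context-free code path below, with closure
| // already popped and entry already converted to an entry address.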
| 1219 __ sd(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset)); |
| 1220 |
| 1221 // Link the closure into the optimized function list. |
| 1222 // a4 : code entry |
| 1223 // a3 : native context |
| 1224 // a1 : closure |
| 1225 __ ld(a5, |
| 1226 ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST)); |
| 1227 __ sd(a5, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset)); |
| 1228 __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, a5, a0, |
| 1229 kRAHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET, |
| 1230 OMIT_SMI_CHECK); |
| 1231 const int function_list_offset = |
| 1232 Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST); |
| 1233 __ sd(closure, |
| 1234 ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST)); |
| 1235 // Save closure before the write barrier. |
| 1236 __ mov(a5, closure); |
| 1237 __ RecordWriteContextSlot(native_context, function_list_offset, closure, a0, |
| 1238 kRAHasNotBeenSaved, kDontSaveFPRegs); |
| 1239 __ mov(closure, a5); |
| 1240 __ pop(new_target); |
| 1241 __ Jump(entry); |
| 1242 |
| 1243 __ bind(&loop_bottom); |
| 1244 __ Dsubu(index, index, |
| 1245 Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength))); |
| 1246 __ Branch(&loop_top, gt, index, Operand(Smi::FromInt(1))); |
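| // Entries are scanned from the end of the map towards the front,
| // kEntryLength slots at a time; index > 1 keeps the scan inside
| // the entry region.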
| 1247 |
| 1248 // We found neither literals nor code. |
| 1249 __ jmp(&gotta_call_runtime); |
| 1250 |
| 1251 __ bind(&maybe_call_runtime); |
| 1252 __ pop(closure); |
| 1253 |
| 1254 // Last possibility. Check the context-free optimized code map entry.
| 1255 __ ld(entry, FieldMemOperand(map, FixedArray::kHeaderSize + |
| 1256 SharedFunctionInfo::kSharedCodeIndex)); |
| 1257 __ ld(entry, FieldMemOperand(entry, WeakCell::kValueOffset)); |
| 1258 __ JumpIfSmi(entry, &try_shared); |
| 1259 |
| 1260 // Store code entry in the closure. |
| 1261 __ Daddu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag)); |
| 1262 __ jmp(&install_optimized_code_and_tailcall); |
| 1263 |
| 1264 __ bind(&try_shared); |
| 1265 __ pop(new_target); |
| 1266 // Is the full code valid? |
| 1267 __ ld(entry, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); |
| 1268 __ ld(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset)); |
| 1269 __ ld(a5, FieldMemOperand(entry, Code::kFlagsOffset)); |
| 1270 __ And(a5, a5, Operand(Code::KindField::kMask)); |
| 1271 __ dsrl(a5, a5, Code::KindField::kShift); |
| 1272 __ Branch(&gotta_call_runtime_no_stack, eq, a5, Operand(Code::BUILTIN)); |
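| // Code of kind BUILTIN means the shared function still holds a stub
| // (e.g. the lazy-compile builtin) rather than real compiled code.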
| 1273 // Yes, install the full code. |
| 1274 __ Daddu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag)); |
| 1275 __ sd(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset)); |
| 1276 __ Jump(entry); |
| 1277 |
| 1278 __ bind(&gotta_call_runtime); |
| 1279 __ pop(closure); |
| 1280 __ pop(new_target); |
| 1281 __ bind(&gotta_call_runtime_no_stack); |
1142 CallRuntimePassFunction(masm, Runtime::kCompileLazy); | 1282 CallRuntimePassFunction(masm, Runtime::kCompileLazy); |
1143 GenerateTailCallToReturnedCode(masm); | 1283 GenerateTailCallToReturnedCode(masm); |
1144 } | 1284 } |
1145 | 1285 |
1146 | 1286 |
1147 void Builtins::Generate_CompileOptimized(MacroAssembler* masm) { | 1287 void Builtins::Generate_CompileOptimized(MacroAssembler* masm) { |
1148 CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent); | 1288 CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent); |
1149 GenerateTailCallToReturnedCode(masm); | 1289 GenerateTailCallToReturnedCode(masm); |
1150 } | 1290 } |
1151 | 1291 |
(...skipping 1515 matching lines...)
2667 } | 2807 } |
2668 } | 2808 } |
2669 | 2809 |
2670 | 2810 |
2671 #undef __ | 2811 #undef __ |
2672 | 2812 |
2673 } // namespace internal | 2813 } // namespace internal |
2674 } // namespace v8 | 2814 } // namespace v8 |
2675 | 2815 |
2676 #endif // V8_TARGET_ARCH_MIPS64 | 2816 #endif // V8_TARGET_ARCH_MIPS64 |