OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #if V8_TARGET_ARCH_MIPS | 5 #if V8_TARGET_ARCH_MIPS |
6 | 6 |
7 #include "src/codegen.h" | 7 #include "src/codegen.h" |
8 #include "src/debug/debug.h" | 8 #include "src/debug/debug.h" |
9 #include "src/deoptimizer.h" | 9 #include "src/deoptimizer.h" |
10 #include "src/full-codegen/full-codegen.h" | 10 #include "src/full-codegen/full-codegen.h" |
(...skipping 1131 matching lines...)
1142 // This simulates the initial call to bytecode handlers in interpreter entry | 1142 // This simulates the initial call to bytecode handlers in interpreter entry |
1143 // trampoline. The return will never actually be taken, but our stack walker | 1143 // trampoline. The return will never actually be taken, but our stack walker |
1144 // uses this address to determine whether a frame is interpreted. | 1144 // uses this address to determine whether a frame is interpreted. |
1145 __ li(ra, Operand(masm->isolate()->builtins()->InterpreterEntryTrampoline())); | 1145 __ li(ra, Operand(masm->isolate()->builtins()->InterpreterEntryTrampoline())); |
1146 | 1146 |
1147 Generate_EnterBytecodeDispatch(masm); | 1147 Generate_EnterBytecodeDispatch(masm); |
1148 } | 1148 } |
1149 | 1149 |
1150 | 1150 |
1151 void Builtins::Generate_CompileLazy(MacroAssembler* masm) { | 1151 void Builtins::Generate_CompileLazy(MacroAssembler* masm) { |
1152 // ----------- S t a t e ------------- | |
1153 // -- a3 : new target (preserved for callee) | |
1154 // -- a1 : target function (preserved for callee) | |
1155 // ----------------------------------- | |
1156 // First lookup code, maybe we don't need to compile! | |
1157 Label gotta_call_runtime, gotta_call_runtime_no_stack; | |
1158 Label maybe_call_runtime; | |
1159 Label try_shared; | |
1160 Label loop_top, loop_bottom; | |
1161 | |
1162 Register closure = a1; | |
1163 Register new_target = a3; | |
1164 __ push(new_target); | |
1165 __ push(closure); | |
1166 | |
1167 Register map = a0; | |
1168 Register index = a2; | |
1169 __ lw(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); | |
1170 __ lw(map, FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset)); | |
1171 __ lw(index, FieldMemOperand(map, FixedArray::kLengthOffset)); | |
1172 __ Branch(&gotta_call_runtime, lt, index, Operand(Smi::FromInt(2))); | |
1173 | |
1174 // Find literals. | |
1175 // a3 : native context | |
1176 // a2 : length / index | |
1177 // a0 : optimized code map | |
1178 // stack[0] : new target | |
1179 // stack[4] : closure | |
1180 Register native_context = a3; | |
1181 __ lw(native_context, NativeContextMemOperand()); | |
1182 | |
1183 __ bind(&loop_top); | |
1184 Register temp = a1; | |
1185 Register array_pointer = t1; | |
1186 | |
1187 // Does the native context match? | |
1188 __ sll(at, index, kPointerSizeLog2 - kSmiTagSize); | |
1189 __ Addu(array_pointer, map, Operand(at)); | |
1190 __ lw(temp, FieldMemOperand(array_pointer, | |
1191 SharedFunctionInfo::OffsetToPreviousContext())); | |
1192 __ lw(temp, FieldMemOperand(temp, WeakCell::kValueOffset)); | |
1193 __ Branch(&loop_bottom, ne, temp, Operand(native_context)); | |
1194 // OSR id set to none? | |
1195 __ lw(temp, FieldMemOperand(array_pointer, | |
1196 SharedFunctionInfo::OffsetToPreviousOsrAstId())); | |
1197 const int bailout_id = BailoutId::None().ToInt(); | |
1198 __ Branch(&loop_bottom, ne, temp, Operand(Smi::FromInt(bailout_id))); | |
1199 // Literals available? | |
1200 __ lw(temp, FieldMemOperand(array_pointer, | |
1201 SharedFunctionInfo::OffsetToPreviousLiterals())); | |
1202 __ lw(temp, FieldMemOperand(temp, WeakCell::kValueOffset)); | |
1203 __ JumpIfSmi(temp, &gotta_call_runtime); | |
1204 | |
1205 // Save the literals in the closure. | |
1206 __ lw(t0, MemOperand(sp, 0)); | |
1207 __ sw(temp, FieldMemOperand(t0, JSFunction::kLiteralsOffset)); | |
1208 __ push(index); | |
1209 __ RecordWriteField(t0, JSFunction::kLiteralsOffset, temp, index, | |
1210 kRAHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET, | |
1211 OMIT_SMI_CHECK); | |
1212 __ pop(index); | |
1213 | |
1214 // Code available? | |
1215 Register entry = t0; | |
1216 __ lw(entry, | |
1217 FieldMemOperand(array_pointer, | |
1218 SharedFunctionInfo::OffsetToPreviousCachedCode())); | |
1219 __ lw(entry, FieldMemOperand(entry, WeakCell::kValueOffset)); | |
1220 __ JumpIfSmi(entry, &maybe_call_runtime); | |
1221 | |
1222 // Found literals and code. Get them into the closure and return. | |
1223 __ pop(closure); | |
1224 // Store code entry in the closure. | |
1225 __ Addu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag)); | |
1226 | |
1227 Label install_optimized_code_and_tailcall; | |
1228 __ bind(&install_optimized_code_and_tailcall); | |
1229 __ sw(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset)); | |
1230 | |
1231 // Link the closure into the optimized function list. | |
1232 // t0 : code entry | |
1233 // a3 : native context | |
1234 // a1 : closure | |
1235 __ lw(t1, | |
1236 ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST)); | |
1237 __ sw(t1, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset)); | |
1238 __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, t1, a0, | |
1239 kRAHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET, | |
1240 OMIT_SMI_CHECK); | |
1241 const int function_list_offset = | |
1242 Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST); | |
1243 __ sw(closure, | |
1244 ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST)); | |
1245 // Save closure before the write barrier. | |
1246 __ mov(t1, closure); | |
1247 __ RecordWriteContextSlot(native_context, function_list_offset, closure, a0, | |
1248 kRAHasNotBeenSaved, kDontSaveFPRegs); | |
1249 __ mov(closure, t1); | |
1250 __ pop(new_target); | |
1251 __ Jump(entry); | |
1252 | |
1253 __ bind(&loop_bottom); | |
1254 __ Subu(index, index, | |
1255 Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength))); | |
1256 __ Branch(&loop_top, gt, index, Operand(Smi::FromInt(1))); | |
1257 | |
1258 // We found neither literals nor code. | |
1259 __ jmp(&gotta_call_runtime); | |
1260 | |
1261 __ bind(&maybe_call_runtime); | |
1262 __ pop(closure); | |
1263 | |
1264 // Last possibility. Check the context free optimized code map entry. | |
1265 __ lw(entry, FieldMemOperand(map, FixedArray::kHeaderSize + | |
1266 SharedFunctionInfo::kSharedCodeIndex)); | |
1267 __ lw(entry, FieldMemOperand(entry, WeakCell::kValueOffset)); | |
1268 __ JumpIfSmi(entry, &try_shared); | |
1269 | |
1270 // Store code entry in the closure. | |
1271 __ Addu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag)); | |
1272 __ jmp(&install_optimized_code_and_tailcall); | |
1273 | |
1274 __ bind(&try_shared); | |
1275 __ pop(new_target); | |
1276 // Is the full code valid? | |
1277 __ lw(entry, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); | |
1278 __ lw(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset)); | |
1279 __ lw(t1, FieldMemOperand(entry, Code::kFlagsOffset)); | |
1280 __ And(t1, t1, Operand(Code::KindField::kMask)); | |
1281 __ srl(t1, t1, Code::KindField::kShift); | |
1282 __ Branch(&gotta_call_runtime_no_stack, eq, t1, Operand(Code::BUILTIN)); | |
1283 // Yes, install the full code. | |
1284 __ Addu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag)); | |
1285 __ sw(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset)); | |
1286 __ Jump(entry); | |
1287 | |
1288 __ bind(&gotta_call_runtime); | |
1289 __ pop(closure); | |
1290 __ pop(new_target); | |
1291 __ bind(&gotta_call_runtime_no_stack); | |
1292 CallRuntimePassFunction(masm, Runtime::kCompileLazy); | 1152 CallRuntimePassFunction(masm, Runtime::kCompileLazy); |
1293 GenerateTailCallToReturnedCode(masm); | 1153 GenerateTailCallToReturnedCode(masm); |
1294 } | 1154 } |
1295 | 1155 |
1296 | 1156 |
1297 void Builtins::Generate_CompileOptimized(MacroAssembler* masm) { | 1157 void Builtins::Generate_CompileOptimized(MacroAssembler* masm) { |
1298 CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent); | 1158 CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent); |
1299 GenerateTailCallToReturnedCode(masm); | 1159 GenerateTailCallToReturnedCode(masm); |
1300 } | 1160 } |
1301 | 1161 |
(...skipping 1514 matching lines...)
2816 } | 2676 } |
2817 } | 2677 } |
2818 | 2678 |
2819 | 2679 |
2820 #undef __ | 2680 #undef __ |
2821 | 2681 |
2822 } // namespace internal | 2682 } // namespace internal |
2823 } // namespace v8 | 2683 } // namespace v8 |
2824 | 2684 |
2825 #endif // V8_TARGET_ARCH_MIPS | 2685 #endif // V8_TARGET_ARCH_MIPS |