Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(274)

Side by Side Diff: src/mips64/builtins-mips64.cc

Issue 1670143002: Visit the Optimized Code Map on first call rather than closure creation. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Fix mips64 rebase error. Created 4 years, 8 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #if V8_TARGET_ARCH_MIPS64 5 #if V8_TARGET_ARCH_MIPS64
6 6
7 #include "src/codegen.h" 7 #include "src/codegen.h"
8 #include "src/debug/debug.h" 8 #include "src/debug/debug.h"
9 #include "src/deoptimizer.h" 9 #include "src/deoptimizer.h"
10 #include "src/full-codegen/full-codegen.h" 10 #include "src/full-codegen/full-codegen.h"
(...skipping 1199 matching lines...) Expand 10 before | Expand all | Expand 10 after
1210 // This simulates the initial call to bytecode handlers in interpreter entry 1210 // This simulates the initial call to bytecode handlers in interpreter entry
1211 // trampoline. The return will never actually be taken, but our stack walker 1211 // trampoline. The return will never actually be taken, but our stack walker
1212 // uses this address to determine whether a frame is interpreted. 1212 // uses this address to determine whether a frame is interpreted.
1213 __ li(ra, Operand(masm->isolate()->builtins()->InterpreterEntryTrampoline())); 1213 __ li(ra, Operand(masm->isolate()->builtins()->InterpreterEntryTrampoline()));
1214 1214
1215 Generate_EnterBytecodeDispatch(masm); 1215 Generate_EnterBytecodeDispatch(masm);
1216 } 1216 }
1217 1217
1218 1218
1219 void Builtins::Generate_CompileLazy(MacroAssembler* masm) { 1219 void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
1220 // ----------- S t a t e -------------
1221 // -- a0 : argument count (preserved for callee)
1222 // -- a3 : new target (preserved for callee)
1223 // -- a1 : target function (preserved for callee)
1224 // -----------------------------------
1225 // First lookup code, maybe we don't need to compile!
1226 Label gotta_call_runtime, gotta_call_runtime_no_stack;
1227 Label maybe_call_runtime;
1228 Label try_shared;
1229 Label loop_top, loop_bottom;
1230
1231 Register argument_count = a0;
1232 Register closure = a1;
1233 Register new_target = a3;
1234 __ push(argument_count);
1235 __ push(new_target);
1236 __ push(closure);
1237
1238 Register map = a0;
1239 Register index = a2;
1240 __ ld(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
1241 __ ld(map, FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
1242 __ ld(index, FieldMemOperand(map, FixedArray::kLengthOffset));
1243 __ Branch(&gotta_call_runtime, lt, index, Operand(Smi::FromInt(2)));
1244
1245 // Find literals.
1246 // a3 : native context
1247 // a2 : length / index
1248 // a0 : optimized code map
1249 // stack[0] : closure (pushed last above)
1250 // stack[8] : new target (mips64 stack slots are 8 bytes wide)
1251 Register native_context = a3;
1252 __ ld(native_context, NativeContextMemOperand());
1253
1254 __ bind(&loop_top);
1255 Register temp = a1;
1256 Register array_pointer = a5;
1257
1258 // Does the native context match?
1259 __ SmiScale(at, index, kPointerSizeLog2);
1260 __ Daddu(array_pointer, map, Operand(at));
1261 __ ld(temp, FieldMemOperand(array_pointer,
1262 SharedFunctionInfo::OffsetToPreviousContext()));
1263 __ ld(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
1264 __ Branch(&loop_bottom, ne, temp, Operand(native_context));
1265 // OSR id set to none?
1266 __ ld(temp, FieldMemOperand(array_pointer,
1267 SharedFunctionInfo::OffsetToPreviousOsrAstId()));
1268 const int bailout_id = BailoutId::None().ToInt();
1269 __ Branch(&loop_bottom, ne, temp, Operand(Smi::FromInt(bailout_id)));
1270 // Literals available?
1271 __ ld(temp, FieldMemOperand(array_pointer,
1272 SharedFunctionInfo::OffsetToPreviousLiterals()));
1273 __ ld(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
1274 __ JumpIfSmi(temp, &gotta_call_runtime);
1275
1276 // Save the literals in the closure (reload the closure from stack[0]; a1 was clobbered as 'temp' above).
1277 __ ld(a4, MemOperand(sp, 0));
1278 __ sd(temp, FieldMemOperand(a4, JSFunction::kLiteralsOffset));
1279 __ push(index);
1280 __ RecordWriteField(a4, JSFunction::kLiteralsOffset, temp, index,
1281 kRAHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
1282 OMIT_SMI_CHECK);
1283 __ pop(index);
1284
1285 // Code available?
1286 Register entry = a4;
1287 __ ld(entry,
1288 FieldMemOperand(array_pointer,
1289 SharedFunctionInfo::OffsetToPreviousCachedCode()));
1290 __ ld(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
1291 __ JumpIfSmi(entry, &maybe_call_runtime);
1292
1293 // Found literals and code. Get them into the closure and return.
1294 __ pop(closure);
1295 // Store code entry in the closure.
1296 __ Daddu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
1297
1298 Label install_optimized_code_and_tailcall;
1299 __ bind(&install_optimized_code_and_tailcall);
1300 __ sd(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
1301 __ RecordWriteCodeEntryField(closure, entry, a5);
1302
1303 // Link the closure into the optimized function list.
1304 // a4 : code entry
1305 // a3 : native context
1306 // a1 : closure
1307 __ ld(a5,
1308 ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
1309 __ sd(a5, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
1310 __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, a5, a0,
1311 kRAHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
1312 OMIT_SMI_CHECK);
1313 const int function_list_offset =
1314 Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
1315 __ sd(closure,
1316 ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
1317 // Save closure before the write barrier (RecordWriteContextSlot clobbers its 'value' register).
1318 __ mov(a5, closure);
1319 __ RecordWriteContextSlot(native_context, function_list_offset, closure, a0,
1320 kRAHasNotBeenSaved, kDontSaveFPRegs);
1321 __ mov(closure, a5);
1322 __ pop(new_target);
1323 __ pop(argument_count);
1324 __ Jump(entry);
1325
1326 __ bind(&loop_bottom);
1327 __ Dsubu(index, index,
1328 Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
1329 __ Branch(&loop_top, gt, index, Operand(Smi::FromInt(1)));
1330
1331 // We found neither literals nor code.
1332 __ jmp(&gotta_call_runtime);
1333
1334 __ bind(&maybe_call_runtime);
1335 __ pop(closure);
1336
1337 // Last possibility. Check the context free optimized code map entry.
1338 __ ld(entry, FieldMemOperand(map, FixedArray::kHeaderSize +
1339 SharedFunctionInfo::kSharedCodeIndex));
1340 __ ld(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
1341 __ JumpIfSmi(entry, &try_shared);
1342
1343 // Store code entry in the closure.
1344 __ Daddu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
1345 __ jmp(&install_optimized_code_and_tailcall);
1346
1347 __ bind(&try_shared);
1348 __ pop(new_target);
1349 __ pop(argument_count);
1350 // Is the full code valid?
1351 __ ld(entry, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
1352 __ ld(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
1353 __ ld(a5, FieldMemOperand(entry, Code::kFlagsOffset));
1354 __ And(a5, a5, Operand(Code::KindField::kMask));
1355 __ dsrl(a5, a5, Code::KindField::kShift);
1356 __ Branch(&gotta_call_runtime_no_stack, eq, a5, Operand(Code::BUILTIN));
1357 // Yes, install the full code.
1358 __ Daddu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
1359 __ sd(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
1360 __ RecordWriteCodeEntryField(closure, entry, a5);
1361 __ Jump(entry);
1362
1363 __ bind(&gotta_call_runtime);
1364 __ pop(closure);
1365 __ pop(new_target);
1366 __ pop(argument_count);
1367 __ bind(&gotta_call_runtime_no_stack);
1220 GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy); 1368 GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
1221 } 1369 }
1222 1370
1223 1371
// Tail-call into the runtime to compile the target function optimized,
// blocking until compilation finishes, then enter the returned code.
1224 void Builtins::Generate_CompileOptimized(MacroAssembler* masm) { 1372 void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
1225 GenerateTailCallToReturnedCode(masm, 1373 GenerateTailCallToReturnedCode(masm,
1226 Runtime::kCompileOptimized_NotConcurrent); 1374 Runtime::kCompileOptimized_NotConcurrent);
1227 } 1375 }
1228 1376
1229
// Tail-call into the runtime to queue the target function for concurrent
// (background-thread) optimized compilation, then enter the returned code.
1230 void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) { 1377 void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
1231 GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent); 1378 GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
1232 } 1379 }
1233 1380
1234 1381
1235 static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) { 1382 static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
1236 // For now, we are relying on the fact that make_code_young doesn't do any 1383 // For now, we are relying on the fact that make_code_young doesn't do any
1237 // garbage collection which allows us to save/restore the registers without 1384 // garbage collection which allows us to save/restore the registers without
1238 // worrying about which of them contain pointers. We also don't build an 1385 // worrying about which of them contain pointers. We also don't build an
1239 // internal frame to make the code faster, since we shouldn't have to do stack 1386 // internal frame to make the code faster, since we shouldn't have to do stack
(...skipping 1461 matching lines...) Expand 10 before | Expand all | Expand 10 after
2701 } 2848 }
2702 } 2849 }
2703 2850
2704 2851
2705 #undef __ 2852 #undef __
2706 2853
2707 } // namespace internal 2854 } // namespace internal
2708 } // namespace v8 2855 } // namespace v8
2709 2856
2710 #endif // V8_TARGET_ARCH_MIPS64 2857 #endif // V8_TARGET_ARCH_MIPS64
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698