Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(4)

Side by Side Diff: src/mips/code-stubs-mips.cc

Issue 157503002: A64: Synchronize with r18444. (Closed) Base URL: https://v8.googlecode.com/svn/branches/experimental/a64
Patch Set: Created 6 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/mips/code-stubs-mips.h ('k') | src/mips/codegen-mips.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show/Hide Comments ('s')
OLDNEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 1216 matching lines...) Expand 10 before | Expand all | Expand 10 after
1227 1227
1228 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) 1228 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
1229 // tagged as a small integer. 1229 // tagged as a small integer.
1230 __ InvokeBuiltin(native, JUMP_FUNCTION); 1230 __ InvokeBuiltin(native, JUMP_FUNCTION);
1231 1231
1232 __ bind(&miss); 1232 __ bind(&miss);
1233 GenerateMiss(masm); 1233 GenerateMiss(masm);
1234 } 1234 }
1235 1235
1236 1236
1237 void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
// Emits code that pushes all safepoint registers (and, when
// save_doubles_ == kSaveFPRegs, the FPU doubles as well) so the GC can
// inspect them. The stub is transparent to its caller: the caller's
// return address is popped back into ra before the registers are saved,
// and control returns through t9.
// Save the stub's own return address; ra is about to be overwritten.
1238 __ mov(t9, ra);
// Restore the caller's return address so the saved register state
// matches the caller's frame.
1239 __ pop(ra);
1240 if (save_doubles_ == kSaveFPRegs) {
1241 __ PushSafepointRegistersAndDoubles();
1242 } else {
1243 __ PushSafepointRegisters();
1244 }
// Return to the caller via the address preserved in t9.
1245 __ Jump(t9);
1246 }
1247
1248
1249 void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
// Emits code that pops the safepoint register state previously pushed by
// StoreRegistersStateStub (including FPU doubles when
// save_doubles_ == kSaveFPRegs). Like its counterpart, the stub is
// transparent: the caller's ra is restored and control returns via t9.
1250 __ mov(t9, ra);
1251 __ pop(ra);
// Write the return address into t9's safepoint slot so that the pop
// below reloads it into t9 rather than clobbering the jump target.
1252 __ StoreToSafepointRegisterSlot(t9, t9);
1253 if (save_doubles_ == kSaveFPRegs) {
1254 __ PopSafepointRegistersAndDoubles();
1255 } else {
1256 __ PopSafepointRegisters();
1257 }
1258 __ Jump(t9);
1259 }
1260
1261
1237 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { 1262 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
1238 // We don't allow a GC during a store buffer overflow so there is no need to 1263 // We don't allow a GC during a store buffer overflow so there is no need to
1239 // store the registers in any particular way, but we do have to store and 1264 // store the registers in any particular way, but we do have to store and
1240 // restore them. 1265 // restore them.
1241 __ MultiPush(kJSCallerSaved | ra.bit()); 1266 __ MultiPush(kJSCallerSaved | ra.bit());
1242 if (save_doubles_ == kSaveFPRegs) { 1267 if (save_doubles_ == kSaveFPRegs) {
1243 __ MultiPushFPU(kCallerSavedFPU); 1268 __ MultiPushFPU(kCallerSavedFPU);
1244 } 1269 }
1245 const int argument_count = 1; 1270 const int argument_count = 1;
1246 const int fp_argument_count = 0; 1271 const int fp_argument_count = 0;
(...skipping 19 matching lines...) Expand all
1266 CodeStubInterfaceDescriptor* descriptor) { 1291 CodeStubInterfaceDescriptor* descriptor) {
1267 static Register registers[] = { a1, a0 }; 1292 static Register registers[] = { a1, a0 };
1268 descriptor->register_param_count_ = 2; 1293 descriptor->register_param_count_ = 2;
1269 descriptor->register_params_ = registers; 1294 descriptor->register_params_ = registers;
1270 descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss); 1295 descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
1271 descriptor->SetMissHandler( 1296 descriptor->SetMissHandler(
1272 ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate)); 1297 ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
1273 } 1298 }
1274 1299
1275 1300
1276 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// Computes a transcendental function (selected by type_) with a lookup
// in the isolate's transcendental cache, falling back to a C call or the
// runtime on a cache miss.
1277 // Untagged case: double input in f4, double result goes
1278 // into f4.
1279 // Tagged case: tagged input on top of stack and in a0,
1280 // tagged result (heap number) goes into v0.
1281
1282 Label input_not_smi;
1283 Label loaded;
1284 Label calculate;
1285 Label invalid_cache;
1286 const Register scratch0 = t5;
1287 const Register scratch1 = t3;
1288 const Register cache_entry = a0;
1289 const bool tagged = (argument_type_ == TAGGED);
1290
1291 if (tagged) {
1292 // Argument is a number and is on stack and in a0.
1293 // Load argument and check if it is a smi.
1294 __ JumpIfNotSmi(a0, &input_not_smi);
1295
1296 // Input is a smi. Convert to double and load the low and high words
1297 // of the double into a2, a3.
1298 __ sra(t0, a0, kSmiTagSize);
1299 __ mtc1(t0, f4);
1300 __ cvt_d_w(f4, f4);
1301 __ Move(a2, a3, f4);
1302 __ Branch(&loaded);
1303
1304 __ bind(&input_not_smi);
1305 // Check if input is a HeapNumber.
// Non-HeapNumber inputs go straight to &calculate (slow path).
1306 __ CheckMap(a0,
1307 a1,
1308 Heap::kHeapNumberMapRootIndex,
1309 &calculate,
1310 DONT_DO_SMI_CHECK);
1311 // Input is a HeapNumber. Store the
1312 // low and high words into a2, a3.
1313 __ lw(a2, FieldMemOperand(a0, HeapNumber::kValueOffset));
1314 __ lw(a3, FieldMemOperand(a0, HeapNumber::kValueOffset + 4));
1315 } else {
1316 // Input is untagged double in f4. Output goes to f4.
1317 __ Move(a2, a3, f4);
1318 }
1319 __ bind(&loaded);
1320 // a2 = low 32 bits of double value.
1321 // a3 = high 32 bits of double value.
1322 // Compute hash (the shifts are arithmetic):
1323 // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
1324 __ Xor(a1, a2, a3);
1325 __ sra(t0, a1, 16);
1326 __ Xor(a1, a1, t0);
1327 __ sra(t0, a1, 8);
1328 __ Xor(a1, a1, t0);
// The mask below only works if the cache size is a power of two.
1329 ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
1330 __ And(a1, a1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
1331
1332 // a2 = low 32 bits of double value.
1333 // a3 = high 32 bits of double value.
1334 // a1 = TranscendentalCache::hash(double value).
1335 __ li(cache_entry, Operand(
1336 ExternalReference::transcendental_cache_array_address(
1337 masm->isolate())));
1338 // a0 points to cache array.
1339 __ lw(cache_entry, MemOperand(cache_entry, type_ * sizeof(
1340 Isolate::Current()->transcendental_cache()->caches_[0])));
1341 // a0 points to the cache for the type type_.
1342 // If NULL, the cache hasn't been initialized yet, so go through runtime.
1343 __ Branch(&invalid_cache, eq, cache_entry, Operand(zero_reg));
1344
1345 #ifdef DEBUG
1346 // Check that the layout of cache elements match expectations.
// The 12-byte element layout is what the "&a0[a1*12]" address
// computation below relies on.
1347 { TranscendentalCache::SubCache::Element test_elem[2];
1348 char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
1349 char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
1350 char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
1351 char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
1352 char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
1353 CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
1354 CHECK_EQ(0, elem_in0 - elem_start);
1355 CHECK_EQ(kIntSize, elem_in1 - elem_start);
1356 CHECK_EQ(2 * kIntSize, elem_out - elem_start);
1357 }
1358 #endif
1359
1360 // Find the address of the a1'st entry in the cache, i.e., &a0[a1*12].
// a1*12 computed as ((a1 + a1*2) << 2): t0 = a1*2, a1 += t0 (=a1*3),
// then shift left by 2 (=a1*12).
1361 __ sll(t0, a1, 1);
1362 __ Addu(a1, a1, t0);
1363 __ sll(t0, a1, 2);
1364 __ Addu(cache_entry, cache_entry, t0);
1365
1366 // Check if cache matches: Double value is stored in uint32_t[2] array.
1367 __ lw(t0, MemOperand(cache_entry, 0));
1368 __ lw(t1, MemOperand(cache_entry, 4));
1369 __ lw(t2, MemOperand(cache_entry, 8));
1370 __ Branch(&calculate, ne, a2, Operand(t0));
1371 __ Branch(&calculate, ne, a3, Operand(t1));
1372 // Cache hit. Load result, cleanup and return.
// t2 holds the cached result HeapNumber pointer at this point.
1373 Counters* counters = masm->isolate()->counters();
1374 __ IncrementCounter(
1375 counters->transcendental_cache_hit(), 1, scratch0, scratch1);
1376 if (tagged) {
1377 // Pop input value from stack and load result into v0.
1378 __ Drop(1);
1379 __ mov(v0, t2);
1380 } else {
1381 // Load result into f4.
1382 __ ldc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
1383 }
1384 __ Ret();
1385
1386 __ bind(&calculate);
1387 __ IncrementCounter(
1388 counters->transcendental_cache_miss(), 1, scratch0, scratch1);
1389 if (tagged) {
// Tagged slow path and invalid-cache path are the same: tail-call
// the runtime function for type_ (see RuntimeFunction()).
1390 __ bind(&invalid_cache);
1391 __ TailCallExternalReference(ExternalReference(RuntimeFunction(),
1392 masm->isolate()),
1393 1,
1394 1);
1395 } else {
1396 Label no_update;
1397 Label skip_cache;
1398
1399 // Call C function to calculate the result and update the cache.
1400 // a0: precalculated cache entry address.
1401 // a2 and a3: parts of the double value.
1402 // Store a0, a2 and a3 on stack for later before calling C function.
1403 __ Push(a3, a2, cache_entry);
1404 GenerateCallCFunction(masm, scratch0);
1405 __ GetCFunctionDoubleResult(f4);
1406
1407 // Try to update the cache. If we cannot allocate a
1408 // heap number, we return the result without updating.
1409 __ Pop(a3, a2, cache_entry);
1410 __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
1411 __ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update);
1412 __ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
1413
// Fill the cache entry: input words then the result HeapNumber.
1414 __ sw(a2, MemOperand(cache_entry, 0 * kPointerSize));
1415 __ sw(a3, MemOperand(cache_entry, 1 * kPointerSize));
1416 __ sw(t2, MemOperand(cache_entry, 2 * kPointerSize));
1417
// The mov executes in the branch delay slot of Ret, so v0 is set
// before control actually leaves the stub.
1418 __ Ret(USE_DELAY_SLOT);
1419 __ mov(v0, cache_entry);
1420
1421 __ bind(&invalid_cache);
1422 // The cache is invalid. Call runtime which will recreate the
1423 // cache.
1424 __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
1425 __ AllocateHeapNumber(a0, scratch0, scratch1, t1, &skip_cache);
1426 __ sdc1(f4, FieldMemOperand(a0, HeapNumber::kValueOffset));
1427 {
1428 FrameScope scope(masm, StackFrame::INTERNAL);
1429 __ push(a0);
1430 __ CallRuntime(RuntimeFunction(), 1);
1431 }
1432 __ ldc1(f4, FieldMemOperand(v0, HeapNumber::kValueOffset));
1433 __ Ret();
1434
1435 __ bind(&skip_cache);
1436 // Call C function to calculate the result and answer directly
1437 // without updating the cache.
1438 GenerateCallCFunction(masm, scratch0);
1439 __ GetCFunctionDoubleResult(f4);
1440 __ bind(&no_update);
1441
1442 // We return the value in f4 without adding it to the cache, but
1443 // we cause a scavenging GC so that future allocations will succeed.
1444 {
1445 FrameScope scope(masm, StackFrame::INTERNAL);
1446
1447 // Allocate an aligned object larger than a HeapNumber.
1448 ASSERT(4 * kPointerSize >= HeapNumber::kSize);
1449 __ li(scratch0, Operand(4 * kPointerSize));
1450 __ push(scratch0);
1451 __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
1452 }
1453 __ Ret();
1454 }
1455 }
1456
1457
1458 void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
1459 Register scratch) {
// Emits a call to the C library function that computes the
// transcendental for type_. The double argument is expected in f4;
// it is marshalled per the MIPS float ABI in use. ra is saved/restored
// around the call because CallCFunction clobbers it.
1460 __ push(ra);
1461 __ PrepareCallCFunction(2, scratch);
1462 if (IsMipsSoftFloatABI) {
// Soft-float ABI: pass the double in the a0/a1 register pair.
1463 __ Move(a0, a1, f4);
1464 } else {
// Hard-float ABI: pass the double in f12.
1465 __ mov_d(f12, f4);
1466 }
// The C math function cannot trigger a V8 GC, so no frame is needed.
1467 AllowExternalCallThatCantCauseGC scope(masm);
1468 Isolate* isolate = masm->isolate();
1469 switch (type_) {
1470 case TranscendentalCache::LOG:
1471 __ CallCFunction(
1472 ExternalReference::math_log_double_function(isolate),
1473 0, 1);
1474 break;
1475 default:
// Only LOG is supported here; extend the switch for new types.
1476 UNIMPLEMENTED();
1477 break;
1478 }
1479 __ pop(ra);
1480 }
1481
1482
1483 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
// Maps the stub's transcendental type_ to the runtime function used on
// the slow path (see the TailCallExternalReference/CallRuntime sites in
// Generate()). Only LOG is supported; other types abort at stub
// generation time via UNIMPLEMENTED().
1484 switch (type_) {
1485 // Add more cases when necessary.
1486 case TranscendentalCache::LOG: return Runtime::kMath_log;
1487 default:
1488 UNIMPLEMENTED();
// Unreachable after UNIMPLEMENTED(); keeps the compiler happy.
1489 return Runtime::kAbort;
1490 }
1491 }
1492
1493
1494 void MathPowStub::Generate(MacroAssembler* masm) { 1301 void MathPowStub::Generate(MacroAssembler* masm) {
1495 const Register base = a1; 1302 const Register base = a1;
1496 const Register exponent = a2; 1303 const Register exponent = a2;
1497 const Register heapnumbermap = t1; 1304 const Register heapnumbermap = t1;
1498 const Register heapnumber = v0; 1305 const Register heapnumber = v0;
1499 const DoubleRegister double_base = f2; 1306 const DoubleRegister double_base = f2;
1500 const DoubleRegister double_exponent = f4; 1307 const DoubleRegister double_exponent = f4;
1501 const DoubleRegister double_result = f0; 1308 const DoubleRegister double_result = f0;
1502 const DoubleRegister double_scratch = f6; 1309 const DoubleRegister double_scratch = f6;
1503 const FPURegister single_scratch = f8; 1310 const FPURegister single_scratch = f8;
(...skipping 208 matching lines...) Expand 10 before | Expand all | Expand 10 after
1712 1519
1713 1520
// Pre-generates the code stubs that must exist before the snapshot is
// taken / before other code can call them. Each callee compiles its stub
// variants into the isolate's code cache.
1714 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { 1521 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
1715 CEntryStub::GenerateAheadOfTime(isolate); 1522 CEntryStub::GenerateAheadOfTime(isolate);
1716 WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate); 1523 WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
1717 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate); 1524 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
1718 StubFailureTrampolineStub::GenerateAheadOfTime(isolate); 1525 StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
1719 ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate); 1526 ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
1720 CreateAllocationSiteStub::GenerateAheadOfTime(isolate); 1527 CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
1721 BinaryOpICStub::GenerateAheadOfTime(isolate); 1528 BinaryOpICStub::GenerateAheadOfTime(isolate);
// New in this change: the register-state save/restore stubs are also
// generated ahead of time.
1529 StoreRegistersStateStub::GenerateAheadOfTime(isolate);
1530 RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
1722 } 1531 }
1723 1532
1724 1533
1534 void StoreRegistersStateStub::GenerateAheadOfTime(
1535 Isolate* isolate) {
// Compiles both variants of the stub (with and without FPU-register
// saving) so they are available in the code cache / snapshot before
// first use.
1536 StoreRegistersStateStub stub1(kDontSaveFPRegs);
1537 stub1.GetCode(isolate);
1538 // Hydrogen code stubs need stub2 at snapshot time.
1539 StoreRegistersStateStub stub2(kSaveFPRegs);
1540 stub2.GetCode(isolate);
1541 }
1542
1543
1544 void RestoreRegistersStateStub::GenerateAheadOfTime(
1545 Isolate* isolate) {
// Compiles both variants of the stub (with and without FPU-register
// restoring) so they are available in the code cache / snapshot before
// first use.
1546 RestoreRegistersStateStub stub1(kDontSaveFPRegs);
1547 stub1.GetCode(isolate);
1548 // Hydrogen code stubs need stub2 at snapshot time.
1549 RestoreRegistersStateStub stub2(kSaveFPRegs);
1550 stub2.GetCode(isolate);
1551 }
1552
1553
1725 void CodeStub::GenerateFPStubs(Isolate* isolate) { 1554 void CodeStub::GenerateFPStubs(Isolate* isolate) {
1726 SaveFPRegsMode mode = kSaveFPRegs; 1555 SaveFPRegsMode mode = kSaveFPRegs;
1727 CEntryStub save_doubles(1, mode); 1556 CEntryStub save_doubles(1, mode);
1728 StoreBufferOverflowStub stub(mode); 1557 StoreBufferOverflowStub stub(mode);
1729 // These stubs might already be in the snapshot, detect that and don't 1558 // These stubs might already be in the snapshot, detect that and don't
1730 // regenerate, which would lead to code stub initialization state being messed 1559 // regenerate, which would lead to code stub initialization state being messed
1731 // up. 1560 // up.
1732 Code* save_doubles_code; 1561 Code* save_doubles_code;
1733 if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) { 1562 if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) {
1734 save_doubles_code = *save_doubles.GetCode(isolate); 1563 save_doubles_code = *save_doubles.GetCode(isolate);
(...skipping 4426 matching lines...) Expand 10 before | Expand all | Expand 10 after
6161 __ bind(&fast_elements_case); 5990 __ bind(&fast_elements_case);
6162 GenerateCase(masm, FAST_ELEMENTS); 5991 GenerateCase(masm, FAST_ELEMENTS);
6163 } 5992 }
6164 5993
6165 5994
6166 #undef __ 5995 #undef __
6167 5996
6168 } } // namespace v8::internal 5997 } } // namespace v8::internal
6169 5998
6170 #endif // V8_TARGET_ARCH_MIPS 5999 #endif // V8_TARGET_ARCH_MIPS
OLDNEW
« no previous file with comments | « src/mips/code-stubs-mips.h ('k') | src/mips/codegen-mips.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698