| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 1255 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1266 CodeStubInterfaceDescriptor* descriptor) { | 1266 CodeStubInterfaceDescriptor* descriptor) { |
| 1267 static Register registers[] = { a1, a0 }; | 1267 static Register registers[] = { a1, a0 }; |
| 1268 descriptor->register_param_count_ = 2; | 1268 descriptor->register_param_count_ = 2; |
| 1269 descriptor->register_params_ = registers; | 1269 descriptor->register_params_ = registers; |
| 1270 descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss); | 1270 descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss); |
| 1271 descriptor->SetMissHandler( | 1271 descriptor->SetMissHandler( |
| 1272 ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate)); | 1272 ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate)); |
| 1273 } | 1273 } |
| 1274 | 1274 |
| 1275 | 1275 |
void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
  // Computes a transcendental function (the one selected by type_) of the
  // argument, first probing the isolate's TranscendentalCache and only
  // calling out to C / the runtime on a cache miss.
  //
  // Untagged case: double input in f4, double result goes
  //   into f4.
  // Tagged case: tagged input on top of stack and in a0,
  //   tagged result (heap number) goes into v0.

  Label input_not_smi;
  Label loaded;
  Label calculate;
  Label invalid_cache;
  const Register scratch0 = t5;
  const Register scratch1 = t3;
  const Register cache_entry = a0;
  const bool tagged = (argument_type_ == TAGGED);

  if (tagged) {
    // Argument is a number and is on stack and in a0.
    // Load argument and check if it is a smi.
    __ JumpIfNotSmi(a0, &input_not_smi);

    // Input is a smi. Convert to double and load the low and high words
    // of the double into a2, a3.
    __ sra(t0, a0, kSmiTagSize);  // Untag the smi.
    __ mtc1(t0, f4);
    __ cvt_d_w(f4, f4);           // int32 -> double.
    __ Move(a2, a3, f4);
    __ Branch(&loaded);

    __ bind(&input_not_smi);
    // Check if input is a HeapNumber; anything else goes to the slow path.
    __ CheckMap(a0,
                a1,
                Heap::kHeapNumberMapRootIndex,
                &calculate,
                DONT_DO_SMI_CHECK);
    // Input is a HeapNumber. Store the
    // low and high words into a2, a3.
    __ lw(a2, FieldMemOperand(a0, HeapNumber::kValueOffset));
    __ lw(a3, FieldMemOperand(a0, HeapNumber::kValueOffset + 4));
  } else {
    // Input is untagged double in f4. Output goes to f4.
    __ Move(a2, a3, f4);
  }
  __ bind(&loaded);
  // a2 = low 32 bits of double value.
  // a3 = high 32 bits of double value.
  // Compute hash (the shifts are arithmetic):
  //   h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
  __ Xor(a1, a2, a3);
  __ sra(t0, a1, 16);
  __ Xor(a1, a1, t0);
  __ sra(t0, a1, 8);
  __ Xor(a1, a1, t0);
  // The masking below is only a valid modulo when the cache size is a
  // power of two.
  ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
  __ And(a1, a1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));

  // a2 = low 32 bits of double value.
  // a3 = high 32 bits of double value.
  // a1 = TranscendentalCache::hash(double value).
  __ li(cache_entry, Operand(
      ExternalReference::transcendental_cache_array_address(
          masm->isolate())));
  // a0 points to cache array.
  __ lw(cache_entry, MemOperand(cache_entry, type_ * sizeof(
      Isolate::Current()->transcendental_cache()->caches_[0])));
  // a0 points to the cache for the type type_.
  // If NULL, the cache hasn't been initialized yet, so go through runtime.
  __ Branch(&invalid_cache, eq, cache_entry, Operand(zero_reg));

#ifdef DEBUG
  // Check that the layout of cache elements match expectations.
  { TranscendentalCache::SubCache::Element test_elem[2];
    char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
    char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
    char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
    char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
    char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
    CHECK_EQ(12, elem2_start - elem_start);  // Two uint_32's and a pointer.
    CHECK_EQ(0, elem_in0 - elem_start);
    CHECK_EQ(kIntSize, elem_in1 - elem_start);
    CHECK_EQ(2 * kIntSize, elem_out - elem_start);
  }
#endif

  // Find the address of the a1'st entry in the cache, i.e., &a0[a1*12].
  // Index * 12 is computed as (a1 + a1 * 2) * 4 using shift-and-add.
  __ sll(t0, a1, 1);
  __ Addu(a1, a1, t0);
  __ sll(t0, a1, 2);
  __ Addu(cache_entry, cache_entry, t0);

  // Check if cache matches: Double value is stored in uint32_t[2] array.
  __ lw(t0, MemOperand(cache_entry, 0));   // Cached input, low word.
  __ lw(t1, MemOperand(cache_entry, 4));   // Cached input, high word.
  __ lw(t2, MemOperand(cache_entry, 8));   // Cached result (heap number).
  __ Branch(&calculate, ne, a2, Operand(t0));
  __ Branch(&calculate, ne, a3, Operand(t1));
  // Cache hit. Load result, cleanup and return.
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(
      counters->transcendental_cache_hit(), 1, scratch0, scratch1);
  if (tagged) {
    // Pop input value from stack and load result into v0.
    __ Drop(1);
    __ mov(v0, t2);
  } else {
    // Load result into f4.
    __ ldc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
  }
  __ Ret();

  __ bind(&calculate);
  __ IncrementCounter(
      counters->transcendental_cache_miss(), 1, scratch0, scratch1);
  if (tagged) {
    __ bind(&invalid_cache);
    // Tagged miss (or uninitialized cache): tail-call the runtime function,
    // which computes the result and manages the cache itself.
    __ TailCallExternalReference(ExternalReference(RuntimeFunction(),
                                                   masm->isolate()),
                                 1,
                                 1);
  } else {
    Label no_update;
    Label skip_cache;

    // Call C function to calculate the result and update the cache.
    // a0: precalculated cache entry address.
    // a2 and a3: parts of the double value.
    // Store a0, a2 and a3 on stack for later before calling C function.
    __ Push(a3, a2, cache_entry);
    GenerateCallCFunction(masm, scratch0);
    __ GetCFunctionDoubleResult(f4);

    // Try to update the cache. If we cannot allocate a
    // heap number, we return the result without updating.
    __ Pop(a3, a2, cache_entry);
    __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update);
    __ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));

    // Write the input words and the boxed result into the cache entry.
    __ sw(a2, MemOperand(cache_entry, 0 * kPointerSize));
    __ sw(a3, MemOperand(cache_entry, 1 * kPointerSize));
    __ sw(t2, MemOperand(cache_entry, 2 * kPointerSize));

    __ Ret(USE_DELAY_SLOT);
    __ mov(v0, cache_entry);  // Executed in the branch delay slot.

    __ bind(&invalid_cache);
    // The cache is invalid. Call runtime which will recreate the
    // cache.
    __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(a0, scratch0, scratch1, t1, &skip_cache);
    __ sdc1(f4, FieldMemOperand(a0, HeapNumber::kValueOffset));
    {
      FrameScope scope(masm, StackFrame::INTERNAL);
      __ push(a0);
      __ CallRuntime(RuntimeFunction(), 1);
    }
    // Unbox the runtime's heap-number result back into f4.
    __ ldc1(f4, FieldMemOperand(v0, HeapNumber::kValueOffset));
    __ Ret();

    __ bind(&skip_cache);
    // Call C function to calculate the result and answer directly
    // without updating the cache.
    GenerateCallCFunction(masm, scratch0);
    __ GetCFunctionDoubleResult(f4);
    __ bind(&no_update);

    // We return the value in f4 without adding it to the cache, but
    // we cause a scavenging GC so that future allocations will succeed.
    {
      FrameScope scope(masm, StackFrame::INTERNAL);

      // Allocate an aligned object larger than a HeapNumber.
      ASSERT(4 * kPointerSize >= HeapNumber::kSize);
      __ li(scratch0, Operand(4 * kPointerSize));
      __ push(scratch0);
      __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
    }
    __ Ret();
  }
}
| 1456 | |
| 1457 | |
void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
                                                    Register scratch) {
  // Emits a call to the C library routine for this stub's transcendental
  // type (currently only LOG). The double argument is taken from f4; ra is
  // saved and restored around the call so the caller's return address
  // survives the external call.
  __ push(ra);
  __ PrepareCallCFunction(2, scratch);
  if (IsMipsSoftFloatABI) {
    // Soft-float ABI: pass the double argument in the a0/a1 register pair.
    __ Move(a0, a1, f4);
  } else {
    // Hard-float ABI: pass the double argument in f12.
    __ mov_d(f12, f4);
  }
  AllowExternalCallThatCantCauseGC scope(masm);
  Isolate* isolate = masm->isolate();
  switch (type_) {
    case TranscendentalCache::LOG:
      __ CallCFunction(
          ExternalReference::math_log_double_function(isolate),
          0, 1);  // 0 general-register args, 1 double arg.
      break;
    default:
      // Only LOG is wired up on MIPS so far; other cache types must not
      // reach this stub.
      UNIMPLEMENTED();
      break;
  }
  __ pop(ra);
}
| 1481 | |
| 1482 | |
| 1483 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() { | |
| 1484 switch (type_) { | |
| 1485 // Add more cases when necessary. | |
| 1486 case TranscendentalCache::LOG: return Runtime::kMath_log; | |
| 1487 default: | |
| 1488 UNIMPLEMENTED(); | |
| 1489 return Runtime::kAbort; | |
| 1490 } | |
| 1491 } | |
| 1492 | |
| 1493 | |
| 1494 void MathPowStub::Generate(MacroAssembler* masm) { | 1276 void MathPowStub::Generate(MacroAssembler* masm) { |
| 1495 const Register base = a1; | 1277 const Register base = a1; |
| 1496 const Register exponent = a2; | 1278 const Register exponent = a2; |
| 1497 const Register heapnumbermap = t1; | 1279 const Register heapnumbermap = t1; |
| 1498 const Register heapnumber = v0; | 1280 const Register heapnumber = v0; |
| 1499 const DoubleRegister double_base = f2; | 1281 const DoubleRegister double_base = f2; |
| 1500 const DoubleRegister double_exponent = f4; | 1282 const DoubleRegister double_exponent = f4; |
| 1501 const DoubleRegister double_result = f0; | 1283 const DoubleRegister double_result = f0; |
| 1502 const DoubleRegister double_scratch = f6; | 1284 const DoubleRegister double_scratch = f6; |
| 1503 const FPURegister single_scratch = f8; | 1285 const FPURegister single_scratch = f8; |
| (...skipping 4657 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 6161 __ bind(&fast_elements_case); | 5943 __ bind(&fast_elements_case); |
| 6162 GenerateCase(masm, FAST_ELEMENTS); | 5944 GenerateCase(masm, FAST_ELEMENTS); |
| 6163 } | 5945 } |
| 6164 | 5946 |
| 6165 | 5947 |
| 6166 #undef __ | 5948 #undef __ |
| 6167 | 5949 |
| 6168 } } // namespace v8::internal | 5950 } } // namespace v8::internal |
| 6169 | 5951 |
| 6170 #endif // V8_TARGET_ARCH_MIPS | 5952 #endif // V8_TARGET_ARCH_MIPS |
| OLD | NEW |