OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 1213 matching lines...)
1224 __ CallCFunction( | 1224 __ CallCFunction( |
1225 ExternalReference::store_buffer_overflow_function(masm->isolate()), | 1225 ExternalReference::store_buffer_overflow_function(masm->isolate()), |
1226 argument_count); | 1226 argument_count); |
1227 if (save_doubles_ == kSaveFPRegs) { | 1227 if (save_doubles_ == kSaveFPRegs) { |
1228 __ RestoreFPRegs(sp, scratch); | 1228 __ RestoreFPRegs(sp, scratch); |
1229 } | 1229 } |
1230 __ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0). | 1230 __ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0). |
1231 } | 1231 } |
1232 | 1232 |
1233 | 1233 |
1234 void TranscendentalCacheStub::Generate(MacroAssembler* masm) { | |
1235 // Untagged case: double input in d2, double result goes | |
1236 // into d2. | |
1237 // Tagged case: tagged input on top of stack and in r0, | |
1238 // tagged result (heap number) goes into r0. | |
1239 | |
1240 Label input_not_smi; | |
1241 Label loaded; | |
1242 Label calculate; | |
1243 Label invalid_cache; | |
1244 const Register scratch0 = r9; | |
1245 Register scratch1 = no_reg; // will be r4 | |
1246 const Register cache_entry = r0; | |
1247 const bool tagged = (argument_type_ == TAGGED); | |
1248 | |
1249 if (tagged) { | |
1250 // Argument is a number and is on stack and in r0. | |
1251 // Load argument and check if it is a smi. | |
1252 __ JumpIfNotSmi(r0, &input_not_smi); | |
1253 | |
1254 // Input is a smi. Convert to double and load the low and high words | |
1255 // of the double into r2, r3. | |
1256 __ SmiToDouble(d7, r0); | |
1257 __ vmov(r2, r3, d7); | |
1258 __ b(&loaded); | |
1259 | |
1260 __ bind(&input_not_smi); | |
1261 // Check if input is a HeapNumber. | |
1262 __ CheckMap(r0, | |
1263 r1, | |
1264 Heap::kHeapNumberMapRootIndex, | |
1265 &calculate, | |
1266 DONT_DO_SMI_CHECK); | |
1267 // Input is a HeapNumber. Load it to a double register and store the | |
1268 // low and high words into r2, r3. | |
1269 __ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset)); | |
1270 __ vmov(r2, r3, d0); | |
1271 } else { | |
1272 // Input is untagged double in d2. Output goes to d2. | |
1273 __ vmov(r2, r3, d2); | |
1274 } | |
1275 __ bind(&loaded); | |
1276 // r2 = low 32 bits of double value | |
1277 // r3 = high 32 bits of double value | |
1278 // Compute hash (the shifts are arithmetic): | |
1279 // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1); | |
1280 __ eor(r1, r2, Operand(r3)); | |
1281 __ eor(r1, r1, Operand(r1, ASR, 16)); | |
1282 __ eor(r1, r1, Operand(r1, ASR, 8)); | |
1283 ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize)); | |
1284 __ And(r1, r1, Operand(TranscendentalCache::SubCache::kCacheSize - 1)); | |
1285 | |
1286 // r2 = low 32 bits of double value. | |
1287 // r3 = high 32 bits of double value. | |
1288 // r1 = TranscendentalCache::hash(double value). | |
1289 Isolate* isolate = masm->isolate(); | |
1290 ExternalReference cache_array = | |
1291 ExternalReference::transcendental_cache_array_address(isolate); | |
1292 __ mov(cache_entry, Operand(cache_array)); | |
1293 // cache_entry points to cache array. | |
1294 int cache_array_index | |
1295 = type_ * sizeof(isolate->transcendental_cache()->caches_[0]); | |
1296 __ ldr(cache_entry, MemOperand(cache_entry, cache_array_index)); | |
1297 // r0 points to the cache for the type type_. | |
1298 // If NULL, the cache hasn't been initialized yet, so go through runtime. | |
1299 __ cmp(cache_entry, Operand::Zero()); | |
1300 __ b(eq, &invalid_cache); | |
1301 | |
1302 #ifdef DEBUG | |
1303 // Check that the layout of cache elements matches expectations. | 
1304 { TranscendentalCache::SubCache::Element test_elem[2]; | |
1305 char* elem_start = reinterpret_cast<char*>(&test_elem[0]); | |
1306 char* elem2_start = reinterpret_cast<char*>(&test_elem[1]); | |
1307 char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0])); | |
1308 char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1])); | |
1309 char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output)); | |
1310 CHECK_EQ(12, elem2_start - elem_start); // Two uint32_t's and a pointer. | 
1311 CHECK_EQ(0, elem_in0 - elem_start); | |
1312 CHECK_EQ(kIntSize, elem_in1 - elem_start); | |
1313 CHECK_EQ(2 * kIntSize, elem_out - elem_start); | |
1314 } | |
1315 #endif | |
1316 | |
1317 // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12]. | |
1318 __ add(r1, r1, Operand(r1, LSL, 1)); | |
1319 __ add(cache_entry, cache_entry, Operand(r1, LSL, 2)); | |
1320 // Check if cache matches: Double value is stored in uint32_t[2] array. | |
1321 __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit()); | |
1322 __ cmp(r2, r4); | |
1323 __ cmp(r3, r5, eq); | |
1324 __ b(ne, &calculate); | |
1325 | |
1326 scratch1 = r4; // Start of scratch1 range. | |
1327 | |
1328 // Cache hit. Load result, cleanup and return. | |
1329 Counters* counters = masm->isolate()->counters(); | |
1330 __ IncrementCounter( | |
1331 counters->transcendental_cache_hit(), 1, scratch0, scratch1); | |
1332 if (tagged) { | |
1333 // Pop input value from stack and load result into r0. | |
1334 __ pop(); | |
1335 __ mov(r0, Operand(r6)); | |
1336 } else { | |
1337 // Load result into d2. | |
1338 __ vldr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset)); | |
1339 } | |
1340 __ Ret(); | |
1341 | |
1342 __ bind(&calculate); | |
1343 __ IncrementCounter( | |
1344 counters->transcendental_cache_miss(), 1, scratch0, scratch1); | |
1345 if (tagged) { | |
1346 __ bind(&invalid_cache); | |
1347 ExternalReference runtime_function = | |
1348 ExternalReference(RuntimeFunction(), masm->isolate()); | |
1349 __ TailCallExternalReference(runtime_function, 1, 1); | |
1350 } else { | |
1351 Label no_update; | |
1352 Label skip_cache; | |
1353 | |
1354 // Call C function to calculate the result and update the cache. | |
1355 // r0: precalculated cache entry address. | |
1356 // r2 and r3: parts of the double value. | |
1357 // Store r0, r2 and r3 on stack for later before calling C function. | |
1358 __ Push(r3, r2, cache_entry); | |
1359 GenerateCallCFunction(masm, scratch0); | |
1360 __ GetCFunctionDoubleResult(d2); | |
1361 | |
1362 // Try to update the cache. If we cannot allocate a | |
1363 // heap number, we return the result without updating. | |
1364 __ Pop(r3, r2, cache_entry); | |
1365 __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex); | |
1366 __ AllocateHeapNumber(r6, scratch0, scratch1, r5, &no_update); | |
1367 __ vstr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset)); | |
1368 __ stm(ia, cache_entry, r2.bit() | r3.bit() | r6.bit()); | |
1369 __ Ret(); | |
1370 | |
1371 __ bind(&invalid_cache); | |
1372 // The cache is invalid. Call runtime which will recreate the | |
1373 // cache. | |
1374 __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex); | |
1375 __ AllocateHeapNumber(r0, scratch0, scratch1, r5, &skip_cache); | |
1376 __ vstr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset)); | |
1377 { | |
1378 FrameScope scope(masm, StackFrame::INTERNAL); | |
1379 __ push(r0); | |
1380 __ CallRuntime(RuntimeFunction(), 1); | |
1381 } | |
1382 __ vldr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset)); | |
1383 __ Ret(); | |
1384 | |
1385 __ bind(&skip_cache); | |
1386 // Call C function to calculate the result and answer directly | |
1387 // without updating the cache. | |
1388 GenerateCallCFunction(masm, scratch0); | |
1389 __ GetCFunctionDoubleResult(d2); | |
1390 __ bind(&no_update); | |
1391 | |
1392 // We return the value in d2 without adding it to the cache, but | |
1393 // we cause a scavenging GC so that future allocations will succeed. | |
1394 { | |
1395 FrameScope scope(masm, StackFrame::INTERNAL); | |
1396 | |
1397 // Allocate an aligned object larger than a HeapNumber. | |
1398 ASSERT(4 * kPointerSize >= HeapNumber::kSize); | |
1399 __ mov(scratch0, Operand(4 * kPointerSize)); | |
1400 __ push(scratch0); | |
1401 __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace); | |
1402 } | |
1403 __ Ret(); | |
1404 } | |
1405 } | |
1406 | |
1407 | |
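For reference, the cache probe that TranscendentalCacheStub::Generate hand-codes above (the eor/ASR hash in r1, 12-byte elements, the ldm/cmp match) corresponds roughly to the C++ sketch below. The Element layout follows the DEBUG check in the stub; the kCacheSize value and the use of void* for the result pointer are illustrative assumptions, not taken from this diff.

#include <stdint.h>
#include <stddef.h>

// Mirrors the DEBUG layout check in the stub: two uint32_t words and a
// pointer, i.e. 12 bytes per element on 32-bit ARM.
struct Element {
  uint32_t in[2];   // low and high word of the cached double input
  void* output;     // the cached HeapNumber result (Object* in V8)
};

// Illustrative value; the diff only shows that kCacheSize is a power of two.
static const uint32_t kCacheSize = 512;

void* Lookup(Element* cache, uint32_t low, uint32_t high) {
  // Hash from the stub's comment: h = low ^ high; h ^= h >> 16; h ^= h >> 8;
  // h &= kCacheSize - 1.  The stub uses arithmetic shifts (ASR).
  int32_t h = static_cast<int32_t>(low ^ high);
  h ^= h >> 16;
  h ^= h >> 8;
  uint32_t index = static_cast<uint32_t>(h) & (kCacheSize - 1);
  // &cache[index] is cache + index * 12, which the stub forms as
  // (index + (index << 1)) << 2.
  Element* entry = &cache[index];
  if (entry->in[0] == low && entry->in[1] == high)
    return entry->output;  // cache hit: load the result and return
  return NULL;             // miss: the stub falls through to &calculate
}
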
1408 void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm, | |
1409 Register scratch) { | |
1410 Isolate* isolate = masm->isolate(); | |
1411 | |
1412 __ push(lr); | |
1413 __ PrepareCallCFunction(0, 1, scratch); | |
1414 if (masm->use_eabi_hardfloat()) { | |
1415 __ vmov(d0, d2); | |
1416 } else { | |
1417 __ vmov(r0, r1, d2); | |
1418 } | |
1419 AllowExternalCallThatCantCauseGC scope(masm); | |
1420 switch (type_) { | |
1421 case TranscendentalCache::LOG: | |
1422 __ CallCFunction(ExternalReference::math_log_double_function(isolate), | |
1423 0, 1); | |
1424 break; | |
1425 default: | |
1426 UNIMPLEMENTED(); | |
1427 break; | |
1428 } | |
1429 __ pop(lr); | |
1430 } | |
1431 | |
1432 | |
1433 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() { | |
1434 switch (type_) { | |
1435 // Add more cases when necessary. | |
1436 case TranscendentalCache::LOG: return Runtime::kMath_log; | |
1437 default: | |
1438 UNIMPLEMENTED(); | |
1439 return Runtime::kAbort; | |
1440 } | |
1441 } | |
1442 | |
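GenerateCallCFunction above only adapts the ARM calling convention: the target behind ExternalReference::math_log_double_function takes a double and returns a double, so under EABI hardfloat the argument stays in d0, while under softfloat it is passed in the r0/r1 pair. A minimal sketch of the C side, with a hypothetical wrapper name (the real V8 wrapper is not shown in this diff):

#include <cmath>

// Hypothetical stand-in for the function reached through
// ExternalReference::math_log_double_function(isolate); only the
// double -> double signature is what the stub relies on.
extern "C" double math_log_double(double x) {
  return std::log(x);
}

The tagged and invalid-cache paths do not go through this helper at all; they call the runtime function chosen by RuntimeFunction(), which is Runtime::kMath_log for TranscendentalCache::LOG.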
1443 | |
1444 void MathPowStub::Generate(MacroAssembler* masm) { | 1234 void MathPowStub::Generate(MacroAssembler* masm) { |
1445 const Register base = r1; | 1235 const Register base = r1; |
1446 const Register exponent = r2; | 1236 const Register exponent = r2; |
1447 const Register heapnumbermap = r5; | 1237 const Register heapnumbermap = r5; |
1448 const Register heapnumber = r0; | 1238 const Register heapnumber = r0; |
1449 const DwVfpRegister double_base = d0; | 1239 const DwVfpRegister double_base = d0; |
1450 const DwVfpRegister double_exponent = d1; | 1240 const DwVfpRegister double_exponent = d1; |
1451 const DwVfpRegister double_result = d2; | 1241 const DwVfpRegister double_result = d2; |
1452 const DwVfpRegister double_scratch = d3; | 1242 const DwVfpRegister double_scratch = d3; |
1453 const SwVfpRegister single_scratch = s6; | 1243 const SwVfpRegister single_scratch = s6; |
(...skipping 4561 matching lines...)
6015 __ bind(&fast_elements_case); | 5805 __ bind(&fast_elements_case); |
6016 GenerateCase(masm, FAST_ELEMENTS); | 5806 GenerateCase(masm, FAST_ELEMENTS); |
6017 } | 5807 } |
6018 | 5808 |
6019 | 5809 |
6020 #undef __ | 5810 #undef __ |
6021 | 5811 |
6022 } } // namespace v8::internal | 5812 } } // namespace v8::internal |
6023 | 5813 |
6024 #endif // V8_TARGET_ARCH_ARM | 5814 #endif // V8_TARGET_ARCH_ARM |