Chromium Code Reviews

Unified Diff: src/arm/code-stubs-arm.cc

Issue 17229005: Convert UnaryOpStub to a HydrogenCodeStub (Closed)
Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: add some duct tape for now to fix non-sse. Created 7 years, 6 months ago
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 208 matching lines...)
 }


 void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
     Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
   InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1);
 }


+void UnaryOpStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { r0 };
+  descriptor->register_param_count_ = 1;
+  descriptor->register_params_ = registers;
+  descriptor->deoptimization_handler_ =
+      FUNCTION_ADDR(UnaryOpIC_Miss);
+}
+
+
 #define __ ACCESS_MASM(masm)

+
 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                           Label* slow,
                                           Condition cond);
 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                     Register lhs,
                                     Register rhs,
                                     Label* lhs_not_nan,
                                     Label* slow,
                                     bool strict);
 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
(...skipping 1037 matching lines...)
   __ CallCFunction(
       ExternalReference::store_buffer_overflow_function(masm->isolate()),
       argument_count);
   if (save_doubles_ == kSaveFPRegs) {
     __ RestoreFPRegs(sp, scratch);
   }
   __ ldm(ia_w, sp, kCallerSaved | pc.bit());  // Also pop pc to get Ret(0).
 }


-void UnaryOpStub::PrintName(StringStream* stream) {
-  const char* op_name = Token::Name(op_);
-  const char* overwrite_name = NULL;  // Make g++ happy.
-  switch (mode_) {
-    case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
-    case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
-  }
-  stream->Add("UnaryOpStub_%s_%s_%s",
-              op_name,
-              overwrite_name,
-              UnaryOpIC::GetName(operand_type_));
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::Generate(MacroAssembler* masm) {
-  switch (operand_type_) {
-    case UnaryOpIC::UNINITIALIZED:
-      GenerateTypeTransition(masm);
-      break;
-    case UnaryOpIC::SMI:
-      GenerateSmiStub(masm);
-      break;
-    case UnaryOpIC::NUMBER:
-      GenerateNumberStub(masm);
-      break;
-    case UnaryOpIC::GENERIC:
-      GenerateGenericStub(masm);
-      break;
-  }
-}
-
-
-void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
-  __ mov(r3, Operand(r0));  // the operand
-  __ mov(r2, Operand(Smi::FromInt(op_)));
-  __ mov(r1, Operand(Smi::FromInt(mode_)));
-  __ mov(r0, Operand(Smi::FromInt(operand_type_)));
-  __ Push(r3, r2, r1, r0);
-
-  __ TailCallExternalReference(
-      ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
-  switch (op_) {
-    case Token::SUB:
-      GenerateSmiStubSub(masm);
-      break;
-    case Token::BIT_NOT:
-      GenerateSmiStubBitNot(masm);
-      break;
-    default:
-      UNREACHABLE();
-  }
-}
-
-
-void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
-  Label non_smi, slow;
-  GenerateSmiCodeSub(masm, &non_smi, &slow);
-  __ bind(&non_smi);
-  __ bind(&slow);
-  GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
-  Label non_smi;
-  GenerateSmiCodeBitNot(masm, &non_smi);
-  __ bind(&non_smi);
-  GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
-                                     Label* non_smi,
-                                     Label* slow) {
-  __ JumpIfNotSmi(r0, non_smi);
-
-  // The result of negating zero or the smallest negative smi is not a smi.
-  __ bic(ip, r0, Operand(0x80000000), SetCC);
-  __ b(eq, slow);
-
-  // Return '0 - value'.
-  __ rsb(r0, r0, Operand::Zero());
-  __ Ret();
-}
-
-
-void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
-                                        Label* non_smi) {
-  __ JumpIfNotSmi(r0, non_smi);
-
-  // Flip bits and revert inverted smi-tag.
-  __ mvn(r0, Operand(r0));
-  __ bic(r0, r0, Operand(kSmiTagMask));
-  __ Ret();
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
-  switch (op_) {
-    case Token::SUB:
-      GenerateNumberStubSub(masm);
-      break;
-    case Token::BIT_NOT:
-      GenerateNumberStubBitNot(masm);
-      break;
-    default:
-      UNREACHABLE();
-  }
-}
-
-
-void UnaryOpStub::GenerateNumberStubSub(MacroAssembler* masm) {
-  Label non_smi, slow, call_builtin;
-  GenerateSmiCodeSub(masm, &non_smi, &call_builtin);
-  __ bind(&non_smi);
-  GenerateHeapNumberCodeSub(masm, &slow);
-  __ bind(&slow);
-  GenerateTypeTransition(masm);
-  __ bind(&call_builtin);
-  GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateNumberStubBitNot(MacroAssembler* masm) {
-  Label non_smi, slow;
-  GenerateSmiCodeBitNot(masm, &non_smi);
-  __ bind(&non_smi);
-  GenerateHeapNumberCodeBitNot(masm, &slow);
-  __ bind(&slow);
-  GenerateTypeTransition(masm);
-}
-
-void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
-                                            Label* slow) {
-  EmitCheckForHeapNumber(masm, r0, r1, r6, slow);
-  // r0 is a heap number. Get a new heap number in r1.
-  if (mode_ == UNARY_OVERWRITE) {
-    __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
-    __ eor(r2, r2, Operand(HeapNumber::kSignMask));  // Flip sign.
-    __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
-  } else {
-    Label slow_allocate_heapnumber, heapnumber_allocated;
-    __ AllocateHeapNumber(r1, r2, r3, r6, &slow_allocate_heapnumber);
-    __ jmp(&heapnumber_allocated);
-
-    __ bind(&slow_allocate_heapnumber);
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ push(r0);
-      __ CallRuntime(Runtime::kNumberAlloc, 0);
-      __ mov(r1, Operand(r0));
-      __ pop(r0);
-    }
-
-    __ bind(&heapnumber_allocated);
-    __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
-    __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
-    __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
-    __ eor(r2, r2, Operand(HeapNumber::kSignMask));  // Flip sign.
-    __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
-    __ mov(r0, Operand(r1));
-  }
-  __ Ret();
-}
-
-
-void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm,
-                                               Label* slow) {
-  EmitCheckForHeapNumber(masm, r0, r1, r6, slow);
-
-  // Convert the heap number in r0 to an untagged integer in r1.
-  __ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset));
-  __ ECMAToInt32(r1, d0, r2, r3, r4, d1);
-
-  // Do the bitwise operation and check if the result fits in a smi.
-  Label try_float;
-  __ mvn(r1, Operand(r1));
-  __ cmn(r1, Operand(0x40000000));
-  __ b(mi, &try_float);
-
-  // Tag the result as a smi and we're done.
-  __ SmiTag(r0, r1);
-  __ Ret();
-
-  // Try to store the result in a heap number.
-  __ bind(&try_float);
-  if (mode_ == UNARY_NO_OVERWRITE) {
-    Label slow_allocate_heapnumber, heapnumber_allocated;
-    __ AllocateHeapNumber(r0, r3, r4, r6, &slow_allocate_heapnumber);
-    __ jmp(&heapnumber_allocated);
-
-    __ bind(&slow_allocate_heapnumber);
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      // Push the lower bit of the result (left shifted to look like a smi).
-      __ mov(r2, Operand(r1, LSL, 31));
-      // Push the 31 high bits (bit 0 cleared to look like a smi).
-      __ bic(r1, r1, Operand(1));
-      __ Push(r2, r1);
-      __ CallRuntime(Runtime::kNumberAlloc, 0);
-      __ Pop(r2, r1);  // Restore the result.
-      __ orr(r1, r1, Operand(r2, LSR, 31));
-    }
-    __ bind(&heapnumber_allocated);
-  }
-
-  __ vmov(s0, r1);
-  __ vcvt_f64_s32(d0, s0);
-  __ vstr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset));
-  __ Ret();
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
-  switch (op_) {
-    case Token::SUB:
-      GenerateGenericStubSub(masm);
-      break;
-    case Token::BIT_NOT:
-      GenerateGenericStubBitNot(masm);
-      break;
-    default:
-      UNREACHABLE();
-  }
-}
-
-
-void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
-  Label non_smi, slow;
-  GenerateSmiCodeSub(masm, &non_smi, &slow);
-  __ bind(&non_smi);
-  GenerateHeapNumberCodeSub(masm, &slow);
-  __ bind(&slow);
-  GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
-  Label non_smi, slow;
-  GenerateSmiCodeBitNot(masm, &non_smi);
-  __ bind(&non_smi);
-  GenerateHeapNumberCodeBitNot(masm, &slow);
-  __ bind(&slow);
-  GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
-  // Handle the slow case by jumping to the JavaScript builtin.
-  __ push(r0);
-  switch (op_) {
-    case Token::SUB:
-      __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
-      break;
-    case Token::BIT_NOT:
-      __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
-      break;
-    default:
-      UNREACHABLE();
-  }
-}
-
-
 // Generates code to call a C function to do a double operation.
 // This code never falls through, but returns with a heap number containing
 // the result in r0.
 // Register heapnumber_result must be a heap number in which the
 // result of the operation will be stored.
 // Requires the following layout on entry:
 // d0: Left value.
 // d1: Right value.
 // If soft float ABI, use also r0, r1, r2, r3.
 static void CallCCodeForDoubleOperation(MacroAssembler* masm,
(...skipping 5919 matching lines...)
     __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
   }
 }


 #undef __

 } }  // namespace v8::internal

 #endif  // V8_TARGET_ARCH_ARM
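
A note on the deleted smi fast path for negation (GenerateSmiCodeSub): `bic ip, r0, #0x80000000, SetCC` clears bit 31 of the tagged operand, and the following `b eq, slow` bails out when the remainder is zero, which happens exactly for tagged 0 and tagged -2^30 (bit pattern 0x80000000). Those are the two smis whose negation is not a smi: negating 0 must produce -0, which only a heap number can represent, and 2^30 is one past the largest 31-bit smi payload. A minimal standalone sketch of the same test in plain C++ (not V8 code; the helper name is invented for illustration):

#include <cassert>
#include <cstdint>

// Tagged smi = payload << 1 (tag bit 0 is 0); payloads span [-2^30, 2^30 - 1].
static bool NegationStaysSmi(int32_t tagged) {
  // Mirrors "bic ip, r0, #0x80000000, SetCC; b eq, slow": clear bit 31 and
  // test for zero. Only tagged 0 and tagged -2^30 (0x80000000) become zero.
  return (static_cast<uint32_t>(tagged) & 0x7FFFFFFFu) != 0;
}

int main() {
  assert(!NegationStaysSmi(0));          // -0 is not a smi
  assert(!NegationStaysSmi(INT32_MIN));  // tagged -2^30: 2^30 overflows the payload
  assert(NegationStaysSmi(5 << 1));      // tagged 5: -5 is fine
}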
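
The deleted GenerateSmiCodeBitNot exploits the smi tag being a 0 in the low bit: for a tagged value v << 1, `mvn` yields (~v << 1) | 1, so clearing the tag bit with `bic` produces the tagged ~v without ever untagging. A standalone sketch under that 31-bit-payload, two's-complement assumption (helper names invented; this is not V8 code):

#include <cassert>
#include <cstdint>
#include <initializer_list>

static int32_t SmiTag(int32_t v)   { return v << 1; }  // tag bit 0 == 0
static int32_t SmiUntag(int32_t t) { return t >> 1; }  // assumes arithmetic shift

static int32_t SmiBitNot(int32_t tagged) {
  return ~tagged & ~1;  // mvn r0, r0; bic r0, r0, #kSmiTagMask
}

int main() {
  for (int32_t v : {0, 1, -1, 42, -12345}) {
    assert(SmiUntag(SmiBitNot(SmiTag(v))) == ~v);
  }
}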
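
In the deleted GenerateHeapNumberCodeSub, negating a heap number never touches the FPU: the stub XORs HeapNumber::kSignMask into the exponent word, i.e. flips bit 63 of the stored IEEE-754 double. A plain C++ sketch of the same bit trick (not V8 code):

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

static double FlipSign(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);  // the words the stub ldr's and str's
  bits ^= uint64_t{1} << 63;            // eor r2, r2, #kSignMask on the high word
  std::memcpy(&d, &bits, sizeof d);
  return d;
}

int main() {
  assert(FlipSign(1.5) == -1.5);
  assert(std::signbit(FlipSign(0.0)));  // yields -0.0, which no smi can hold
}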
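
The deleted BIT_NOT path checks whether the inverted 32-bit integer still fits in a smi with `cmn r1, #0x40000000; b mi, &try_float`: cmn adds 2^30, and the sign flag ends up set exactly when the value lies outside the smi payload range [-2^30, 2^30 - 1]. A sketch of the same predicate (assumes the 31-bit payload layout above; not V8 code):

#include <cassert>
#include <cstdint>

static bool FitsSmi(int32_t v) {
  // cmn r1, #0x40000000 computes v + 2^30; "b mi" tests bit 31 of the sum.
  return (static_cast<uint32_t>(v) + 0x40000000u) < 0x80000000u;
}

int main() {
  assert(FitsSmi(0) && FitsSmi(-1073741824) && FitsSmi(1073741823));
  assert(!FitsSmi(1073741824) && !FitsSmi(INT32_MIN));
}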
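
Perhaps the subtlest deleted detail is how GenerateHeapNumberCodeBitNot keeps its untagged int32 alive across the Runtime::kNumberAlloc call: a raw integer on the stack would be scanned by the GC as if it were a tagged pointer, so the stub splits it into two values whose low bits are zero (each therefore looks like a smi), pushes those, and reassembles the integer after the call. A standalone round-trip sketch (struct and helper names invented; not V8 code):

#include <cassert>
#include <cstdint>
#include <initializer_list>

struct SmiLookingPair {
  uint32_t low_bit_high;  // original bit 0, parked in bit 31; bit 0 is now 0
  uint32_t high_bits;     // original bits 31..1; bit 0 cleared
};

static SmiLookingPair Split(uint32_t value) {
  return { value << 31,    // mov r2, r1, LSL #31
           value & ~1u };  // bic r1, r1, #1
}

static uint32_t Join(const SmiLookingPair& p) {
  return p.high_bits | (p.low_bit_high >> 31);  // orr r1, r1, r2, LSR #31
}

int main() {
  for (uint32_t v : {0u, 1u, 0xDEADBEEFu, 0xFFFFFFFFu}) {
    assert(Join(Split(v)) == v);  // survives the GC-safe detour intact
  }
}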