Chromium Code Reviews

Side by Side Diff: src/arm/code-stubs-arm.cc

Issue 17229005: Convert UnaryOpStub to a HydrogenCodeStub (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: rebase Created 7 years, 5 months ago
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 208 matching lines...)
 }


 void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
     Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
   InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1);
 }


+void UnaryOpStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { r0 };
+  descriptor->register_param_count_ = 1;
+  descriptor->register_params_ = registers;
+  descriptor->deoptimization_handler_ =
+      FUNCTION_ADDR(UnaryOpIC_Miss);
+}
+
+
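This new hunk is the HydrogenCodeStub half of the conversion: instead of hand-written generator methods, UnaryOpStub now only registers its calling convention (a single parameter, the operand, in r0) and the IC-miss handler used when the stub deoptimizes. Roughly, such a descriptor carries just three fields; a simplified stand-in model (the struct and typedefs here are illustrative, not V8's real declarations):

    typedef int Register;            // stand-in for V8's Register type
    typedef void (*MissHandler)();   // stand-in for the runtime entry

    struct CodeStubInterfaceDescriptorModel {
      int register_param_count_;
      const Register* register_params_;
      MissHandler deoptimization_handler_;
    };

    static const Register r0 = 0;

    void InitializeUnaryOpDescriptor(CodeStubInterfaceDescriptorModel* d,
                                     MissHandler unary_op_ic_miss) {
      static const Register registers[] = { r0 };  // operand arrives in r0
      d->register_param_count_ = 1;
      d->register_params_ = registers;
      d->deoptimization_handler_ = unary_op_ic_miss;
    }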
 #define __ ACCESS_MASM(masm)

+
 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                           Label* slow,
                                           Condition cond);
 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                     Register lhs,
                                     Register rhs,
                                     Label* lhs_not_nan,
                                     Label* slow,
                                     bool strict);
 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
(...skipping 1041 matching lines...)
   __ CallCFunction(
       ExternalReference::store_buffer_overflow_function(masm->isolate()),
       argument_count);
   if (save_doubles_ == kSaveFPRegs) {
     __ RestoreFPRegs(sp, scratch);
   }
   __ ldm(ia_w, sp, kCallerSaved | pc.bit());  // Also pop pc to get Ret(0).
 }


-void UnaryOpStub::PrintName(StringStream* stream) {
-  const char* op_name = Token::Name(op_);
-  const char* overwrite_name = NULL;  // Make g++ happy.
-  switch (mode_) {
-    case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
-    case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
-  }
-  stream->Add("UnaryOpStub_%s_%s_%s",
-              op_name,
-              overwrite_name,
-              UnaryOpIC::GetName(operand_type_));
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::Generate(MacroAssembler* masm) {
-  switch (operand_type_) {
-    case UnaryOpIC::UNINITIALIZED:
-      GenerateTypeTransition(masm);
-      break;
-    case UnaryOpIC::SMI:
-      GenerateSmiStub(masm);
-      break;
-    case UnaryOpIC::NUMBER:
-      GenerateNumberStub(masm);
-      break;
-    case UnaryOpIC::GENERIC:
-      GenerateGenericStub(masm);
-      break;
-  }
-}
-
-
-void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
-  __ mov(r3, Operand(r0));  // the operand
-  __ mov(r2, Operand(Smi::FromInt(op_)));
-  __ mov(r1, Operand(Smi::FromInt(mode_)));
-  __ mov(r0, Operand(Smi::FromInt(operand_type_)));
-  __ Push(r3, r2, r1, r0);
-
-  __ TailCallExternalReference(
-      ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
-}
-
-
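For readers unfamiliar with the deleted IC machinery: the type-transition path pushes the operand plus the stub's op, overwrite mode, and observed operand type (each boxed as a smi) and tail-calls the runtime, which patches in a more specialized stub. On 32-bit ARM a smi is just the integer shifted left one bit with a zero tag bit; a minimal model of the encoding (illustrative, not V8's actual Smi class; assumes two's complement):

    #include <cassert>
    #include <cstdint>

    int32_t SmiFromInt(int32_t value) { return value << 1; }  // tag bit 0 is 0
    int32_t SmiToInt(int32_t tagged) { return tagged >> 1; }  // arithmetic shift

    int main() {
      assert(SmiToInt(SmiFromInt(-42)) == -42);
      assert((SmiFromInt(7) & 1) == 0);  // kSmiTagMask bit stays clear
    }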
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
-  switch (op_) {
-    case Token::SUB:
-      GenerateSmiStubSub(masm);
-      break;
-    case Token::BIT_NOT:
-      GenerateSmiStubBitNot(masm);
-      break;
-    default:
-      UNREACHABLE();
-  }
-}
-
-
-void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
-  Label non_smi, slow;
-  GenerateSmiCodeSub(masm, &non_smi, &slow);
-  __ bind(&non_smi);
-  __ bind(&slow);
-  GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
-  Label non_smi;
-  GenerateSmiCodeBitNot(masm, &non_smi);
-  __ bind(&non_smi);
-  GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
-                                     Label* non_smi,
-                                     Label* slow) {
-  __ JumpIfNotSmi(r0, non_smi);
-
-  // The result of negating zero or the smallest negative smi is not a smi.
-  __ bic(ip, r0, Operand(0x80000000), SetCC);
-  __ b(eq, slow);
-
-  // Return '0 - value'.
-  __ rsb(r0, r0, Operand::Zero());
-  __ Ret();
-}
-
-
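Why the bic/beq guard above works: with the encoding just described, tagged 0 is 0x00000000 and the tagged minimum smi (-2^30) is 0x80000000. Clearing bit 31 and testing the rest for zero therefore catches exactly the two inputs whose negation is not a smi: -0 must be a heap number, and 2^30 does not fit the 31-bit payload. A plain-C++ model of the guard (illustrative only):

    #include <cassert>
    #include <cstdint>

    // bic ip, r0, #0x80000000, SetCC; beq slow
    bool NegationStaysSmi(uint32_t tagged) {
      return (tagged & ~0x80000000u) != 0;
    }

    int main() {
      assert(!NegationStaysSmi(0x00000000u));  // tagged 0: -0 is not a smi
      assert(!NegationStaysSmi(0x80000000u));  // tagged -2^30: 2^30 overflows
      assert(NegationStaysSmi(0x00000002u));   // tagged 1: -1 is fine
    }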
-void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
-                                        Label* non_smi) {
-  __ JumpIfNotSmi(r0, non_smi);
-
-  // Flip bits and revert inverted smi-tag.
-  __ mvn(r0, Operand(r0));
-  __ bic(r0, r0, Operand(kSmiTagMask));
-  __ Ret();
-}
-
-
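The bit-not fast path never untags: complementing a tagged value gives ~(v << 1) == ((~v) << 1) | 1, so flipping every bit and then clearing the now-set tag bit leaves the correctly tagged complement, with no shifts at all. A one-line model (illustrative; assumes two's complement):

    #include <cassert>
    #include <cstdint>

    // mvn (flip all bits), then bic #1 (clear the inverted smi tag).
    int32_t SmiBitNot(int32_t tagged) { return ~tagged & ~1; }

    int main() {
      assert(SmiBitNot(10) == -12);  // tagged 5 -> tagged -6, and ~5 == -6
    }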
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
-  switch (op_) {
-    case Token::SUB:
-      GenerateNumberStubSub(masm);
-      break;
-    case Token::BIT_NOT:
-      GenerateNumberStubBitNot(masm);
-      break;
-    default:
-      UNREACHABLE();
-  }
-}
-
-
-void UnaryOpStub::GenerateNumberStubSub(MacroAssembler* masm) {
-  Label non_smi, slow, call_builtin;
-  GenerateSmiCodeSub(masm, &non_smi, &call_builtin);
-  __ bind(&non_smi);
-  GenerateHeapNumberCodeSub(masm, &slow);
-  __ bind(&slow);
-  GenerateTypeTransition(masm);
-  __ bind(&call_builtin);
-  GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateNumberStubBitNot(MacroAssembler* masm) {
-  Label non_smi, slow;
-  GenerateSmiCodeBitNot(masm, &non_smi);
-  __ bind(&non_smi);
-  GenerateHeapNumberCodeBitNot(masm, &slow);
-  __ bind(&slow);
-  GenerateTypeTransition(masm);
-}
-
-void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
-                                            Label* slow) {
-  EmitCheckForHeapNumber(masm, r0, r1, r6, slow);
-  // r0 is a heap number.  Get a new heap number in r1.
-  if (mode_ == UNARY_OVERWRITE) {
-    __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
-    __ eor(r2, r2, Operand(HeapNumber::kSignMask));  // Flip sign.
-    __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
-  } else {
-    Label slow_allocate_heapnumber, heapnumber_allocated;
-    __ AllocateHeapNumber(r1, r2, r3, r6, &slow_allocate_heapnumber);
-    __ jmp(&heapnumber_allocated);
-
-    __ bind(&slow_allocate_heapnumber);
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ push(r0);
-      __ CallRuntime(Runtime::kNumberAlloc, 0);
-      __ mov(r1, Operand(r0));
-      __ pop(r0);
-    }
-
-    __ bind(&heapnumber_allocated);
-    __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
-    __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
-    __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
-    __ eor(r2, r2, Operand(HeapNumber::kSignMask));  // Flip sign.
-    __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
-    __ mov(r0, Operand(r1));
-  }
-  __ Ret();
-}
-
-
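Negating a heap number needs no floating-point arithmetic at all: the stub xors HeapNumber::kSignMask into the exponent word, i.e. the upper 32 bits of the IEEE-754 double, which hold the sign bit. The only real work is deciding whether the input object may be mutated in place (UNARY_OVERWRITE) or a fresh heap number must be allocated first. A self-contained model of the sign flip using the whole 64-bit pattern (illustrative; the stub edits only the upper word in place):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    double FlipSign(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      bits ^= 0x8000000000000000ull;  // sign bit of the upper word
      std::memcpy(&d, &bits, sizeof d);
      return d;
    }

    int main() { assert(FlipSign(1.5) == -1.5); }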
-void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm,
-                                               Label* slow) {
-  EmitCheckForHeapNumber(masm, r0, r1, r6, slow);
-
-  // Convert the heap number in r0 to an untagged integer in r1.
-  __ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset));
-  __ ECMAToInt32(r1, d0, r2, r3, r4, d1);
-
-  // Do the bitwise operation and check if the result fits in a smi.
-  Label try_float;
-  __ mvn(r1, Operand(r1));
-  __ cmn(r1, Operand(0x40000000));
-  __ b(mi, &try_float);
-
-  // Tag the result as a smi and we're done.
-  __ SmiTag(r0, r1);
-  __ Ret();
-
-  // Try to store the result in a heap number.
-  __ bind(&try_float);
-  if (mode_ == UNARY_NO_OVERWRITE) {
-    Label slow_allocate_heapnumber, heapnumber_allocated;
-    __ AllocateHeapNumber(r0, r3, r4, r6, &slow_allocate_heapnumber);
-    __ jmp(&heapnumber_allocated);
-
-    __ bind(&slow_allocate_heapnumber);
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      // Push the lower bit of the result (left shifted to look like a smi).
-      __ mov(r2, Operand(r1, LSL, 31));
-      // Push the 31 high bits (bit 0 cleared to look like a smi).
-      __ bic(r1, r1, Operand(1));
-      __ Push(r2, r1);
-      __ CallRuntime(Runtime::kNumberAlloc, 0);
-      __ Pop(r2, r1);  // Restore the result.
-      __ orr(r1, r1, Operand(r2, LSR, 31));
-    }
-    __ bind(&heapnumber_allocated);
-  }
-
-  __ vmov(s0, r1);
-  __ vcvt_f64_s32(d0, s0);
-  __ vstr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset));
-  __ Ret();
-}
-
-
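Two details above deserve a note. First, `cmn r1, #0x40000000` followed by `b mi` tests whether ~value fits a smi: adding 2^30 produces a negative 32-bit result exactly when the value lies outside the smi range [-2^30, 2^30). Second, Runtime::kNumberAlloc can trigger GC while r1 holds a raw integer that must not be misread as a pointer, so the stub parks it as two smi-looking words (tag bit clear in both) and reassembles it afterwards. A model of that split (illustrative only):

    #include <cassert>
    #include <cstdint>

    void SplitForGC(uint32_t value, uint32_t* low_bit, uint32_t* high31) {
      *low_bit = value << 31;  // mov r2, Operand(r1, LSL, 31): bit 0 -> bit 31
      *high31 = value & ~1u;   // bic r1, r1, Operand(1): clear the low bit
    }

    uint32_t Rejoin(uint32_t low_bit, uint32_t high31) {
      return high31 | (low_bit >> 31);  // orr r1, r1, Operand(r2, LSR, 31)
    }

    int main() {
      uint32_t lo, hi;
      SplitForGC(0xDEADBEEFu, &lo, &hi);
      assert(Rejoin(lo, hi) == 0xDEADBEEFu);
    }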
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
-  switch (op_) {
-    case Token::SUB:
-      GenerateGenericStubSub(masm);
-      break;
-    case Token::BIT_NOT:
-      GenerateGenericStubBitNot(masm);
-      break;
-    default:
-      UNREACHABLE();
-  }
-}
-
-
-void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
-  Label non_smi, slow;
-  GenerateSmiCodeSub(masm, &non_smi, &slow);
-  __ bind(&non_smi);
-  GenerateHeapNumberCodeSub(masm, &slow);
-  __ bind(&slow);
-  GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
-  Label non_smi, slow;
-  GenerateSmiCodeBitNot(masm, &non_smi);
-  __ bind(&non_smi);
-  GenerateHeapNumberCodeBitNot(masm, &slow);
-  __ bind(&slow);
-  GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
-  // Handle the slow case by jumping to the JavaScript builtin.
-  __ push(r0);
-  switch (op_) {
-    case Token::SUB:
-      __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
-      break;
-    case Token::BIT_NOT:
-      __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
-      break;
-    default:
-      UNREACHABLE();
-  }
-}
-
-
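The generic fallback just pushes the operand and tail-jumps into the corresponding JavaScript builtin, which applies the full ECMAScript coercions. In effect, for an operand already converted to a number (a sketch of the language semantics, not of any V8 API; ToInt32 edge cases such as NaN and Infinity are elided):

    #include <cmath>
    #include <cstdint>

    double UnaryMinus(double x) { return -x; }

    int32_t BitNot(double x) {
      // ~x is defined on ToInt32(x): truncate, wrap modulo 2^32, complement.
      uint32_t u = static_cast<uint32_t>(static_cast<int64_t>(std::trunc(x)));
      return ~static_cast<int32_t>(u);
    }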
 // Generates code to call a C function to do a double operation.
 // This code never falls through, but returns with a heap number containing
 // the result in r0.
 // Register heapnumber_result must be a heap number in which the
 // result of the operation will be stored.
 // Requires the following layout on entry:
 // d0: Left value.
 // d1: Right value.
 // If soft float ABI, use also r0, r1, r2, r3.
 static void CallCCodeForDoubleOperation(MacroAssembler* masm,
(...skipping 5851 matching lines...)
   __ bind(&fast_elements_case);
   GenerateCase(masm, FAST_ELEMENTS);
 }


 #undef __

 } }  // namespace v8::internal

 #endif  // V8_TARGET_ARCH_ARM
