Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(339)

Side by Side Diff: src/arm/code-stubs-arm.cc

Issue 6606006: [Isolates] Merge 6500:6700 from bleeding_edge to isolates. (Closed) Base URL: http://v8.googlecode.com/svn/branches/experimental/isolates/
Patch Set: '' Created 9 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/arm/code-stubs-arm.h ('k') | src/arm/codegen-arm.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after
48 Register rhs, 48 Register rhs,
49 Label* lhs_not_nan, 49 Label* lhs_not_nan,
50 Label* slow, 50 Label* slow,
51 bool strict); 51 bool strict);
52 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cond); 52 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cond);
53 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, 53 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
54 Register lhs, 54 Register lhs,
55 Register rhs); 55 Register rhs);
56 56
57 57
58 void ToNumberStub::Generate(MacroAssembler* masm) {
59 // The ToNumber stub takes one argument in r0.
60 Label check_heap_number, call_builtin;
61 __ tst(r0, Operand(kSmiTagMask));
62 __ b(ne, &check_heap_number);
63 __ Ret();
64
65 __ bind(&check_heap_number);
66 __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
67 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
68 __ cmp(r1, ip);
69 __ b(ne, &call_builtin);
70 __ Ret();
71
72 __ bind(&call_builtin);
73 __ push(r0);
74 __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_JS);
75 }
76
77
58 void FastNewClosureStub::Generate(MacroAssembler* masm) { 78 void FastNewClosureStub::Generate(MacroAssembler* masm) {
59 // Create a new closure from the given function info in new 79 // Create a new closure from the given function info in new
60 // space. Set the context to the current context in cp. 80 // space. Set the context to the current context in cp.
61 Label gc; 81 Label gc;
62 82
63 // Pop the function info from the stack. 83 // Pop the function info from the stack.
64 __ pop(r3); 84 __ pop(r3);
65 85
66 // Attempt to allocate new JSFunction in new space. 86 // Attempt to allocate new JSFunction in new space.
67 __ AllocateInNewSpace(JSFunction::kSize, 87 __ AllocateInNewSpace(JSFunction::kSize,
(...skipping 294 matching lines...) Expand 10 before | Expand all | Expand 10 after
362 Destination destination, 382 Destination destination,
363 Register scratch1, 383 Register scratch1,
364 Register scratch2); 384 Register scratch2);
365 385
366 // Loads objects from r0 and r1 (right and left in binary operations) into 386 // Loads objects from r0 and r1 (right and left in binary operations) into
367 // floating point registers. Depending on the destination the values end up 387 // floating point registers. Depending on the destination the values end up
368 // either d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination is 388 // either d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination is
369 // floating point registers VFP3 must be supported. If core registers are 389 // floating point registers VFP3 must be supported. If core registers are
370 // requested when VFP3 is supported d6 and d7 will still be scratched. If 390 // requested when VFP3 is supported d6 and d7 will still be scratched. If
371 // either r0 or r1 is not a number (not smi and not heap number object) the 391 // either r0 or r1 is not a number (not smi and not heap number object) the
372 // not_number label is jumped to. 392 // not_number label is jumped to with r0 and r1 intact.
373 static void LoadOperands(MacroAssembler* masm, 393 static void LoadOperands(MacroAssembler* masm,
374 FloatingPointHelper::Destination destination, 394 FloatingPointHelper::Destination destination,
375 Register heap_number_map, 395 Register heap_number_map,
376 Register scratch1, 396 Register scratch1,
377 Register scratch2, 397 Register scratch2,
378 Label* not_number); 398 Label* not_number);
399
400 // Loads the number from object into dst as a 32-bit integer if possible. If
401 // the object is not a 32-bit integer control continues at the label
402 // not_int32. If VFP is supported double_scratch is used but not scratch2.
403 static void LoadNumberAsInteger(MacroAssembler* masm,
404 Register object,
405 Register dst,
406 Register heap_number_map,
407 Register scratch1,
408 Register scratch2,
409 DwVfpRegister double_scratch,
410 Label* not_int32);
411
379 private: 412 private:
380 static void LoadNumber(MacroAssembler* masm, 413 static void LoadNumber(MacroAssembler* masm,
381 FloatingPointHelper::Destination destination, 414 FloatingPointHelper::Destination destination,
382 Register object, 415 Register object,
383 DwVfpRegister dst, 416 DwVfpRegister dst,
384 Register dst1, 417 Register dst1,
385 Register dst2, 418 Register dst2,
386 Register heap_number_map, 419 Register heap_number_map,
387 Register scratch1, 420 Register scratch1,
388 Register scratch2, 421 Register scratch2,
389 Label* not_number); 422 Label* not_number);
390 }; 423 };
391 424
392 425
393 void FloatingPointHelper::LoadSmis(MacroAssembler* masm, 426 void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
394 FloatingPointHelper::Destination destination, 427 FloatingPointHelper::Destination destination,
395 Register scratch1, 428 Register scratch1,
396 Register scratch2) { 429 Register scratch2) {
397 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) { 430 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
398 CpuFeatures::Scope scope(VFP3); 431 CpuFeatures::Scope scope(VFP3);
399 __ mov(scratch1, Operand(r0, ASR, kSmiTagSize)); 432 __ mov(scratch1, Operand(r0, ASR, kSmiTagSize));
400 __ vmov(s15, scratch1); 433 __ vmov(d7.high(), scratch1);
401 __ vcvt_f64_s32(d7, s15); 434 __ vcvt_f64_s32(d7, d7.high());
402 __ mov(scratch1, Operand(r1, ASR, kSmiTagSize)); 435 __ mov(scratch1, Operand(r1, ASR, kSmiTagSize));
403 __ vmov(s13, scratch1); 436 __ vmov(d6.high(), scratch1);
404 __ vcvt_f64_s32(d6, s13); 437 __ vcvt_f64_s32(d6, d6.high());
405 if (destination == kCoreRegisters) { 438 if (destination == kCoreRegisters) {
406 __ vmov(r2, r3, d7); 439 __ vmov(r2, r3, d7);
407 __ vmov(r0, r1, d6); 440 __ vmov(r0, r1, d6);
408 } 441 }
409 } else { 442 } else {
410 ASSERT(destination == kCoreRegisters); 443 ASSERT(destination == kCoreRegisters);
411 // Write Smi from r0 to r3 and r2 in double format. 444 // Write Smi from r0 to r3 and r2 in double format.
412 __ mov(scratch1, Operand(r0)); 445 __ mov(scratch1, Operand(r0));
413 ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2); 446 ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2);
414 __ push(lr); 447 __ push(lr);
(...skipping 19 matching lines...) Expand all
434 LoadNumber(masm, destination, 467 LoadNumber(masm, destination,
435 r0, d7, r2, r3, heap_number_map, scratch1, scratch2, slow); 468 r0, d7, r2, r3, heap_number_map, scratch1, scratch2, slow);
436 469
437 // Load left operand (r1) to d7 or r0/r1. 470 // Load left operand (r1) to d7 or r0/r1.
438 LoadNumber(masm, destination, 471 LoadNumber(masm, destination,
439 r1, d6, r0, r1, heap_number_map, scratch1, scratch2, slow); 472 r1, d6, r0, r1, heap_number_map, scratch1, scratch2, slow);
440 } 473 }
441 474
442 475
443 void FloatingPointHelper::LoadNumber(MacroAssembler* masm, 476 void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
444 Destination destination, 477 Destination destination,
445 Register object, 478 Register object,
446 DwVfpRegister dst, 479 DwVfpRegister dst,
447 Register dst1, 480 Register dst1,
448 Register dst2, 481 Register dst2,
449 Register heap_number_map, 482 Register heap_number_map,
450 Register scratch1, 483 Register scratch1,
451 Register scratch2, 484 Register scratch2,
452 Label* not_number) { 485 Label* not_number) {
486 if (FLAG_debug_code) {
487 __ AbortIfNotRootValue(heap_number_map,
488 Heap::kHeapNumberMapRootIndex,
489 "HeapNumberMap register clobbered.");
490 }
491
453 Label is_smi, done; 492 Label is_smi, done;
454 493
455 __ JumpIfSmi(object, &is_smi); 494 __ JumpIfSmi(object, &is_smi);
456 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); 495 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
457 496
458 // Handle loading a double from a heap number. 497 // Handle loading a double from a heap number.
459 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) { 498 if (Isolate::Current()->cpu_features()->IsSupported(VFP3) &&
499 destination == kVFPRegisters) {
460 CpuFeatures::Scope scope(VFP3); 500 CpuFeatures::Scope scope(VFP3);
461 // Load the double from tagged HeapNumber to double register. 501 // Load the double from tagged HeapNumber to double register.
462 __ sub(scratch1, object, Operand(kHeapObjectTag)); 502 __ sub(scratch1, object, Operand(kHeapObjectTag));
463 __ vldr(dst, scratch1, HeapNumber::kValueOffset); 503 __ vldr(dst, scratch1, HeapNumber::kValueOffset);
464 } else { 504 } else {
465 ASSERT(destination == kCoreRegisters); 505 ASSERT(destination == kCoreRegisters);
466 // Load the double from heap number to dst1 and dst2 in double format. 506 // Load the double from heap number to dst1 and dst2 in double format.
467 __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset)); 507 __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
468 } 508 }
469 __ jmp(&done); 509 __ jmp(&done);
470 510
471 // Handle loading a double from a smi. 511 // Handle loading a double from a smi.
472 __ bind(&is_smi); 512 __ bind(&is_smi);
473 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) { 513 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
474 CpuFeatures::Scope scope(VFP3); 514 CpuFeatures::Scope scope(VFP3);
475 // Convert smi to double. 515 // Convert smi to double using VFP instructions.
476 __ SmiUntag(scratch1, object); 516 __ SmiUntag(scratch1, object);
477 __ vmov(dst.high(), scratch1); 517 __ vmov(dst.high(), scratch1);
478 __ vcvt_f64_s32(dst, dst.high()); 518 __ vcvt_f64_s32(dst, dst.high());
479 if (destination == kCoreRegisters) { 519 if (destination == kCoreRegisters) {
520 // Load the converted smi to dst1 and dst2 in double format.
480 __ vmov(dst1, dst2, dst); 521 __ vmov(dst1, dst2, dst);
481 } 522 }
482 } else { 523 } else {
483 ASSERT(destination == kCoreRegisters); 524 ASSERT(destination == kCoreRegisters);
484 // Write Smi to dst1 and dst2 in double format. 525 // Write smi to dst1 and dst2 in double format.
485 __ mov(scratch1, Operand(object)); 526 __ mov(scratch1, Operand(object));
486 ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2); 527 ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
487 __ push(lr); 528 __ push(lr);
488 __ Call(stub.GetCode(), RelocInfo::CODE_TARGET); 529 __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
489 __ pop(lr); 530 __ pop(lr);
490 } 531 }
491 532
492 __ bind(&done); 533 __ bind(&done);
493 } 534 }
494 535
495 536
537 void FloatingPointHelper::LoadNumberAsInteger(MacroAssembler* masm,
538 Register object,
539 Register dst,
540 Register heap_number_map,
541 Register scratch1,
542 Register scratch2,
543 DwVfpRegister double_scratch,
544 Label* not_int32) {
545 if (FLAG_debug_code) {
546 __ AbortIfNotRootValue(heap_number_map,
547 Heap::kHeapNumberMapRootIndex,
548 "HeapNumberMap register clobbered.");
549 }
550 Label is_smi, done;
551 __ JumpIfSmi(object, &is_smi);
552 __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
553 __ cmp(scratch1, heap_number_map);
554 __ b(ne, not_int32);
555 __ ConvertToInt32(
556 object, dst, scratch1, scratch2, double_scratch, not_int32);
557 __ jmp(&done);
558 __ bind(&is_smi);
559 __ SmiUntag(dst, object);
560 __ bind(&done);
561 }
562
563
564
496 // See comment for class. 565 // See comment for class.
497 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) { 566 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
498 Label max_negative_int; 567 Label max_negative_int;
499 // the_int_ has the answer which is a signed int32 but not a Smi. 568 // the_int_ has the answer which is a signed int32 but not a Smi.
500 // We test for the special value that has a different exponent. This test 569 // We test for the special value that has a different exponent. This test
501 // has the neat side effect of setting the flags according to the sign. 570 // has the neat side effect of setting the flags according to the sign.
502 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u); 571 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
503 __ cmp(the_int_, Operand(0x80000000u)); 572 __ cmp(the_int_, Operand(0x80000000u));
504 __ b(eq, &max_negative_int); 573 __ b(eq, &max_negative_int);
505 // Set up the correct exponent in scratch_. All non-Smi int32s have the same. 574 // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
(...skipping 1144 matching lines...) Expand 10 before | Expand all | Expand 10 after
1650 Label done_checking_rhs, done_checking_lhs; 1719 Label done_checking_rhs, done_checking_lhs;
1651 1720
1652 Register heap_number_map = r6; 1721 Register heap_number_map = r6;
1653 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); 1722 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
1654 1723
1655 __ tst(lhs, Operand(kSmiTagMask)); 1724 __ tst(lhs, Operand(kSmiTagMask));
1656 __ b(eq, &lhs_is_smi); // It's a Smi so don't check it's a heap number. 1725 __ b(eq, &lhs_is_smi); // It's a Smi so don't check it's a heap number.
1657 __ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset)); 1726 __ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset));
1658 __ cmp(r4, heap_number_map); 1727 __ cmp(r4, heap_number_map);
1659 __ b(ne, &slow); 1728 __ b(ne, &slow);
1660 __ ConvertToInt32(lhs, r3, r5, r4, &slow); 1729 __ ConvertToInt32(lhs, r3, r5, r4, d0, &slow);
1661 __ jmp(&done_checking_lhs); 1730 __ jmp(&done_checking_lhs);
1662 __ bind(&lhs_is_smi); 1731 __ bind(&lhs_is_smi);
1663 __ mov(r3, Operand(lhs, ASR, 1)); 1732 __ mov(r3, Operand(lhs, ASR, 1));
1664 __ bind(&done_checking_lhs); 1733 __ bind(&done_checking_lhs);
1665 1734
1666 __ tst(rhs, Operand(kSmiTagMask)); 1735 __ tst(rhs, Operand(kSmiTagMask));
1667 __ b(eq, &rhs_is_smi); // It's a Smi so don't check it's a heap number. 1736 __ b(eq, &rhs_is_smi); // It's a Smi so don't check it's a heap number.
1668 __ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset)); 1737 __ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset));
1669 __ cmp(r4, heap_number_map); 1738 __ cmp(r4, heap_number_map);
1670 __ b(ne, &slow); 1739 __ b(ne, &slow);
1671 __ ConvertToInt32(rhs, r2, r5, r4, &slow); 1740 __ ConvertToInt32(rhs, r2, r5, r4, d0, &slow);
1672 __ jmp(&done_checking_rhs); 1741 __ jmp(&done_checking_rhs);
1673 __ bind(&rhs_is_smi); 1742 __ bind(&rhs_is_smi);
1674 __ mov(r2, Operand(rhs, ASR, 1)); 1743 __ mov(r2, Operand(rhs, ASR, 1));
1675 __ bind(&done_checking_rhs); 1744 __ bind(&done_checking_rhs);
1676 1745
1677 ASSERT(((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0)))); 1746 ASSERT(((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0))));
1678 1747
1679 // r0 and r1: Original operands (Smi or heap numbers). 1748 // r0 and r1: Original operands (Smi or heap numbers).
1680 // r2 and r3: Signed int32 operands. 1749 // r2 and r3: Signed int32 operands.
1681 switch (op_) { 1750 switch (op_) {
(...skipping 748 matching lines...) Expand 10 before | Expand all | Expand 10 after
2430 2499
2431 OS::SNPrintF(Vector<char>(name_, kMaxNameLength), 2500 OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
2432 "TypeRecordingBinaryOpStub_%s_%s_%s", 2501 "TypeRecordingBinaryOpStub_%s_%s_%s",
2433 op_name, 2502 op_name,
2434 overwrite_name, 2503 overwrite_name,
2435 TRBinaryOpIC::GetName(operands_type_)); 2504 TRBinaryOpIC::GetName(operands_type_));
2436 return name_; 2505 return name_;
2437 } 2506 }
2438 2507
2439 2508
2440 void TypeRecordingBinaryOpStub::GenerateOptimisticSmiOperation( 2509 void TypeRecordingBinaryOpStub::GenerateSmiSmiOperation(
2441 MacroAssembler* masm) { 2510 MacroAssembler* masm) {
2442 Register left = r1; 2511 Register left = r1;
2443 Register right = r0; 2512 Register right = r0;
2513 Register scratch1 = r7;
2514 Register scratch2 = r9;
2444 2515
2445 ASSERT(right.is(r0)); 2516 ASSERT(right.is(r0));
2517 STATIC_ASSERT(kSmiTag == 0);
2446 2518
2519 Label not_smi_result;
2447 switch (op_) { 2520 switch (op_) {
2448 case Token::ADD: 2521 case Token::ADD:
2449 __ add(right, left, Operand(right), SetCC); // Add optimistically. 2522 __ add(right, left, Operand(right), SetCC); // Add optimistically.
2450 __ Ret(vc); 2523 __ Ret(vc);
2451 __ sub(right, right, Operand(left)); // Revert optimistic add. 2524 __ sub(right, right, Operand(left)); // Revert optimistic add.
2452 break; 2525 break;
2453 case Token::SUB: 2526 case Token::SUB:
2454 __ sub(right, left, Operand(right), SetCC); // Subtract optimistically. 2527 __ sub(right, left, Operand(right), SetCC); // Subtract optimistically.
2455 __ Ret(vc); 2528 __ Ret(vc);
2456 __ sub(right, left, Operand(right)); // Revert optimistic subtract. 2529 __ sub(right, left, Operand(right)); // Revert optimistic subtract.
2457 break; 2530 break;
2531 case Token::MUL:
2532 // Remove tag from one of the operands. This way the multiplication result
2533 // will be a smi if it fits the smi range.
2534 __ SmiUntag(ip, right);
2535 // Do multiplication
2536 // scratch1 = lower 32 bits of ip * left.
2537 // scratch2 = higher 32 bits of ip * left.
2538 __ smull(scratch1, scratch2, left, ip);
2539 // Check for overflowing the smi range - no overflow if higher 33 bits of
2540 // the result are identical.
2541 __ mov(ip, Operand(scratch1, ASR, 31));
2542 __ cmp(ip, Operand(scratch2));
2543 __ b(ne, &not_smi_result);
2544 // Go slow on zero result to handle -0.
2545 __ tst(scratch1, Operand(scratch1));
2546 __ mov(right, Operand(scratch1), LeaveCC, ne);
2547 __ Ret(ne);
2548 // We need -0 if we were multiplying a negative number with 0 to get 0.
2549 // We know one of them was zero.
2550 __ add(scratch2, right, Operand(left), SetCC);
2551 __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl);
2552 __ Ret(pl); // Return smi 0 if the non-zero one was positive.
2553 // We fall through here if we multiplied a negative number with 0, because
2554 // that would mean we should produce -0.
2555 break;
2556 case Token::DIV:
2557 // Check for power of two on the right hand side.
2558 __ JumpIfNotPowerOfTwoOrZero(right, scratch1, &not_smi_result);
2559 // Check for positive and no remainder (scratch1 contains right - 1).
2560 __ orr(scratch2, scratch1, Operand(0x80000000u));
2561 __ tst(left, scratch2);
2562 __ b(ne, &not_smi_result);
2563
2564 // Perform division by shifting.
2565 __ CountLeadingZeros(scratch1, scratch1, scratch2);
2566 __ rsb(scratch1, scratch1, Operand(31));
2567 __ mov(right, Operand(left, LSR, scratch1));
2568 __ Ret();
2569 break;
2570 case Token::MOD:
2571 // Check for two positive smis.
2572 __ orr(scratch1, left, Operand(right));
2573 __ tst(scratch1, Operand(0x80000000u | kSmiTagMask));
2574 __ b(ne, &not_smi_result);
2575
2576 // Check for power of two on the right hand side.
2577 __ JumpIfNotPowerOfTwoOrZero(right, scratch1, &not_smi_result);
2578
2579 // Perform modulus by masking.
2580 __ and_(right, left, Operand(scratch1));
2581 __ Ret();
2582 break;
2583 case Token::BIT_OR:
2584 __ orr(right, left, Operand(right));
2585 __ Ret();
2586 break;
2587 case Token::BIT_AND:
2588 __ and_(right, left, Operand(right));
2589 __ Ret();
2590 break;
2591 case Token::BIT_XOR:
2592 __ eor(right, left, Operand(right));
2593 __ Ret();
2594 break;
2458 default: 2595 default:
2459 UNREACHABLE(); 2596 UNREACHABLE();
2460 } 2597 }
2598 __ bind(&not_smi_result);
2461 } 2599 }
2462 2600
2463 2601
2464 void TypeRecordingBinaryOpStub::GenerateVFPOperation( 2602 void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
2465 MacroAssembler* masm) { 2603 bool smi_operands,
2604 Label* not_numbers,
2605 Label* gc_required) {
2606 Register left = r1;
2607 Register right = r0;
2608 Register scratch1 = r7;
2609 Register scratch2 = r9;
2610
2611 ASSERT(smi_operands || (not_numbers != NULL));
2612 if (smi_operands && FLAG_debug_code) {
2613 __ AbortIfNotSmi(left);
2614 __ AbortIfNotSmi(right);
2615 }
2616
2617 Register heap_number_map = r6;
2618 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2619
2466 switch (op_) { 2620 switch (op_) {
2467 case Token::ADD: 2621 case Token::ADD:
2468 __ vadd(d5, d6, d7);
2469 break;
2470 case Token::SUB: 2622 case Token::SUB:
2471 __ vsub(d5, d6, d7); 2623 case Token::MUL:
2472 break; 2624 case Token::DIV:
2625 case Token::MOD: {
2626 // Load left and right operands into d6 and d7 or r0/r1 and r2/r3
2627 // depending on whether VFP3 is available or not.
2628 FloatingPointHelper::Destination destination =
2629 Isolate::Current()->cpu_features()->IsSupported(VFP3) &&
2630 op_ != Token::MOD ?
2631 FloatingPointHelper::kVFPRegisters :
2632 FloatingPointHelper::kCoreRegisters;
2633
2634 // Allocate new heap number for result.
2635 Register result = r5;
2636 __ AllocateHeapNumber(
2637 result, scratch1, scratch2, heap_number_map, gc_required);
2638
2639 // Load the operands.
2640 if (smi_operands) {
2641 FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
2642 } else {
2643 FloatingPointHelper::LoadOperands(masm,
2644 destination,
2645 heap_number_map,
2646 scratch1,
2647 scratch2,
2648 not_numbers);
2649 }
2650
2651 // Calculate the result.
2652 if (destination == FloatingPointHelper::kVFPRegisters) {
2653 // Using VFP registers:
2654 // d6: Left value
2655 // d7: Right value
2656 CpuFeatures::Scope scope(VFP3);
2657 switch (op_) {
2658 case Token::ADD:
2659 __ vadd(d5, d6, d7);
2660 break;
2661 case Token::SUB:
2662 __ vsub(d5, d6, d7);
2663 break;
2664 case Token::MUL:
2665 __ vmul(d5, d6, d7);
2666 break;
2667 case Token::DIV:
2668 __ vdiv(d5, d6, d7);
2669 break;
2670 default:
2671 UNREACHABLE();
2672 }
2673
2674 __ sub(r0, result, Operand(kHeapObjectTag));
2675 __ vstr(d5, r0, HeapNumber::kValueOffset);
2676 __ add(r0, r0, Operand(kHeapObjectTag));
2677 __ Ret();
2678 } else {
2679 // Using core registers:
2680 // r0: Left value (least significant part of mantissa).
2681 // r1: Left value (sign, exponent, top of mantissa).
2682 // r2: Right value (least significant part of mantissa).
2683 // r3: Right value (sign, exponent, top of mantissa).
2684
2685 // Push the current return address before the C call. Return will be
2686 // through pop(pc) below.
2687 __ push(lr);
2688 __ PrepareCallCFunction(4, scratch1); // Two doubles are 4 arguments.
2689 // Call C routine that may not cause GC or other trouble. r5 is callee
2690 // save.
2691 __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
2692 // Store answer in the overwritable heap number.
2693 #if !defined(USE_ARM_EABI)
2694 // Double returned in fp coprocessor register 0 and 1, encoded as
2695 // register cr8. Offsets must be divisible by 4 for coprocessor so we
2696 // need to subtract the tag from r5.
2697 __ sub(scratch1, result, Operand(kHeapObjectTag));
2698 __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset));
2699 #else
2700 // Double returned in registers 0 and 1.
2701 __ Strd(r0, r1, FieldMemOperand(result, HeapNumber::kValueOffset));
2702 #endif
2703 // Place result in r0 and return to the pushed return address.
2704 __ mov(r0, Operand(result));
2705 __ pop(pc);
2706 }
2707 break;
2708 }
2709 case Token::BIT_OR:
2710 case Token::BIT_XOR:
2711 case Token::BIT_AND: {
2712 if (smi_operands) {
2713 __ SmiUntag(r3, left);
2714 __ SmiUntag(r2, right);
2715 } else {
2716 // Convert operands to 32-bit integers. Right in r2 and left in r3.
2717 FloatingPointHelper::LoadNumberAsInteger(masm,
2718 left,
2719 r3,
2720 heap_number_map,
2721 scratch1,
2722 scratch2,
2723 d0,
2724 not_numbers);
2725 FloatingPointHelper::LoadNumberAsInteger(masm,
2726 right,
2727 r2,
2728 heap_number_map,
2729 scratch1,
2730 scratch2,
2731 d0,
2732 not_numbers);
2733 }
2734 switch (op_) {
2735 case Token::BIT_OR:
2736 __ orr(r2, r3, Operand(r2));
2737 break;
2738 case Token::BIT_XOR:
2739 __ eor(r2, r3, Operand(r2));
2740 break;
2741 case Token::BIT_AND:
2742 __ and_(r2, r3, Operand(r2));
2743 break;
2744 default:
2745 UNREACHABLE();
2746 }
2747
2748 Label result_not_a_smi;
2749 // Check that the *signed* result fits in a smi.
2750 __ add(r3, r2, Operand(0x40000000), SetCC);
2751 __ b(mi, &result_not_a_smi);
2752 __ SmiTag(r0, r2);
2753 __ Ret();
2754
2755 // Allocate new heap number for result.
2756 __ bind(&result_not_a_smi);
2757 __ AllocateHeapNumber(
2758 r5, scratch1, scratch2, heap_number_map, gc_required);
2759
2760 // r2: Answer as signed int32.
2761 // r5: Heap number to write answer into.
2762
2763 // Nothing can go wrong now, so move the heap number to r0, which is the
2764 // result.
2765 __ mov(r0, Operand(r5));
2766
2767 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
2768 // Convert the int32 in r2 to the heap number in r0. r3 is corrupted.
2769 CpuFeatures::Scope scope(VFP3);
2770 __ vmov(s0, r2);
2771 __ vcvt_f64_s32(d0, s0);
2772 __ sub(r3, r0, Operand(kHeapObjectTag));
2773 __ vstr(d0, r3, HeapNumber::kValueOffset);
2774 __ Ret();
2775 } else {
2776 // Tail call that writes the int32 in r2 to the heap number in r0, using
2777 // r3 as scratch. r0 is preserved and returned.
2778 WriteInt32ToHeapNumberStub stub(r2, r0, r3);
2779 __ TailCallStub(&stub);
2780 }
2781 break;
2782 }
2473 default: 2783 default:
2474 UNREACHABLE(); 2784 UNREACHABLE();
2475 } 2785 }
2476 } 2786 }
2477 2787
2478 2788
2479 // Generate the smi code. If the operation on smis is successful this return is 2789 // Generate the smi code. If the operation on smis is successful this return is
2480 // generated. If the result is not a smi and heap number allocation is not 2790 // generated. If the result is not a smi and heap number allocation is not
2481 // requested the code falls through. If number allocation is requested but a 2791 // requested the code falls through. If number allocation is requested but a
2482 // heap number cannot be allocated the code jumps to the label gc_required. 2792 // heap number cannot be allocated the code jumps to the label gc_required.
2483 void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, 2793 void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
2484 Label* gc_required, 2794 Label* gc_required,
2485 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { 2795 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
2486 Label not_smis; 2796 Label not_smis;
2487 2797
2488 ASSERT(op_ == Token::ADD || op_ == Token::SUB); 2798 ASSERT(op_ == Token::ADD ||
2799 op_ == Token::SUB ||
2800 op_ == Token::MUL ||
2801 op_ == Token::DIV ||
2802 op_ == Token::MOD ||
2803 op_ == Token::BIT_OR ||
2804 op_ == Token::BIT_AND ||
2805 op_ == Token::BIT_XOR);
2489 2806
2490 Register left = r1; 2807 Register left = r1;
2491 Register right = r0; 2808 Register right = r0;
2492 Register scratch1 = r7; 2809 Register scratch1 = r7;
2493 Register scratch2 = r9; 2810 Register scratch2 = r9;
2494 2811
2495 // Perform combined smi check on both operands. 2812 // Perform combined smi check on both operands.
2496 __ orr(scratch1, left, Operand(right)); 2813 __ orr(scratch1, left, Operand(right));
2497 STATIC_ASSERT(kSmiTag == 0); 2814 STATIC_ASSERT(kSmiTag == 0);
2498 __ tst(scratch1, Operand(kSmiTagMask)); 2815 __ tst(scratch1, Operand(kSmiTagMask));
2499 __ b(ne, &not_smis); 2816 __ b(ne, &not_smis);
2500 2817
2501 GenerateOptimisticSmiOperation(masm); 2818 // If the smi-smi operation results in a smi return is generated.
2819 GenerateSmiSmiOperation(masm);
2502 2820
2503 // If heap number results are possible generate the result in an allocated 2821 // If heap number results are possible generate the result in an allocated
2504 // heap number. 2822 // heap number.
2505 if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) { 2823 if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
2506 FloatingPointHelper::Destination destination = 2824 GenerateFPOperation(masm, true, NULL, gc_required);
2507 Isolate::Current()->cpu_features()->IsSupported(VFP3) &&
2508 Token::MOD != op_ ?
2509 FloatingPointHelper::kVFPRegisters :
2510 FloatingPointHelper::kCoreRegisters;
2511
2512 Register heap_number_map = r6;
2513 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2514
2515 // Allocate new heap number for result.
2516 Register heap_number = r5;
2517 __ AllocateHeapNumber(
2518 heap_number, scratch1, scratch2, heap_number_map, gc_required);
2519
2520 // Load the smis.
2521 FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
2522
2523 // Calculate the result.
2524 if (destination == FloatingPointHelper::kVFPRegisters) {
2525 // Using VFP registers:
2526 // d6: Left value
2527 // d7: Right value
2528 CpuFeatures::Scope scope(VFP3);
2529 GenerateVFPOperation(masm);
2530
2531 __ sub(r0, heap_number, Operand(kHeapObjectTag));
2532 __ vstr(d5, r0, HeapNumber::kValueOffset);
2533 __ add(r0, r0, Operand(kHeapObjectTag));
2534 __ Ret();
2535 } else {
2536 // Using core registers:
2537 // r0: Left value (least significant part of mantissa).
2538 // r1: Left value (sign, exponent, top of mantissa).
2539 // r2: Right value (least significant part of mantissa).
2540 // r3: Right value (sign, exponent, top of mantissa).
2541
2542 __ push(lr); // For later.
2543 __ PrepareCallCFunction(4, scratch1); // Two doubles are 4 arguments.
2544 // Call C routine that may not cause GC or other trouble. r5 is callee
2545 // save.
2546 __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
2547 // Store answer in the overwritable heap number.
2548 #if !defined(USE_ARM_EABI)
2549 // Double returned in fp coprocessor register 0 and 1, encoded as
2550 // register cr8. Offsets must be divisible by 4 for coprocessor so we
2551 // need to subtract the tag from r5.
2552 __ sub(scratch1, heap_number, Operand(kHeapObjectTag));
2553 __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset));
2554 #else
2555 // Double returned in registers 0 and 1.
2556 __ Strd(r0, r1, FieldMemOperand(heap_number, HeapNumber::kValueOffset));
2557 #endif
2558 __ mov(r0, Operand(heap_number));
2559 // And we are done.
2560 __ pop(pc);
2561 }
2562 } 2825 }
2563 __ bind(&not_smis); 2826 __ bind(&not_smis);
2564 } 2827 }
2565 2828
2566 2829
2567 void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { 2830 void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
2568 Label not_smis, call_runtime; 2831 Label not_smis, call_runtime;
2569 2832
2570 ASSERT(op_ == Token::ADD || op_ == Token::SUB); 2833 ASSERT(op_ == Token::ADD ||
2834 op_ == Token::SUB ||
2835 op_ == Token::MUL ||
2836 op_ == Token::DIV ||
2837 op_ == Token::MOD ||
2838 op_ == Token::BIT_OR ||
2839 op_ == Token::BIT_AND ||
2840 op_ == Token::BIT_XOR);
2571 2841
2572 if (result_type_ == TRBinaryOpIC::UNINITIALIZED || 2842 if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
2573 result_type_ == TRBinaryOpIC::SMI) { 2843 result_type_ == TRBinaryOpIC::SMI) {
2574 // Only allow smi results. 2844 // Only allow smi results.
2575 GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS); 2845 GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS);
2576 } else { 2846 } else {
2577 // Allow heap number result and don't make a transition if a heap number 2847 // Allow heap number result and don't make a transition if a heap number
2578 // cannot be allocated. 2848 // cannot be allocated.
2579 GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); 2849 GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
2580 } 2850 }
(...skipping 11 matching lines...) Expand all
2592 ASSERT(operands_type_ == TRBinaryOpIC::STRING); 2862 ASSERT(operands_type_ == TRBinaryOpIC::STRING);
2593 ASSERT(op_ == Token::ADD); 2863 ASSERT(op_ == Token::ADD);
2594 // Try to add arguments as strings, otherwise, transition to the generic 2864 // Try to add arguments as strings, otherwise, transition to the generic
2595 // TRBinaryOpIC type. 2865 // TRBinaryOpIC type.
2596 GenerateAddStrings(masm); 2866 GenerateAddStrings(masm);
2597 GenerateTypeTransition(masm); 2867 GenerateTypeTransition(masm);
2598 } 2868 }
2599 2869
2600 2870
// Specialized stub code for TRBinaryOpIC::INT32 operands, covering the full
// set of arithmetic and bitwise binary operations recorded by the IC.
void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
  ASSERT(op_ == Token::ADD ||
         op_ == Token::SUB ||
         op_ == Token::MUL ||
         op_ == Token::DIV ||
         op_ == Token::MOD ||
         op_ == Token::BIT_OR ||
         op_ == Token::BIT_AND ||
         op_ == Token::BIT_XOR);

  ASSERT(operands_type_ == TRBinaryOpIC::INT32);

  // No int32 fast path is emitted here yet; unconditionally transition the
  // IC to a more generic stub state.
  GenerateTypeTransition(masm);
}
2608 2885
2609 2886
// Specialized stub code for TRBinaryOpIC::HEAP_NUMBER operands, covering the
// full set of arithmetic and bitwise binary operations recorded by the IC.
void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
  ASSERT(op_ == Token::ADD ||
         op_ == Token::SUB ||
         op_ == Token::MUL ||
         op_ == Token::DIV ||
         op_ == Token::MOD ||
         op_ == Token::BIT_OR ||
         op_ == Token::BIT_AND ||
         op_ == Token::BIT_XOR);

  Label not_numbers, call_runtime;
  ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER);

  // Emit the shared floating-point operation.  The second argument (false)
  // presumably means the operands are not known to be smis -- TODO confirm
  // against GenerateFPOperation's declaration.
  GenerateFPOperation(masm, false, &not_numbers, &call_runtime);

  // An operand was not a heap number: let the IC record the new operand
  // types and pick a more general stub.
  __ bind(&not_numbers);
  GenerateTypeTransition(masm);

  // Fallback path (e.g. when a result heap number cannot be allocated --
  // TODO confirm): perform the operation in the runtime.
  __ bind(&call_runtime);
  GenerateCallRuntime(masm);
}
2688 2908
2689 2909
2690 void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) { 2910 void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
2691 ASSERT(op_ == Token::ADD || op_ == Token::SUB); 2911 ASSERT(op_ == Token::ADD ||
2912 op_ == Token::SUB ||
2913 op_ == Token::MUL ||
2914 op_ == Token::DIV ||
2915 op_ == Token::MOD ||
2916 op_ == Token::BIT_OR ||
2917 op_ == Token::BIT_AND ||
2918 op_ == Token::BIT_XOR);
2692 2919
2693 Label call_runtime; 2920 Label call_runtime;
2694 2921
2695 GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); 2922 GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
2696 2923
2697 // If all else fails, use the runtime system to get the correct 2924 // If all else fails, use the runtime system to get the correct
2698 // result. 2925 // result.
2699 __ bind(&call_runtime); 2926 __ bind(&call_runtime);
2700 2927
2701 // Try to add strings before calling runtime. 2928 // Try to add strings before calling runtime.
(...skipping 35 matching lines...) Expand 10 before | Expand all | Expand 10 after
2737 2964
// Pushes the two operand registers back onto the stack and invokes the
// JavaScript builtin corresponding to op_.  JUMP_JS jumps to the builtin
// rather than calling it, so control does not come back to this stub.
void TypeRecordingBinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
  GenerateRegisterArgsPush(masm);
  switch (op_) {
    case Token::ADD:
      __ InvokeBuiltin(Builtins::ADD, JUMP_JS);
      break;
    case Token::SUB:
      __ InvokeBuiltin(Builtins::SUB, JUMP_JS);
      break;
    case Token::MUL:
      __ InvokeBuiltin(Builtins::MUL, JUMP_JS);
      break;
    case Token::DIV:
      __ InvokeBuiltin(Builtins::DIV, JUMP_JS);
      break;
    case Token::MOD:
      __ InvokeBuiltin(Builtins::MOD, JUMP_JS);
      break;
    case Token::BIT_OR:
      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
      break;
    case Token::BIT_AND:
      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS);
      break;
    case Token::BIT_XOR:
      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
      break;
    default:
      // All operations supported by this stub are listed above.
      UNREACHABLE();
  }
}
2751 2996
2752 2997
2753 void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation( 2998 void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
2754 MacroAssembler* masm, 2999 MacroAssembler* masm,
2755 Register result, 3000 Register result,
2756 Register heap_number_map, 3001 Register heap_number_map,
(...skipping 206 matching lines...) Expand 10 before | Expand all | Expand 10 after
2963 __ Assert(ne, "Unexpected smi operand."); 3208 __ Assert(ne, "Unexpected smi operand.");
2964 } 3209 }
2965 3210
2966 // Check if the operand is a heap number. 3211 // Check if the operand is a heap number.
2967 __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); 3212 __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
2968 __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); 3213 __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2969 __ cmp(r1, heap_number_map); 3214 __ cmp(r1, heap_number_map);
2970 __ b(ne, &slow); 3215 __ b(ne, &slow);
2971 3216
2972 // Convert the heap number is r0 to an untagged integer in r1. 3217 // Convert the heap number is r0 to an untagged integer in r1.
2973 __ ConvertToInt32(r0, r1, r2, r3, &slow); 3218 __ ConvertToInt32(r0, r1, r2, r3, d0, &slow);
2974 3219
2975 // Do the bitwise operation (move negated) and check if the result 3220 // Do the bitwise operation (move negated) and check if the result
2976 // fits in a smi. 3221 // fits in a smi.
2977 Label try_float; 3222 Label try_float;
2978 __ mvn(r1, Operand(r1)); 3223 __ mvn(r1, Operand(r1));
2979 __ add(r2, r1, Operand(0x40000000), SetCC); 3224 __ add(r2, r1, Operand(0x40000000), SetCC);
2980 __ b(mi, &try_float); 3225 __ b(mi, &try_float);
2981 __ mov(r0, Operand(r1, LSL, kSmiTagSize)); 3226 __ mov(r0, Operand(r1, LSL, kSmiTagSize));
2982 __ b(&done); 3227 __ b(&done);
2983 3228
(...skipping 276 matching lines...) Expand 10 before | Expand all | Expand 10 after
3260 // sp: stack pointer (restored as callee's sp after C call) 3505 // sp: stack pointer (restored as callee's sp after C call)
3261 // cp: current context (C callee-saved) 3506 // cp: current context (C callee-saved)
3262 3507
3263 // Result returned in r0 or r0+r1 by default. 3508 // Result returned in r0 or r0+r1 by default.
3264 3509
3265 // NOTE: Invocations of builtins may return failure objects 3510 // NOTE: Invocations of builtins may return failure objects
3266 // instead of a proper result. The builtin entry handles 3511 // instead of a proper result. The builtin entry handles
3267 // this by performing a garbage collection and retrying the 3512 // this by performing a garbage collection and retrying the
3268 // builtin once. 3513 // builtin once.
3269 3514
3515 // Compute the argv pointer in a callee-saved register.
3516 __ add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
3517 __ sub(r6, r6, Operand(kPointerSize));
3518
3270 // Enter the exit frame that transitions from JavaScript to C++. 3519 // Enter the exit frame that transitions from JavaScript to C++.
3271 __ EnterExitFrame(save_doubles_); 3520 __ EnterExitFrame(save_doubles_);
3272 3521
3522 // Setup argc and the builtin function in callee-saved registers.
3523 __ mov(r4, Operand(r0));
3524 __ mov(r5, Operand(r1));
3525
3273 // r4: number of arguments (C callee-saved) 3526 // r4: number of arguments (C callee-saved)
3274 // r5: pointer to builtin function (C callee-saved) 3527 // r5: pointer to builtin function (C callee-saved)
3275 // r6: pointer to first argument (C callee-saved) 3528 // r6: pointer to first argument (C callee-saved)
3276 3529
3277 Label throw_normal_exception; 3530 Label throw_normal_exception;
3278 Label throw_termination_exception; 3531 Label throw_termination_exception;
3279 Label throw_out_of_memory_exception; 3532 Label throw_out_of_memory_exception;
3280 3533
3281 // Call into the runtime system. 3534 // Call into the runtime system.
3282 GenerateCore(masm, 3535 GenerateCore(masm,
(...skipping 2384 matching lines...) Expand 10 before | Expand all | Expand 10 after
5667 // Compute the entry point of the rewritten stub. 5920 // Compute the entry point of the rewritten stub.
5668 __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); 5921 __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
5669 // Restore registers. 5922 // Restore registers.
5670 __ pop(lr); 5923 __ pop(lr);
5671 __ pop(r0); 5924 __ pop(r0);
5672 __ pop(r1); 5925 __ pop(r1);
5673 __ Jump(r2); 5926 __ Jump(r2);
5674 } 5927 }
5675 5928
5676 5929
// Return trampoline for a direct C entry call: GenerateCall() below stored
// the resume address in the top stack slot, so load it straight into pc to
// continue execution there.
void DirectCEntryStub::Generate(MacroAssembler* masm) {
  __ ldr(pc, MemOperand(sp, 0));
}
5933
5934
// Emits a direct jump to the given API function.  The C function returns
// through lr into this stub's code (Generate() above), which resumes at the
// address stored on the stack below.
void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
                                    ApiFunction *function) {
  // lr = this stub's entry point, so the C function "returns" into Generate().
  __ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
                     RelocInfo::CODE_TARGET));
  // Push return address (accessible to GC through exit frame pc).  Reading pc
  // in ARM state yields the address of the instruction two ahead, i.e. the
  // one immediately following the Jump below -- exactly where to resume.
  __ mov(r2,
         Operand(ExternalReference(function, ExternalReference::DIRECT_CALL)));
  __ str(pc, MemOperand(sp, 0));
  __ Jump(r2);  // Call the api function.
}
5945
5946
5947 void GenerateFastPixelArrayLoad(MacroAssembler* masm,
5948 Register receiver,
5949 Register key,
5950 Register elements_map,
5951 Register elements,
5952 Register scratch1,
5953 Register scratch2,
5954 Register result,
5955 Label* not_pixel_array,
5956 Label* key_not_smi,
5957 Label* out_of_range) {
5958 // Register use:
5959 //
5960 // receiver - holds the receiver on entry.
5961 // Unchanged unless 'result' is the same register.
5962 //
5963 // key - holds the smi key on entry.
5964 // Unchanged unless 'result' is the same register.
5965 //
5966 // elements - set to be the receiver's elements on exit.
5967 //
5968 // elements_map - set to be the map of the receiver's elements
5969 // on exit.
5970 //
5971 // result - holds the result of the pixel array load on exit,
5972 // tagged as a smi if successful.
5973 //
5974 // Scratch registers:
5975 //
5976 // scratch1 - used a scratch register in map check, if map
5977 // check is successful, contains the length of the
5978 // pixel array, the pointer to external elements and
5979 // the untagged result.
5980 //
5981 // scratch2 - holds the untaged key.
5982
5983 // Some callers already have verified that the key is a smi. key_not_smi is
5984 // set to NULL as a sentinel for that case. Otherwise, add an explicit check
5985 // to ensure the key is a smi must be added.
5986 if (key_not_smi != NULL) {
5987 __ JumpIfNotSmi(key, key_not_smi);
5988 } else {
5989 if (FLAG_debug_code) {
5990 __ AbortIfNotSmi(key);
5991 }
5992 }
5993 __ SmiUntag(scratch2, key);
5994
5995 // Verify that the receiver has pixel array elements.
5996 __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
5997 __ CheckMap(elements, scratch1, Heap::kPixelArrayMapRootIndex,
5998 not_pixel_array, true);
5999
6000 // Key must be in range of the pixel array.
6001 __ ldr(scratch1, FieldMemOperand(elements, PixelArray::kLengthOffset));
6002 __ cmp(scratch2, scratch1);
6003 __ b(hs, out_of_range); // unsigned check handles negative keys.
6004
6005 // Perform the indexed load and tag the result as a smi.
6006 __ ldr(scratch1,
6007 FieldMemOperand(elements, PixelArray::kExternalPointerOffset));
6008 __ ldrb(scratch1, MemOperand(scratch1, scratch2));
6009 __ SmiTag(r0, scratch1);
6010 __ Ret();
6011 }
6012
6013
5677 #undef __ 6014 #undef __
5678 6015
5679 } } // namespace v8::internal 6016 } } // namespace v8::internal
5680 6017
5681 #endif // V8_TARGET_ARCH_ARM 6018 #endif // V8_TARGET_ARCH_ARM
OLDNEW
« no previous file with comments | « src/arm/code-stubs-arm.h ('k') | src/arm/codegen-arm.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698