OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 371 matching lines...)
382 Destination destination, | 382 Destination destination, |
383 Register scratch1, | 383 Register scratch1, |
384 Register scratch2); | 384 Register scratch2); |
385 | 385 |
386 // Loads objects from r0 and r1 (right and left in binary operations) into | 386 // Loads objects from r0 and r1 (right and left in binary operations) into |
387 // floating point registers. Depending on the destination the values end up | 387 // floating point registers. Depending on the destination the values end up |
388 // either in d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination | 388 // either in d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination |
389 // is floating point registers, VFP3 must be supported. If core registers are | 389 // is floating point registers, VFP3 must be supported. If core registers are |
390 // requested when VFP3 is supported, d6 and d7 will still be clobbered. If | 390 // requested when VFP3 is supported, d6 and d7 will still be clobbered. If |
391 // either r0 or r1 is not a number (not a smi and not a heap number), the | 391 // either r0 or r1 is not a number (not a smi and not a heap number), the |
392 // not_number label is jumped to. | 392 // not_number label is jumped to with r0 and r1 intact. |
393 static void LoadOperands(MacroAssembler* masm, | 393 static void LoadOperands(MacroAssembler* masm, |
394 FloatingPointHelper::Destination destination, | 394 FloatingPointHelper::Destination destination, |
395 Register heap_number_map, | 395 Register heap_number_map, |
396 Register scratch1, | 396 Register scratch1, |
397 Register scratch2, | 397 Register scratch2, |
398 Label* not_number); | 398 Label* not_number); |
399 private: | 399 private: |
400 static void LoadNumber(MacroAssembler* masm, | 400 static void LoadNumber(MacroAssembler* masm, |
401 FloatingPointHelper::Destination destination, | 401 FloatingPointHelper::Destination destination, |
402 Register object, | 402 Register object, |
403 DwVfpRegister dst, | 403 DwVfpRegister dst, |
404 Register dst1, | 404 Register dst1, |
405 Register dst2, | 405 Register dst2, |
406 Register heap_number_map, | 406 Register heap_number_map, |
407 Register scratch1, | 407 Register scratch1, |
408 Register scratch2, | 408 Register scratch2, |
409 Label* not_number); | 409 Label* not_number); |
410 }; | 410 }; |
411 | 411 |
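The shifts and mask tests in these helpers rely on V8's tagging scheme on ARM:
a smi carries a 31-bit integer shifted left by one (kSmiTagSize is 1 and the
smi tag is 0), while heap object pointers carry tag 1 in the low bit
(kHeapObjectTag), which is why heap number loads first subtract the tag. A
minimal standalone sketch of the untag, the combined two-operand smi check
used further down, and the smi-to-double conversion (illustrative C++, not
V8 code):

    #include <cstdint>

    const int kSmiTagSize = 1;   // low bit is the tag
    const int kSmiTagMask = 1;   // smi tag is 0, heap object tag is 1

    int32_t SmiUntag(int32_t tagged) {
      return tagged >> kSmiTagSize;          // arithmetic shift, like ASR
    }

    bool BothSmis(int32_t a, int32_t b) {
      // OR the operands: if either low tag bit is set, so is the OR's.
      return ((a | b) & kSmiTagMask) == 0;   // mirrors the orr + tst pair
    }

    double SmiToDouble(int32_t tagged) {
      return static_cast<double>(SmiUntag(tagged));  // vmov + vcvt_f64_s32
    }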
412 | 412 |
413 void FloatingPointHelper::LoadSmis(MacroAssembler* masm, | 413 void FloatingPointHelper::LoadSmis(MacroAssembler* masm, |
414 FloatingPointHelper::Destination destination, | 414 FloatingPointHelper::Destination destination, |
415 Register scratch1, | 415 Register scratch1, |
416 Register scratch2) { | 416 Register scratch2) { |
417 if (CpuFeatures::IsSupported(VFP3)) { | 417 if (CpuFeatures::IsSupported(VFP3)) { |
418 CpuFeatures::Scope scope(VFP3); | 418 CpuFeatures::Scope scope(VFP3); |
419 __ mov(scratch1, Operand(r0, ASR, kSmiTagSize)); | 419 __ mov(scratch1, Operand(r0, ASR, kSmiTagSize)); |
420 __ vmov(s15, scratch1); | 420 __ vmov(d7.high(), scratch1); |
421 __ vcvt_f64_s32(d7, s15); | 421 __ vcvt_f64_s32(d7, d7.high()); |
422 __ mov(scratch1, Operand(r1, ASR, kSmiTagSize)); | 422 __ mov(scratch1, Operand(r1, ASR, kSmiTagSize)); |
423 __ vmov(s13, scratch1); | 423 __ vmov(d6.high(), scratch1); |
424 __ vcvt_f64_s32(d6, s13); | 424 __ vcvt_f64_s32(d6, d6.high()); |
425 if (destination == kCoreRegisters) { | 425 if (destination == kCoreRegisters) { |
426 __ vmov(r2, r3, d7); | 426 __ vmov(r2, r3, d7); |
427 __ vmov(r0, r1, d6); | 427 __ vmov(r0, r1, d6); |
428 } | 428 } |
429 } else { | 429 } else { |
430 ASSERT(destination == kCoreRegisters); | 430 ASSERT(destination == kCoreRegisters); |
431 // Write Smi from r0 to r3 and r2 in double format. | 431 // Write Smi from r0 to r3 and r2 in double format. |
432 __ mov(scratch1, Operand(r0)); | 432 __ mov(scratch1, Operand(r0)); |
433 ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2); | 433 ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2); |
434 __ push(lr); | 434 __ push(lr); |
(...skipping 34 matching lines...)
469 Register heap_number_map, | 469 Register heap_number_map, |
470 Register scratch1, | 470 Register scratch1, |
471 Register scratch2, | 471 Register scratch2, |
472 Label* not_number) { | 472 Label* not_number) { |
473 Label is_smi, done; | 473 Label is_smi, done; |
474 | 474 |
475 __ JumpIfSmi(object, &is_smi); | 475 __ JumpIfSmi(object, &is_smi); |
476 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); | 476 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); |
477 | 477 |
478 // Handle loading a double from a heap number. | 478 // Handle loading a double from a heap number. |
479 if (CpuFeatures::IsSupported(VFP3)) { | 479 if (CpuFeatures::IsSupported(VFP3) && destination == kVFPRegisters) { |
480 CpuFeatures::Scope scope(VFP3); | 480 CpuFeatures::Scope scope(VFP3); |
481 // Load the double from tagged HeapNumber to double register. | 481 // Load the double from tagged HeapNumber to double register. |
482 __ sub(scratch1, object, Operand(kHeapObjectTag)); | 482 __ sub(scratch1, object, Operand(kHeapObjectTag)); |
483 __ vldr(dst, scratch1, HeapNumber::kValueOffset); | 483 __ vldr(dst, scratch1, HeapNumber::kValueOffset); |
484 } else { | 484 } else { |
485 ASSERT(destination == kCoreRegisters); | 485 ASSERT(destination == kCoreRegisters); |
486 // Load the double from heap number to dst1 and dst2 in double format. | 486 // Load the double from heap number to dst1 and dst2 in double format. |
487 __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset)); | 487 __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset)); |
488 } | 488 } |
489 __ jmp(&done); | 489 __ jmp(&done); |
490 | 490 |
491 // Handle loading a double from a smi. | 491 // Handle loading a double from a smi. |
492 __ bind(&is_smi); | 492 __ bind(&is_smi); |
493 if (CpuFeatures::IsSupported(VFP3)) { | 493 if (CpuFeatures::IsSupported(VFP3)) { |
494 CpuFeatures::Scope scope(VFP3); | 494 CpuFeatures::Scope scope(VFP3); |
495 // Convert smi to double. | 495 // Convert smi to double using VFP instructions. |
496 __ SmiUntag(scratch1, object); | 496 __ SmiUntag(scratch1, object); |
497 __ vmov(dst.high(), scratch1); | 497 __ vmov(dst.high(), scratch1); |
498 __ vcvt_f64_s32(dst, dst.high()); | 498 __ vcvt_f64_s32(dst, dst.high()); |
499 if (destination == kCoreRegisters) { | 499 if (destination == kCoreRegisters) { |
| 500 // Load the converted smi to dst1 and dst2 in double format. |
500 __ vmov(dst1, dst2, dst); | 501 __ vmov(dst1, dst2, dst); |
501 } | 502 } |
502 } else { | 503 } else { |
503 ASSERT(destination == kCoreRegisters); | 504 ASSERT(destination == kCoreRegisters); |
504 // Write Smi to dst1 and dst2 in double format. | 505 // Write smi to dst1 and dst2 in double format. |
505 __ mov(scratch1, Operand(object)); | 506 __ mov(scratch1, Operand(object)); |
506 ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2); | 507 ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2); |
507 __ push(lr); | 508 __ push(lr); |
508 __ Call(stub.GetCode(), RelocInfo::CODE_TARGET); | 509 __ Call(stub.GetCode(), RelocInfo::CODE_TARGET); |
509 __ pop(lr); | 510 __ pop(lr); |
510 } | 511 } |
511 | 512 |
512 __ bind(&done); | 513 __ bind(&done); |
513 } | 514 } |
514 | 515 |
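On the core-register path LoadNumber moves the heap number's IEEE-754 payload
into two 32-bit registers: the low word holds the least significant mantissa
bits and the high word holds the sign, exponent and mantissa top, the same
layout the r0-r3 comments describe further down. A hedged sketch of that
split in portable C++ (assuming the little-endian word order used by ARM
EABI; not V8 code):

    #include <cstdint>
    #include <cstring>

    // Split a double into the two 32-bit words of the core-register path.
    void SplitDouble(double value, uint32_t* lo, uint32_t* hi) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));  // safe reinterpretation
      *lo = static_cast<uint32_t>(bits);         // low mantissa word
      *hi = static_cast<uint32_t>(bits >> 32);   // sign, exponent, top
    }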
(...skipping 1979 matching lines...)
2494 __ mov(right, Operand(scratch1), LeaveCC, ne); | 2495 __ mov(right, Operand(scratch1), LeaveCC, ne); |
2495 __ Ret(ne); | 2496 __ Ret(ne); |
2496 // We need -0 if we were multiplying a negative number with 0 to get 0. | 2497 // We need -0 if we were multiplying a negative number with 0 to get 0. |
2497 // We know one of them was zero. | 2498 // We know one of them was zero. |
2498 __ add(scratch2, right, Operand(left), SetCC); | 2499 __ add(scratch2, right, Operand(left), SetCC); |
2499 __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl); | 2500 __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl); |
2500 __ Ret(pl); // Return smi 0 if the non-zero one was positive. | 2501 __ Ret(pl); // Return smi 0 if the non-zero one was positive. |
2501 // We fall through here if we multiplied a negative number with 0, because | 2502 // We fall through here if we multiplied a negative number with 0, because |
2502 // that would mean we should produce -0. | 2503 // that would mean we should produce -0. |
2503 break; | 2504 break; |
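The fall-through above implements JavaScript's signed zero: a product of 0
must be -0 when exactly one operand was negative. Since one operand is known
to be zero, adding the two tagged values exposes the other operand's sign, so
a single flag-setting add decides the case; the negative case falls through
to the heap number path because -0 is not representable as a smi. Roughly, in
illustrative C++ terms:

    // One of left/right is zero; pick the correctly signed zero result.
    double SmiMulZeroResult(int32_t left, int32_t right) {
      return (left + right) >= 0 ? 0.0 : -0.0;  // the add + "pl" test above
    }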
| 2505 case Token::DIV: |
| 2506 // Check for power of two on the right hand side. |
| 2507 __ JumpIfNotPowerOfTwoOrZero(right, scratch1, ¬_smi_result); |
| 2508 // Check for positive and no remainder (scratch1 contains right - 1). |
| 2509 __ orr(scratch2, scratch1, Operand(0x80000000u)); |
| 2510 __ tst(left, scratch2); |
| 2511 __ b(ne, ¬_smi_result); |
| 2512 |
| 2513 // Perform division by shifting. |
| 2514 __ CountLeadingZeros(scratch1, scratch1, scratch2); |
| 2515 __ rsb(scratch1, scratch1, Operand(31)); |
| 2516 __ mov(right, Operand(left, LSR, scratch1)); |
| 2517 __ Ret(); |
| 2518 break; |
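The DIV fast path only applies to a positive dividend, a power-of-two divisor
and a zero remainder: JumpIfNotPowerOfTwoOrZero leaves right - 1 in scratch1,
OR-ing in 0x80000000 builds a mask that tests the sign bit and the remainder
bits of left at once, and counting leading zeros recovers the shift amount.
On the tagged smis the stub takes clz of right - 1, which exactly cancels the
smi tag shift; the sketch below works on untagged 32-bit values and takes clz
of right instead (assuming GCC/Clang __builtin_clz; illustrative, not V8
code):

    #include <cstdint>

    // Divide by a power of two via shifting; returns false whenever the
    // fast path does not apply, mirroring the jumps to not_smi_result.
    bool FastDivide(int32_t left, int32_t right, int32_t* result) {
      if (right <= 0 || (right & (right - 1)) != 0) return false;  // 2^k?
      uint32_t mask = static_cast<uint32_t>(right - 1) | 0x80000000u;
      if (static_cast<uint32_t>(left) & mask) return false;  // sign/remainder
      int shift = 31 - __builtin_clz(static_cast<uint32_t>(right));
      *result = left >> shift;  // exact division, remainder known zero
      return true;
    }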
| 2519 case Token::MOD: |
| 2520 // Check for two positive smis. |
| 2521 __ orr(scratch1, left, Operand(right)); |
| 2522 __ tst(scratch1, Operand(0x80000000u | kSmiTagMask)); |
| 2523 __ b(ne, ¬_smi_result); |
| 2524 |
| 2525 // Check for power of two on the right hand side. |
| 2526 __ JumpIfNotPowerOfTwoOrZero(right, scratch1, ¬_smi_result); |
| 2527 |
| 2528 // Perform modulus by masking. |
| 2529 __ and_(right, left, Operand(scratch1)); |
| 2530 __ Ret(); |
| 2531 break; |
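The MOD fast path needs both operands non-negative and a power-of-two
divisor, in which case left % right is simply left AND (right - 1); because
the smi tag is a low zero bit, masking the tagged values directly produces a
correctly tagged smi. A small sketch on untagged values (illustrative):

    #include <cstdint>

    // Modulus by masking, valid under the same checks as the stub above.
    bool FastModulo(int32_t left, int32_t right, int32_t* result) {
      if (left < 0 || right <= 0) return false;      // both non-negative?
      if ((right & (right - 1)) != 0) return false;  // power of two?
      *result = left & (right - 1);                  // equals left % right
      return true;
    }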
2504 default: | 2532 default: |
2505 UNREACHABLE(); | 2533 UNREACHABLE(); |
2506 } | 2534 } |
2507 __ bind(¬_smi_result); | 2535 __ bind(¬_smi_result); |
2508 } | 2536 } |
2509 | 2537 |
2510 | 2538 |
2511 void TypeRecordingBinaryOpStub::GenerateVFPOperation( | 2539 void TypeRecordingBinaryOpStub::GenerateVFPOperation( |
2512 MacroAssembler* masm) { | 2540 MacroAssembler* masm) { |
2513 switch (op_) { | 2541 switch (op_) { |
2514 case Token::ADD: | 2542 case Token::ADD: |
2515 __ vadd(d5, d6, d7); | 2543 __ vadd(d5, d6, d7); |
2516 break; | 2544 break; |
2517 case Token::SUB: | 2545 case Token::SUB: |
2518 __ vsub(d5, d6, d7); | 2546 __ vsub(d5, d6, d7); |
2519 break; | 2547 break; |
2520 case Token::MUL: | 2548 case Token::MUL: |
2521 __ vmul(d5, d6, d7); | 2549 __ vmul(d5, d6, d7); |
2522 break; | 2550 break; |
| 2551 case Token::DIV: |
| 2552 __ vdiv(d5, d6, d7); |
| 2553 break; |
2523 default: | 2554 default: |
2524 UNREACHABLE(); | 2555 UNREACHABLE(); |
2525 } | 2556 } |
2526 } | 2557 } |
2527 | 2558 |
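Both this VFP path and the core-register fallback that calls
ExternalReference::double_fp_operation compute the same double result; DIV
now has a direct VFP instruction (vdiv), while MOD always takes the C call
since VFP has no remainder instruction. A hedged sketch of the shared
semantics, with std::fmod standing in for the runtime's modulo (illustrative
only):

    #include <cmath>

    double DoubleOp(char op, double left, double right) {
      switch (op) {
        case '+': return left + right;             // vadd
        case '-': return left - right;             // vsub
        case '*': return left * right;             // vmul
        case '/': return left / right;             // vdiv
        case '%': return std::fmod(left, right);   // no VFP equivalent
        default:  return 0.0;                      // unreachable here
      }
    }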
2528 | 2559 |
2529 // Generate the smi code. If the operation on smis is successful this return | 2560 // Generate the smi code. If the operation on smis is successful this return |
2530 // is generated. If the result is not a smi and heap number allocation is not | 2561 // is generated. If the result is not a smi and heap number allocation is not |
2531 // requested, the code falls through. If number allocation is requested but a | 2562 // requested, the code falls through. If number allocation is requested but a |
2532 // heap number cannot be allocated, the code jumps to the label gc_required. | 2563 // heap number cannot be allocated, the code jumps to the label gc_required. |
2533 void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, | 2564 void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, |
2534 Label* gc_required, | 2565 Label* gc_required, |
2535 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { | 2566 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { |
2536 Label not_smis; | 2567 Label not_smis; |
2537 | 2568 |
2538 ASSERT(op_ == Token::ADD || op_ == Token::SUB || op_ == Token::MUL); | 2569 ASSERT(op_ == Token::ADD || |
| 2570 op_ == Token::SUB || |
| 2571 op_ == Token::MUL || |
| 2572 op_ == Token::DIV || |
| 2573 op_ == Token::MOD); |
2539 | 2574 |
2540 Register left = r1; | 2575 Register left = r1; |
2541 Register right = r0; | 2576 Register right = r0; |
2542 Register scratch1 = r7; | 2577 Register scratch1 = r7; |
2543 Register scratch2 = r9; | 2578 Register scratch2 = r9; |
2544 | 2579 |
2545 // Perform combined smi check on both operands. | 2580 // Perform combined smi check on both operands. |
2546 __ orr(scratch1, left, Operand(right)); | 2581 __ orr(scratch1, left, Operand(right)); |
2547 STATIC_ASSERT(kSmiTag == 0); | 2582 STATIC_ASSERT(kSmiTag == 0); |
2548 __ tst(scratch1, Operand(kSmiTagMask)); | 2583 __ tst(scratch1, Operand(kSmiTagMask)); |
2549 __ b(ne, ¬_smis); | 2584 __ b(ne, ¬_smis); |
2550 | 2585 |
| 2586 // If the smi-smi operation results in a smi, the return is generated. |
2551 GenerateSmiSmiOperation(masm); | 2587 GenerateSmiSmiOperation(masm); |
2552 | 2588 |
2553 // If heap number results are possible generate the result in an allocated | 2589 // If heap number results are possible generate the result in an allocated |
2554 // heap number. | 2590 // heap number. |
2555 if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) { | 2591 if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) { |
2556 FloatingPointHelper::Destination destination = | 2592 FloatingPointHelper::Destination destination = |
2557 CpuFeatures::IsSupported(VFP3) && Token::MOD != op_ ? | 2593 CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD ? |
2558 FloatingPointHelper::kVFPRegisters : | 2594 FloatingPointHelper::kVFPRegisters : |
2559 FloatingPointHelper::kCoreRegisters; | 2595 FloatingPointHelper::kCoreRegisters; |
2560 | 2596 |
2561 Register heap_number_map = r6; | 2597 Register heap_number_map = r6; |
2562 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 2598 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
2563 | 2599 |
2564 // Allocate new heap number for result. | 2600 // Allocate new heap number for result. |
2565 Register heap_number = r5; | 2601 Register result = r5; |
2566 __ AllocateHeapNumber( | 2602 __ AllocateHeapNumber( |
2567 heap_number, scratch1, scratch2, heap_number_map, gc_required); | 2603 result, scratch1, scratch2, heap_number_map, gc_required); |
2568 | 2604 |
2569 // Load the smis. | 2605 // Load the smis. |
2570 FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2); | 2606 FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2); |
2571 | 2607 |
2572 // Calculate the result. | 2608 // Calculate the result. |
2573 if (destination == FloatingPointHelper::kVFPRegisters) { | 2609 if (destination == FloatingPointHelper::kVFPRegisters) { |
2574 // Using VFP registers: | 2610 // Using VFP registers: |
2575 // d6: Left value | 2611 // d6: Left value |
2576 // d7: Right value | 2612 // d7: Right value |
2577 CpuFeatures::Scope scope(VFP3); | 2613 CpuFeatures::Scope scope(VFP3); |
2578 GenerateVFPOperation(masm); | 2614 GenerateVFPOperation(masm); |
2579 | 2615 |
2580 __ sub(r0, heap_number, Operand(kHeapObjectTag)); | 2616 __ sub(r0, result, Operand(kHeapObjectTag)); |
2581 __ vstr(d5, r0, HeapNumber::kValueOffset); | 2617 __ vstr(d5, r0, HeapNumber::kValueOffset); |
2582 __ add(r0, r0, Operand(kHeapObjectTag)); | 2618 __ add(r0, r0, Operand(kHeapObjectTag)); |
2583 __ Ret(); | 2619 __ Ret(); |
2584 } else { | 2620 } else { |
2585 // Using core registers: | 2621 // Using core registers: |
2586 // r0: Left value (least significant part of mantissa). | 2622 // r0: Left value (least significant part of mantissa). |
2587 // r1: Left value (sign, exponent, top of mantissa). | 2623 // r1: Left value (sign, exponent, top of mantissa). |
2588 // r2: Right value (least significant part of mantissa). | 2624 // r2: Right value (least significant part of mantissa). |
2589 // r3: Right value (sign, exponent, top of mantissa). | 2625 // r3: Right value (sign, exponent, top of mantissa). |
2590 | 2626 |
2591 __ push(lr); // For later. | 2627 __ push(lr); // For later. |
2592 __ PrepareCallCFunction(4, scratch1); // Two doubles are 4 arguments. | 2628 __ PrepareCallCFunction(4, scratch1); // Two doubles are 4 arguments. |
2593 // Call C routine that may not cause GC or other trouble. r5 is callee | 2629 // Call C routine that may not cause GC or other trouble. r5 is callee |
2594 // saved. | 2630 // saved. |
2595 __ CallCFunction(ExternalReference::double_fp_operation(op_), 4); | 2631 __ CallCFunction(ExternalReference::double_fp_operation(op_), 4); |
2596 // Store answer in the overwritable heap number. | 2632 // Store answer in the overwritable heap number. |
2597 #if !defined(USE_ARM_EABI) | 2633 #if !defined(USE_ARM_EABI) |
2598 // Double returned in fp coprocessor registers 0 and 1, encoded as | 2634 // Double returned in fp coprocessor registers 0 and 1, encoded as |
2599 // register cr8. Offsets must be divisible by 4 for the coprocessor, so we | 2635 // register cr8. Offsets must be divisible by 4 for the coprocessor, so we |
2600 // need to subtract the tag from r5. | 2636 // need to subtract the tag from r5. |
2601 __ sub(scratch1, heap_number, Operand(kHeapObjectTag)); | 2637 __ sub(scratch1, result, Operand(kHeapObjectTag)); |
2602 __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset)); | 2638 __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset)); |
2603 #else | 2639 #else |
2604 // Double returned in registers 0 and 1. | 2640 // Double returned in registers 0 and 1. |
2605 __ Strd(r0, r1, FieldMemOperand(heap_number, HeapNumber::kValueOffset)); | 2641 __ Strd(r0, r1, FieldMemOperand(result, HeapNumber::kValueOffset)); |
2606 #endif | 2642 #endif |
2607 __ mov(r0, Operand(heap_number)); | 2643 __ mov(r0, Operand(result)); |
2608 // And we are done. | 2644 // And we are done. |
2609 __ pop(pc); | 2645 __ pop(pc); |
2610 } | 2646 } |
2611 } | 2647 } |
2612 __ bind(¬_smis); | 2648 __ bind(¬_smis); |
2613 } | 2649 } |
2614 | 2650 |
2615 | 2651 |
2616 void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { | 2652 void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { |
2617 Label not_smis, call_runtime; | 2653 Label not_smis, call_runtime; |
2618 | 2654 |
2619 ASSERT(op_ == Token::ADD || op_ == Token::SUB || op_ == Token::MUL); | 2655 ASSERT(op_ == Token::ADD || |
| 2656 op_ == Token::SUB || |
| 2657 op_ == Token::MUL || |
| 2658 op_ == Token::DIV || |
| 2659 op_ == Token::MOD); |
2620 | 2660 |
2621 if (result_type_ == TRBinaryOpIC::UNINITIALIZED || | 2661 if (result_type_ == TRBinaryOpIC::UNINITIALIZED || |
2622 result_type_ == TRBinaryOpIC::SMI) { | 2662 result_type_ == TRBinaryOpIC::SMI) { |
2623 // Only allow smi results. | 2663 // Only allow smi results. |
2624 GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS); | 2664 GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS); |
2625 } else { | 2665 } else { |
2626 // Allow heap number result and don't make a transition if a heap number | 2666 // Allow heap number result and don't make a transition if a heap number |
2627 // cannot be allocated. | 2667 // cannot be allocated. |
2628 GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); | 2668 GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); |
2629 } | 2669 } |
(...skipping 11 matching lines...)
2641 ASSERT(operands_type_ == TRBinaryOpIC::STRING); | 2681 ASSERT(operands_type_ == TRBinaryOpIC::STRING); |
2642 ASSERT(op_ == Token::ADD); | 2682 ASSERT(op_ == Token::ADD); |
2643 // Try to add arguments as strings; otherwise, transition to the generic | 2683 // Try to add arguments as strings; otherwise, transition to the generic |
2644 // TRBinaryOpIC type. | 2684 // TRBinaryOpIC type. |
2645 GenerateAddStrings(masm); | 2685 GenerateAddStrings(masm); |
2646 GenerateTypeTransition(masm); | 2686 GenerateTypeTransition(masm); |
2647 } | 2687 } |
2648 | 2688 |
2649 | 2689 |
2650 void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { | 2690 void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { |
2651 ASSERT(op_ == Token::ADD || op_ == Token::SUB || op_ == Token::MUL); | 2691 ASSERT(op_ == Token::ADD || |
| 2692 op_ == Token::SUB || |
| 2693 op_ == Token::MUL || |
| 2694 op_ == Token::DIV || |
| 2695 op_ == Token::MOD); |
2652 | 2696 |
2653 ASSERT(operands_type_ == TRBinaryOpIC::INT32); | 2697 ASSERT(operands_type_ == TRBinaryOpIC::INT32); |
2654 | 2698 |
2655 GenerateTypeTransition(masm); | 2699 GenerateTypeTransition(masm); |
2656 } | 2700 } |
2657 | 2701 |
2658 | 2702 |
2659 void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { | 2703 void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { |
2660 ASSERT(op_ == Token::ADD || op_ == Token::SUB || op_ == Token::MUL); | 2704 ASSERT(op_ == Token::ADD || |
| 2705 op_ == Token::SUB || |
| 2706 op_ == Token::MUL || |
| 2707 op_ == Token::DIV || |
| 2708 op_ == Token::MOD); |
2661 | 2709 |
2662 Register scratch1 = r7; | 2710 Register scratch1 = r7; |
2663 Register scratch2 = r9; | 2711 Register scratch2 = r9; |
2664 | 2712 |
2665 Label not_number, call_runtime; | 2713 Label not_number, call_runtime; |
2666 ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER); | 2714 ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER); |
2667 | 2715 |
2668 Register heap_number_map = r6; | 2716 Register heap_number_map = r6; |
2669 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 2717 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
2670 | 2718 |
| 2719 // Get a heap number object for the result - might be left or right if one |
| 2720 // of them is overwritable. Uses a callee-saved register to keep the value |
| 2721 // across the C call that we might make below. |
| 2722 Register result = r5; |
| 2723 GenerateHeapResultAllocation( |
| 2724 masm, result, heap_number_map, scratch1, scratch2, &call_runtime); |
| 2725 |
2671 // Load left and right operands into d6 and d7 or r0/r1 and r2/r3 depending on | 2726 // Load left and right operands into d6 and d7 or r0/r1 and r2/r3 depending on |
2672 // whether VFP3 is available. | 2727 // whether VFP3 is available; MOD always uses the core registers. |
2673 FloatingPointHelper::Destination destination = | 2728 FloatingPointHelper::Destination destination = |
2674 CpuFeatures::IsSupported(VFP3) ? | 2729 CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD ? |
2675 FloatingPointHelper::kVFPRegisters : | 2730 FloatingPointHelper::kVFPRegisters : |
2676 FloatingPointHelper::kCoreRegisters; | 2731 FloatingPointHelper::kCoreRegisters; |
2677 FloatingPointHelper::LoadOperands(masm, | 2732 FloatingPointHelper::LoadOperands(masm, |
2678 destination, | 2733 destination, |
2679 heap_number_map, | 2734 heap_number_map, |
2680 scratch1, | 2735 scratch1, |
2681 scratch2, | 2736 scratch2, |
2682 ¬_number); | 2737 ¬_number); |
2683 if (destination == FloatingPointHelper::kVFPRegisters) { | 2738 if (destination == FloatingPointHelper::kVFPRegisters) { |
2684 // Use floating point instructions for the binary operation. | 2739 // Use floating point instructions for the binary operation. |
2685 CpuFeatures::Scope scope(VFP3); | 2740 CpuFeatures::Scope scope(VFP3); |
2686 GenerateVFPOperation(masm); | 2741 GenerateVFPOperation(masm); |
2687 | 2742 |
2688 // Get a heap number object for the result - might be left or right if one | |
2689 // of these are overwritable. | |
2690 GenerateHeapResultAllocation( | |
2691 masm, r4, heap_number_map, scratch1, scratch2, &call_runtime); | |
2692 | |
2693 // Fill the result into the allocated heap number and return. | 2743 // Fill the result into the allocated heap number and return. |
2694 __ sub(r0, r4, Operand(kHeapObjectTag)); | 2744 __ sub(r0, result, Operand(kHeapObjectTag)); |
2695 __ vstr(d5, r0, HeapNumber::kValueOffset); | 2745 __ vstr(d5, r0, HeapNumber::kValueOffset); |
2696 __ add(r0, r0, Operand(kHeapObjectTag)); | 2746 __ add(r0, r0, Operand(kHeapObjectTag)); |
2697 __ Ret(); | 2747 __ Ret(); |
2698 | 2748 |
2699 } else { | 2749 } else { |
2700 // Call a C function for the binary operation. | 2750 // Call a C function for the binary operation. |
2701 // r0/r1: Left operand | 2751 // r0/r1: Left operand |
2702 // r2/r3: Right operand | 2752 // r2/r3: Right operand |
2703 | 2753 |
2704 // Get a heap number object for the result - might be left or right if one | |
2705 // of these are overwritable. Uses a callee-save register to keep the value | |
2706 // across the c call. | |
2707 GenerateHeapResultAllocation( | |
2708 masm, r4, heap_number_map, scratch1, scratch2, &call_runtime); | |
2709 | |
2710 __ push(lr); // For returning later (no GC after this point). | 2754 __ push(lr); // For returning later (no GC after this point). |
2711 __ PrepareCallCFunction(4, scratch1); // Two doubles count as 4 arguments. | 2755 __ PrepareCallCFunction(4, scratch1); // Two doubles count as 4 arguments. |
2712 // Call C routine that may not cause GC or other trouble. r4 is callee | 2756 // Call C routine that may not cause GC or other trouble. result (r5) is |
2713 // saved. | 2757 // callee-saved. |
2714 __ CallCFunction(ExternalReference::double_fp_operation(op_), 4); | 2758 __ CallCFunction(ExternalReference::double_fp_operation(op_), 4); |
2715 | |
2716 // Fill the result into the allocated heap number. | 2759 // Fill the result into the allocated heap number. |
2717 #if !defined(USE_ARM_EABI) | 2760 #if !defined(USE_ARM_EABI) |
2718 // Double returned in fp coprocessor registers 0 and 1, encoded as | 2761 // Double returned in fp coprocessor registers 0 and 1, encoded as |
2719 // register cr8. Offsets must be divisible by 4 for the coprocessor, so we | 2762 // register cr8. Offsets must be divisible by 4 for the coprocessor, so we |
2720 // need to subtract the tag from r5. | 2763 // need to subtract the tag from r5. |
2721 __ sub(scratch1, r4, Operand(kHeapObjectTag)); | 2764 __ sub(scratch1, result, Operand(kHeapObjectTag)); |
2722 __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset)); | 2765 __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset)); |
2723 #else | 2766 #else |
2724 // Double returned in registers 0 and 1. | 2767 // Double returned in registers 0 and 1. |
2725 __ Strd(r0, r1, FieldMemOperand(r4, HeapNumber::kValueOffset)); | 2768 __ Strd(r0, r1, FieldMemOperand(result, HeapNumber::kValueOffset)); |
2726 #endif | 2769 #endif |
2727 __ mov(r0, Operand(r4)); | 2770 __ mov(r0, Operand(result)); |
2728 __ pop(pc); // Return to the pushed lr. | 2771 __ pop(pc); // Return to the pushed lr. |
2729 } | 2772 } |
2730 | 2773 |
2731 __ bind(¬_number); | 2774 __ bind(¬_number); |
2732 GenerateTypeTransition(masm); | 2775 GenerateTypeTransition(masm); |
2733 | 2776 |
2734 __ bind(&call_runtime); | 2777 __ bind(&call_runtime); |
2735 GenerateCallRuntime(masm); | 2778 GenerateCallRuntime(masm); |
2736 } | 2779 } |
2737 | 2780 |
2738 | 2781 |
2739 void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) { | 2782 void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) { |
2740 ASSERT(op_ == Token::ADD || op_ == Token::SUB || op_ == Token::MUL); | 2783 ASSERT(op_ == Token::ADD || |
| 2784 op_ == Token::SUB || |
| 2785 op_ == Token::MUL || |
| 2786 op_ == Token::DIV || |
| 2787 op_ == Token::MOD); |
2741 | 2788 |
2742 Label call_runtime; | 2789 Label call_runtime; |
2743 | 2790 |
2744 GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); | 2791 GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); |
2745 | 2792 |
2746 // If all else fails, use the runtime system to get the correct | 2793 // If all else fails, use the runtime system to get the correct |
2747 // result. | 2794 // result. |
2748 __ bind(&call_runtime); | 2795 __ bind(&call_runtime); |
2749 | 2796 |
2750 // Try to add strings before calling runtime. | 2797 // Try to add strings before calling runtime. |
(...skipping 38 matching lines...)
2789 switch (op_) { | 2836 switch (op_) { |
2790 case Token::ADD: | 2837 case Token::ADD: |
2791 __ InvokeBuiltin(Builtins::ADD, JUMP_JS); | 2838 __ InvokeBuiltin(Builtins::ADD, JUMP_JS); |
2792 break; | 2839 break; |
2793 case Token::SUB: | 2840 case Token::SUB: |
2794 __ InvokeBuiltin(Builtins::SUB, JUMP_JS); | 2841 __ InvokeBuiltin(Builtins::SUB, JUMP_JS); |
2795 break; | 2842 break; |
2796 case Token::MUL: | 2843 case Token::MUL: |
2797 __ InvokeBuiltin(Builtins::MUL, JUMP_JS); | 2844 __ InvokeBuiltin(Builtins::MUL, JUMP_JS); |
2798 break; | 2845 break; |
| 2846 case Token::DIV: |
| 2847 __ InvokeBuiltin(Builtins::DIV, JUMP_JS); |
| 2848 break; |
| 2849 case Token::MOD: |
| 2850 __ InvokeBuiltin(Builtins::MOD, JUMP_JS); |
| 2851 break; |
2799 default: | 2852 default: |
2800 UNREACHABLE(); | 2853 UNREACHABLE(); |
2801 } | 2854 } |
2802 } | 2855 } |
2803 | 2856 |
2804 | 2857 |
2805 void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation( | 2858 void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation( |
2806 MacroAssembler* masm, | 2859 MacroAssembler* masm, |
2807 Register result, | 2860 Register result, |
2808 Register heap_number_map, | 2861 Register heap_number_map, |
(...skipping 2907 matching lines...)
5716 __ pop(r1); | 5769 __ pop(r1); |
5717 __ Jump(r2); | 5770 __ Jump(r2); |
5718 } | 5771 } |
5719 | 5772 |
5720 | 5773 |
5721 #undef __ | 5774 #undef __ |
5722 | 5775 |
5723 } } // namespace v8::internal | 5776 } } // namespace v8::internal |
5724 | 5777 |
5725 #endif // V8_TARGET_ARCH_ARM | 5778 #endif // V8_TARGET_ARCH_ARM |