Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(15)

Side by Side Diff: src/arm/code-stubs-arm.cc

Issue 6606002: Merge revision 6500-6600 from bleeding_edge to the isolates branch. (Closed) Base URL: http://v8.googlecode.com/svn/branches/experimental/isolates/
Patch Set: '' Created 9 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after
48 Register rhs, 48 Register rhs,
49 Label* lhs_not_nan, 49 Label* lhs_not_nan,
50 Label* slow, 50 Label* slow,
51 bool strict); 51 bool strict);
52 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cond); 52 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cond);
53 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, 53 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
54 Register lhs, 54 Register lhs,
55 Register rhs); 55 Register rhs);
56 56
57 57
58 void ToNumberStub::Generate(MacroAssembler* masm) {
59 // The ToNumber stub takes one argument in eax.
60 Label check_heap_number, call_builtin;
61 __ tst(r0, Operand(kSmiTagMask));
62 __ b(ne, &check_heap_number);
63 __ Ret();
64
65 __ bind(&check_heap_number);
66 __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
67 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
68 __ cmp(r1, ip);
69 __ b(ne, &call_builtin);
70 __ Ret();
71
72 __ bind(&call_builtin);
73 __ push(r0);
74 __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_JS);
75 }
76
77
58 void FastNewClosureStub::Generate(MacroAssembler* masm) { 78 void FastNewClosureStub::Generate(MacroAssembler* masm) {
59 // Create a new closure from the given function info in new 79 // Create a new closure from the given function info in new
60 // space. Set the context to the current context in cp. 80 // space. Set the context to the current context in cp.
61 Label gc; 81 Label gc;
62 82
63 // Pop the function info from the stack. 83 // Pop the function info from the stack.
64 __ pop(r3); 84 __ pop(r3);
65 85
66 // Attempt to allocate new JSFunction in new space. 86 // Attempt to allocate new JSFunction in new space.
67 __ AllocateInNewSpace(JSFunction::kSize, 87 __ AllocateInNewSpace(JSFunction::kSize,
(...skipping 294 matching lines...) Expand 10 before | Expand all | Expand 10 after
362 Destination destination, 382 Destination destination,
363 Register scratch1, 383 Register scratch1,
364 Register scratch2); 384 Register scratch2);
365 385
366 // Loads objects from r0 and r1 (right and left in binary operations) into 386 // Loads objects from r0 and r1 (right and left in binary operations) into
367 // floating point registers. Depending on the destination the values ends up 387 // floating point registers. Depending on the destination the values ends up
368 // either d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination is 388 // either d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination is
369 // floating point registers VFP3 must be supported. If core registers are 389 // floating point registers VFP3 must be supported. If core registers are
370 // requested when VFP3 is supported d6 and d7 will still be scratched. If 390 // requested when VFP3 is supported d6 and d7 will still be scratched. If
371 // either r0 or r1 is not a number (not smi and not heap number object) the 391 // either r0 or r1 is not a number (not smi and not heap number object) the
372 // not_number label is jumped to. 392 // not_number label is jumped to with r0 and r1 intact.
373 static void LoadOperands(MacroAssembler* masm, 393 static void LoadOperands(MacroAssembler* masm,
374 FloatingPointHelper::Destination destination, 394 FloatingPointHelper::Destination destination,
375 Register heap_number_map, 395 Register heap_number_map,
376 Register scratch1, 396 Register scratch1,
377 Register scratch2, 397 Register scratch2,
378 Label* not_number); 398 Label* not_number);
379 private: 399 private:
380 static void LoadNumber(MacroAssembler* masm, 400 static void LoadNumber(MacroAssembler* masm,
381 FloatingPointHelper::Destination destination, 401 FloatingPointHelper::Destination destination,
382 Register object, 402 Register object,
383 DwVfpRegister dst, 403 DwVfpRegister dst,
384 Register dst1, 404 Register dst1,
385 Register dst2, 405 Register dst2,
386 Register heap_number_map, 406 Register heap_number_map,
387 Register scratch1, 407 Register scratch1,
388 Register scratch2, 408 Register scratch2,
389 Label* not_number); 409 Label* not_number);
390 }; 410 };
391 411
392 412
393 void FloatingPointHelper::LoadSmis(MacroAssembler* masm, 413 void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
394 FloatingPointHelper::Destination destination, 414 FloatingPointHelper::Destination destination,
395 Register scratch1, 415 Register scratch1,
396 Register scratch2) { 416 Register scratch2) {
397 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) { 417 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
398 CpuFeatures::Scope scope(VFP3); 418 CpuFeatures::Scope scope(VFP3);
399 __ mov(scratch1, Operand(r0, ASR, kSmiTagSize)); 419 __ mov(scratch1, Operand(r0, ASR, kSmiTagSize));
400 __ vmov(s15, scratch1); 420 __ vmov(d7.high(), scratch1);
401 __ vcvt_f64_s32(d7, s15); 421 __ vcvt_f64_s32(d7, d7.high());
402 __ mov(scratch1, Operand(r1, ASR, kSmiTagSize)); 422 __ mov(scratch1, Operand(r1, ASR, kSmiTagSize));
403 __ vmov(s13, scratch1); 423 __ vmov(d6.high(), scratch1);
404 __ vcvt_f64_s32(d6, s13); 424 __ vcvt_f64_s32(d6, d6.high());
405 if (destination == kCoreRegisters) { 425 if (destination == kCoreRegisters) {
406 __ vmov(r2, r3, d7); 426 __ vmov(r2, r3, d7);
407 __ vmov(r0, r1, d6); 427 __ vmov(r0, r1, d6);
408 } 428 }
409 } else { 429 } else {
410 ASSERT(destination == kCoreRegisters); 430 ASSERT(destination == kCoreRegisters);
411 // Write Smi from r0 to r3 and r2 in double format. 431 // Write Smi from r0 to r3 and r2 in double format.
412 __ mov(scratch1, Operand(r0)); 432 __ mov(scratch1, Operand(r0));
413 ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2); 433 ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2);
414 __ push(lr); 434 __ push(lr);
(...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after
449 Register heap_number_map, 469 Register heap_number_map,
450 Register scratch1, 470 Register scratch1,
451 Register scratch2, 471 Register scratch2,
452 Label* not_number) { 472 Label* not_number) {
453 Label is_smi, done; 473 Label is_smi, done;
454 474
455 __ JumpIfSmi(object, &is_smi); 475 __ JumpIfSmi(object, &is_smi);
456 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); 476 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
457 477
458 // Handle loading a double from a heap number. 478 // Handle loading a double from a heap number.
459 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) { 479 if (Isolate::Current()->cpu_features()->IsSupported(VFP3) &&
480 destination == kVFPRegisters) {
460 CpuFeatures::Scope scope(VFP3); 481 CpuFeatures::Scope scope(VFP3);
461 // Load the double from tagged HeapNumber to double register. 482 // Load the double from tagged HeapNumber to double register.
462 __ sub(scratch1, object, Operand(kHeapObjectTag)); 483 __ sub(scratch1, object, Operand(kHeapObjectTag));
463 __ vldr(dst, scratch1, HeapNumber::kValueOffset); 484 __ vldr(dst, scratch1, HeapNumber::kValueOffset);
464 } else { 485 } else {
465 ASSERT(destination == kCoreRegisters); 486 ASSERT(destination == kCoreRegisters);
466 // Load the double from heap number to dst1 and dst2 in double format. 487 // Load the double from heap number to dst1 and dst2 in double format.
467 __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset)); 488 __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
468 } 489 }
469 __ jmp(&done); 490 __ jmp(&done);
470 491
471 // Handle loading a double from a smi. 492 // Handle loading a double from a smi.
472 __ bind(&is_smi); 493 __ bind(&is_smi);
473 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) { 494 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
474 CpuFeatures::Scope scope(VFP3); 495 CpuFeatures::Scope scope(VFP3);
475 // Convert smi to double. 496 // Convert smi to double using VFP instructions.
476 __ SmiUntag(scratch1, object); 497 __ SmiUntag(scratch1, object);
477 __ vmov(dst.high(), scratch1); 498 __ vmov(dst.high(), scratch1);
478 __ vcvt_f64_s32(dst, dst.high()); 499 __ vcvt_f64_s32(dst, dst.high());
479 if (destination == kCoreRegisters) { 500 if (destination == kCoreRegisters) {
501 // Load the converted smi to dst1 and dst2 in double format.
480 __ vmov(dst1, dst2, dst); 502 __ vmov(dst1, dst2, dst);
481 } 503 }
482 } else { 504 } else {
483 ASSERT(destination == kCoreRegisters); 505 ASSERT(destination == kCoreRegisters);
484 // Write Smi to dst1 and dst2 double format. 506 // Write smi to dst1 and dst2 double format.
485 __ mov(scratch1, Operand(object)); 507 __ mov(scratch1, Operand(object));
486 ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2); 508 ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
487 __ push(lr); 509 __ push(lr);
488 __ Call(stub.GetCode(), RelocInfo::CODE_TARGET); 510 __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
489 __ pop(lr); 511 __ pop(lr);
490 } 512 }
491 513
492 __ bind(&done); 514 __ bind(&done);
493 } 515 }
494 516
(...skipping 1935 matching lines...) Expand 10 before | Expand all | Expand 10 after
2430 2452
2431 OS::SNPrintF(Vector<char>(name_, kMaxNameLength), 2453 OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
2432 "TypeRecordingBinaryOpStub_%s_%s_%s", 2454 "TypeRecordingBinaryOpStub_%s_%s_%s",
2433 op_name, 2455 op_name,
2434 overwrite_name, 2456 overwrite_name,
2435 TRBinaryOpIC::GetName(operands_type_)); 2457 TRBinaryOpIC::GetName(operands_type_));
2436 return name_; 2458 return name_;
2437 } 2459 }
2438 2460
2439 2461
2440 void TypeRecordingBinaryOpStub::GenerateOptimisticSmiOperation( 2462 void TypeRecordingBinaryOpStub::GenerateSmiSmiOperation(
2441 MacroAssembler* masm) { 2463 MacroAssembler* masm) {
2442 Register left = r1; 2464 Register left = r1;
2443 Register right = r0; 2465 Register right = r0;
2466 Register scratch1 = r7;
2467 Register scratch2 = r9;
2444 2468
2445 ASSERT(right.is(r0)); 2469 ASSERT(right.is(r0));
2470 STATIC_ASSERT(kSmiTag == 0);
2446 2471
2472 Label not_smi_result;
2447 switch (op_) { 2473 switch (op_) {
2448 case Token::ADD: 2474 case Token::ADD:
2449 __ add(right, left, Operand(right), SetCC); // Add optimistically. 2475 __ add(right, left, Operand(right), SetCC); // Add optimistically.
2450 __ Ret(vc); 2476 __ Ret(vc);
2451 __ sub(right, right, Operand(left)); // Revert optimistic add. 2477 __ sub(right, right, Operand(left)); // Revert optimistic add.
2452 break; 2478 break;
2453 case Token::SUB: 2479 case Token::SUB:
2454 __ sub(right, left, Operand(right), SetCC); // Subtract optimistically. 2480 __ sub(right, left, Operand(right), SetCC); // Subtract optimistically.
2455 __ Ret(vc); 2481 __ Ret(vc);
2456 __ sub(right, left, Operand(right)); // Revert optimistic subtract. 2482 __ sub(right, left, Operand(right)); // Revert optimistic subtract.
2457 break; 2483 break;
2484 case Token::MUL:
2485 // Remove tag from one of the operands. This way the multiplication result
2486 // will be a smi if it fits the smi range.
2487 __ SmiUntag(ip, right);
2488 // Do multiplication
2489 // scratch1 = lower 32 bits of ip * left.
2490 // scratch2 = higher 32 bits of ip * left.
2491 __ smull(scratch1, scratch2, left, ip);
2492 // Check for overflowing the smi range - no overflow if higher 33 bits of
2493 // the result are identical.
2494 __ mov(ip, Operand(scratch1, ASR, 31));
2495 __ cmp(ip, Operand(scratch2));
2496 __ b(ne, &not_smi_result);
2497 // Go slow on zero result to handle -0.
2498 __ tst(scratch1, Operand(scratch1));
2499 __ mov(right, Operand(scratch1), LeaveCC, ne);
2500 __ Ret(ne);
2501 // We need -0 if we were multiplying a negative number with 0 to get 0.
2502 // We know one of them was zero.
2503 __ add(scratch2, right, Operand(left), SetCC);
2504 __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl);
2505 __ Ret(pl); // Return smi 0 if the non-zero one was positive.
2506 // We fall through here if we multiplied a negative number with 0, because
2507 // that would mean we should produce -0.
2508 break;
2509 case Token::DIV:
2510 // Check for power of two on the right hand side.
2511 __ JumpIfNotPowerOfTwoOrZero(right, scratch1, &not_smi_result);
2512 // Check for positive and no remainder (scratch1 contains right - 1).
2513 __ orr(scratch2, scratch1, Operand(0x80000000u));
2514 __ tst(left, scratch2);
2515 __ b(ne, &not_smi_result);
2516
2517 // Perform division by shifting.
2518 __ CountLeadingZeros(scratch1, scratch1, scratch2);
2519 __ rsb(scratch1, scratch1, Operand(31));
2520 __ mov(right, Operand(left, LSR, scratch1));
2521 __ Ret();
2522 break;
2523 case Token::MOD:
2524 // Check for two positive smis.
2525 __ orr(scratch1, left, Operand(right));
2526 __ tst(scratch1, Operand(0x80000000u | kSmiTagMask));
2527 __ b(ne, &not_smi_result);
2528
2529 // Check for power of two on the right hand side.
2530 __ JumpIfNotPowerOfTwoOrZero(right, scratch1, &not_smi_result);
2531
2532 // Perform modulus by masking.
2533 __ and_(right, left, Operand(scratch1));
2534 __ Ret();
2535 break;
2458 default: 2536 default:
2459 UNREACHABLE(); 2537 UNREACHABLE();
2460 } 2538 }
2539 __ bind(&not_smi_result);
2461 } 2540 }
2462 2541
2463 2542
2464 void TypeRecordingBinaryOpStub::GenerateVFPOperation( 2543 void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
2465 MacroAssembler* masm) { 2544 bool smi_operands,
2466 switch (op_) { 2545 Label* not_numbers,
2467 case Token::ADD: 2546 Label* gc_required) {
2468 __ vadd(d5, d6, d7); 2547 Register left = r1;
2469 break; 2548 Register right = r0;
2470 case Token::SUB: 2549 Register scratch1 = r7;
2471 __ vsub(d5, d6, d7); 2550 Register scratch2 = r9;
2472 break; 2551
2473 default: 2552 // Load left and right operands into d6 and d7 or r0/r1 and r2/r3 depending
2474 UNREACHABLE(); 2553 // on whether VFP3 is available.
2554 bool has_vfp3 = Isolate::Current()->cpu_features()->IsSupported(VFP3);
2555 FloatingPointHelper::Destination destination =
2556 has_vfp3 && op_ != Token::MOD ?
2557 FloatingPointHelper::kVFPRegisters :
2558 FloatingPointHelper::kCoreRegisters;
2559
2560 Register heap_number_map = r6;
2561 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2562
2563 // Allocate new heap number for result.
2564 Register result = r5;
2565 __ AllocateHeapNumber(
2566 result, scratch1, scratch2, heap_number_map, gc_required);
2567
2568 // Load the operands.
2569 if (smi_operands) {
2570 if (FLAG_debug_code) {
2571 __ AbortIfNotSmi(left);
2572 __ AbortIfNotSmi(right);
2573 }
2574 FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
2575 } else {
2576 FloatingPointHelper::LoadOperands(masm,
2577 destination,
2578 heap_number_map,
2579 scratch1,
2580 scratch2,
2581 not_numbers);
2582 }
2583
2584 // Calculate the result.
2585 if (destination == FloatingPointHelper::kVFPRegisters) {
2586 // Using VFP registers:
2587 // d6: Left value
2588 // d7: Right value
2589 CpuFeatures::Scope scope(VFP3);
2590 switch (op_) {
2591 case Token::ADD:
2592 __ vadd(d5, d6, d7);
2593 break;
2594 case Token::SUB:
2595 __ vsub(d5, d6, d7);
2596 break;
2597 case Token::MUL:
2598 __ vmul(d5, d6, d7);
2599 break;
2600 case Token::DIV:
2601 __ vdiv(d5, d6, d7);
2602 break;
2603 default:
2604 UNREACHABLE();
2605 }
2606
2607 __ sub(r0, result, Operand(kHeapObjectTag));
2608 __ vstr(d5, r0, HeapNumber::kValueOffset);
2609 __ add(r0, r0, Operand(kHeapObjectTag));
2610 __ Ret();
2611 } else {
2612 // Using core registers:
2613 // r0: Left value (least significant part of mantissa).
2614 // r1: Left value (sign, exponent, top of mantissa).
2615 // r2: Right value (least significant part of mantissa).
2616 // r3: Right value (sign, exponent, top of mantissa).
2617
2618 __ push(lr); // For later.
2619 __ PrepareCallCFunction(4, scratch1); // Two doubles are 4 arguments.
2620 // Call C routine that may not cause GC or other trouble. r5 is callee
2621 // save.
2622 __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
2623 // Store answer in the overwritable heap number.
2624 #if !defined(USE_ARM_EABI)
2625 // Double returned in fp coprocessor register 0 and 1, encoded as
2626 // register cr8. Offsets must be divisible by 4 for coprocessor so we
 2627 // need to subtract the tag from r5.
2628 __ sub(scratch1, result, Operand(kHeapObjectTag));
2629 __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset));
2630 #else
2631 // Double returned in registers 0 and 1.
2632 __ Strd(r0, r1, FieldMemOperand(result, HeapNumber::kValueOffset));
2633 #endif
2634 __ mov(r0, Operand(result));
2635 // And we are done.
2636 __ pop(pc);
2475 } 2637 }
2476 } 2638 }
2477 2639
2478 2640
 2479 // Generate the smi code. If the operation on smis is successful this return is 2641 // Generate the smi code. If the operation on smis is successful this return is
2480 // generated. If the result is not a smi and heap number allocation is not 2642 // generated. If the result is not a smi and heap number allocation is not
2481 // requested the code falls through. If number allocation is requested but a 2643 // requested the code falls through. If number allocation is requested but a
 2482 // heap number cannot be allocated the code jumps to the label gc_required. 2644 // heap number cannot be allocated the code jumps to the label gc_required.
2483 void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, 2645 void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
2484 Label* gc_required, 2646 Label* gc_required,
2485 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { 2647 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
2486 Label not_smis; 2648 Label not_smis;
2487 2649
2488 ASSERT(op_ == Token::ADD || op_ == Token::SUB); 2650 ASSERT(op_ == Token::ADD ||
2651 op_ == Token::SUB ||
2652 op_ == Token::MUL ||
2653 op_ == Token::DIV ||
2654 op_ == Token::MOD);
2489 2655
2490 Register left = r1; 2656 Register left = r1;
2491 Register right = r0; 2657 Register right = r0;
2492 Register scratch1 = r7; 2658 Register scratch1 = r7;
2493 Register scratch2 = r9; 2659 Register scratch2 = r9;
2494 2660
2495 // Perform combined smi check on both operands. 2661 // Perform combined smi check on both operands.
2496 __ orr(scratch1, left, Operand(right)); 2662 __ orr(scratch1, left, Operand(right));
2497 STATIC_ASSERT(kSmiTag == 0); 2663 STATIC_ASSERT(kSmiTag == 0);
2498 __ tst(scratch1, Operand(kSmiTagMask)); 2664 __ tst(scratch1, Operand(kSmiTagMask));
2499 __ b(ne, &not_smis); 2665 __ b(ne, &not_smis);
2500 2666
2501 GenerateOptimisticSmiOperation(masm); 2667 // If the smi-smi operation results in a smi return is generated.
2668 GenerateSmiSmiOperation(masm);
2502 2669
2503 // If heap number results are possible generate the result in an allocated 2670 // If heap number results are possible generate the result in an allocated
2504 // heap number. 2671 // heap number.
2505 if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) { 2672 if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
2506 FloatingPointHelper::Destination destination = 2673 GenerateFPOperation(masm, true, NULL, gc_required);
2507 Isolate::Current()->cpu_features()->IsSupported(VFP3) &&
2508 Token::MOD != op_ ?
2509 FloatingPointHelper::kVFPRegisters :
2510 FloatingPointHelper::kCoreRegisters;
2511
2512 Register heap_number_map = r6;
2513 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2514
2515 // Allocate new heap number for result.
2516 Register heap_number = r5;
2517 __ AllocateHeapNumber(
2518 heap_number, scratch1, scratch2, heap_number_map, gc_required);
2519
2520 // Load the smis.
2521 FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
2522
2523 // Calculate the result.
2524 if (destination == FloatingPointHelper::kVFPRegisters) {
2525 // Using VFP registers:
2526 // d6: Left value
2527 // d7: Right value
2528 CpuFeatures::Scope scope(VFP3);
2529 GenerateVFPOperation(masm);
2530
2531 __ sub(r0, heap_number, Operand(kHeapObjectTag));
2532 __ vstr(d5, r0, HeapNumber::kValueOffset);
2533 __ add(r0, r0, Operand(kHeapObjectTag));
2534 __ Ret();
2535 } else {
2536 // Using core registers:
2537 // r0: Left value (least significant part of mantissa).
2538 // r1: Left value (sign, exponent, top of mantissa).
2539 // r2: Right value (least significant part of mantissa).
2540 // r3: Right value (sign, exponent, top of mantissa).
2541
2542 __ push(lr); // For later.
2543 __ PrepareCallCFunction(4, scratch1); // Two doubles are 4 arguments.
2544 // Call C routine that may not cause GC or other trouble. r5 is callee
2545 // save.
2546 __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
2547 // Store answer in the overwritable heap number.
2548 #if !defined(USE_ARM_EABI)
2549 // Double returned in fp coprocessor register 0 and 1, encoded as
2550 // register cr8. Offsets must be divisible by 4 for coprocessor so we
 2551 // need to substract the tag from r5. 2627 // need to subtract the tag from r5.
2552 __ sub(scratch1, heap_number, Operand(kHeapObjectTag));
2553 __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset));
2554 #else
2555 // Double returned in registers 0 and 1.
2556 __ Strd(r0, r1, FieldMemOperand(heap_number, HeapNumber::kValueOffset));
2557 #endif
2558 __ mov(r0, Operand(heap_number));
2559 // And we are done.
2560 __ pop(pc);
2561 }
2562 } 2674 }
2563 __ bind(&not_smis); 2675 __ bind(&not_smis);
2564 } 2676 }
2565 2677
2566 2678
2567 void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { 2679 void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
2568 Label not_smis, call_runtime; 2680 Label not_smis, call_runtime;
2569 2681
2570 ASSERT(op_ == Token::ADD || op_ == Token::SUB); 2682 ASSERT(op_ == Token::ADD ||
2683 op_ == Token::SUB ||
2684 op_ == Token::MUL ||
2685 op_ == Token::DIV ||
2686 op_ == Token::MOD);
2571 2687
2572 if (result_type_ == TRBinaryOpIC::UNINITIALIZED || 2688 if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
2573 result_type_ == TRBinaryOpIC::SMI) { 2689 result_type_ == TRBinaryOpIC::SMI) {
2574 // Only allow smi results. 2690 // Only allow smi results.
2575 GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS); 2691 GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS);
2576 } else { 2692 } else {
2577 // Allow heap number result and don't make a transition if a heap number 2693 // Allow heap number result and don't make a transition if a heap number
2578 // cannot be allocated. 2694 // cannot be allocated.
2579 GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); 2695 GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
2580 } 2696 }
(...skipping 11 matching lines...) Expand all
2592 ASSERT(operands_type_ == TRBinaryOpIC::STRING); 2708 ASSERT(operands_type_ == TRBinaryOpIC::STRING);
2593 ASSERT(op_ == Token::ADD); 2709 ASSERT(op_ == Token::ADD);
2594 // Try to add arguments as strings, otherwise, transition to the generic 2710 // Try to add arguments as strings, otherwise, transition to the generic
2595 // TRBinaryOpIC type. 2711 // TRBinaryOpIC type.
2596 GenerateAddStrings(masm); 2712 GenerateAddStrings(masm);
2597 GenerateTypeTransition(masm); 2713 GenerateTypeTransition(masm);
2598 } 2714 }
2599 2715
2600 2716
2601 void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { 2717 void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
2602 ASSERT(op_ == Token::ADD || op_ == Token::SUB); 2718 ASSERT(op_ == Token::ADD ||
2719 op_ == Token::SUB ||
2720 op_ == Token::MUL ||
2721 op_ == Token::DIV ||
2722 op_ == Token::MOD);
2603 2723
2604 ASSERT(operands_type_ == TRBinaryOpIC::INT32); 2724 ASSERT(operands_type_ == TRBinaryOpIC::INT32);
2605 2725
2606 GenerateTypeTransition(masm); 2726 GenerateTypeTransition(masm);
2607 } 2727 }
2608 2728
2609 2729
2610 void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { 2730 void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
2611 ASSERT(op_ == Token::ADD || op_ == Token::SUB); 2731 ASSERT(op_ == Token::ADD ||
2732 op_ == Token::SUB ||
2733 op_ == Token::MUL ||
2734 op_ == Token::DIV ||
2735 op_ == Token::MOD);
2612 2736
2613 Register scratch1 = r7; 2737 Label not_numbers, call_runtime;
2614 Register scratch2 = r9;
2615
2616 Label not_number, call_runtime;
2617 ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER); 2738 ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER);
2618 2739
2619 Register heap_number_map = r6; 2740 GenerateFPOperation(masm, false, &not_numbers, &call_runtime);
2620 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2621 2741
2622 // Load left and right operands into d6 and d7 or r0/r1 and r2/r3 depending on 2742 __ bind(&not_numbers);
2623 // whether VFP3 is available.
2624 FloatingPointHelper::Destination destination =
2625 Isolate::Current()->cpu_features()->IsSupported(VFP3) ?
2626 FloatingPointHelper::kVFPRegisters :
2627 FloatingPointHelper::kCoreRegisters;
2628 FloatingPointHelper::LoadOperands(masm,
2629 destination,
2630 heap_number_map,
2631 scratch1,
2632 scratch2,
2633 &not_number);
2634 if (destination == FloatingPointHelper::kVFPRegisters) {
2635 // Use floating point instructions for the binary operation.
2636 CpuFeatures::Scope scope(VFP3);
2637 GenerateVFPOperation(masm);
2638
2639 // Get a heap number object for the result - might be left or right if one
2640 // of these are overwritable.
2641 GenerateHeapResultAllocation(
2642 masm, r4, heap_number_map, scratch1, scratch2, &call_runtime);
2643
2644 // Fill the result into the allocated heap number and return.
2645 __ sub(r0, r4, Operand(kHeapObjectTag));
2646 __ vstr(d5, r0, HeapNumber::kValueOffset);
2647 __ add(r0, r0, Operand(kHeapObjectTag));
2648 __ Ret();
2649
2650 } else {
2651 // Call a C function for the binary operation.
2652 // r0/r1: Left operand
2653 // r2/r3: Right operand
2654
2655 // Get a heap number object for the result - might be left or right if one
2656 // of these are overwritable. Uses a callee-save register to keep the value
2657 // across the c call.
2658 GenerateHeapResultAllocation(
2659 masm, r4, heap_number_map, scratch1, scratch2, &call_runtime);
2660
2661 __ push(lr); // For returning later (no GC after this point).
2662 __ PrepareCallCFunction(4, scratch1); // Two doubles count as 4 arguments.
2663 // Call C routine that may not cause GC or other trouble. r4 is callee
2664 // saved.
2665 __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
2666
2667 // Fill the result into the allocated heap number.
2668 #if !defined(USE_ARM_EABI)
2669 // Double returned in fp coprocessor register 0 and 1, encoded as
2670 // register cr8. Offsets must be divisible by 4 for coprocessor so we
 2671 // need to subtract the tag from r5.
2672 __ sub(scratch1, r4, Operand(kHeapObjectTag));
2673 __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset));
2674 #else
2675 // Double returned in registers 0 and 1.
2676 __ Strd(r0, r1, FieldMemOperand(r4, HeapNumber::kValueOffset));
2677 #endif
2678 __ mov(r0, Operand(r4));
2679 __ pop(pc); // Return to the pushed lr.
2680 }
2681
2682 __ bind(&not_number);
2683 GenerateTypeTransition(masm); 2743 GenerateTypeTransition(masm);
2684 2744
2685 __ bind(&call_runtime); 2745 __ bind(&call_runtime);
2686 GenerateCallRuntime(masm); 2746 GenerateCallRuntime(masm);
2687 } 2747 }
2688 2748
2689 2749
2690 void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) { 2750 void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
2691 ASSERT(op_ == Token::ADD || op_ == Token::SUB); 2751 ASSERT(op_ == Token::ADD ||
2752 op_ == Token::SUB ||
2753 op_ == Token::MUL ||
2754 op_ == Token::DIV ||
2755 op_ == Token::MOD);
2692 2756
2693 Label call_runtime; 2757 Label call_runtime;
2694 2758
2695 GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); 2759 GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
2696 2760
2697 // If all else fails, use the runtime system to get the correct 2761 // If all else fails, use the runtime system to get the correct
2698 // result. 2762 // result.
2699 __ bind(&call_runtime); 2763 __ bind(&call_runtime);
2700 2764
2701 // Try to add strings before calling runtime. 2765 // Try to add strings before calling runtime.
(...skipping 35 matching lines...) Expand 10 before | Expand all | Expand 10 after
2737 2801
2738 void TypeRecordingBinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) { 2802 void TypeRecordingBinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
2739 GenerateRegisterArgsPush(masm); 2803 GenerateRegisterArgsPush(masm);
2740 switch (op_) { 2804 switch (op_) {
2741 case Token::ADD: 2805 case Token::ADD:
2742 __ InvokeBuiltin(Builtins::ADD, JUMP_JS); 2806 __ InvokeBuiltin(Builtins::ADD, JUMP_JS);
2743 break; 2807 break;
2744 case Token::SUB: 2808 case Token::SUB:
2745 __ InvokeBuiltin(Builtins::SUB, JUMP_JS); 2809 __ InvokeBuiltin(Builtins::SUB, JUMP_JS);
2746 break; 2810 break;
2811 case Token::MUL:
2812 __ InvokeBuiltin(Builtins::MUL, JUMP_JS);
2813 break;
2814 case Token::DIV:
2815 __ InvokeBuiltin(Builtins::DIV, JUMP_JS);
2816 break;
2817 case Token::MOD:
2818 __ InvokeBuiltin(Builtins::MOD, JUMP_JS);
2819 break;
2747 default: 2820 default:
2748 UNREACHABLE(); 2821 UNREACHABLE();
2749 } 2822 }
2750 } 2823 }
2751 2824
2752 2825
2753 void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation( 2826 void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
2754 MacroAssembler* masm, 2827 MacroAssembler* masm,
2755 Register result, 2828 Register result,
2756 Register heap_number_map, 2829 Register heap_number_map,
(...skipping 2915 matching lines...) Expand 10 before | Expand all | Expand 10 after
5672 __ pop(r1); 5745 __ pop(r1);
5673 __ Jump(r2); 5746 __ Jump(r2);
5674 } 5747 }
5675 5748
5676 5749
5677 #undef __ 5750 #undef __
5678 5751
5679 } } // namespace v8::internal 5752 } } // namespace v8::internal
5680 5753
5681 #endif // V8_TARGET_ARCH_ARM 5754 #endif // V8_TARGET_ARCH_ARM
OLDNEW
« no previous file with comments | « src/arm/code-stubs-arm.h ('k') | src/arm/codegen-arm.cc » ('j') | src/ast.cc » ('J')

Powered by Google App Engine
This is Rietveld 408576698