OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 374 matching lines...) |
385 if (destination == kCoreRegisters) { | 385 if (destination == kCoreRegisters) { |
386 __ vmov(r2, r3, d7); | 386 __ vmov(r2, r3, d7); |
387 __ vmov(r0, r1, d6); | 387 __ vmov(r0, r1, d6); |
388 } | 388 } |
389 } else { | 389 } else { |
390 ASSERT(destination == kCoreRegisters); | 390 ASSERT(destination == kCoreRegisters); |
391 // Write Smi from r0 to r3 and r2 in double format. | 391 // Write Smi from r0 to r3 and r2 in double format. |
392 __ mov(scratch1, Operand(r0)); | 392 __ mov(scratch1, Operand(r0)); |
393 ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2); | 393 ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2); |
394 __ push(lr); | 394 __ push(lr); |
395 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); | 395 __ Call(stub1.GetCode()); |
396 // Write Smi from r1 to r1 and r0 in double format. | 396 // Write Smi from r1 to r1 and r0 in double format. |
397 __ mov(scratch1, Operand(r1)); | 397 __ mov(scratch1, Operand(r1)); |
398 ConvertToDoubleStub stub2(r1, r0, scratch1, scratch2); | 398 ConvertToDoubleStub stub2(r1, r0, scratch1, scratch2); |
399 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); | 399 __ Call(stub2.GetCode()); |
400 __ pop(lr); | 400 __ pop(lr); |
401 } | 401 } |
402 } | 402 } |
403 | 403 |
404 | 404 |
405 void FloatingPointHelper::LoadOperands( | 405 void FloatingPointHelper::LoadOperands( |
406 MacroAssembler* masm, | 406 MacroAssembler* masm, |
407 FloatingPointHelper::Destination destination, | 407 FloatingPointHelper::Destination destination, |
408 Register heap_number_map, | 408 Register heap_number_map, |
409 Register scratch1, | 409 Register scratch1, |
(...skipping 56 matching lines...) |
466 if (destination == kCoreRegisters) { | 466 if (destination == kCoreRegisters) { |
467 // Load the converted smi to dst1 and dst2 in double format. | 467 // Load the converted smi to dst1 and dst2 in double format. |
468 __ vmov(dst1, dst2, dst); | 468 __ vmov(dst1, dst2, dst); |
469 } | 469 } |
470 } else { | 470 } else { |
471 ASSERT(destination == kCoreRegisters); | 471 ASSERT(destination == kCoreRegisters); |
472 // Write smi to dst1 and dst2 in double format. | 472 // Write smi to dst1 and dst2 in double format. |
473 __ mov(scratch1, Operand(object)); | 473 __ mov(scratch1, Operand(object)); |
474 ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2); | 474 ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2); |
475 __ push(lr); | 475 __ push(lr); |
476 __ Call(stub.GetCode(), RelocInfo::CODE_TARGET); | 476 __ Call(stub.GetCode()); |
477 __ pop(lr); | 477 __ pop(lr); |
478 } | 478 } |
479 | 479 |
480 __ bind(&done); | 480 __ bind(&done); |
481 } | 481 } |
482 | 482 |
483 | 483 |
484 void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm, | 484 void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm, |
485 Register object, | 485 Register object, |
486 Register dst, | 486 Register dst, |
(...skipping 564 matching lines...) |
1051 CpuFeatures::Scope scope(VFP3); | 1051 CpuFeatures::Scope scope(VFP3); |
1052 __ SmiToDoubleVFPRegister(lhs, d7, r7, s15); | 1052 __ SmiToDoubleVFPRegister(lhs, d7, r7, s15); |
1053 // Load the double from rhs, tagged HeapNumber r0, to d6. | 1053 // Load the double from rhs, tagged HeapNumber r0, to d6. |
1054 __ sub(r7, rhs, Operand(kHeapObjectTag)); | 1054 __ sub(r7, rhs, Operand(kHeapObjectTag)); |
1055 __ vldr(d6, r7, HeapNumber::kValueOffset); | 1055 __ vldr(d6, r7, HeapNumber::kValueOffset); |
1056 } else { | 1056 } else { |
1057 __ push(lr); | 1057 __ push(lr); |
1058 // Convert lhs to a double in r2, r3. | 1058 // Convert lhs to a double in r2, r3. |
1059 __ mov(r7, Operand(lhs)); | 1059 __ mov(r7, Operand(lhs)); |
1060 ConvertToDoubleStub stub1(r3, r2, r7, r6); | 1060 ConvertToDoubleStub stub1(r3, r2, r7, r6); |
1061 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); | 1061 __ Call(stub1.GetCode()); |
1062 // Load rhs to a double in r0, r1. | 1062 // Load rhs to a double in r0, r1. |
1063 __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset)); | 1063 __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset)); |
1064 __ pop(lr); | 1064 __ pop(lr); |
1065 } | 1065 } |
1066 | 1066 |
1067 // We now have both loaded as doubles but we can skip the lhs nan check | 1067 // We now have both loaded as doubles but we can skip the lhs nan check |
1068 // since it's a smi. | 1068 // since it's a smi. |
1069 __ jmp(lhs_not_nan); | 1069 __ jmp(lhs_not_nan); |
1070 | 1070 |
1071 __ bind(&rhs_is_smi); | 1071 __ bind(&rhs_is_smi); |
(...skipping 21 matching lines...) |
1093 __ vldr(d7, r7, HeapNumber::kValueOffset); | 1093 __ vldr(d7, r7, HeapNumber::kValueOffset); |
1094 // Convert rhs to a double in d6. | 1094 // Convert rhs to a double in d6. |
1095 __ SmiToDoubleVFPRegister(rhs, d6, r7, s13); | 1095 __ SmiToDoubleVFPRegister(rhs, d6, r7, s13); |
1096 } else { | 1096 } else { |
1097 __ push(lr); | 1097 __ push(lr); |
1098 // Load lhs to a double in r2, r3. | 1098 // Load lhs to a double in r2, r3. |
1099 __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset)); | 1099 __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset)); |
1100 // Convert rhs to a double in r0, r1. | 1100 // Convert rhs to a double in r0, r1. |
1101 __ mov(r7, Operand(rhs)); | 1101 __ mov(r7, Operand(rhs)); |
1102 ConvertToDoubleStub stub2(r1, r0, r7, r6); | 1102 ConvertToDoubleStub stub2(r1, r0, r7, r6); |
1103 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); | 1103 __ Call(stub2.GetCode()); |
1104 __ pop(lr); | 1104 __ pop(lr); |
1105 } | 1105 } |
1106 // Fall through to both_loaded_as_doubles. | 1106 // Fall through to both_loaded_as_doubles. |
1107 } | 1107 } |
1108 | 1108 |
1109 | 1109 |
1110 void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cond) { | 1110 void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cond) { |
1111 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); | 1111 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); |
1112 Register rhs_exponent = exp_first ? r0 : r1; | 1112 Register rhs_exponent = exp_first ? r0 : r1; |
1113 Register lhs_exponent = exp_first ? r2 : r3; | 1113 Register lhs_exponent = exp_first ? r2 : r3; |
(...skipping 5488 matching lines...) |
6602 __ mov(result, Operand(0)); | 6602 __ mov(result, Operand(0)); |
6603 __ Ret(); | 6603 __ Ret(); |
6604 } | 6604 } |
6605 | 6605 |
6606 | 6606 |
6607 #undef __ | 6607 #undef __ |
6608 | 6608 |
6609 } } // namespace v8::internal | 6609 } } // namespace v8::internal |
6610 | 6610 |
6611 #endif // V8_TARGET_ARCH_ARM | 6611 #endif // V8_TARGET_ARCH_ARM |
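
The one substantive change in this CL, repeated at every hunk above, is that call sites of the form __ Call(stub.GetCode(), RelocInfo::CODE_TARGET) drop the explicit relocation mode. Below is a minimal standalone C++ sketch of the pattern this relies on, assuming (as the new call sites imply) that the Call overload taking a code object defaults its relocation mode to CODE_TARGET; the RelocInfo, Code, and MacroAssembler types here are mocks for illustration, not the real V8 declarations.

#include <cstdio>

// Mock of RelocInfo::Mode; in V8, RelocInfo is a class with a nested Mode enum.
namespace RelocInfo { enum Mode { CODE_TARGET, RUNTIME_ENTRY }; }

struct Code {};  // stand-in for a generated code object

class MacroAssembler {
 public:
  // The defaulted parameter is what lets Call(code) mean
  // Call(code, RelocInfo::CODE_TARGET).
  void Call(Code* code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET) {
    (void)code;
    std::printf("Call emitted with rmode=%d\n", static_cast<int>(rmode));
  }
};

int main() {
  MacroAssembler masm;
  Code stub;
  masm.Call(&stub);                          // new style: mode defaulted
  masm.Call(&stub, RelocInfo::CODE_TARGET);  // old style: mode spelled out
  return 0;
}

With the default in place, both calls resolve identically, which is why the diff can delete the second argument at every site without changing the generated code.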