| OLD | NEW |
| 1 // Copyright 2006-2009 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2009 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 4365 matching lines...) |
| 4376 } | 4376 } |
| 4377 | 4377 |
| 4378 | 4378 |
| 4379 // We fall into this code if the operands were Smis, but the result was | 4379 // We fall into this code if the operands were Smis, but the result was |
| 4380 // not (e.g. overflow). We branch into this code (to the not_smi label) if | 4380 // not (e.g. overflow). We branch into this code (to the not_smi label) if |
| 4381 // the operands were not both Smi. | 4381 // the operands were not both Smi. |
| 4382 static void HandleBinaryOpSlowCases(MacroAssembler* masm, | 4382 static void HandleBinaryOpSlowCases(MacroAssembler* masm, |
| 4383 Label* not_smi, | 4383 Label* not_smi, |
| 4384 const Builtins::JavaScript& builtin, | 4384 const Builtins::JavaScript& builtin, |
| 4385 Token::Value operation, | 4385 Token::Value operation, |
| 4386 int swi_number, | |
| 4387 OverwriteMode mode) { | 4386 OverwriteMode mode) { |
| 4388 Label slow; | 4387 Label slow; |
| 4389 __ bind(&slow); | 4388 __ bind(&slow); |
| 4390 __ push(r1); | 4389 __ push(r1); |
| 4391 __ push(r0); | 4390 __ push(r0); |
| 4392 __ mov(r0, Operand(1)); // Set number of arguments. | 4391 __ mov(r0, Operand(1)); // Set number of arguments. |
| 4393 __ InvokeBuiltin(builtin, JUMP_JS); // Tail call. | 4392 __ InvokeBuiltin(builtin, JUMP_JS); // Tail call. |
| 4394 | 4393 |
| 4395 __ bind(not_smi); | 4394 __ bind(not_smi); |
| 4396 __ tst(r0, Operand(kSmiTagMask)); | 4395 __ tst(r0, Operand(kSmiTagMask)); |
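
A note for readers following the tag test above: this is the classic 32-bit Smi scheme (tag size 1, tag value 0). A minimal sketch of what the constants and the `tst` encode, with illustrative helper names:

```cpp
#include <cstdint>

const int32_t kSmiTag = 0;
const int kSmiTagSize = 1;
const int32_t kSmiTagMask = (1 << kSmiTagSize) - 1;  // bit 0

// Mirrors `tst rX, #kSmiTagMask`: bit 0 clear means Smi, bit 0 set means
// heap-object pointer.
inline bool IsSmi(int32_t value) { return (value & kSmiTagMask) == kSmiTag; }

inline int32_t TagSmi(int32_t n)     { return n << kSmiTagSize; }
inline int32_t UntagSmi(int32_t smi) { return smi >> kSmiTagSize; }
```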
| (...skipping 27 matching lines...) |
| 4424 } else { | 4423 } else { |
| 4425 ASSERT(mode == OVERWRITE_RIGHT); | 4424 ASSERT(mode == OVERWRITE_RIGHT); |
| 4426 __ push(lr); | 4425 __ push(lr); |
| 4427 __ push(r0); | 4426 __ push(r0); |
| 4428 } | 4427 } |
| 4429 // Calling convention says that the first double is in r0 and r1. | 4428 // Calling convention says that the first double is in r0 and r1. |
| 4430 __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset)); | 4429 __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset)); |
| 4431 __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize)); | 4430 __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize)); |
| 4432 // Call C routine that may not cause GC or other trouble. | 4431 // Call C routine that may not cause GC or other trouble. |
| 4433 __ mov(r5, Operand(ExternalReference::double_fp_operation(operation))); | 4432 __ mov(r5, Operand(ExternalReference::double_fp_operation(operation))); |
| 4434 #if !defined(__arm__) | |
| 4435 // Notify the simulator that we are calling an add routine in C. | |
| 4436 __ swi(swi_number); | |
| 4437 #else | |
| 4438 // Actually call the add routine written in C. | |
| 4439 __ Call(r5); | 4433 __ Call(r5); |
| 4440 #endif | |
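
The `ldr` pair above moves the heap number's 64-bit IEEE payload into r0/r1 for the soft-float call. A hedged sketch of that split, assuming little-endian ARM; `SplitDouble` is an illustrative name:

```cpp
#include <cstdint>
#include <cstring>

// Illustrative only: the low word of the double's bit pattern ends up in r0,
// the high word in r1, matching the soft-float calling convention used here.
void SplitDouble(double d, uint32_t* lo /* -> r0 */, uint32_t* hi /* -> r1 */) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  *lo = static_cast<uint32_t>(bits);
  *hi = static_cast<uint32_t>(bits >> 32);
}
```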
| 4441 // Store answer in the overwritable heap number. | 4434 // Store answer in the overwritable heap number. |
| 4442 __ pop(r4); | 4435 __ pop(r4); |
| 4443 #if !defined(__ARM_EABI__) && defined(__arm__) | 4436 #if !defined(__ARM_EABI__) |
| 4444 // Double returned in fp coprocessor register 0 and 1, encoded as register | 4437 // Double returned in fp coprocessor register 0 and 1, encoded as register |
| 4445 // cr8. Offsets must be divisible by 4 for coprocessor so we need to | 4438 // cr8. Offsets must be divisible by 4 for coprocessor so we need to |
| 4446 // subtract the tag from r4. | 4439 // subtract the tag from r4. |
| 4447 __ sub(r5, r4, Operand(kHeapObjectTag)); | 4440 __ sub(r5, r4, Operand(kHeapObjectTag)); |
| 4448 __ stc(p1, cr8, MemOperand(r5, HeapNumber::kValueOffset)); | 4441 __ stc(p1, cr8, MemOperand(r5, HeapNumber::kValueOffset)); |
| 4449 #else | 4442 #else |
| 4450 // Double returned in fp coprocessor register 0 and 1. | 4443 // Double returned in registers r0 and r1. |
| 4451 __ str(r0, FieldMemOperand(r4, HeapNumber::kValueOffset)); | 4444 __ str(r0, FieldMemOperand(r4, HeapNumber::kValueOffset)); |
| 4452 __ str(r1, FieldMemOperand(r4, HeapNumber::kValueOffset + kPointerSize)); | 4445 __ str(r1, FieldMemOperand(r4, HeapNumber::kValueOffset + kPointerSize)); |
| 4453 #endif | 4446 #endif |
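
On the non-EABI path above, the explicit `sub` exists because `stc` offsets must be word-aligned while tagged pointers are off by the object tag. The address arithmetic, assuming the usual tag value of 1 (`FieldAddress` is an illustrative helper):

```cpp
const int kHeapObjectTag = 1;

// FieldMemOperand(ptr, offset) addresses (ptr - kHeapObjectTag) + offset; the
// coprocessor store cannot fold in the -1, so the code untags r4 explicitly.
inline int32_t FieldAddress(int32_t tagged_ptr, int32_t offset) {
  return (tagged_ptr - kHeapObjectTag) + offset;
}
```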
| 4454 __ mov(r0, Operand(r4)); | 4447 __ mov(r0, Operand(r4)); |
| 4455 // And we are done. | 4448 // And we are done. |
| 4456 __ pop(pc); | 4449 __ pop(pc); |
| 4457 } | 4450 } |
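
The routine reached through r5 is a plain C function resolved via `ExternalReference::double_fp_operation(operation)`: two doubles in, one double out, and it must not allocate, so it cannot trigger a GC under the pushed state. A hedged stand-in for its shape (`fp_add` is illustrative, not the actual symbol):

```cpp
// Illustrative stand-in for the C fallback; the real routine is selected by
// the Token::Value, e.g. Token::ADD.
extern "C" double fp_add(double x, double y) {
  return x + y;  // must not allocate or otherwise touch the V8 heap
}
```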
| 4458 | 4451 |
| 4459 | 4452 |
| 4460 void GenericBinaryOpStub::Generate(MacroAssembler* masm) { | 4453 void GenericBinaryOpStub::Generate(MacroAssembler* masm) { |
| (...skipping 14 matching lines...) |
| 4475 __ b(ne, &not_smi); | 4468 __ b(ne, &not_smi); |
| 4476 __ add(r0, r1, Operand(r0), SetCC); // Add y optimistically. | 4469 __ add(r0, r1, Operand(r0), SetCC); // Add y optimistically. |
| 4477 // Return if no overflow. | 4470 // Return if no overflow. |
| 4478 __ Ret(vc); | 4471 __ Ret(vc); |
| 4479 __ sub(r0, r0, Operand(r1)); // Revert optimistic add. | 4472 __ sub(r0, r0, Operand(r1)); // Revert optimistic add. |
| 4480 | 4473 |
| 4481 HandleBinaryOpSlowCases(masm, | 4474 HandleBinaryOpSlowCases(masm, |
| 4482 &not_smi, | 4475 &not_smi, |
| 4483 Builtins::ADD, | 4476 Builtins::ADD, |
| 4484 Token::ADD, | 4477 Token::ADD, |
| 4485 assembler::arm::simulator_fp_add, | |
| 4486 mode_); | 4478 mode_); |
| 4487 break; | 4479 break; |
| 4488 } | 4480 } |
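
The ADD fast path adds the tagged values outright and lets the overflow flag decide: `Ret(vc)` returns on success, otherwise the add is reverted and the slow cases run. The same trick in portable C++, assuming 32-bit Smis and using GCC/Clang's `__builtin_add_overflow` in place of the SetCC/vc pair:

```cpp
// Adding two tagged Smis directly is safe because
// (a << 1) + (b << 1) == (a + b) << 1.
inline bool TrySmiAdd(int32_t x_smi, int32_t y_smi, int32_t* result_smi) {
  int32_t sum;
  if (__builtin_add_overflow(x_smi, y_smi, &sum)) {
    return false;  // overflow: fall through to the heap-number slow path
  }
  *result_smi = sum;  // still a valid tagged Smi
  return true;
}
```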
| 4489 | 4481 |
| 4490 case Token::SUB: { | 4482 case Token::SUB: { |
| 4491 Label not_smi; | 4483 Label not_smi; |
| 4492 // Fast path. | 4484 // Fast path. |
| 4493 ASSERT(kSmiTag == 0); // Adjust code below. | 4485 ASSERT(kSmiTag == 0); // Adjust code below. |
| 4494 __ tst(r2, Operand(kSmiTagMask)); | 4486 __ tst(r2, Operand(kSmiTagMask)); |
| 4495 __ b(ne, &not_smi); | 4487 __ b(ne, &not_smi); |
| 4496 __ sub(r0, r1, Operand(r0), SetCC); // Subtract y optimistically. | 4488 __ sub(r0, r1, Operand(r0), SetCC); // Subtract y optimistically. |
| 4497 // Return if no overflow. | 4489 // Return if no overflow. |
| 4498 __ Ret(vc); | 4490 __ Ret(vc); |
| 4499 __ sub(r0, r1, Operand(r0)); // Revert optimistic subtract. | 4491 __ sub(r0, r1, Operand(r0)); // Revert optimistic subtract. |
| 4500 | 4492 |
| 4501 HandleBinaryOpSlowCases(masm, | 4493 HandleBinaryOpSlowCases(masm, |
| 4502 &not_smi, | 4494 &not_smi, |
| 4503 Builtins::SUB, | 4495 Builtins::SUB, |
| 4504 Token::SUB, | 4496 Token::SUB, |
| 4505 assembler::arm::simulator_fp_sub, | |
| 4506 mode_); | 4497 mode_); |
| 4507 break; | 4498 break; |
| 4508 } | 4499 } |
| 4509 | 4500 |
| 4510 case Token::MUL: { | 4501 case Token::MUL: { |
| 4511 Label not_smi, slow; | 4502 Label not_smi, slow; |
| 4512 ASSERT(kSmiTag == 0); // adjust code below | 4503 ASSERT(kSmiTag == 0); // adjust code below |
| 4513 __ tst(r2, Operand(kSmiTagMask)); | 4504 __ tst(r2, Operand(kSmiTagMask)); |
| 4514 __ b(ne, &not_smi); | 4505 __ b(ne, &not_smi); |
| 4515 // Remove tag from one operand (but keep sign), so that result is Smi. | 4506 // Remove tag from one operand (but keep sign), so that result is Smi. |
| 4516 __ mov(ip, Operand(r0, ASR, kSmiTagSize)); | 4507 __ mov(ip, Operand(r0, ASR, kSmiTagSize)); |
| 4517 // Do multiplication | 4508 // Do multiplication |
| 4518 __ smull(r3, r2, r1, ip); // r3 = lower 32 bits of ip*r1. | 4509 __ smull(r3, r2, r1, ip); // r3 = lower 32 bits of ip*r1. |
| 4519 // Go slow on overflows (overflow bit is not set). | 4510 // Go slow on overflows (overflow bit is not set). |
| 4520 __ mov(ip, Operand(r3, ASR, 31)); | 4511 __ mov(ip, Operand(r3, ASR, 31)); |
| 4521 __ cmp(ip, Operand(r2)); // no overflow if higher 33 bits are identical | 4512 __ cmp(ip, Operand(r2)); // no overflow if higher 33 bits are identical |
| 4522 __ b(ne, &slow); | 4513 __ b(ne, &slow); |
| 4523 // Go slow on zero result to handle -0. | 4514 // Go slow on zero result to handle -0. |
| 4524 __ tst(r3, Operand(r3)); | 4515 __ tst(r3, Operand(r3)); |
| 4525 __ mov(r0, Operand(r3), LeaveCC, ne); | 4516 __ mov(r0, Operand(r3), LeaveCC, ne); |
| 4526 __ Ret(ne); | 4517 __ Ret(ne); |
| 4527 // Slow case. | 4518 // Slow case. |
| 4528 __ bind(&slow); | 4519 __ bind(&slow); |
| 4529 | 4520 |
| 4530 HandleBinaryOpSlowCases(masm, | 4521 HandleBinaryOpSlowCases(masm, |
| 4531 &not_smi, | 4522 &not_smi, |
| 4532 Builtins::MUL, | 4523 Builtins::MUL, |
| 4533 Token::MUL, | 4524 Token::MUL, |
| 4534 assembler::arm::simulator_fp_mul, | |
| 4535 mode_); | 4525 mode_); |
| 4536 break; | 4526 break; |
| 4537 } | 4527 } |
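
The MUL overflow check compares the high word of the `smull` product against the sign-extension of the low word; they agree exactly when the product fits in 32 bits. The zero test sends a possible -0 (not representable as a Smi) to the slow path. A sketch under the same assumptions; note one operand is untagged first so the product stays a valid tagged Smi:

```cpp
#include <cstdint>

inline bool TrySmiMul(int32_t x_smi, int32_t y_untagged, int32_t* result_smi) {
  int64_t product = static_cast<int64_t>(x_smi) * y_untagged;
  int32_t lo = static_cast<int32_t>(product);
  int32_t hi = static_cast<int32_t>(product >> 32);
  if (hi != (lo >> 31)) return false;  // top 33 bits differ: overflow
  if (lo == 0) return false;           // zero might really be -0: go slow
  *result_smi = lo;
  return true;
}
```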
| 4538 | 4528 |
| 4539 case Token::BIT_OR: | 4529 case Token::BIT_OR: |
| 4540 case Token::BIT_AND: | 4530 case Token::BIT_AND: |
| 4541 case Token::BIT_XOR: { | 4531 case Token::BIT_XOR: { |
| 4542 Label slow; | 4532 Label slow; |
| 4543 ASSERT(kSmiTag == 0); // adjust code below | 4533 ASSERT(kSmiTag == 0); // adjust code below |
| 4544 __ tst(r2, Operand(kSmiTagMask)); | 4534 __ tst(r2, Operand(kSmiTagMask)); |
| (...skipping 240 matching lines...) |
| 4785 StackFrame::Type frame_type, | 4775 StackFrame::Type frame_type, |
| 4786 bool do_gc, | 4776 bool do_gc, |
| 4787 bool always_allocate) { | 4777 bool always_allocate) { |
| 4788 // r0: result parameter for PerformGC, if any | 4778 // r0: result parameter for PerformGC, if any |
| 4789 // r4: number of arguments including receiver (C callee-saved) | 4779 // r4: number of arguments including receiver (C callee-saved) |
| 4790 // r5: pointer to builtin function (C callee-saved) | 4780 // r5: pointer to builtin function (C callee-saved) |
| 4791 // r6: pointer to the first argument (C callee-saved) | 4781 // r6: pointer to the first argument (C callee-saved) |
| 4792 | 4782 |
| 4793 if (do_gc) { | 4783 if (do_gc) { |
| 4794 // Passing r0. | 4784 // Passing r0. |
| 4795 __ Call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY); | 4785 ExternalReference gc_reference = ExternalReference::perform_gc_function(); |
| 4786 __ Call(gc_reference.address(), RelocInfo::RUNTIME_ENTRY); |
| 4796 } | 4787 } |
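
This hunk, like the earlier ones dropping `swi_number`, is the point of the change: resolving C entry points through ExternalReference lets a simulator build hand generated code a redirected (trampoline) address, so the stub no longer needs `#if !defined(__arm__)` swi variants. A rough sketch of the idea; `SimulatorRedirect` and `ResolveExternal` are illustrative names, not V8's API:

```cpp
typedef unsigned char* Address;

// Hypothetical: under the simulator this would set up a trampoline that the
// simulator intercepts; here it is a placeholder.
static Address SimulatorRedirect(Address target) { return target; }

Address ResolveExternal(Address c_function) {
#if !defined(__arm__)
  // Simulator build: generated code receives the redirected address, so no
  // special swi instruction is needed at the call site.
  return SimulatorRedirect(c_function);
#else
  return c_function;  // real hardware: the raw C entry point
#endif
}
```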
| 4797 | 4788 |
| 4798 ExternalReference scope_depth = | 4789 ExternalReference scope_depth = |
| 4799 ExternalReference::heap_always_allocate_scope_depth(); | 4790 ExternalReference::heap_always_allocate_scope_depth(); |
| 4800 if (always_allocate) { | 4791 if (always_allocate) { |
| 4801 __ mov(r0, Operand(scope_depth)); | 4792 __ mov(r0, Operand(scope_depth)); |
| 4802 __ ldr(r1, MemOperand(r0)); | 4793 __ ldr(r1, MemOperand(r0)); |
| 4803 __ add(r1, r1, Operand(1)); | 4794 __ add(r1, r1, Operand(1)); |
| 4804 __ str(r1, MemOperand(r0)); | 4795 __ str(r1, MemOperand(r0)); |
| 4805 } | 4796 } |
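
The scope-depth increment here (mirrored by a decrement after the call) is the generated-code half of an RAII counter: while it is nonzero, the heap must really allocate instead of failing a retry. A rough C++ counterpart, with illustrative names:

```cpp
struct AlwaysAllocateScope {
  AlwaysAllocateScope()  { ++depth_; }  // matches the ldr/add/str above
  ~AlwaysAllocateScope() { --depth_; }  // matches the decrement after the call
  static int depth_;                    // what scope_depth points at
};
int AlwaysAllocateScope::depth_ = 0;
```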
| 4806 | 4797 |
| 4807 // Call C built-in. | 4798 // Call C built-in. |
| 4808 // r0 = argc, r1 = argv | 4799 // r0 = argc, r1 = argv |
| 4809 __ mov(r0, Operand(r4)); | 4800 __ mov(r0, Operand(r4)); |
| 4810 __ mov(r1, Operand(r6)); | 4801 __ mov(r1, Operand(r6)); |
| 4811 | 4802 |
| 4812 // TODO(1242173): To let the GC traverse the return address of the exit | 4803 // TODO(1242173): To let the GC traverse the return address of the exit |
| 4813 // frames, we need to know where the return address is. Right now, | 4804 // frames, we need to know where the return address is. Right now, |
| 4814 // we push it on the stack to be able to find it again, but we never | 4805 // we push it on the stack to be able to find it again, but we never |
| 4815 // restore from it in case of changes, which makes it impossible to | 4806 // restore from it in case of changes, which makes it impossible to |
| 4816 // support moving the C entry code stub. This should be fixed, but currently | 4807 // support moving the C entry code stub. This should be fixed, but currently |
| 4817 // this is OK because the CEntryStub gets generated so early in the V8 boot | 4808 // this is OK because the CEntryStub gets generated so early in the V8 boot |
| 4818 // sequence that it never moves. | 4809 // sequence that it never moves. |
| 4819 __ add(lr, pc, Operand(4)); // compute return address: (pc + 8) + 4 | 4810 __ add(lr, pc, Operand(4)); // compute return address: (pc + 8) + 4 |
| 4820 __ push(lr); | 4811 __ push(lr); |
| 4821 #if !defined(__arm__) | |
| 4822 // Notify the simulator of the transition to C code. | |
| 4823 __ swi(assembler::arm::call_rt_r5); | |
| 4824 #else /* !defined(__arm__) */ | |
| 4825 __ Jump(r5); | 4812 __ Jump(r5); |
| 4826 #endif /* !defined(__arm__) */ | |
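
Worked out, the return-address computation above relies on classic ARM pc semantics, where reading pc yields the current instruction's address plus 8:

```cpp
// If the add sits at address A:
//   A+0 : add lr, pc, #4   ; lr = (A + 8) + 4 = A + 12
//   A+4 : push {lr}        ; saved so the stack walker can find it
//   A+8 : mov pc, r5       ; Jump(r5) into the C built-in
//   A+12:                  ; <- the built-in returns here
```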
| 4827 | 4813 |
| 4828 if (always_allocate) { | 4814 if (always_allocate) { |
| 4829 // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1 | 4815 // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1 |
| 4830 // though (they contain the result). | 4816 // though (they contain the result). |
| 4831 __ mov(r2, Operand(scope_depth)); | 4817 __ mov(r2, Operand(scope_depth)); |
| 4832 __ ldr(r3, MemOperand(r2)); | 4818 __ ldr(r3, MemOperand(r2)); |
| 4833 __ sub(r3, r3, Operand(1)); | 4819 __ sub(r3, r3, Operand(1)); |
| 4834 __ str(r3, MemOperand(r2)); | 4820 __ str(r3, MemOperand(r2)); |
| 4835 } | 4821 } |
| 4836 | 4822 |
| (...skipping 352 matching lines...) |
| 5189 __ mov(r2, Operand(0)); | 5175 __ mov(r2, Operand(0)); |
| 5190 __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION); | 5176 __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION); |
| 5191 __ Jump(Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)), | 5177 __ Jump(Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)), |
| 5192 RelocInfo::CODE_TARGET); | 5178 RelocInfo::CODE_TARGET); |
| 5193 } | 5179 } |
| 5194 | 5180 |
| 5195 | 5181 |
| 5196 #undef __ | 5182 #undef __ |
| 5197 | 5183 |
| 5198 } } // namespace v8::internal | 5184 } } // namespace v8::internal |