OLD | NEW |
---|---|
1 // Copyright 2006-2009 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2009 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 657 matching lines...) | |
668 class OpBits: public BitField<Token::Value, 2, 14> {}; | 668 class OpBits: public BitField<Token::Value, 2, 14> {}; |
669 | 669 |
670 Major MajorKey() { return GenericBinaryOp; } | 670 Major MajorKey() { return GenericBinaryOp; } |
671 int MinorKey() { | 671 int MinorKey() { |
672 // Encode the parameters in a unique 16 bit value. | 672 // Encode the parameters in a unique 16 bit value. |
673 return OpBits::encode(op_) | 673 return OpBits::encode(op_) |
674 | ModeBits::encode(mode_); | 674 | ModeBits::encode(mode_); |
675 } | 675 } |
676 | 676 |
677 void Generate(MacroAssembler* masm); | 677 void Generate(MacroAssembler* masm); |
678 void HandleNonSmiBitwiseOp(MacroAssembler* masm); | |
678 | 679 |
679 const char* GetName() { | 680 const char* GetName() { |
680 switch (op_) { | 681 switch (op_) { |
681 case Token::ADD: return "GenericBinaryOpStub_ADD"; | 682 case Token::ADD: return "GenericBinaryOpStub_ADD"; |
682 case Token::SUB: return "GenericBinaryOpStub_SUB"; | 683 case Token::SUB: return "GenericBinaryOpStub_SUB"; |
683 case Token::MUL: return "GenericBinaryOpStub_MUL"; | 684 case Token::MUL: return "GenericBinaryOpStub_MUL"; |
684 case Token::DIV: return "GenericBinaryOpStub_DIV"; | 685 case Token::DIV: return "GenericBinaryOpStub_DIV"; |
685 case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR"; | 686 case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR"; |
686 case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND"; | 687 case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND"; |
687 case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR"; | 688 case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR"; |
(...skipping 2896 matching lines...) | |
3584 LoadAndSpill(node->expression()); | 3585 LoadAndSpill(node->expression()); |
3585 frame_->EmitPop(r0); | 3586 frame_->EmitPop(r0); |
3586 switch (op) { | 3587 switch (op) { |
3587 case Token::NOT: | 3588 case Token::NOT: |
3588 case Token::DELETE: | 3589 case Token::DELETE: |
3589 case Token::TYPEOF: | 3590 case Token::TYPEOF: |
3590 UNREACHABLE(); // handled above | 3591 UNREACHABLE(); // handled above |
3591 break; | 3592 break; |
3592 | 3593 |
3593 case Token::SUB: { | 3594 case Token::SUB: { |
3594 UnarySubStub stub; | 3595 bool overwrite = |
3596 (node->AsBinaryOperation() != NULL && | |
3597 node->AsBinaryOperation()->ResultOverwriteAllowed()); | |
3598 UnarySubStub stub(overwrite); | |
3595 frame_->CallStub(&stub, 0); | 3599 frame_->CallStub(&stub, 0); |
3596 break; | 3600 break; |
3597 } | 3601 } |
3598 | 3602 |
3599 case Token::BIT_NOT: { | 3603 case Token::BIT_NOT: { |
3600 // smi check | 3604 // smi check |
3601 JumpTarget smi_label; | 3605 JumpTarget smi_label; |
3602 JumpTarget continue_label; | 3606 JumpTarget continue_label; |
3603 __ tst(r0, Operand(kSmiTagMask)); | 3607 __ tst(r0, Operand(kSmiTagMask)); |
3604 smi_label.Branch(eq); | 3608 smi_label.Branch(eq); |
(...skipping 749 matching lines...) | |
4354 frame->EmitPush(result.reg()); | 4358 frame->EmitPush(result.reg()); |
4355 break; | 4359 break; |
4356 } | 4360 } |
4357 | 4361 |
4358 default: | 4362 default: |
4359 UNREACHABLE(); | 4363 UNREACHABLE(); |
4360 } | 4364 } |
4361 } | 4365 } |
4362 | 4366 |
4363 | 4367 |
4368 // Count leading zeros in a 32 bit word. On ARMv5 and later it uses the clz | |
4369 // instruction. On pre-ARMv5 hardware this routine gives the wrong answer for 0 | |
4370 // (31 instead of 32). | |
4371 static void CountLeadingZeros( | |
4372 MacroAssembler* masm, | |
4373 Register source, | |
4374 Register scratch, | |
4375 Register zeros) { | |
4376 #ifdef __ARM_ARCH_5__ | |
4377 __ clz(zeros, source); // This instruction is only supported from ARMv5 on. | |
4378 #else | |
4379 __ mov(zeros, Operand(0)); | |
4380 __ mov(scratch, source); | |
4381 // Top 16. | |
4382 __ tst(scratch, Operand(0xffff0000)); | |
4383 __ add(zeros, zeros, Operand(16), LeaveCC, eq); | |
4384 __ mov(scratch, Operand(scratch, LSL, 16), LeaveCC, eq); | |
4385 // Top 8. | |
4386 __ tst(scratch, Operand(0xff000000)); | |
4387 __ add(zeros, zeros, Operand(8), LeaveCC, eq); | |
4388 __ mov(scratch, Operand(scratch, LSL, 8), LeaveCC, eq); | |
4389 // Top 4. | |
4390 __ tst(scratch, Operand(0xf0000000)); | |
4391 __ add(zeros, zeros, Operand(4), LeaveCC, eq); | |
4392 __ mov(scratch, Operand(scratch, LSL, 4), LeaveCC, eq); | |
4393 // Top 2. | |
4394 __ tst(scratch, Operand(0xc0000000)); | |
4395 __ add(zeros, zeros, Operand(2), LeaveCC, eq); | |
4396 __ mov(scratch, Operand(scratch, LSL, 2), LeaveCC, eq); | |
4397 // Top bit. | |
4398 __ tst(scratch, Operand(0x80000000)); | |
4399 __ add(zeros, zeros, Operand(1), LeaveCC, eq); | |
4400 #endif | |
4401 } | |
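The pre-ARMv5 fallback above is a standard binary-search CLZ. For readers following along, the same logic in plain C++ (an illustrative sketch, not part of this CL):

    #include <cstdint>

    // Count leading zeros by halving the search range, mirroring the
    // pre-ARMv5 fallback. Like that code, this returns 31 rather than
    // 32 for an input of 0.
    static int CountLeadingZeros32(uint32_t x) {
      int zeros = 0;
      if ((x & 0xffff0000u) == 0) { zeros += 16; x <<= 16; }  // Top 16.
      if ((x & 0xff000000u) == 0) { zeros += 8;  x <<= 8;  }  // Top 8.
      if ((x & 0xf0000000u) == 0) { zeros += 4;  x <<= 4;  }  // Top 4.
      if ((x & 0xc0000000u) == 0) { zeros += 2;  x <<= 2;  }  // Top 2.
      if ((x & 0x80000000u) == 0) { zeros += 1; }             // Top bit.
      return zeros;
    }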
4402 | |
4403 | |
4404 // Takes a Smi and converts it to an IEEE 64 bit floating point value in two | |
4405 // registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and | |
4406 // 52 fraction bits (20 in the first word, 32 in the second). Zeros is a | |
4407 // scratch register. Destroys the source register. No GC occurs during this | |
4408 // stub so you don't have to set up the frame. | |
4409 class ConvertToDoubleStub : public CodeStub { | |
4410 public: | |
4411 ConvertToDoubleStub(Register result_reg_1, | |
4412 Register result_reg_2, | |
4413 Register source_reg, | |
4414 Register scratch_reg) | |
4415 : result1_(result_reg_1), | |
4416 result2_(result_reg_2), | |
4417 source_(source_reg), | |
4418 zeros_(scratch_reg) { } | |
4419 | |
4420 private: | |
4421 Register result1_; | |
4422 Register result2_; | |
4423 Register source_; | |
4424 Register zeros_; | |
4425 | |
4426 // Minor key encoding in 16 bits. | |
4427 class ModeBits: public BitField<OverwriteMode, 0, 2> {}; | |
4428 class OpBits: public BitField<Token::Value, 2, 14> {}; | |
4429 | |
4430 Major MajorKey() { return ConvertToDouble; } | |
4431 int MinorKey() { | |
4432 // Encode the parameters in a unique 16 bit value. | |
4433 return result1_.code() + | |
4434 (result2_.code() << 4) + | |
4435 (source_.code() << 8) + | |
4436 (zeros_.code() << 12); | |
4437 } | |
4438 | |
4439 void Generate(MacroAssembler* masm); | |
4440 | |
4441 const char* GetName() { return "ConvertToDoubleStub"; } | |
4442 | |
4443 #ifdef DEBUG | |
4444 void Print() { PrintF("ConvertToDoubleStub\n"); } | |
4445 #endif | |
4446 }; | |
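The minor key above simply packs the four 4-bit ARM register codes into 16 bits, so stubs parameterized on different registers are cached as distinct code objects. Schematically (hypothetical helper, not a V8 API):

    // Pack four 4-bit register codes into a 16-bit stub key.
    static int EncodeRegisters(int result1, int result2, int source, int zeros) {
      return result1 | (result2 << 4) | (source << 8) | (zeros << 12);
    }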
4447 | |
4448 | |
4449 void ConvertToDoubleStub::Generate(MacroAssembler* masm) { | |
4450 Label not_special, done; | |
4451 // Convert from Smi to integer. | |
4452 __ mov(source_, Operand(source_, ASR, kSmiTagSize)); | |
4453 // Move sign bit from source to destination. This works because the sign bit | |
4454 // in the exponent word of the double has the same position and polarity as | |
4455 // the 2's complement sign bit in a Smi. | |
4456 ASSERT(HeapNumber::kSignMask == 0x80000000u); | |
4457 __ and_(result1_, source_, Operand(HeapNumber::kSignMask), SetCC); | |
4458 // Subtract from 0 if source was negative. | |
4459 __ rsb(source_, source_, Operand(0), LeaveCC, ne); | |
4460 __ cmp(source_, Operand(1)); | |
4461 __ b(gt, ¬_special); | |
4462 | |
4463 // We have -1, 0 or 1, which we treat specially. | |
4464 __ cmp(source_, Operand(0)); | |
4465 // For 1 or -1 we need to or in the 0 exponent (biased to 1023). | |
4466 static const uint32_t exponent_word_for_1 = | |
4467 HeapNumber::kExponentBias << HeapNumber::kExponentShift; | |
4468 __ orr(result1_, result1_, Operand(exponent_word_for_1), LeaveCC, ne); | |
4469 // 1, 0 and -1 all have 0 for the second word. | |
4470 __ mov(result2_, Operand(0)); | |
4471 __ jmp(&done); | |
4472 | |
4473 __ bind(¬_special); | |
4474 // Count leading zeros. Uses result2 for a scratch register on pre-ARMv5. | |
4475 // Gets the wrong answer for 0, but we already checked for that case above. | |
4476 CountLeadingZeros(masm, source_, result2_, zeros_); | |
4477 // Compute exponent and or it into the exponent register. | |
4478 // We use result2 as a scratch register here. | |
4479 __ rsb(result2_, zeros_, Operand(31 + HeapNumber::kExponentBias)); | |
4480 __ orr(result1_, result1_, Operand(result2_, LSL, 20)); | |
iposva 2009/06/10 04:30:48: Please use kExponentShift, which you added to the
4481 // Shift up the source, chopping the top bit off. | |
4482 __ add(zeros_, zeros_, Operand(1)); | |
4483 // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0. | |
4484 __ mov(source_, Operand(source_, LSL, zeros_)); | |
4485 // Compute lower part of fraction (last 12 bits). | |
4486 __ mov(result2_, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord)); | |
4487 // And the top (top 20 bits). | |
4488 __ orr(result1_, | |
4489 result1_, | |
4490 Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord)); | |
4491 __ bind(&done); | |
4492 __ Ret(); | |
4493 } | |
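To make the bit layout concrete, here is the same Smi-to-double conversion as plain C++. This is a sketch assuming |value| <= 2^30 (Smi range) and a GCC/Clang __builtin_clz; the stub's special-casing of -1, 0 and 1 is kept:

    #include <cstdint>

    static void SmiValueToDoubleWords(int32_t value, uint32_t* hi, uint32_t* lo) {
      const uint32_t kSignMask = 0x80000000u;
      const uint32_t kExponentBias = 1023;
      const int kExponentShift = 20;          // Bits above the top fraction bits.
      const int kMantissaBitsInTopWord = 20;

      uint32_t sign = value < 0 ? kSignMask : 0;
      uint32_t mag = value < 0 ? -static_cast<uint32_t>(value)
                               : static_cast<uint32_t>(value);
      if (mag <= 1) {  // -1, 0 and 1: biased zero exponent, zero mantissa.
        *hi = sign | (mag == 1 ? kExponentBias << kExponentShift : 0);
        *lo = 0;
        return;
      }
      int zeros = __builtin_clz(mag);                  // As CountLeadingZeros.
      uint32_t exponent = 31 - zeros + kExponentBias;  // Unbiased exponent is 31 - zeros.
      uint32_t frac = mag << (zeros + 1);              // Chop off the implicit 1.
      *hi = sign | (exponent << kExponentShift)
                 | (frac >> (32 - kMantissaBitsInTopWord));
      *lo = frac << kMantissaBitsInTopWord;
    }

Reassembling hi:lo into a 64-bit pattern and memcpy-ing it into a double reproduces the compiler's own int-to-double conversion for these inputs.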
4494 | |
4495 | |
4496 // This stub can convert a signed int32 to a heap number (double). It does | |
4497 // not work for int32s that are in Smi range! No GC occurs during this stub | |
4498 // so you don't have to set up the frame. | |
4499 class WriteInt32ToHeapNumberStub : public CodeStub { | |
4500 public: | |
4501 WriteInt32ToHeapNumberStub(Register the_int, | |
4502 Register the_heap_number, | |
4503 Register scratch) | |
4504 : the_int_(the_int), | |
4505 the_heap_number_(the_heap_number), | |
4506 scratch_(scratch) { } | |
4507 | |
4508 private: | |
4509 Register the_int_; | |
4510 Register the_heap_number_; | |
4511 Register scratch_; | |
4512 | |
4513 // Minor key encoding in 16 bits. | |
4514 class ModeBits: public BitField<OverwriteMode, 0, 2> {}; | |
4515 class OpBits: public BitField<Token::Value, 2, 14> {}; | |
4516 | |
4517 Major MajorKey() { return WriteInt32ToHeapNumber; } | |
4518 int MinorKey() { | |
4519 // Encode the parameters in a unique 16 bit value. | |
4520 return the_int_.code() + | |
4521 (the_heap_number_.code() << 4) + | |
4522 (scratch_.code() << 8); | |
4523 } | |
4524 | |
4525 void Generate(MacroAssembler* masm); | |
4526 | |
4527 const char* GetName() { return "WriteInt32ToHeapNumberStub"; } | |
4528 | |
4529 #ifdef DEBUG | |
4530 void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); } | |
4531 #endif | |
4532 }; | |
4533 | |
4534 | |
4535 // See comment for class. | |
4536 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler *masm) { | |
4537 Label max_negative_int; | |
4538 // the_int_ has the answer which is a signed int32 but not a Smi. | |
4539 // We test for the special value that has a different exponent. This test | |
4540 // has the neat side effect of setting the flags according to the sign. | |
4541 ASSERT(HeapNumber::kSignMask == 0x80000000u); | |
4542 __ cmp(the_int_, Operand(0x80000000)); | |
4543 __ b(eq, &max_negative_int); | |
4544 // Set up the correct exponent in scratch_. All non-Smi int32s have the same exponent. | |
4545 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). | |
4546 uint32_t non_smi_exponent = | |
4547 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift; | |
4548 __ mov(scratch_, Operand(non_smi_exponent)); | |
4549 // Set the sign bit in scratch_ if the value was negative. | |
4550 __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs); | |
4551 // Subtract from 0 if the value was negative. | |
4552 __ rsb(the_int_, the_int_, Operand(0), LeaveCC, cs); | |
4553 // We should be masking the implicit first digit of the mantissa away here, | |
4554 // but it just ends up combining harmlessly with the last digit of the | |
4555 // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get | |
4556 // the most significant 1 to hit the last bit of the 12 bit sign and exponent. | |
4557 ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0); | |
4558 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; | |
4559 __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance)); | |
4560 __ str(scratch_, FieldMemOperand(the_heap_number_, | |
4561 HeapNumber::kExponentOffset)); | |
4562 __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance)); | |
4563 __ str(scratch_, FieldMemOperand(the_heap_number_, | |
4564 HeapNumber::kMantissaOffset)); | |
4565 __ Ret(); | |
4566 | |
4567 __ bind(&max_negative_int); | |
4568 // The max negative int32 is stored as a positive number in the mantissa of | |
4569 // a double because it uses a sign bit instead of using two's complement. | |
4570 // The actual mantissa bits stored are all 0 because the implicit most | |
4571 // significant 1 bit is not stored. | |
4572 non_smi_exponent += 1 << HeapNumber::kExponentShift; | |
4573 __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent)); | |
4574 __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset)); | |
4575 __ mov(ip, Operand(0)); | |
4576 __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset)); | |
4577 __ Ret(); | |
4578 } | |
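In C++ terms the encoding is: any int32 outside Smi range is 1.xxx * 2^30, so the biased exponent is always 1023 + 30, except for -2^31, which gets exponent 1023 + 31 and all-zero stored mantissa bits. A sketch that produces the same two words (the stub lets the unmasked implicit 1 merge harmlessly into the exponent's low bit; here it is masked explicitly):

    #include <cstdint>

    static void NonSmiInt32ToDoubleWords(int32_t value, uint32_t* hi, uint32_t* lo) {
      const uint32_t kSignMask = 0x80000000u;
      const uint32_t kExponentBias = 1023;
      const int kExponentShift = 20;

      if (static_cast<uint32_t>(value) == 0x80000000u) {  // Max negative int.
        *hi = kSignMask | ((kExponentBias + 31) << kExponentShift);
        *lo = 0;
        return;
      }
      uint32_t sign = value < 0 ? kSignMask : 0;
      uint32_t mag = value < 0 ? -static_cast<uint32_t>(value)
                               : static_cast<uint32_t>(value);
      uint32_t frac = mag & ~(1u << 30);  // Drop the implicit leading 1 (bit 30).
      *hi = sign | ((kExponentBias + 30) << kExponentShift) | (frac >> 10);
      *lo = frac << 22;                   // The remaining 10 fraction bits.
    }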
4579 | |
4580 | |
4581 // Allocates a heap number or jumps to the label if the young space is full and | |
4582 // a scavenge is needed. | |
4364 static void AllocateHeapNumber( | 4583 static void AllocateHeapNumber( |
4365 MacroAssembler* masm, | 4584 MacroAssembler* masm, |
4366 Label* need_gc, // Jump here if young space is full. | 4585 Label* need_gc, // Jump here if young space is full. |
4367 Register result_reg, // The tagged address of the new heap number. | 4586 Register result_reg, // The tagged address of the new heap number. |
4368 Register allocation_top_addr_reg, // A scratch register. | 4587 Register allocation_top_addr_reg, // A scratch register. |
4369 Register scratch2) { // Another scratch register. | 4588 Register scratch2) { // Another scratch register. |
4370 ExternalReference allocation_top = | 4589 ExternalReference allocation_top = |
4371 ExternalReference::new_space_allocation_top_address(); | 4590 ExternalReference::new_space_allocation_top_address(); |
4372 ExternalReference allocation_limit = | 4591 ExternalReference allocation_limit = |
4373 ExternalReference::new_space_allocation_limit_address(); | 4592 ExternalReference::new_space_allocation_limit_address(); |
(...skipping 16 matching lines...) | |
4390 __ str(result_reg, MemOperand(allocation_top_addr_reg)); // store new top | 4609 __ str(result_reg, MemOperand(allocation_top_addr_reg)); // store new top |
4391 // Tag and adjust back to start of new object. | 4610 // Tag and adjust back to start of new object. |
4392 __ sub(result_reg, result_reg, Operand(HeapNumber::kSize - kHeapObjectTag)); | 4611 __ sub(result_reg, result_reg, Operand(HeapNumber::kSize - kHeapObjectTag)); |
4393 // Get heap number map into scratch2. | 4612 // Get heap number map into scratch2. |
4394 __ mov(scratch2, Operand(Factory::heap_number_map())); | 4613 __ mov(scratch2, Operand(Factory::heap_number_map())); |
4395 // Store heap number map in new object. | 4614 // Store heap number map in new object. |
4396 __ str(scratch2, FieldMemOperand(result_reg, HeapObject::kMapOffset)); | 4615 __ str(scratch2, FieldMemOperand(result_reg, HeapObject::kMapOffset)); |
4397 } | 4616 } |
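The fast path above is classic bump-pointer allocation: load the new-space top, advance it by the object size, fail over to the caller's GC label if the limit is crossed, then tag the pointer and install the map. The shape of it, with stand-in types rather than the V8 heap interface:

    #include <cstdint>
    #include <cstddef>

    struct NewSpace {
      uintptr_t top;    // Next free address.
      uintptr_t limit;  // End of the allocatable region.
    };

    const int kHeapObjectTag = 1;  // Heap pointers carry a 1 in the low bit.

    // Returns a tagged object address, or 0 when a scavenge is needed.
    uintptr_t AllocateRaw(NewSpace* space, size_t size_in_bytes) {
      uintptr_t result = space->top;
      if (result + size_in_bytes > space->limit) return 0;  // need_gc path.
      space->top = result + size_in_bytes;                  // Store new top.
      return result + kHeapObjectTag;                       // Tag the pointer.
    }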
4398 | 4617 |
4399 | 4618 |
4619 // Checks that the object register (which is assumed not to be a Smi) points to | |
4620 // a heap number. Jumps to the label if it is not. | |
4621 void CheckForHeapNumber(MacroAssembler* masm, | |
4622 Register object, | |
4623 Register scratch, | |
4624 Label* slow) { | |
4625 // Get map of object into scratch. | |
4626 __ ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); | |
4627 // Get type of object into scratch. | |
4628 __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); | |
4629 __ cmp(scratch, Operand(HEAP_NUMBER_TYPE)); | |
4630 __ b(ne, slow); | |
4631 } | |
4632 | |
4633 | |
4400 // We fall into this code if the operands were Smis, but the result was | 4634 // We fall into this code if the operands were Smis, but the result was |
4401 // not (e.g. overflow). We branch into this code (to the not_smi label) if | 4635 // not (e.g. overflow). We branch into this code (to the not_smi label) if |
4402 // the operands were not both Smi. | 4636 // the operands were not both Smi. The operands are in r0 and r1. In order |
4637 // to call the C-implemented binary fp operation routines we need to end up | |
4638 // with the double precision floating point operands in r0 and r1 (for the | |
4639 // value in r1) and r2 and r3 (for the value in r0). | |
4403 static void HandleBinaryOpSlowCases(MacroAssembler* masm, | 4640 static void HandleBinaryOpSlowCases(MacroAssembler* masm, |
4404 Label* not_smi, | 4641 Label* not_smi, |
4405 const Builtins::JavaScript& builtin, | 4642 const Builtins::JavaScript& builtin, |
4406 Token::Value operation, | 4643 Token::Value operation, |
4407 int swi_number, | 4644 int swi_number, |
4408 OverwriteMode mode) { | 4645 OverwriteMode mode) { |
4409 Label slow; | 4646 Label slow, slow_pop_2_first, do_the_call; |
4647 Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1; | |
4648 // Smi-smi case (overflow). | |
4649 // Since both are Smis there is no heap number to overwrite, so allocate. | |
4650 // The new heap number is in r5. r6 and r7 are scratch. | |
4651 AllocateHeapNumber(masm, &slow, r5, r6, r7); | |
4652 // Write Smi from r0 to r3 and r2 in double format. r6 is scratch. | |
4653 ConvertToDoubleStub stub1(r3, r2, r0, r6); | |
4654 __ push(lr); | |
4655 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); | |
4656 // Write Smi from r1 to r1 and r0 in double format. r6 is scratch. | |
4657 __ mov(r7, Operand(r1)); | |
4658 ConvertToDoubleStub stub2(r1, r0, r7, r6); | |
4659 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); | |
4660 __ pop(lr); | |
4661 __ jmp(&do_the_call); // Tail call. No return. | |
4662 | |
4663 // We jump to here if something goes wrong (one param is not a number of any | |
4664 // sort or new-space allocation fails). | |
4410 __ bind(&slow); | 4665 __ bind(&slow); |
4411 __ push(r1); | 4666 __ push(r1); |
4412 __ push(r0); | 4667 __ push(r0); |
4413 __ mov(r0, Operand(1)); // Set number of arguments. | 4668 __ mov(r0, Operand(1)); // Set number of arguments. |
4414 __ InvokeBuiltin(builtin, JUMP_JS); // Tail call. | 4669 __ InvokeBuiltin(builtin, JUMP_JS); // Tail call. No return. |
4415 | 4670 |
4671 // We branch here if at least one of r0 and r1 is not a Smi. | |
4416 __ bind(not_smi); | 4672 __ bind(not_smi); |
4673 if (mode == NO_OVERWRITE) { | |
4674 // In the case where there is no chance of an overwritable float we may as | |
4675 // well do the allocation immediately while r0 and r1 are untouched. | |
4676 AllocateHeapNumber(masm, &slow, r5, r6, r7); | |
4677 } | |
4678 | |
4679 // Move r0 to a double in r2-r3. | |
4417 __ tst(r0, Operand(kSmiTagMask)); | 4680 __ tst(r0, Operand(kSmiTagMask)); |
4418 __ b(eq, &slow); // We can't handle a Smi-double combination yet. | 4681 __ b(eq, &r0_is_smi); // It's a Smi so don't check that it's a heap number. |
4682 CheckForHeapNumber(masm, r0, r4, &slow); | |
4683 if (mode == OVERWRITE_RIGHT) { | |
4684 __ mov(r5, Operand(r0)); // Overwrite this heap number. | |
4685 } | |
4686 // Calling convention says that second double is in r2 and r3. | |
4687 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); | |
4688 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kExponentOffset)); | |
4689 __ jmp(&finished_loading_r0); | |
4690 __ bind(&r0_is_smi); | |
4691 if (mode == OVERWRITE_RIGHT) { | |
4692 // We can't overwrite a Smi so get address of new heap number into r5. | |
4693 AllocateHeapNumber(masm, &slow, r5, r6, r7); | |
4694 } | |
4695 // Write Smi from r0 to r3 and r2 in double format. | |
4696 __ mov(r7, Operand(r0)); | |
4697 ConvertToDoubleStub stub3(r3, r2, r7, r6); | |
4698 __ push(lr); | |
4699 __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET); | |
4700 __ pop(lr); | |
4701 __ bind(&finished_loading_r0); | |
4702 | |
4703 // Move r1 to a double in r0-r1. | |
4419 __ tst(r1, Operand(kSmiTagMask)); | 4704 __ tst(r1, Operand(kSmiTagMask)); |
4420 __ b(eq, &slow); // We can't handle a Smi-double combination yet. | 4705 __ b(eq, &r1_is_smi); // It's a Smi so don't check that it's a heap number. |
4421 // Get map of r0 into r2. | 4706 CheckForHeapNumber(masm, r1, r4, &slow); |
4422 __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); | 4707 if (mode == OVERWRITE_LEFT) { |
4423 // Get type of r0 into r3. | 4708 __ mov(r5, Operand(r1)); // Overwrite this heap number. |
4424 __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceTypeOffset)); | |
4425 __ cmp(r3, Operand(HEAP_NUMBER_TYPE)); | |
4426 __ b(ne, &slow); | |
4427 // Get type of r1 into r3. | |
4428 __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset)); | |
4429 // Check they are both the same map (heap number map). | |
4430 __ cmp(r2, r3); | |
4431 __ b(ne, &slow); | |
4432 // Both are doubles. | |
4433 // Calling convention says that second double is in r2 and r3. | |
4434 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset)); | |
4435 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize)); | |
4436 | |
4437 if (mode == NO_OVERWRITE) { | |
4438 // Get address of new heap number into r5. | |
4439 AllocateHeapNumber(masm, &slow, r5, r6, r7); | |
4440 __ push(lr); | |
4441 __ push(r5); | |
4442 } else if (mode == OVERWRITE_LEFT) { | |
4443 __ push(lr); | |
4444 __ push(r1); | |
4445 } else { | |
4446 ASSERT(mode == OVERWRITE_RIGHT); | |
4447 __ push(lr); | |
4448 __ push(r0); | |
4449 } | 4709 } |
4450 // Calling convention says that first double is in r0 and r1. | 4710 // Calling convention says that first double is in r0 and r1. |
4451 __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset)); | 4711 __ ldr(r0, FieldMemOperand(r1, HeapNumber::kMantissaOffset)); |
4452 __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize)); | 4712 __ ldr(r1, FieldMemOperand(r1, HeapNumber::kExponentOffset)); |
4713 __ jmp(&finished_loading_r1); | |
4714 __ bind(&r1_is_smi); | |
4715 if (mode == OVERWRITE_LEFT) { | |
4716 // We can't overwrite a Smi so get address of new heap number into r5. | |
4717 AllocateHeapNumber(masm, &slow, r5, r6, r7); | |
4718 } | |
4719 // Write Smi from r1 to r1 and r0 in double format. | |
4720 __ mov(r7, Operand(r1)); | |
4721 ConvertToDoubleStub stub4(r1, r0, r7, r6); | |
4722 __ push(lr); | |
4723 __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET); | |
4724 __ pop(lr); | |
4725 __ bind(&finished_loading_r1); | |
4726 | |
4727 __ bind(&do_the_call); | |
4728 // r0: Left value (least significant part of mantissa). | |
4729 // r1: Left value (sign, exponent, top of mantissa). | |
4730 // r2: Right value (least significant part of mantissa). | |
4731 // r3: Right value (sign, exponent, top of mantissa). | |
4732 // r5: Address of heap number for result. | |
4733 __ push(lr); // For later. | |
4734 __ push(r5); // Address of heap number that is answer. | |
4453 // Call C routine that may not cause GC or other trouble. | 4735 // Call C routine that may not cause GC or other trouble. |
4454 __ mov(r5, Operand(ExternalReference::double_fp_operation(operation))); | 4736 __ mov(r5, Operand(ExternalReference::double_fp_operation(operation))); |
4455 #if !defined(__arm__) | 4737 #if !defined(__arm__) |
4456 // Notify the simulator that we are calling an add routine in C. | 4738 // Notify the simulator that we are calling an add routine in C. |
4457 __ swi(swi_number); | 4739 __ swi(swi_number); |
4458 #else | 4740 #else |
4459 // Actually call the add routine written in C. | 4741 // Actually call the add routine written in C. |
4460 __ Call(r5); | 4742 __ Call(r5); |
4461 #endif | 4743 #endif |
4462 // Store answer in the overwritable heap number. | 4744 // Store answer in the overwritable heap number. |
4463 __ pop(r4); | 4745 __ pop(r4); |
4464 #if !defined(__ARM_EABI__) && defined(__arm__) | 4746 #if !defined(__ARM_EABI__) && defined(__arm__) |
4465 // Double returned in fp coprocessor register 0 and 1, encoded as register | 4747 // Double returned in fp coprocessor register 0 and 1, encoded as register |
4466 // cr8. Offsets must be divisible by 4 for coprocessor so we need to | 4748 // cr8. Offsets must be divisible by 4 for coprocessor so we need to |
4467 // subtract the tag from r4. | 4749 // subtract the tag from r4. |
4468 __ sub(r5, r4, Operand(kHeapObjectTag)); | 4750 __ sub(r5, r4, Operand(kHeapObjectTag)); |
4469 __ stc(p1, cr8, MemOperand(r5, HeapNumber::kValueOffset)); | 4751 __ stc(p1, cr8, MemOperand(r5, HeapNumber::kValueOffset)); |
4470 #else | 4752 #else |
4471 // Double returned in fp coprocessor register 0 and 1. | 4753 // Double returned in fp coprocessor register 0 and 1. |
4472 __ str(r0, FieldMemOperand(r4, HeapNumber::kValueOffset)); | 4754 __ str(r0, FieldMemOperand(r4, HeapNumber::kMantissaOffset)); |
4473 __ str(r1, FieldMemOperand(r4, HeapNumber::kValueOffset + kPointerSize)); | 4755 __ str(r1, FieldMemOperand(r4, HeapNumber::kExponentOffset)); |
4474 #endif | 4756 #endif |
4475 __ mov(r0, Operand(r4)); | 4757 __ mov(r0, Operand(r4)); |
4476 // And we are done. | 4758 // And we are done. |
4477 __ pop(pc); | 4759 __ pop(pc); |
4478 } | 4760 } |
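The register choreography above exists because, under the soft-float calling convention assumed here, each double occupies a pair of core registers: r0/r1 for the first argument, r2/r3 for the second. In C++ terms the loads are just splitting a double into its two 32-bit halves (a sketch; little-endian word order as on this target):

    #include <cstdint>
    #include <cstring>

    // Low (mantissa) word goes in r0 or r2; high (sign/exponent/top
    // mantissa) word goes in r1 or r3.
    static void SplitDouble(double d, uint32_t* mantissa_word,
                            uint32_t* exponent_word) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      *mantissa_word = static_cast<uint32_t>(bits);
      *exponent_word = static_cast<uint32_t>(bits >> 32);
    }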
4479 | 4761 |
4480 | 4762 |
4763 // Tries to get a signed int32 out of a double precision floating point heap | |
4764 // number. Rounds towards 0. Only succeeds for doubles that are in the ranges | |
4765 // -0x7fffffff to -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds | |
4766 // almost to the range of signed int32 values that are not Smis. Jumps to the | |
4767 // label if the double isn't in the range it can cope with. | |
4768 static void GetInt32(MacroAssembler* masm, | |
4769 Register source, | |
4770 Register dest, | |
4771 Register scratch, | |
4772 Label* slow) { | |
4773 Register scratch2 = dest; | |
4774 // Get exponent word. | |
4775 __ ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset)); | |
4776 // Get exponent alone in scratch2. | |
4777 __ and_(scratch2, scratch, Operand(HeapNumber::kExponentMask)); | |
4778 // Check whether the exponent matches a 32 bit signed int that is not a Smi. | |
4779 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). | |
4780 const uint32_t non_smi_exponent = | |
4781 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift; | |
4782 __ cmp(scratch2, Operand(non_smi_exponent)); | |
4783 // If not, then we go slow. | |
4784 __ b(ne, slow); | |
4785 // Get the top bits of the mantissa. | |
4786 __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask)); | |
4787 // Put back the implicit 1. | |
4788 __ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift)); | |
4789 // Shift up the mantissa bits to take up the space the exponent used to take. | |
4790 // We just orred in the implicit bit so that took care of one and we want to | |
4791 // leave the sign bit 0 so we subtract 2 bits from the shift distance. | |
4792 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; | |
4793 __ mov(scratch2, Operand(scratch2, LSL, shift_distance)); | |
4794 // Put sign in zero flag. | |
4795 __ tst(scratch, Operand(HeapNumber::kSignMask)); | |
4796 // Get the second half of the double. | |
4797 __ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset)); | |
4798 // Shift down 22 bits to get the last 10 bits. | |
4799 __ orr(dest, scratch2, Operand(scratch, LSR, 32 - shift_distance)); | |
4800 // Fix sign if sign bit was set. | |
4801 __ rsb(dest, dest, Operand(0), LeaveCC, ne); | |
4802 } | |
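The same fast path in C++: accept only doubles whose biased exponent is exactly 1023 + 30, i.e. magnitude in [2^30, 2^31), which is (almost) the int32 range that is not Smi. A sketch, with the slow path modeled as a false return:

    #include <cstdint>
    #include <cstring>

    static bool DoubleToNonSmiInt32(double d, int32_t* out) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      uint32_t hi = static_cast<uint32_t>(bits >> 32);
      uint32_t lo = static_cast<uint32_t>(bits);
      const uint32_t kExponentMask = 0x7ff00000u;
      const uint32_t kMantissaMask = 0x000fffffu;
      if ((hi & kExponentMask) != ((1023u + 30u) << 20)) return false;
      // Magnitude: implicit 1 at bit 30, the 20 stored high-word bits,
      // and the top 10 bits of the low word. The rest is discarded,
      // truncating towards zero.
      uint32_t mag = (1u << 30) | ((hi & kMantissaMask) << 10) | (lo >> 22);
      *out = (hi & 0x80000000u) ? -static_cast<int32_t>(mag)
                                : static_cast<int32_t>(mag);
      return true;
    }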
4803 | |
4804 | |
4805 // For bitwise ops where the inputs are not both Smis, we try to determine | |
4806 // whether both inputs are either Smis or at least heap numbers that can be | |
4807 // represented by a 32 bit signed value. We truncate towards zero as required | |
4808 // by the ES spec. If this is the case we do the bitwise op and see if the | |
4809 // result is a Smi. If so, great; otherwise we try to find a heap number to | |
4810 // write the answer into (either by allocating or by overwriting). | |
4811 // On entry the operands are in r0 and r1. On exit the answer is in r0. | |
4812 void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm) { | |
4813 Label slow, result_not_a_smi; | |
4814 Label r0_is_smi, r1_is_smi; | |
4815 Label done_checking_r0, done_checking_r1; | |
4816 | |
4817 __ tst(r1, Operand(kSmiTagMask)); | |
4818 __ b(eq, &r1_is_smi); // It's a Smi so don't check that it's a heap number. | |
4819 CheckForHeapNumber(masm, r1, r4, &slow); | |
4820 GetInt32(masm, r1, r3, r4, &slow); | |
4821 __ jmp(&done_checking_r1); | |
4822 __ bind(&r1_is_smi); | |
4823 __ mov(r3, Operand(r1, ASR, 1)); | |
4824 __ bind(&done_checking_r1); | |
4825 | |
4826 __ tst(r0, Operand(kSmiTagMask)); | |
4827 __ b(eq, &r0_is_smi); // It's a Smi so don't check that it's a heap number. | |
4828 CheckForHeapNumber(masm, r0, r4, &slow); | |
4829 GetInt32(masm, r0, r2, r4, &slow); | |
4830 __ jmp(&done_checking_r0); | |
4831 __ bind(&r0_is_smi); | |
4832 __ mov(r2, Operand(r0, ASR, 1)); | |
4833 __ bind(&done_checking_r0); | |
4834 | |
4835 // r0 and r1: Original operands (Smi or heap numbers). | |
4836 // r2 and r3: Signed int32 operands. | |
4837 switch (op_) { | |
4838 case Token::BIT_OR: __ orr(r2, r2, Operand(r3)); break; | |
4839 case Token::BIT_XOR: __ eor(r2, r2, Operand(r3)); break; | |
4840 case Token::BIT_AND: __ and_(r2, r2, Operand(r3)); break; | |
4841 case Token::SAR: | |
4842 // Use only the 5 least significant bits of the shift count. | |
4843 __ and_(r2, r2, Operand(0x1f)); | |
4844 __ mov(r2, Operand(r3, ASR, r2)); | |
4845 break; | |
4846 case Token::SHR: | |
4847 // Use only the 5 least significant bits of the shift count. | |
4848 __ and_(r2, r2, Operand(0x1f)); | |
4849 __ mov(r2, Operand(r3, LSR, r2), SetCC); | |
4850 // SHR is special because it is required to produce a positive answer. | |
4851 // The code below for writing into heap numbers isn't capable of writing | |
4852 // the register as an unsigned int, so we take the slow case when that | |
4853 // happens. | |
4854 __ b(mi, &slow); | |
4855 break; | |
4856 case Token::SHL: | |
4857 // Use only the 5 least significant bits of the shift count. | |
4858 __ and_(r2, r2, Operand(0x1f)); | |
4859 __ mov(r2, Operand(r3, LSL, r2)); | |
4860 break; | |
4861 default: UNREACHABLE(); | |
4862 } | |
4863 // Check that the *signed* result fits in a Smi. | |
4864 __ add(r3, r2, Operand(0x40000000), SetCC); | |
4865 __ b(mi, &result_not_a_smi); | |
4866 __ mov(r0, Operand(r2, LSL, kSmiTagSize)); | |
4867 __ Ret(); | |
4868 | |
4869 Label have_to_allocate, got_a_heap_number; | |
4870 __ bind(&result_not_a_smi); | |
4871 switch (mode_) { | |
4872 case OVERWRITE_RIGHT: { | |
4873 __ tst(r0, Operand(kSmiTagMask)); | |
4874 __ b(eq, &have_to_allocate); | |
4875 __ mov(r5, Operand(r0)); | |
4876 break; | |
4877 } | |
4878 case OVERWRITE_LEFT: { | |
4879 __ tst(r1, Operand(kSmiTagMask)); | |
4880 __ b(eq, &have_to_allocate); | |
4881 __ mov(r5, Operand(r1)); | |
4882 break; | |
4883 } | |
4884 case NO_OVERWRITE: { | |
4885 // Get a new heap number in r5. r6 and r7 are scratch. | |
4886 AllocateHeapNumber(masm, &slow, r5, r6, r7); | |
4887 } | |
4888 default: break; | |
4889 } | |
4890 __ bind(&got_a_heap_number); | |
4891 // r2: Answer as signed int32. | |
4892 // r5: Heap number to write answer into. | |
4893 | |
4894 // Nothing can go wrong now, so move the heap number to r0, which is the | |
4895 // result. | |
4896 __ mov(r0, Operand(r5)); | |
4897 | |
4898 // Tail call that writes the int32 in r2 to the heap number in r0, using | |
4899 // r3 as scratch. r0 is preserved and returned. | |
4900 WriteInt32ToHeapNumberStub stub(r2, r0, r3); | |
4901 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); | |
4902 | |
4903 if (mode_ != NO_OVERWRITE) { | |
4904 __ bind(&have_to_allocate); | |
4905 // Get a new heap number in r5. r6 and r7 are scratch. | |
4906 AllocateHeapNumber(masm, &slow, r5, r6, r7); | |
4907 __ jmp(&got_a_heap_number); | |
4908 } | |
4909 | |
4910 // If all else failed then we go to the runtime system. | |
4911 __ bind(&slow); | |
4912 __ push(r1); // restore stack | |
4913 __ push(r0); | |
4914 __ mov(r0, Operand(1)); // 1 argument (not counting receiver). | |
4915 switch (op_) { | |
4916 case Token::BIT_OR: | |
4917 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS); | |
4918 break; | |
4919 case Token::BIT_AND: | |
4920 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS); | |
4921 break; | |
4922 case Token::BIT_XOR: | |
4923 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS); | |
4924 break; | |
4925 case Token::SAR: | |
4926 __ InvokeBuiltin(Builtins::SAR, JUMP_JS); | |
4927 break; | |
4928 case Token::SHR: | |
4929 __ InvokeBuiltin(Builtins::SHR, JUMP_JS); | |
4930 break; | |
4931 case Token::SHL: | |
4932 __ InvokeBuiltin(Builtins::SHL, JUMP_JS); | |
4933 break; | |
4934 default: | |
4935 UNREACHABLE(); | |
4936 } | |
4937 } | |
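A note on the Smi range check used above ("add 0x40000000, branch on minus"): a 31-bit Smi holds values in [-2^30, 2^30). Adding 2^30 shifts that window to [0, 2^31), exactly the non-negative int32s, so the sign bit of the sum is set iff the value does not fit. The same test in C++ (a sketch; the add is done unsigned to avoid signed-overflow UB):

    #include <cstdint>

    static bool FitsInSmi(int32_t value) {
      uint32_t shifted = static_cast<uint32_t>(value) + 0x40000000u;
      return (shifted & 0x80000000u) == 0;  // Sign bit clear: it fits.
    }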
4938 | |
4939 | |
4481 void GenericBinaryOpStub::Generate(MacroAssembler* masm) { | 4940 void GenericBinaryOpStub::Generate(MacroAssembler* masm) { |
4482 // r1 : x | 4941 // r1 : x |
4483 // r0 : y | 4942 // r0 : y |
4484 // result : r0 | 4943 // result : r0 |
4485 | 4944 |
4486 // All ops need to know whether we are dealing with two Smis. Set up r2 to | 4945 // All ops need to know whether we are dealing with two Smis. Set up r2 to |
4487 // tell us that. | 4946 // tell us that. |
4488 __ orr(r2, r1, Operand(r0)); // r2 = x | y; | 4947 __ orr(r2, r1, Operand(r0)); // r2 = x | y; |
4489 | 4948 |
4490 switch (op_) { | 4949 switch (op_) { |
(...skipping 55 matching lines...) | |
4546 __ mov(r0, Operand(r3), LeaveCC, ne); | 5005 __ mov(r0, Operand(r3), LeaveCC, ne); |
4547 __ Ret(ne); | 5006 __ Ret(ne); |
4548 // Slow case. | 5007 // Slow case. |
4549 __ bind(&slow); | 5008 __ bind(&slow); |
4550 | 5009 |
4551 HandleBinaryOpSlowCases(masm, | 5010 HandleBinaryOpSlowCases(masm, |
4552 ¬_smi, | 5011 ¬_smi, |
4553 Builtins::MUL, | 5012 Builtins::MUL, |
4554 Token::MUL, | 5013 Token::MUL, |
4555 assembler::arm::simulator_fp_mul, | 5014 assembler::arm::simulator_fp_mul, |
4556 mode_); | 5015 mode_); |
4557 break; | 5016 break; |
4558 } | 5017 } |
4559 | 5018 |
4560 case Token::BIT_OR: | 5019 case Token::BIT_OR: |
4561 case Token::BIT_AND: | 5020 case Token::BIT_AND: |
4562 case Token::BIT_XOR: { | 5021 case Token::BIT_XOR: |
5022 case Token::SAR: | |
5023 case Token::SHR: | |
5024 case Token::SHL: { | |
4563 Label slow; | 5025 Label slow; |
4564 ASSERT(kSmiTag == 0); // adjust code below | 5026 ASSERT(kSmiTag == 0); // adjust code below |
4565 __ tst(r2, Operand(kSmiTagMask)); | 5027 __ tst(r2, Operand(kSmiTagMask)); |
4566 __ b(ne, &slow); | 5028 __ b(ne, &slow); |
4567 switch (op_) { | 5029 switch (op_) { |
4568 case Token::BIT_OR: __ orr(r0, r0, Operand(r1)); break; | 5030 case Token::BIT_OR: __ orr(r0, r0, Operand(r1)); break; |
4569 case Token::BIT_AND: __ and_(r0, r0, Operand(r1)); break; | 5031 case Token::BIT_AND: __ and_(r0, r0, Operand(r1)); break; |
4570 case Token::BIT_XOR: __ eor(r0, r0, Operand(r1)); break; | 5032 case Token::BIT_XOR: __ eor(r0, r0, Operand(r1)); break; |
5033 case Token::SAR: | |
5034 // Remove tags from right operand. | |
5035 __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // y | |
5036 // Use only the 5 least significant bits of the shift count. | |
5037 __ and_(r2, r2, Operand(0x1f)); | |
5038 __ mov(r0, Operand(r1, ASR, r2)); | |
5039 // Smi tag result. | |
5040 __ and_(r0, r0, Operand(~kSmiTagMask)); | |
5041 break; | |
5042 case Token::SHR: | |
5043 // Remove tags from operands. We can't do this on a 31 bit number | |
5044 // because then the 0s get shifted into bit 30 instead of bit 31. | |
5045 __ mov(r3, Operand(r1, ASR, kSmiTagSize)); // x | |
5046 __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // y | |
5047 // Use only the 5 least significant bits of the shift count. | |
5048 __ and_(r2, r2, Operand(0x1f)); | |
5049 __ mov(r3, Operand(r3, LSR, r2)); | |
5050 // Unsigned shift is not allowed to produce a negative number, so | |
5051 // check the sign bit and the sign bit after Smi tagging. | |
5052 __ tst(r3, Operand(0xc0000000)); | |
5053 __ b(ne, &slow); | |
5054 // Smi tag result. | |
5055 __ mov(r0, Operand(r3, LSL, kSmiTagSize)); | |
5056 break; | |
5057 case Token::SHL: | |
5058 // Remove tags from operands. | |
5059 __ mov(r3, Operand(r1, ASR, kSmiTagSize)); // x | |
5060 __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // y | |
5061 // Use only the 5 least significant bits of the shift count. | |
5062 __ and_(r2, r2, Operand(0x1f)); | |
5063 __ mov(r3, Operand(r3, LSL, r2)); | |
5064 // Check that the signed result fits in a Smi. | |
5065 __ add(r2, r3, Operand(0x40000000), SetCC); | |
5066 __ b(mi, &slow); | |
5067 __ mov(r0, Operand(r3, LSL, kSmiTagSize)); | |
5068 break; | |
4571 default: UNREACHABLE(); | 5069 default: UNREACHABLE(); |
4572 } | 5070 } |
4573 __ Ret(); | 5071 __ Ret(); |
4574 __ bind(&slow); | 5072 __ bind(&slow); |
4575 __ push(r1); // restore stack | 5073 HandleNonSmiBitwiseOp(masm); |
4576 __ push(r0); | |
4577 __ mov(r0, Operand(1)); // 1 argument (not counting receiver). | |
4578 switch (op_) { | |
4579 case Token::BIT_OR: | |
4580 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS); | |
4581 break; | |
4582 case Token::BIT_AND: | |
4583 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS); | |
4584 break; | |
4585 case Token::BIT_XOR: | |
4586 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS); | |
4587 break; | |
4588 default: | |
4589 UNREACHABLE(); | |
4590 } | |
4591 break; | 5074 break; |
4592 } | 5075 } |
4593 | 5076 |
4594 case Token::SHL: | |
4595 case Token::SHR: | |
4596 case Token::SAR: { | |
4597 Label slow; | |
4598 ASSERT(kSmiTag == 0); // adjust code below | |
4599 __ tst(r2, Operand(kSmiTagMask)); | |
4600 __ b(ne, &slow); | |
4601 // remove tags from operands (but keep sign) | |
4602 __ mov(r3, Operand(r1, ASR, kSmiTagSize)); // x | |
4603 __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // y | |
4604 // use only the 5 least significant bits of the shift count | |
4605 __ and_(r2, r2, Operand(0x1f)); | |
4606 // perform operation | |
4607 switch (op_) { | |
4608 case Token::SAR: | |
4609 __ mov(r3, Operand(r3, ASR, r2)); | |
4610 // no checks of result necessary | |
4611 break; | |
4612 | |
4613 case Token::SHR: | |
4614 __ mov(r3, Operand(r3, LSR, r2)); | |
4615 // check that the *unsigned* result fits in a smi | |
4616 // neither of the two high-order bits can be set: | |
4617 // - 0x80000000: high bit would be lost when smi tagging | |
4618 // - 0x40000000: this number would convert to negative when | |
4619 // smi tagging these two cases can only happen with shifts | |
4620 // by 0 or 1 when handed a valid smi | |
4621 __ and_(r2, r3, Operand(0xc0000000), SetCC); | |
4622 __ b(ne, &slow); | |
4623 break; | |
4624 | |
4625 case Token::SHL: | |
4626 __ mov(r3, Operand(r3, LSL, r2)); | |
4627 // check that the *signed* result fits in a smi | |
4628 __ add(r2, r3, Operand(0x40000000), SetCC); | |
4629 __ b(mi, &slow); | |
4630 break; | |
4631 | |
4632 default: UNREACHABLE(); | |
4633 } | |
4634 // tag result and store it in r0 | |
4635 ASSERT(kSmiTag == 0); // adjust code below | |
4636 __ mov(r0, Operand(r3, LSL, kSmiTagSize)); | |
4637 __ Ret(); | |
4638 // slow case | |
4639 __ bind(&slow); | |
4640 __ push(r1); // restore stack | |
4641 __ push(r0); | |
4642 __ mov(r0, Operand(1)); // 1 argument (not counting receiver). | |
4643 switch (op_) { | |
4644 case Token::SAR: __ InvokeBuiltin(Builtins::SAR, JUMP_JS); break; | |
4645 case Token::SHR: __ InvokeBuiltin(Builtins::SHR, JUMP_JS); break; | |
4646 case Token::SHL: __ InvokeBuiltin(Builtins::SHL, JUMP_JS); break; | |
4647 default: UNREACHABLE(); | |
4648 } | |
4649 break; | |
4650 } | |
4651 | |
4652 default: UNREACHABLE(); | 5077 default: UNREACHABLE(); |
4653 } | 5078 } |
4654 // This code should be unreachable. | 5079 // This code should be unreachable. |
4655 __ stop("Unreachable"); | 5080 __ stop("Unreachable"); |
4656 } | 5081 } |
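The Smi fast path for the shift operators follows one pattern throughout: untag with an arithmetic shift right by the one-bit tag, mask the count to its low 5 bits as JS requires, shift, then retag only if the result still fits. A C++ sketch of the SHL case (hypothetical helper names; the fit test is the 0x40000000 trick noted earlier):

    #include <cstdint>

    const int kSmiTagSize = 1;  // One tag bit on this 32-bit target.

    static bool SmiShiftLeft(int32_t x_smi, int32_t y_smi, int32_t* result_smi) {
      int32_t x = x_smi >> kSmiTagSize;                // Untag, keeping sign.
      uint32_t count = (y_smi >> kSmiTagSize) & 0x1f;  // Low 5 bits only.
      int32_t r = static_cast<int32_t>(static_cast<uint32_t>(x) << count);
      // Check that the signed result fits in a Smi.
      if ((static_cast<uint32_t>(r) + 0x40000000u) & 0x80000000u) return false;
      *result_smi = static_cast<int32_t>(static_cast<uint32_t>(r) << kSmiTagSize);
      return true;
    }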
4657 | 5082 |
4658 | 5083 |
4659 void StackCheckStub::Generate(MacroAssembler* masm) { | 5084 void StackCheckStub::Generate(MacroAssembler* masm) { |
4660 Label within_limit; | 5085 Label within_limit; |
4661 __ mov(ip, Operand(ExternalReference::address_of_stack_guard_limit())); | 5086 __ mov(ip, Operand(ExternalReference::address_of_stack_guard_limit())); |
4662 __ ldr(ip, MemOperand(ip)); | 5087 __ ldr(ip, MemOperand(ip)); |
4663 __ cmp(sp, Operand(ip)); | 5088 __ cmp(sp, Operand(ip)); |
4664 __ b(hs, &within_limit); | 5089 __ b(hs, &within_limit); |
4665 // Do tail-call to runtime routine. Runtime routines expect at least one | 5090 // Do tail-call to runtime routine. Runtime routines expect at least one |
4666 // argument, so give it a Smi. | 5091 // argument, so give it a Smi. |
4667 __ mov(r0, Operand(Smi::FromInt(0))); | 5092 __ mov(r0, Operand(Smi::FromInt(0))); |
4668 __ push(r0); | 5093 __ push(r0); |
4669 __ TailCallRuntime(ExternalReference(Runtime::kStackGuard), 1); | 5094 __ TailCallRuntime(ExternalReference(Runtime::kStackGuard), 1); |
4670 __ bind(&within_limit); | 5095 __ bind(&within_limit); |
4671 | 5096 |
4672 __ StubReturn(1); | 5097 __ StubReturn(1); |
4673 } | 5098 } |
4674 | 5099 |
4675 | 5100 |
4676 void UnarySubStub::Generate(MacroAssembler* masm) { | 5101 void UnarySubStub::Generate(MacroAssembler* masm) { |
4677 Label undo; | 5102 Label undo; |
4678 Label slow; | 5103 Label slow; |
4679 Label done; | 5104 Label done; |
5105 Label not_smi; | |
4680 | 5106 |
4681 // Enter runtime system if the value is not a smi. | 5107 // Enter runtime system if the value is not a smi. |
4682 __ tst(r0, Operand(kSmiTagMask)); | 5108 __ tst(r0, Operand(kSmiTagMask)); |
4683 __ b(ne, &slow); | 5109 __ b(ne, ¬_smi); |
4684 | 5110 |
4685 // Enter runtime system if the value of the expression is zero | 5111 // Enter runtime system if the value of the expression is zero |
4686 // to make sure that we switch between 0 and -0. | 5112 // to make sure that we switch between 0 and -0. |
4687 __ cmp(r0, Operand(0)); | 5113 __ cmp(r0, Operand(0)); |
4688 __ b(eq, &slow); | 5114 __ b(eq, &slow); |
4689 | 5115 |
4690 // The value of the expression is a smi that is not zero. Try | 5116 // The value of the expression is a smi that is not zero. Try |
4691 // optimistic subtraction '0 - value'. | 5117 // optimistic subtraction '0 - value'. |
4692 __ rsb(r1, r0, Operand(0), SetCC); | 5118 __ rsb(r1, r0, Operand(0), SetCC); |
4693 __ b(vs, &slow); | 5119 __ b(vs, &slow); |
4694 | 5120 |
4695 // If result is a smi we are done. | 5121 __ mov(r0, Operand(r1)); // Set r0 to result. |
4696 __ tst(r1, Operand(kSmiTagMask)); | 5122 __ StubReturn(1); |
4697 __ mov(r0, Operand(r1), LeaveCC, eq); // conditionally set r0 to result | |
4698 __ b(eq, &done); | |
4699 | 5123 |
4700 // Enter runtime system. | 5124 // Enter runtime system. |
4701 __ bind(&slow); | 5125 __ bind(&slow); |
4702 __ push(r0); | 5126 __ push(r0); |
4703 __ mov(r0, Operand(0)); // set number of arguments | 5127 __ mov(r0, Operand(0)); // Set number of arguments. |
4704 __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS); | 5128 __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS); |
4705 | 5129 |
4706 __ bind(&done); | 5130 __ bind(&done); |
4707 __ StubReturn(1); | 5131 __ StubReturn(1); |
5132 | |
5133 __ bind(¬_smi); | |
5134 CheckForHeapNumber(masm, r0, r1, &slow); | |
5135 // r0 is a heap number. Get a new heap number in r1. | |
5136 if (overwrite_) { | |
5137 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); | |
5138 __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign. | |
5139 __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); | |
5140 } else { | |
5141 AllocateHeapNumber(masm, &slow, r1, r2, r3); | |
5142 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); | |
5143 __ str(r2, FieldMemOperand(r1, HeapNumber::kMantissaOffset)); | |
5144 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); | |
5145 __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign. | |
5146 __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset)); | |
5147 __ mov(r0, Operand(r1)); | |
5148 } | |
5149 __ StubReturn(1); | |
4708 } | 5150 } |
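The overwrite path above works because IEEE-754 negation is a pure sign-bit flip: neither the exponent nor the mantissa magnitude changes, so only the exponent word of the heap number needs touching (and -0.0 falls out correctly). The equivalent operation in C++ (sketch):

    #include <cstdint>
    #include <cstring>

    static double NegateDouble(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      bits ^= 0x8000000000000000ull;  // Flip the sign bit only.
      double result;
      std::memcpy(&result, &bits, sizeof result);
      return result;
    }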
4709 | 5151 |
4710 | 5152 |
4711 void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { | 5153 void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { |
4712 // r0 holds exception | 5154 // r0 holds exception |
4713 ASSERT(StackHandlerConstants::kSize == 6 * kPointerSize); // adjust this code | 5155 ASSERT(StackHandlerConstants::kSize == 6 * kPointerSize); // adjust this code |
4714 __ mov(r3, Operand(ExternalReference(Top::k_handler_address))); | 5156 __ mov(r3, Operand(ExternalReference(Top::k_handler_address))); |
4715 __ ldr(sp, MemOperand(r3)); | 5157 __ ldr(sp, MemOperand(r3)); |
4716 __ pop(r2); // pop next in chain | 5158 __ pop(r2); // pop next in chain |
4717 __ str(r2, MemOperand(r3)); | 5159 __ str(r2, MemOperand(r3)); |
(...skipping 492 matching lines...) | |
5210 __ mov(r2, Operand(0)); | 5652 __ mov(r2, Operand(0)); |
5211 __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION); | 5653 __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION); |
5212 __ Jump(Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)), | 5654 __ Jump(Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)), |
5213 RelocInfo::CODE_TARGET); | 5655 RelocInfo::CODE_TARGET); |
5214 } | 5656 } |
5215 | 5657 |
5216 | 5658 |
5217 #undef __ | 5659 #undef __ |
5218 | 5660 |
5219 } } // namespace v8::internal | 5661 } } // namespace v8::internal |
OLD | NEW |