| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 139 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 150 __ AllocateInNewSpace(FixedArray::SizeFor(length), | 150 __ AllocateInNewSpace(FixedArray::SizeFor(length), |
| 151 v0, | 151 v0, |
| 152 a1, | 152 a1, |
| 153 a2, | 153 a2, |
| 154 &gc, | 154 &gc, |
| 155 TAG_OBJECT); | 155 TAG_OBJECT); |
| 156 | 156 |
| 157 // Load the function from the stack. | 157 // Load the function from the stack. |
| 158 __ lw(a3, MemOperand(sp, 0)); | 158 __ lw(a3, MemOperand(sp, 0)); |
| 159 | 159 |
| 160 // Setup the object header. | 160 // Set up the object header. |
| 161 __ LoadRoot(a2, Heap::kFunctionContextMapRootIndex); | 161 __ LoadRoot(a2, Heap::kFunctionContextMapRootIndex); |
| 162 __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset)); | 162 __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset)); |
| 163 __ li(a2, Operand(Smi::FromInt(length))); | 163 __ li(a2, Operand(Smi::FromInt(length))); |
| 164 __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset)); | 164 __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset)); |
| 165 | 165 |
| 166 // Setup the fixed slots. | 166 // Set up the fixed slots. |
| 167 __ li(a1, Operand(Smi::FromInt(0))); | 167 __ li(a1, Operand(Smi::FromInt(0))); |
| 168 __ sw(a3, MemOperand(v0, Context::SlotOffset(Context::CLOSURE_INDEX))); | 168 __ sw(a3, MemOperand(v0, Context::SlotOffset(Context::CLOSURE_INDEX))); |
| 169 __ sw(cp, MemOperand(v0, Context::SlotOffset(Context::PREVIOUS_INDEX))); | 169 __ sw(cp, MemOperand(v0, Context::SlotOffset(Context::PREVIOUS_INDEX))); |
| 170 __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::EXTENSION_INDEX))); | 170 __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::EXTENSION_INDEX))); |
| 171 | 171 |
| 172 // Copy the global object from the previous context. | 172 // Copy the global object from the previous context. |
| 173 __ lw(a1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); | 173 __ lw(a1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); |
| 174 __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_INDEX))); | 174 __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_INDEX))); |
| 175 | 175 |
| 176 // Initialize the rest of the slots to undefined. | 176 // Initialize the rest of the slots to undefined. |
| (...skipping 24 matching lines...) Expand all Loading... |
| 201 int length = slots_ + Context::MIN_CONTEXT_SLOTS; | 201 int length = slots_ + Context::MIN_CONTEXT_SLOTS; |
| 202 __ AllocateInNewSpace(FixedArray::SizeFor(length), | 202 __ AllocateInNewSpace(FixedArray::SizeFor(length), |
| 203 v0, a1, a2, &gc, TAG_OBJECT); | 203 v0, a1, a2, &gc, TAG_OBJECT); |
| 204 | 204 |
| 205 // Load the function from the stack. | 205 // Load the function from the stack. |
| 206 __ lw(a3, MemOperand(sp, 0)); | 206 __ lw(a3, MemOperand(sp, 0)); |
| 207 | 207 |
| 208 // Load the serialized scope info from the stack. | 208 // Load the serialized scope info from the stack. |
| 209 __ lw(a1, MemOperand(sp, 1 * kPointerSize)); | 209 __ lw(a1, MemOperand(sp, 1 * kPointerSize)); |
| 210 | 210 |
| 211 // Setup the object header. | 211 // Set up the object header. |
| 212 __ LoadRoot(a2, Heap::kBlockContextMapRootIndex); | 212 __ LoadRoot(a2, Heap::kBlockContextMapRootIndex); |
| 213 __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset)); | 213 __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset)); |
| 214 __ li(a2, Operand(Smi::FromInt(length))); | 214 __ li(a2, Operand(Smi::FromInt(length))); |
| 215 __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset)); | 215 __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset)); |
| 216 | 216 |
| 217 // If this block context is nested in the global context we get a smi | 217 // If this block context is nested in the global context we get a smi |
| 218 // sentinel instead of a function. The block context should get the | 218 // sentinel instead of a function. The block context should get the |
| 219 // canonical empty function of the global context as its closure which | 219 // canonical empty function of the global context as its closure which |
| 220 // we still have to look up. | 220 // we still have to look up. |
| 221 Label after_sentinel; | 221 Label after_sentinel; |
| 222 __ JumpIfNotSmi(a3, &after_sentinel); | 222 __ JumpIfNotSmi(a3, &after_sentinel); |
| 223 if (FLAG_debug_code) { | 223 if (FLAG_debug_code) { |
| 224 const char* message = "Expected 0 as a Smi sentinel"; | 224 const char* message = "Expected 0 as a Smi sentinel"; |
| 225 __ Assert(eq, message, a3, Operand(zero_reg)); | 225 __ Assert(eq, message, a3, Operand(zero_reg)); |
| 226 } | 226 } |
| 227 __ lw(a3, GlobalObjectOperand()); | 227 __ lw(a3, GlobalObjectOperand()); |
| 228 __ lw(a3, FieldMemOperand(a3, GlobalObject::kGlobalContextOffset)); | 228 __ lw(a3, FieldMemOperand(a3, GlobalObject::kGlobalContextOffset)); |
| 229 __ lw(a3, ContextOperand(a3, Context::CLOSURE_INDEX)); | 229 __ lw(a3, ContextOperand(a3, Context::CLOSURE_INDEX)); |
| 230 __ bind(&after_sentinel); | 230 __ bind(&after_sentinel); |
| 231 | 231 |
| 232 // Setup the fixed slots. | 232 // Set up the fixed slots. |
| 233 __ sw(a3, ContextOperand(v0, Context::CLOSURE_INDEX)); | 233 __ sw(a3, ContextOperand(v0, Context::CLOSURE_INDEX)); |
| 234 __ sw(cp, ContextOperand(v0, Context::PREVIOUS_INDEX)); | 234 __ sw(cp, ContextOperand(v0, Context::PREVIOUS_INDEX)); |
| 235 __ sw(a1, ContextOperand(v0, Context::EXTENSION_INDEX)); | 235 __ sw(a1, ContextOperand(v0, Context::EXTENSION_INDEX)); |
| 236 | 236 |
| 237 // Copy the global object from the previous context. | 237 // Copy the global object from the previous context. |
| 238 __ lw(a1, ContextOperand(cp, Context::GLOBAL_INDEX)); | 238 __ lw(a1, ContextOperand(cp, Context::GLOBAL_INDEX)); |
| 239 __ sw(a1, ContextOperand(v0, Context::GLOBAL_INDEX)); | 239 __ sw(a1, ContextOperand(v0, Context::GLOBAL_INDEX)); |
| 240 | 240 |
| 241 // Initialize the rest of the slots to the hole value. | 241 // Initialize the rest of the slots to the hole value. |
| 242 __ LoadRoot(a1, Heap::kTheHoleValueRootIndex); | 242 __ LoadRoot(a1, Heap::kTheHoleValueRootIndex); |
| (...skipping 476 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 719 __ Branch(&done, eq, int_scratch, Operand(zero_reg)); | 719 __ Branch(&done, eq, int_scratch, Operand(zero_reg)); |
| 720 | 720 |
| 721 // Preload the sign of the value. | 721 // Preload the sign of the value. |
| 722 __ And(dst2, int_scratch, Operand(HeapNumber::kSignMask)); | 722 __ And(dst2, int_scratch, Operand(HeapNumber::kSignMask)); |
| 723 // Get the absolute value of the object (as an unsigned integer). | 723 // Get the absolute value of the object (as an unsigned integer). |
| 724 Label skip_sub; | 724 Label skip_sub; |
| 725 __ Branch(&skip_sub, ge, dst2, Operand(zero_reg)); | 725 __ Branch(&skip_sub, ge, dst2, Operand(zero_reg)); |
| 726 __ Subu(int_scratch, zero_reg, int_scratch); | 726 __ Subu(int_scratch, zero_reg, int_scratch); |
| 727 __ bind(&skip_sub); | 727 __ bind(&skip_sub); |
| 728 | 728 |
| 729 // Get mantisssa[51:20]. | 729 // Get mantissa[51:20]. |
| 730 | 730 |
| 731 // Get the position of the first set bit. | 731 // Get the position of the first set bit. |
| 732 __ clz(dst1, int_scratch); | 732 __ clz(dst1, int_scratch); |
| 733 __ li(scratch2, 31); | 733 __ li(scratch2, 31); |
| 734 __ Subu(dst1, scratch2, dst1); | 734 __ Subu(dst1, scratch2, dst1); |
| 735 | 735 |
| 736 // Set the exponent. | 736 // Set the exponent. |
| 737 __ Addu(scratch2, dst1, Operand(HeapNumber::kExponentBias)); | 737 __ Addu(scratch2, dst1, Operand(HeapNumber::kExponentBias)); |
| 738 __ Ins(dst2, scratch2, | 738 __ Ins(dst2, scratch2, |
| 739 HeapNumber::kExponentShift, HeapNumber::kExponentBits); | 739 HeapNumber::kExponentShift, HeapNumber::kExponentBits); |
| (...skipping 224 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 964 __ subu(tmp, scratch, at); | 964 __ subu(tmp, scratch, at); |
| 965 __ Branch(not_int32, gt, tmp, Operand(30)); | 965 __ Branch(not_int32, gt, tmp, Operand(30)); |
| 966 // - Bits [21:0] in the mantissa are not null. | 966 // - Bits [21:0] in the mantissa are not null. |
| 967 __ And(tmp, src2, 0x3fffff); | 967 __ And(tmp, src2, 0x3fffff); |
| 968 __ Branch(not_int32, ne, tmp, Operand(zero_reg)); | 968 __ Branch(not_int32, ne, tmp, Operand(zero_reg)); |
| 969 | 969 |
| 970 // Otherwise the exponent needs to be big enough to shift left all the | 970 // Otherwise the exponent needs to be big enough to shift all the |
| 971 // non zero bits left. So we need the (30 - exponent) last bits of the | 971 // non-zero bits left. So we need the (30 - exponent) last bits of the |
| 972 // 31 higher bits of the mantissa to be null. | 972 // 31 higher bits of the mantissa to be null. |
| 973 // Because bits [21:0] are null, we can check instead that the | 973 // Because bits [21:0] are null, we can check instead that the |
| 974 // (32 - exponent) last bits of the 32 higher bits of the mantisssa are null. | 974 // (32 - exponent) last bits of the 32 higher bits of the mantissa are null. |
| 975 | 975 |
| 976 // Get the 32 higher bits of the mantissa in dst. | 976 // Get the 32 higher bits of the mantissa in dst. |
| 977 __ Ext(dst, | 977 __ Ext(dst, |
| 978 src2, | 978 src2, |
| 979 HeapNumber::kMantissaBitsInTopWord, | 979 HeapNumber::kMantissaBitsInTopWord, |
| 980 32 - HeapNumber::kMantissaBitsInTopWord); | 980 32 - HeapNumber::kMantissaBitsInTopWord); |
| 981 __ sll(at, src1, HeapNumber::kNonMantissaBitsInTopWord); | 981 __ sll(at, src1, HeapNumber::kNonMantissaBitsInTopWord); |
| 982 __ or_(dst, dst, at); | 982 __ or_(dst, dst, at); |
| 983 | 983 |
| 984 // Create the mask and test the lower bits (of the higher bits). | 984 // Create the mask and test the lower bits (of the higher bits). |
| (...skipping 3013 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3998 | 3998 |
| 3999 // Compute the argv pointer in a callee-saved register. | 3999 // Compute the argv pointer in a callee-saved register. |
| 4000 __ sll(s1, a0, kPointerSizeLog2); | 4000 __ sll(s1, a0, kPointerSizeLog2); |
| 4001 __ Addu(s1, sp, s1); | 4001 __ Addu(s1, sp, s1); |
| 4002 __ Subu(s1, s1, Operand(kPointerSize)); | 4002 __ Subu(s1, s1, Operand(kPointerSize)); |
| 4003 | 4003 |
| 4004 // Enter the exit frame that transitions from JavaScript to C++. | 4004 // Enter the exit frame that transitions from JavaScript to C++. |
| 4005 FrameScope scope(masm, StackFrame::MANUAL); | 4005 FrameScope scope(masm, StackFrame::MANUAL); |
| 4006 __ EnterExitFrame(save_doubles_); | 4006 __ EnterExitFrame(save_doubles_); |
| 4007 | 4007 |
| 4008 // Setup argc and the builtin function in callee-saved registers. | 4008 // Set up argc and the builtin function in callee-saved registers. |
| 4009 __ mov(s0, a0); | 4009 __ mov(s0, a0); |
| 4010 __ mov(s2, a1); | 4010 __ mov(s2, a1); |
| 4011 | 4011 |
| 4012 // s0: number of arguments (C callee-saved) | 4012 // s0: number of arguments (C callee-saved) |
| 4013 // s1: pointer to first argument (C callee-saved) | 4013 // s1: pointer to first argument (C callee-saved) |
| 4014 // s2: pointer to builtin function (C callee-saved) | 4014 // s2: pointer to builtin function (C callee-saved) |
| 4015 | 4015 |
| 4016 Label throw_normal_exception; | 4016 Label throw_normal_exception; |
| 4017 Label throw_termination_exception; | 4017 Label throw_termination_exception; |
| 4018 Label throw_out_of_memory_exception; | 4018 Label throw_out_of_memory_exception; |
| (...skipping 71 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 4090 | 4090 |
| 4091 // We build an EntryFrame. | 4091 // We build an EntryFrame. |
| 4092 __ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used. | 4092 __ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used. |
| 4093 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; | 4093 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; |
| 4094 __ li(t2, Operand(Smi::FromInt(marker))); | 4094 __ li(t2, Operand(Smi::FromInt(marker))); |
| 4095 __ li(t1, Operand(Smi::FromInt(marker))); | 4095 __ li(t1, Operand(Smi::FromInt(marker))); |
| 4096 __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress, | 4096 __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress, |
| 4097 isolate))); | 4097 isolate))); |
| 4098 __ lw(t0, MemOperand(t0)); | 4098 __ lw(t0, MemOperand(t0)); |
| 4099 __ Push(t3, t2, t1, t0); | 4099 __ Push(t3, t2, t1, t0); |
| 4100 // Setup frame pointer for the frame to be pushed. | 4100 // Set up frame pointer for the frame to be pushed. |
| 4101 __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset); | 4101 __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset); |
| 4102 | 4102 |
| 4103 // Registers: | 4103 // Registers: |
| 4104 // a0: entry_address | 4104 // a0: entry_address |
| 4105 // a1: function | 4105 // a1: function |
| 4106 // a2: reveiver_pointer | 4106 // a2: receiver_pointer |
| 4107 // a3: argc | 4107 // a3: argc |
| 4108 // s0: argv | 4108 // s0: argv |
| 4109 // | 4109 // |
| 4110 // Stack: | 4110 // Stack: |
| (...skipping 466 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 4577 // v0 = address of new object (tagged) | 4577 // v0 = address of new object (tagged) |
| 4578 // a1 = mapped parameter count (tagged) | 4578 // a1 = mapped parameter count (tagged) |
| 4579 // a2 = argument count (tagged) | 4579 // a2 = argument count (tagged) |
| 4580 // t0 = address of boilerplate object (tagged) | 4580 // t0 = address of boilerplate object (tagged) |
| 4581 // Copy the JS object part. | 4581 // Copy the JS object part. |
| 4582 for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) { | 4582 for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) { |
| 4583 __ lw(a3, FieldMemOperand(t0, i)); | 4583 __ lw(a3, FieldMemOperand(t0, i)); |
| 4584 __ sw(a3, FieldMemOperand(v0, i)); | 4584 __ sw(a3, FieldMemOperand(v0, i)); |
| 4585 } | 4585 } |
| 4586 | 4586 |
| 4587 // Setup the callee in-object property. | 4587 // Set up the callee in-object property. |
| 4588 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1); | 4588 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1); |
| 4589 __ lw(a3, MemOperand(sp, 2 * kPointerSize)); | 4589 __ lw(a3, MemOperand(sp, 2 * kPointerSize)); |
| 4590 const int kCalleeOffset = JSObject::kHeaderSize + | 4590 const int kCalleeOffset = JSObject::kHeaderSize + |
| 4591 Heap::kArgumentsCalleeIndex * kPointerSize; | 4591 Heap::kArgumentsCalleeIndex * kPointerSize; |
| 4592 __ sw(a3, FieldMemOperand(v0, kCalleeOffset)); | 4592 __ sw(a3, FieldMemOperand(v0, kCalleeOffset)); |
| 4593 | 4593 |
| 4594 // Use the length (smi tagged) and set that as an in-object property too. | 4594 // Use the length (smi tagged) and set that as an in-object property too. |
| 4595 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); | 4595 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); |
| 4596 const int kLengthOffset = JSObject::kHeaderSize + | 4596 const int kLengthOffset = JSObject::kHeaderSize + |
| 4597 Heap::kArgumentsLengthIndex * kPointerSize; | 4597 Heap::kArgumentsLengthIndex * kPointerSize; |
| 4598 __ sw(a2, FieldMemOperand(v0, kLengthOffset)); | 4598 __ sw(a2, FieldMemOperand(v0, kLengthOffset)); |
| 4599 | 4599 |
| 4600 // Setup the elements pointer in the allocated arguments object. | 4600 // Set up the elements pointer in the allocated arguments object. |
| 4601 // If we allocated a parameter map, t0 will point there, otherwise | 4601 // If we allocated a parameter map, t0 will point there, otherwise |
| 4602 // it will point to the backing store. | 4602 // it will point to the backing store. |
| 4603 __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSize)); | 4603 __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSize)); |
| 4604 __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset)); | 4604 __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset)); |
| 4605 | 4605 |
| 4606 // v0 = address of new object (tagged) | 4606 // v0 = address of new object (tagged) |
| 4607 // a1 = mapped parameter count (tagged) | 4607 // a1 = mapped parameter count (tagged) |
| 4608 // a2 = argument count (tagged) | 4608 // a2 = argument count (tagged) |
| 4609 // t0 = address of parameter map or backing store (tagged) | 4609 // t0 = address of parameter map or backing store (tagged) |
| 4610 // Initialize parameter map. If there are no mapped arguments, we're done. | 4610 // Initialize parameter map. If there are no mapped arguments, we're done. |
| (...skipping 81 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 4692 __ Addu(t5, t5, Operand(Smi::FromInt(1))); | 4692 __ Addu(t5, t5, Operand(Smi::FromInt(1))); |
| 4693 | 4693 |
| 4694 __ bind(&arguments_test); | 4694 __ bind(&arguments_test); |
| 4695 __ Branch(&arguments_loop, lt, t5, Operand(a2)); | 4695 __ Branch(&arguments_loop, lt, t5, Operand(a2)); |
| 4696 | 4696 |
| 4697 // Return and remove the on-stack parameters. | 4697 // Return and remove the on-stack parameters. |
| 4698 __ Addu(sp, sp, Operand(3 * kPointerSize)); | 4698 __ Addu(sp, sp, Operand(3 * kPointerSize)); |
| 4699 __ Ret(); | 4699 __ Ret(); |
| 4700 | 4700 |
| 4701 // Do the runtime call to allocate the arguments object. | 4701 // Do the runtime call to allocate the arguments object. |
| 4702 // a2 = argument count (taggged) | 4702 // a2 = argument count (tagged) |
| 4703 __ bind(&runtime); | 4703 __ bind(&runtime); |
| 4704 __ sw(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count. | 4704 __ sw(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count. |
| 4705 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); | 4705 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); |
| 4706 } | 4706 } |
| 4707 | 4707 |
| 4708 | 4708 |
| 4709 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { | 4709 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { |
| 4710 // sp[0] : number of parameters | 4710 // sp[0] : number of parameters |
| 4711 // sp[4] : receiver displacement | 4711 // sp[4] : receiver displacement |
| 4712 // sp[8] : function | 4712 // sp[8] : function |
| (...skipping 54 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 4767 __ lw(a1, MemOperand(sp, 0 * kPointerSize)); | 4767 __ lw(a1, MemOperand(sp, 0 * kPointerSize)); |
| 4768 __ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize + | 4768 __ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize + |
| 4769 Heap::kArgumentsLengthIndex * kPointerSize)); | 4769 Heap::kArgumentsLengthIndex * kPointerSize)); |
| 4770 | 4770 |
| 4771 Label done; | 4771 Label done; |
| 4772 __ Branch(&done, eq, a1, Operand(zero_reg)); | 4772 __ Branch(&done, eq, a1, Operand(zero_reg)); |
| 4773 | 4773 |
| 4774 // Get the parameters pointer from the stack. | 4774 // Get the parameters pointer from the stack. |
| 4775 __ lw(a2, MemOperand(sp, 1 * kPointerSize)); | 4775 __ lw(a2, MemOperand(sp, 1 * kPointerSize)); |
| 4776 | 4776 |
| 4777 // Setup the elements pointer in the allocated arguments object and | 4777 // Set up the elements pointer in the allocated arguments object and |
| 4778 // initialize the header in the elements fixed array. | 4778 // initialize the header in the elements fixed array. |
| 4779 __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSizeStrict)); | 4779 __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSizeStrict)); |
| 4780 __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset)); | 4780 __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset)); |
| 4781 __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex); | 4781 __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex); |
| 4782 __ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset)); | 4782 __ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset)); |
| 4783 __ sw(a1, FieldMemOperand(t0, FixedArray::kLengthOffset)); | 4783 __ sw(a1, FieldMemOperand(t0, FixedArray::kLengthOffset)); |
| 4784 // Untag the length for the loop. | 4784 // Untag the length for the loop. |
| 4785 __ srl(a1, a1, kSmiTagSize); | 4785 __ srl(a1, a1, kSmiTagSize); |
| 4786 | 4786 |
| 4787 // Copy the fixed array slots. | 4787 // Copy the fixed array slots. |
| 4788 Label loop; | 4788 Label loop; |
| 4789 // Setup t0 to point to the first array slot. | 4789 // Set up t0 to point to the first array slot. |
| 4790 __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 4790 __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
| 4791 __ bind(&loop); | 4791 __ bind(&loop); |
| 4792 // Pre-decrement a2 with kPointerSize on each iteration. | 4792 // Pre-decrement a2 with kPointerSize on each iteration. |
| 4793 // Pre-decrement in order to skip receiver. | 4793 // Pre-decrement in order to skip receiver. |
| 4794 __ Addu(a2, a2, Operand(-kPointerSize)); | 4794 __ Addu(a2, a2, Operand(-kPointerSize)); |
| 4795 __ lw(a3, MemOperand(a2)); | 4795 __ lw(a3, MemOperand(a2)); |
| 4796 // Post-increment t0 with kPointerSize on each iteration. | 4796 // Post-increment t0 with kPointerSize on each iteration. |
| 4797 __ sw(a3, MemOperand(t0)); | 4797 __ sw(a3, MemOperand(t0)); |
| 4798 __ Addu(t0, t0, Operand(kPointerSize)); | 4798 __ Addu(t0, t0, Operand(kPointerSize)); |
| 4799 __ Subu(a1, a1, Operand(1)); | 4799 __ Subu(a1, a1, Operand(1)); |
| (...skipping 618 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 5418 { | 5418 { |
| 5419 Handle<Code> adaptor = | 5419 Handle<Code> adaptor = |
| 5420 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(); | 5420 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(); |
| 5421 __ Jump(adaptor, RelocInfo::CODE_TARGET); | 5421 __ Jump(adaptor, RelocInfo::CODE_TARGET); |
| 5422 } | 5422 } |
| 5423 | 5423 |
| 5424 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead | 5424 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead |
| 5425 // of the original receiver from the call site). | 5425 // of the original receiver from the call site). |
| 5426 __ bind(&non_function); | 5426 __ bind(&non_function); |
| 5427 __ sw(a1, MemOperand(sp, argc_ * kPointerSize)); | 5427 __ sw(a1, MemOperand(sp, argc_ * kPointerSize)); |
| 5428 __ li(a0, Operand(argc_)); // Setup the number of arguments. | 5428 __ li(a0, Operand(argc_)); // Set up the number of arguments. |
| 5429 __ mov(a2, zero_reg); | 5429 __ mov(a2, zero_reg); |
| 5430 __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION); | 5430 __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION); |
| 5431 __ SetCallKind(t1, CALL_AS_METHOD); | 5431 __ SetCallKind(t1, CALL_AS_METHOD); |
| 5432 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), | 5432 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), |
| 5433 RelocInfo::CODE_TARGET); | 5433 RelocInfo::CODE_TARGET); |
| 5434 } | 5434 } |
| 5435 | 5435 |
| 5436 | 5436 |
| 5437 // Unfortunately you have to run without snapshots to see most of these | 5437 // Unfortunately you have to run without snapshots to see most of these |
| 5438 // names in the profile since most compare stubs end up in the snapshot. | 5438 // names in the profile since most compare stubs end up in the snapshot. |
| (...skipping 481 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 5920 Register result = candidate; | 5920 Register result = candidate; |
| 5921 __ bind(&found_in_symbol_table); | 5921 __ bind(&found_in_symbol_table); |
| 5922 __ mov(v0, result); | 5922 __ mov(v0, result); |
| 5923 } | 5923 } |
| 5924 | 5924 |
| 5925 | 5925 |
| 5926 void StringHelper::GenerateHashInit(MacroAssembler* masm, | 5926 void StringHelper::GenerateHashInit(MacroAssembler* masm, |
| 5927 Register hash, | 5927 Register hash, |
| 5928 Register character) { | 5928 Register character) { |
| 5929 // hash = seed + character + ((seed + character) << 10); | 5929 // hash = seed + character + ((seed + character) << 10); |
| 5930 __ LoadRoot(hash, Heap::kStringHashSeedRootIndex); | 5930 __ LoadRoot(hash, Heap::kHashSeedRootIndex); |
| 5931 // Untag smi seed and add the character. | 5931 // Untag smi seed and add the character. |
| 5932 __ SmiUntag(hash); | 5932 __ SmiUntag(hash); |
| 5933 __ addu(hash, hash, character); | 5933 __ addu(hash, hash, character); |
| 5934 __ sll(at, hash, 10); | 5934 __ sll(at, hash, 10); |
| 5935 __ addu(hash, hash, at); | 5935 __ addu(hash, hash, at); |
| 5936 // hash ^= hash >> 6; | 5936 // hash ^= hash >> 6; |
| 5937 __ srl(at, hash, 6); | 5937 __ srl(at, hash, 6); |
| 5938 __ xor_(hash, hash, at); | 5938 __ xor_(hash, hash, at); |
| 5939 } | 5939 } |
| 5940 | 5940 |
| 5941 | 5941 |
| 5942 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm, | 5942 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm, |
| 5943 Register hash, | 5943 Register hash, |
| 5944 Register character) { | 5944 Register character) { |
| 5945 // hash += character; | 5945 // hash += character; |
| 5946 __ addu(hash, hash, character); | 5946 __ addu(hash, hash, character); |
| 5947 // hash += hash << 10; | 5947 // hash += hash << 10; |
| 5948 __ sll(at, hash, 10); | 5948 __ sll(at, hash, 10); |
| 5949 __ addu(hash, hash, at); | 5949 __ addu(hash, hash, at); |
| 5950 // hash ^= hash >> 6; | 5950 // hash ^= hash >> 6; |
| 5951 __ srl(at, hash, 6); | 5951 __ srl(at, hash, 6); |
| 5952 __ xor_(hash, hash, at); | 5952 __ xor_(hash, hash, at); |
| 5953 } | 5953 } |
| 5954 | 5954 |
| 5955 | 5955 |
| 5956 void StringHelper::GenerateHashGetHash(MacroAssembler* masm, | 5956 void StringHelper::GenerateHashGetHash(MacroAssembler* masm, |
| 5957 Register hash) { | 5957 Register hash) { |
| 5958 // hash += hash << 3; | 5958 // hash += hash << 3; |
| 5959 __ sll(at, hash, 3); | 5959 __ sll(at, hash, 3); |
| 5960 __ addu(hash, hash, at); | 5960 __ addu(hash, hash, at); |
| 5961 // hash ^= hash >> 11; | 5961 // hash ^= hash >> 11; |
| 5962 __ srl(at, hash, 11); | 5962 __ srl(at, hash, 11); |
| 5963 __ xor_(hash, hash, at); | 5963 __ xor_(hash, hash, at); |
| 5964 // hash += hash << 15; | 5964 // hash += hash << 15; |
| 5965 __ sll(at, hash, 15); | 5965 __ sll(at, hash, 15); |
| 5966 __ addu(hash, hash, at); | 5966 __ addu(hash, hash, at); |
| 5967 | 5967 |
| 5968 uint32_t kHashShiftCutOffMask = (1 << (32 - String::kHashShift)) - 1; | 5968 __ li(at, Operand(String::kHashBitMask)); |
| 5969 __ li(at, Operand(kHashShiftCutOffMask)); | |
| 5970 __ and_(hash, hash, at); | 5969 __ and_(hash, hash, at); |
| 5971 | 5970 |
| 5972 // if (hash == 0) hash = 27; | 5971 // if (hash == 0) hash = 27; |
| 5973 __ ori(at, zero_reg, 27); | 5972 __ ori(at, zero_reg, StringHasher::kZeroHash); |
| 5974 __ movz(hash, at, hash); | 5973 __ movz(hash, at, hash); |
| 5975 } | 5974 } |
| 5976 | 5975 |
| 5977 | 5976 |
| 5978 void SubStringStub::Generate(MacroAssembler* masm) { | 5977 void SubStringStub::Generate(MacroAssembler* masm) { |
| 5979 Label runtime; | 5978 Label runtime; |
| 5980 // Stack frame on entry. | 5979 // Stack frame on entry. |
| 5981 // ra: return address | 5980 // ra: return address |
| 5982 // sp[0]: to | 5981 // sp[0]: to |
| 5983 // sp[4]: from | 5982 // sp[4]: from |
| (...skipping 1598 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 7582 __ Ret(USE_DELAY_SLOT); | 7581 __ Ret(USE_DELAY_SLOT); |
| 7583 __ mov(v0, a0); | 7582 __ mov(v0, a0); |
| 7584 } | 7583 } |
| 7585 | 7584 |
| 7586 | 7585 |
| 7587 #undef __ | 7586 #undef __ |
| 7588 | 7587 |
| 7589 } } // namespace v8::internal | 7588 } } // namespace v8::internal |
| 7590 | 7589 |
| 7591 #endif // V8_TARGET_ARCH_MIPS | 7590 #endif // V8_TARGET_ARCH_MIPS |
| OLD | NEW |