| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #if V8_TARGET_ARCH_MIPS64 | 5 #if V8_TARGET_ARCH_MIPS64 |
| 6 | 6 |
| 7 #include "src/code-stubs.h" | 7 #include "src/code-stubs.h" |
| 8 #include "src/api-arguments.h" | 8 #include "src/api-arguments.h" |
| 9 #include "src/bootstrapper.h" | 9 #include "src/bootstrapper.h" |
| 10 #include "src/codegen.h" | 10 #include "src/codegen.h" |
| 11 #include "src/ic/handler-compiler.h" | 11 #include "src/ic/handler-compiler.h" |
| 12 #include "src/ic/ic.h" | 12 #include "src/ic/ic.h" |
| 13 #include "src/ic/stub-cache.h" | 13 #include "src/ic/stub-cache.h" |
| 14 #include "src/isolate.h" | 14 #include "src/isolate.h" |
| 15 #include "src/mips64/code-stubs-mips64.h" | 15 #include "src/mips64/code-stubs-mips64.h" |
| 16 #include "src/regexp/jsregexp.h" | 16 #include "src/regexp/jsregexp.h" |
| 17 #include "src/regexp/regexp-macro-assembler.h" | 17 #include "src/regexp/regexp-macro-assembler.h" |
| 18 #include "src/runtime/runtime.h" | 18 #include "src/runtime/runtime.h" |
| 19 | 19 |
| 20 namespace v8 { | 20 namespace v8 { |
| 21 namespace internal { | 21 namespace internal { |
| 22 | 22 |
| 23 #define __ ACCESS_MASM(masm) | 23 #define __ ACCESS_MASM(masm) |
| 24 | 24 |
| 25 void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) { | 25 void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) { |
| 26 __ dsll(t9, a0, kPointerSizeLog2); | 26 __ dsll(t9, a0, kPointerSizeLog2); |
| 27 __ Daddu(t9, sp, t9); | 27 __ Daddu(t9, sp, t9); |
| 28 __ sd(a1, MemOperand(t9, 0)); | 28 __ Sd(a1, MemOperand(t9, 0)); |
| 29 __ Push(a1); | 29 __ Push(a1); |
| 30 __ Push(a2); | 30 __ Push(a2); |
| 31 __ Daddu(a0, a0, 3); | 31 __ Daddu(a0, a0, 3); |
| 32 __ TailCallRuntime(Runtime::kNewArray); | 32 __ TailCallRuntime(Runtime::kNewArray); |
| 33 } | 33 } |
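
The pattern of this change is uniform across the file: raw MIPS64 memory instructions (sd, ld, lw, lwu, lbu, ldc1, ...) are swapped for their capitalized MacroAssembler counterparts (Sd, Ld, Lw, Lwu, Lbu, Ldc1, ...). A minimal sketch of what such a wrapper could look like, assuming the macro simply centralizes the raw store so that offset or alignment handling can later live in one place (hypothetical, not the actual V8 implementation):

    // Hypothetical wrapper for illustration only.
    void MacroAssembler::Sd(Register rd, const MemOperand& rs) {
      sd(rd, rs);  // delegate to the raw store-doubleword encoding
    }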
| 34 | 34 |
| 35 static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow, | 35 static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow, |
| 36 Condition cc); | 36 Condition cc); |
| 37 static void EmitSmiNonsmiComparison(MacroAssembler* masm, | 37 static void EmitSmiNonsmiComparison(MacroAssembler* masm, |
| 38 Register lhs, | 38 Register lhs, |
| (...skipping 15 matching lines...) |
| 54 int param_count = descriptor.GetRegisterParameterCount(); | 54 int param_count = descriptor.GetRegisterParameterCount(); |
| 55 { | 55 { |
| 56 // Call the runtime system in a fresh internal frame. | 56 // Call the runtime system in a fresh internal frame. |
| 57 FrameScope scope(masm, StackFrame::INTERNAL); | 57 FrameScope scope(masm, StackFrame::INTERNAL); |
| 58 DCHECK((param_count == 0) || | 58 DCHECK((param_count == 0) || |
| 59 a0.is(descriptor.GetRegisterParameter(param_count - 1))); | 59 a0.is(descriptor.GetRegisterParameter(param_count - 1))); |
| 60 // Push arguments, adjust sp. | 60 // Push arguments, adjust sp. |
| 61 __ Dsubu(sp, sp, Operand(param_count * kPointerSize)); | 61 __ Dsubu(sp, sp, Operand(param_count * kPointerSize)); |
| 62 for (int i = 0; i < param_count; ++i) { | 62 for (int i = 0; i < param_count; ++i) { |
| 63 // Store argument to stack. | 63 // Store argument to stack. |
| 64 __ sd(descriptor.GetRegisterParameter(i), | 64 __ Sd(descriptor.GetRegisterParameter(i), |
| 65 MemOperand(sp, (param_count - 1 - i) * kPointerSize)); | 65 MemOperand(sp, (param_count - 1 - i) * kPointerSize)); |
| 66 } | 66 } |
| 67 __ CallExternalReference(miss, param_count); | 67 __ CallExternalReference(miss, param_count); |
| 68 } | 68 } |
| 69 | 69 |
| 70 __ Ret(); | 70 __ Ret(); |
| 71 } | 71 } |
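
In the loop above, register parameter i is stored at sp + (param_count - 1 - i) * kPointerSize, so parameter 0 lands deepest on the stack and the last parameter sits at sp + 0. A standalone sketch of that offset computation, assuming 8-byte pointers:

    #include <cstdio>

    int main() {
      const int kPointerSize = 8;
      const int param_count = 3;
      for (int i = 0; i < param_count; ++i) {
        // param 0 -> sp + 16, param 1 -> sp + 8, param 2 -> sp + 0
        std::printf("param %d -> sp + %d\n", i,
                    (param_count - 1 - i) * kPointerSize);
      }
      return 0;
    }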
| 72 | 72 |
| 73 | 73 |
| 74 void DoubleToIStub::Generate(MacroAssembler* masm) { | 74 void DoubleToIStub::Generate(MacroAssembler* masm) { |
| 75 Label out_of_range, only_low, negate, done; | 75 Label out_of_range, only_low, negate, done; |
| 76 Register input_reg = source(); | 76 Register input_reg = source(); |
| 77 Register result_reg = destination(); | 77 Register result_reg = destination(); |
| 78 | 78 |
| 79 int double_offset = offset(); | 79 int double_offset = offset(); |
| 80 // Account for saved regs if input is sp. | 80 // Account for saved regs if input is sp. |
| 81 if (input_reg.is(sp)) double_offset += 3 * kPointerSize; | 81 if (input_reg.is(sp)) double_offset += 3 * kPointerSize; |
| 82 | 82 |
| 83 Register scratch = | 83 Register scratch = |
| 84 GetRegisterThatIsNotOneOf(input_reg, result_reg); | 84 GetRegisterThatIsNotOneOf(input_reg, result_reg); |
| 85 Register scratch2 = | 85 Register scratch2 = |
| 86 GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch); | 86 GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch); |
| 87 Register scratch3 = | 87 Register scratch3 = |
| 88 GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch2); | 88 GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch2); |
| 89 DoubleRegister double_scratch = kLithiumScratchDouble; | 89 DoubleRegister double_scratch = kLithiumScratchDouble; |
| 90 | 90 |
| 91 __ Push(scratch, scratch2, scratch3); | 91 __ Push(scratch, scratch2, scratch3); |
| 92 if (!skip_fastpath()) { | 92 if (!skip_fastpath()) { |
| 93 // Load double input. | 93 // Load double input. |
| 94 __ ldc1(double_scratch, MemOperand(input_reg, double_offset)); | 94 __ Ldc1(double_scratch, MemOperand(input_reg, double_offset)); |
| 95 | 95 |
| 96 // Clear cumulative exception flags and save the FCSR. | 96 // Clear cumulative exception flags and save the FCSR. |
| 97 __ cfc1(scratch2, FCSR); | 97 __ cfc1(scratch2, FCSR); |
| 98 __ ctc1(zero_reg, FCSR); | 98 __ ctc1(zero_reg, FCSR); |
| 99 | 99 |
| 100 // Try a conversion to a signed integer. | 100 // Try a conversion to a signed integer. |
| 101 __ Trunc_w_d(double_scratch, double_scratch); | 101 __ Trunc_w_d(double_scratch, double_scratch); |
| 102 // Move the converted value into the result register. | 102 // Move the converted value into the result register. |
| 103 __ mfc1(scratch3, double_scratch); | 103 __ mfc1(scratch3, double_scratch); |
| 104 | 104 |
| (...skipping 11 matching lines...) |
| 116 __ Branch(&error, ne, scratch, Operand(zero_reg)); | 116 __ Branch(&error, ne, scratch, Operand(zero_reg)); |
| 117 __ Move(result_reg, scratch3); | 117 __ Move(result_reg, scratch3); |
| 118 __ Branch(&done); | 118 __ Branch(&done); |
| 119 __ bind(&error); | 119 __ bind(&error); |
| 120 } | 120 } |
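
The fast path above clears the FCSR, truncates, and bails to &error if any FPU exception flag was raised by an out-of-range or NaN input. A portable analogue of the same decision, offered as a sketch rather than the stub's exact mechanism, since it uses a range check where the stub consults FCSR:

    #include <cmath>
    #include <cstdint>

    bool TryTruncateToInt32(double input, int32_t* out) {
      double t = std::trunc(input);
      // NaN fails both comparisons, so this also rejects NaN inputs.
      if (!(t >= INT32_MIN && t <= INT32_MAX)) return false;
      *out = static_cast<int32_t>(t);  // safe: t is in range
      return true;
    }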
| 121 | 121 |
| 122 // Load the double value and perform a manual truncation. | 122 // Load the double value and perform a manual truncation. |
| 123 Register input_high = scratch2; | 123 Register input_high = scratch2; |
| 124 Register input_low = scratch3; | 124 Register input_low = scratch3; |
| 125 | 125 |
| 126 __ lw(input_low, | 126 __ Lw(input_low, |
| 127 MemOperand(input_reg, double_offset + Register::kMantissaOffset)); | 127 MemOperand(input_reg, double_offset + Register::kMantissaOffset)); |
| 128 __ lw(input_high, | 128 __ Lw(input_high, |
| 129 MemOperand(input_reg, double_offset + Register::kExponentOffset)); | 129 MemOperand(input_reg, double_offset + Register::kExponentOffset)); |
| 130 | 130 |
| 131 Label normal_exponent, restore_sign; | 131 Label normal_exponent, restore_sign; |
| 132 // Extract the biased exponent in result. | 132 // Extract the biased exponent in result. |
| 133 __ Ext(result_reg, | 133 __ Ext(result_reg, |
| 134 input_high, | 134 input_high, |
| 135 HeapNumber::kExponentShift, | 135 HeapNumber::kExponentShift, |
| 136 HeapNumber::kExponentBits); | 136 HeapNumber::kExponentBits); |
| 137 | 137 |
| 138 // Check for Infinity and NaNs, which should return 0. | 138 // Check for Infinity and NaNs, which should return 0. |
| (...skipping 135 matching lines...) |
| 274 // x < x is false regardless. For the others here is some code to check | 274 // x < x is false regardless. For the others here is some code to check |
| 275 // for NaN. | 275 // for NaN. |
| 276 if (cc != lt && cc != gt) { | 276 if (cc != lt && cc != gt) { |
| 277 __ bind(&heap_number); | 277 __ bind(&heap_number); |
| 278 // It is a heap number, so return non-equal if it's NaN and equal if it's | 278 // It is a heap number, so return non-equal if it's NaN and equal if it's |
| 279 // not NaN. | 279 // not NaN. |
| 280 | 280 |
| 281 // The representation of NaN values has all exponent bits (52..62) set, | 281 // The representation of NaN values has all exponent bits (52..62) set, |
| 282 // and not all mantissa bits (0..51) clear. | 282 // and not all mantissa bits (0..51) clear. |
| 283 // Read top bits of double representation (second word of value). | 283 // Read top bits of double representation (second word of value). |
| 284 __ lwu(a6, FieldMemOperand(a0, HeapNumber::kExponentOffset)); | 284 __ Lwu(a6, FieldMemOperand(a0, HeapNumber::kExponentOffset)); |
| 285 // Test that exponent bits are all set. | 285 // Test that exponent bits are all set. |
| 286 __ And(a7, a6, Operand(exp_mask_reg)); | 286 __ And(a7, a6, Operand(exp_mask_reg)); |
| 287 // If not all bits are set (ne cond), it is not a NaN: objects are equal. | 287 // If not all bits are set (ne cond), it is not a NaN: objects are equal. |
| 288 __ Branch(&return_equal, ne, a7, Operand(exp_mask_reg)); | 288 __ Branch(&return_equal, ne, a7, Operand(exp_mask_reg)); |
| 289 | 289 |
| 290 // Shift out flag and all exponent bits, retaining only mantissa. | 290 // Shift out flag and all exponent bits, retaining only mantissa. |
| 291 __ sll(a6, a6, HeapNumber::kNonMantissaBitsInTopWord); | 291 __ sll(a6, a6, HeapNumber::kNonMantissaBitsInTopWord); |
| 292 // Or with all low-bits of mantissa. | 292 // Or with all low-bits of mantissa. |
| 293 __ lwu(a7, FieldMemOperand(a0, HeapNumber::kMantissaOffset)); | 293 __ Lwu(a7, FieldMemOperand(a0, HeapNumber::kMantissaOffset)); |
| 294 __ Or(v0, a7, Operand(a6)); | 294 __ Or(v0, a7, Operand(a6)); |
| 295 // For equal we already have the right value in v0: Return zero (equal) | 295 // For equal we already have the right value in v0: Return zero (equal) |
| 296 // if all bits in mantissa are zero (it's an Infinity) and non-zero if | 296 // if all bits in mantissa are zero (it's an Infinity) and non-zero if |
| 297 // not (it's a NaN). For <= and >= we need to load v0 with the failing | 297 // not (it's a NaN). For <= and >= we need to load v0 with the failing |
| 298 // value if it's a NaN. | 298 // value if it's a NaN. |
| 299 if (cc != eq) { | 299 if (cc != eq) { |
| 300 // All-zero means Infinity means equal. | 300 // All-zero means Infinity means equal. |
| 301 __ Ret(eq, v0, Operand(zero_reg)); | 301 __ Ret(eq, v0, Operand(zero_reg)); |
| 302 DCHECK(is_int16(GREATER) && is_int16(LESS)); | 302 DCHECK(is_int16(GREATER) && is_int16(LESS)); |
| 303 __ Ret(USE_DELAY_SLOT); | 303 __ Ret(USE_DELAY_SLOT); |
| (...skipping 32 matching lines...) |
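
The NaN test above follows directly from the IEEE-754 encoding restated in the comments: a double is a NaN exactly when all eleven exponent bits (52..62) are set and the 52-bit mantissa is non-zero (a set exponent with an all-zero mantissa is an Infinity). The same check written against the raw bits:

    #include <cstdint>
    #include <cstring>

    bool IsNaNBits(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      uint64_t exponent = (bits >> 52) & 0x7FF;              // bits 52..62
      uint64_t mantissa = bits & ((uint64_t{1} << 52) - 1);  // bits 0..51
      return exponent == 0x7FF && mantissa != 0;
    }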
| 336 } else { | 336 } else { |
| 337 // Smi compared non-strictly with a non-Smi non-heap-number. Call | 337 // Smi compared non-strictly with a non-Smi non-heap-number. Call |
| 338 // the runtime. | 338 // the runtime. |
| 339 __ Branch(slow, ne, t0, Operand(HEAP_NUMBER_TYPE)); | 339 __ Branch(slow, ne, t0, Operand(HEAP_NUMBER_TYPE)); |
| 340 } | 340 } |
| 341 // Rhs is a smi, lhs is a number. | 341 // Rhs is a smi, lhs is a number. |
| 342 // Convert smi rhs to double. | 342 // Convert smi rhs to double. |
| 343 __ SmiUntag(at, rhs); | 343 __ SmiUntag(at, rhs); |
| 344 __ mtc1(at, f14); | 344 __ mtc1(at, f14); |
| 345 __ cvt_d_w(f14, f14); | 345 __ cvt_d_w(f14, f14); |
| 346 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset)); | 346 __ Ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset)); |
| 347 | 347 |
| 348 // We now have both loaded as doubles. | 348 // We now have both loaded as doubles. |
| 349 __ jmp(both_loaded_as_doubles); | 349 __ jmp(both_loaded_as_doubles); |
| 350 | 350 |
| 351 __ bind(&lhs_is_smi); | 351 __ bind(&lhs_is_smi); |
| 352 // Lhs is a Smi. Check whether the non-smi is a heap number. | 352 // Lhs is a Smi. Check whether the non-smi is a heap number. |
| 353 __ GetObjectType(rhs, t0, t0); | 353 __ GetObjectType(rhs, t0, t0); |
| 354 if (strict) { | 354 if (strict) { |
| 355 // If lhs was not a number and rhs was a Smi then strict equality cannot | 355 // If lhs was not a number and rhs was a Smi then strict equality cannot |
| 356 // succeed. Return non-equal. | 356 // succeed. Return non-equal. |
| 357 __ Ret(USE_DELAY_SLOT, ne, t0, Operand(HEAP_NUMBER_TYPE)); | 357 __ Ret(USE_DELAY_SLOT, ne, t0, Operand(HEAP_NUMBER_TYPE)); |
| 358 __ li(v0, Operand(1)); | 358 __ li(v0, Operand(1)); |
| 359 } else { | 359 } else { |
| 360 // Smi compared non-strictly with a non-Smi non-heap-number. Call | 360 // Smi compared non-strictly with a non-Smi non-heap-number. Call |
| 361 // the runtime. | 361 // the runtime. |
| 362 __ Branch(slow, ne, t0, Operand(HEAP_NUMBER_TYPE)); | 362 __ Branch(slow, ne, t0, Operand(HEAP_NUMBER_TYPE)); |
| 363 } | 363 } |
| 364 | 364 |
| 365 // Lhs is a smi, rhs is a number. | 365 // Lhs is a smi, rhs is a number. |
| 366 // Convert smi lhs to double. | 366 // Convert smi lhs to double. |
| 367 __ SmiUntag(at, lhs); | 367 __ SmiUntag(at, lhs); |
| 368 __ mtc1(at, f12); | 368 __ mtc1(at, f12); |
| 369 __ cvt_d_w(f12, f12); | 369 __ cvt_d_w(f12, f12); |
| 370 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset)); | 370 __ Ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset)); |
| 371 // Fall through to both_loaded_as_doubles. | 371 // Fall through to both_loaded_as_doubles. |
| 372 } | 372 } |
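
SmiUntag followed by mtc1/cvt_d_w recovers the integer payload and widens it to a double in the FPU. The arithmetic, as a sketch assuming V8's 64-bit Smi encoding with the 32-bit payload in the upper word of the tagged value:

    #include <cstdint>

    // Assumption: 64-bit Smi encoding, payload in the upper 32 bits.
    int32_t SmiUntag(int64_t tagged) {
      return static_cast<int32_t>(tagged >> 32);
    }

    double SmiToDouble(int64_t tagged) {
      // Corresponds to mtc1 + cvt_d_w: move to the FPU, convert word to double.
      return static_cast<double>(SmiUntag(tagged));
    }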
| 373 | 373 |
| 374 | 374 |
| 375 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, | 375 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, |
| 376 Register lhs, | 376 Register lhs, |
| 377 Register rhs) { | 377 Register rhs) { |
| 378 // If either operand is a JS object or an oddball value, then they are | 378 // If either operand is a JS object or an oddball value, then they are |
| 379 // not equal since their pointers are different. | 379 // not equal since their pointers are different. |
| 380 // There is no test for undetectability in strict equality. | 380 // There is no test for undetectability in strict equality. |
| (...skipping 30 matching lines...) |
| 411 | 411 |
| 412 | 412 |
| 413 static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm, | 413 static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm, |
| 414 Register lhs, | 414 Register lhs, |
| 415 Register rhs, | 415 Register rhs, |
| 416 Label* both_loaded_as_doubles, | 416 Label* both_loaded_as_doubles, |
| 417 Label* not_heap_numbers, | 417 Label* not_heap_numbers, |
| 418 Label* slow) { | 418 Label* slow) { |
| 419 __ GetObjectType(lhs, a3, a2); | 419 __ GetObjectType(lhs, a3, a2); |
| 420 __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE)); | 420 __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE)); |
| 421 __ ld(a2, FieldMemOperand(rhs, HeapObject::kMapOffset)); | 421 __ Ld(a2, FieldMemOperand(rhs, HeapObject::kMapOffset)); |
| 422 // If first was a heap number & second wasn't, go to slow case. | 422 // If first was a heap number & second wasn't, go to slow case. |
| 423 __ Branch(slow, ne, a3, Operand(a2)); | 423 __ Branch(slow, ne, a3, Operand(a2)); |
| 424 | 424 |
| 425 // Both are heap numbers. Load them up then jump to the code we have | 425 // Both are heap numbers. Load them up then jump to the code we have |
| 426 // for that. | 426 // for that. |
| 427 __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset)); | 427 __ Ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset)); |
| 428 __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset)); | 428 __ Ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset)); |
| 429 | 429 |
| 430 __ jmp(both_loaded_as_doubles); | 430 __ jmp(both_loaded_as_doubles); |
| 431 } | 431 } |
| 432 | 432 |
| 433 | 433 |
| 434 // Fast negative check for internalized-to-internalized equality. | 434 // Fast negative check for internalized-to-internalized equality. |
| 435 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm, | 435 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm, |
| 436 Register lhs, Register rhs, | 436 Register lhs, Register rhs, |
| 437 Label* possible_strings, | 437 Label* possible_strings, |
| 438 Label* runtime_call) { | 438 Label* runtime_call) { |
| (...skipping 12 matching lines...) |
| 451 __ And(at, a3, Operand(kIsNotInternalizedMask)); | 451 __ And(at, a3, Operand(kIsNotInternalizedMask)); |
| 452 __ Branch(possible_strings, ne, at, Operand(zero_reg)); | 452 __ Branch(possible_strings, ne, at, Operand(zero_reg)); |
| 453 | 453 |
| 454 // Both are internalized. We already checked they weren't the same pointer so | 454 // Both are internalized. We already checked they weren't the same pointer so |
| 455 // they are not equal. Return non-equal by returning the non-zero object | 455 // they are not equal. Return non-equal by returning the non-zero object |
| 456 // pointer in v0. | 456 // pointer in v0. |
| 457 __ Ret(USE_DELAY_SLOT); | 457 __ Ret(USE_DELAY_SLOT); |
| 458 __ mov(v0, a0); // In delay slot. | 458 __ mov(v0, a0); // In delay slot. |
| 459 | 459 |
| 460 __ bind(&object_test); | 460 __ bind(&object_test); |
| 461 __ ld(a2, FieldMemOperand(lhs, HeapObject::kMapOffset)); | 461 __ Ld(a2, FieldMemOperand(lhs, HeapObject::kMapOffset)); |
| 462 __ ld(a3, FieldMemOperand(rhs, HeapObject::kMapOffset)); | 462 __ Ld(a3, FieldMemOperand(rhs, HeapObject::kMapOffset)); |
| 463 __ lbu(t0, FieldMemOperand(a2, Map::kBitFieldOffset)); | 463 __ Lbu(t0, FieldMemOperand(a2, Map::kBitFieldOffset)); |
| 464 __ lbu(t1, FieldMemOperand(a3, Map::kBitFieldOffset)); | 464 __ Lbu(t1, FieldMemOperand(a3, Map::kBitFieldOffset)); |
| 465 __ And(at, t0, Operand(1 << Map::kIsUndetectable)); | 465 __ And(at, t0, Operand(1 << Map::kIsUndetectable)); |
| 466 __ Branch(&undetectable, ne, at, Operand(zero_reg)); | 466 __ Branch(&undetectable, ne, at, Operand(zero_reg)); |
| 467 __ And(at, t1, Operand(1 << Map::kIsUndetectable)); | 467 __ And(at, t1, Operand(1 << Map::kIsUndetectable)); |
| 468 __ Branch(&return_unequal, ne, at, Operand(zero_reg)); | 468 __ Branch(&return_unequal, ne, at, Operand(zero_reg)); |
| 469 | 469 |
| 470 __ GetInstanceType(a2, a2); | 470 __ GetInstanceType(a2, a2); |
| 471 __ Branch(runtime_call, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE)); | 471 __ Branch(runtime_call, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE)); |
| 472 __ GetInstanceType(a3, a3); | 472 __ GetInstanceType(a3, a3); |
| 473 __ Branch(runtime_call, lt, a3, Operand(FIRST_JS_RECEIVER_TYPE)); | 473 __ Branch(runtime_call, lt, a3, Operand(FIRST_JS_RECEIVER_TYPE)); |
| 474 | 474 |
| (...skipping 278 matching lines...) |
| 753 const DoubleRegister double_scratch = f6; | 753 const DoubleRegister double_scratch = f6; |
| 754 const FPURegister single_scratch = f8; | 754 const FPURegister single_scratch = f8; |
| 755 const Register scratch = t1; | 755 const Register scratch = t1; |
| 756 const Register scratch2 = a7; | 756 const Register scratch2 = a7; |
| 757 | 757 |
| 758 Label call_runtime, done, int_exponent; | 758 Label call_runtime, done, int_exponent; |
| 759 if (exponent_type() == TAGGED) { | 759 if (exponent_type() == TAGGED) { |
| 760 // Base is already in double_base. | 760 // Base is already in double_base. |
| 761 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent); | 761 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent); |
| 762 | 762 |
| 763 __ ldc1(double_exponent, | 763 __ Ldc1(double_exponent, |
| 764 FieldMemOperand(exponent, HeapNumber::kValueOffset)); | 764 FieldMemOperand(exponent, HeapNumber::kValueOffset)); |
| 765 } | 765 } |
| 766 | 766 |
| 767 if (exponent_type() != INTEGER) { | 767 if (exponent_type() != INTEGER) { |
| 768 Label int_exponent_convert; | 768 Label int_exponent_convert; |
| 769 // Detect integer exponents stored as double. | 769 // Detect integer exponents stored as double. |
| 770 __ EmitFPUTruncate(kRoundToMinusInf, | 770 __ EmitFPUTruncate(kRoundToMinusInf, |
| 771 scratch, | 771 scratch, |
| 772 double_exponent, | 772 double_exponent, |
| 773 at, | 773 at, |
| (...skipping 215 matching lines...) |
| 989 __ addiupc(ra, kNumInstructionsToJump + 1); | 989 __ addiupc(ra, kNumInstructionsToJump + 1); |
| 990 } else { | 990 } else { |
| 991 // On MIPS before R6, this branch-and-link sequence is needed to obtain | 991 // On MIPS before R6, this branch-and-link sequence is needed to obtain |
| 992 // the current PC, which bal saves to the ra register. | 992 // the current PC, which bal saves to the ra register. |
| 993 __ bal(&find_ra); // bal exposes branch delay slot. | 993 __ bal(&find_ra); // bal exposes branch delay slot. |
| 994 __ Daddu(ra, ra, kNumInstructionsToJump * Instruction::kInstrSize); | 994 __ Daddu(ra, ra, kNumInstructionsToJump * Instruction::kInstrSize); |
| 995 } | 995 } |
| 996 __ bind(&find_ra); | 996 __ bind(&find_ra); |
| 997 | 997 |
| 998 // This spot was reserved in EnterExitFrame. | 998 // This spot was reserved in EnterExitFrame. |
| 999 __ sd(ra, MemOperand(sp, result_stack_size)); | 999 __ Sd(ra, MemOperand(sp, result_stack_size)); |
| 1000 // Stack space reservation moved to the branch delay slot below. | 1000 // Stack space reservation moved to the branch delay slot below. |
| 1001 // Stack is still aligned. | 1001 // Stack is still aligned. |
| 1002 | 1002 |
| 1003 // Call the C routine. | 1003 // Call the C routine. |
| 1004 __ mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC. | 1004 __ mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC. |
| 1005 __ jalr(t9); | 1005 __ jalr(t9); |
| 1006 // Set up sp in the delay slot. | 1006 // Set up sp in the delay slot. |
| 1007 __ daddiu(sp, sp, -kCArgsSlotsSize); | 1007 __ daddiu(sp, sp, -kCArgsSlotsSize); |
| 1008 // Make sure the stored 'ra' points to this position. | 1008 // Make sure the stored 'ra' points to this position. |
| 1009 DCHECK_EQ(kNumInstructionsToJump, | 1009 DCHECK_EQ(kNumInstructionsToJump, |
| 1010 masm->InstructionsGeneratedSince(&find_ra)); | 1010 masm->InstructionsGeneratedSince(&find_ra)); |
| 1011 } | 1011 } |
| 1012 if (result_size() > 2) { | 1012 if (result_size() > 2) { |
| 1013 DCHECK_EQ(3, result_size()); | 1013 DCHECK_EQ(3, result_size()); |
| 1014 // Read result values stored on stack. | 1014 // Read result values stored on stack. |
| 1015 __ ld(a0, MemOperand(v0, 2 * kPointerSize)); | 1015 __ Ld(a0, MemOperand(v0, 2 * kPointerSize)); |
| 1016 __ ld(v1, MemOperand(v0, 1 * kPointerSize)); | 1016 __ Ld(v1, MemOperand(v0, 1 * kPointerSize)); |
| 1017 __ ld(v0, MemOperand(v0, 0 * kPointerSize)); | 1017 __ Ld(v0, MemOperand(v0, 0 * kPointerSize)); |
| 1018 } | 1018 } |
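
When the runtime call produces three words, v0 holds an address and the values are reloaded from memory, which is the usual treatment of aggregates too large for the two return registers. A sketch under the assumption that the n64 ABI returns such a struct through caller-provided storage:

    // A three-pointer struct does not fit in v0/v1, so the ABI
    // returns it via memory (illustration only).
    struct ThreeWords {
      void* first;
      void* second;
      void* third;
    };

    ThreeWords MakeResult(void* a, void* b, void* c) {
      return ThreeWords{a, b, c};
    }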
| 1019 // Result returned in v0, v1:v0 or a0:v1:v0 - do not destroy these registers! | 1019 // Result returned in v0, v1:v0 or a0:v1:v0 - do not destroy these registers! |
| 1020 | 1020 |
| 1021 // Check result for exception sentinel. | 1021 // Check result for exception sentinel. |
| 1022 Label exception_returned; | 1022 Label exception_returned; |
| 1023 __ LoadRoot(a4, Heap::kExceptionRootIndex); | 1023 __ LoadRoot(a4, Heap::kExceptionRootIndex); |
| 1024 __ Branch(&exception_returned, eq, a4, Operand(v0)); | 1024 __ Branch(&exception_returned, eq, a4, Operand(v0)); |
| 1025 | 1025 |
| 1026 // Check that there is no pending exception, otherwise we | 1026 // Check that there is no pending exception, otherwise we |
| 1027 // should have returned the exception sentinel. | 1027 // should have returned the exception sentinel. |
| 1028 if (FLAG_debug_code) { | 1028 if (FLAG_debug_code) { |
| 1029 Label okay; | 1029 Label okay; |
| 1030 ExternalReference pending_exception_address( | 1030 ExternalReference pending_exception_address( |
| 1031 Isolate::kPendingExceptionAddress, isolate()); | 1031 Isolate::kPendingExceptionAddress, isolate()); |
| 1032 __ li(a2, Operand(pending_exception_address)); | 1032 __ li(a2, Operand(pending_exception_address)); |
| 1033 __ ld(a2, MemOperand(a2)); | 1033 __ Ld(a2, MemOperand(a2)); |
| 1034 __ LoadRoot(a4, Heap::kTheHoleValueRootIndex); | 1034 __ LoadRoot(a4, Heap::kTheHoleValueRootIndex); |
| 1035 // Cannot use check here, as it attempts to generate a call into the runtime. | 1035 // Cannot use check here, as it attempts to generate a call into the runtime. |
| 1036 __ Branch(&okay, eq, a4, Operand(a2)); | 1036 __ Branch(&okay, eq, a4, Operand(a2)); |
| 1037 __ stop("Unexpected pending exception"); | 1037 __ stop("Unexpected pending exception"); |
| 1038 __ bind(&okay); | 1038 __ bind(&okay); |
| 1039 } | 1039 } |
| 1040 | 1040 |
| 1041 // Exit C frame and return. | 1041 // Exit C frame and return. |
| 1042 // v0:v1: result | 1042 // v0:v1: result |
| 1043 // sp: stack pointer | 1043 // sp: stack pointer |
| (...skipping 30 matching lines...) |
| 1074 FrameScope scope(masm, StackFrame::MANUAL); | 1074 FrameScope scope(masm, StackFrame::MANUAL); |
| 1075 __ PrepareCallCFunction(3, 0, a0); | 1075 __ PrepareCallCFunction(3, 0, a0); |
| 1076 __ mov(a0, zero_reg); | 1076 __ mov(a0, zero_reg); |
| 1077 __ mov(a1, zero_reg); | 1077 __ mov(a1, zero_reg); |
| 1078 __ li(a2, Operand(ExternalReference::isolate_address(isolate()))); | 1078 __ li(a2, Operand(ExternalReference::isolate_address(isolate()))); |
| 1079 __ CallCFunction(find_handler, 3); | 1079 __ CallCFunction(find_handler, 3); |
| 1080 } | 1080 } |
| 1081 | 1081 |
| 1082 // Retrieve the handler context, SP and FP. | 1082 // Retrieve the handler context, SP and FP. |
| 1083 __ li(cp, Operand(pending_handler_context_address)); | 1083 __ li(cp, Operand(pending_handler_context_address)); |
| 1084 __ ld(cp, MemOperand(cp)); | 1084 __ Ld(cp, MemOperand(cp)); |
| 1085 __ li(sp, Operand(pending_handler_sp_address)); | 1085 __ li(sp, Operand(pending_handler_sp_address)); |
| 1086 __ ld(sp, MemOperand(sp)); | 1086 __ Ld(sp, MemOperand(sp)); |
| 1087 __ li(fp, Operand(pending_handler_fp_address)); | 1087 __ li(fp, Operand(pending_handler_fp_address)); |
| 1088 __ ld(fp, MemOperand(fp)); | 1088 __ Ld(fp, MemOperand(fp)); |
| 1089 | 1089 |
| 1090 // If the handler is a JS frame, restore the context to the frame. Note that | 1090 // If the handler is a JS frame, restore the context to the frame. Note that |
| 1091 // the context will be set to (cp == 0) for non-JS frames. | 1091 // the context will be set to (cp == 0) for non-JS frames. |
| 1092 Label zero; | 1092 Label zero; |
| 1093 __ Branch(&zero, eq, cp, Operand(zero_reg)); | 1093 __ Branch(&zero, eq, cp, Operand(zero_reg)); |
| 1094 __ sd(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); | 1094 __ Sd(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
| 1095 __ bind(&zero); | 1095 __ bind(&zero); |
| 1096 | 1096 |
| 1097 // Compute the handler entry address and jump to it. | 1097 // Compute the handler entry address and jump to it. |
| 1098 __ li(a1, Operand(pending_handler_code_address)); | 1098 __ li(a1, Operand(pending_handler_code_address)); |
| 1099 __ ld(a1, MemOperand(a1)); | 1099 __ Ld(a1, MemOperand(a1)); |
| 1100 __ li(a2, Operand(pending_handler_offset_address)); | 1100 __ li(a2, Operand(pending_handler_offset_address)); |
| 1101 __ ld(a2, MemOperand(a2)); | 1101 __ Ld(a2, MemOperand(a2)); |
| 1102 __ Daddu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag)); | 1102 __ Daddu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag)); |
| 1103 __ Daddu(t9, a1, a2); | 1103 __ Daddu(t9, a1, a2); |
| 1104 __ Jump(t9); | 1104 __ Jump(t9); |
| 1105 } | 1105 } |
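
The last three instructions compute the handler's entry address: skip the code object's heap tag and header to reach the instruction stream, then add the handler offset. In plain arithmetic, with both constants labeled as illustrative assumptions:

    #include <cstdint>

    constexpr intptr_t kHeapObjectTag = 1;    // assumed tag value
    constexpr intptr_t kCodeHeaderSize = 96;  // hypothetical header size

    intptr_t HandlerEntry(intptr_t tagged_code, intptr_t handler_offset) {
      // Mirrors: Daddu(a1, a1, Code::kHeaderSize - kHeapObjectTag);
      //          Daddu(t9, a1, a2); Jump(t9);
      return tagged_code + (kCodeHeaderSize - kHeapObjectTag) + handler_offset;
    }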
| 1106 | 1106 |
| 1107 | 1107 |
| 1108 void JSEntryStub::Generate(MacroAssembler* masm) { | 1108 void JSEntryStub::Generate(MacroAssembler* masm) { |
| 1109 Label invoke, handler_entry, exit; | 1109 Label invoke, handler_entry, exit; |
| 1110 Isolate* isolate = masm->isolate(); | 1110 Isolate* isolate = masm->isolate(); |
| 1111 | 1111 |
| (...skipping 24 matching lines...) |
| 1136 | 1136 |
| 1137 __ InitializeRootRegister(); | 1137 __ InitializeRootRegister(); |
| 1138 | 1138 |
| 1139 // We build an EntryFrame. | 1139 // We build an EntryFrame. |
| 1140 __ li(a7, Operand(-1)); // Push a bad frame pointer to fail if it is used. | 1140 __ li(a7, Operand(-1)); // Push a bad frame pointer to fail if it is used. |
| 1141 StackFrame::Type marker = type(); | 1141 StackFrame::Type marker = type(); |
| 1142 __ li(a6, Operand(StackFrame::TypeToMarker(marker))); | 1142 __ li(a6, Operand(StackFrame::TypeToMarker(marker))); |
| 1143 __ li(a5, Operand(StackFrame::TypeToMarker(marker))); | 1143 __ li(a5, Operand(StackFrame::TypeToMarker(marker))); |
| 1144 ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate); | 1144 ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate); |
| 1145 __ li(a4, Operand(c_entry_fp)); | 1145 __ li(a4, Operand(c_entry_fp)); |
| 1146 __ ld(a4, MemOperand(a4)); | 1146 __ Ld(a4, MemOperand(a4)); |
| 1147 __ Push(a7, a6, a5, a4); | 1147 __ Push(a7, a6, a5, a4); |
| 1148 // Set up frame pointer for the frame to be pushed. | 1148 // Set up frame pointer for the frame to be pushed. |
| 1149 __ daddiu(fp, sp, -EntryFrameConstants::kCallerFPOffset); | 1149 __ daddiu(fp, sp, -EntryFrameConstants::kCallerFPOffset); |
| 1150 | 1150 |
| 1151 // Registers: | 1151 // Registers: |
| 1152 // a0: entry_address | 1152 // a0: entry_address |
| 1153 // a1: function | 1153 // a1: function |
| 1154 // a2: receiver_pointer | 1154 // a2: receiver_pointer |
| 1155 // a3: argc | 1155 // a3: argc |
| 1156 // s0: argv | 1156 // s0: argv |
| 1157 // | 1157 // |
| 1158 // Stack: | 1158 // Stack: |
| 1159 // caller fp | | 1159 // caller fp | |
| 1160 // function slot | entry frame | 1160 // function slot | entry frame |
| 1161 // context slot | | 1161 // context slot | |
| 1162 // bad fp (0xff...f) | | 1162 // bad fp (0xff...f) | |
| 1163 // callee saved registers + ra | 1163 // callee saved registers + ra |
| 1164 // [ O32: 4 args slots] | 1164 // [ O32: 4 args slots] |
| 1165 // args | 1165 // args |
| 1166 | 1166 |
| 1167 // If this is the outermost JS call, set js_entry_sp value. | 1167 // If this is the outermost JS call, set js_entry_sp value. |
| 1168 Label non_outermost_js; | 1168 Label non_outermost_js; |
| 1169 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate); | 1169 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate); |
| 1170 __ li(a5, Operand(ExternalReference(js_entry_sp))); | 1170 __ li(a5, Operand(ExternalReference(js_entry_sp))); |
| 1171 __ ld(a6, MemOperand(a5)); | 1171 __ Ld(a6, MemOperand(a5)); |
| 1172 __ Branch(&non_outermost_js, ne, a6, Operand(zero_reg)); | 1172 __ Branch(&non_outermost_js, ne, a6, Operand(zero_reg)); |
| 1173 __ sd(fp, MemOperand(a5)); | 1173 __ Sd(fp, MemOperand(a5)); |
| 1174 __ li(a4, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME)); | 1174 __ li(a4, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME)); |
| 1175 Label cont; | 1175 Label cont; |
| 1176 __ b(&cont); | 1176 __ b(&cont); |
| 1177 __ nop(); // Branch delay slot nop. | 1177 __ nop(); // Branch delay slot nop. |
| 1178 __ bind(&non_outermost_js); | 1178 __ bind(&non_outermost_js); |
| 1179 __ li(a4, Operand(StackFrame::INNER_JSENTRY_FRAME)); | 1179 __ li(a4, Operand(StackFrame::INNER_JSENTRY_FRAME)); |
| 1180 __ bind(&cont); | 1180 __ bind(&cont); |
| 1181 __ push(a4); | 1181 __ push(a4); |
| 1182 | 1182 |
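
The js_entry_sp bookkeeping distinguishes the outermost JS entry from nested ones: the slot is written only while it is still zero, and the chosen marker is pushed so the exit path knows whether to clear it again. The same logic as a sketch:

    enum FrameMarker { OUTERMOST_JSENTRY_FRAME, INNER_JSENTRY_FRAME };

    static void* js_entry_sp = nullptr;  // a per-isolate slot in the real code

    FrameMarker EnterJS(void* fp) {
      if (js_entry_sp == nullptr) {  // Branch(&non_outermost_js, ne, ...)
        js_entry_sp = fp;            // Sd(fp, MemOperand(a5))
        return OUTERMOST_JSENTRY_FRAME;
      }
      return INNER_JSENTRY_FRAME;
    }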
| 1183 // Jump to a faked try block that does the invoke, with a faked catch | 1183 // Jump to a faked try block that does the invoke, with a faked catch |
| 1184 // block that sets the pending exception. | 1184 // block that sets the pending exception. |
| 1185 __ jmp(&invoke); | 1185 __ jmp(&invoke); |
| 1186 __ bind(&handler_entry); | 1186 __ bind(&handler_entry); |
| 1187 handler_offset_ = handler_entry.pos(); | 1187 handler_offset_ = handler_entry.pos(); |
| 1188 // Caught exception: Store result (exception) in the pending exception | 1188 // Caught exception: Store result (exception) in the pending exception |
| 1189 // field in the JSEnv and return a failure sentinel. Coming in here the | 1189 // field in the JSEnv and return a failure sentinel. Coming in here the |
| 1190 // fp will be invalid because the PushStackHandler below sets it to 0 to | 1190 // fp will be invalid because the PushStackHandler below sets it to 0 to |
| 1191 // signal the existence of the JSEntry frame. | 1191 // signal the existence of the JSEntry frame. |
| 1192 __ li(a4, Operand(ExternalReference(Isolate::kPendingExceptionAddress, | 1192 __ li(a4, Operand(ExternalReference(Isolate::kPendingExceptionAddress, |
| 1193 isolate))); | 1193 isolate))); |
| 1194 __ sd(v0, MemOperand(a4)); // We come back from 'invoke'. result is in v0. | 1194 __ Sd(v0, MemOperand(a4)); // We come back from 'invoke'. result is in v0. |
| 1195 __ LoadRoot(v0, Heap::kExceptionRootIndex); | 1195 __ LoadRoot(v0, Heap::kExceptionRootIndex); |
| 1196 __ b(&exit); // b exposes branch delay slot. | 1196 __ b(&exit); // b exposes branch delay slot. |
| 1197 __ nop(); // Branch delay slot nop. | 1197 __ nop(); // Branch delay slot nop. |
| 1198 | 1198 |
| 1199 // Invoke: Link this frame into the handler chain. | 1199 // Invoke: Link this frame into the handler chain. |
| 1200 __ bind(&invoke); | 1200 __ bind(&invoke); |
| 1201 __ PushStackHandler(); | 1201 __ PushStackHandler(); |
| 1202 // If an exception not caught by another handler occurs, this handler | 1202 // If an exception not caught by another handler occurs, this handler |
| 1203 // returns control to the code after the bal(&invoke) above, which | 1203 // returns control to the code after the bal(&invoke) above, which |
| 1204 // restores all kCalleeSaved registers (including cp and fp) to their | 1204 // restores all kCalleeSaved registers (including cp and fp) to their |
| (...skipping 18 matching lines...) |
| 1223 // args | 1223 // args |
| 1224 | 1224 |
| 1225 if (type() == StackFrame::ENTRY_CONSTRUCT) { | 1225 if (type() == StackFrame::ENTRY_CONSTRUCT) { |
| 1226 ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline, | 1226 ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline, |
| 1227 isolate); | 1227 isolate); |
| 1228 __ li(a4, Operand(construct_entry)); | 1228 __ li(a4, Operand(construct_entry)); |
| 1229 } else { | 1229 } else { |
| 1230 ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate()); | 1230 ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate()); |
| 1231 __ li(a4, Operand(entry)); | 1231 __ li(a4, Operand(entry)); |
| 1232 } | 1232 } |
| 1233 __ ld(t9, MemOperand(a4)); // Deref address. | 1233 __ Ld(t9, MemOperand(a4)); // Deref address. |
| 1234 // Call JSEntryTrampoline. | 1234 // Call JSEntryTrampoline. |
| 1235 __ daddiu(t9, t9, Code::kHeaderSize - kHeapObjectTag); | 1235 __ daddiu(t9, t9, Code::kHeaderSize - kHeapObjectTag); |
| 1236 __ Call(t9); | 1236 __ Call(t9); |
| 1237 | 1237 |
| 1238 // Unlink this frame from the handler chain. | 1238 // Unlink this frame from the handler chain. |
| 1239 __ PopStackHandler(); | 1239 __ PopStackHandler(); |
| 1240 | 1240 |
| 1241 __ bind(&exit); // v0 holds result | 1241 __ bind(&exit); // v0 holds result |
| 1242 // Check if the current stack frame is marked as the outermost JS frame. | 1242 // Check if the current stack frame is marked as the outermost JS frame. |
| 1243 Label non_outermost_js_2; | 1243 Label non_outermost_js_2; |
| 1244 __ pop(a5); | 1244 __ pop(a5); |
| 1245 __ Branch(&non_outermost_js_2, ne, a5, | 1245 __ Branch(&non_outermost_js_2, ne, a5, |
| 1246 Operand(StackFrame::OUTERMOST_JSENTRY_FRAME)); | 1246 Operand(StackFrame::OUTERMOST_JSENTRY_FRAME)); |
| 1247 __ li(a5, Operand(ExternalReference(js_entry_sp))); | 1247 __ li(a5, Operand(ExternalReference(js_entry_sp))); |
| 1248 __ sd(zero_reg, MemOperand(a5)); | 1248 __ Sd(zero_reg, MemOperand(a5)); |
| 1249 __ bind(&non_outermost_js_2); | 1249 __ bind(&non_outermost_js_2); |
| 1250 | 1250 |
| 1251 // Restore the top frame descriptors from the stack. | 1251 // Restore the top frame descriptors from the stack. |
| 1252 __ pop(a5); | 1252 __ pop(a5); |
| 1253 __ li(a4, Operand(ExternalReference(Isolate::kCEntryFPAddress, | 1253 __ li(a4, Operand(ExternalReference(Isolate::kCEntryFPAddress, |
| 1254 isolate))); | 1254 isolate))); |
| 1255 __ sd(a5, MemOperand(a4)); | 1255 __ Sd(a5, MemOperand(a4)); |
| 1256 | 1256 |
| 1257 // Reset the stack to the callee saved registers. | 1257 // Reset the stack to the callee saved registers. |
| 1258 __ daddiu(sp, sp, -EntryFrameConstants::kCallerFPOffset); | 1258 __ daddiu(sp, sp, -EntryFrameConstants::kCallerFPOffset); |
| 1259 | 1259 |
| 1260 // Restore callee-saved fpu registers. | 1260 // Restore callee-saved fpu registers. |
| 1261 __ MultiPopFPU(kCalleeSavedFPU); | 1261 __ MultiPopFPU(kCalleeSavedFPU); |
| 1262 | 1262 |
| 1263 // Restore callee saved registers from the stack. | 1263 // Restore callee saved registers from the stack. |
| 1264 __ MultiPop(kCalleeSaved | ra.bit()); | 1264 __ MultiPop(kCalleeSaved | ra.bit()); |
| 1265 // Return. | 1265 // Return. |
| (...skipping 23 matching lines...) |
| 1289 // Abi O32: | 1289 // Abi O32: |
| 1290 // [sp + 5] - Argument 9 | 1290 // [sp + 5] - Argument 9 |
| 1291 // [sp + 4] - Argument 8 | 1291 // [sp + 4] - Argument 8 |
| 1292 // [sp + 3] - Argument 7 | 1292 // [sp + 3] - Argument 7 |
| 1293 // [sp + 2] - Argument 6 | 1293 // [sp + 2] - Argument 6 |
| 1294 // [sp + 1] - Argument 5 | 1294 // [sp + 1] - Argument 5 |
| 1295 // [sp + 0] - saved ra | 1295 // [sp + 0] - saved ra |
| 1296 | 1296 |
| 1297 // Argument 9: Pass current isolate address. | 1297 // Argument 9: Pass current isolate address. |
| 1298 __ li(t1, Operand(ExternalReference::isolate_address(isolate()))); | 1298 __ li(t1, Operand(ExternalReference::isolate_address(isolate()))); |
| 1299 __ sd(t1, MemOperand(sp, 1 * kPointerSize)); | 1299 __ Sd(t1, MemOperand(sp, 1 * kPointerSize)); |
| 1300 | 1300 |
| 1301 // Argument 8: Indicate that this is a direct call from JavaScript. | 1301 // Argument 8: Indicate that this is a direct call from JavaScript. |
| 1302 __ li(a7, Operand(1)); | 1302 __ li(a7, Operand(1)); |
| 1303 | 1303 |
| 1304 // Argument 7: Start (high end) of backtracking stack memory area. | 1304 // Argument 7: Start (high end) of backtracking stack memory area. |
| 1305 ExternalReference address_of_regexp_stack_memory_address = | 1305 ExternalReference address_of_regexp_stack_memory_address = |
| 1306 ExternalReference::address_of_regexp_stack_memory_address(isolate()); | 1306 ExternalReference::address_of_regexp_stack_memory_address(isolate()); |
| 1307 ExternalReference address_of_regexp_stack_memory_size = | 1307 ExternalReference address_of_regexp_stack_memory_size = |
| 1308 ExternalReference::address_of_regexp_stack_memory_size(isolate()); | 1308 ExternalReference::address_of_regexp_stack_memory_size(isolate()); |
| 1309 __ li(t1, Operand(address_of_regexp_stack_memory_address)); | 1309 __ li(t1, Operand(address_of_regexp_stack_memory_address)); |
| 1310 __ ld(t1, MemOperand(t1, 0)); | 1310 __ Ld(t1, MemOperand(t1, 0)); |
| 1311 __ li(t2, Operand(address_of_regexp_stack_memory_size)); | 1311 __ li(t2, Operand(address_of_regexp_stack_memory_size)); |
| 1312 __ ld(t2, MemOperand(t2, 0)); | 1312 __ Ld(t2, MemOperand(t2, 0)); |
| 1313 __ daddu(a6, t1, t2); | 1313 __ daddu(a6, t1, t2); |
| 1314 | 1314 |
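
Argument 7 is computed as base + size, the high end of the backtracking stack; the implied assumption is that this stack grows toward lower addresses, so its "start" is its highest address:

    #include <cstddef>

    // daddu a6, t1, t2 -- the stack's start is its high end (base + size).
    char* BacktrackStackStart(char* memory_base, size_t memory_size) {
      return memory_base + memory_size;
    }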
| 1315 // Argument 6: Set the number of capture registers to zero to force global | 1315 // Argument 6: Set the number of capture registers to zero to force global |
| 1316 // regexps to behave as non-global. This does not affect non-global regexps. | 1316 // regexps to behave as non-global. This does not affect non-global regexps. |
| 1317 __ mov(a5, zero_reg); | 1317 __ mov(a5, zero_reg); |
| 1318 | 1318 |
| 1319 // Argument 5: static offsets vector buffer. | 1319 // Argument 5: static offsets vector buffer. |
| 1320 __ li( | 1320 __ li( |
| 1321 a4, | 1321 a4, |
| 1322 Operand(ExternalReference::address_of_static_offsets_vector(isolate()))); | 1322 Operand(ExternalReference::address_of_static_offsets_vector(isolate()))); |
| (...skipping 58 matching lines...) |
| 1381 Label initialize, done, miss, megamorphic, not_array_function; | 1381 Label initialize, done, miss, megamorphic, not_array_function; |
| 1382 | 1382 |
| 1383 DCHECK_EQ(*FeedbackVector::MegamorphicSentinel(masm->isolate()), | 1383 DCHECK_EQ(*FeedbackVector::MegamorphicSentinel(masm->isolate()), |
| 1384 masm->isolate()->heap()->megamorphic_symbol()); | 1384 masm->isolate()->heap()->megamorphic_symbol()); |
| 1385 DCHECK_EQ(*FeedbackVector::UninitializedSentinel(masm->isolate()), | 1385 DCHECK_EQ(*FeedbackVector::UninitializedSentinel(masm->isolate()), |
| 1386 masm->isolate()->heap()->uninitialized_symbol()); | 1386 masm->isolate()->heap()->uninitialized_symbol()); |
| 1387 | 1387 |
| 1388 // Load the cache state into a5. | 1388 // Load the cache state into a5. |
| 1389 __ dsrl(a5, a3, 32 - kPointerSizeLog2); | 1389 __ dsrl(a5, a3, 32 - kPointerSizeLog2); |
| 1390 __ Daddu(a5, a2, Operand(a5)); | 1390 __ Daddu(a5, a2, Operand(a5)); |
| 1391 __ ld(a5, FieldMemOperand(a5, FixedArray::kHeaderSize)); | 1391 __ Ld(a5, FieldMemOperand(a5, FixedArray::kHeaderSize)); |
| 1392 | 1392 |
| 1393 // A monomorphic cache hit or an already megamorphic state: invoke the | 1393 // A monomorphic cache hit or an already megamorphic state: invoke the |
| 1394 // function without changing the state. | 1394 // function without changing the state. |
| 1395 // We don't know if a5 is a WeakCell or a Symbol, but it's harmless to read at | 1395 // We don't know if a5 is a WeakCell or a Symbol, but it's harmless to read at |
| 1396 // this position in a symbol (see static asserts in feedback-vector.h). | 1396 // this position in a symbol (see static asserts in feedback-vector.h). |
| 1397 Label check_allocation_site; | 1397 Label check_allocation_site; |
| 1398 Register feedback_map = a6; | 1398 Register feedback_map = a6; |
| 1399 Register weak_value = t0; | 1399 Register weak_value = t0; |
| 1400 __ ld(weak_value, FieldMemOperand(a5, WeakCell::kValueOffset)); | 1400 __ Ld(weak_value, FieldMemOperand(a5, WeakCell::kValueOffset)); |
| 1401 __ Branch(&done, eq, a1, Operand(weak_value)); | 1401 __ Branch(&done, eq, a1, Operand(weak_value)); |
| 1402 __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex); | 1402 __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex); |
| 1403 __ Branch(&done, eq, a5, Operand(at)); | 1403 __ Branch(&done, eq, a5, Operand(at)); |
| 1404 __ ld(feedback_map, FieldMemOperand(a5, HeapObject::kMapOffset)); | 1404 __ Ld(feedback_map, FieldMemOperand(a5, HeapObject::kMapOffset)); |
| 1405 __ LoadRoot(at, Heap::kWeakCellMapRootIndex); | 1405 __ LoadRoot(at, Heap::kWeakCellMapRootIndex); |
| 1406 __ Branch(&check_allocation_site, ne, feedback_map, Operand(at)); | 1406 __ Branch(&check_allocation_site, ne, feedback_map, Operand(at)); |
| 1407 | 1407 |
| 1408 // If the weak cell is cleared, we have a new chance to become monomorphic. | 1408 // If the weak cell is cleared, we have a new chance to become monomorphic. |
| 1409 __ JumpIfSmi(weak_value, &initialize); | 1409 __ JumpIfSmi(weak_value, &initialize); |
| 1410 __ jmp(&megamorphic); | 1410 __ jmp(&megamorphic); |
| 1411 | 1411 |
| 1412 __ bind(&check_allocation_site); | 1412 __ bind(&check_allocation_site); |
| 1413 // If we came here, we need to see if we are the array function. | 1413 // If we came here, we need to see if we are the array function. |
| 1414 // If we didn't have a matching function, and we didn't find the megamorph | 1414 // If we didn't have a matching function, and we didn't find the megamorph |
| (...skipping 12 matching lines...) |
| 1427 // A monomorphic miss (i.e., here the cache is not uninitialized) goes | 1427 // A monomorphic miss (i.e., here the cache is not uninitialized) goes |
| 1428 // megamorphic. | 1428 // megamorphic. |
| 1429 __ LoadRoot(at, Heap::kuninitialized_symbolRootIndex); | 1429 __ LoadRoot(at, Heap::kuninitialized_symbolRootIndex); |
| 1430 __ Branch(&initialize, eq, a5, Operand(at)); | 1430 __ Branch(&initialize, eq, a5, Operand(at)); |
| 1431 // MegamorphicSentinel is an immortal immovable object (undefined) so no | 1431 // MegamorphicSentinel is an immortal immovable object (undefined) so no |
| 1432 // write-barrier is needed. | 1432 // write-barrier is needed. |
| 1433 __ bind(&megamorphic); | 1433 __ bind(&megamorphic); |
| 1434 __ dsrl(a5, a3, 32 - kPointerSizeLog2); | 1434 __ dsrl(a5, a3, 32 - kPointerSizeLog2); |
| 1435 __ Daddu(a5, a2, Operand(a5)); | 1435 __ Daddu(a5, a2, Operand(a5)); |
| 1436 __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex); | 1436 __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex); |
| 1437 __ sd(at, FieldMemOperand(a5, FixedArray::kHeaderSize)); | 1437 __ Sd(at, FieldMemOperand(a5, FixedArray::kHeaderSize)); |
| 1438 __ jmp(&done); | 1438 __ jmp(&done); |
| 1439 | 1439 |
| 1440 // An uninitialized cache is patched with the function. | 1440 // An uninitialized cache is patched with the function. |
| 1441 __ bind(&initialize); | 1441 __ bind(&initialize); |
| 1442 // Make sure the function is the Array() function. | 1442 // Make sure the function is the Array() function. |
| 1443 __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, a5); | 1443 __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, a5); |
| 1444 __ Branch(¬_array_function, ne, a1, Operand(a5)); | 1444 __ Branch(¬_array_function, ne, a1, Operand(a5)); |
| 1445 | 1445 |
| 1446 // The target function is the Array constructor. | 1446 // The target function is the Array constructor. |
| 1447 // Create an AllocationSite if we don't already have it, store it in the | 1447 // Create an AllocationSite if we don't already have it, store it in the |
| 1448 // slot. | 1448 // slot. |
| 1449 CreateAllocationSiteStub create_stub(masm->isolate()); | 1449 CreateAllocationSiteStub create_stub(masm->isolate()); |
| 1450 CallStubInRecordCallTarget(masm, &create_stub); | 1450 CallStubInRecordCallTarget(masm, &create_stub); |
| 1451 __ Branch(&done); | 1451 __ Branch(&done); |
| 1452 | 1452 |
| 1453 __ bind(¬_array_function); | 1453 __ bind(¬_array_function); |
| 1454 | 1454 |
| 1455 CreateWeakCellStub weak_cell_stub(masm->isolate()); | 1455 CreateWeakCellStub weak_cell_stub(masm->isolate()); |
| 1456 CallStubInRecordCallTarget(masm, &weak_cell_stub); | 1456 CallStubInRecordCallTarget(masm, &weak_cell_stub); |
| 1457 | 1457 |
| 1458 __ bind(&done); | 1458 __ bind(&done); |
| 1459 | 1459 |
| 1460 // Increment the call count for all function calls. | 1460 // Increment the call count for all function calls. |
| 1461 __ SmiScale(a4, a3, kPointerSizeLog2); | 1461 __ SmiScale(a4, a3, kPointerSizeLog2); |
| 1462 __ Daddu(a5, a2, Operand(a4)); | 1462 __ Daddu(a5, a2, Operand(a4)); |
| 1463 __ ld(a4, FieldMemOperand(a5, FixedArray::kHeaderSize + kPointerSize)); | 1463 __ Ld(a4, FieldMemOperand(a5, FixedArray::kHeaderSize + kPointerSize)); |
| 1464 __ Daddu(a4, a4, Operand(Smi::FromInt(1))); | 1464 __ Daddu(a4, a4, Operand(Smi::FromInt(1))); |
| 1465 __ sd(a4, FieldMemOperand(a5, FixedArray::kHeaderSize + kPointerSize)); | 1465 __ Sd(a4, FieldMemOperand(a5, FixedArray::kHeaderSize + kPointerSize)); |
| 1466 } | 1466 } |
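
The dsrl by 32 - kPointerSizeLog2 converts a tagged slot index into a byte offset in a single shift: with 64-bit Smis (payload in the upper 32 bits) and 8-byte slots, shifting right by 29 untags and scales at once. A sketch of the equivalence under those assumptions:

    #include <cstdint>

    int64_t SlotOffset(int64_t smi_slot_index) {
      // dsrl a5, a3, 32 - kPointerSizeLog2   (kPointerSizeLog2 == 3)
      return static_cast<int64_t>(
          static_cast<uint64_t>(smi_slot_index) >> (32 - 3));
    }
    // For non-negative indices this equals (smi_slot_index >> 32) * 8,
    // i.e. untag first, then scale to bytes.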
| 1467 | 1467 |
| 1468 | 1468 |
| 1469 void CallConstructStub::Generate(MacroAssembler* masm) { | 1469 void CallConstructStub::Generate(MacroAssembler* masm) { |
| 1470 // a0 : number of arguments | 1470 // a0 : number of arguments |
| 1471 // a1 : the function to call | 1471 // a1 : the function to call |
| 1472 // a2 : feedback vector | 1472 // a2 : feedback vector |
| 1473 // a3 : slot in feedback vector (Smi, for RecordCallTarget) | 1473 // a3 : slot in feedback vector (Smi, for RecordCallTarget) |
| 1474 | 1474 |
| 1475 Label non_function; | 1475 Label non_function; |
| 1476 // Check that the function is not a smi. | 1476 // Check that the function is not a smi. |
| 1477 __ JumpIfSmi(a1, &non_function); | 1477 __ JumpIfSmi(a1, &non_function); |
| 1478 // Check that the function is a JSFunction. | 1478 // Check that the function is a JSFunction. |
| 1479 __ GetObjectType(a1, a5, a5); | 1479 __ GetObjectType(a1, a5, a5); |
| 1480 __ Branch(&non_function, ne, a5, Operand(JS_FUNCTION_TYPE)); | 1480 __ Branch(&non_function, ne, a5, Operand(JS_FUNCTION_TYPE)); |
| 1481 | 1481 |
| 1482 GenerateRecordCallTarget(masm); | 1482 GenerateRecordCallTarget(masm); |
| 1483 | 1483 |
| 1484 __ dsrl(at, a3, 32 - kPointerSizeLog2); | 1484 __ dsrl(at, a3, 32 - kPointerSizeLog2); |
| 1485 __ Daddu(a5, a2, at); | 1485 __ Daddu(a5, a2, at); |
| 1486 Label feedback_register_initialized; | 1486 Label feedback_register_initialized; |
| 1487 // Put the AllocationSite from the feedback vector into a2, or undefined. | 1487 // Put the AllocationSite from the feedback vector into a2, or undefined. |
| 1488 __ ld(a2, FieldMemOperand(a5, FixedArray::kHeaderSize)); | 1488 __ Ld(a2, FieldMemOperand(a5, FixedArray::kHeaderSize)); |
| 1489 __ ld(a5, FieldMemOperand(a2, AllocationSite::kMapOffset)); | 1489 __ Ld(a5, FieldMemOperand(a2, AllocationSite::kMapOffset)); |
| 1490 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex); | 1490 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex); |
| 1491 __ Branch(&feedback_register_initialized, eq, a5, Operand(at)); | 1491 __ Branch(&feedback_register_initialized, eq, a5, Operand(at)); |
| 1492 __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); | 1492 __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); |
| 1493 __ bind(&feedback_register_initialized); | 1493 __ bind(&feedback_register_initialized); |
| 1494 | 1494 |
| 1495 __ AssertUndefinedOrAllocationSite(a2, a5); | 1495 __ AssertUndefinedOrAllocationSite(a2, a5); |
| 1496 | 1496 |
| 1497 // Pass function as new target. | 1497 // Pass function as new target. |
| 1498 __ mov(a3, a1); | 1498 __ mov(a3, a1); |
| 1499 | 1499 |
| 1500 // Tail call to the function-specific construct stub (still in the caller | 1500 // Tail call to the function-specific construct stub (still in the caller |
| 1501 // context at this point). | 1501 // context at this point). |
| 1502 __ ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); | 1502 __ Ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); |
| 1503 __ ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kConstructStubOffset)); | 1503 __ Ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kConstructStubOffset)); |
| 1504 __ Daddu(at, a4, Operand(Code::kHeaderSize - kHeapObjectTag)); | 1504 __ Daddu(at, a4, Operand(Code::kHeaderSize - kHeapObjectTag)); |
| 1505 __ Jump(at); | 1505 __ Jump(at); |
| 1506 | 1506 |
| 1507 __ bind(&non_function); | 1507 __ bind(&non_function); |
| 1508 __ mov(a3, a1); | 1508 __ mov(a3, a1); |
| 1509 __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET); | 1509 __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET); |
| 1510 } | 1510 } |
| 1511 | 1511 |
| 1512 | 1512 |
| 1513 // StringCharCodeAtGenerator. | 1513 // StringCharCodeAtGenerator. |
| 1514 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { | 1514 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { |
| 1515 DCHECK(!a4.is(index_)); | 1515 DCHECK(!a4.is(index_)); |
| 1516 DCHECK(!a4.is(result_)); | 1516 DCHECK(!a4.is(result_)); |
| 1517 DCHECK(!a4.is(object_)); | 1517 DCHECK(!a4.is(object_)); |
| 1518 | 1518 |
| 1519 // If the receiver is a smi trigger the non-string case. | 1519 // If the receiver is a smi trigger the non-string case. |
| 1520 if (check_mode_ == RECEIVER_IS_UNKNOWN) { | 1520 if (check_mode_ == RECEIVER_IS_UNKNOWN) { |
| 1521 __ JumpIfSmi(object_, receiver_not_string_); | 1521 __ JumpIfSmi(object_, receiver_not_string_); |
| 1522 | 1522 |
| 1523 // Fetch the instance type of the receiver into result register. | 1523 // Fetch the instance type of the receiver into result register. |
| 1524 __ ld(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); | 1524 __ Ld(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); |
| 1525 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); | 1525 __ Lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); |
| 1526 // If the receiver is not a string, trigger the non-string case. | 1526 // If the receiver is not a string, trigger the non-string case. |
| 1527 __ And(a4, result_, Operand(kIsNotStringMask)); | 1527 __ And(a4, result_, Operand(kIsNotStringMask)); |
| 1528 __ Branch(receiver_not_string_, ne, a4, Operand(zero_reg)); | 1528 __ Branch(receiver_not_string_, ne, a4, Operand(zero_reg)); |
| 1529 } | 1529 } |
| 1530 | 1530 |
| 1531 // If the index is non-smi, trigger the non-smi case. | 1531 // If the index is non-smi, trigger the non-smi case. |
| 1532 __ JumpIfNotSmi(index_, &index_not_smi_); | 1532 __ JumpIfNotSmi(index_, &index_not_smi_); |
| 1533 | 1533 |
| 1534 __ bind(&got_smi_index_); | 1534 __ bind(&got_smi_index_); |
| 1535 | 1535 |
| 1536 // Check for index out of range. | 1536 // Check for index out of range. |
| 1537 __ ld(a4, FieldMemOperand(object_, String::kLengthOffset)); | 1537 __ Ld(a4, FieldMemOperand(object_, String::kLengthOffset)); |
| 1538 __ Branch(index_out_of_range_, ls, a4, Operand(index_)); | 1538 __ Branch(index_out_of_range_, ls, a4, Operand(index_)); |
| 1539 | 1539 |
| 1540 __ SmiUntag(index_); | 1540 __ SmiUntag(index_); |
| 1541 | 1541 |
| 1542 StringCharLoadGenerator::Generate(masm, | 1542 StringCharLoadGenerator::Generate(masm, |
| 1543 object_, | 1543 object_, |
| 1544 index_, | 1544 index_, |
| 1545 result_, | 1545 result_, |
| 1546 &call_runtime_); | 1546 &call_runtime_); |
| 1547 | 1547 |
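
The range check above branches on ls (unsigned lower-or-same), so one compare rejects both index >= length and any negative index, which becomes a huge value when viewed unsigned. The trick in isolation:

    #include <cstdint>

    bool IndexInRange(int64_t index, int64_t length) {
      // Unsigned compare: a negative index wraps to a huge value and fails.
      return static_cast<uint64_t>(index) < static_cast<uint64_t>(length);
    }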
| (...skipping 28 matching lines...) |
| 1576 // have a chance to overwrite it. | 1576 // have a chance to overwrite it. |
| 1577 | 1577 |
| 1578 __ Move(index_, v0); | 1578 __ Move(index_, v0); |
| 1579 if (embed_mode == PART_OF_IC_HANDLER) { | 1579 if (embed_mode == PART_OF_IC_HANDLER) { |
| 1580 __ Pop(LoadWithVectorDescriptor::VectorRegister(), | 1580 __ Pop(LoadWithVectorDescriptor::VectorRegister(), |
| 1581 LoadWithVectorDescriptor::SlotRegister(), object_); | 1581 LoadWithVectorDescriptor::SlotRegister(), object_); |
| 1582 } else { | 1582 } else { |
| 1583 __ pop(object_); | 1583 __ pop(object_); |
| 1584 } | 1584 } |
| 1585 // Reload the instance type. | 1585 // Reload the instance type. |
| 1586 __ ld(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); | 1586 __ Ld(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); |
| 1587 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); | 1587 __ Lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); |
| 1588 call_helper.AfterCall(masm); | 1588 call_helper.AfterCall(masm); |
| 1589 // If index is still not a smi, it must be out of range. | 1589 // If index is still not a smi, it must be out of range. |
| 1590 __ JumpIfNotSmi(index_, index_out_of_range_); | 1590 __ JumpIfNotSmi(index_, index_out_of_range_); |
| 1591 // Otherwise, return to the fast path. | 1591 // Otherwise, return to the fast path. |
| 1592 __ Branch(&got_smi_index_); | 1592 __ Branch(&got_smi_index_); |
| 1593 | 1593 |
| 1594 // Call runtime. We get here when the receiver is a string and the | 1594 // Call runtime. We get here when the receiver is a string and the |
| 1595 // index is a number, but the code of getting the actual character | 1595 // index is a number, but the code of getting the actual character |
| 1596 // is too complex (e.g., when the string needs to be flattened). | 1596 // is too complex (e.g., when the string needs to be flattened). |
| 1597 __ bind(&call_runtime_); | 1597 __ bind(&call_runtime_); |
| (...skipping 10 matching lines...) |
| 1608 __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase); | 1608 __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase); |
| 1609 } | 1609 } |
| 1610 | 1610 |
| 1611 void StringHelper::GenerateFlatOneByteStringEquals( | 1611 void StringHelper::GenerateFlatOneByteStringEquals( |
| 1612 MacroAssembler* masm, Register left, Register right, Register scratch1, | 1612 MacroAssembler* masm, Register left, Register right, Register scratch1, |
| 1613 Register scratch2, Register scratch3) { | 1613 Register scratch2, Register scratch3) { |
| 1614 Register length = scratch1; | 1614 Register length = scratch1; |
| 1615 | 1615 |
| 1616 // Compare lengths. | 1616 // Compare lengths. |
| 1617 Label strings_not_equal, check_zero_length; | 1617 Label strings_not_equal, check_zero_length; |
| 1618 __ ld(length, FieldMemOperand(left, String::kLengthOffset)); | 1618 __ Ld(length, FieldMemOperand(left, String::kLengthOffset)); |
| 1619 __ ld(scratch2, FieldMemOperand(right, String::kLengthOffset)); | 1619 __ Ld(scratch2, FieldMemOperand(right, String::kLengthOffset)); |
| 1620 __ Branch(&check_zero_length, eq, length, Operand(scratch2)); | 1620 __ Branch(&check_zero_length, eq, length, Operand(scratch2)); |
| 1621 __ bind(&strings_not_equal); | 1621 __ bind(&strings_not_equal); |
| 1622 // Cannot put li in the delay slot; it expands to multiple instructions. | 1622 // Cannot put li in the delay slot; it expands to multiple instructions. |
| 1623 __ li(v0, Operand(Smi::FromInt(NOT_EQUAL))); | 1623 __ li(v0, Operand(Smi::FromInt(NOT_EQUAL))); |
| 1624 __ Ret(); | 1624 __ Ret(); |
| 1625 | 1625 |
| 1626 // Check if the length is zero. | 1626 // Check if the length is zero. |
| 1627 Label compare_chars; | 1627 Label compare_chars; |
| 1628 __ bind(&check_zero_length); | 1628 __ bind(&check_zero_length); |
| 1629 STATIC_ASSERT(kSmiTag == 0); | 1629 STATIC_ASSERT(kSmiTag == 0); |
| (...skipping 12 matching lines...) |
| 1642 __ Ret(USE_DELAY_SLOT); | 1642 __ Ret(USE_DELAY_SLOT); |
| 1643 __ li(v0, Operand(Smi::FromInt(EQUAL))); | 1643 __ li(v0, Operand(Smi::FromInt(EQUAL))); |
| 1644 } | 1644 } |
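As a reading aid, a minimal C++ rendering of the fast path generated above, assuming both inputs are already flat one-byte strings (the stub's precondition); the EQUAL/NOT_EQUAL values stand in for the smis the stub returns in v0:

    #include <cstring>
    #include <string>

    enum CompareResult { EQUAL = 0, NOT_EQUAL = 1 };  // assumed smi values

    CompareResult FlatOneByteStringEquals(const std::string& left,
                                          const std::string& right) {
      if (left.size() != right.size()) return NOT_EQUAL;  // strings_not_equal
      if (left.empty()) return EQUAL;                     // check_zero_length
      // compare_chars: byte-wise loop, bail out on first mismatch.
      return std::memcmp(left.data(), right.data(), left.size()) == 0
                 ? EQUAL
                 : NOT_EQUAL;
    }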
| 1645 | 1645 |
| 1646 | 1646 |
| 1647 void StringHelper::GenerateCompareFlatOneByteStrings( | 1647 void StringHelper::GenerateCompareFlatOneByteStrings( |
| 1648 MacroAssembler* masm, Register left, Register right, Register scratch1, | 1648 MacroAssembler* masm, Register left, Register right, Register scratch1, |
| 1649 Register scratch2, Register scratch3, Register scratch4) { | 1649 Register scratch2, Register scratch3, Register scratch4) { |
| 1650 Label result_not_equal, compare_lengths; | 1650 Label result_not_equal, compare_lengths; |
| 1651 // Find minimum length and length difference. | 1651 // Find minimum length and length difference. |
| 1652 __ ld(scratch1, FieldMemOperand(left, String::kLengthOffset)); | 1652 __ Ld(scratch1, FieldMemOperand(left, String::kLengthOffset)); |
| 1653 __ ld(scratch2, FieldMemOperand(right, String::kLengthOffset)); | 1653 __ Ld(scratch2, FieldMemOperand(right, String::kLengthOffset)); |
| 1654 __ Dsubu(scratch3, scratch1, Operand(scratch2)); | 1654 __ Dsubu(scratch3, scratch1, Operand(scratch2)); |
| 1655 Register length_delta = scratch3; | 1655 Register length_delta = scratch3; |
| 1656 __ slt(scratch4, scratch2, scratch1); | 1656 __ slt(scratch4, scratch2, scratch1); |
| 1657 __ Movn(scratch1, scratch2, scratch4); | 1657 __ Movn(scratch1, scratch2, scratch4); |
| 1658 Register min_length = scratch1; | 1658 Register min_length = scratch1; |
| 1659 STATIC_ASSERT(kSmiTag == 0); | 1659 STATIC_ASSERT(kSmiTag == 0); |
| 1660 __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg)); | 1660 __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg)); |
| 1661 | 1661 |
| 1662 // Compare loop. | 1662 // Compare loop. |
| 1663 GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2, | 1663 GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2, |
| (...skipping 33 matching lines...) |
| 1697 __ Daddu(left, left, Operand(scratch1)); | 1697 __ Daddu(left, left, Operand(scratch1)); |
| 1698 __ Daddu(right, right, Operand(scratch1)); | 1698 __ Daddu(right, right, Operand(scratch1)); |
| 1699 __ Dsubu(length, zero_reg, length); | 1699 __ Dsubu(length, zero_reg, length); |
| 1700 Register index = length; // index = -length; | 1700 Register index = length; // index = -length; |
| 1701 | 1701 |
| 1702 | 1702 |
| 1703 // Compare loop. | 1703 // Compare loop. |
| 1704 Label loop; | 1704 Label loop; |
| 1705 __ bind(&loop); | 1705 __ bind(&loop); |
| 1706 __ Daddu(scratch3, left, index); | 1706 __ Daddu(scratch3, left, index); |
| 1707 __ lbu(scratch1, MemOperand(scratch3)); | 1707 __ Lbu(scratch1, MemOperand(scratch3)); |
| 1708 __ Daddu(scratch3, right, index); | 1708 __ Daddu(scratch3, right, index); |
| 1709 __ lbu(scratch2, MemOperand(scratch3)); | 1709 __ Lbu(scratch2, MemOperand(scratch3)); |
| 1710 __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2)); | 1710 __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2)); |
| 1711 __ Daddu(index, index, 1); | 1711 __ Daddu(index, index, 1); |
| 1712 __ Branch(&loop, ne, index, Operand(zero_reg)); | 1712 __ Branch(&loop, ne, index, Operand(zero_reg)); |
| 1713 } | 1713 } |
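The loop above is a standard negative-index idiom: advance both base pointers one past the data, then count an index up from -length to zero, so one register serves as both loop counter and offset. A sketch with our own names:

    #include <cstddef>

    bool OneByteCharsEqual(const unsigned char* left,
                           const unsigned char* right, ptrdiff_t length) {
      left += length;   // bases now point one past the end
      right += length;
      for (ptrdiff_t index = -length; index != 0; ++index) {
        // One add and one load per operand, then a single compare.
        if (left[index] != right[index]) return false;  // chars_not_equal
      }
      return true;
    }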
| 1714 | 1714 |
| 1715 | 1715 |
| 1716 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) { | 1716 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) { |
| 1717 // ----------- S t a t e ------------- | 1717 // ----------- S t a t e ------------- |
| 1718 // -- a1 : left | 1718 // -- a1 : left |
| 1719 // -- a0 : right | 1719 // -- a0 : right |
| 1720 // -- ra : return address | 1720 // -- ra : return address |
| 1721 // ----------------------------------- | 1721 // ----------------------------------- |
| 1722 | 1722 |
| 1723 // Load a2 with the allocation site. We stick an undefined dummy value here | 1723 // Load a2 with the allocation site. We stick an undefined dummy value here |
| 1724 // and replace it with the real allocation site later when we instantiate this | 1724 // and replace it with the real allocation site later when we instantiate this |
| 1725 // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate(). | 1725 // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate(). |
| 1726 __ li(a2, isolate()->factory()->undefined_value()); | 1726 __ li(a2, isolate()->factory()->undefined_value()); |
| 1727 | 1727 |
| 1728 // Make sure that we actually patched the allocation site. | 1728 // Make sure that we actually patched the allocation site. |
| 1729 if (FLAG_debug_code) { | 1729 if (FLAG_debug_code) { |
| 1730 __ And(at, a2, Operand(kSmiTagMask)); | 1730 __ And(at, a2, Operand(kSmiTagMask)); |
| 1731 __ Assert(ne, kExpectedAllocationSite, at, Operand(zero_reg)); | 1731 __ Assert(ne, kExpectedAllocationSite, at, Operand(zero_reg)); |
| 1732 __ ld(a4, FieldMemOperand(a2, HeapObject::kMapOffset)); | 1732 __ Ld(a4, FieldMemOperand(a2, HeapObject::kMapOffset)); |
| 1733 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex); | 1733 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex); |
| 1734 __ Assert(eq, kExpectedAllocationSite, a4, Operand(at)); | 1734 __ Assert(eq, kExpectedAllocationSite, a4, Operand(at)); |
| 1735 } | 1735 } |
| 1736 | 1736 |
| 1737 // Tail call into the stub that handles binary operations with allocation | 1737 // Tail call into the stub that handles binary operations with allocation |
| 1738 // sites. | 1738 // sites. |
| 1739 BinaryOpWithAllocationSiteStub stub(isolate(), state()); | 1739 BinaryOpWithAllocationSiteStub stub(isolate(), state()); |
| 1740 __ TailCallStub(&stub); | 1740 __ TailCallStub(&stub); |
| 1741 } | 1741 } |
| 1742 | 1742 |
| 1743 | 1743 |
| 1744 void CompareICStub::GenerateBooleans(MacroAssembler* masm) { | 1744 void CompareICStub::GenerateBooleans(MacroAssembler* masm) { |
| 1745 DCHECK_EQ(CompareICState::BOOLEAN, state()); | 1745 DCHECK_EQ(CompareICState::BOOLEAN, state()); |
| 1746 Label miss; | 1746 Label miss; |
| 1747 | 1747 |
| 1748 __ CheckMap(a1, a2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK); | 1748 __ CheckMap(a1, a2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK); |
| 1749 __ CheckMap(a0, a3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK); | 1749 __ CheckMap(a0, a3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK); |
| 1750 if (!Token::IsEqualityOp(op())) { | 1750 if (!Token::IsEqualityOp(op())) { |
| 1751 __ ld(a1, FieldMemOperand(a1, Oddball::kToNumberOffset)); | 1751 __ Ld(a1, FieldMemOperand(a1, Oddball::kToNumberOffset)); |
| 1752 __ AssertSmi(a1); | 1752 __ AssertSmi(a1); |
| 1753 __ ld(a0, FieldMemOperand(a0, Oddball::kToNumberOffset)); | 1753 __ Ld(a0, FieldMemOperand(a0, Oddball::kToNumberOffset)); |
| 1754 __ AssertSmi(a0); | 1754 __ AssertSmi(a0); |
| 1755 } | 1755 } |
| 1756 __ Ret(USE_DELAY_SLOT); | 1756 __ Ret(USE_DELAY_SLOT); |
| 1757 __ Dsubu(v0, a1, a0); | 1757 __ Dsubu(v0, a1, a0); |
| 1758 | 1758 |
| 1759 __ bind(&miss); | 1759 __ bind(&miss); |
| 1760 GenerateMiss(masm); | 1760 GenerateMiss(masm); |
| 1761 } | 1761 } |
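For the relational case above, the stub loads each boolean's cached to-number value and subtracts. A hedged sketch, assuming those cached smis are 1 for true and 0 for false:

    // v0 = a1 - a0: negative, zero, or positive, as the relational
    // CompareIC protocol expects.
    int CompareBooleans(bool lhs, bool rhs) {
      return static_cast<int>(lhs) - static_cast<int>(rhs);
    }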
| 1762 | 1762 |
| 1763 | 1763 |
| (...skipping 35 matching lines...) |
| 1799 } | 1799 } |
| 1800 | 1800 |
| 1801 // Inlining the double comparison and falling back to the general compare | 1801 // Inlining the double comparison and falling back to the general compare |
| 1802 // stub if NaN is involved. | 1802 // stub if NaN is involved. |
| 1803 // Load left and right operand. | 1803 // Load left and right operand. |
| 1804 Label done, left, left_smi, right_smi; | 1804 Label done, left, left_smi, right_smi; |
| 1805 __ JumpIfSmi(a0, &right_smi); | 1805 __ JumpIfSmi(a0, &right_smi); |
| 1806 __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1, | 1806 __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1, |
| 1807 DONT_DO_SMI_CHECK); | 1807 DONT_DO_SMI_CHECK); |
| 1808 __ Dsubu(a2, a0, Operand(kHeapObjectTag)); | 1808 __ Dsubu(a2, a0, Operand(kHeapObjectTag)); |
| 1809 __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset)); | 1809 __ Ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset)); |
| 1810 __ Branch(&left); | 1810 __ Branch(&left); |
| 1811 __ bind(&right_smi); | 1811 __ bind(&right_smi); |
| 1812 __ SmiUntag(a2, a0); // Can't clobber a0 yet. | 1812 __ SmiUntag(a2, a0); // Can't clobber a0 yet. |
| 1813 FPURegister single_scratch = f6; | 1813 FPURegister single_scratch = f6; |
| 1814 __ mtc1(a2, single_scratch); | 1814 __ mtc1(a2, single_scratch); |
| 1815 __ cvt_d_w(f2, single_scratch); | 1815 __ cvt_d_w(f2, single_scratch); |
| 1816 | 1816 |
| 1817 __ bind(&left); | 1817 __ bind(&left); |
| 1818 __ JumpIfSmi(a1, &left_smi); | 1818 __ JumpIfSmi(a1, &left_smi); |
| 1819 __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2, | 1819 __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2, |
| 1820 DONT_DO_SMI_CHECK); | 1820 DONT_DO_SMI_CHECK); |
| 1821 __ Dsubu(a2, a1, Operand(kHeapObjectTag)); | 1821 __ Dsubu(a2, a1, Operand(kHeapObjectTag)); |
| 1822 __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset)); | 1822 __ Ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset)); |
| 1823 __ Branch(&done); | 1823 __ Branch(&done); |
| 1824 __ bind(&left_smi); | 1824 __ bind(&left_smi); |
| 1825 __ SmiUntag(a2, a1); // Can't clobber a1 yet. | 1825 __ SmiUntag(a2, a1); // Can't clobber a1 yet. |
| 1826 single_scratch = f8; | 1826 single_scratch = f8; |
| 1827 __ mtc1(a2, single_scratch); | 1827 __ mtc1(a2, single_scratch); |
| 1828 __ cvt_d_w(f0, single_scratch); | 1828 __ cvt_d_w(f0, single_scratch); |
| 1829 | 1829 |
| 1830 __ bind(&done); | 1830 __ bind(&done); |
| 1831 | 1831 |
| 1832 // Return a result of -1, 0, or 1, or use CompareStub for NaNs. | 1832 // Return a result of -1, 0, or 1, or use CompareStub for NaNs. |
| (...skipping 51 matching lines...) |
| 1884 // Registers containing left and right operands respectively. | 1884 // Registers containing left and right operands respectively. |
| 1885 Register left = a1; | 1885 Register left = a1; |
| 1886 Register right = a0; | 1886 Register right = a0; |
| 1887 Register tmp1 = a2; | 1887 Register tmp1 = a2; |
| 1888 Register tmp2 = a3; | 1888 Register tmp2 = a3; |
| 1889 | 1889 |
| 1890 // Check that both operands are heap objects. | 1890 // Check that both operands are heap objects. |
| 1891 __ JumpIfEitherSmi(left, right, &miss); | 1891 __ JumpIfEitherSmi(left, right, &miss); |
| 1892 | 1892 |
| 1893 // Check that both operands are internalized strings. | 1893 // Check that both operands are internalized strings. |
| 1894 __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); | 1894 __ Ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); |
| 1895 __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); | 1895 __ Ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); |
| 1896 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); | 1896 __ Lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); |
| 1897 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); | 1897 __ Lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); |
| 1898 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); | 1898 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); |
| 1899 __ Or(tmp1, tmp1, Operand(tmp2)); | 1899 __ Or(tmp1, tmp1, Operand(tmp2)); |
| 1900 __ And(at, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask)); | 1900 __ And(at, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask)); |
| 1901 __ Branch(&miss, ne, at, Operand(zero_reg)); | 1901 __ Branch(&miss, ne, at, Operand(zero_reg)); |
| 1902 | 1902 |
| 1903 // Make sure a0 is non-zero. At this point input operands are | 1903 // Make sure a0 is non-zero. At this point input operands are |
| 1904 // guaranteed to be non-zero. | 1904 // guaranteed to be non-zero. |
| 1905 DCHECK(right.is(a0)); | 1905 DCHECK(right.is(a0)); |
| 1906 STATIC_ASSERT(EQUAL == 0); | 1906 STATIC_ASSERT(EQUAL == 0); |
| 1907 STATIC_ASSERT(kSmiTag == 0); | 1907 STATIC_ASSERT(kSmiTag == 0); |
| (...skipping 18 matching lines...) |
| 1926 Register left = a1; | 1926 Register left = a1; |
| 1927 Register right = a0; | 1927 Register right = a0; |
| 1928 Register tmp1 = a2; | 1928 Register tmp1 = a2; |
| 1929 Register tmp2 = a3; | 1929 Register tmp2 = a3; |
| 1930 | 1930 |
| 1931 // Check that both operands are heap objects. | 1931 // Check that both operands are heap objects. |
| 1932 __ JumpIfEitherSmi(left, right, &miss); | 1932 __ JumpIfEitherSmi(left, right, &miss); |
| 1933 | 1933 |
| 1934 // Check that both operands are unique names. This leaves the instance | 1934 // Check that both operands are unique names. This leaves the instance |
| 1935 // types loaded in tmp1 and tmp2. | 1935 // types loaded in tmp1 and tmp2. |
| 1936 __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); | 1936 __ Ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); |
| 1937 __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); | 1937 __ Ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); |
| 1938 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); | 1938 __ Lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); |
| 1939 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); | 1939 __ Lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); |
| 1940 | 1940 |
| 1941 __ JumpIfNotUniqueNameInstanceType(tmp1, &miss); | 1941 __ JumpIfNotUniqueNameInstanceType(tmp1, &miss); |
| 1942 __ JumpIfNotUniqueNameInstanceType(tmp2, &miss); | 1942 __ JumpIfNotUniqueNameInstanceType(tmp2, &miss); |
| 1943 | 1943 |
| 1944 // Use a0 as result | 1944 // Use a0 as result |
| 1945 __ mov(v0, a0); | 1945 __ mov(v0, a0); |
| 1946 | 1946 |
| 1947 // Unique names are compared by identity. | 1947 // Unique names are compared by identity. |
| 1948 Label done; | 1948 Label done; |
| 1949 __ Branch(&done, ne, left, Operand(right)); | 1949 __ Branch(&done, ne, left, Operand(right)); |
| (...skipping 24 matching lines...) |
| 1974 Register tmp2 = a3; | 1974 Register tmp2 = a3; |
| 1975 Register tmp3 = a4; | 1975 Register tmp3 = a4; |
| 1976 Register tmp4 = a5; | 1976 Register tmp4 = a5; |
| 1977 Register tmp5 = a6; | 1977 Register tmp5 = a6; |
| 1978 | 1978 |
| 1979 // Check that both operands are heap objects. | 1979 // Check that both operands are heap objects. |
| 1980 __ JumpIfEitherSmi(left, right, &miss); | 1980 __ JumpIfEitherSmi(left, right, &miss); |
| 1981 | 1981 |
| 1982 // Check that both operands are strings. This leaves the instance | 1982 // Check that both operands are strings. This leaves the instance |
| 1983 // types loaded in tmp1 and tmp2. | 1983 // types loaded in tmp1 and tmp2. |
| 1984 __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); | 1984 __ Ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); |
| 1985 __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); | 1985 __ Ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); |
| 1986 __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); | 1986 __ Lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); |
| 1987 __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); | 1987 __ Lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); |
| 1988 STATIC_ASSERT(kNotStringTag != 0); | 1988 STATIC_ASSERT(kNotStringTag != 0); |
| 1989 __ Or(tmp3, tmp1, tmp2); | 1989 __ Or(tmp3, tmp1, tmp2); |
| 1990 __ And(tmp5, tmp3, Operand(kIsNotStringMask)); | 1990 __ And(tmp5, tmp3, Operand(kIsNotStringMask)); |
| 1991 __ Branch(&miss, ne, tmp5, Operand(zero_reg)); | 1991 __ Branch(&miss, ne, tmp5, Operand(zero_reg)); |
| 1992 | 1992 |
| 1993 // Fast check for identical strings. | 1993 // Fast check for identical strings. |
| 1994 Label left_ne_right; | 1994 Label left_ne_right; |
| 1995 STATIC_ASSERT(EQUAL == 0); | 1995 STATIC_ASSERT(EQUAL == 0); |
| 1996 STATIC_ASSERT(kSmiTag == 0); | 1996 STATIC_ASSERT(kSmiTag == 0); |
| 1997 __ Branch(&left_ne_right, ne, left, Operand(right)); | 1997 __ Branch(&left_ne_right, ne, left, Operand(right)); |
| (...skipping 76 matching lines...) |
| 2074 GenerateMiss(masm); | 2074 GenerateMiss(masm); |
| 2075 } | 2075 } |
| 2076 | 2076 |
| 2077 | 2077 |
| 2078 void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) { | 2078 void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) { |
| 2079 Label miss; | 2079 Label miss; |
| 2080 Handle<WeakCell> cell = Map::WeakCellForMap(known_map_); | 2080 Handle<WeakCell> cell = Map::WeakCellForMap(known_map_); |
| 2081 __ And(a2, a1, a0); | 2081 __ And(a2, a1, a0); |
| 2082 __ JumpIfSmi(a2, &miss); | 2082 __ JumpIfSmi(a2, &miss); |
| 2083 __ GetWeakValue(a4, cell); | 2083 __ GetWeakValue(a4, cell); |
| 2084 __ ld(a2, FieldMemOperand(a0, HeapObject::kMapOffset)); | 2084 __ Ld(a2, FieldMemOperand(a0, HeapObject::kMapOffset)); |
| 2085 __ ld(a3, FieldMemOperand(a1, HeapObject::kMapOffset)); | 2085 __ Ld(a3, FieldMemOperand(a1, HeapObject::kMapOffset)); |
| 2086 __ Branch(&miss, ne, a2, Operand(a4)); | 2086 __ Branch(&miss, ne, a2, Operand(a4)); |
| 2087 __ Branch(&miss, ne, a3, Operand(a4)); | 2087 __ Branch(&miss, ne, a3, Operand(a4)); |
| 2088 | 2088 |
| 2089 if (Token::IsEqualityOp(op())) { | 2089 if (Token::IsEqualityOp(op())) { |
| 2090 __ Ret(USE_DELAY_SLOT); | 2090 __ Ret(USE_DELAY_SLOT); |
| 2091 __ dsubu(v0, a0, a1); | 2091 __ dsubu(v0, a0, a1); |
| 2092 } else { | 2092 } else { |
| 2093 if (op() == Token::LT || op() == Token::LTE) { | 2093 if (op() == Token::LT || op() == Token::LTE) { |
| 2094 __ li(a2, Operand(Smi::FromInt(GREATER))); | 2094 __ li(a2, Operand(Smi::FromInt(GREATER))); |
| 2095 } else { | 2095 } else { |
| (...skipping 11 matching lines...) |
| 2107 void CompareICStub::GenerateMiss(MacroAssembler* masm) { | 2107 void CompareICStub::GenerateMiss(MacroAssembler* masm) { |
| 2108 { | 2108 { |
| 2109 // Call the runtime system in a fresh internal frame. | 2109 // Call the runtime system in a fresh internal frame. |
| 2110 FrameScope scope(masm, StackFrame::INTERNAL); | 2110 FrameScope scope(masm, StackFrame::INTERNAL); |
| 2111 __ Push(a1, a0); | 2111 __ Push(a1, a0); |
| 2112 __ Push(ra, a1, a0); | 2112 __ Push(ra, a1, a0); |
| 2113 __ li(a4, Operand(Smi::FromInt(op()))); | 2113 __ li(a4, Operand(Smi::FromInt(op()))); |
| 2114 __ daddiu(sp, sp, -kPointerSize); | 2114 __ daddiu(sp, sp, -kPointerSize); |
| 2115 __ CallRuntime(Runtime::kCompareIC_Miss, 3, kDontSaveFPRegs, | 2115 __ CallRuntime(Runtime::kCompareIC_Miss, 3, kDontSaveFPRegs, |
| 2116 USE_DELAY_SLOT); | 2116 USE_DELAY_SLOT); |
| 2117 __ sd(a4, MemOperand(sp)); // In the delay slot. | 2117 __ Sd(a4, MemOperand(sp)); // In the delay slot. |
| 2118 // Compute the entry point of the rewritten stub. | 2118 // Compute the entry point of the rewritten stub. |
| 2119 __ Daddu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag)); | 2119 __ Daddu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag)); |
| 2120 // Restore registers. | 2120 // Restore registers. |
| 2121 __ Pop(a1, a0, ra); | 2121 __ Pop(a1, a0, ra); |
| 2122 } | 2122 } |
| 2123 __ Jump(a2); | 2123 __ Jump(a2); |
| 2124 } | 2124 } |
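A small sketch of the re-entry computation at the end of the miss handler: the runtime returns a tagged Code object, and control resumes at its first instruction past the header (the offsets below are stand-ins for Code::kHeaderSize and kHeapObjectTag):

    #include <cstdint>

    uintptr_t RewrittenEntryPoint(uintptr_t tagged_code, int header_size,
                                  int heap_tag) {
      return tagged_code + header_size - heap_tag;  // a2 = v0 + ...
    }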
| 2125 | 2125 |
| 2126 | 2126 |
| 2127 void DirectCEntryStub::Generate(MacroAssembler* masm) { | 2127 void DirectCEntryStub::Generate(MacroAssembler* masm) { |
| 2128 // Make room for arguments to fit the C calling convention. Most callers | 2128 // Make room for arguments to fit the C calling convention. Most callers |
| 2129 // of DirectCEntryStub::GenerateCall use EnterExitFrame/LeaveExitFrame, | 2129 // of DirectCEntryStub::GenerateCall use EnterExitFrame/LeaveExitFrame, |
| 2130 // so they handle stack restoring and we don't have to do that here. | 2130 // so they handle stack restoring and we don't have to do that here. |
| 2131 // Any caller of DirectCEntryStub::GenerateCall must take care of dropping | 2131 // Any caller of DirectCEntryStub::GenerateCall must take care of dropping |
| 2132 // kCArgsSlotsSize stack space after the call. | 2132 // kCArgsSlotsSize stack space after the call. |
| 2133 __ daddiu(sp, sp, -kCArgsSlotsSize); | 2133 __ daddiu(sp, sp, -kCArgsSlotsSize); |
| 2134 // Place the return address on the stack, making the call | 2134 // Place the return address on the stack, making the call |
| 2135 // GC safe. The RegExp backend also relies on this. | 2135 // GC safe. The RegExp backend also relies on this. |
| 2136 __ sd(ra, MemOperand(sp, kCArgsSlotsSize)); | 2136 __ Sd(ra, MemOperand(sp, kCArgsSlotsSize)); |
| 2137 __ Call(t9); // Call the C++ function. | 2137 __ Call(t9); // Call the C++ function. |
| 2138 __ ld(t9, MemOperand(sp, kCArgsSlotsSize)); | 2138 __ Ld(t9, MemOperand(sp, kCArgsSlotsSize)); |
| 2139 | 2139 |
| 2140 if (FLAG_debug_code && FLAG_enable_slow_asserts) { | 2140 if (FLAG_debug_code && FLAG_enable_slow_asserts) { |
| 2141 // In case of an error the return address may point to a memory area | 2141 // In case of an error the return address may point to a memory area |
| 2142 // filled with kZapValue by the GC. | 2142 // filled with kZapValue by the GC. |
| 2143 // Dereference the address and check for this. | 2143 // Dereference the address and check for this. |
| 2144 __ Uld(a4, MemOperand(t9)); | 2144 __ Uld(a4, MemOperand(t9)); |
| 2145 __ Assert(ne, kReceivedInvalidReturnAddress, a4, | 2145 __ Assert(ne, kReceivedInvalidReturnAddress, a4, |
| 2146 Operand(reinterpret_cast<uint64_t>(kZapValue))); | 2146 Operand(reinterpret_cast<uint64_t>(kZapValue))); |
| 2147 } | 2147 } |
| 2148 __ Jump(t9); | 2148 __ Jump(t9); |
| (...skipping 36 matching lines...) |
| 2185 // Scale the index by multiplying by the entry size. | 2185 // Scale the index by multiplying by the entry size. |
| 2186 STATIC_ASSERT(NameDictionary::kEntrySize == 3); | 2186 STATIC_ASSERT(NameDictionary::kEntrySize == 3); |
| 2187 __ Dlsa(index, index, index, 1); // index *= 3. | 2187 __ Dlsa(index, index, index, 1); // index *= 3. |
| 2188 | 2188 |
| 2189 Register entity_name = scratch0; | 2189 Register entity_name = scratch0; |
| 2190 // An undefined entry here means the name is not contained. | 2190 // An undefined entry here means the name is not contained. |
| 2191 STATIC_ASSERT(kSmiTagSize == 1); | 2191 STATIC_ASSERT(kSmiTagSize == 1); |
| 2192 Register tmp = properties; | 2192 Register tmp = properties; |
| 2193 | 2193 |
| 2194 __ Dlsa(tmp, properties, index, kPointerSizeLog2); | 2194 __ Dlsa(tmp, properties, index, kPointerSizeLog2); |
| 2195 __ ld(entity_name, FieldMemOperand(tmp, kElementsStartOffset)); | 2195 __ Ld(entity_name, FieldMemOperand(tmp, kElementsStartOffset)); |
| 2196 | 2196 |
| 2197 DCHECK(!tmp.is(entity_name)); | 2197 DCHECK(!tmp.is(entity_name)); |
| 2198 __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex); | 2198 __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex); |
| 2199 __ Branch(done, eq, entity_name, Operand(tmp)); | 2199 __ Branch(done, eq, entity_name, Operand(tmp)); |
| 2200 | 2200 |
| 2201 // Load the hole ready for use below: | 2201 // Load the hole ready for use below: |
| 2202 __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex); | 2202 __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex); |
| 2203 | 2203 |
| 2204 // Stop if we found the property. | 2204 // Stop if we found the property. |
| 2205 __ Branch(miss, eq, entity_name, Operand(Handle<Name>(name))); | 2205 __ Branch(miss, eq, entity_name, Operand(Handle<Name>(name))); |
| 2206 | 2206 |
| 2207 Label good; | 2207 Label good; |
| 2208 __ Branch(&good, eq, entity_name, Operand(tmp)); | 2208 __ Branch(&good, eq, entity_name, Operand(tmp)); |
| 2209 | 2209 |
| 2210 // Check if the entry name is not a unique name. | 2210 // Check if the entry name is not a unique name. |
| 2211 __ ld(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset)); | 2211 __ Ld(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset)); |
| 2212 __ lbu(entity_name, | 2212 __ Lbu(entity_name, FieldMemOperand(entity_name, Map::kInstanceTypeOffset)); |
| 2213 FieldMemOperand(entity_name, Map::kInstanceTypeOffset)); | |
| 2214 __ JumpIfNotUniqueNameInstanceType(entity_name, miss); | 2213 __ JumpIfNotUniqueNameInstanceType(entity_name, miss); |
| 2215 __ bind(&good); | 2214 __ bind(&good); |
| 2216 | 2215 |
| 2217 // Restore the properties. | 2216 // Restore the properties. |
| 2218 __ ld(properties, | 2217 __ Ld(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); |
| 2219 FieldMemOperand(receiver, JSObject::kPropertiesOffset)); | |
| 2220 } | 2218 } |
| 2221 | 2219 |
| 2222 const int spill_mask = | 2220 const int spill_mask = |
| 2223 (ra.bit() | a6.bit() | a5.bit() | a4.bit() | a3.bit() | | 2221 (ra.bit() | a6.bit() | a5.bit() | a4.bit() | a3.bit() | |
| 2224 a2.bit() | a1.bit() | a0.bit() | v0.bit()); | 2222 a2.bit() | a1.bit() | a0.bit() | v0.bit()); |
| 2225 | 2223 |
| 2226 __ MultiPush(spill_mask); | 2224 __ MultiPush(spill_mask); |
| 2227 __ ld(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); | 2225 __ Ld(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); |
| 2228 __ li(a1, Operand(Handle<Name>(name))); | 2226 __ li(a1, Operand(Handle<Name>(name))); |
| 2229 NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP); | 2227 NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP); |
| 2230 __ CallStub(&stub); | 2228 __ CallStub(&stub); |
| 2231 __ mov(at, v0); | 2229 __ mov(at, v0); |
| 2232 __ MultiPop(spill_mask); | 2230 __ MultiPop(spill_mask); |
| 2233 | 2231 |
| 2234 __ Branch(done, eq, at, Operand(zero_reg)); | 2232 __ Branch(done, eq, at, Operand(zero_reg)); |
| 2235 __ Branch(miss, ne, at, Operand(zero_reg)); | 2233 __ Branch(miss, ne, at, Operand(zero_reg)); |
| 2236 } | 2234 } |
| 2237 | 2235 |
| (...skipping 13 matching lines...) |
| 2251 Register dictionary = a0; | 2249 Register dictionary = a0; |
| 2252 Register key = a1; | 2250 Register key = a1; |
| 2253 Register index = a2; | 2251 Register index = a2; |
| 2254 Register mask = a3; | 2252 Register mask = a3; |
| 2255 Register hash = a4; | 2253 Register hash = a4; |
| 2256 Register undefined = a5; | 2254 Register undefined = a5; |
| 2257 Register entry_key = a6; | 2255 Register entry_key = a6; |
| 2258 | 2256 |
| 2259 Label in_dictionary, maybe_in_dictionary, not_in_dictionary; | 2257 Label in_dictionary, maybe_in_dictionary, not_in_dictionary; |
| 2260 | 2258 |
| 2261 __ ld(mask, FieldMemOperand(dictionary, kCapacityOffset)); | 2259 __ Ld(mask, FieldMemOperand(dictionary, kCapacityOffset)); |
| 2262 __ SmiUntag(mask); | 2260 __ SmiUntag(mask); |
| 2263 __ Dsubu(mask, mask, Operand(1)); | 2261 __ Dsubu(mask, mask, Operand(1)); |
| 2264 | 2262 |
| 2265 __ lwu(hash, FieldMemOperand(key, Name::kHashFieldOffset)); | 2263 __ Lwu(hash, FieldMemOperand(key, Name::kHashFieldOffset)); |
| 2266 | 2264 |
| 2267 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex); | 2265 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex); |
| 2268 | 2266 |
| 2269 for (int i = kInlinedProbes; i < kTotalProbes; i++) { | 2267 for (int i = kInlinedProbes; i < kTotalProbes; i++) { |
| 2270 // Compute the masked index: (hash + i + i * i) & mask. | 2268 // Compute the masked index: (hash + i + i * i) & mask. |
| 2271 // Capacity is a power of two (stored as a smi). | 2269 // Capacity is a power of two (stored as a smi). |
| 2272 if (i > 0) { | 2270 if (i > 0) { |
| 2273 // Add the probe offset (i + i * i) left-shifted, to avoid right-shifting | 2271 // Add the probe offset (i + i * i) left-shifted, to avoid right-shifting |
| 2274 // the hash in a separate instruction. The value hash + i + i * i is | 2272 // the hash in a separate instruction. The value hash + i + i * i is |
| 2275 // right-shifted in the following And instruction. | 2273 // right-shifted in the following And instruction. |
| 2276 DCHECK(NameDictionary::GetProbeOffset(i) < | 2274 DCHECK(NameDictionary::GetProbeOffset(i) < |
| 2277 1 << (32 - Name::kHashFieldOffset)); | 2275 1 << (32 - Name::kHashFieldOffset)); |
| 2278 __ Daddu(index, hash, Operand( | 2276 __ Daddu(index, hash, Operand( |
| 2279 NameDictionary::GetProbeOffset(i) << Name::kHashShift)); | 2277 NameDictionary::GetProbeOffset(i) << Name::kHashShift)); |
| 2280 } else { | 2278 } else { |
| 2281 __ mov(index, hash); | 2279 __ mov(index, hash); |
| 2282 } | 2280 } |
| 2283 __ dsrl(index, index, Name::kHashShift); | 2281 __ dsrl(index, index, Name::kHashShift); |
| 2284 __ And(index, mask, index); | 2282 __ And(index, mask, index); |
| 2285 | 2283 |
| 2286 // Scale the index by multiplying by the entry size. | 2284 // Scale the index by multiplying by the entry size. |
| 2287 STATIC_ASSERT(NameDictionary::kEntrySize == 3); | 2285 STATIC_ASSERT(NameDictionary::kEntrySize == 3); |
| 2288 // index *= 3. | 2286 // index *= 3. |
| 2289 __ Dlsa(index, index, index, 1); | 2287 __ Dlsa(index, index, index, 1); |
| 2290 | 2288 |
| 2291 STATIC_ASSERT(kSmiTagSize == 1); | 2289 STATIC_ASSERT(kSmiTagSize == 1); |
| 2292 __ Dlsa(index, dictionary, index, kPointerSizeLog2); | 2290 __ Dlsa(index, dictionary, index, kPointerSizeLog2); |
| 2293 __ ld(entry_key, FieldMemOperand(index, kElementsStartOffset)); | 2291 __ Ld(entry_key, FieldMemOperand(index, kElementsStartOffset)); |
| 2294 | 2292 |
| 2295 // An undefined entry here means the name is not contained. | 2293 // An undefined entry here means the name is not contained. |
| 2296 __ Branch(¬_in_dictionary, eq, entry_key, Operand(undefined)); | 2294 __ Branch(¬_in_dictionary, eq, entry_key, Operand(undefined)); |
| 2297 | 2295 |
| 2298 // Stop if we found the property. | 2296 // Stop if we found the property. |
| 2299 __ Branch(&in_dictionary, eq, entry_key, Operand(key)); | 2297 __ Branch(&in_dictionary, eq, entry_key, Operand(key)); |
| 2300 | 2298 |
| 2301 if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) { | 2299 if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) { |
| 2302 // Check if the entry name is not a unique name. | 2300 // Check if the entry name is not a unique name. |
| 2303 __ ld(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset)); | 2301 __ Ld(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset)); |
| 2304 __ lbu(entry_key, | 2302 __ Lbu(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset)); |
| 2305 FieldMemOperand(entry_key, Map::kInstanceTypeOffset)); | |
| 2306 __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary); | 2303 __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary); |
| 2307 } | 2304 } |
| 2308 } | 2305 } |
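A plain-C++ sketch of the probe sequence generated in the loop above, assuming a power-of-two capacity (so the And wraps the probe) and the hash already shifted out of the hash field; the final multiply by three matches kEntrySize and is done above with Dlsa as index + (index << 1):

    #include <cstdint>

    // Quadratic probing: probe i inspects bucket (hash + i + i*i) & mask,
    // and each entry spans NameDictionary::kEntrySize == 3 slots.
    uint64_t ProbeSlot(uint32_t hash, uint32_t i, uint32_t mask) {
      uint32_t bucket = (hash + i + i * i) & mask;
      return static_cast<uint64_t>(bucket) * 3;  // index *= 3
    }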
| 2309 | 2306 |
| 2310 __ bind(&maybe_in_dictionary); | 2307 __ bind(&maybe_in_dictionary); |
| 2311 // If we are doing a negative lookup, then probing failure should be | 2308 // If we are doing a negative lookup, then probing failure should be |
| 2312 // treated as a lookup success. For a positive lookup, probing failure | 2309 // treated as a lookup success. For a positive lookup, probing failure |
| 2313 // should be treated as a lookup failure. | 2310 // should be treated as a lookup failure. |
| 2314 if (mode() == POSITIVE_LOOKUP) { | 2311 if (mode() == POSITIVE_LOOKUP) { |
| 2315 __ Ret(USE_DELAY_SLOT); | 2312 __ Ret(USE_DELAY_SLOT); |
| (...skipping 61 matching lines...) |
| 2377 PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize); | 2374 PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize); |
| 2378 } | 2375 } |
| 2379 | 2376 |
| 2380 | 2377 |
| 2381 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) { | 2378 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) { |
| 2382 regs_.Save(masm); | 2379 regs_.Save(masm); |
| 2383 | 2380 |
| 2384 if (remembered_set_action() == EMIT_REMEMBERED_SET) { | 2381 if (remembered_set_action() == EMIT_REMEMBERED_SET) { |
| 2385 Label dont_need_remembered_set; | 2382 Label dont_need_remembered_set; |
| 2386 | 2383 |
| 2387 __ ld(regs_.scratch0(), MemOperand(regs_.address(), 0)); | 2384 __ Ld(regs_.scratch0(), MemOperand(regs_.address(), 0)); |
| 2388 __ JumpIfNotInNewSpace(regs_.scratch0(), // Value. | 2385 __ JumpIfNotInNewSpace(regs_.scratch0(), // Value. |
| 2389 regs_.scratch0(), | 2386 regs_.scratch0(), |
| 2390 &dont_need_remembered_set); | 2387 &dont_need_remembered_set); |
| 2391 | 2388 |
| 2392 __ JumpIfInNewSpace(regs_.object(), regs_.scratch0(), | 2389 __ JumpIfInNewSpace(regs_.object(), regs_.scratch0(), |
| 2393 &dont_need_remembered_set); | 2390 &dont_need_remembered_set); |
| 2394 | 2391 |
| 2395 // First notify the incremental marker if necessary, then update the | 2392 // First notify the incremental marker if necessary, then update the |
| 2396 // remembered set. | 2393 // remembered set. |
| 2397 CheckNeedsToInformIncrementalMarker( | 2394 CheckNeedsToInformIncrementalMarker( |
| (...skipping 57 matching lines...) |
| 2455 value(), | 2452 value(), |
| 2456 save_fp_regs_mode(), | 2453 save_fp_regs_mode(), |
| 2457 MacroAssembler::kReturnAtEnd); | 2454 MacroAssembler::kReturnAtEnd); |
| 2458 } else { | 2455 } else { |
| 2459 __ Ret(); | 2456 __ Ret(); |
| 2460 } | 2457 } |
| 2461 | 2458 |
| 2462 __ bind(&on_black); | 2459 __ bind(&on_black); |
| 2463 | 2460 |
| 2464 // Get the value from the slot. | 2461 // Get the value from the slot. |
| 2465 __ ld(regs_.scratch0(), MemOperand(regs_.address(), 0)); | 2462 __ Ld(regs_.scratch0(), MemOperand(regs_.address(), 0)); |
| 2466 | 2463 |
| 2467 if (mode == INCREMENTAL_COMPACTION) { | 2464 if (mode == INCREMENTAL_COMPACTION) { |
| 2468 Label ensure_not_white; | 2465 Label ensure_not_white; |
| 2469 | 2466 |
| 2470 __ CheckPageFlag(regs_.scratch0(), // Contains value. | 2467 __ CheckPageFlag(regs_.scratch0(), // Contains value. |
| 2471 regs_.scratch1(), // Scratch. | 2468 regs_.scratch1(), // Scratch. |
| 2472 MemoryChunk::kEvacuationCandidateMask, | 2469 MemoryChunk::kEvacuationCandidateMask, |
| 2473 eq, | 2470 eq, |
| 2474 &ensure_not_white); | 2471 &ensure_not_white); |
| 2475 | 2472 |
| (...skipping 34 matching lines...) |
| 2510 | 2507 |
| 2511 // Fall through when we need to inform the incremental marker. | 2508 // Fall through when we need to inform the incremental marker. |
| 2512 } | 2509 } |
| 2513 | 2510 |
| 2514 | 2511 |
| 2515 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) { | 2512 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) { |
| 2516 CEntryStub ces(isolate(), 1, kSaveFPRegs); | 2513 CEntryStub ces(isolate(), 1, kSaveFPRegs); |
| 2517 __ Call(ces.GetCode(), RelocInfo::CODE_TARGET); | 2514 __ Call(ces.GetCode(), RelocInfo::CODE_TARGET); |
| 2518 int parameter_count_offset = | 2515 int parameter_count_offset = |
| 2519 StubFailureTrampolineFrameConstants::kArgumentsLengthOffset; | 2516 StubFailureTrampolineFrameConstants::kArgumentsLengthOffset; |
| 2520 __ ld(a1, MemOperand(fp, parameter_count_offset)); | 2517 __ Ld(a1, MemOperand(fp, parameter_count_offset)); |
| 2521 if (function_mode() == JS_FUNCTION_STUB_MODE) { | 2518 if (function_mode() == JS_FUNCTION_STUB_MODE) { |
| 2522 __ Daddu(a1, a1, Operand(1)); | 2519 __ Daddu(a1, a1, Operand(1)); |
| 2523 } | 2520 } |
| 2524 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE); | 2521 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE); |
| 2525 __ dsll(a1, a1, kPointerSizeLog2); | 2522 __ dsll(a1, a1, kPointerSizeLog2); |
| 2526 __ Ret(USE_DELAY_SLOT); | 2523 __ Ret(USE_DELAY_SLOT); |
| 2527 __ Daddu(sp, sp, a1); | 2524 __ Daddu(sp, sp, a1); |
| 2528 } | 2525 } |
| 2529 | 2526 |
| 2530 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) { | 2527 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) { |
| (...skipping 107 matching lines...) |
| 2638 STATIC_ASSERT(FAST_ELEMENTS == 2); | 2635 STATIC_ASSERT(FAST_ELEMENTS == 2); |
| 2639 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); | 2636 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); |
| 2640 STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4); | 2637 STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4); |
| 2641 STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5); | 2638 STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5); |
| 2642 | 2639 |
| 2643 // Is the low bit set? If so, we are holey, and that is good. | 2640 // Is the low bit set? If so, we are holey, and that is good. |
| 2644 __ And(at, a3, Operand(1)); | 2641 __ And(at, a3, Operand(1)); |
| 2645 __ Branch(&normal_sequence, ne, at, Operand(zero_reg)); | 2642 __ Branch(&normal_sequence, ne, at, Operand(zero_reg)); |
| 2646 } | 2643 } |
| 2647 // Look at the first argument. | 2644 // Look at the first argument. |
| 2648 __ ld(a5, MemOperand(sp, 0)); | 2645 __ Ld(a5, MemOperand(sp, 0)); |
| 2649 __ Branch(&normal_sequence, eq, a5, Operand(zero_reg)); | 2646 __ Branch(&normal_sequence, eq, a5, Operand(zero_reg)); |
| 2650 | 2647 |
| 2651 if (mode == DISABLE_ALLOCATION_SITES) { | 2648 if (mode == DISABLE_ALLOCATION_SITES) { |
| 2652 ElementsKind initial = GetInitialFastElementsKind(); | 2649 ElementsKind initial = GetInitialFastElementsKind(); |
| 2653 ElementsKind holey_initial = GetHoleyElementsKind(initial); | 2650 ElementsKind holey_initial = GetHoleyElementsKind(initial); |
| 2654 | 2651 |
| 2655 ArraySingleArgumentConstructorStub stub_holey(masm->isolate(), | 2652 ArraySingleArgumentConstructorStub stub_holey(masm->isolate(), |
| 2656 holey_initial, | 2653 holey_initial, |
| 2657 DISABLE_ALLOCATION_SITES); | 2654 DISABLE_ALLOCATION_SITES); |
| 2658 __ TailCallStub(&stub_holey); | 2655 __ TailCallStub(&stub_holey); |
| 2659 | 2656 |
| 2660 __ bind(&normal_sequence); | 2657 __ bind(&normal_sequence); |
| 2661 ArraySingleArgumentConstructorStub stub(masm->isolate(), | 2658 ArraySingleArgumentConstructorStub stub(masm->isolate(), |
| 2662 initial, | 2659 initial, |
| 2663 DISABLE_ALLOCATION_SITES); | 2660 DISABLE_ALLOCATION_SITES); |
| 2664 __ TailCallStub(&stub); | 2661 __ TailCallStub(&stub); |
| 2665 } else if (mode == DONT_OVERRIDE) { | 2662 } else if (mode == DONT_OVERRIDE) { |
| 2666 // We are going to create a holey array, but our kind is non-holey. | 2663 // We are going to create a holey array, but our kind is non-holey. |
| 2667 // Fix kind and retry (only if we have an allocation site in the slot). | 2664 // Fix kind and retry (only if we have an allocation site in the slot). |
| 2668 __ Daddu(a3, a3, Operand(1)); | 2665 __ Daddu(a3, a3, Operand(1)); |
| 2669 | 2666 |
| 2670 if (FLAG_debug_code) { | 2667 if (FLAG_debug_code) { |
| 2671 __ ld(a5, FieldMemOperand(a2, 0)); | 2668 __ Ld(a5, FieldMemOperand(a2, 0)); |
| 2672 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex); | 2669 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex); |
| 2673 __ Assert(eq, kExpectedAllocationSite, a5, Operand(at)); | 2670 __ Assert(eq, kExpectedAllocationSite, a5, Operand(at)); |
| 2674 } | 2671 } |
| 2675 | 2672 |
| 2676 // Save the resulting elements kind in type info. We can't just store a3 | 2673 // Save the resulting elements kind in type info. We can't just store a3 |
| 2677 // in the AllocationSite::transition_info field because the elements kind is | 2674 // in the AllocationSite::transition_info field because the elements kind is |
| 2678 // restricted to a portion of the field; the upper bits must be left alone. | 2675 // restricted to a portion of the field; the upper bits must be left alone. |
| 2679 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0); | 2676 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0); |
| 2680 __ ld(a4, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset)); | 2677 __ Ld(a4, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset)); |
| 2681 __ Daddu(a4, a4, Operand(Smi::FromInt(kFastElementsKindPackedToHoley))); | 2678 __ Daddu(a4, a4, Operand(Smi::FromInt(kFastElementsKindPackedToHoley))); |
| 2682 __ sd(a4, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset)); | 2679 __ Sd(a4, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset)); |
| 2683 | |
| 2684 | 2680 |
| 2685 __ bind(&normal_sequence); | 2681 __ bind(&normal_sequence); |
| 2686 int last_index = GetSequenceIndexFromFastElementsKind( | 2682 int last_index = GetSequenceIndexFromFastElementsKind( |
| 2687 TERMINAL_FAST_ELEMENTS_KIND); | 2683 TERMINAL_FAST_ELEMENTS_KIND); |
| 2688 for (int i = 0; i <= last_index; ++i) { | 2684 for (int i = 0; i <= last_index; ++i) { |
| 2689 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); | 2685 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); |
| 2690 ArraySingleArgumentConstructorStub stub(masm->isolate(), kind); | 2686 ArraySingleArgumentConstructorStub stub(masm->isolate(), kind); |
| 2691 __ TailCallStub(&stub, eq, a3, Operand(kind)); | 2687 __ TailCallStub(&stub, eq, a3, Operand(kind)); |
| 2692 } | 2688 } |
| 2693 | 2689 |
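The transition-info update above relies on the kind layout asserted earlier in this section: packed kinds are even and sit one below their holey variants (FAST_ELEMENTS == 2, FAST_HOLEY_ELEMENTS == 3, and so on), and the kind occupies the low bits of the field (kShift == 0), so a plain smi addition upgrades packed to holey without disturbing the upper bits. A sketch, assuming the packed-to-holey distance is 1:

    #include <cstdint>

    // The low bit means "holey", so +1 upgrades a packed kind.
    int ToHoley(int packed_kind) { return packed_kind + 1; }

    // The same +1, applied as a smi to the whole transition_info word:
    // only the low kind bits change, so the upper bits are preserved.
    int64_t UpgradeTransitionInfo(int64_t info_smi, int64_t one_as_smi) {
      return info_smi + one_as_smi;
    }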
| (...skipping 63 matching lines...) |
| 2757 // -- a2 : AllocationSite or undefined | 2753 // -- a2 : AllocationSite or undefined |
| 2758 // -- a3 : new target | 2754 // -- a3 : new target |
| 2759 // -- sp[0] : last argument | 2755 // -- sp[0] : last argument |
| 2760 // ----------------------------------- | 2756 // ----------------------------------- |
| 2761 | 2757 |
| 2762 if (FLAG_debug_code) { | 2758 if (FLAG_debug_code) { |
| 2763 // The array construct code is only set for the global and natives | 2759 // The array construct code is only set for the global and natives |
| 2764 // builtin Array functions, which always have maps. | 2760 // builtin Array functions, which always have maps. |
| 2765 | 2761 |
| 2766 // Initial map for the builtin Array function should be a map. | 2762 // Initial map for the builtin Array function should be a map. |
| 2767 __ ld(a4, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset)); | 2763 __ Ld(a4, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset)); |
| 2768 // This will catch both a NULL and a Smi. | 2764 // This will catch both a NULL and a Smi. |
| 2769 __ SmiTst(a4, at); | 2765 __ SmiTst(a4, at); |
| 2770 __ Assert(ne, kUnexpectedInitialMapForArrayFunction, | 2766 __ Assert(ne, kUnexpectedInitialMapForArrayFunction, |
| 2771 at, Operand(zero_reg)); | 2767 at, Operand(zero_reg)); |
| 2772 __ GetObjectType(a4, a4, a5); | 2768 __ GetObjectType(a4, a4, a5); |
| 2773 __ Assert(eq, kUnexpectedInitialMapForArrayFunction, | 2769 __ Assert(eq, kUnexpectedInitialMapForArrayFunction, |
| 2774 a5, Operand(MAP_TYPE)); | 2770 a5, Operand(MAP_TYPE)); |
| 2775 | 2771 |
| 2776 // We should either have undefined in a2 or a valid AllocationSite | 2772 // We should either have undefined in a2 or a valid AllocationSite |
| 2777 __ AssertUndefinedOrAllocationSite(a2, a4); | 2773 __ AssertUndefinedOrAllocationSite(a2, a4); |
| 2778 } | 2774 } |
| 2779 | 2775 |
| 2780 // Enter the context of the Array function. | 2776 // Enter the context of the Array function. |
| 2781 __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); | 2777 __ Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); |
| 2782 | 2778 |
| 2783 Label subclassing; | 2779 Label subclassing; |
| 2784 __ Branch(&subclassing, ne, a1, Operand(a3)); | 2780 __ Branch(&subclassing, ne, a1, Operand(a3)); |
| 2785 | 2781 |
| 2786 Label no_info; | 2782 Label no_info; |
| 2787 // Get the elements kind and case on that. | 2783 // Get the elements kind and case on that. |
| 2788 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); | 2784 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); |
| 2789 __ Branch(&no_info, eq, a2, Operand(at)); | 2785 __ Branch(&no_info, eq, a2, Operand(at)); |
| 2790 | 2786 |
| 2791 __ ld(a3, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset)); | 2787 __ Ld(a3, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset)); |
| 2792 __ SmiUntag(a3); | 2788 __ SmiUntag(a3); |
| 2793 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0); | 2789 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0); |
| 2794 __ And(a3, a3, Operand(AllocationSite::ElementsKindBits::kMask)); | 2790 __ And(a3, a3, Operand(AllocationSite::ElementsKindBits::kMask)); |
| 2795 GenerateDispatchToArrayStub(masm, DONT_OVERRIDE); | 2791 GenerateDispatchToArrayStub(masm, DONT_OVERRIDE); |
| 2796 | 2792 |
| 2797 __ bind(&no_info); | 2793 __ bind(&no_info); |
| 2798 GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES); | 2794 GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES); |
| 2799 | 2795 |
| 2800 // Subclassing. | 2796 // Subclassing. |
| 2801 __ bind(&subclassing); | 2797 __ bind(&subclassing); |
| 2802 __ Dlsa(at, sp, a0, kPointerSizeLog2); | 2798 __ Dlsa(at, sp, a0, kPointerSizeLog2); |
| 2803 __ sd(a1, MemOperand(at)); | 2799 __ Sd(a1, MemOperand(at)); |
| 2804 __ li(at, Operand(3)); | 2800 __ li(at, Operand(3)); |
| 2805 __ Daddu(a0, a0, at); | 2801 __ Daddu(a0, a0, at); |
| 2806 __ Push(a3, a2); | 2802 __ Push(a3, a2); |
| 2807 __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate())); | 2803 __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate())); |
| 2808 } | 2804 } |
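A sketch of the site decoding a few lines up, grounded in the STATIC_ASSERT that ElementsKindBits::kShift == 0; the mask value below is an assumption, not V8's actual constant:

    #include <cstdint>

    int ElementsKindFromTransitionInfo(int64_t untagged_info) {
      const int64_t kElementsKindMask = 0x3f;  // assumed ElementsKindBits::kMask
      return static_cast<int>(untagged_info & kElementsKindMask);  // kShift == 0
    }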
| 2809 | 2805 |
| 2810 | 2806 |
| 2811 void InternalArrayConstructorStub::GenerateCase( | 2807 void InternalArrayConstructorStub::GenerateCase( |
| 2812 MacroAssembler* masm, ElementsKind kind) { | 2808 MacroAssembler* masm, ElementsKind kind) { |
| 2813 | 2809 |
| 2814 InternalArrayNoArgumentConstructorStub stub0(isolate(), kind); | 2810 InternalArrayNoArgumentConstructorStub stub0(isolate(), kind); |
| 2815 __ TailCallStub(&stub0, lo, a0, Operand(1)); | 2811 __ TailCallStub(&stub0, lo, a0, Operand(1)); |
| 2816 | 2812 |
| 2817 ArrayNArgumentsConstructorStub stubN(isolate()); | 2813 ArrayNArgumentsConstructorStub stubN(isolate()); |
| 2818 __ TailCallStub(&stubN, hi, a0, Operand(1)); | 2814 __ TailCallStub(&stubN, hi, a0, Operand(1)); |
| 2819 | 2815 |
| 2820 if (IsFastPackedElementsKind(kind)) { | 2816 if (IsFastPackedElementsKind(kind)) { |
| 2821 // We might need to create a holey array; | 2817 // We might need to create a holey array; |
| 2822 // look at the first argument. | 2818 // look at the first argument. |
| 2823 __ ld(at, MemOperand(sp, 0)); | 2819 __ Ld(at, MemOperand(sp, 0)); |
| 2824 | 2820 |
| 2825 InternalArraySingleArgumentConstructorStub | 2821 InternalArraySingleArgumentConstructorStub |
| 2826 stub1_holey(isolate(), GetHoleyElementsKind(kind)); | 2822 stub1_holey(isolate(), GetHoleyElementsKind(kind)); |
| 2827 __ TailCallStub(&stub1_holey, ne, at, Operand(zero_reg)); | 2823 __ TailCallStub(&stub1_holey, ne, at, Operand(zero_reg)); |
| 2828 } | 2824 } |
| 2829 | 2825 |
| 2830 InternalArraySingleArgumentConstructorStub stub1(isolate(), kind); | 2826 InternalArraySingleArgumentConstructorStub stub1(isolate(), kind); |
| 2831 __ TailCallStub(&stub1); | 2827 __ TailCallStub(&stub1); |
| 2832 } | 2828 } |
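The argc dispatch in GenerateCase above uses unsigned compares against 1: below one, above one, and the fall-through single-argument case, which for a packed kind is further split on a non-zero first argument (the requested length). A hedged C++ rendering with our own names:

    #include <cstdint>

    enum class Stub { kNoArgument, kNArguments, kSingleHoley, kSingle };

    Stub DispatchInternalArray(uint32_t argc, bool packed_kind,
                               uint64_t first_arg) {
      if (argc < 1) return Stub::kNoArgument;  // lo branch
      if (argc > 1) return Stub::kNArguments;  // hi branch
      if (packed_kind && first_arg != 0)
        return Stub::kSingleHoley;  // non-zero length -> holey array
      return Stub::kSingle;
    }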
| 2833 | 2829 |
| 2834 | 2830 |
| 2835 void InternalArrayConstructorStub::Generate(MacroAssembler* masm) { | 2831 void InternalArrayConstructorStub::Generate(MacroAssembler* masm) { |
| 2836 // ----------- S t a t e ------------- | 2832 // ----------- S t a t e ------------- |
| 2837 // -- a0 : argc | 2833 // -- a0 : argc |
| 2838 // -- a1 : constructor | 2834 // -- a1 : constructor |
| 2839 // -- sp[0] : return address | 2835 // -- sp[0] : return address |
| 2840 // -- sp[4] : last argument | 2836 // -- sp[4] : last argument |
| 2841 // ----------------------------------- | 2837 // ----------------------------------- |
| 2842 | 2838 |
| 2843 if (FLAG_debug_code) { | 2839 if (FLAG_debug_code) { |
| 2844 // The array construct code is only set for the global and natives | 2840 // The array construct code is only set for the global and natives |
| 2845 // builtin Array functions, which always have maps. | 2841 // builtin Array functions, which always have maps. |
| 2846 | 2842 |
| 2847 // Initial map for the builtin Array function should be a map. | 2843 // Initial map for the builtin Array function should be a map. |
| 2848 __ ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset)); | 2844 __ Ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset)); |
| 2849 // This will catch both a NULL and a Smi. | 2845 // This will catch both a NULL and a Smi. |
| 2850 __ SmiTst(a3, at); | 2846 __ SmiTst(a3, at); |
| 2851 __ Assert(ne, kUnexpectedInitialMapForArrayFunction, | 2847 __ Assert(ne, kUnexpectedInitialMapForArrayFunction, |
| 2852 at, Operand(zero_reg)); | 2848 at, Operand(zero_reg)); |
| 2853 __ GetObjectType(a3, a3, a4); | 2849 __ GetObjectType(a3, a3, a4); |
| 2854 __ Assert(eq, kUnexpectedInitialMapForArrayFunction, | 2850 __ Assert(eq, kUnexpectedInitialMapForArrayFunction, |
| 2855 a4, Operand(MAP_TYPE)); | 2851 a4, Operand(MAP_TYPE)); |
| 2856 } | 2852 } |
| 2857 | 2853 |
| 2858 // Figure out the right elements kind. | 2854 // Figure out the right elements kind. |
| 2859 __ ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset)); | 2855 __ Ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset)); |
| 2860 | 2856 |
| 2861 // Load the map's "bit field 2" into a3. We only need the first byte, | 2857 // Load the map's "bit field 2" into a3. We only need the first byte, |
| 2862 // but the following bit field extraction takes care of that anyway. | 2858 // but the following bit field extraction takes care of that anyway. |
| 2863 __ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset)); | 2859 __ Lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset)); |
| 2864 // Retrieve elements_kind from bit field 2. | 2860 // Retrieve elements_kind from bit field 2. |
| 2865 __ DecodeField<Map::ElementsKindBits>(a3); | 2861 __ DecodeField<Map::ElementsKindBits>(a3); |
| 2866 | 2862 |
| 2867 if (FLAG_debug_code) { | 2863 if (FLAG_debug_code) { |
| 2868 Label done; | 2864 Label done; |
| 2869 __ Branch(&done, eq, a3, Operand(FAST_ELEMENTS)); | 2865 __ Branch(&done, eq, a3, Operand(FAST_ELEMENTS)); |
| 2870 __ Assert( | 2866 __ Assert( |
| 2871 eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray, | 2867 eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray, |
| 2872 a3, Operand(FAST_HOLEY_ELEMENTS)); | 2868 a3, Operand(FAST_HOLEY_ELEMENTS)); |
| 2873 __ bind(&done); | 2869 __ bind(&done); |
| (...skipping 29 matching lines...) |
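Before the next hunk: the elements-kind extraction above is a generic BitField decode; a sketch where the shift and width are explicit parameters rather than V8's actual Map::ElementsKindBits values:

    #include <cstdint>

    // DecodeField<F>(reg) ~ (reg >> F::kShift) & ((1 << F::kSize) - 1)
    uint32_t DecodeBitField(uint32_t word, int shift, int size) {
      return (word >> shift) & ((1u << size) - 1);
    }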
| 2903 const int kLimitOffset = AddressOffset( | 2899 const int kLimitOffset = AddressOffset( |
| 2904 ExternalReference::handle_scope_limit_address(isolate), next_address); | 2900 ExternalReference::handle_scope_limit_address(isolate), next_address); |
| 2905 const int kLevelOffset = AddressOffset( | 2901 const int kLevelOffset = AddressOffset( |
| 2906 ExternalReference::handle_scope_level_address(isolate), next_address); | 2902 ExternalReference::handle_scope_level_address(isolate), next_address); |
| 2907 | 2903 |
| 2908 DCHECK(function_address.is(a1) || function_address.is(a2)); | 2904 DCHECK(function_address.is(a1) || function_address.is(a2)); |
| 2909 | 2905 |
| 2910 Label profiler_disabled; | 2906 Label profiler_disabled; |
| 2911 Label end_profiler_check; | 2907 Label end_profiler_check; |
| 2912 __ li(t9, Operand(ExternalReference::is_profiling_address(isolate))); | 2908 __ li(t9, Operand(ExternalReference::is_profiling_address(isolate))); |
| 2913 __ lb(t9, MemOperand(t9, 0)); | 2909 __ Lb(t9, MemOperand(t9, 0)); |
| 2914 __ Branch(&profiler_disabled, eq, t9, Operand(zero_reg)); | 2910 __ Branch(&profiler_disabled, eq, t9, Operand(zero_reg)); |
| 2915 | 2911 |
| 2916 // Additional parameter is the address of the actual callback. | 2912 // Additional parameter is the address of the actual callback. |
| 2917 __ li(t9, Operand(thunk_ref)); | 2913 __ li(t9, Operand(thunk_ref)); |
| 2918 __ jmp(&end_profiler_check); | 2914 __ jmp(&end_profiler_check); |
| 2919 | 2915 |
| 2920 __ bind(&profiler_disabled); | 2916 __ bind(&profiler_disabled); |
| 2921 __ mov(t9, function_address); | 2917 __ mov(t9, function_address); |
| 2922 __ bind(&end_profiler_check); | 2918 __ bind(&end_profiler_check); |
| 2923 | 2919 |
| 2924 // Allocate HandleScope in callee-save registers. | 2920 // Allocate HandleScope in callee-save registers. |
| 2925 __ li(s3, Operand(next_address)); | 2921 __ li(s3, Operand(next_address)); |
| 2926 __ ld(s0, MemOperand(s3, kNextOffset)); | 2922 __ Ld(s0, MemOperand(s3, kNextOffset)); |
| 2927 __ ld(s1, MemOperand(s3, kLimitOffset)); | 2923 __ Ld(s1, MemOperand(s3, kLimitOffset)); |
| 2928 __ lw(s2, MemOperand(s3, kLevelOffset)); | 2924 __ Lw(s2, MemOperand(s3, kLevelOffset)); |
| 2929 __ Addu(s2, s2, Operand(1)); | 2925 __ Addu(s2, s2, Operand(1)); |
| 2930 __ sw(s2, MemOperand(s3, kLevelOffset)); | 2926 __ Sw(s2, MemOperand(s3, kLevelOffset)); |
| 2931 | 2927 |
| 2932 if (FLAG_log_timer_events) { | 2928 if (FLAG_log_timer_events) { |
| 2933 FrameScope frame(masm, StackFrame::MANUAL); | 2929 FrameScope frame(masm, StackFrame::MANUAL); |
| 2934 __ PushSafepointRegisters(); | 2930 __ PushSafepointRegisters(); |
| 2935 __ PrepareCallCFunction(1, a0); | 2931 __ PrepareCallCFunction(1, a0); |
| 2936 __ li(a0, Operand(ExternalReference::isolate_address(isolate))); | 2932 __ li(a0, Operand(ExternalReference::isolate_address(isolate))); |
| 2937 __ CallCFunction(ExternalReference::log_enter_external_function(isolate), | 2933 __ CallCFunction(ExternalReference::log_enter_external_function(isolate), |
| 2938 1); | 2934 1); |
| 2939 __ PopSafepointRegisters(); | 2935 __ PopSafepointRegisters(); |
| 2940 } | 2936 } |
| (...skipping 13 matching lines...) |
| 2954 1); | 2950 1); |
| 2955 __ PopSafepointRegisters(); | 2951 __ PopSafepointRegisters(); |
| 2956 } | 2952 } |
| 2957 | 2953 |
| 2958 Label promote_scheduled_exception; | 2954 Label promote_scheduled_exception; |
| 2959 Label delete_allocated_handles; | 2955 Label delete_allocated_handles; |
| 2960 Label leave_exit_frame; | 2956 Label leave_exit_frame; |
| 2961 Label return_value_loaded; | 2957 Label return_value_loaded; |
| 2962 | 2958 |
| 2963 // Load value from ReturnValue. | 2959 // Load value from ReturnValue. |
| 2964 __ ld(v0, return_value_operand); | 2960 __ Ld(v0, return_value_operand); |
| 2965 __ bind(&return_value_loaded); | 2961 __ bind(&return_value_loaded); |
| 2966 | 2962 |
| 2967 // No more valid handles (the result handle was the last one). Restore | 2963 // No more valid handles (the result handle was the last one). Restore |
| 2968 // previous handle scope. | 2964 // previous handle scope. |
| 2969 __ sd(s0, MemOperand(s3, kNextOffset)); | 2965 __ Sd(s0, MemOperand(s3, kNextOffset)); |
| 2970 if (__ emit_debug_code()) { | 2966 if (__ emit_debug_code()) { |
| 2971 __ lw(a1, MemOperand(s3, kLevelOffset)); | 2967 __ Lw(a1, MemOperand(s3, kLevelOffset)); |
| 2972 __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2)); | 2968 __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2)); |
| 2973 } | 2969 } |
| 2974 __ Subu(s2, s2, Operand(1)); | 2970 __ Subu(s2, s2, Operand(1)); |
| 2975 __ sw(s2, MemOperand(s3, kLevelOffset)); | 2971 __ Sw(s2, MemOperand(s3, kLevelOffset)); |
| 2976 __ ld(at, MemOperand(s3, kLimitOffset)); | 2972 __ Ld(at, MemOperand(s3, kLimitOffset)); |
| 2977 __ Branch(&delete_allocated_handles, ne, s1, Operand(at)); | 2973 __ Branch(&delete_allocated_handles, ne, s1, Operand(at)); |
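The teardown mirrors the prologue: next is restored, the level decremented, and the stored limit compared against the cached s1 to decide whether extension blocks must be deleted. Sketch under the same illustrative layout as above:

```cpp
#include <cstdint>

// Same illustrative HandleScopeData as in the prologue sketch.
struct HandleScopeData {
  uintptr_t* next;
  uintptr_t* limit;
  int32_t level;
};

// entry_level is the post-increment value held in s2. Returns true when
// the fast path applies (limit unchanged, no extensions to delete).
bool RestoreHandleScope(HandleScopeData* d, uintptr_t* saved_next,
                        uintptr_t* saved_limit, int32_t entry_level) {
  d->next = saved_next;            // __ Sd(s0, MemOperand(s3, kNextOffset))
  d->level = entry_level - 1;      // __ Subu(s2, s2, 1); __ Sw(s2, ...)
  return d->limit == saved_limit;  // ne => &delete_allocated_handles
}
```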
| 2978 | 2974 |
| 2979 // Leave the API exit frame. | 2975 // Leave the API exit frame. |
| 2980 __ bind(&leave_exit_frame); | 2976 __ bind(&leave_exit_frame); |
| 2981 | 2977 |
| 2982 bool restore_context = context_restore_operand != NULL; | 2978 bool restore_context = context_restore_operand != NULL; |
| 2983 if (restore_context) { | 2979 if (restore_context) { |
| 2984 __ ld(cp, *context_restore_operand); | 2980 __ Ld(cp, *context_restore_operand); |
| 2985 } | 2981 } |
| 2986 if (stack_space_offset != kInvalidStackOffset) { | 2982 if (stack_space_offset != kInvalidStackOffset) { |
| 2987 DCHECK(kCArgsSlotsSize == 0); | 2983 DCHECK(kCArgsSlotsSize == 0); |
| 2988 __ ld(s0, MemOperand(sp, stack_space_offset)); | 2984 __ Ld(s0, MemOperand(sp, stack_space_offset)); |
| 2989 } else { | 2985 } else { |
| 2990 __ li(s0, Operand(stack_space)); | 2986 __ li(s0, Operand(stack_space)); |
| 2991 } | 2987 } |
| 2992 __ LeaveExitFrame(false, s0, !restore_context, NO_EMIT_RETURN, | 2988 __ LeaveExitFrame(false, s0, !restore_context, NO_EMIT_RETURN, |
| 2993 stack_space_offset != kInvalidStackOffset); | 2989 stack_space_offset != kInvalidStackOffset); |
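Whether the pop count comes from an immediate or is read back from the frame is decided by the kInvalidStackOffset sentinel. A small sketch of that selection (the sentinel value here is an assumption):

```cpp
#include <cstddef>
#include <cstdint>

// Assumed sentinel; only its inequality with real offsets matters.
constexpr int32_t kInvalidStackOffset = INT32_MIN;

size_t StackSlotsToDrop(const size_t* sp, int32_t stack_space_offset,
                        size_t static_stack_space) {
  if (stack_space_offset != kInvalidStackOffset) {
    // __ Ld(s0, MemOperand(sp, stack_space_offset)): runtime-determined.
    return sp[stack_space_offset / sizeof(size_t)];
  }
  return static_stack_space;  // __ li(s0, Operand(stack_space))
}
```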
| 2994 | 2990 |
| 2995 // Check if the function scheduled an exception. | 2991 // Check if the function scheduled an exception. |
| 2996 __ LoadRoot(a4, Heap::kTheHoleValueRootIndex); | 2992 __ LoadRoot(a4, Heap::kTheHoleValueRootIndex); |
| 2997 __ li(at, Operand(ExternalReference::scheduled_exception_address(isolate))); | 2993 __ li(at, Operand(ExternalReference::scheduled_exception_address(isolate))); |
| 2998 __ ld(a5, MemOperand(at)); | 2994 __ Ld(a5, MemOperand(at)); |
| 2999 __ Branch(&promote_scheduled_exception, ne, a4, Operand(a5)); | 2995 __ Branch(&promote_scheduled_exception, ne, a4, Operand(a5)); |
| 3000 | 2996 |
| 3001 __ Ret(); | 2997 __ Ret(); |
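An API callback cannot throw directly; it schedules an exception on the isolate, and the hole value marks the empty state. Sketch of the check (illustrative struct, not the real Isolate layout):

```cpp
// Illustrative slots; the stub reads them via a root index and an external
// reference, not via a struct.
struct ExceptionSlots {
  const void* scheduled_exception;  // scheduled_exception_address(isolate)
  const void* the_hole_value;       // Heap::kTheHoleValueRootIndex
};

// "Hole" means nothing is pending; anything else must be promoted
// (re-thrown) via Runtime::kPromoteScheduledException instead of returning.
bool MustPromoteScheduledException(const ExceptionSlots& s) {
  return s.scheduled_exception != s.the_hole_value;  // Branch(..., ne, a4, a5)
}
```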
| 3002 | 2998 |
| 3003 // Re-throw by promoting a scheduled exception. | 2999 // Re-throw by promoting a scheduled exception. |
| 3004 __ bind(&promote_scheduled_exception); | 3000 __ bind(&promote_scheduled_exception); |
| 3005 __ TailCallRuntime(Runtime::kPromoteScheduledException); | 3001 __ TailCallRuntime(Runtime::kPromoteScheduledException); |
| 3006 | 3002 |
| 3007 // HandleScope limit has changed. Delete allocated extensions. | 3003 // HandleScope limit has changed. Delete allocated extensions. |
| 3008 __ bind(&delete_allocated_handles); | 3004 __ bind(&delete_allocated_handles); |
| 3009 __ sd(s1, MemOperand(s3, kLimitOffset)); | 3005 __ Sd(s1, MemOperand(s3, kLimitOffset)); |
| 3010 __ mov(s0, v0); | 3006 __ mov(s0, v0); |
| 3011 __ mov(a0, v0); | 3007 __ mov(a0, v0); |
| 3012 __ PrepareCallCFunction(1, s1); | 3008 __ PrepareCallCFunction(1, s1); |
| 3013 __ li(a0, Operand(ExternalReference::isolate_address(isolate))); | 3009 __ li(a0, Operand(ExternalReference::isolate_address(isolate))); |
| 3014 __ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate), | 3010 __ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate), |
| 3015 1); | 3011 1); |
| 3016 __ mov(v0, s0); | 3012 __ mov(v0, s0); |
| 3017 __ jmp(&leave_exit_frame); | 3013 __ jmp(&leave_exit_frame); |
| 3018 } | 3014 } |
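For the slow path bound at &delete_allocated_handles above: v0 is spilled to s0 because the C call clobbers it, and control then resumes at &leave_exit_frame. A hedged C++ model of the same steps:

```cpp
#include <cstdint>

// delete_extensions stands in for
// ExternalReference::delete_handle_scope_extensions(isolate).
void* DeleteHandlesSlowPath(uintptr_t** limit_slot, uintptr_t* saved_limit,
                            void (*delete_extensions)(void*), void* isolate,
                            void* result /* v0 */) {
  *limit_slot = saved_limit;   // __ Sd(s1, MemOperand(s3, kLimitOffset))
  delete_extensions(isolate);  // clobbers v0, hence the spill to s0
  return result;               // __ mov(v0, s0); then jmp &leave_exit_frame
}
```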
| 3019 | 3015 |
| (...skipping 29 matching lines...) |
| 3049 STATIC_ASSERT(FCA::kNewTargetIndex == 7); | 3045 STATIC_ASSERT(FCA::kNewTargetIndex == 7); |
| 3050 STATIC_ASSERT(FCA::kArgsLength == 8); | 3046 STATIC_ASSERT(FCA::kArgsLength == 8); |
| 3051 | 3047 |
| 3052 // new target | 3048 // new target |
| 3053 __ PushRoot(Heap::kUndefinedValueRootIndex); | 3049 __ PushRoot(Heap::kUndefinedValueRootIndex); |
| 3054 | 3050 |
| 3055 // Save context, callee and call data. | 3051 // Save context, callee and call data. |
| 3056 __ Push(context, callee, call_data); | 3052 __ Push(context, callee, call_data); |
| 3057 if (!is_lazy()) { | 3053 if (!is_lazy()) { |
| 3058 // Load context from callee. | 3054 // Load context from callee. |
| 3059 __ ld(context, FieldMemOperand(callee, JSFunction::kContextOffset)); | 3055 __ Ld(context, FieldMemOperand(callee, JSFunction::kContextOffset)); |
| 3060 } | 3056 } |
| 3061 | 3057 |
| 3062 Register scratch = call_data; | 3058 Register scratch = call_data; |
| 3063 if (!call_data_undefined()) { | 3059 if (!call_data_undefined()) { |
| 3064 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); | 3060 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); |
| 3065 } | 3061 } |
| 3066 // Push return value and default return value. | 3062 // Push return value and default return value. |
| 3067 __ Push(scratch, scratch); | 3063 __ Push(scratch, scratch); |
| 3068 __ li(scratch, Operand(ExternalReference::isolate_address(masm->isolate()))); | 3064 __ li(scratch, Operand(ExternalReference::isolate_address(masm->isolate()))); |
| 3069 // Push isolate and holder. | 3065 // Push isolate and holder. |
| 3070 __ Push(scratch, holder); | 3066 __ Push(scratch, holder); |
| 3071 | 3067 |
| 3072 // Prepare arguments. | 3068 // Prepare arguments. |
| 3073 __ mov(scratch, sp); | 3069 __ mov(scratch, sp); |
| 3074 | 3070 |
| 3075 // Allocate the v8::Arguments structure in the arguments' space since | 3071 // Allocate the v8::Arguments structure in the arguments' space since |
| 3076 // it's not controlled by GC. | 3072 // it's not controlled by GC. |
| 3077 const int kApiStackSpace = 3; | 3073 const int kApiStackSpace = 3; |
| 3078 | 3074 |
| 3079 FrameScope frame_scope(masm, StackFrame::MANUAL); | 3075 FrameScope frame_scope(masm, StackFrame::MANUAL); |
| 3080 __ EnterExitFrame(false, kApiStackSpace); | 3076 __ EnterExitFrame(false, kApiStackSpace); |
| 3081 | 3077 |
| 3082 DCHECK(!api_function_address.is(a0) && !scratch.is(a0)); | 3078 DCHECK(!api_function_address.is(a0) && !scratch.is(a0)); |
| 3083 // a0 = FunctionCallbackInfo& | 3079 // a0 = FunctionCallbackInfo& |
| 3084 // The arguments area is just after the return address. | 3080 // The arguments area is just after the return address. |
| 3085 __ Daddu(a0, sp, Operand(1 * kPointerSize)); | 3081 __ Daddu(a0, sp, Operand(1 * kPointerSize)); |
| 3086 // FunctionCallbackInfo::implicit_args_ | 3082 // FunctionCallbackInfo::implicit_args_ |
| 3087 __ sd(scratch, MemOperand(a0, 0 * kPointerSize)); | 3083 __ Sd(scratch, MemOperand(a0, 0 * kPointerSize)); |
| 3088 // FunctionCallbackInfo::values_ | 3084 // FunctionCallbackInfo::values_ |
| 3089 __ Daddu(at, scratch, | 3085 __ Daddu(at, scratch, |
| 3090 Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize)); | 3086 Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize)); |
| 3091 __ sd(at, MemOperand(a0, 1 * kPointerSize)); | 3087 __ Sd(at, MemOperand(a0, 1 * kPointerSize)); |
| 3092 // FunctionCallbackInfo::length_ = argc | 3088 // FunctionCallbackInfo::length_ = argc |
| 3093 // Stored as an int field; 32-bit integers within a struct on the stack | 3089 // Stored as an int field; 32-bit integers within a struct on the stack |
| 3094 // are always left-justified by the n64 ABI. | 3090 // are always left-justified by the n64 ABI. |
| 3095 __ li(at, Operand(argc())); | 3091 __ li(at, Operand(argc())); |
| 3096 __ sw(at, MemOperand(a0, 2 * kPointerSize)); | 3092 __ Sw(at, MemOperand(a0, 2 * kPointerSize)); |
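The three stores above populate a FunctionCallbackInfo-shaped block in the exit frame (hence kApiStackSpace == 3). A sketch of those three words, with names mirroring the assembly comments rather than the real v8 header:

```cpp
#include <cstdint>

// Names mirror the assembly comments, not the real v8::FunctionCallbackInfo.
struct CallbackInfoWords {
  void** implicit_args;  // base of the 8 FCA slots pushed earlier
  void** values;         // JS argument 0, which sits above the FCA slots
  int32_t length;        // argc; 32-bit slot, left-justified per n64 ABI
};

void FillCallbackInfo(CallbackInfoWords* info, void** implicit_args,
                      int argc) {
  const int kArgsLength = 8;                                // FCA::kArgsLength
  info->implicit_args = implicit_args;                      // __ Sd at a0 + 0
  info->values = implicit_args + (kArgsLength - 1 + argc);  // __ Sd at a0 + 8
  info->length = argc;                                      // __ Sw at a0 + 16
}
```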
| 3097 | 3093 |
| 3098 ExternalReference thunk_ref = | 3094 ExternalReference thunk_ref = |
| 3099 ExternalReference::invoke_function_callback(masm->isolate()); | 3095 ExternalReference::invoke_function_callback(masm->isolate()); |
| 3100 | 3096 |
| 3101 AllowExternalCallThatCantCauseGC scope(masm); | 3097 AllowExternalCallThatCantCauseGC scope(masm); |
| 3102 MemOperand context_restore_operand( | 3098 MemOperand context_restore_operand( |
| 3103 fp, (2 + FCA::kContextSaveIndex) * kPointerSize); | 3099 fp, (2 + FCA::kContextSaveIndex) * kPointerSize); |
| 3104 // Store callbacks return the first JS argument. | 3100 // Store callbacks return the first JS argument. |
| 3105 int return_value_offset = 0; | 3101 int return_value_offset = 0; |
| 3106 if (is_store()) { | 3102 if (is_store()) { |
| (...skipping 29 matching lines...) |
| 3136 Register holder = ApiGetterDescriptor::HolderRegister(); | 3132 Register holder = ApiGetterDescriptor::HolderRegister(); |
| 3137 Register callback = ApiGetterDescriptor::CallbackRegister(); | 3133 Register callback = ApiGetterDescriptor::CallbackRegister(); |
| 3138 Register scratch = a4; | 3134 Register scratch = a4; |
| 3139 DCHECK(!AreAliased(receiver, holder, callback, scratch)); | 3135 DCHECK(!AreAliased(receiver, holder, callback, scratch)); |
| 3140 | 3136 |
| 3141 Register api_function_address = a2; | 3137 Register api_function_address = a2; |
| 3142 | 3138 |
| 3143 // Here and below +1 is for name() pushed after the args_ array. | 3139 // Here and below +1 is for name() pushed after the args_ array. |
| 3144 typedef PropertyCallbackArguments PCA; | 3140 typedef PropertyCallbackArguments PCA; |
| 3145 __ Dsubu(sp, sp, (PCA::kArgsLength + 1) * kPointerSize); | 3141 __ Dsubu(sp, sp, (PCA::kArgsLength + 1) * kPointerSize); |
| 3146 __ sd(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize)); | 3142 __ Sd(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize)); |
| 3147 __ ld(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset)); | 3143 __ Ld(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset)); |
| 3148 __ sd(scratch, MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize)); | 3144 __ Sd(scratch, MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize)); |
| 3149 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); | 3145 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); |
| 3150 __ sd(scratch, MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize)); | 3146 __ Sd(scratch, MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize)); |
| 3151 __ sd(scratch, MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) * | 3147 __ Sd(scratch, MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) * |
| 3152 kPointerSize)); | 3148 kPointerSize)); |
| 3153 __ li(scratch, Operand(ExternalReference::isolate_address(isolate()))); | 3149 __ li(scratch, Operand(ExternalReference::isolate_address(isolate()))); |
| 3154 __ sd(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize)); | 3150 __ Sd(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize)); |
| 3155 __ sd(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize)); | 3151 __ Sd(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize)); |
| 3156 // should_throw_on_error -> false | 3152 // should_throw_on_error -> false |
| 3157 DCHECK(Smi::kZero == nullptr); | 3153 DCHECK(Smi::kZero == nullptr); |
| 3158 __ sd(zero_reg, | 3154 __ Sd(zero_reg, |
| 3159 MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize)); | 3155 MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize)); |
| 3160 __ ld(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset)); | 3156 __ Ld(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset)); |
| 3161 __ sd(scratch, MemOperand(sp, 0 * kPointerSize)); | 3157 __ Sd(scratch, MemOperand(sp, 0 * kPointerSize)); |
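All of the stores above share one addressing scheme; the recurring "+ 1" leaves slot 0 for the name handle. A one-line sketch of the slot address computation (kPointerSize assumed to be 8 on MIPS64):

```cpp
#include <cstddef>

constexpr size_t kPointerSize = 8;  // MIPS64 n64 ABI

// The name handle occupies sp + 0; PCA slot `index` therefore lives one
// word higher than its index suggests, hence the "+ 1" in every store.
inline void* PcaSlot(char* sp, int pca_index) {
  return sp + (pca_index + 1) * kPointerSize;
}
```

So, for example, the receiver store above targets PcaSlot(sp, PCA::kThisIndex).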
| 3162 | 3158 |
| 3163 // v8::PropertyCallbackInfo::args_ array and name handle. | 3159 // v8::PropertyCallbackInfo::args_ array and name handle. |
| 3164 const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1; | 3160 const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1; |
| 3165 | 3161 |
| 3166 // Load address of v8::PropertyCallbackInfo::args_ array and name handle. | 3162 // Load address of v8::PropertyCallbackInfo::args_ array and name handle. |
| 3167 __ mov(a0, sp); // a0 = Handle<Name> | 3163 __ mov(a0, sp); // a0 = Handle<Name> |
| 3168 __ Daddu(a1, a0, Operand(1 * kPointerSize)); // a1 = v8::PCI::args_ | 3164 __ Daddu(a1, a0, Operand(1 * kPointerSize)); // a1 = v8::PCI::args_ |
| 3169 | 3165 |
| 3170 const int kApiStackSpace = 1; | 3166 const int kApiStackSpace = 1; |
| 3171 FrameScope frame_scope(masm, StackFrame::MANUAL); | 3167 FrameScope frame_scope(masm, StackFrame::MANUAL); |
| 3172 __ EnterExitFrame(false, kApiStackSpace); | 3168 __ EnterExitFrame(false, kApiStackSpace); |
| 3173 | 3169 |
| 3174 // Create v8::PropertyCallbackInfo object on the stack and initialize | 3170 // Create v8::PropertyCallbackInfo object on the stack and initialize |
| 3175 // its args_ field. | 3171 // its args_ field. |
| 3176 __ sd(a1, MemOperand(sp, 1 * kPointerSize)); | 3172 __ Sd(a1, MemOperand(sp, 1 * kPointerSize)); |
| 3177 __ Daddu(a1, sp, Operand(1 * kPointerSize)); | 3173 __ Daddu(a1, sp, Operand(1 * kPointerSize)); |
| 3178 // a1 = v8::PropertyCallbackInfo& | 3174 // a1 = v8::PropertyCallbackInfo& |
| 3179 | 3175 |
| 3180 ExternalReference thunk_ref = | 3176 ExternalReference thunk_ref = |
| 3181 ExternalReference::invoke_accessor_getter_callback(isolate()); | 3177 ExternalReference::invoke_accessor_getter_callback(isolate()); |
| 3182 | 3178 |
| 3183 __ ld(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset)); | 3179 __ Ld(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset)); |
| 3184 __ ld(api_function_address, | 3180 __ Ld(api_function_address, |
| 3185 FieldMemOperand(scratch, Foreign::kForeignAddressOffset)); | 3181 FieldMemOperand(scratch, Foreign::kForeignAddressOffset)); |
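The getter entry point is reached through a double indirection: AccessorInfo::js_getter holds a Foreign object whose payload is the raw C++ address. A sketch that models FieldMemOperand's tag subtraction explicitly:

```cpp
#include <cstdint>

constexpr intptr_t kHeapObjectTag = 1;  // FieldMemOperand subtracts this

inline uintptr_t LoadField(uintptr_t tagged, intptr_t offset) {
  return *reinterpret_cast<uintptr_t*>(tagged + offset - kHeapObjectTag);
}

// api_function_address = callback->js_getter()->foreign_address()
uintptr_t GetterEntryPoint(uintptr_t callback, intptr_t kJsGetterOffset,
                           intptr_t kForeignAddressOffset) {
  uintptr_t foreign = LoadField(callback, kJsGetterOffset);  // first __ Ld
  return LoadField(foreign, kForeignAddressOffset);          // second __ Ld
}
```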
| 3186 | 3182 |
| 3187 // +3 is to skip prolog, return address and name handle. | 3183 // +3 is to skip prolog, return address and name handle. |
| 3188 MemOperand return_value_operand( | 3184 MemOperand return_value_operand( |
| 3189 fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize); | 3185 fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize); |
| 3190 CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, | 3186 CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, |
| 3191 kStackUnwindSpace, kInvalidStackOffset, | 3187 kStackUnwindSpace, kInvalidStackOffset, |
| 3192 return_value_operand, NULL); | 3188 return_value_operand, NULL); |
| 3193 } | 3189 } |
| 3194 | 3190 |
| 3195 #undef __ | 3191 #undef __ |
| 3196 | 3192 |
| 3197 } // namespace internal | 3193 } // namespace internal |
| 3198 } // namespace v8 | 3194 } // namespace v8 |
| 3199 | 3195 |
| 3200 #endif // V8_TARGET_ARCH_MIPS64 | 3196 #endif // V8_TARGET_ARCH_MIPS64 |