| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/v8.h" | 5 #include "src/v8.h" |
| 6 | 6 |
| 7 #if V8_TARGET_ARCH_MIPS64 | 7 #if V8_TARGET_ARCH_MIPS64 |
| 8 | 8 |
| 9 #include "src/codegen.h" | 9 #include "src/codegen.h" |
| 10 #include "src/macro-assembler.h" | 10 #include "src/macro-assembler.h" |
| (...skipping 47 matching lines...) |
| 58 if (!IsMipsSoftFloatABI) { | 58 if (!IsMipsSoftFloatABI) { |
| 59 // Result is already in f0, nothing to do. | 59 // Result is already in f0, nothing to do. |
| 60 } else { | 60 } else { |
| 61 __ Move(v0, v1, result); | 61 __ Move(v0, v1, result); |
| 62 } | 62 } |
| 63 __ Ret(); | 63 __ Ret(); |
| 64 } | 64 } |
| 65 | 65 |
| 66 CodeDesc desc; | 66 CodeDesc desc; |
| 67 masm.GetCode(&desc); | 67 masm.GetCode(&desc); |
| 68 ASSERT(!RelocInfo::RequiresRelocation(desc)); | 68 DCHECK(!RelocInfo::RequiresRelocation(desc)); |
| 69 | 69 |
| 70 CpuFeatures::FlushICache(buffer, actual_size); | 70 CpuFeatures::FlushICache(buffer, actual_size); |
| 71 base::OS::ProtectCode(buffer, actual_size); | 71 base::OS::ProtectCode(buffer, actual_size); |
| 72 | 72 |
| 73 #if !defined(USE_SIMULATOR) | 73 #if !defined(USE_SIMULATOR) |
| 74 return FUNCTION_CAST<UnaryMathFunction>(buffer); | 74 return FUNCTION_CAST<UnaryMathFunction>(buffer); |
| 75 #else | 75 #else |
| 76 fast_exp_mips_machine_code = buffer; | 76 fast_exp_mips_machine_code = buffer; |
| 77 return &fast_exp_simulator; | 77 return &fast_exp_simulator; |
| 78 #endif | 78 #endif |
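A note on the `USE_SIMULATOR` split above: a native build can jump straight into the generated buffer through `FUNCTION_CAST`, but a simulator build cannot execute MIPS machine code on the host CPU, so the buffer is stashed in `fast_exp_mips_machine_code` and a C++ trampoline is returned instead. The trampoline's definition sits in the lines elided before this hunk; the sketch below shows what it has to do, with the body taken as an assumption rather than the file's actual code:

```cpp
#ifdef USE_SIMULATOR
// Assumed shape of the trampoline returned above: run the generated stub
// inside the MIPS simulator, since the host CPU cannot execute it directly.
double fast_exp_simulator(double x) {
  return Simulator::current(Isolate::Current())
      ->CallFP(fast_exp_mips_machine_code, x, 0);
}
#endif
```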
| (...skipping 21 matching lines...) |
| 100 leave, ua_chk16w, ua_loop16w, ua_skip_pref, ua_chkw, | 100 leave, ua_chk16w, ua_loop16w, ua_skip_pref, ua_chkw, |
| 101 ua_chk1w, ua_wordCopy_loop, ua_smallCopy, ua_smallCopy_loop; | 101 ua_chk1w, ua_wordCopy_loop, ua_smallCopy, ua_smallCopy_loop; |
| 102 | 102 |
| 103 // The size of each prefetch. | 103 // The size of each prefetch. |
| 104 uint32_t pref_chunk = 32; | 104 uint32_t pref_chunk = 32; |
| 105 // The maximum size of a prefetch; it must not be less than pref_chunk. | 105 // The maximum size of a prefetch; it must not be less than pref_chunk. |
| 106 // If the real size of a prefetch is greater than max_pref_size and | 106 // If the real size of a prefetch is greater than max_pref_size and |
| 107 // the kPrefHintPrepareForStore hint is used, the code will not work | 107 // the kPrefHintPrepareForStore hint is used, the code will not work |
| 108 // correctly. | 108 // correctly. |
| 109 uint32_t max_pref_size = 128; | 109 uint32_t max_pref_size = 128; |
| 110 ASSERT(pref_chunk < max_pref_size); | 110 DCHECK(pref_chunk < max_pref_size); |
| 111 | 111 |
| 112 // pref_limit is set based on the fact that we never use an offset | 112 // pref_limit is set based on the fact that we never use an offset |
| 113 // greater than 5 on a store pref and that a single pref can | 113 // greater than 5 on a store pref and that a single pref can |
| 114 // never be larger than max_pref_size. | 114 // never be larger than max_pref_size. |
| 115 uint32_t pref_limit = (5 * pref_chunk) + max_pref_size; | 115 uint32_t pref_limit = (5 * pref_chunk) + max_pref_size; |
| 116 int32_t pref_hint_load = kPrefHintLoadStreamed; | 116 int32_t pref_hint_load = kPrefHintLoadStreamed; |
| 117 int32_t pref_hint_store = kPrefHintPrepareForStore; | 117 int32_t pref_hint_store = kPrefHintPrepareForStore; |
| 118 uint32_t loadstore_chunk = 4; | 118 uint32_t loadstore_chunk = 4; |
| 119 | 119 |
| 120 // The initial prefetches may fetch bytes that are before the buffer being | 120 // The initial prefetches may fetch bytes that are before the buffer being |
| 121 // copied. Start copies with an offset of 4 to avoid this situation when | 121 // copied. Start copies with an offset of 4 to avoid this situation when |
| 122 // using kPrefHintPrepareForStore. | 122 // using kPrefHintPrepareForStore. |
| 123 ASSERT(pref_hint_store != kPrefHintPrepareForStore || | 123 DCHECK(pref_hint_store != kPrefHintPrepareForStore || |
| 124 pref_chunk * 4 >= max_pref_size); | 124 pref_chunk * 4 >= max_pref_size); |
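Worked through with the values above: pref_limit = (5 * 32) + 128 = 288 bytes, so the store-side prefetch code (in the elided body below) only issues kPrefHintPrepareForStore prefetches while at least 288 bytes remain to be written. That matters because PrepareForStore typically allocates the cache line without loading its previous contents, so a prefetch reaching past the end of the destination could clobber bytes the copy never writes. The DCHECK just above guards the mirror-image hazard at the start of the buffer.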
| 125 // If the size is less than 8, go to lastb. Regardless of size, | 125 // If the size is less than 8, go to lastb. Regardless of size, |
| 126 // copy dst pointer to v0 for the return value. | 126 // copy dst pointer to v0 for the return value. |
| 127 __ slti(a6, a2, 2 * loadstore_chunk); | 127 __ slti(a6, a2, 2 * loadstore_chunk); |
| 128 __ bne(a6, zero_reg, &lastb); | 128 __ bne(a6, zero_reg, &lastb); |
| 129 __ mov(v0, a0); // In delay slot. | 129 __ mov(v0, a0); // In delay slot. |
| 130 | 130 |
| 131 // If src and dst have different alignments, go to unaligned; if they | 131 // If src and dst have different alignments, go to unaligned; if they |
| 132 // have the same alignment (but are not actually aligned) do a partial | 132 // have the same alignment (but are not actually aligned) do a partial |
| 133 // load/store to make them aligned. If they are both already aligned | 133 // load/store to make them aligned. If they are both already aligned |
| (...skipping 349 matching lines...) |
| 483 __ addiu(a0, a0, 1); | 483 __ addiu(a0, a0, 1); |
| 484 __ addiu(a1, a1, 1); | 484 __ addiu(a1, a1, 1); |
| 485 __ bne(a0, a3, &ua_smallCopy_loop); | 485 __ bne(a0, a3, &ua_smallCopy_loop); |
| 486 __ sb(v1, MemOperand(a0, -1)); // In delay slot. | 486 __ sb(v1, MemOperand(a0, -1)); // In delay slot. |
| 487 | 487 |
| 488 __ jr(ra); | 488 __ jr(ra); |
| 489 __ nop(); | 489 __ nop(); |
| 490 } | 490 } |
| 491 CodeDesc desc; | 491 CodeDesc desc; |
| 492 masm.GetCode(&desc); | 492 masm.GetCode(&desc); |
| 493 ASSERT(!RelocInfo::RequiresRelocation(desc)); | 493 DCHECK(!RelocInfo::RequiresRelocation(desc)); |
| 494 | 494 |
| 495 CpuFeatures::FlushICache(buffer, actual_size); | 495 CpuFeatures::FlushICache(buffer, actual_size); |
| 496 base::OS::ProtectCode(buffer, actual_size); | 496 base::OS::ProtectCode(buffer, actual_size); |
| 497 return FUNCTION_CAST<MemCopyUint8Function>(buffer); | 497 return FUNCTION_CAST<MemCopyUint8Function>(buffer); |
| 498 #endif | 498 #endif |
| 499 } | 499 } |
| 500 #endif | 500 #endif |
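Every generator in this file ends with the same lifecycle, condensed below for reference. This restates code already present above (with `stub_default` standing in for the per-stub fallback value), spelling out why each step exists:

```cpp
size_t actual_size;
byte* buffer =
    static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
if (buffer == NULL) return stub_default;  // No executable memory: fall back.

MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
// ... emit the stub body ...

CodeDesc desc;
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));  // The buffer is never relocated.

CpuFeatures::FlushICache(buffer, actual_size);  // Publish stores to ifetch.
base::OS::ProtectCode(buffer, actual_size);     // Flip the page to executable.
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
```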
| 501 | 501 |
| 502 UnaryMathFunction CreateSqrtFunction() { | 502 UnaryMathFunction CreateSqrtFunction() { |
| 503 #if defined(USE_SIMULATOR) | 503 #if defined(USE_SIMULATOR) |
| 504 return &std::sqrt; | 504 return &std::sqrt; |
| 505 #else | 505 #else |
| 506 size_t actual_size; | 506 size_t actual_size; |
| 507 byte* buffer = | 507 byte* buffer = |
| 508 static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true)); | 508 static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true)); |
| 509 if (buffer == NULL) return &std::sqrt; | 509 if (buffer == NULL) return &std::sqrt; |
| 510 | 510 |
| 511 MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size)); | 511 MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size)); |
| 512 | 512 |
| 513 __ MovFromFloatParameter(f12); | 513 __ MovFromFloatParameter(f12); |
| 514 __ sqrt_d(f0, f12); | 514 __ sqrt_d(f0, f12); |
| 515 __ MovToFloatResult(f0); | 515 __ MovToFloatResult(f0); |
| 516 __ Ret(); | 516 __ Ret(); |
| 517 | 517 |
| 518 CodeDesc desc; | 518 CodeDesc desc; |
| 519 masm.GetCode(&desc); | 519 masm.GetCode(&desc); |
| 520 ASSERT(!RelocInfo::RequiresRelocation(desc)); | 520 DCHECK(!RelocInfo::RequiresRelocation(desc)); |
| 521 | 521 |
| 522 CpuFeatures::FlushICache(buffer, actual_size); | 522 CpuFeatures::FlushICache(buffer, actual_size); |
| 523 base::OS::ProtectCode(buffer, actual_size); | 523 base::OS::ProtectCode(buffer, actual_size); |
| 524 return FUNCTION_CAST<UnaryMathFunction>(buffer); | 524 return FUNCTION_CAST<UnaryMathFunction>(buffer); |
| 525 #endif | 525 #endif |
| 526 } | 526 } |
| 527 | 527 |
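Callers of `CreateSqrtFunction` see only a plain `double (*)(double)` (the `UnaryMathFunction` typedef), so they cannot tell whether they received the generated `sqrt_d` stub, the `std::sqrt` fallback on allocation failure, or the simulator path. A hypothetical call site, purely for illustration:

```cpp
UnaryMathFunction fast_sqrt = CreateSqrtFunction();
double r = fast_sqrt(2.0);  // ~1.4142135623730951 on every path.
```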
| 528 #undef __ | 528 #undef __ |
| 529 | 529 |
| 530 | 530 |
| 531 // ------------------------------------------------------------------------- | 531 // ------------------------------------------------------------------------- |
| 532 // Platform-specific RuntimeCallHelper functions. | 532 // Platform-specific RuntimeCallHelper functions. |
| 533 | 533 |
| 534 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const { | 534 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const { |
| 535 masm->EnterFrame(StackFrame::INTERNAL); | 535 masm->EnterFrame(StackFrame::INTERNAL); |
| 536 ASSERT(!masm->has_frame()); | 536 DCHECK(!masm->has_frame()); |
| 537 masm->set_has_frame(true); | 537 masm->set_has_frame(true); |
| 538 } | 538 } |
| 539 | 539 |
| 540 | 540 |
| 541 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { | 541 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { |
| 542 masm->LeaveFrame(StackFrame::INTERNAL); | 542 masm->LeaveFrame(StackFrame::INTERNAL); |
| 543 ASSERT(masm->has_frame()); | 543 DCHECK(masm->has_frame()); |
| 544 masm->set_has_frame(false); | 544 masm->set_has_frame(false); |
| 545 } | 545 } |
| 546 | 546 |
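These two hooks bracket a runtime call with an INTERNAL frame and keep the assembler's has_frame() flag in sync, which is exactly what the DCHECKs verify. A sketch of how a stub would drive the pair; `Runtime::kHypotheticalId` and `num_args` are placeholders, not real identifiers:

```cpp
StubRuntimeCallHelper helper;
helper.BeforeCall(masm);  // EnterFrame(INTERNAL), then set_has_frame(true).
__ CallRuntime(Runtime::kHypotheticalId, num_args);  // Placeholder runtime id.
helper.AfterCall(masm);   // LeaveFrame(INTERNAL), then set_has_frame(false).
```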
| 547 | 547 |
| 548 // ------------------------------------------------------------------------- | 548 // ------------------------------------------------------------------------- |
| 549 // Code generators | 549 // Code generators |
| 550 | 550 |
| 551 #define __ ACCESS_MASM(masm) | 551 #define __ ACCESS_MASM(masm) |
| 552 | 552 |
| 553 void ElementsTransitionGenerator::GenerateMapChangeElementsTransition( | 553 void ElementsTransitionGenerator::GenerateMapChangeElementsTransition( |
| 554 MacroAssembler* masm, | 554 MacroAssembler* masm, |
| 555 Register receiver, | 555 Register receiver, |
| 556 Register key, | 556 Register key, |
| 557 Register value, | 557 Register value, |
| 558 Register target_map, | 558 Register target_map, |
| 559 AllocationSiteMode mode, | 559 AllocationSiteMode mode, |
| 560 Label* allocation_memento_found) { | 560 Label* allocation_memento_found) { |
| 561 Register scratch_elements = a4; | 561 Register scratch_elements = a4; |
| 562 ASSERT(!AreAliased(receiver, key, value, target_map, | 562 DCHECK(!AreAliased(receiver, key, value, target_map, |
| 563 scratch_elements)); | 563 scratch_elements)); |
| 564 | 564 |
| 565 if (mode == TRACK_ALLOCATION_SITE) { | 565 if (mode == TRACK_ALLOCATION_SITE) { |
| 566 __ JumpIfJSArrayHasAllocationMemento( | 566 __ JumpIfJSArrayHasAllocationMemento( |
| 567 receiver, scratch_elements, allocation_memento_found); | 567 receiver, scratch_elements, allocation_memento_found); |
| 568 } | 568 } |
| 569 | 569 |
| 570 // Set transitioned map. | 570 // Set transitioned map. |
| 571 __ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); | 571 __ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
| 572 __ RecordWriteField(receiver, | 572 __ RecordWriteField(receiver, |
| (...skipping 21 matching lines...) |
| 594 Register length = a5; | 594 Register length = a5; |
| 595 Register array = a6; | 595 Register array = a6; |
| 596 Register array_end = array; | 596 Register array_end = array; |
| 597 | 597 |
| 598 // target_map parameter can be clobbered. | 598 // target_map parameter can be clobbered. |
| 599 Register scratch1 = target_map; | 599 Register scratch1 = target_map; |
| 600 Register scratch2 = t1; | 600 Register scratch2 = t1; |
| 601 Register scratch3 = a7; | 601 Register scratch3 = a7; |
| 602 | 602 |
| 603 // Verify input registers don't conflict with locals. | 603 // Verify input registers don't conflict with locals. |
| 604 ASSERT(!AreAliased(receiver, key, value, target_map, | 604 DCHECK(!AreAliased(receiver, key, value, target_map, |
| 605 elements, length, array, scratch2)); | 605 elements, length, array, scratch2)); |
| 606 | 606 |
| 607 Register scratch = t2; | 607 Register scratch = t2; |
| 608 if (mode == TRACK_ALLOCATION_SITE) { | 608 if (mode == TRACK_ALLOCATION_SITE) { |
| 609 __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail); | 609 __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail); |
| 610 } | 610 } |
| 611 | 611 |
| 612 // Check for empty arrays, which only require a map transition and no changes | 612 // Check for empty arrays, which only require a map transition and no changes |
| 613 // to the backing store. | 613 // to the backing store. |
| 614 __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); | 614 __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
| (...skipping 122 matching lines...) |
| 737 AllocationSiteMode mode, | 737 AllocationSiteMode mode, |
| 738 Label* fail) { | 738 Label* fail) { |
| 739 // Register ra contains the return address. | 739 // Register ra contains the return address. |
| 740 Label entry, loop, convert_hole, gc_required, only_change_map; | 740 Label entry, loop, convert_hole, gc_required, only_change_map; |
| 741 Register elements = a4; | 741 Register elements = a4; |
| 742 Register array = a6; | 742 Register array = a6; |
| 743 Register length = a5; | 743 Register length = a5; |
| 744 Register scratch = t1; | 744 Register scratch = t1; |
| 745 | 745 |
| 746 // Verify input registers don't conflict with locals. | 746 // Verify input registers don't conflict with locals. |
| 747 ASSERT(!AreAliased(receiver, key, value, target_map, | 747 DCHECK(!AreAliased(receiver, key, value, target_map, |
| 748 elements, array, length, scratch)); | 748 elements, array, length, scratch)); |
| 749 if (mode == TRACK_ALLOCATION_SITE) { | 749 if (mode == TRACK_ALLOCATION_SITE) { |
| 750 __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail); | 750 __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail); |
| 751 } | 751 } |
| 752 | 752 |
| 753 // Check for empty arrays, which only require a map transition and no changes | 753 // Check for empty arrays, which only require a map transition and no changes |
| 754 // to the backing store. | 754 // to the backing store. |
| 755 __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); | 755 __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
| 756 __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex); | 756 __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex); |
| 757 __ Branch(&only_change_map, eq, at, Operand(elements)); | 757 __ Branch(&only_change_map, eq, at, Operand(elements)); |
| (...skipping 214 matching lines...) |
| 972 | 972 |
| 973 | 973 |
| 974 void MathExpGenerator::EmitMathExp(MacroAssembler* masm, | 974 void MathExpGenerator::EmitMathExp(MacroAssembler* masm, |
| 975 DoubleRegister input, | 975 DoubleRegister input, |
| 976 DoubleRegister result, | 976 DoubleRegister result, |
| 977 DoubleRegister double_scratch1, | 977 DoubleRegister double_scratch1, |
| 978 DoubleRegister double_scratch2, | 978 DoubleRegister double_scratch2, |
| 979 Register temp1, | 979 Register temp1, |
| 980 Register temp2, | 980 Register temp2, |
| 981 Register temp3) { | 981 Register temp3) { |
| 982 ASSERT(!input.is(result)); | 982 DCHECK(!input.is(result)); |
| 983 ASSERT(!input.is(double_scratch1)); | 983 DCHECK(!input.is(double_scratch1)); |
| 984 ASSERT(!input.is(double_scratch2)); | 984 DCHECK(!input.is(double_scratch2)); |
| 985 ASSERT(!result.is(double_scratch1)); | 985 DCHECK(!result.is(double_scratch1)); |
| 986 ASSERT(!result.is(double_scratch2)); | 986 DCHECK(!result.is(double_scratch2)); |
| 987 ASSERT(!double_scratch1.is(double_scratch2)); | 987 DCHECK(!double_scratch1.is(double_scratch2)); |
| 988 ASSERT(!temp1.is(temp2)); | 988 DCHECK(!temp1.is(temp2)); |
| 989 ASSERT(!temp1.is(temp3)); | 989 DCHECK(!temp1.is(temp3)); |
| 990 ASSERT(!temp2.is(temp3)); | 990 DCHECK(!temp2.is(temp3)); |
| 991 ASSERT(ExternalReference::math_exp_constants(0).address() != NULL); | 991 DCHECK(ExternalReference::math_exp_constants(0).address() != NULL); |
| 992 | 992 |
| 993 Label zero, infinity, done; | 993 Label zero, infinity, done; |
| 994 __ li(temp3, Operand(ExternalReference::math_exp_constants(0))); | 994 __ li(temp3, Operand(ExternalReference::math_exp_constants(0))); |
| 995 | 995 |
| 996 __ ldc1(double_scratch1, ExpConstant(0, temp3)); | 996 __ ldc1(double_scratch1, ExpConstant(0, temp3)); |
| 997 __ BranchF(&zero, NULL, ge, double_scratch1, input); | 997 __ BranchF(&zero, NULL, ge, double_scratch1, input); |
| 998 | 998 |
| 999 __ ldc1(double_scratch2, ExpConstant(1, temp3)); | 999 __ ldc1(double_scratch2, ExpConstant(1, temp3)); |
| 1000 __ BranchF(&infinity, NULL, ge, input, double_scratch2); | 1000 __ BranchF(&infinity, NULL, ge, input, double_scratch2); |
| 1001 | 1001 |
| 1002 __ ldc1(double_scratch1, ExpConstant(3, temp3)); | 1002 __ ldc1(double_scratch1, ExpConstant(3, temp3)); |
| 1003 __ ldc1(result, ExpConstant(4, temp3)); | 1003 __ ldc1(result, ExpConstant(4, temp3)); |
| 1004 __ mul_d(double_scratch1, double_scratch1, input); | 1004 __ mul_d(double_scratch1, double_scratch1, input); |
| 1005 __ add_d(double_scratch1, double_scratch1, result); | 1005 __ add_d(double_scratch1, double_scratch1, result); |
| 1006 __ FmoveLow(temp2, double_scratch1); | 1006 __ FmoveLow(temp2, double_scratch1); |
| 1007 __ sub_d(double_scratch1, double_scratch1, result); | 1007 __ sub_d(double_scratch1, double_scratch1, result); |
| 1008 __ ldc1(result, ExpConstant(6, temp3)); | 1008 __ ldc1(result, ExpConstant(6, temp3)); |
| 1009 __ ldc1(double_scratch2, ExpConstant(5, temp3)); | 1009 __ ldc1(double_scratch2, ExpConstant(5, temp3)); |
| 1010 __ mul_d(double_scratch1, double_scratch1, double_scratch2); | 1010 __ mul_d(double_scratch1, double_scratch1, double_scratch2); |
| 1011 __ sub_d(double_scratch1, double_scratch1, input); | 1011 __ sub_d(double_scratch1, double_scratch1, input); |
| 1012 __ sub_d(result, result, double_scratch1); | 1012 __ sub_d(result, result, double_scratch1); |
| 1013 __ mul_d(double_scratch2, double_scratch1, double_scratch1); | 1013 __ mul_d(double_scratch2, double_scratch1, double_scratch1); |
| 1014 __ mul_d(result, result, double_scratch2); | 1014 __ mul_d(result, result, double_scratch2); |
| 1015 __ ldc1(double_scratch2, ExpConstant(7, temp3)); | 1015 __ ldc1(double_scratch2, ExpConstant(7, temp3)); |
| 1016 __ mul_d(result, result, double_scratch2); | 1016 __ mul_d(result, result, double_scratch2); |
| 1017 __ sub_d(result, result, double_scratch1); | 1017 __ sub_d(result, result, double_scratch1); |
| 1018 // Move 1 into double_scratch2 as math_exp_constants_array[8] == 1. | 1018 // Move 1 into double_scratch2 as math_exp_constants_array[8] == 1. |
| 1019 ASSERT(*reinterpret_cast<double*> | 1019 DCHECK(*reinterpret_cast<double*> |
| 1020 (ExternalReference::math_exp_constants(8).address()) == 1); | 1020 (ExternalReference::math_exp_constants(8).address()) == 1); |
| 1021 __ Move(double_scratch2, 1); | 1021 __ Move(double_scratch2, 1); |
| 1022 __ add_d(result, result, double_scratch2); | 1022 __ add_d(result, result, double_scratch2); |
| 1023 __ dsrl(temp1, temp2, 11); | 1023 __ dsrl(temp1, temp2, 11); |
| 1024 __ Ext(temp2, temp2, 0, 11); | 1024 __ Ext(temp2, temp2, 0, 11); |
| 1025 __ Daddu(temp1, temp1, Operand(0x3ff)); | 1025 __ Daddu(temp1, temp1, Operand(0x3ff)); |
| 1026 | 1026 |
| 1027 // Must not call ExpConstant() after overwriting temp3! | 1027 // Must not call ExpConstant() after overwriting temp3! |
| 1028 __ li(temp3, Operand(ExternalReference::math_exp_log_table())); | 1028 __ li(temp3, Operand(ExternalReference::math_exp_log_table())); |
| 1029 __ dsll(at, temp2, 3); | 1029 __ dsll(at, temp2, 3); |
| (...skipping 23 matching lines...) |
| 1053 __ bind(&done); | 1053 __ bind(&done); |
| 1054 } | 1054 } |
| 1055 | 1055 |
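Reading the constant loads above as the usual table-driven exp reduction (an interpretation of the emitted code, not text from the CL): the add-a-magic-constant trick around ExpConstant(4) leaves n = round(x * 2^11 / ln 2) in the low word captured by FmoveLow. The dsrl/Ext pair then splits n into m = n >> 11 and j = n & 0x7ff, and the residual r = x - n * (ln 2 / 2^11) is fed through a short polynomial for e^r. The result is reassembled as

e^x ≈ 2^m * 2^(j/2048) * e^r,

where Daddu(temp1, temp1, Operand(0x3ff)) biases m into an IEEE-754 exponent and math_exp_log_table() supplies the 2048-entry table of 2^(j/2048) values.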
| 1056 #ifdef DEBUG | 1056 #ifdef DEBUG |
| 1057 // nop(CODE_AGE_MARKER_NOP) | 1057 // nop(CODE_AGE_MARKER_NOP) |
| 1058 static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180; | 1058 static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180; |
| 1059 #endif | 1059 #endif |
| 1060 | 1060 |
| 1061 | 1061 |
| 1062 CodeAgingHelper::CodeAgingHelper() { | 1062 CodeAgingHelper::CodeAgingHelper() { |
| 1063 ASSERT(young_sequence_.length() == kNoCodeAgeSequenceLength); | 1063 DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength); |
| 1064 // Since patcher is a large object, allocate it dynamically when needed, | 1064 // Since patcher is a large object, allocate it dynamically when needed, |
| 1065 // to avoid overloading the stack in stress conditions. | 1065 // to avoid overloading the stack in stress conditions. |
| 1066 // DONT_FLUSH is used because the CodeAgingHelper is initialized early in | 1066 // DONT_FLUSH is used because the CodeAgingHelper is initialized early in |
| 1067 // the process, before the MIPS simulator ICache is set up. | 1067 // the process, before the MIPS simulator ICache is set up. |
| 1068 SmartPointer<CodePatcher> patcher( | 1068 SmartPointer<CodePatcher> patcher( |
| 1069 new CodePatcher(young_sequence_.start(), | 1069 new CodePatcher(young_sequence_.start(), |
| 1070 young_sequence_.length() / Assembler::kInstrSize, | 1070 young_sequence_.length() / Assembler::kInstrSize, |
| 1071 CodePatcher::DONT_FLUSH)); | 1071 CodePatcher::DONT_FLUSH)); |
| 1072 PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length()); | 1072 PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length()); |
| 1073 patcher->masm()->Push(ra, fp, cp, a1); | 1073 patcher->masm()->Push(ra, fp, cp, a1); |
| 1074 patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP); | 1074 patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP); |
| 1075 patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP); | 1075 patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP); |
| 1076 patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP); | 1076 patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP); |
| 1077 patcher->masm()->Daddu( | 1077 patcher->masm()->Daddu( |
| 1078 fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); | 1078 fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); |
| 1079 } | 1079 } |
| 1080 | 1080 |
| 1081 | 1081 |
| 1082 #ifdef DEBUG | 1082 #ifdef DEBUG |
| 1083 bool CodeAgingHelper::IsOld(byte* candidate) const { | 1083 bool CodeAgingHelper::IsOld(byte* candidate) const { |
| 1084 return Memory::uint32_at(candidate) == kCodeAgePatchFirstInstruction; | 1084 return Memory::uint32_at(candidate) == kCodeAgePatchFirstInstruction; |
| 1085 } | 1085 } |
| 1086 #endif | 1086 #endif |
| 1087 | 1087 |
| 1088 | 1088 |
| 1089 bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) { | 1089 bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) { |
| 1090 bool result = isolate->code_aging_helper()->IsYoung(sequence); | 1090 bool result = isolate->code_aging_helper()->IsYoung(sequence); |
| 1091 ASSERT(result || isolate->code_aging_helper()->IsOld(sequence)); | 1091 DCHECK(result || isolate->code_aging_helper()->IsOld(sequence)); |
| 1092 return result; | 1092 return result; |
| 1093 } | 1093 } |
| 1094 | 1094 |
| 1095 | 1095 |
| 1096 void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age, | 1096 void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age, |
| 1097 MarkingParity* parity) { | 1097 MarkingParity* parity) { |
| 1098 if (IsYoungSequence(isolate, sequence)) { | 1098 if (IsYoungSequence(isolate, sequence)) { |
| 1099 *age = kNoAgeCodeAge; | 1099 *age = kNoAgeCodeAge; |
| 1100 *parity = NO_MARKING_PARITY; | 1100 *parity = NO_MARKING_PARITY; |
| 1101 } else { | 1101 } else { |
| (...skipping 30 matching lines...) |
| 1132 patcher.masm()->nop(); // Pad the empty space. | 1132 patcher.masm()->nop(); // Pad the empty space. |
| 1133 } | 1133 } |
| 1134 } | 1134 } |
| 1135 | 1135 |
| 1136 | 1136 |
| 1137 #undef __ | 1137 #undef __ |
| 1138 | 1138 |
| 1139 } } // namespace v8::internal | 1139 } } // namespace v8::internal |
| 1140 | 1140 |
| 1141 #endif // V8_TARGET_ARCH_MIPS64 | 1141 #endif // V8_TARGET_ARCH_MIPS64 |