Chromium Code Reviews

Unified Diff: src/mips/code-stubs-mips.cc

Issue 12317141: Added Isolate parameter to CodeStub::GetCode(). (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Fixed whitespace. Rebased. Created 7 years, 9 months ago
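
This CL is one mechanical refactoring applied throughout the file: CodeStub::GetCode() now takes the owning Isolate as an explicit argument (call sites inside code generators pass masm->isolate()), rather than GetCode() fetching the current isolate from ambient per-thread state. The following standalone C++ sketch illustrates the pattern only; FakeIsolate and FakeStub are hypothetical stand-ins that model, not mirror, the V8 API.

    // Illustrative sketch of the "pass the Isolate explicitly" pattern.
    // FakeIsolate and FakeStub are hypothetical; this is not V8 code.
    #include <cassert>
    #include <cstdio>

    struct FakeIsolate {
      const char* name;
    };

    class FakeStub {
     public:
      // Before: GetCode() consulted an implicit "current isolate" global.
      // After: the caller supplies its isolate, making the dependency visible.
      int GetCode(FakeIsolate* isolate) {
        assert(isolate != NULL);  // Callers can no longer forget the isolate.
        std::printf("generating stub code for isolate '%s'\n", isolate->name);
        return 42;  // Stand-in for the generated Code handle.
      }
    };

    int main() {
      FakeIsolate isolate = { "main" };
      FakeStub stub;
      // Call sites change from stub.GetCode() to stub.GetCode(&isolate),
      // mirroring __ Call(stub.GetCode(masm->isolate())) in the diff below.
      return stub.GetCode(&isolate) == 42 ? 0 : 1;
    }

Every hunk below is an instance of this call-site rewrite.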
@@ -1 +1 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 644 matching lines...)
@@ -655 +655 @@
     if (destination == kCoreRegisters) {
       __ Move(a2, a3, f14);
       __ Move(a0, a1, f12);
     }
   } else {
     ASSERT(destination == kCoreRegisters);
     // Write Smi from a0 to a3 and a2 in double format.
     __ mov(scratch1, a0);
     ConvertToDoubleStub stub1(a3, a2, scratch1, scratch2);
     __ push(ra);
-    __ Call(stub1.GetCode());
+    __ Call(stub1.GetCode(masm->isolate()));
     // Write Smi from a1 to a1 and a0 in double format.
     __ mov(scratch1, a1);
     ConvertToDoubleStub stub2(a1, a0, scratch1, scratch2);
-    __ Call(stub2.GetCode());
+    __ Call(stub2.GetCode(masm->isolate()));
     __ pop(ra);
   }
 }
 
 
 void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
                                      Destination destination,
                                      Register object,
                                      FPURegister dst,
                                      Register dst1,
(...skipping 42 matching lines...)
@@ -722 +722 @@
     if (destination == kCoreRegisters) {
       // Load the converted smi to dst1 and dst2 in double format.
       __ Move(dst1, dst2, dst);
     }
   } else {
     ASSERT(destination == kCoreRegisters);
     // Write smi to dst1 and dst2 double format.
     __ mov(scratch1, object);
     ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
     __ push(ra);
-    __ Call(stub.GetCode());
+    __ Call(stub.GetCode(masm->isolate()));
     __ pop(ra);
   }
 
   __ bind(&done);
 }
 
 
 void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
                                                Register object,
                                                Register dst,
(...skipping 430 matching lines...)
@@ -1173 +1173 @@
       sign_.is(a0)) {
     return true;
   }
   // Other register combinations are generated as and when they are needed,
   // so it is unsafe to call them from stubs (we can't generate a stub while
   // we are generating a stub).
   return false;
 }
 
 
-void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() {
+void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
+    Isolate* isolate) {
   WriteInt32ToHeapNumberStub stub1(a1, v0, a2, a3);
   WriteInt32ToHeapNumberStub stub2(a2, v0, a3, a0);
-  stub1.GetCode()->set_is_pregenerated(true);
-  stub2.GetCode()->set_is_pregenerated(true);
+  stub1.GetCode(isolate)->set_is_pregenerated(true);
+  stub2.GetCode(isolate)->set_is_pregenerated(true);
 }
 
 
 // See comment for class, this does NOT work for int32's that are in Smi range.
 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
   Label max_negative_int;
   // the_int_ has the answer which is a signed int32 but not a Smi.
   // We test for the special value that has a different exponent.
   STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
   // Test sign, and save for later conditionals.
(...skipping 176 matching lines...)
@@ -1374 +1375 @@
     __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
   } else {
     // Load lhs to a double in a2, a3.
     __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
     __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset));
 
     // Write Smi from rhs to a1 and a0 in double format. t5 is scratch.
     __ mov(t6, rhs);
     ConvertToDoubleStub stub1(a1, a0, t6, t5);
     __ push(ra);
-    __ Call(stub1.GetCode());
+    __ Call(stub1.GetCode(masm->isolate()));
 
     __ pop(ra);
   }
 
   // We now have both loaded as doubles.
   __ jmp(both_loaded_as_doubles);
 
   __ bind(&lhs_is_smi);
   // Lhs is a Smi. Check whether the non-smi is a heap number.
   __ GetObjectType(rhs, t4, t4);
(...skipping 14 matching lines...)
@@ -1409 +1410 @@
     CpuFeatures::Scope scope(FPU);
     __ sra(at, lhs, kSmiTagSize);
     __ mtc1(at, f12);
     __ cvt_d_w(f12, f12);
     __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
   } else {
     // Convert lhs to a double format. t5 is scratch.
     __ mov(t6, lhs);
     ConvertToDoubleStub stub2(a3, a2, t6, t5);
     __ push(ra);
-    __ Call(stub2.GetCode());
+    __ Call(stub2.GetCode(masm->isolate()));
     __ pop(ra);
     // Load rhs to a double in a1, a0.
     if (rhs.is(a0)) {
       __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
       __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
     } else {
       __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
       __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
     }
   }
(...skipping 946 matching lines...)
@@ -2376 +2377 @@
     // Convert the int32 in a1 to the heap number in v0. a2 is corrupted.
     CpuFeatures::Scope scope(FPU);
     __ mtc1(a1, f0);
     __ cvt_d_w(f0, f0);
     __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
     __ Ret();
   } else {
     // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
     // have to set up a frame.
     WriteInt32ToHeapNumberStub stub(a1, v0, a2, a3);
-    __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+    __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
   }
 
   __ bind(&impossible);
   if (FLAG_debug_code) {
     __ stop("Incorrect assumption in bit-not stub");
   }
 }
 
 
 // TODO(svenpanne): Use virtual functions instead of switch.
(...skipping 1526 matching lines...)
@@ -3923 +3924 @@
   return true;
 }
 
 
 bool CEntryStub::IsPregenerated() {
   return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
          result_size_ == 1;
 }
 
 
-void CodeStub::GenerateStubsAheadOfTime() {
-  CEntryStub::GenerateAheadOfTime();
-  WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime();
-  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
-  RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
+void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
+  CEntryStub::GenerateAheadOfTime(isolate);
+  WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
+  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
+  RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
 }
 
 
-void CodeStub::GenerateFPStubs() {
+void CodeStub::GenerateFPStubs(Isolate* isolate) {
   SaveFPRegsMode mode = CpuFeatures::IsSupported(FPU)
       ? kSaveFPRegs
       : kDontSaveFPRegs;
   CEntryStub save_doubles(1, mode);
   StoreBufferOverflowStub stub(mode);
   // These stubs might already be in the snapshot, detect that and don't
   // regenerate, which would lead to code stub initialization state being messed
   // up.
   Code* save_doubles_code = NULL;
   Code* store_buffer_overflow_code = NULL;
   if (!save_doubles.FindCodeInCache(&save_doubles_code, ISOLATE)) {
     if (CpuFeatures::IsSupported(FPU)) {
       CpuFeatures::Scope scope2(FPU);
-      save_doubles_code = *save_doubles.GetCode();
-      store_buffer_overflow_code = *stub.GetCode();
+      save_doubles_code = *save_doubles.GetCode(isolate);
+      store_buffer_overflow_code = *stub.GetCode(isolate);
     } else {
-      save_doubles_code = *save_doubles.GetCode();
-      store_buffer_overflow_code = *stub.GetCode();
+      save_doubles_code = *save_doubles.GetCode(isolate);
+      store_buffer_overflow_code = *stub.GetCode(isolate);
     }
     save_doubles_code->set_is_pregenerated(true);
     store_buffer_overflow_code->set_is_pregenerated(true);
   }
   ISOLATE->set_fp_stubs_generated(true);
 }
 
 
-void CEntryStub::GenerateAheadOfTime() {
+void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
   CEntryStub stub(1, kDontSaveFPRegs);
-  Handle<Code> code = stub.GetCode();
+  Handle<Code> code = stub.GetCode(isolate);
   code->set_is_pregenerated(true);
 }
 
 
 static void JumpIfOOM(MacroAssembler* masm,
                       Register value,
                       Register scratch,
                       Label* oom_label) {
   STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
   STATIC_ASSERT(kFailureTag == 3);
(...skipping 3126 matching lines...)
@@ -7107 +7108 @@
 
     __ bind(&fpu_lt);
     __ li(v0, Operand(LESS));
     __ Ret();
   }
 
   __ bind(&unordered);
   __ bind(&generic_stub);
   ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
                      CompareIC::GENERIC);
-  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+  __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
 
   __ bind(&maybe_undefined1);
   if (Token::IsOrderedRelationalCompareOp(op_)) {
     __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
     __ Branch(&miss, ne, a0, Operand(at));
     __ JumpIfSmi(a1, &unordered);
     __ GetObjectType(a1, a2, a2);
     __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
     __ jmp(&unordered);
   }
(...skipping 230 matching lines...)
@@ -7358 +7359 @@
   Label find_ra;
   masm->bal(&find_ra);  // ra = pc + 8.
   masm->nop();  // Branch delay slot nop.
   masm->bind(&find_ra);
 
   const int kNumInstructionsToJump = 6;
   masm->addiu(ra, ra, kNumInstructionsToJump * kPointerSize);
   // Push return address (accessible to GC through exit frame pc).
   // This spot for ra was reserved in EnterExitFrame.
   masm->sw(ra, MemOperand(sp, kCArgsSlotsSize));
-  masm->li(ra,
-           Operand(reinterpret_cast<intptr_t>(GetCode().location()),
-                   RelocInfo::CODE_TARGET),
-           CONSTANT_SIZE);
+  intptr_t loc =
+      reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location());
+  masm->li(ra, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
   // Call the function.
   masm->Jump(t9);
   // Make sure the stored 'ra' points to this position.
   ASSERT_EQ(kNumInstructionsToJump, masm->InstructionsGeneratedSince(&find_ra));
 }
 
 
 void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
                                                         Label* miss,
                                                         Label* done,
(...skipping 310 matching lines...)
@@ -7692 +7692 @@
   }
   return false;
 }
 
 
 bool StoreBufferOverflowStub::IsPregenerated() {
   return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated();
 }
 
 
-void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
+void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
+    Isolate* isolate) {
   StoreBufferOverflowStub stub1(kDontSaveFPRegs);
-  stub1.GetCode()->set_is_pregenerated(true);
+  stub1.GetCode(isolate)->set_is_pregenerated(true);
 }
 
 
-void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
+void RecordWriteStub::GenerateFixedRegStubsAheadOfTime(Isolate* isolate) {
   for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
        !entry->object.is(no_reg);
        entry++) {
     RecordWriteStub stub(entry->object,
                          entry->value,
                          entry->address,
                          entry->action,
                          kDontSaveFPRegs);
-    stub.GetCode()->set_is_pregenerated(true);
+    stub.GetCode(isolate)->set_is_pregenerated(true);
   }
 }
 
 
 bool CodeStub::CanUseFPRegisters() {
   return CpuFeatures::IsSupported(FPU);
 }
 
 
 // Takes the input in 3 registers: address_ value_ and object_. A pointer to
(...skipping 256 matching lines...)
@@ -7984 +7985 @@
                                  &slow_elements);
   __ Ret(USE_DELAY_SLOT);
   __ mov(v0, a0);
 }
 
 
 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
   ASSERT(!Serializer::enabled());
   bool save_fp_regs = CpuFeatures::IsSupported(FPU);
   CEntryStub ces(1, save_fp_regs ? kSaveFPRegs : kDontSaveFPRegs);
-  __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
+  __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
   int parameter_count_offset =
       StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
   __ lw(a1, MemOperand(fp, parameter_count_offset));
   masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
   __ sll(a1, a1, kPointerSizeLog2);
   __ Addu(sp, sp, a1);
   __ Ret();
 }
(...skipping 55 matching lines...)
@@ -8060 +8061 @@
   __ Pop(ra, t1, a1);
   __ Ret();
 }
 
 
 #undef __
 
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_MIPS