Chromium Code Reviews

Side by Side Diff: src/mips/macro-assembler-mips.cc

Issue 8139027: Version 3.6.5 (Closed) Base URL: http://v8.googlecode.com/svn/trunk/
Patch Set: '' Created 9 years, 2 months ago
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 24 matching lines...)
35 #include "codegen.h" 35 #include "codegen.h"
36 #include "debug.h" 36 #include "debug.h"
37 #include "runtime.h" 37 #include "runtime.h"
38 38
39 namespace v8 { 39 namespace v8 {
40 namespace internal { 40 namespace internal {
41 41
42 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size) 42 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
43 : Assembler(arg_isolate, buffer, size), 43 : Assembler(arg_isolate, buffer, size),
44 generating_stub_(false), 44 generating_stub_(false),
45 allow_stub_calls_(true) { 45 allow_stub_calls_(true),
46 has_frame_(false) {
46 if (isolate() != NULL) { 47 if (isolate() != NULL) {
47 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(), 48 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
48 isolate()); 49 isolate());
49 } 50 }
50 } 51 }
51 52
52 53
53 void MacroAssembler::LoadRoot(Register destination, 54 void MacroAssembler::LoadRoot(Register destination,
54 Heap::RootListIndex index) { 55 Heap::RootListIndex index) {
55 lw(destination, MemOperand(s6, index << kPointerSizeLog2)); 56 lw(destination, MemOperand(s6, index << kPointerSizeLog2));
(...skipping 56 matching lines...)
112 sw(scratch, MemOperand(object, Page::kDirtyFlagOffset)); 113 sw(scratch, MemOperand(object, Page::kDirtyFlagOffset));
113 } 114 }
114 115
115 116
116 // Push and pop all registers that can hold pointers. 117 // Push and pop all registers that can hold pointers.
117 void MacroAssembler::PushSafepointRegisters() { 118 void MacroAssembler::PushSafepointRegisters() {
118 // Safepoints expect a block of kNumSafepointRegisters values on the 119 // Safepoints expect a block of kNumSafepointRegisters values on the
119 // stack, so adjust the stack for unsaved registers. 120 // stack, so adjust the stack for unsaved registers.
120 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; 121 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
121 ASSERT(num_unsaved >= 0); 122 ASSERT(num_unsaved >= 0);
122 Subu(sp, sp, Operand(num_unsaved * kPointerSize)); 123 if (num_unsaved > 0) {
124 Subu(sp, sp, Operand(num_unsaved * kPointerSize));
125 }
123 MultiPush(kSafepointSavedRegisters); 126 MultiPush(kSafepointSavedRegisters);
124 } 127 }
125 128
126 129
127 void MacroAssembler::PopSafepointRegisters() { 130 void MacroAssembler::PopSafepointRegisters() {
128 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; 131 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
129 MultiPop(kSafepointSavedRegisters); 132 MultiPop(kSafepointSavedRegisters);
130 Addu(sp, sp, Operand(num_unsaved * kPointerSize)); 133 if (num_unsaved > 0) {
134 Addu(sp, sp, Operand(num_unsaved * kPointerSize));
135 }
131 } 136 }
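
For orientation, the stack shape these two helpers maintain: MultiPush stores the saved registers at the low addresses and the unsaved slots sit above them, so safepoint records can index a full kNumSafepointRegisters-sized block. A hedged sketch with illustrative slot counts (the real constants live in the MIPS port headers):

    // Frame after PushSafepointRegisters(), assuming for illustration
    // kNumSafepointRegisters == 24 and kNumSafepointSavedRegisters == 14:
    //
    //   sp + 0  .. sp + 13*kPointerSize : saved registers (MultiPush order)
    //   sp + 14 .. sp + 23 slots        : reserved, never written
    //
    // PopSafepointRegisters() unwinds in the opposite order: MultiPop first,
    // then Addu reclaims the gap (both adjustments now skipped when empty).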
132 137
133 138
134 void MacroAssembler::PushSafepointRegistersAndDoubles() { 139 void MacroAssembler::PushSafepointRegistersAndDoubles() {
135 PushSafepointRegisters(); 140 PushSafepointRegisters();
136 Subu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize)); 141 Subu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize));
137 for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i+=2) { 142 for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i+=2) {
138 FPURegister reg = FPURegister::FromAllocationIndex(i); 143 FPURegister reg = FPURegister::FromAllocationIndex(i);
139 sdc1(reg, MemOperand(sp, i * kDoubleSize)); 144 sdc1(reg, MemOperand(sp, i * kDoubleSize));
140 } 145 }
(...skipping 32 matching lines...)
173 return kSafepointRegisterStackIndexMap[reg_code]; 178 return kSafepointRegisterStackIndexMap[reg_code];
174 } 179 }
175 180
176 181
177 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) { 182 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
178 return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize); 183 return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
179 } 184 }
180 185
181 186
182 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) { 187 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
188 UNIMPLEMENTED_MIPS();
183 // General purpose registers are pushed last on the stack. 189 // General purpose registers are pushed last on the stack.
184 int doubles_size = FPURegister::kNumAllocatableRegisters * kDoubleSize; 190 int doubles_size = FPURegister::kNumAllocatableRegisters * kDoubleSize;
185 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize; 191 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
186 return MemOperand(sp, doubles_size + register_offset); 192 return MemOperand(sp, doubles_size + register_offset);
187 } 193 }
188 194
189 195
190
191
192 void MacroAssembler::InNewSpace(Register object, 196 void MacroAssembler::InNewSpace(Register object,
193 Register scratch, 197 Register scratch,
194 Condition cc, 198 Condition cc,
195 Label* branch) { 199 Label* branch) {
196 ASSERT(cc == eq || cc == ne); 200 ASSERT(cc == eq || cc == ne);
197 And(scratch, object, Operand(ExternalReference::new_space_mask(isolate()))); 201 And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
198 Branch(branch, cc, scratch, 202 Branch(branch, cc, scratch,
199 Operand(ExternalReference::new_space_start(isolate()))); 203 Operand(ExternalReference::new_space_start(isolate())));
200 } 204 }
201 205
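The masking check above relies on the young generation being one contiguous region whose size is a power of two and whose base is aligned to that size, so membership reduces to a single compare. A minimal host-side sketch of the invariant (names are illustrative, not the V8 API):

    #include <cstdint>

    // True iff addr lies in [start, start + size), given a power-of-two
    // size and a start address aligned to it.
    bool InRegion(uintptr_t addr, uintptr_t start, uintptr_t size) {
      uintptr_t mask = ~(size - 1);   // plays the role of new_space_mask
      return (addr & mask) == start;  // compared against new_space_start
    }
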
(...skipping 498 matching lines...)
700 ori(rd, rd, (j.imm32_ & kImm16Mask)); 704 ori(rd, rd, (j.imm32_ & kImm16Mask));
701 } 705 }
702 } 706 }
703 707
704 708
705 void MacroAssembler::MultiPush(RegList regs) { 709 void MacroAssembler::MultiPush(RegList regs) {
706 int16_t num_to_push = NumberOfBitsSet(regs); 710 int16_t num_to_push = NumberOfBitsSet(regs);
707 int16_t stack_offset = num_to_push * kPointerSize; 711 int16_t stack_offset = num_to_push * kPointerSize;
708 712
709 Subu(sp, sp, Operand(stack_offset)); 713 Subu(sp, sp, Operand(stack_offset));
710 for (int16_t i = kNumRegisters; i > 0; i--) { 714 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
711 if ((regs & (1 << i)) != 0) { 715 if ((regs & (1 << i)) != 0) {
712 stack_offset -= kPointerSize; 716 stack_offset -= kPointerSize;
713 sw(ToRegister(i), MemOperand(sp, stack_offset)); 717 sw(ToRegister(i), MemOperand(sp, stack_offset));
714 } 718 }
715 } 719 }
716 } 720 }
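
The new loop bounds fix an off-by-one: with kNumRegisters == 32, starting at i = kNumRegisters shifts 1 past the top bit (undefined for a 32-bit shift) and the old i > 0 exit never examined bit 0. A hedged sketch of the corrected RegList walk (the same fix appears in MultiPopReversed below):

    #include <cstdint>

    // Visit the set bits of a 32-register mask from high to low, the order
    // in which MultiPush stores them. Illustrative; RegList is a 32-bit mask.
    void ForEachRegHighToLow(uint32_t regs) {
      for (int i = 31; i >= 0; i--) {       // was: i = 32; i > 0
        if ((regs & (1u << i)) != 0) {
          // sw(ToRegister(i), MemOperand(sp, ...)) goes here.
        }
      }
    }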
717 721
718 722
719 void MacroAssembler::MultiPushReversed(RegList regs) { 723 void MacroAssembler::MultiPushReversed(RegList regs) {
720 int16_t num_to_push = NumberOfBitsSet(regs); 724 int16_t num_to_push = NumberOfBitsSet(regs);
(...skipping 18 matching lines...)
739 stack_offset += kPointerSize; 743 stack_offset += kPointerSize;
740 } 744 }
741 } 745 }
742 addiu(sp, sp, stack_offset); 746 addiu(sp, sp, stack_offset);
743 } 747 }
744 748
745 749
746 void MacroAssembler::MultiPopReversed(RegList regs) { 750 void MacroAssembler::MultiPopReversed(RegList regs) {
747 int16_t stack_offset = 0; 751 int16_t stack_offset = 0;
748 752
749 for (int16_t i = kNumRegisters; i > 0; i--) { 753 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
750 if ((regs & (1 << i)) != 0) { 754 if ((regs & (1 << i)) != 0) {
751 lw(ToRegister(i), MemOperand(sp, stack_offset)); 755 lw(ToRegister(i), MemOperand(sp, stack_offset));
752 stack_offset += kPointerSize; 756 stack_offset += kPointerSize;
753 } 757 }
754 } 758 }
755 addiu(sp, sp, stack_offset); 759 addiu(sp, sp, stack_offset);
756 } 760 }
757 761
758 762
759 void MacroAssembler::MultiPushFPU(RegList regs) { 763 void MacroAssembler::MultiPushFPU(RegList regs) {
(...skipping 47 matching lines...)
807 for (int16_t i = kNumRegisters; i > 0; i--) { 811 for (int16_t i = kNumRegisters; i > 0; i--) {
808 if ((regs & (1 << i)) != 0) { 812 if ((regs & (1 << i)) != 0) {
809 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset)); 813 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
810 stack_offset += kDoubleSize; 814 stack_offset += kDoubleSize;
811 } 815 }
812 } 816 }
813 addiu(sp, sp, stack_offset); 817 addiu(sp, sp, stack_offset);
814 } 818 }
815 819
816 820
821 void MacroAssembler::FlushICache(Register address, unsigned instructions) {
822 RegList saved_regs = kJSCallerSaved | ra.bit();
823 MultiPush(saved_regs);
824 AllowExternalCallThatCantCauseGC scope(this);
825
826 // Save to a0 in case address == t0.
827 Move(a0, address);
828 PrepareCallCFunction(2, t0);
829
830 li(a1, instructions * kInstrSize);
831 CallCFunction(ExternalReference::flush_icache_function(isolate()), 2);
832 MultiPop(saved_regs);
833 }
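
Everything the JS calling convention treats as caller-saved (plus ra) is spilled around the C call, and the address is staged through a0 before PrepareCallCFunction because that helper may clobber its t0 scratch. The callee is assumed to have the conventional two-argument flush shape; a hedged sketch of that signature (the actual symbol is whatever ExternalReference::flush_icache_function resolves to):

    #include <cstddef>

    // Hypothetical callee-side declaration: on hardware this would end in a
    // cacheflush syscall; in the simulator it invalidates the simulated cache.
    extern "C" void FlushICacheHelper(void* start, size_t size_in_bytes);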
834
835
817 void MacroAssembler::Ext(Register rt, 836 void MacroAssembler::Ext(Register rt,
818 Register rs, 837 Register rs,
819 uint16_t pos, 838 uint16_t pos,
820 uint16_t size) { 839 uint16_t size) {
821 ASSERT(pos < 32); 840 ASSERT(pos < 32);
822 ASSERT(pos + size < 33); 841 ASSERT(pos + size < 33);
823 842
824 if (mips32r2) { 843 if (mips32r2) {
825 ext_(rt, rs, pos, size); 844 ext_(rt, rs, pos, size);
826 } else { 845 } else {
(...skipping 106 matching lines...)
933 Register rs, 952 Register rs,
934 FPURegister scratch) { 953 FPURegister scratch) {
935 ASSERT(!fd.is(scratch)); 954 ASSERT(!fd.is(scratch));
936 ASSERT(!rs.is(at)); 955 ASSERT(!rs.is(at));
937 956
938 // Load 2^31 into scratch as its float representation. 957 // Load 2^31 into scratch as its float representation.
939 li(at, 0x41E00000); 958 li(at, 0x41E00000);
940 mtc1(at, FPURegister::from_code(scratch.code() + 1)); 959 mtc1(at, FPURegister::from_code(scratch.code() + 1));
941 mtc1(zero_reg, scratch); 960 mtc1(zero_reg, scratch);
942 // Test if scratch > fd. 961 // Test if scratch > fd.
943 c(OLT, D, fd, scratch); 962 // If fd < 2^31 we can convert it normally.
944
945 Label simple_convert; 963 Label simple_convert;
946 // If fd < 2^31 we can convert it normally. 964 BranchF(&simple_convert, NULL, lt, fd, scratch);
947 bc1t(&simple_convert);
948 965
949 // First we subtract 2^31 from fd, then trunc it to rs 966 // First we subtract 2^31 from fd, then trunc it to rs
950 // and add 2^31 to rs. 967 // and add 2^31 to rs.
951 sub_d(scratch, fd, scratch); 968 sub_d(scratch, fd, scratch);
952 trunc_w_d(scratch, scratch); 969 trunc_w_d(scratch, scratch);
953 mfc1(rs, scratch); 970 mfc1(rs, scratch);
954 Or(rs, rs, 1 << 31); 971 Or(rs, rs, 1 << 31);
955 972
956 Label done; 973 Label done;
957 Branch(&done); 974 Branch(&done);
958 // Simple conversion. 975 // Simple conversion.
959 bind(&simple_convert); 976 bind(&simple_convert);
960 trunc_w_d(scratch, fd); 977 trunc_w_d(scratch, fd);
961 mfc1(rs, scratch); 978 mfc1(rs, scratch);
962 979
963 bind(&done); 980 bind(&done);
964 } 981 }
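
trunc.w.d only produces a signed int32, so the unsigned conversion splits the range at 2^31 (the 0x41E00000:00000000 pattern loaded into the scratch pair): large inputs are reduced by 2^31, truncated, and the top bit is OR-ed back in. A host-side sketch of the same arithmetic, assuming the input is already in [0, 2^32):

    #include <cstdint>

    uint32_t TruncToUint32(double x) {
      const double two_31 = 2147483648.0;  // 2^31
      if (x < two_31) {
        // The simple_convert path: the value fits a signed int32.
        return static_cast<uint32_t>(static_cast<int32_t>(x));
      }
      // Subtract 2^31, truncate as signed, then restore the sign bit.
      return static_cast<uint32_t>(static_cast<int32_t>(x - two_31)) | (1u << 31);
    }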
965 982
966 983
984 void MacroAssembler::BranchF(Label* target,
985 Label* nan,
986 Condition cc,
987 FPURegister cmp1,
988 FPURegister cmp2,
989 BranchDelaySlot bd) {
990 if (cc == al) {
991 Branch(bd, target);
992 return;
993 }
994
995 ASSERT(nan || target);
996 // Check for unordered (NaN) cases.
997 if (nan) {
998 c(UN, D, cmp1, cmp2);
999 bc1t(nan);
1000 }
1001
1002 if (target) {
1003 // Here NaN cases were either handled by this function or are assumed to
1004 // have been handled by the caller.
1005 // Unsigned conditions are treated as their signed counterpart.
1006 switch (cc) {
1007 case Uless:
1008 case less:
1009 c(OLT, D, cmp1, cmp2);
1010 bc1t(target);
1011 break;
1012 case Ugreater:
1013 case greater:
1014 c(ULE, D, cmp1, cmp2);
1015 bc1f(target);
1016 break;
1017 case Ugreater_equal:
1018 case greater_equal:
1019 c(ULT, D, cmp1, cmp2);
1020 bc1f(target);
1021 break;
1022 case Uless_equal:
1023 case less_equal:
1024 c(OLE, D, cmp1, cmp2);
1025 bc1t(target);
1026 break;
1027 case eq:
1028 c(EQ, D, cmp1, cmp2);
1029 bc1t(target);
1030 break;
1031 case ne:
1032 c(EQ, D, cmp1, cmp2);
1033 bc1f(target);
1034 break;
1035 default:
1036 CHECK(0);
1037 };
1038 }
1039
1040 if (bd == PROTECT) {
1041 nop();
1042 }
1043 }
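
MIPS-I floating-point compares can only set the FCSR condition bit that bc1t/bc1f test, and there is no direct greater-than compare, so BranchF synthesizes the greater-style conditions from the complementary predicate and branches on false. The lowering above, summarized:

    //   less / Uless                   -> c.olt.d cmp1, cmp2  + bc1t
    //   less_equal / Uless_equal       -> c.ole.d cmp1, cmp2  + bc1t
    //   greater / Ugreater             -> c.ule.d cmp1, cmp2  + bc1f
    //   greater_equal / Ugreater_equal -> c.ult.d cmp1, cmp2  + bc1f
    //   eq                             -> c.eq.d  cmp1, cmp2  + bc1t
    //   ne                             -> c.eq.d  cmp1, cmp2  + bc1f
    // Unordered (NaN) operands are diverted first via c.un.d + bc1t to 'nan'.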
1044
1045
1046 void MacroAssembler::Move(FPURegister dst, double imm) {
1047 ASSERT(CpuFeatures::IsEnabled(FPU));
1048 static const DoubleRepresentation minus_zero(-0.0);
1049 static const DoubleRepresentation zero(0.0);
1050 DoubleRepresentation value(imm);
1051 // Handle special values first.
1052 bool force_load = dst.is(kDoubleRegZero);
1053 if (value.bits == zero.bits && !force_load) {
1054 mov_d(dst, kDoubleRegZero);
1055 } else if (value.bits == minus_zero.bits && !force_load) {
1056 neg_d(dst, kDoubleRegZero);
1057 } else {
1058 uint32_t lo, hi;
1059 DoubleAsTwoUInt32(imm, &lo, &hi);
1060 // Move the low part of the double into the lower of the corresponding FPU
1061 // register of the FPU register pair.
1062 if (lo != 0) {
1063 li(at, Operand(lo));
1064 mtc1(at, dst);
1065 } else {
1066 mtc1(zero_reg, dst);
1067 }
1068 // Move the high part of the double into the higher of the corresponding FPU
1069 // register of the FPU register pair.
1070 if (hi != 0) {
1071 li(at, Operand(hi));
1072 mtc1(at, dst.high());
1073 } else {
1074 mtc1(zero_reg, dst.high());
1075 }
1076 }
1077 }
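
DoubleAsTwoUInt32 is a plain bit-split of the IEEE-754 pattern; the low word lands in the even register of the pair and the high word in dst.high(). A minimal sketch of the split, with memcpy standing in for V8's bit_cast:

    #include <cstdint>
    #include <cstring>

    void DoubleAsTwoWords(double d, uint32_t* lo, uint32_t* hi) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);      // aliasing-safe bit copy
      *lo = static_cast<uint32_t>(bits);        // -> mtc1(..., dst)
      *hi = static_cast<uint32_t>(bits >> 32);  // -> mtc1(..., dst.high())
    }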
1078
1079
967 // Tries to get a signed int32 out of a double precision floating point heap 1080 // Tries to get a signed int32 out of a double precision floating point heap
968 // number. Rounds towards 0. Branch to 'not_int32' if the double is out of the 1081 // number. Rounds towards 0. Branch to 'not_int32' if the double is out of the
969 // 32-bit signed integer range. 1082 // 32-bit signed integer range.
970 // This method implementation differs from the ARM version for performance 1083 // This method implementation differs from the ARM version for performance
971 // reasons. 1084 // reasons.
972 void MacroAssembler::ConvertToInt32(Register source, 1085 void MacroAssembler::ConvertToInt32(Register source,
973 Register dest, 1086 Register dest,
974 Register scratch, 1087 Register scratch,
975 Register scratch2, 1088 Register scratch2,
976 FPURegister double_scratch, 1089 FPURegister double_scratch,
(...skipping 78 matching lines...)
1055 // Trick to check sign bit (msb) held in dest, count leading zero. 1168 // Trick to check sign bit (msb) held in dest, count leading zero.
1056 // 0 indicates negative, save negative version with conditional move. 1169 // 0 indicates negative, save negative version with conditional move.
1057 clz(dest, dest); 1170 clz(dest, dest);
1058 movz(scratch, scratch2, dest); 1171 movz(scratch, scratch2, dest);
1059 mov(dest, scratch); 1172 mov(dest, scratch);
1060 } 1173 }
1061 bind(&done); 1174 bind(&done);
1062 } 1175 }
1063 1176
1064 1177
1178 void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
1179 FPURegister result,
1180 DoubleRegister double_input,
1181 Register scratch1,
1182 Register except_flag,
1183 CheckForInexactConversion check_inexact) {
1184 ASSERT(CpuFeatures::IsSupported(FPU));
1185 CpuFeatures::Scope scope(FPU);
1186
1187 int32_t except_mask = kFCSRFlagMask; // Assume interested in all exceptions.
1188
1189 if (check_inexact == kDontCheckForInexactConversion) {
1190 // Ignore inexact exceptions.
1191 except_mask &= ~kFCSRInexactFlagMask;
1192 }
1193
1194 // Save FCSR.
1195 cfc1(scratch1, FCSR);
1196 // Disable FPU exceptions.
1197 ctc1(zero_reg, FCSR);
1198
1199 // Do operation based on rounding mode.
1200 switch (rounding_mode) {
1201 case kRoundToNearest:
1202 round_w_d(result, double_input);
1203 break;
1204 case kRoundToZero:
1205 trunc_w_d(result, double_input);
1206 break;
1207 case kRoundToPlusInf:
1208 ceil_w_d(result, double_input);
1209 break;
1210 case kRoundToMinusInf:
1211 floor_w_d(result, double_input);
1212 break;
1213 } // End of switch-statement.
1214
1215 // Retrieve FCSR.
1216 cfc1(except_flag, FCSR);
1217 // Restore FCSR.
1218 ctc1(scratch1, FCSR);
1219
1220 // Check for fpu exceptions.
1221 And(except_flag, except_flag, Operand(except_mask));
1222 }
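
The FCSR choreography is: save the control/status word, zero it so the conversion cannot trap and the sticky flag bits start clean, convert with the requested rounding, read the flags back into except_flag, then restore. A rough host-side analogue using <cfenv> (a sketch under the usual FENV_ACCESS caveats, not the V8 code path):

    #include <cfenv>
    #include <cmath>

    // Returns the masked exception flags raised by a round-toward-zero
    // conversion; pass check_inexact = false to ignore FE_INEXACT, like
    // kDontCheckForInexactConversion above.
    int TruncateWithFlags(double input, long* result, bool check_inexact) {
      std::feclearexcept(FE_ALL_EXCEPT);   // ~ ctc1(zero_reg, FCSR)
      int saved = std::fegetround();
      std::fesetround(FE_TOWARDZERO);      // ~ the kRoundToZero case
      *result = std::lrint(input);         // raises FE_INVALID on overflow/NaN
      std::fesetround(saved);
      int mask = FE_INVALID | FE_OVERFLOW | FE_UNDERFLOW;
      if (check_inexact) mask |= FE_INEXACT;
      return std::fetestexcept(mask);      // ~ cfc1 + And(except_mask)
    }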
1223
1224
1065 void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result, 1225 void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
1066 Register input_high, 1226 Register input_high,
1067 Register input_low, 1227 Register input_low,
1068 Register scratch) { 1228 Register scratch) {
1069 Label done, normal_exponent, restore_sign; 1229 Label done, normal_exponent, restore_sign;
1070 // Extract the biased exponent in result. 1230 // Extract the biased exponent in result.
1071 Ext(result, 1231 Ext(result,
1072 input_high, 1232 input_high,
1073 HeapNumber::kExponentShift, 1233 HeapNumber::kExponentShift,
1074 HeapNumber::kExponentBits); 1234 HeapNumber::kExponentBits);
(...skipping 66 matching lines...)
1141 Subu(result, zero_reg, input_high); 1301 Subu(result, zero_reg, input_high);
1142 movz(result, input_high, scratch); 1302 movz(result, input_high, scratch);
1143 bind(&done); 1303 bind(&done);
1144 } 1304 }
1145 1305
1146 1306
1147 void MacroAssembler::EmitECMATruncate(Register result, 1307 void MacroAssembler::EmitECMATruncate(Register result,
1148 FPURegister double_input, 1308 FPURegister double_input,
1149 FPURegister single_scratch, 1309 FPURegister single_scratch,
1150 Register scratch, 1310 Register scratch,
1151 Register input_high, 1311 Register scratch2,
1152 Register input_low) { 1312 Register scratch3) {
1153 CpuFeatures::Scope scope(FPU); 1313 CpuFeatures::Scope scope(FPU);
1154 ASSERT(!input_high.is(result)); 1314 ASSERT(!scratch2.is(result));
1155 ASSERT(!input_low.is(result)); 1315 ASSERT(!scratch3.is(result));
1156 ASSERT(!input_low.is(input_high)); 1316 ASSERT(!scratch3.is(scratch2));
1157 ASSERT(!scratch.is(result) && 1317 ASSERT(!scratch.is(result) &&
1158 !scratch.is(input_high) && 1318 !scratch.is(scratch2) &&
1159 !scratch.is(input_low)); 1319 !scratch.is(scratch3));
1160 ASSERT(!single_scratch.is(double_input)); 1320 ASSERT(!single_scratch.is(double_input));
1161 1321
1162 Label done; 1322 Label done;
1163 Label manual; 1323 Label manual;
1164 1324
1165 // Clear cumulative exception flags and save the FCSR. 1325 // Clear cumulative exception flags and save the FCSR.
1166 Register scratch2 = input_high;
1167 cfc1(scratch2, FCSR); 1326 cfc1(scratch2, FCSR);
1168 ctc1(zero_reg, FCSR); 1327 ctc1(zero_reg, FCSR);
1169 // Try a conversion to a signed integer. 1328 // Try a conversion to a signed integer.
1170 trunc_w_d(single_scratch, double_input); 1329 trunc_w_d(single_scratch, double_input);
1171 mfc1(result, single_scratch); 1330 mfc1(result, single_scratch);
1172 // Retrieve and restore the FCSR. 1331 // Retrieve and restore the FCSR.
1173 cfc1(scratch, FCSR); 1332 cfc1(scratch, FCSR);
1174 ctc1(scratch2, FCSR); 1333 ctc1(scratch2, FCSR);
1175 // Check for overflow and NaNs. 1334 // Check for overflow and NaNs.
1176 And(scratch, 1335 And(scratch,
1177 scratch, 1336 scratch,
1178 kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask); 1337 kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
1179 // If we had no exceptions we are done. 1338 // If we had no exceptions we are done.
1180 Branch(&done, eq, scratch, Operand(zero_reg)); 1339 Branch(&done, eq, scratch, Operand(zero_reg));
1181 1340
1182 // Load the double value and perform a manual truncation. 1341 // Load the double value and perform a manual truncation.
1342 Register input_high = scratch2;
1343 Register input_low = scratch3;
1183 Move(input_low, input_high, double_input); 1344 Move(input_low, input_high, double_input);
1184 EmitOutOfInt32RangeTruncate(result, 1345 EmitOutOfInt32RangeTruncate(result,
1185 input_high, 1346 input_high,
1186 input_low, 1347 input_low,
1187 scratch); 1348 scratch);
1188 bind(&done); 1349 bind(&done);
1189 } 1350 }
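
The contract here is ES5 ToInt32: truncate toward zero, wrap modulo 2^32, and map NaN and the infinities to 0. The fast path is a single trunc.w.d; only when FCSR reports overflow, underflow, or an invalid operation does control fall through to the manual EmitOutOfInt32RangeTruncate. A portable reference sketch of the semantics:

    #include <cmath>
    #include <cstdint>

    int32_t EcmaToInt32(double x) {
      if (!std::isfinite(x)) return 0;        // NaN, +Inf, -Inf -> 0
      double t = std::trunc(x);               // round toward zero
      double m = std::fmod(t, 4294967296.0);  // wrap modulo 2^32 ...
      if (m < 0) m += 4294967296.0;           // ... into [0, 2^32)
      return static_cast<int32_t>(static_cast<uint32_t>(m));
    }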
1190 1351
1191 1352
1192 void MacroAssembler::GetLeastBitsFromSmi(Register dst, 1353 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
(...skipping 11 matching lines...)
1204 1365
1205 1366
1206 // Emulated conditional branches do not emit a nop in the branch delay slot. 1367 // Emulated conditional branches do not emit a nop in the branch delay slot.
1207 // 1368 //
1208 // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct. 1369 // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
1209 #define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT( \ 1370 #define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT( \
1210 (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \ 1371 (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
1211 (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg)))) 1372 (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
1212 1373
1213 1374
1214 bool MacroAssembler::UseAbsoluteCodePointers() {
1215 if (is_trampoline_emitted()) {
1216 return true;
1217 } else {
1218 return false;
1219 }
1220 }
1221
1222
1223 void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) { 1375 void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
1224 BranchShort(offset, bdslot); 1376 BranchShort(offset, bdslot);
1225 } 1377 }
1226 1378
1227 1379
1228 void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs, 1380 void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
1229 const Operand& rt, 1381 const Operand& rt,
1230 BranchDelaySlot bdslot) { 1382 BranchDelaySlot bdslot) {
1231 BranchShort(offset, cond, rs, rt, bdslot); 1383 BranchShort(offset, cond, rs, rt, bdslot);
1232 } 1384 }
1233 1385
1234 1386
1235 void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) { 1387 void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
1236 bool is_label_near = is_near(L); 1388 if (L->is_bound()) {
1237 if (UseAbsoluteCodePointers() && !is_label_near) { 1389 if (is_near(L)) {
1238 Jr(L, bdslot); 1390 BranchShort(L, bdslot);
1391 } else {
1392 Jr(L, bdslot);
1393 }
1239 } else { 1394 } else {
1240 BranchShort(L, bdslot); 1395 if (is_trampoline_emitted()) {
1396 Jr(L, bdslot);
1397 } else {
1398 BranchShort(L, bdslot);
1399 }
1241 } 1400 }
1242 } 1401 }
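
The rewritten dispatch makes the long/short decision explicit: a bound label has a known distance, so is_near() (the roughly ±32K-instruction reach of a 16-bit branch offset) decides directly; for an unbound label the distance is unknown, and is_trampoline_emitted() serves as the conservative signal that short branches may no longer reach, forcing the absolute Jr sequence.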
1243 1402
1244 1403
1245 void MacroAssembler::Branch(Label* L, Condition cond, Register rs, 1404 void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
1246 const Operand& rt, 1405 const Operand& rt,
1247 BranchDelaySlot bdslot) { 1406 BranchDelaySlot bdslot) {
1248 bool is_label_near = is_near(L); 1407 if (L->is_bound()) {
1249 if (UseAbsoluteCodePointers() && !is_label_near) { 1408 if (is_near(L)) {
1250 Label skip; 1409 BranchShort(L, cond, rs, rt, bdslot);
1251 Condition neg_cond = NegateCondition(cond); 1410 } else {
1252 BranchShort(&skip, neg_cond, rs, rt); 1411 Label skip;
1253 Jr(L, bdslot); 1412 Condition neg_cond = NegateCondition(cond);
1254 bind(&skip); 1413 BranchShort(&skip, neg_cond, rs, rt);
1414 Jr(L, bdslot);
1415 bind(&skip);
1416 }
1255 } else { 1417 } else {
1256 BranchShort(L, cond, rs, rt, bdslot); 1418 if (is_trampoline_emitted()) {
1419 Label skip;
1420 Condition neg_cond = NegateCondition(cond);
1421 BranchShort(&skip, neg_cond, rs, rt);
1422 Jr(L, bdslot);
1423 bind(&skip);
1424 } else {
1425 BranchShort(L, cond, rs, rt, bdslot);
1426 }
1257 } 1427 }
1258 } 1428 }
1259 1429
1260 1430
1261 void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) { 1431 void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
1262 b(offset); 1432 b(offset);
1263 1433
1264 // Emit a nop in the branch delay slot if required. 1434 // Emit a nop in the branch delay slot if required.
1265 if (bdslot == PROTECT) 1435 if (bdslot == PROTECT)
1266 nop(); 1436 nop();
1267 } 1437 }
1268 1438
1269 1439
1270 void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs, 1440 void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
1271 const Operand& rt, 1441 const Operand& rt,
1272 BranchDelaySlot bdslot) { 1442 BranchDelaySlot bdslot) {
1273 BRANCH_ARGS_CHECK(cond, rs, rt); 1443 BRANCH_ARGS_CHECK(cond, rs, rt);
1274 ASSERT(!rs.is(zero_reg)); 1444 ASSERT(!rs.is(zero_reg));
1275 Register r2 = no_reg; 1445 Register r2 = no_reg;
1276 Register scratch = at; 1446 Register scratch = at;
1277 1447
1278 if (rt.is_reg()) { 1448 if (rt.is_reg()) {
1279 // We don't want any other register but scratch clobbered. 1449 // NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or
1280 ASSERT(!scratch.is(rs) && !scratch.is(rt.rm_)); 1450 // rt.
1281 r2 = rt.rm_; 1451 r2 = rt.rm_;
1282 switch (cond) { 1452 switch (cond) {
1283 case cc_always: 1453 case cc_always:
1284 b(offset); 1454 b(offset);
1285 break; 1455 break;
1286 case eq: 1456 case eq:
1287 beq(rs, r2, offset); 1457 beq(rs, r2, offset);
1288 break; 1458 break;
1289 case ne: 1459 case ne:
1290 bne(rs, r2, offset); 1460 bne(rs, r2, offset);
(...skipping 481 matching lines...)
1772 1942
1773 1943
1774 void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs, 1944 void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
1775 const Operand& rt, 1945 const Operand& rt,
1776 BranchDelaySlot bdslot) { 1946 BranchDelaySlot bdslot) {
1777 BranchAndLinkShort(offset, cond, rs, rt, bdslot); 1947 BranchAndLinkShort(offset, cond, rs, rt, bdslot);
1778 } 1948 }
1779 1949
1780 1950
1781 void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) { 1951 void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
1782 bool is_label_near = is_near(L); 1952 if (L->is_bound()) {
1783 if (UseAbsoluteCodePointers() && !is_label_near) { 1953 if (is_near(L)) {
1784 Jalr(L, bdslot); 1954 BranchAndLinkShort(L, bdslot);
1955 } else {
1956 Jalr(L, bdslot);
1957 }
1785 } else { 1958 } else {
1786 BranchAndLinkShort(L, bdslot); 1959 if (is_trampoline_emitted()) {
1960 Jalr(L, bdslot);
1961 } else {
1962 BranchAndLinkShort(L, bdslot);
1963 }
1787 } 1964 }
1788 } 1965 }
1789 1966
1790 1967
1791 void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs, 1968 void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
1792 const Operand& rt, 1969 const Operand& rt,
1793 BranchDelaySlot bdslot) { 1970 BranchDelaySlot bdslot) {
1794 bool is_label_near = is_near(L); 1971 if (L->is_bound()) {
1795 if (UseAbsoluteCodePointers() && !is_label_near) { 1972 if (is_near(L)) {
1796 Label skip; 1973 BranchAndLinkShort(L, cond, rs, rt, bdslot);
1797 Condition neg_cond = NegateCondition(cond); 1974 } else {
1798 BranchShort(&skip, neg_cond, rs, rt); 1975 Label skip;
1799 Jalr(L, bdslot); 1976 Condition neg_cond = NegateCondition(cond);
1800 bind(&skip); 1977 BranchShort(&skip, neg_cond, rs, rt);
1978 Jalr(L, bdslot);
1979 bind(&skip);
1980 }
1801 } else { 1981 } else {
1802 BranchAndLinkShort(L, cond, rs, rt, bdslot); 1982 if (is_trampoline_emitted()) {
1983 Label skip;
1984 Condition neg_cond = NegateCondition(cond);
1985 BranchShort(&skip, neg_cond, rs, rt);
1986 Jalr(L, bdslot);
1987 bind(&skip);
1988 } else {
1989 BranchAndLinkShort(L, cond, rs, rt, bdslot);
1990 }
1803 } 1991 }
1804 } 1992 }
1805 1993
1806 1994
1807 // We need to use a bgezal or bltzal, but they can't be used directly with the 1995 // We need to use a bgezal or bltzal, but they can't be used directly with the
1808 // slt instructions. We could use sub or add instead but we would miss overflow 1996 // slt instructions. We could use sub or add instead but we would miss overflow
1809 // cases, so we keep slt and add an intermediate third instruction. 1997 // cases, so we keep slt and add an intermediate third instruction.
1810 void MacroAssembler::BranchAndLinkShort(int16_t offset, 1998 void MacroAssembler::BranchAndLinkShort(int16_t offset,
1811 BranchDelaySlot bdslot) { 1999 BranchDelaySlot bdslot) {
1812 bal(offset); 2000 bal(offset);
(...skipping 486 matching lines...)
2299 2487
2300 void MacroAssembler::Push(Handle<Object> handle) { 2488 void MacroAssembler::Push(Handle<Object> handle) {
2301 li(at, Operand(handle)); 2489 li(at, Operand(handle));
2302 push(at); 2490 push(at);
2303 } 2491 }
2304 2492
2305 2493
2306 #ifdef ENABLE_DEBUGGER_SUPPORT 2494 #ifdef ENABLE_DEBUGGER_SUPPORT
2307 2495
2308 void MacroAssembler::DebugBreak() { 2496 void MacroAssembler::DebugBreak() {
2309 ASSERT(allow_stub_calls());
2310 mov(a0, zero_reg); 2497 mov(a0, zero_reg);
2311 li(a1, Operand(ExternalReference(Runtime::kDebugBreak, isolate()))); 2498 li(a1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
2312 CEntryStub ces(1); 2499 CEntryStub ces(1);
2500 ASSERT(AllowThisStubCall(&ces));
2313 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK); 2501 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
2314 } 2502 }
2315 2503
2316 #endif // ENABLE_DEBUGGER_SUPPORT 2504 #endif // ENABLE_DEBUGGER_SUPPORT
2317 2505
2318 2506
2319 // --------------------------------------------------------------------------- 2507 // ---------------------------------------------------------------------------
2320 // Exception handling. 2508 // Exception handling.
2321 2509
2322 void MacroAssembler::PushTryHandler(CodeLocation try_location, 2510 void MacroAssembler::PushTryHandler(CodeLocation try_location,
(...skipping 645 matching lines...)
2968 Addu(dst, dst, 1); 3156 Addu(dst, dst, 1);
2969 Subu(length, length, Operand(1)); 3157 Subu(length, length, Operand(1));
2970 Branch(&byte_loop_1, ne, length, Operand(zero_reg)); 3158 Branch(&byte_loop_1, ne, length, Operand(zero_reg));
2971 bind(&done); 3159 bind(&done);
2972 } 3160 }
2973 3161
2974 3162
2975 void MacroAssembler::CheckFastElements(Register map, 3163 void MacroAssembler::CheckFastElements(Register map,
2976 Register scratch, 3164 Register scratch,
2977 Label* fail) { 3165 Label* fail) {
2978 STATIC_ASSERT(FAST_ELEMENTS == 0); 3166 STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
3167 STATIC_ASSERT(FAST_ELEMENTS == 1);
2979 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset)); 3168 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
2980 Branch(fail, hi, scratch, Operand(Map::kMaximumBitField2FastElementValue)); 3169 Branch(fail, hi, scratch, Operand(Map::kMaximumBitField2FastElementValue));
2981 } 3170 }
2982 3171
2983 3172
2984 void MacroAssembler::CheckMap(Register obj, 3173 void MacroAssembler::CheckMap(Register obj,
2985 Register scratch, 3174 Register scratch,
2986 Handle<Map> map, 3175 Handle<Map> map,
2987 Label* fail, 3176 Label* fail,
2988 SmiCheckType smi_check_type) { 3177 SmiCheckType smi_check_type) {
(...skipping 175 matching lines...)
3164 } 3353 }
3165 } 3354 }
3166 3355
3167 3356
3168 void MacroAssembler::InvokeCode(Register code, 3357 void MacroAssembler::InvokeCode(Register code,
3169 const ParameterCount& expected, 3358 const ParameterCount& expected,
3170 const ParameterCount& actual, 3359 const ParameterCount& actual,
3171 InvokeFlag flag, 3360 InvokeFlag flag,
3172 const CallWrapper& call_wrapper, 3361 const CallWrapper& call_wrapper,
3173 CallKind call_kind) { 3362 CallKind call_kind) {
3363 // You can't call a function without a valid frame.
3364 ASSERT(flag == JUMP_FUNCTION || has_frame());
3365
3174 Label done; 3366 Label done;
3175 3367
3176 InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag, 3368 InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
3177 call_wrapper, call_kind); 3369 call_wrapper, call_kind);
3178 if (flag == CALL_FUNCTION) { 3370 if (flag == CALL_FUNCTION) {
3371 call_wrapper.BeforeCall(CallSize(code));
3179 SetCallKind(t1, call_kind); 3372 SetCallKind(t1, call_kind);
3180 Call(code); 3373 Call(code);
3374 call_wrapper.AfterCall();
3181 } else { 3375 } else {
3182 ASSERT(flag == JUMP_FUNCTION); 3376 ASSERT(flag == JUMP_FUNCTION);
3183 SetCallKind(t1, call_kind); 3377 SetCallKind(t1, call_kind);
3184 Jump(code); 3378 Jump(code);
3185 } 3379 }
3186 // Continue here if InvokePrologue does handle the invocation due to 3380 // Continue here if InvokePrologue does handle the invocation due to
3187 // mismatched parameter counts. 3381 // mismatched parameter counts.
3188 bind(&done); 3382 bind(&done);
3189 } 3383 }
3190 3384
3191 3385
3192 void MacroAssembler::InvokeCode(Handle<Code> code, 3386 void MacroAssembler::InvokeCode(Handle<Code> code,
3193 const ParameterCount& expected, 3387 const ParameterCount& expected,
3194 const ParameterCount& actual, 3388 const ParameterCount& actual,
3195 RelocInfo::Mode rmode, 3389 RelocInfo::Mode rmode,
3196 InvokeFlag flag, 3390 InvokeFlag flag,
3197 CallKind call_kind) { 3391 CallKind call_kind) {
3392 // You can't call a function without a valid frame.
3393 ASSERT(flag == JUMP_FUNCTION || has_frame());
3394
3198 Label done; 3395 Label done;
3199 3396
3200 InvokePrologue(expected, actual, code, no_reg, &done, flag, 3397 InvokePrologue(expected, actual, code, no_reg, &done, flag,
3201 NullCallWrapper(), call_kind); 3398 NullCallWrapper(), call_kind);
3202 if (flag == CALL_FUNCTION) { 3399 if (flag == CALL_FUNCTION) {
3203 SetCallKind(t1, call_kind); 3400 SetCallKind(t1, call_kind);
3204 Call(code, rmode); 3401 Call(code, rmode);
3205 } else { 3402 } else {
3206 SetCallKind(t1, call_kind); 3403 SetCallKind(t1, call_kind);
3207 Jump(code, rmode); 3404 Jump(code, rmode);
3208 } 3405 }
3209 // Continue here if InvokePrologue does handle the invocation due to 3406 // Continue here if InvokePrologue does handle the invocation due to
3210 // mismatched parameter counts. 3407 // mismatched parameter counts.
3211 bind(&done); 3408 bind(&done);
3212 } 3409 }
3213 3410
3214 3411
3215 void MacroAssembler::InvokeFunction(Register function, 3412 void MacroAssembler::InvokeFunction(Register function,
3216 const ParameterCount& actual, 3413 const ParameterCount& actual,
3217 InvokeFlag flag, 3414 InvokeFlag flag,
3218 const CallWrapper& call_wrapper, 3415 const CallWrapper& call_wrapper,
3219 CallKind call_kind) { 3416 CallKind call_kind) {
3417 // You can't call a function without a valid frame.
3418 ASSERT(flag == JUMP_FUNCTION || has_frame());
3419
3220 // Contract with called JS functions requires that function is passed in a1. 3420 // Contract with called JS functions requires that function is passed in a1.
3221 ASSERT(function.is(a1)); 3421 ASSERT(function.is(a1));
3222 Register expected_reg = a2; 3422 Register expected_reg = a2;
3223 Register code_reg = a3; 3423 Register code_reg = a3;
3224 3424
3225 lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); 3425 lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
3226 lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); 3426 lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3227 lw(expected_reg, 3427 lw(expected_reg,
3228 FieldMemOperand(code_reg, 3428 FieldMemOperand(code_reg,
3229 SharedFunctionInfo::kFormalParameterCountOffset)); 3429 SharedFunctionInfo::kFormalParameterCountOffset));
3230 sra(expected_reg, expected_reg, kSmiTagSize); 3430 sra(expected_reg, expected_reg, kSmiTagSize);
3231 lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset)); 3431 lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
3232 3432
3233 ParameterCount expected(expected_reg); 3433 ParameterCount expected(expected_reg);
3234 InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind); 3434 InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind);
3235 } 3435 }
3236 3436
3237 3437
3238 void MacroAssembler::InvokeFunction(JSFunction* function, 3438 void MacroAssembler::InvokeFunction(JSFunction* function,
3239 const ParameterCount& actual, 3439 const ParameterCount& actual,
3240 InvokeFlag flag, 3440 InvokeFlag flag,
3241 CallKind call_kind) { 3441 CallKind call_kind) {
3442 // You can't call a function without a valid frame.
3443 ASSERT(flag == JUMP_FUNCTION || has_frame());
3444
3242 ASSERT(function->is_compiled()); 3445 ASSERT(function->is_compiled());
3243 3446
3244 // Get the function and setup the context. 3447 // Get the function and setup the context.
3245 li(a1, Operand(Handle<JSFunction>(function))); 3448 li(a1, Operand(Handle<JSFunction>(function)));
3246 lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); 3449 lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3247 3450
3248 // Invoke the cached code. 3451 // Invoke the cached code.
3249 Handle<Code> code(function->code()); 3452 Handle<Code> code(function->code());
3250 ParameterCount expected(function->shared()->formal_parameter_count()); 3453 ParameterCount expected(function->shared()->formal_parameter_count());
3251 if (V8::UseCrankshaft()) { 3454 if (V8::UseCrankshaft()) {
3252 UNIMPLEMENTED_MIPS(); 3455 // TODO(kasperl): For now, we always call indirectly through the
3456 // code field in the function to allow recompilation to take effect
3457 // without changing any of the call sites.
3458 lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
3459 InvokeCode(a3, expected, actual, flag, NullCallWrapper(), call_kind);
3253 } else { 3460 } else {
3254 InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag, call_kind); 3461 InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag, call_kind);
3255 } 3462 }
3256 } 3463 }
3257 3464
3258 3465
3259 void MacroAssembler::IsObjectJSObjectType(Register heap_object, 3466 void MacroAssembler::IsObjectJSObjectType(Register heap_object,
3260 Register map, 3467 Register map,
3261 Register scratch, 3468 Register scratch,
3262 Label* fail) { 3469 Label* fail) {
(...skipping 79 matching lines...)
3342 lw(map, FieldMemOperand(object, HeapObject::kMapOffset)); 3549 lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
3343 lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset)); 3550 lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
3344 } 3551 }
3345 3552
3346 3553
3347 // ----------------------------------------------------------------------------- 3554 // -----------------------------------------------------------------------------
3348 // Runtime calls. 3555 // Runtime calls.
3349 3556
3350 void MacroAssembler::CallStub(CodeStub* stub, Condition cond, 3557 void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
3351 Register r1, const Operand& r2) { 3558 Register r1, const Operand& r2) {
3352 ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs. 3559 ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
3353 Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond, r1, r2); 3560 Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond, r1, r2);
3354 } 3561 }
3355 3562
3356 3563
3357 MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond, 3564 MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond,
3358 Register r1, const Operand& r2) { 3565 Register r1, const Operand& r2) {
3359 ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs. 3566 ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
3360 Object* result; 3567 Object* result;
3361 { MaybeObject* maybe_result = stub->TryGetCode(); 3568 { MaybeObject* maybe_result = stub->TryGetCode();
3362 if (!maybe_result->ToObject(&result)) return maybe_result; 3569 if (!maybe_result->ToObject(&result)) return maybe_result;
3363 } 3570 }
3364 Call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, 3571 Call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET,
3365 kNoASTId, cond, r1, r2); 3572 kNoASTId, cond, r1, r2);
3366 return result; 3573 return result;
3367 } 3574 }
3368 3575
3369 3576
3370 void MacroAssembler::TailCallStub(CodeStub* stub) { 3577 void MacroAssembler::TailCallStub(CodeStub* stub) {
3371 ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs. 3578 ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
3372 Jump(stub->GetCode(), RelocInfo::CODE_TARGET); 3579 Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
3373 } 3580 }
3374 3581
3375 3582
3376 MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub, 3583 MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub,
3377 Condition cond, 3584 Condition cond,
3378 Register r1, 3585 Register r1,
3379 const Operand& r2) { 3586 const Operand& r2) {
3380 ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
3381 Object* result; 3587 Object* result;
3382 { MaybeObject* maybe_result = stub->TryGetCode(); 3588 { MaybeObject* maybe_result = stub->TryGetCode();
3383 if (!maybe_result->ToObject(&result)) return maybe_result; 3589 if (!maybe_result->ToObject(&result)) return maybe_result;
3384 } 3590 }
3385 Jump(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond, r1, r2); 3591 Jump(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond, r1, r2);
3386 return result; 3592 return result;
3387 } 3593 }
3388 3594
3389 3595
3390 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) { 3596 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
(...skipping 88 matching lines...)
3479 li(a0, Operand(ExternalReference::isolate_address())); 3685 li(a0, Operand(ExternalReference::isolate_address()));
3480 CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()), 3686 CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()),
3481 1); 3687 1);
3482 mov(v0, s0); 3688 mov(v0, s0);
3483 jmp(&leave_exit_frame); 3689 jmp(&leave_exit_frame);
3484 3690
3485 return result; 3691 return result;
3486 } 3692 }
3487 3693
3488 3694
3695 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
3696 if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
3697 return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
3698 }
3699
3700
3489 void MacroAssembler::IllegalOperation(int num_arguments) { 3701 void MacroAssembler::IllegalOperation(int num_arguments) {
3490 if (num_arguments > 0) { 3702 if (num_arguments > 0) {
3491 addiu(sp, sp, num_arguments * kPointerSize); 3703 addiu(sp, sp, num_arguments * kPointerSize);
3492 } 3704 }
3493 LoadRoot(v0, Heap::kUndefinedValueRootIndex); 3705 LoadRoot(v0, Heap::kUndefinedValueRootIndex);
3494 } 3706 }
3495 3707
3496 3708
3497 void MacroAssembler::IndexFromHash(Register hash, 3709 void MacroAssembler::IndexFromHash(Register hash,
3498 Register index) { 3710 Register index) {
(...skipping 60 matching lines...)
3559 void MacroAssembler::AdduAndCheckForOverflow(Register dst, 3771 void MacroAssembler::AdduAndCheckForOverflow(Register dst,
3560 Register left, 3772 Register left,
3561 Register right, 3773 Register right,
3562 Register overflow_dst, 3774 Register overflow_dst,
3563 Register scratch) { 3775 Register scratch) {
3564 ASSERT(!dst.is(overflow_dst)); 3776 ASSERT(!dst.is(overflow_dst));
3565 ASSERT(!dst.is(scratch)); 3777 ASSERT(!dst.is(scratch));
3566 ASSERT(!overflow_dst.is(scratch)); 3778 ASSERT(!overflow_dst.is(scratch));
3567 ASSERT(!overflow_dst.is(left)); 3779 ASSERT(!overflow_dst.is(left));
3568 ASSERT(!overflow_dst.is(right)); 3780 ASSERT(!overflow_dst.is(right));
3569 ASSERT(!left.is(right)); 3781
3782 if (left.is(right) && dst.is(left)) {
3783 ASSERT(!dst.is(t9));
3784 ASSERT(!scratch.is(t9));
3785 ASSERT(!left.is(t9));
3786 ASSERT(!right.is(t9));
3787 ASSERT(!overflow_dst.is(t9));
3788 mov(t9, right);
3789 right = t9;
3790 }
3570 3791
3571 if (dst.is(left)) { 3792 if (dst.is(left)) {
3572 mov(scratch, left); // Preserve left. 3793 mov(scratch, left); // Preserve left.
3573 addu(dst, left, right); // Left is overwritten. 3794 addu(dst, left, right); // Left is overwritten.
3574 xor_(scratch, dst, scratch); // Original left. 3795 xor_(scratch, dst, scratch); // Original left.
3575 xor_(overflow_dst, dst, right); 3796 xor_(overflow_dst, dst, right);
3576 and_(overflow_dst, overflow_dst, scratch); 3797 and_(overflow_dst, overflow_dst, scratch);
3577 } else if (dst.is(right)) { 3798 } else if (dst.is(right)) {
3578 mov(scratch, right); // Preserve right. 3799 mov(scratch, right); // Preserve right.
3579 addu(dst, left, right); // Right is overwritten. 3800 addu(dst, left, right); // Right is overwritten.
(...skipping 12 matching lines...)
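
AdduAndCheckForOverflow uses the classic branch-free test: a signed addition overflows exactly when both operands agree in sign and the sum does not, i.e. when the sign bit of (sum ^ left) & (sum ^ right) is set. The new t9 shuffle exists so the test still holds when left, right, and dst all alias. A sketch of the predicate:

    #include <cstdint>

    bool AddOverflows(int32_t a, int32_t b) {
      uint32_t ua = static_cast<uint32_t>(a), ub = static_cast<uint32_t>(b);
      uint32_t sum = ua + ub;                  // wraps, like addu
      uint32_t ovf = (sum ^ ua) & (sum ^ ub);  // the two xor_ + and_ above
      return (ovf & 0x80000000u) != 0;         // callers branch on the sign bit
    }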
3592 void MacroAssembler::SubuAndCheckForOverflow(Register dst, 3813 void MacroAssembler::SubuAndCheckForOverflow(Register dst,
3593 Register left, 3814 Register left,
3594 Register right, 3815 Register right,
3595 Register overflow_dst, 3816 Register overflow_dst,
3596 Register scratch) { 3817 Register scratch) {
3597 ASSERT(!dst.is(overflow_dst)); 3818 ASSERT(!dst.is(overflow_dst));
3598 ASSERT(!dst.is(scratch)); 3819 ASSERT(!dst.is(scratch));
3599 ASSERT(!overflow_dst.is(scratch)); 3820 ASSERT(!overflow_dst.is(scratch));
3600 ASSERT(!overflow_dst.is(left)); 3821 ASSERT(!overflow_dst.is(left));
3601 ASSERT(!overflow_dst.is(right)); 3822 ASSERT(!overflow_dst.is(right));
3602 ASSERT(!left.is(right));
3603 ASSERT(!scratch.is(left)); 3823 ASSERT(!scratch.is(left));
3604 ASSERT(!scratch.is(right)); 3824 ASSERT(!scratch.is(right));
3605 3825
3826 // This happens with some crankshaft code. Since Subu works fine if
3827 // left == right, let's not make that restriction here.
3828 if (left.is(right)) {
3829 mov(dst, zero_reg);
3830 mov(overflow_dst, zero_reg);
3831 return;
3832 }
3833
3606 if (dst.is(left)) { 3834 if (dst.is(left)) {
3607 mov(scratch, left); // Preserve left. 3835 mov(scratch, left); // Preserve left.
3608 subu(dst, left, right); // Left is overwritten. 3836 subu(dst, left, right); // Left is overwritten.
3609 xor_(overflow_dst, dst, scratch); // scratch is original left. 3837 xor_(overflow_dst, dst, scratch); // scratch is original left.
3610 xor_(scratch, scratch, right); // scratch is original left ^ right. 3838 xor_(scratch, scratch, right); // scratch is original left ^ right.
3611 and_(overflow_dst, scratch, overflow_dst); 3839 and_(overflow_dst, scratch, overflow_dst);
3612 } else if (dst.is(right)) { 3840 } else if (dst.is(right)) {
3613 mov(scratch, right); // Preserve right. 3841 mov(scratch, right); // Preserve right.
3614 subu(dst, left, right); // Right is overwritten. 3842 subu(dst, left, right); // Right is overwritten.
3615 xor_(overflow_dst, dst, left); 3843 xor_(overflow_dst, dst, left);
(...skipping 99 matching lines...)
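
The subtraction variant tests a different predicate: a - b overflows exactly when the operands differ in sign and the difference disagrees in sign with a, i.e. when the sign bit of (a ^ b) & (diff ^ a) is set. That also justifies the new left == right early-out, since equal operands can never overflow and always yield zero. Sketch:

    #include <cstdint>

    bool SubOverflows(int32_t a, int32_t b) {
      uint32_t ua = static_cast<uint32_t>(a), ub = static_cast<uint32_t>(b);
      uint32_t diff = ua - ub;                 // wraps, like subu
      uint32_t ovf = (ua ^ ub) & (diff ^ ua);
      return (ovf & 0x80000000u) != 0;
    }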
3715 const ExternalReference& builtin) { 3943 const ExternalReference& builtin) {
3716 li(a1, Operand(builtin)); 3944 li(a1, Operand(builtin));
3717 CEntryStub stub(1); 3945 CEntryStub stub(1);
3718 return TryTailCallStub(&stub); 3946 return TryTailCallStub(&stub);
3719 } 3947 }
3720 3948
3721 3949
3722 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, 3950 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
3723 InvokeFlag flag, 3951 InvokeFlag flag,
3724 const CallWrapper& call_wrapper) { 3952 const CallWrapper& call_wrapper) {
3953 // You can't call a builtin without a valid frame.
3954 ASSERT(flag == JUMP_FUNCTION || has_frame());
3955
3725 GetBuiltinEntry(t9, id); 3956 GetBuiltinEntry(t9, id);
3726 if (flag == CALL_FUNCTION) { 3957 if (flag == CALL_FUNCTION) {
3727 call_wrapper.BeforeCall(CallSize(t9)); 3958 call_wrapper.BeforeCall(CallSize(t9));
3728 SetCallKind(t1, CALL_AS_METHOD); 3959 SetCallKind(t1, CALL_AS_METHOD);
3729 Call(t9); 3960 Call(t9);
3730 call_wrapper.AfterCall(); 3961 call_wrapper.AfterCall();
3731 } else { 3962 } else {
3732 ASSERT(flag == JUMP_FUNCTION); 3963 ASSERT(flag == JUMP_FUNCTION);
3733 SetCallKind(t1, CALL_AS_METHOD); 3964 SetCallKind(t1, CALL_AS_METHOD);
3734 Jump(t9); 3965 Jump(t9);
(...skipping 112 matching lines...)
3847 // from the real pointer as a smi. 4078 // from the real pointer as a smi.
3848 intptr_t p1 = reinterpret_cast<intptr_t>(msg); 4079 intptr_t p1 = reinterpret_cast<intptr_t>(msg);
3849 intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag; 4080 intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
3850 ASSERT(reinterpret_cast<Object*>(p0)->IsSmi()); 4081 ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
3851 #ifdef DEBUG 4082 #ifdef DEBUG
3852 if (msg != NULL) { 4083 if (msg != NULL) {
3853 RecordComment("Abort message: "); 4084 RecordComment("Abort message: ");
3854 RecordComment(msg); 4085 RecordComment(msg);
3855 } 4086 }
3856 #endif 4087 #endif
3857 // Disable stub call restrictions to always allow calls to abort.
3858 AllowStubCallsScope allow_scope(this, true);
3859 4088
3860 li(a0, Operand(p0)); 4089 li(a0, Operand(p0));
3861 push(a0); 4090 push(a0);
3862 li(a0, Operand(Smi::FromInt(p1 - p0))); 4091 li(a0, Operand(Smi::FromInt(p1 - p0)));
3863 push(a0); 4092 push(a0);
3864 CallRuntime(Runtime::kAbort, 2); 4093 // Disable stub call restrictions to always allow calls to abort.
4094 if (!has_frame_) {
4095 // We don't actually want to generate a pile of code for this, so just
4096 // claim there is a stack frame, without generating one.
4097 FrameScope scope(this, StackFrame::NONE);
4098 CallRuntime(Runtime::kAbort, 2);
4099 } else {
4100 CallRuntime(Runtime::kAbort, 2);
4101 }
3865 // Will not return here. 4102 // Will not return here.
3866 if (is_trampoline_pool_blocked()) { 4103 if (is_trampoline_pool_blocked()) {
3867 // If the calling code cares about the exact number of 4104 // If the calling code cares about the exact number of
3868 // instructions generated, we insert padding here to keep the size 4105 // instructions generated, we insert padding here to keep the size
3869 // of the Abort macro constant. 4106 // of the Abort macro constant.
3870 // Currently in debug mode with debug_code enabled the number of 4107 // Currently in debug mode with debug_code enabled the number of
3871 // generated instructions is 14, so we use this as a maximum value. 4108 // generated instructions is 14, so we use this as a maximum value.
3872 static const int kExpectedAbortInstructions = 14; 4109 static const int kExpectedAbortInstructions = 14;
3873 int abort_instructions = InstructionsGeneratedSince(&abort_start); 4110 int abort_instructions = InstructionsGeneratedSince(&abort_start);
3874 ASSERT(abort_instructions <= kExpectedAbortInstructions); 4111 ASSERT(abort_instructions <= kExpectedAbortInstructions);
(...skipping 363 matching lines...)
4238 int kFlatAsciiStringMask = 4475 int kFlatAsciiStringMask =
4239 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; 4476 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
4240 int kFlatAsciiStringTag = ASCII_STRING_TYPE; 4477 int kFlatAsciiStringTag = ASCII_STRING_TYPE;
4241 And(scratch, type, Operand(kFlatAsciiStringMask)); 4478 And(scratch, type, Operand(kFlatAsciiStringMask));
4242 Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag)); 4479 Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag));
4243 } 4480 }
4244 4481
4245 4482
4246 static const int kRegisterPassedArguments = 4; 4483 static const int kRegisterPassedArguments = 4;
4247 4484
4248 void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) { 4485 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
4486 int num_double_arguments) {
4487 int stack_passed_words = 0;
4488 num_reg_arguments += 2 * num_double_arguments;
4489
4490 // Up to four simple arguments are passed in registers a0..a3.
4491 if (num_reg_arguments > kRegisterPassedArguments) {
4492 stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
4493 }
4494 stack_passed_words += kCArgSlotCount;
4495 return stack_passed_words;
4496 }
4497
4498
4499 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
4500 int num_double_arguments,
4501 Register scratch) {
4249 int frame_alignment = ActivationFrameAlignment(); 4502 int frame_alignment = ActivationFrameAlignment();
4250 4503
4251 // Up to four simple arguments are passed in registers a0..a3. 4504 // Up to four simple arguments are passed in registers a0..a3.
4252 // Those four arguments must have reserved argument slots on the stack for 4505 // Those four arguments must have reserved argument slots on the stack for
4253 // mips, even though those argument slots are not normally used. 4506 // mips, even though those argument slots are not normally used.
4254 // Remaining arguments are pushed on the stack, above (higher address than) 4507 // Remaining arguments are pushed on the stack, above (higher address than)
4255 // the argument slots. 4508 // the argument slots.
4256 int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ? 4509 int stack_passed_arguments = CalculateStackPassedWords(
4257 0 : num_arguments - kRegisterPassedArguments) + 4510 num_reg_arguments, num_double_arguments);
4258 kCArgSlotCount;
4259 if (frame_alignment > kPointerSize) { 4511 if (frame_alignment > kPointerSize) {
4260 // Make stack end at alignment and make room for num_arguments - 4 words 4512 // Make stack end at alignment and make room for num_arguments - 4 words
4261 // and the original value of sp. 4513 // and the original value of sp.
4262 mov(scratch, sp); 4514 mov(scratch, sp);
4263 Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize)); 4515 Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
4264 ASSERT(IsPowerOf2(frame_alignment)); 4516 ASSERT(IsPowerOf2(frame_alignment));
4265 And(sp, sp, Operand(-frame_alignment)); 4517 And(sp, sp, Operand(-frame_alignment));
4266 sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize)); 4518 sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
4267 } else { 4519 } else {
4268 Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize)); 4520 Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
4269 } 4521 }
4270 } 4522 }
4271 4523
4272 4524
4525 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
4526 Register scratch) {
4527 PrepareCallCFunction(num_reg_arguments, 0, scratch);
4528 }
4529
4530
4531 void MacroAssembler::CallCFunction(ExternalReference function,
4532 int num_reg_arguments,
4533 int num_double_arguments) {
4534 CallCFunctionHelper(no_reg,
4535 function,
4536 t8,
4537 num_reg_arguments,
4538 num_double_arguments);
4539 }
4540
4541
4542 void MacroAssembler::CallCFunction(Register function,
4543 Register scratch,
4544 int num_reg_arguments,
4545 int num_double_arguments) {
4546 CallCFunctionHelper(function,
4547 ExternalReference::the_hole_value_location(isolate()),
4548 scratch,
4549 num_reg_arguments,
4550 num_double_arguments);
4551 }
+
+
 void MacroAssembler::CallCFunction(ExternalReference function,
                                    int num_arguments) {
-  CallCFunctionHelper(no_reg, function, t8, num_arguments);
+  CallCFunction(function, num_arguments, 0);
 }
 
 
 void MacroAssembler::CallCFunction(Register function,
                                    Register scratch,
                                    int num_arguments) {
-  CallCFunctionHelper(function,
-                      ExternalReference::the_hole_value_location(isolate()),
-                      scratch,
-                      num_arguments);
+  CallCFunction(function, scratch, num_arguments, 0);
 }
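Taken together, the overloads above are paired with PrepareCallCFunction by stub code roughly as follows. This is a hedged sketch rather than code from this patch: the t0 scratch choice, the source registers, and the some_runtime_helper reference are hypothetical placeholders, and __ is assumed to be the usual ACCESS_MASM shorthand.

// Hypothetical stub fragment: call a C helper taking one integer argument
// and one double argument (O32 passes the first double argument in f12).
{
  FrameScope scope(masm, StackFrame::INTERNAL);  // CallCFunctionHelper asserts has_frame().
  __ PrepareCallCFunction(1, 1, t0);             // 1 register arg, 1 double arg, t0 as scratch.
  __ mov(a0, s0);                                // Hypothetical integer argument in a0.
  __ mov_d(f12, f4);                             // Hypothetical double argument in f12.
  __ CallCFunction(
      ExternalReference::some_runtime_helper(masm->isolate()), 1, 1);
}  // FrameScope emits the frame teardown on scope exit.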
 
 
 void MacroAssembler::CallCFunctionHelper(Register function,
                                          ExternalReference function_reference,
                                          Register scratch,
-                                         int num_arguments) {
+                                         int num_reg_arguments,
+                                         int num_double_arguments) {
+  ASSERT(has_frame());
   // Make sure that the stack is aligned before calling a C function unless
   // running in the simulator. The simulator has its own alignment check which
   // provides more information.
   // The argument slots are presumed to have been set up by
   // PrepareCallCFunction. The C function must be called via t9, for mips ABI.
 
 #if defined(V8_HOST_ARCH_MIPS)
   if (emit_debug_code()) {
     int frame_alignment = OS::ActivationFrameAlignment();
     int frame_alignment_mask = frame_alignment - 1;
(...skipping 17 matching lines...)
   if (function.is(no_reg)) {
     function = t9;
     li(function, Operand(function_reference));
   } else if (!function.is(t9)) {
     mov(t9, function);
     function = t9;
   }
 
   Call(function);
 
-  int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ?
-                                0 : num_arguments - kRegisterPassedArguments) +
-                               kCArgSlotCount;
+  int stack_passed_arguments = CalculateStackPassedWords(
+      num_reg_arguments, num_double_arguments);
 
   if (OS::ActivationFrameAlignment() > kPointerSize) {
     lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
   } else {
     Addu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
   }
 }
 
 
 #undef BRANCH_ARGS_CHECK
 
 
+void MacroAssembler::PatchRelocatedValue(Register li_location,
+                                         Register scratch,
+                                         Register new_value) {
+  lw(scratch, MemOperand(li_location));
+  // At this point scratch is a lui(at, ...) instruction.
+  if (emit_debug_code()) {
+    And(scratch, scratch, kOpcodeMask);
+    Check(eq, "The instruction to patch should be a lui.",
+          scratch, Operand(LUI));
+    lw(scratch, MemOperand(li_location));
+  }
+  srl(t9, new_value, kImm16Bits);
+  Ins(scratch, t9, 0, kImm16Bits);
+  sw(scratch, MemOperand(li_location));
+
+  lw(scratch, MemOperand(li_location, kInstrSize));
+  // scratch is now ori(at, ...).
+  if (emit_debug_code()) {
+    And(scratch, scratch, kOpcodeMask);
+    Check(eq, "The instruction to patch should be an ori.",
+          scratch, Operand(ORI));
+    lw(scratch, MemOperand(li_location, kInstrSize));
+  }
+  Ins(scratch, new_value, 0, kImm16Bits);
+  sw(scratch, MemOperand(li_location, kInstrSize));
+
+  // Update the I-cache so the new lui and ori can be executed.
+  FlushICache(li_location, 2);
+}
 
 
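PatchRelocatedValue rewrites the two 16-bit immediates of a lui/ori pair that materializes a 32-bit constant. A worked example of the split (values illustrative):

#include <cstdint>
#include <cstdio>

int main() {
  const int kImm16Bits = 16;
  uint32_t new_value = 0x12345678;
  uint32_t lui_imm = new_value >> kImm16Bits;  // srl(t9, new_value, kImm16Bits)
  uint32_t ori_imm = new_value & 0xffff;       // Low half inserted by Ins(..., 0, kImm16Bits).
  // The patched pair executes as: lui at, 0x1234 ; ori at, at, 0x5678.
  printf("lui imm = %#x, ori imm = %#x\n", (unsigned)lui_imm, (unsigned)ori_imm);
  return 0;
}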
 void MacroAssembler::LoadInstanceDescriptors(Register map,
                                              Register descriptors) {
   lw(descriptors,
      FieldMemOperand(map, Map::kInstanceDescriptorsOrBitField3Offset));
   Label not_smi;
   JumpIfNotSmi(descriptors, &not_smi);
   li(descriptors, Operand(FACTORY->empty_descriptor_array()));
   bind(&not_smi);
 }
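The JumpIfNotSmi test above works because the map field is overloaded: it holds either bit field 3 encoded as a Smi or a pointer to a descriptor array. A one-line sketch of the 32-bit tag rule this relies on (a general fact about V8's tagging scheme, not code from this file):

// On 32-bit V8, a Smi has low bit 0 and a heap-object pointer has low bit 1,
// so a single mask distinguishes the two cases.
bool IsSmi(uint32_t tagged_word) { return (tagged_word & 1) == 0; }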
 
 
+void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
+  ASSERT(!output_reg.is(input_reg));
+  Label done;
+  li(output_reg, Operand(255));
+  // Normal branch: nop in delay slot.
+  Branch(&done, gt, input_reg, Operand(output_reg));
+  // Use delay slot in this branch.
+  Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
+  mov(output_reg, zero_reg);  // In delay slot.
+  mov(output_reg, input_reg);  // Value is in range 0..255.
+  bind(&done);
+}
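Because the delay slot of the second branch executes whether or not the branch is taken, output_reg is zeroed unconditionally before the final in-range move; the net effect is an ordinary saturating clamp. A plain restatement (illustrative only):

#include <cstdio>

int ClampUint8(int input) {
  if (input > 255) return 255;  // First branch: output_reg pre-loaded with 255.
  if (input < 0) return 0;      // Second branch: zero placed via the delay slot.
  return input;                 // Fall-through: value already in 0..255.
}

int main() {
  printf("%d %d %d\n", ClampUint8(-7), ClampUint8(42), ClampUint8(300));  // 0 42 255
  return 0;
}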
 
 
+void MacroAssembler::ClampDoubleToUint8(Register result_reg,
+                                        DoubleRegister input_reg,
+                                        DoubleRegister temp_double_reg) {
+  Label above_zero;
+  Label done;
+  Label in_bounds;
+
+  Move(temp_double_reg, 0.0);
+  BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);
+
+  // Double value is less than zero, NaN or -Inf; return 0.
+  mov(result_reg, zero_reg);
+  Branch(&done);
+
+  // Double value is above 255; return 255.
+  bind(&above_zero);
+  Move(temp_double_reg, 255.0);
+  BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
+  li(result_reg, Operand(255));
+  Branch(&done);
+
+  // In 0-255 range, round and truncate.
+  bind(&in_bounds);
+  round_w_d(temp_double_reg, input_reg);
+  mfc1(result_reg, temp_double_reg);
+  bind(&done);
+}
 
 
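The branch structure above has one subtlety worth spelling out: NaN fails the ordered greater-than test against 0.0, so NaN lands in the zero path, while round_w_d rounds to the nearest integer. A hedged C++ restatement, with std::lrint standing in for round_w_d under the default round-to-nearest mode:

#include <cmath>
#include <cstdio>

int ClampDoubleToUint8(double input) {
  if (!(input > 0.0)) return 0;       // <= 0, -Inf, or NaN (unordered compare fails).
  if (!(input <= 255.0)) return 255;  // > 255, or +Inf.
  return static_cast<int>(std::lrint(input));  // Round to nearest, like round_w_d.
}

int main() {
  printf("%d %d %d %d\n",
         ClampDoubleToUint8(-1.5), ClampDoubleToUint8(254.4),
         ClampDoubleToUint8(1e9), ClampDoubleToUint8(NAN));  // 0 254 255 0
  return 0;
}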
 CodePatcher::CodePatcher(byte* address, int instructions)
     : address_(address),
       instructions_(instructions),
       size_(instructions * Assembler::kInstrSize),
       masm_(Isolate::Current(), address, size_ + Assembler::kGap) {
   // Create a new macro assembler pointing to the address of the code to patch.
   // The size is adjusted with kGap in order for the assembler to generate size
   // bytes of instructions without failing with buffer size constraints.
   ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
 }
(...skipping 37 matching lines...)
           opcode == BGTZL);
   opcode = (cond == eq) ? BEQ : BNE;
   instr = (instr & ~kOpcodeMask) | opcode;
   masm_.emit(instr);
 }
 
 
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_MIPS