| OLD | NEW |
| 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file |
| 2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
| 3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
| 4 | 4 |
| 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_MIPS. | 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_MIPS. |
| 6 #if defined(TARGET_ARCH_MIPS) | 6 #if defined(TARGET_ARCH_MIPS) |
| 7 | 7 |
| 8 #include "vm/intrinsifier.h" | 8 #include "vm/intrinsifier.h" |
| 9 | 9 |
| 10 #include "vm/assembler.h" | 10 #include "vm/assembler.h" |
| (...skipping 11 matching lines...) |
| 22 // S4: Arguments descriptor | 22 // S4: Arguments descriptor |
| 23 // RA: Return address | 23 // RA: Return address |
| 24 // The S4 register can be destroyed only if there is no slow-path, i.e. | 24 // The S4 register can be destroyed only if there is no slow-path, i.e. |
| 25 // if the intrinsified method always executes a return. | 25 // if the intrinsified method always executes a return. |
| 26 // The FP register should not be modified, because it is used by the profiler. | 26 // The FP register should not be modified, because it is used by the profiler. |
| 27 // The PP and THR registers (see constants_mips.h) must be preserved. | 27 // The PP and THR registers (see constants_mips.h) must be preserved. |
| 28 | 28 |
| 29 #define __ assembler-> | 29 #define __ assembler-> |
| 30 | 30 |
| 31 | 31 |
| 32 intptr_t Intrinsifier::ParameterSlotFromSp() { return -1; } | 32 intptr_t Intrinsifier::ParameterSlotFromSp() { |
| 33 return -1; |
| 34 } |
| 33 | 35 |
| 34 | 36 |
| 35 static bool IsABIPreservedRegister(Register reg) { | 37 static bool IsABIPreservedRegister(Register reg) { |
| 36 return ((1 << reg) & kAbiPreservedCpuRegs) != 0; | 38 return ((1 << reg) & kAbiPreservedCpuRegs) != 0; |
| 37 } | 39 } |
| 38 | 40 |
| 39 void Intrinsifier::IntrinsicCallPrologue(Assembler* assembler) { | 41 void Intrinsifier::IntrinsicCallPrologue(Assembler* assembler) { |
| 40 ASSERT(IsABIPreservedRegister(CODE_REG)); | 42 ASSERT(IsABIPreservedRegister(CODE_REG)); |
| 41 ASSERT(IsABIPreservedRegister(ARGS_DESC_REG)); | 43 ASSERT(IsABIPreservedRegister(ARGS_DESC_REG)); |
| 42 ASSERT(IsABIPreservedRegister(CALLEE_SAVED_TEMP)); | 44 ASSERT(IsABIPreservedRegister(CALLEE_SAVED_TEMP)); |
| (...skipping 26 matching lines...) |
| 69 | 71 |
| 70 __ lw(T0, Address(SP, 2 * kWordSize)); // Array. | 72 __ lw(T0, Address(SP, 2 * kWordSize)); // Array. |
| 71 // Range check. | 73 // Range check. |
| 72 __ lw(T3, FieldAddress(T0, Array::length_offset())); // Array length. | 74 __ lw(T3, FieldAddress(T0, Array::length_offset())); // Array length. |
| 73 // Runtime throws exception. | 75 // Runtime throws exception. |
| 74 __ BranchUnsignedGreaterEqual(T1, T3, &fall_through); | 76 __ BranchUnsignedGreaterEqual(T1, T3, &fall_through); |
| 75 | 77 |
| 76 // Note that T1 is Smi, i.e., times 2. | 78 // Note that T1 is Smi, i.e., times 2. |
| 77 ASSERT(kSmiTagShift == 1); | 79 ASSERT(kSmiTagShift == 1); |
| 78 __ lw(T2, Address(SP, 0 * kWordSize)); // Value. | 80 __ lw(T2, Address(SP, 0 * kWordSize)); // Value. |
| 79 __ sll(T1, T1, 1); // T1 is Smi. | 81 __ sll(T1, T1, 1); // T1 is Smi. |
| 80 __ addu(T1, T0, T1); | 82 __ addu(T1, T0, T1); |
| 81 __ StoreIntoObject(T0, | 83 __ StoreIntoObject(T0, FieldAddress(T1, Array::data_offset()), T2); |
| 82 FieldAddress(T1, Array::data_offset()), | |
| 83 T2); | |
| 84 // Caller is responsible for preserving the value if necessary. | 84 // Caller is responsible for preserving the value if necessary. |
| 85 __ Ret(); | 85 __ Ret(); |
| 86 __ Bind(&fall_through); | 86 __ Bind(&fall_through); |
| 87 } | 87 } |
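The `sll(T1, T1, 1)` above leans on Smi tagging (`kSmiTagShift == 1`): a tagged index already equals 2 * index, so one more left shift gives 4 * index, the byte offset of a 32-bit array slot. A sketch of the address arithmetic, assuming 32-bit words as on MIPS:

```c++
#include <cassert>
#include <cstdint>

int main() {
  const intptr_t index = 3;
  const intptr_t tagged = index << 1;        // Smi encoding: value * 2.
  const intptr_t byte_offset = tagged << 1;  // One more shift: value * 4.
  assert(byte_offset == index * static_cast<intptr_t>(sizeof(int32_t)));
  return 0;
}
```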
| 88 | 88 |
| 89 | 89 |
| 90 // Allocate a GrowableObjectArray using the backing array specified. | 90 // Allocate a GrowableObjectArray using the backing array specified. |
| 91 // On stack: type argument (+1), data (+0). | 91 // On stack: type argument (+1), data (+0). |
| 92 void Intrinsifier::GrowableArray_Allocate(Assembler* assembler) { | 92 void Intrinsifier::GrowableArray_Allocate(Assembler* assembler) { |
| 93 // The newly allocated object is returned in V0. | 93 // The newly allocated object is returned in V0. |
| 94 const intptr_t kTypeArgumentsOffset = 1 * kWordSize; | 94 const intptr_t kTypeArgumentsOffset = 1 * kWordSize; |
| 95 const intptr_t kArrayOffset = 0 * kWordSize; | 95 const intptr_t kArrayOffset = 0 * kWordSize; |
| 96 Label fall_through; | 96 Label fall_through; |
| 97 | 97 |
| 98 // Try allocating in new space. | 98 // Try allocating in new space. |
| 99 const Class& cls = Class::Handle( | 99 const Class& cls = Class::Handle( |
| 100 Isolate::Current()->object_store()->growable_object_array_class()); | 100 Isolate::Current()->object_store()->growable_object_array_class()); |
| 101 __ TryAllocate(cls, &fall_through, V0, T1); | 101 __ TryAllocate(cls, &fall_through, V0, T1); |
| 102 | 102 |
| 103 // Store backing array object in growable array object. | 103 // Store backing array object in growable array object. |
| 104 __ lw(T1, Address(SP, kArrayOffset)); // Data argument. | 104 __ lw(T1, Address(SP, kArrayOffset)); // Data argument. |
| 105 // V0 is new, no barrier needed. | 105 // V0 is new, no barrier needed. |
| 106 __ StoreIntoObjectNoBarrier( | 106 __ StoreIntoObjectNoBarrier( |
| 107 V0, | 107 V0, FieldAddress(V0, GrowableObjectArray::data_offset()), T1); |
| 108 FieldAddress(V0, GrowableObjectArray::data_offset()), | |
| 109 T1); | |
| 110 | 108 |
| 111 // V0: new growable array object start as a tagged pointer. | 109 // V0: new growable array object start as a tagged pointer. |
| 112 // Store the type argument field in the growable array object. | 110 // Store the type argument field in the growable array object. |
| 113 __ lw(T1, Address(SP, kTypeArgumentsOffset)); // Type argument. | 111 __ lw(T1, Address(SP, kTypeArgumentsOffset)); // Type argument. |
| 114 __ StoreIntoObjectNoBarrier( | 112 __ StoreIntoObjectNoBarrier( |
| 115 V0, | 113 V0, FieldAddress(V0, GrowableObjectArray::type_arguments_offset()), T1); |
| 116 FieldAddress(V0, GrowableObjectArray::type_arguments_offset()), | |
| 117 T1); | |
| 118 // Set the length field in the growable array object to 0. | 114 // Set the length field in the growable array object to 0. |
| 119 __ Ret(); // Returns the newly allocated object in V0. | 115 __ Ret(); // Returns the newly allocated object in V0. |
| 120 __ delay_slot()->sw(ZR, | 116 __ delay_slot()->sw(ZR, |
| 121 FieldAddress(V0, GrowableObjectArray::length_offset())); | 117 FieldAddress(V0, GrowableObjectArray::length_offset())); |
| 122 | 118 |
| 123 __ Bind(&fall_through); | 119 __ Bind(&fall_through); |
| 124 } | 120 } |
| 125 | 121 |
| 126 | 122 |
| 127 // Add an element to growable array if it doesn't need to grow, otherwise | 123 // Add an element to growable array if it doesn't need to grow, otherwise |
| 128 // call into regular code. | 124 // call into regular code. |
| 129 // On stack: growable array (+1), value (+0). | 125 // On stack: growable array (+1), value (+0). |
| 130 void Intrinsifier::GrowableArray_add(Assembler* assembler) { | 126 void Intrinsifier::GrowableArray_add(Assembler* assembler) { |
| 131 // In checked mode we need to type-check the incoming argument. | 127 // In checked mode we need to type-check the incoming argument. |
| 132 if (Isolate::Current()->type_checks()) return; | 128 if (Isolate::Current()->type_checks()) return; |
| 133 Label fall_through; | 129 Label fall_through; |
| 134 __ lw(T0, Address(SP, 1 * kWordSize)); // Array. | 130 __ lw(T0, Address(SP, 1 * kWordSize)); // Array. |
| 135 __ lw(T1, FieldAddress(T0, GrowableObjectArray::length_offset())); | 131 __ lw(T1, FieldAddress(T0, GrowableObjectArray::length_offset())); |
| 136 // T1: length. | 132 // T1: length. |
| 137 __ lw(T2, FieldAddress(T0, GrowableObjectArray::data_offset())); | 133 __ lw(T2, FieldAddress(T0, GrowableObjectArray::data_offset())); |
| 138 // T2: data. | 134 // T2: data. |
| 139 __ lw(T3, FieldAddress(T2, Array::length_offset())); | 135 __ lw(T3, FieldAddress(T2, Array::length_offset())); |
| 140 // Compare length with capacity. | 136 // Compare length with capacity. |
| 141 // T3: capacity. | 137 // T3: capacity. |
| 142 __ beq(T1, T3, &fall_through); // Must grow data. | 138 __ beq(T1, T3, &fall_through); // Must grow data. |
| 143 const int32_t value_one = reinterpret_cast<int32_t>(Smi::New(1)); | 139 const int32_t value_one = reinterpret_cast<int32_t>(Smi::New(1)); |
| 144 // len = len + 1; | 140 // len = len + 1; |
| 145 __ addiu(T3, T1, Immediate(value_one)); | 141 __ addiu(T3, T1, Immediate(value_one)); |
| 146 __ sw(T3, FieldAddress(T0, GrowableObjectArray::length_offset())); | 142 __ sw(T3, FieldAddress(T0, GrowableObjectArray::length_offset())); |
| 147 __ lw(T0, Address(SP, 0 * kWordSize)); // Value. | 143 __ lw(T0, Address(SP, 0 * kWordSize)); // Value. |
| 148 ASSERT(kSmiTagShift == 1); | 144 ASSERT(kSmiTagShift == 1); |
| 149 __ sll(T1, T1, 1); | 145 __ sll(T1, T1, 1); |
| 150 __ addu(T1, T2, T1); | 146 __ addu(T1, T2, T1); |
| 151 __ StoreIntoObject(T2, | 147 __ StoreIntoObject(T2, FieldAddress(T1, Array::data_offset()), T0); |
| 152 FieldAddress(T1, Array::data_offset()), | |
| 153 T0); | |
| 154 __ LoadObject(T7, Object::null_object()); | 148 __ LoadObject(T7, Object::null_object()); |
| 155 __ Ret(); | 149 __ Ret(); |
| 156 __ delay_slot()->mov(V0, T7); | 150 __ delay_slot()->mov(V0, T7); |
| 157 __ Bind(&fall_through); | 151 __ Bind(&fall_through); |
| 158 } | 152 } |
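In C-like terms, the fast path of `GrowableArray_add` appends only when the backing store has spare capacity and bails out to regular Dart code otherwise. A sketch of that control flow, using a fixed-size std::vector as a stand-in for the backing Array:

```c++
#include <cstddef>
#include <vector>

// Sketch: append without growing; the intrinsic falls through (returns
// false here) when length == capacity and the array must be grown.
template <typename T>
bool TryAddWithoutGrowing(std::vector<T>& backing, size_t& length, T value) {
  if (length == backing.size()) return false;  // Must grow: take slow path.
  backing[length] = value;                     // Store at data[length].
  length += 1;                                 // len = len + 1.
  return true;
}
```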
| 159 | 153 |
| 160 | 154 |
| 161 #define TYPED_ARRAY_ALLOCATION(type_name, cid, max_len, scale_shift) \ | 155 #define TYPED_ARRAY_ALLOCATION(type_name, cid, max_len, scale_shift) \ |
| 162 Label fall_through; \ | 156 Label fall_through; \ |
| 163 const intptr_t kArrayLengthStackOffset = 0 * kWordSize; \ | 157 const intptr_t kArrayLengthStackOffset = 0 * kWordSize; \ |
| 164 NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, T2, &fall_through)); \ | 158 NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, T2, &fall_through)); \ |
| 165 __ lw(T2, Address(SP, kArrayLengthStackOffset)); /* Array length. */ \ | 159 __ lw(T2, Address(SP, kArrayLengthStackOffset)); /* Array length. */ \ |
| 166 /* Check that length is a positive Smi. */ \ | 160 /* Check that length is a positive Smi. */ \ |
| 167 /* T2: requested array length argument. */ \ | 161 /* T2: requested array length argument. */ \ |
| 168 __ andi(CMPRES1, T2, Immediate(kSmiTagMask)); \ | 162 __ andi(CMPRES1, T2, Immediate(kSmiTagMask)); \ |
| 169 __ bne(CMPRES1, ZR, &fall_through); \ | 163 __ bne(CMPRES1, ZR, &fall_through); \ |
| 170 __ BranchSignedLess(T2, Immediate(0), &fall_through); \ | 164 __ BranchSignedLess(T2, Immediate(0), &fall_through); \ |
| 171 __ SmiUntag(T2); \ | 165 __ SmiUntag(T2); \ |
| 172 /* Check for maximum allowed length. */ \ | 166 /* Check for maximum allowed length. */ \ |
| 173 /* T2: untagged array length. */ \ | 167 /* T2: untagged array length. */ \ |
| 174 __ BranchSignedGreater(T2, Immediate(max_len), &fall_through); \ | 168 __ BranchSignedGreater(T2, Immediate(max_len), &fall_through); \ |
| 175 __ sll(T2, T2, scale_shift); \ | 169 __ sll(T2, T2, scale_shift); \ |
| (...skipping 26 matching lines...) |
| 202 /* Initialize the tags. */ \ | 196 /* Initialize the tags. */ \ |
| 203 /* V0: new object start as a tagged pointer. */ \ | 197 /* V0: new object start as a tagged pointer. */ \ |
| 204 /* T1: new object end address. */ \ | 198 /* T1: new object end address. */ \ |
| 205 /* T2: allocation size. */ \ | 199 /* T2: allocation size. */ \ |
| 206 { \ | 200 { \ |
| 207 Label size_tag_overflow, done; \ | 201 Label size_tag_overflow, done; \ |
| 208 __ BranchUnsignedGreater(T2, Immediate(RawObject::SizeTag::kMaxSizeTag), \ | 202 __ BranchUnsignedGreater(T2, Immediate(RawObject::SizeTag::kMaxSizeTag), \ |
| 209 &size_tag_overflow); \ | 203 &size_tag_overflow); \ |
| 210 __ b(&done); \ | 204 __ b(&done); \ |
| 211 __ delay_slot()->sll(T2, T2, \ | 205 __ delay_slot()->sll(T2, T2, \ |
| 212 RawObject::kSizeTagPos - kObjectAlignmentLog2); \ | 206 RawObject::kSizeTagPos - kObjectAlignmentLog2); \ |
| 213 \ | 207 \ |
| 214 __ Bind(&size_tag_overflow); \ | 208 __ Bind(&size_tag_overflow); \ |
| 215 __ mov(T2, ZR); \ | 209 __ mov(T2, ZR); \ |
| 216 __ Bind(&done); \ | 210 __ Bind(&done); \ |
| 217 \ | 211 \ |
| 218 /* Get the class index and insert it into the tags. */ \ | 212 /* Get the class index and insert it into the tags. */ \ |
| 219 __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cid)); \ | 213 __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cid)); \ |
| 220 __ or_(T2, T2, TMP); \ | 214 __ or_(T2, T2, TMP); \ |
| 221 __ sw(T2, FieldAddress(V0, type_name::tags_offset())); /* Tags. */ \ | 215 __ sw(T2, FieldAddress(V0, type_name::tags_offset())); /* Tags. */ \ |
| 222 } \ | 216 } \ |
| 223 /* Set the length field. */ \ | 217 /* Set the length field. */ \ |
| 224 /* V0: new object start as a tagged pointer. */ \ | 218 /* V0: new object start as a tagged pointer. */ \ |
| 225 /* T1: new object end address. */ \ | 219 /* T1: new object end address. */ \ |
| 226 __ lw(T2, Address(SP, kArrayLengthStackOffset)); /* Array length. */ \ | 220 __ lw(T2, Address(SP, kArrayLengthStackOffset)); /* Array length. */ \ |
| 227 __ StoreIntoObjectNoBarrier(V0, \ | 221 __ StoreIntoObjectNoBarrier( \ |
| 228 FieldAddress(V0, type_name::length_offset()), \ | 222 V0, FieldAddress(V0, type_name::length_offset()), T2); \ |
| 229 T2); \ | |
| 230 /* Initialize all array elements to 0. */ \ | 223 /* Initialize all array elements to 0. */ \ |
| 231 /* V0: new object start as a tagged pointer. */ \ | 224 /* V0: new object start as a tagged pointer. */ \ |
| 232 /* T1: new object end address. */ \ | 225 /* T1: new object end address. */ \ |
| 233 /* T2: iterator which initially points to the start of the variable */ \ | 226 /* T2: iterator which initially points to the start of the variable */ \ |
| 234 /* data area to be initialized. */ \ | 227 /* data area to be initialized. */ \ |
| 235 __ AddImmediate(T2, V0, sizeof(Raw##type_name) - 1); \ | 228 __ AddImmediate(T2, V0, sizeof(Raw##type_name) - 1); \ |
| 236 Label done, init_loop; \ | 229 Label done, init_loop; \ |
| 237 __ Bind(&init_loop); \ | 230 __ Bind(&init_loop); \ |
| 238 __ BranchUnsignedGreaterEqual(T2, T1, &done); \ | 231 __ BranchUnsignedGreaterEqual(T2, T1, &done); \ |
| 239 __ sw(ZR, Address(T2, 0)); \ | 232 __ sw(ZR, Address(T2, 0)); \ |
| 240 __ b(&init_loop); \ | 233 __ b(&init_loop); \ |
| 241 __ delay_slot()->addiu(T2, T2, Immediate(kWordSize)); \ | 234 __ delay_slot()->addiu(T2, T2, Immediate(kWordSize)); \ |
| 242 __ Bind(&done); \ | 235 __ Bind(&done); \ |
| 243 \ | 236 \ |
| 244 __ Ret(); \ | 237 __ Ret(); \ |
| 245 __ Bind(&fall_through); \ | 238 __ Bind(&fall_through); |
| 246 | 239 |
| 247 | 240 |
| 248 static int GetScaleFactor(intptr_t size) { | 241 static int GetScaleFactor(intptr_t size) { |
| 249 switch (size) { | 242 switch (size) { |
| 250 case 1: return 0; | 243 case 1: |
| 251 case 2: return 1; | 244 return 0; |
| 252 case 4: return 2; | 245 case 2: |
| 253 case 8: return 3; | 246 return 1; |
| 254 case 16: return 4; | 247 case 4: |
| 248 return 2; |
| 249 case 8: |
| 250 return 3; |
| 251 case 16: |
| 252 return 4; |
| 255 } | 253 } |
| 256 UNREACHABLE(); | 254 UNREACHABLE(); |
| 257 return -1; | 255 return -1; |
| 258 } | 256 } |
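`GetScaleFactor` maps a power-of-two element size to its base-2 logarithm, later used as the shift amount when converting an element count into a byte size. An equivalent loop-based sketch:

```c++
#include <cassert>
#include <cstdint>

// Equivalent to GetScaleFactor for power-of-two sizes: returns log2(size).
static int ScaleShiftSketch(intptr_t size) {
  int shift = 0;
  while ((static_cast<intptr_t>(1) << shift) < size) shift++;
  assert((static_cast<intptr_t>(1) << shift) == size);  // Power of two.
  return shift;
}
```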
| 259 | 257 |
| 260 | 258 |
| 261 #define TYPED_DATA_ALLOCATOR(clazz) \ | 259 #define TYPED_DATA_ALLOCATOR(clazz) \ |
| 262 void Intrinsifier::TypedData_##clazz##_factory(Assembler* assembler) { \ | 260 void Intrinsifier::TypedData_##clazz##_factory(Assembler* assembler) { \ |
| 263 intptr_t size = TypedData::ElementSizeInBytes(kTypedData##clazz##Cid); \ | 261 intptr_t size = TypedData::ElementSizeInBytes(kTypedData##clazz##Cid); \ |
| 264 intptr_t max_len = TypedData::MaxElements(kTypedData##clazz##Cid); \ | 262 intptr_t max_len = TypedData::MaxElements(kTypedData##clazz##Cid); \ |
| 265 int shift = GetScaleFactor(size); \ | 263 int shift = GetScaleFactor(size); \ |
| 266 TYPED_ARRAY_ALLOCATION(TypedData, kTypedData##clazz##Cid, max_len, shift); \ | 264 TYPED_ARRAY_ALLOCATION(TypedData, kTypedData##clazz##Cid, max_len, shift); \ |
| 267 } | 265 } |
| 268 CLASS_LIST_TYPED_DATA(TYPED_DATA_ALLOCATOR) | 266 CLASS_LIST_TYPED_DATA(TYPED_DATA_ALLOCATOR) |
| 269 #undef TYPED_DATA_ALLOCATOR | 267 #undef TYPED_DATA_ALLOCATOR |
| 270 | 268 |
| 271 | 269 |
| 272 // Loads args from stack into T0 and T1. | 270 // Loads args from stack into T0 and T1. |
| 273 // Tests if they are Smis, jumps to label not_smi if not. | 271 // Tests if they are Smis, jumps to label not_smi if not. |
| 274 static void TestBothArgumentsSmis(Assembler* assembler, Label* not_smi) { | 272 static void TestBothArgumentsSmis(Assembler* assembler, Label* not_smi) { |
| 275 __ lw(T0, Address(SP, 0 * kWordSize)); | 273 __ lw(T0, Address(SP, 0 * kWordSize)); |
| 276 __ lw(T1, Address(SP, 1 * kWordSize)); | 274 __ lw(T1, Address(SP, 1 * kWordSize)); |
| 277 __ or_(CMPRES1, T0, T1); | 275 __ or_(CMPRES1, T0, T1); |
| 278 __ andi(CMPRES1, CMPRES1, Immediate(kSmiTagMask)); | 276 __ andi(CMPRES1, CMPRES1, Immediate(kSmiTagMask)); |
| 279 __ bne(CMPRES1, ZR, not_smi); | 277 __ bne(CMPRES1, ZR, not_smi); |
| 280 return; | 278 return; |
| 281 } | 279 } |
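OR-ing the two arguments before masking checks both tags with one test: a Smi has a zero tag bit, so the OR has a zero tag bit iff both values are Smis. Sketch:

```c++
#include <cstdint>

const intptr_t kSmiTagMaskSketch = 1;  // Low bit clear means Smi.

// Single-test check that both tagged values are Smis, as in the intrinsic.
static bool BothSmis(intptr_t a, intptr_t b) {
  return ((a | b) & kSmiTagMaskSketch) == 0;
}
```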
| 282 | 280 |
| 283 | 281 |
| 284 void Intrinsifier::Integer_addFromInteger(Assembler* assembler) { | 282 void Intrinsifier::Integer_addFromInteger(Assembler* assembler) { |
| 285 Label fall_through; | 283 Label fall_through; |
| 286 | 284 |
| 287 TestBothArgumentsSmis(assembler, &fall_through); // Checks two Smis. | 285 TestBothArgumentsSmis(assembler, &fall_through); // Checks two Smis. |
| 288 __ AdduDetectOverflow(V0, T0, T1, CMPRES1); // Add. | 286 __ AdduDetectOverflow(V0, T0, T1, CMPRES1); // Add. |
| 289 __ bltz(CMPRES1, &fall_through); // Fall through on overflow. | 287 __ bltz(CMPRES1, &fall_through); // Fall through on overflow. |
| 290 __ Ret(); // Nothing in branch delay slot. | 288 __ Ret(); // Nothing in branch delay slot. |
| 291 __ Bind(&fall_through); | 289 __ Bind(&fall_through); |
| 292 } | 290 } |
| 293 | 291 |
| 294 | 292 |
| 295 void Intrinsifier::Integer_add(Assembler* assembler) { | 293 void Intrinsifier::Integer_add(Assembler* assembler) { |
| 296 Integer_addFromInteger(assembler); | 294 Integer_addFromInteger(assembler); |
| 297 } | 295 } |
| 298 | 296 |
| 299 | 297 |
| 300 void Intrinsifier::Integer_subFromInteger(Assembler* assembler) { | 298 void Intrinsifier::Integer_subFromInteger(Assembler* assembler) { |
| 301 Label fall_through; | 299 Label fall_through; |
| 302 | 300 |
| 303 TestBothArgumentsSmis(assembler, &fall_through); | 301 TestBothArgumentsSmis(assembler, &fall_through); |
| 304 __ SubuDetectOverflow(V0, T0, T1, CMPRES1); // Subtract. | 302 __ SubuDetectOverflow(V0, T0, T1, CMPRES1); // Subtract. |
| 305 __ bltz(CMPRES1, &fall_through); // Fall through on overflow. | 303 __ bltz(CMPRES1, &fall_through); // Fall through on overflow. |
| 306 __ Ret(); | 304 __ Ret(); |
| 307 __ Bind(&fall_through); | 305 __ Bind(&fall_through); |
| 308 } | 306 } |
| 309 | 307 |
| 310 | 308 |
| 311 void Intrinsifier::Integer_sub(Assembler* assembler) { | 309 void Intrinsifier::Integer_sub(Assembler* assembler) { |
| 312 Label fall_through; | 310 Label fall_through; |
| 313 | 311 |
| 314 TestBothArgumentsSmis(assembler, &fall_through); | 312 TestBothArgumentsSmis(assembler, &fall_through); |
| 315 __ SubuDetectOverflow(V0, T1, T0, CMPRES1); // Subtract. | 313 __ SubuDetectOverflow(V0, T1, T0, CMPRES1); // Subtract. |
| 316 __ bltz(CMPRES1, &fall_through); // Fall through on overflow. | 314 __ bltz(CMPRES1, &fall_through); // Fall through on overflow. |
| 317 __ Ret(); // Nothing in branch delay slot. | 315 __ Ret(); // Nothing in branch delay slot. |
| 318 __ Bind(&fall_through); | 316 __ Bind(&fall_through); |
| 319 } | 317 } |
| 320 | 318 |
| 321 | 319 |
| 322 void Intrinsifier::Integer_mulFromInteger(Assembler* assembler) { | 320 void Intrinsifier::Integer_mulFromInteger(Assembler* assembler) { |
| 323 Label fall_through; | 321 Label fall_through; |
| 324 | 322 |
| 325 TestBothArgumentsSmis(assembler, &fall_through); // checks two smis | 323 TestBothArgumentsSmis(assembler, &fall_through); // checks two smis |
| 326 __ SmiUntag(T0); // untags T0. only want result shifted by one | 324 __ SmiUntag(T0); // untags T0. only want result shifted by one |
| 327 | 325 |
| 328 __ mult(T0, T1); // HI:LO <- T0 * T1. | 326 __ mult(T0, T1); // HI:LO <- T0 * T1. |
| 329 __ mflo(V0); // V0 <- LO. | 327 __ mflo(V0); // V0 <- LO. |
| 330 __ mfhi(T2); // T2 <- HI. | 328 __ mfhi(T2); // T2 <- HI. |
| 331 __ sra(T3, V0, 31); // T3 <- V0 >> 31. | 329 __ sra(T3, V0, 31); // T3 <- V0 >> 31. |
| 332 __ bne(T2, T3, &fall_through); // Fall through on overflow. | 330 __ bne(T2, T3, &fall_through); // Fall through on overflow. |
| 333 __ Ret(); | 331 __ Ret(); |
| 334 __ Bind(&fall_through); | 332 __ Bind(&fall_through); |
| 335 } | 333 } |
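The overflow check after `mult` compares HI with LO's sign extension: a 32x32 product fits a signed 32-bit result exactly when the high word is all copies of the low word's sign bit. A portable sketch of the same test:

```c++
#include <cstdint>

// True iff a * b fits in a signed 32-bit result; mirrors the mfhi/mflo/sra
// comparison in Integer_mulFromInteger.
static bool MulFitsIn32(int32_t a, int32_t b) {
  const int64_t product = static_cast<int64_t>(a) * b;
  const int32_t lo = static_cast<int32_t>(product);        // mflo -> V0.
  const int32_t hi = static_cast<int32_t>(product >> 32);  // mfhi -> T2.
  return hi == (lo >> 31);  // T3 <- V0 >> 31; overflow iff T2 != T3.
}
```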
| 336 | 334 |
| 337 | 335 |
| 338 void Intrinsifier::Integer_mul(Assembler* assembler) { | 336 void Intrinsifier::Integer_mul(Assembler* assembler) { |
| 339 Integer_mulFromInteger(assembler); | 337 Integer_mulFromInteger(assembler); |
| 340 } | 338 } |
| 341 | 339 |
| (...skipping 26 matching lines...) |
| 368 __ delay_slot()->mov(result, left); | 366 __ delay_slot()->mov(result, left); |
| 369 | 367 |
| 370 __ Bind(&return_zero); | 368 __ Bind(&return_zero); |
| 371 __ Ret(); | 369 __ Ret(); |
| 372 __ delay_slot()->mov(result, ZR); | 370 __ delay_slot()->mov(result, ZR); |
| 373 | 371 |
| 374 __ Bind(&modulo); | 372 __ Bind(&modulo); |
| 375 __ SmiUntag(right); | 373 __ SmiUntag(right); |
| 376 __ SmiUntag(left); | 374 __ SmiUntag(left); |
| 377 __ div(left, right); // Divide, remainder goes in HI. | 375 __ div(left, right); // Divide, remainder goes in HI. |
| 378 __ mfhi(result); // result <- HI. | 376 __ mfhi(result); // result <- HI. |
| 379 return; | 377 return; |
| 380 } | 378 } |
| 381 | 379 |
| 382 | 380 |
| 383 // Implementation: | 381 // Implementation: |
| 384 // res = left % right; | 382 // res = left % right; |
| 385 // if (res < 0) { | 383 // if (res < 0) { |
| 386 // if (right < 0) { | 384 // if (right < 0) { |
| 387 // res = res - right; | 385 // res = res - right; |
| 388 // } else { | 386 // } else { |
| (...skipping 37 matching lines...) |
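The elided body implements the pseudocode in the comment above; in plain C++ the adjustment reads roughly as follows (a sketch assuming Smi-range operands and right != 0):

```c++
// Dart's % yields a non-negative result, unlike C++'s remainder operator.
static int EuclideanMod(int left, int right) {
  int res = left % right;  // C++ remainder: sign follows 'left'.
  if (res < 0) {
    res += (right < 0) ? -right : right;  // res - right or res + right.
  }
  return res;  // Now in [0, |right|).
}
```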
| 426 | 424 |
| 427 void Intrinsifier::Integer_truncDivide(Assembler* assembler) { | 425 void Intrinsifier::Integer_truncDivide(Assembler* assembler) { |
| 428 Label fall_through; | 426 Label fall_through; |
| 429 | 427 |
| 430 TestBothArgumentsSmis(assembler, &fall_through); | 428 TestBothArgumentsSmis(assembler, &fall_through); |
| 431 __ beq(T0, ZR, &fall_through); // If b is 0, fall through. | 429 __ beq(T0, ZR, &fall_through); // If b is 0, fall through. |
| 432 | 430 |
| 433 __ SmiUntag(T0); | 431 __ SmiUntag(T0); |
| 434 __ SmiUntag(T1); | 432 __ SmiUntag(T1); |
| 435 __ div(T1, T0); // LO <- T1 / T0 | 433 __ div(T1, T0); // LO <- T1 / T0 |
| 436 __ mflo(V0); // V0 <- LO | 434 __ mflo(V0); // V0 <- LO |
| 437 // Check the corner case of dividing 'MIN_SMI' by -1, in which case we | 435 // Check the corner case of dividing 'MIN_SMI' by -1, in which case we |
| 438 // cannot tag the result. | 436 // cannot tag the result. |
| 439 __ BranchEqual(V0, Immediate(0x40000000), &fall_through); | 437 __ BranchEqual(V0, Immediate(0x40000000), &fall_through); |
| 440 __ Ret(); | 438 __ Ret(); |
| 441 __ delay_slot()->SmiTag(V0); | 439 __ delay_slot()->SmiTag(V0); |
| 442 __ Bind(&fall_through); | 440 __ Bind(&fall_through); |
| 443 } | 441 } |
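The `0x40000000` comparison catches the one quotient a Smi division can produce that is itself not a Smi: MIN_SMI / -1 == 2^30, which overflows the 31-bit payload once tagged. Sketch of the guard, assuming untagged operands in the Smi range [-2^30, 2^30 - 1]:

```c++
#include <cstdint>

// True iff a / b is representable as a Smi; only MIN_SMI / -1 fails.
static bool QuotientFitsSmi(int32_t a, int32_t b) {
  const int32_t quotient = a / b;  // b != 0 was checked earlier.
  return quotient != 0x40000000;   // 2^30: SmiTag (<< 1) would overflow.
}
```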
| 444 | 442 |
| 445 | 443 |
| 446 void Intrinsifier::Integer_negate(Assembler* assembler) { | 444 void Intrinsifier::Integer_negate(Assembler* assembler) { |
| 447 Label fall_through; | 445 Label fall_through; |
| 448 | 446 |
| 449 __ lw(T0, Address(SP, + 0 * kWordSize)); // Grabs first argument. | 447 __ lw(T0, Address(SP, +0 * kWordSize)); // Grabs first argument. |
| 450 __ andi(CMPRES1, T0, Immediate(kSmiTagMask)); // Test for Smi. | 448 __ andi(CMPRES1, T0, Immediate(kSmiTagMask)); // Test for Smi. |
| 451 __ bne(CMPRES1, ZR, &fall_through); // Fall through if not a Smi. | 449 __ bne(CMPRES1, ZR, &fall_through); // Fall through if not a Smi. |
| 452 __ SubuDetectOverflow(V0, ZR, T0, CMPRES1); | 450 __ SubuDetectOverflow(V0, ZR, T0, CMPRES1); |
| 453 __ bltz(CMPRES1, &fall_through); // There was overflow. | 451 __ bltz(CMPRES1, &fall_through); // There was overflow. |
| 454 __ Ret(); | 452 __ Ret(); |
| 455 __ Bind(&fall_through); | 453 __ Bind(&fall_through); |
| 456 } | 454 } |
| 457 | 455 |
| 458 | 456 |
| 459 void Intrinsifier::Integer_bitAndFromInteger(Assembler* assembler) { | 457 void Intrinsifier::Integer_bitAndFromInteger(Assembler* assembler) { |
| 460 Label fall_through; | 458 Label fall_through; |
| 461 | 459 |
| (...skipping 38 matching lines...) |
| 500 Integer_bitXorFromInteger(assembler); | 498 Integer_bitXorFromInteger(assembler); |
| 501 } | 499 } |
| 502 | 500 |
| 503 | 501 |
| 504 void Intrinsifier::Integer_shl(Assembler* assembler) { | 502 void Intrinsifier::Integer_shl(Assembler* assembler) { |
| 505 ASSERT(kSmiTagShift == 1); | 503 ASSERT(kSmiTagShift == 1); |
| 506 ASSERT(kSmiTag == 0); | 504 ASSERT(kSmiTag == 0); |
| 507 Label fall_through, overflow; | 505 Label fall_through, overflow; |
| 508 | 506 |
| 509 TestBothArgumentsSmis(assembler, &fall_through); | 507 TestBothArgumentsSmis(assembler, &fall_through); |
| 510 __ BranchUnsignedGreater( | 508 __ BranchUnsignedGreater(T0, Immediate(Smi::RawValue(Smi::kBits)), |
| 511 T0, Immediate(Smi::RawValue(Smi::kBits)), &fall_through); | 509 &fall_through); |
| 512 __ SmiUntag(T0); | 510 __ SmiUntag(T0); |
| 513 | 511 |
| 514 // Check for overflow by shifting left and shifting back arithmetically. | 512 // Check for overflow by shifting left and shifting back arithmetically. |
| 515 // If the result is different from the original, there was overflow. | 513 // If the result is different from the original, there was overflow. |
| 516 __ sllv(TMP, T1, T0); | 514 __ sllv(TMP, T1, T0); |
| 517 __ srav(CMPRES1, TMP, T0); | 515 __ srav(CMPRES1, TMP, T0); |
| 518 __ bne(CMPRES1, T1, &overflow); | 516 __ bne(CMPRES1, T1, &overflow); |
| 519 | 517 |
| 520 // No overflow, result in V0. | 518 // No overflow, result in V0. |
| 521 __ Ret(); | 519 __ Ret(); |
| 522 __ delay_slot()->sllv(V0, T1, T0); | 520 __ delay_slot()->sllv(V0, T1, T0); |
| 523 | 521 |
| 524 __ Bind(&overflow); | 522 __ Bind(&overflow); |
| 525 // Arguments are Smi but the shift produced an overflow to Mint. | 523 // Arguments are Smi but the shift produced an overflow to Mint. |
| 526 __ bltz(T1, &fall_through); | 524 __ bltz(T1, &fall_through); |
| 527 __ SmiUntag(T1); | 525 __ SmiUntag(T1); |
| 528 | 526 |
| 529 // Pull off high bits that will be shifted off of T1 by making a mask | 527 // Pull off high bits that will be shifted off of T1 by making a mask |
| 530 // ((1 << T0) - 1), shifting it to the right, masking T1, then shifting back. | 528 // ((1 << T0) - 1), shifting it to the right, masking T1, then shifting back. |
| 531 // high bits = (((1 << T0) - 1) << (32 - T0)) & T1) >> (32 - T0) | 529 // high bits = (((1 << T0) - 1) << (32 - T0)) & T1) >> (32 - T0) |
| 532 // lo bits = T1 << T0 | 530 // lo bits = T1 << T0 |
| 533 __ LoadImmediate(T3, 1); | 531 __ LoadImmediate(T3, 1); |
| 534 __ sllv(T3, T3, T0); // T3 <- T3 << T0 | 532 __ sllv(T3, T3, T0); // T3 <- T3 << T0 |
| 535 __ addiu(T3, T3, Immediate(-1)); // T3 <- T3 - 1 | 533 __ addiu(T3, T3, Immediate(-1)); // T3 <- T3 - 1 |
| 536 __ subu(T4, ZR, T0); // T4 <- -T0 | 534 __ subu(T4, ZR, T0); // T4 <- -T0 |
| 537 __ addiu(T4, T4, Immediate(32)); // T4 <- 32 - T0 | 535 __ addiu(T4, T4, Immediate(32)); // T4 <- 32 - T0 |
| 538 __ sllv(T3, T3, T4); // T3 <- T3 << T4 | 536 __ sllv(T3, T3, T4); // T3 <- T3 << T4 |
| 539 __ and_(T3, T3, T1); // T3 <- T3 & T1 | 537 __ and_(T3, T3, T1); // T3 <- T3 & T1 |
| 540 __ srlv(T3, T3, T4); // T3 <- T3 >> T4 | 538 __ srlv(T3, T3, T4); // T3 <- T3 >> T4 |
| 541 // Now T3 has the bits that fall off of T1 on a left shift. | 539 // Now T3 has the bits that fall off of T1 on a left shift. |
| 542 __ sllv(T0, T1, T0); // T0 gets low bits. | 540 __ sllv(T0, T1, T0); // T0 gets low bits. |
| 543 | 541 |
| 544 const Class& mint_class = Class::Handle( | 542 const Class& mint_class = |
| 545 Isolate::Current()->object_store()->mint_class()); | 543 Class::Handle(Isolate::Current()->object_store()->mint_class()); |
| 546 __ TryAllocate(mint_class, &fall_through, V0, T1); | 544 __ TryAllocate(mint_class, &fall_through, V0, T1); |
| 547 | 545 |
| 548 __ sw(T0, FieldAddress(V0, Mint::value_offset())); | 546 __ sw(T0, FieldAddress(V0, Mint::value_offset())); |
| 549 __ Ret(); | 547 __ Ret(); |
| 550 __ delay_slot()->sw(T3, FieldAddress(V0, Mint::value_offset() + kWordSize)); | 548 __ delay_slot()->sw(T3, FieldAddress(V0, Mint::value_offset() + kWordSize)); |
| 551 __ Bind(&fall_through); | 549 __ Bind(&fall_through); |
| 552 } | 550 } |
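The mask sequence in the overflow path splits `value << shift` into two 32-bit Mint digits without 64-bit registers: T3 receives the bits shifted off the top, T0 the low bits. With 64-bit arithmetic the same computation is (a sketch, assuming 0 < shift < 32 and a non-negative value, as on this path):

```c++
#include <cstdint>

// Decompose value << shift into the Mint's low and high 32-bit words.
static void ShlToMintParts(uint32_t value, int shift,
                           uint32_t* lo, uint32_t* hi) {
  const uint64_t wide = static_cast<uint64_t>(value) << shift;
  *lo = static_cast<uint32_t>(wide);        // T0: low bits.
  *hi = static_cast<uint32_t>(wide >> 32);  // T3: bits that fell off T1.
}
```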
| 553 | 551 |
| 554 | 552 |
| 555 static void Get64SmiOrMint(Assembler* assembler, | 553 static void Get64SmiOrMint(Assembler* assembler, |
| (...skipping 22 matching lines...) |
| 578 return; | 576 return; |
| 579 } | 577 } |
| 580 | 578 |
| 581 | 579 |
| 582 static void CompareIntegers(Assembler* assembler, RelationOperator rel_op) { | 580 static void CompareIntegers(Assembler* assembler, RelationOperator rel_op) { |
| 583 Label try_mint_smi, is_true, is_false, drop_two_fall_through, fall_through; | 581 Label try_mint_smi, is_true, is_false, drop_two_fall_through, fall_through; |
| 584 TestBothArgumentsSmis(assembler, &try_mint_smi); | 582 TestBothArgumentsSmis(assembler, &try_mint_smi); |
| 585 // T0 contains the right argument. T1 contains left argument | 583 // T0 contains the right argument. T1 contains left argument |
| 586 | 584 |
| 587 switch (rel_op) { | 585 switch (rel_op) { |
| 588 case LT: __ BranchSignedLess(T1, T0, &is_true); break; | 586 case LT: |
| 589 case LE: __ BranchSignedLessEqual(T1, T0, &is_true); break; | 587 __ BranchSignedLess(T1, T0, &is_true); |
| 590 case GT: __ BranchSignedGreater(T1, T0, &is_true); break; | 588 break; |
| 591 case GE: __ BranchSignedGreaterEqual(T1, T0, &is_true); break; | 589 case LE: |
| 590 __ BranchSignedLessEqual(T1, T0, &is_true); |
| 591 break; |
| 592 case GT: |
| 593 __ BranchSignedGreater(T1, T0, &is_true); |
| 594 break; |
| 595 case GE: |
| 596 __ BranchSignedGreaterEqual(T1, T0, &is_true); |
| 597 break; |
| 592 default: | 598 default: |
| 593 UNREACHABLE(); | 599 UNREACHABLE(); |
| 594 break; | 600 break; |
| 595 } | 601 } |
| 596 | 602 |
| 597 __ Bind(&is_false); | 603 __ Bind(&is_false); |
| 598 __ LoadObject(V0, Bool::False()); | 604 __ LoadObject(V0, Bool::False()); |
| 599 __ Ret(); | 605 __ Ret(); |
| 600 __ Bind(&is_true); | 606 __ Bind(&is_true); |
| 601 __ LoadObject(V0, Bool::True()); | 607 __ LoadObject(V0, Bool::True()); |
| (...skipping 134 matching lines...) |
| 736 void Intrinsifier::Integer_sar(Assembler* assembler) { | 742 void Intrinsifier::Integer_sar(Assembler* assembler) { |
| 737 Label fall_through; | 743 Label fall_through; |
| 738 | 744 |
| 739 TestBothArgumentsSmis(assembler, &fall_through); | 745 TestBothArgumentsSmis(assembler, &fall_through); |
| 740 // Shift amount in T0. Value to shift in T1. | 746 // Shift amount in T0. Value to shift in T1. |
| 741 | 747 |
| 742 __ SmiUntag(T0); | 748 __ SmiUntag(T0); |
| 743 __ bltz(T0, &fall_through); | 749 __ bltz(T0, &fall_through); |
| 744 | 750 |
| 745 __ LoadImmediate(T2, 0x1F); | 751 __ LoadImmediate(T2, 0x1F); |
| 746 __ slt(CMPRES1, T2, T0); // CMPRES1 <- 0x1F < T0 ? 1 : 0 | 752 __ slt(CMPRES1, T2, T0); // CMPRES1 <- 0x1F < T0 ? 1 : 0 |
| 747 __ movn(T0, T2, CMPRES1); // T0 <- 0x1F < T0 ? 0x1F : T0 | 753 __ movn(T0, T2, CMPRES1); // T0 <- 0x1F < T0 ? 0x1F : T0 |
| 748 | 754 |
| 749 __ SmiUntag(T1); | 755 __ SmiUntag(T1); |
| 750 __ srav(V0, T1, T0); | 756 __ srav(V0, T1, T0); |
| 751 __ Ret(); | 757 __ Ret(); |
| 752 __ delay_slot()->SmiTag(V0); | 758 __ delay_slot()->SmiTag(V0); |
| 753 __ Bind(&fall_through); | 759 __ Bind(&fall_through); |
| 754 } | 760 } |
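The `slt`/`movn` pair clamps the shift amount to 31, so counts of 32 or more still produce the mathematically correct arithmetic result (all sign bits) instead of relying on `srav`'s modulo-32 behavior. Sketch:

```c++
#include <cstdint>

// Arithmetic shift right with the count clamped to 31, as in Integer_sar.
// Assumes '>>' on a signed operand is an arithmetic shift (true on common
// toolchains, guaranteed since C++20).
static int32_t SarClamped(int32_t value, int32_t shift) {
  if (shift > 31) shift = 31;  // movn: treat large counts as 31.
  return value >> shift;
}
```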
| 755 | 761 |
| 756 | 762 |
| (...skipping 414 matching lines...) |
| 1171 __ addiu(T4, T3, Immediate(TypedData::data_offset() - kHeapObjectTag)); | 1177 __ addiu(T4, T3, Immediate(TypedData::data_offset() - kHeapObjectTag)); |
| 1172 | 1178 |
| 1173 // T3 = x = *xip++, return if x == 0 | 1179 // T3 = x = *xip++, return if x == 0 |
| 1174 Label x_zero; | 1180 Label x_zero; |
| 1175 __ lw(T3, Address(T4, 0)); | 1181 __ lw(T3, Address(T4, 0)); |
| 1176 __ beq(T3, ZR, &x_zero); | 1182 __ beq(T3, ZR, &x_zero); |
| 1177 __ delay_slot()->addiu(T4, T4, Immediate(Bigint::kBytesPerDigit)); | 1183 __ delay_slot()->addiu(T4, T4, Immediate(Bigint::kBytesPerDigit)); |
| 1178 | 1184 |
| 1179 // T5 = ajp = &a_digits[i] | 1185 // T5 = ajp = &a_digits[i] |
| 1180 __ lw(T1, Address(SP, 1 * kWordSize)); // a_digits | 1186 __ lw(T1, Address(SP, 1 * kWordSize)); // a_digits |
| 1181 __ sll(T0, T2, 2); // j == 2*i, i is Smi. | 1187 __ sll(T0, T2, 2); // j == 2*i, i is Smi. |
| 1182 __ addu(T1, T0, T1); | 1188 __ addu(T1, T0, T1); |
| 1183 __ addiu(T5, T1, Immediate(TypedData::data_offset() - kHeapObjectTag)); | 1189 __ addiu(T5, T1, Immediate(TypedData::data_offset() - kHeapObjectTag)); |
| 1184 | 1190 |
| 1185 // T6:T0 = t = x*x + *ajp | 1191 // T6:T0 = t = x*x + *ajp |
| 1186 __ lw(T0, Address(T5, 0)); // *ajp. | 1192 __ lw(T0, Address(T5, 0)); // *ajp. |
| 1187 __ mthi(ZR); | 1193 __ mthi(ZR); |
| 1188 __ mtlo(T0); | 1194 __ mtlo(T0); |
| 1189 __ maddu(T3, T3); // HI:LO = T3*T3 + *ajp. | 1195 __ maddu(T3, T3); // HI:LO = T3*T3 + *ajp. |
| 1190 __ mfhi(T6); | 1196 __ mfhi(T6); |
| 1191 __ mflo(T0); | 1197 __ mflo(T0); |
| (...skipping 40 matching lines...) |
| 1232 __ srl(T1, A0, 31); | 1238 __ srl(T1, A0, 31); |
| 1233 __ or_(A1, A1, T1); | 1239 __ or_(A1, A1, T1); |
| 1234 __ sll(A0, A0, 1); // A2:A1:A0 = 2*x*xi. | 1240 __ sll(A0, A0, 1); // A2:A1:A0 = 2*x*xi. |
| 1235 __ addu(A0, A0, T0); | 1241 __ addu(A0, A0, T0); |
| 1236 __ sltu(T1, A0, T0); | 1242 __ sltu(T1, A0, T0); |
| 1237 __ addu(A1, A1, T1); // No carry out possible; A2:A1:A0 = 2*x*xi + aj. | 1243 __ addu(A1, A1, T1); // No carry out possible; A2:A1:A0 = 2*x*xi + aj. |
| 1238 __ addu(T0, A0, T6); | 1244 __ addu(T0, A0, T6); |
| 1239 __ sltu(T1, T0, T6); | 1245 __ sltu(T1, T0, T6); |
| 1240 __ addu(T6, A1, T1); // No carry out; A2:T6:T0 = 2*x*xi + aj + low32(c). | 1246 __ addu(T6, A1, T1); // No carry out; A2:T6:T0 = 2*x*xi + aj + low32(c). |
| 1241 __ addu(T6, T6, T7); // No carry out; A2:T6:T0 = 2*x*xi + aj + c. | 1247 __ addu(T6, T6, T7); // No carry out; A2:T6:T0 = 2*x*xi + aj + c. |
| 1242 __ mov(T7, A2); // T7:T6:T0 = 2*x*xi + aj + c. | 1248 __ mov(T7, A2); // T7:T6:T0 = 2*x*xi + aj + c. |
| 1243 | 1249 |
| 1244 // *ajp++ = low32(t) = T0 | 1250 // *ajp++ = low32(t) = T0 |
| 1245 __ sw(T0, Address(T5, 0)); | 1251 __ sw(T0, Address(T5, 0)); |
| 1246 __ addiu(T5, T5, Immediate(Bigint::kBytesPerDigit)); | 1252 __ addiu(T5, T5, Immediate(Bigint::kBytesPerDigit)); |
| 1247 | 1253 |
| 1248 // while (n-- > 0) | 1254 // while (n-- > 0) |
| 1249 __ bgtz(V0, &loop); | 1255 __ bgtz(V0, &loop); |
| 1250 __ delay_slot()->addiu(V0, V0, Immediate(-1)); // --n | 1256 __ delay_slot()->addiu(V0, V0, Immediate(-1)); // --n |
| 1251 | 1257 |
| 1252 __ Bind(&done); | 1258 __ Bind(&done); |
| (...skipping 28 matching lines...) |
| 1281 // uint32_t d = digits[i >> 1]; // i is Smi. | 1287 // uint32_t d = digits[i >> 1]; // i is Smi. |
| 1282 // uint64_t t = rho*d; | 1288 // uint64_t t = rho*d; |
| 1283 // args[_MU] = t mod DIGIT_BASE; // _MU == 4. | 1289 // args[_MU] = t mod DIGIT_BASE; // _MU == 4. |
| 1284 // return 1; | 1290 // return 1; |
| 1285 // } | 1291 // } |
| 1286 | 1292 |
| 1287 // T4 = args | 1293 // T4 = args |
| 1288 __ lw(T4, Address(SP, 2 * kWordSize)); // args | 1294 __ lw(T4, Address(SP, 2 * kWordSize)); // args |
| 1289 | 1295 |
| 1290 // T3 = rho = args[2] | 1296 // T3 = rho = args[2] |
| 1291 __ lw(T3, | 1297 __ lw(T3, FieldAddress( |
| 1292 FieldAddress(T4, TypedData::data_offset() + 2*Bigint::kBytesPerDigit)); | 1298 T4, TypedData::data_offset() + 2 * Bigint::kBytesPerDigit)); |
| 1293 | 1299 |
| 1294 // T2 = d = digits[i >> 1] | 1300 // T2 = d = digits[i >> 1] |
| 1295 __ lw(T0, Address(SP, 0 * kWordSize)); // T0 = i as Smi. | 1301 __ lw(T0, Address(SP, 0 * kWordSize)); // T0 = i as Smi. |
| 1296 __ lw(T1, Address(SP, 1 * kWordSize)); // T1 = digits. | 1302 __ lw(T1, Address(SP, 1 * kWordSize)); // T1 = digits. |
| 1297 __ sll(T0, T0, 1); | 1303 __ sll(T0, T0, 1); |
| 1298 __ addu(T1, T0, T1); | 1304 __ addu(T1, T0, T1); |
| 1299 __ lw(T2, FieldAddress(T1, TypedData::data_offset())); | 1305 __ lw(T2, FieldAddress(T1, TypedData::data_offset())); |
| 1300 | 1306 |
| 1301 // HI:LO = t = rho*d | 1307 // HI:LO = t = rho*d |
| 1302 __ multu(T2, T3); | 1308 __ multu(T2, T3); |
| 1303 | 1309 |
| 1304 // args[4] = t mod DIGIT_BASE = low32(t) | 1310 // args[4] = t mod DIGIT_BASE = low32(t) |
| 1305 __ mflo(T0); | 1311 __ mflo(T0); |
| 1306 __ sw(T0, | 1312 __ sw(T0, FieldAddress( |
| 1307 FieldAddress(T4, TypedData::data_offset() + 4*Bigint::kBytesPerDigit)); | 1313 T4, TypedData::data_offset() + 4 * Bigint::kBytesPerDigit)); |
| 1308 | 1314 |
| 1309 __ addiu(V0, ZR, Immediate(Smi::RawValue(1))); // One digit processed. | 1315 __ addiu(V0, ZR, Immediate(Smi::RawValue(1))); // One digit processed. |
| 1310 __ Ret(); | 1316 __ Ret(); |
| 1311 } | 1317 } |
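Since DIGIT_BASE is 2^32, `t mod DIGIT_BASE` is just the low word of the product, which is why only `mflo` is read after `multu`. Sketch:

```c++
#include <cstdint>

// args[_MU] = (rho * d) mod 2^32: keep only multu's LO register.
static uint32_t MulModDigitBase(uint32_t rho, uint32_t d) {
  return static_cast<uint32_t>(static_cast<uint64_t>(rho) * d);
}
```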
| 1312 | 1318 |
| 1313 | 1319 |
| 1314 // Check if the last argument is a double, jump to label 'is_smi' if smi | 1320 // Check if the last argument is a double, jump to label 'is_smi' if smi |
| 1315 // (easy to convert to double), otherwise jump to label 'not_double_smi', | 1321 // (easy to convert to double), otherwise jump to label 'not_double_smi', |
| 1316 // Returns the last argument in T0. | 1322 // Returns the last argument in T0. |
| 1317 static void TestLastArgumentIsDouble(Assembler* assembler, | 1323 static void TestLastArgumentIsDouble(Assembler* assembler, |
| (...skipping 24 matching lines...) |
| 1342 __ LoadDFromOffset(D0, T0, Double::value_offset() - kHeapObjectTag); | 1348 __ LoadDFromOffset(D0, T0, Double::value_offset() - kHeapObjectTag); |
| 1343 // Now, left is in D0, right is in D1. | 1349 // Now, left is in D0, right is in D1. |
| 1344 | 1350 |
| 1345 __ cund(D0, D1); // Check for NaN. | 1351 __ cund(D0, D1); // Check for NaN. |
| 1346 __ bc1f(&no_NaN); | 1352 __ bc1f(&no_NaN); |
| 1347 __ LoadObject(V0, Bool::False()); // Return false if either is NaN. | 1353 __ LoadObject(V0, Bool::False()); // Return false if either is NaN. |
| 1348 __ Ret(); | 1354 __ Ret(); |
| 1349 __ Bind(&no_NaN); | 1355 __ Bind(&no_NaN); |
| 1350 | 1356 |
| 1351 switch (rel_op) { | 1357 switch (rel_op) { |
| 1352 case EQ: __ ceqd(D0, D1); break; | 1358 case EQ: |
| 1353 case LT: __ coltd(D0, D1); break; | 1359 __ ceqd(D0, D1); |
| 1354 case LE: __ coled(D0, D1); break; | 1360 break; |
| 1355 case GT: __ coltd(D1, D0); break; | 1361 case LT: |
| 1356 case GE: __ coled(D1, D0); break; | 1362 __ coltd(D0, D1); |
| 1363 break; |
| 1364 case LE: |
| 1365 __ coled(D0, D1); |
| 1366 break; |
| 1367 case GT: |
| 1368 __ coltd(D1, D0); |
| 1369 break; |
| 1370 case GE: |
| 1371 __ coled(D1, D0); |
| 1372 break; |
| 1357 default: { | 1373 default: { |
| 1358 // Only passing the above conditions to this function. | 1374 // Only passing the above conditions to this function. |
| 1359 UNREACHABLE(); | 1375 UNREACHABLE(); |
| 1360 break; | 1376 break; |
| 1361 } | 1377 } |
| 1362 } | 1378 } |
| 1363 | 1379 |
| 1364 Label is_true; | 1380 Label is_true; |
| 1365 __ bc1t(&is_true); | 1381 __ bc1t(&is_true); |
| 1366 __ LoadObject(V0, Bool::False()); | 1382 __ LoadObject(V0, Bool::False()); |
| (...skipping 46 matching lines...) |
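`cund` tests the FPU's unordered condition (either operand NaN), and the early `bc1f`/false return gives every NaN comparison the result false, matching Dart and IEEE 754 semantics. A sketch for the LT case:

```c++
#include <cmath>

// Mirrors the intrinsic: any comparison involving NaN yields false.
static bool CompareLT(double left, double right) {
  if (std::isnan(left) || std::isnan(right)) return false;  // cund path.
  return left < right;                                      // coltd path.
}
```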
| 1413 | 1429 |
| 1414 TestLastArgumentIsDouble(assembler, &is_smi, &fall_through); | 1430 TestLastArgumentIsDouble(assembler, &is_smi, &fall_through); |
| 1415 // Both arguments are double, right operand is in T0. | 1431 // Both arguments are double, right operand is in T0. |
| 1416 __ lwc1(F2, FieldAddress(T0, Double::value_offset())); | 1432 __ lwc1(F2, FieldAddress(T0, Double::value_offset())); |
| 1417 __ lwc1(F3, FieldAddress(T0, Double::value_offset() + kWordSize)); | 1433 __ lwc1(F3, FieldAddress(T0, Double::value_offset() + kWordSize)); |
| 1418 __ Bind(&double_op); | 1434 __ Bind(&double_op); |
| 1419 __ lw(T0, Address(SP, 1 * kWordSize)); // Left argument. | 1435 __ lw(T0, Address(SP, 1 * kWordSize)); // Left argument. |
| 1420 __ lwc1(F0, FieldAddress(T0, Double::value_offset())); | 1436 __ lwc1(F0, FieldAddress(T0, Double::value_offset())); |
| 1421 __ lwc1(F1, FieldAddress(T0, Double::value_offset() + kWordSize)); | 1437 __ lwc1(F1, FieldAddress(T0, Double::value_offset() + kWordSize)); |
| 1422 switch (kind) { | 1438 switch (kind) { |
| 1423 case Token::kADD: __ addd(D0, D0, D1); break; | 1439 case Token::kADD: |
| 1424 case Token::kSUB: __ subd(D0, D0, D1); break; | 1440 __ addd(D0, D0, D1); |
| 1425 case Token::kMUL: __ muld(D0, D0, D1); break; | 1441 break; |
| 1426 case Token::kDIV: __ divd(D0, D0, D1); break; | 1442 case Token::kSUB: |
| 1427 default: UNREACHABLE(); | 1443 __ subd(D0, D0, D1); |
| 1444 break; |
| 1445 case Token::kMUL: |
| 1446 __ muld(D0, D0, D1); |
| 1447 break; |
| 1448 case Token::kDIV: |
| 1449 __ divd(D0, D0, D1); |
| 1450 break; |
| 1451 default: |
| 1452 UNREACHABLE(); |
| 1428 } | 1453 } |
| 1429 const Class& double_class = Class::Handle( | 1454 const Class& double_class = |
| 1430 Isolate::Current()->object_store()->double_class()); | 1455 Class::Handle(Isolate::Current()->object_store()->double_class()); |
| 1431 __ TryAllocate(double_class, &fall_through, V0, T1); // Result register. | 1456 __ TryAllocate(double_class, &fall_through, V0, T1); // Result register. |
| 1432 __ swc1(F0, FieldAddress(V0, Double::value_offset())); | 1457 __ swc1(F0, FieldAddress(V0, Double::value_offset())); |
| 1433 __ Ret(); | 1458 __ Ret(); |
| 1434 __ delay_slot()->swc1(F1, | 1459 __ delay_slot()->swc1(F1, |
| 1435 FieldAddress(V0, Double::value_offset() + kWordSize)); | 1460 FieldAddress(V0, Double::value_offset() + kWordSize)); |
| 1436 | 1461 |
| 1437 __ Bind(&is_smi); | 1462 __ Bind(&is_smi); |
| 1438 __ SmiUntag(T0); | 1463 __ SmiUntag(T0); |
| 1439 __ mtc1(T0, STMP1); | 1464 __ mtc1(T0, STMP1); |
| 1440 __ b(&double_op); | 1465 __ b(&double_op); |
| (...skipping 33 matching lines...) |
| 1474 | 1499 |
| 1475 // Is Smi. | 1500 // Is Smi. |
| 1476 __ SmiUntag(T0); | 1501 __ SmiUntag(T0); |
| 1477 __ mtc1(T0, F4); | 1502 __ mtc1(T0, F4); |
| 1478 __ cvtdw(D1, F4); | 1503 __ cvtdw(D1, F4); |
| 1479 | 1504 |
| 1480 __ lw(T0, Address(SP, 1 * kWordSize)); | 1505 __ lw(T0, Address(SP, 1 * kWordSize)); |
| 1481 __ lwc1(F0, FieldAddress(T0, Double::value_offset())); | 1506 __ lwc1(F0, FieldAddress(T0, Double::value_offset())); |
| 1482 __ lwc1(F1, FieldAddress(T0, Double::value_offset() + kWordSize)); | 1507 __ lwc1(F1, FieldAddress(T0, Double::value_offset() + kWordSize)); |
| 1483 __ muld(D0, D0, D1); | 1508 __ muld(D0, D0, D1); |
| 1484 const Class& double_class = Class::Handle( | 1509 const Class& double_class = |
| 1485 Isolate::Current()->object_store()->double_class()); | 1510 Class::Handle(Isolate::Current()->object_store()->double_class()); |
| 1486 __ TryAllocate(double_class, &fall_through, V0, T1); // Result register. | 1511 __ TryAllocate(double_class, &fall_through, V0, T1); // Result register. |
| 1487 __ swc1(F0, FieldAddress(V0, Double::value_offset())); | 1512 __ swc1(F0, FieldAddress(V0, Double::value_offset())); |
| 1488 __ Ret(); | 1513 __ Ret(); |
| 1489 __ delay_slot()->swc1(F1, | 1514 __ delay_slot()->swc1(F1, |
| 1490 FieldAddress(V0, Double::value_offset() + kWordSize)); | 1515 FieldAddress(V0, Double::value_offset() + kWordSize)); |
| 1491 __ Bind(&fall_through); | 1516 __ Bind(&fall_through); |
| 1492 } | 1517 } |
| 1493 | 1518 |
| 1494 | 1519 |
| 1495 void Intrinsifier::DoubleFromInteger(Assembler* assembler) { | 1520 void Intrinsifier::DoubleFromInteger(Assembler* assembler) { |
| 1496 Label fall_through; | 1521 Label fall_through; |
| 1497 | 1522 |
| 1498 __ lw(T0, Address(SP, 0 * kWordSize)); | 1523 __ lw(T0, Address(SP, 0 * kWordSize)); |
| 1499 __ andi(CMPRES1, T0, Immediate(kSmiTagMask)); | 1524 __ andi(CMPRES1, T0, Immediate(kSmiTagMask)); |
| 1500 __ bne(CMPRES1, ZR, &fall_through); | 1525 __ bne(CMPRES1, ZR, &fall_through); |
| 1501 | 1526 |
| 1502 // Is Smi. | 1527 // Is Smi. |
| 1503 __ SmiUntag(T0); | 1528 __ SmiUntag(T0); |
| 1504 __ mtc1(T0, F4); | 1529 __ mtc1(T0, F4); |
| 1505 __ cvtdw(D0, F4); | 1530 __ cvtdw(D0, F4); |
| 1506 const Class& double_class = Class::Handle( | 1531 const Class& double_class = |
| 1507 Isolate::Current()->object_store()->double_class()); | 1532 Class::Handle(Isolate::Current()->object_store()->double_class()); |
| 1508 __ TryAllocate(double_class, &fall_through, V0, T1); // Result register. | 1533 __ TryAllocate(double_class, &fall_through, V0, T1); // Result register. |
| 1509 __ swc1(F0, FieldAddress(V0, Double::value_offset())); | 1534 __ swc1(F0, FieldAddress(V0, Double::value_offset())); |
| 1510 __ Ret(); | 1535 __ Ret(); |
| 1511 __ delay_slot()->swc1(F1, | 1536 __ delay_slot()->swc1(F1, |
| 1512 FieldAddress(V0, Double::value_offset() + kWordSize)); | 1537 FieldAddress(V0, Double::value_offset() + kWordSize)); |
| 1513 __ Bind(&fall_through); | 1538 __ Bind(&fall_through); |
| 1514 } | 1539 } |
| 1515 | 1540 |
| 1516 | 1541 |
| 1517 void Intrinsifier::Double_getIsNaN(Assembler* assembler) { | 1542 void Intrinsifier::Double_getIsNaN(Assembler* assembler) { |
| (...skipping 51 matching lines...) |
| 1569 __ Bind(&is_true); | 1594 __ Bind(&is_true); |
| 1570 __ LoadObject(V0, Bool::True()); | 1595 __ LoadObject(V0, Bool::True()); |
| 1571 __ Ret(); | 1596 __ Ret(); |
| 1572 | 1597 |
| 1573 __ Bind(&is_false); | 1598 __ Bind(&is_false); |
| 1574 __ LoadObject(V0, Bool::False()); | 1599 __ LoadObject(V0, Bool::False()); |
| 1575 __ Ret(); | 1600 __ Ret(); |
| 1576 | 1601 |
| 1577 __ Bind(&is_zero); | 1602 __ Bind(&is_zero); |
| 1578 // Check for negative zero by looking at the sign bit. | 1603 // Check for negative zero by looking at the sign bit. |
| 1579 __ mfc1(T0, F1); // Moves bits 32...63 of D0 to T0. | 1604 __ mfc1(T0, F1); // Moves bits 32...63 of D0 to T0. |
| 1580 __ srl(T0, T0, 31); // Get the sign bit down to bit 0 of T0. | 1605 __ srl(T0, T0, 31); // Get the sign bit down to bit 0 of T0. |
| 1581 __ andi(CMPRES1, T0, Immediate(1)); // Check if the bit is set. | 1606 __ andi(CMPRES1, T0, Immediate(1)); // Check if the bit is set. |
| 1582 __ bne(T0, ZR, &is_true); // Sign bit set. True. | 1607 __ bne(T0, ZR, &is_true); // Sign bit set. True. |
| 1583 __ b(&is_false); | 1608 __ b(&is_false); |
| 1584 } | 1609 } |
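Because +0.0 and -0.0 compare equal, the zero case inspects the sign bit directly: F1 holds bits 32..63 of D0, and the top bit of that word is the IEEE sign. A sketch using the raw 64-bit encoding:

```c++
#include <cstdint>
#include <cstring>

// True iff d is -0.0: numerically zero but with the IEEE sign bit set.
static bool IsNegativeZero(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));  // Inspect the raw encoding.
  return d == 0.0 && (bits >> 63) != 0;  // Sign bit of the high word.
}
```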
| 1585 | 1610 |
| 1586 | 1611 |
| 1587 void Intrinsifier::DoubleToInteger(Assembler* assembler) { | 1612 void Intrinsifier::DoubleToInteger(Assembler* assembler) { |
| 1588 __ lw(T0, Address(SP, 0 * kWordSize)); | 1613 __ lw(T0, Address(SP, 0 * kWordSize)); |
| 1589 __ LoadDFromOffset(D0, T0, Double::value_offset() - kHeapObjectTag); | 1614 __ LoadDFromOffset(D0, T0, Double::value_offset() - kHeapObjectTag); |
| 1590 | 1615 |
| 1591 __ truncwd(F2, D0); | 1616 __ truncwd(F2, D0); |
| 1592 __ mfc1(V0, F2); | 1617 __ mfc1(V0, F2); |
| (...skipping 10 matching lines...) |
| 1603 } | 1628 } |
| 1604 | 1629 |
| 1605 | 1630 |
| 1606 void Intrinsifier::MathSqrt(Assembler* assembler) { | 1631 void Intrinsifier::MathSqrt(Assembler* assembler) { |
| 1607 Label fall_through, is_smi, double_op; | 1632 Label fall_through, is_smi, double_op; |
| 1608 TestLastArgumentIsDouble(assembler, &is_smi, &fall_through); | 1633 TestLastArgumentIsDouble(assembler, &is_smi, &fall_through); |
| 1609 // Argument is double and is in T0. | 1634 // Argument is double and is in T0. |
| 1610 __ LoadDFromOffset(D1, T0, Double::value_offset() - kHeapObjectTag); | 1635 __ LoadDFromOffset(D1, T0, Double::value_offset() - kHeapObjectTag); |
| 1611 __ Bind(&double_op); | 1636 __ Bind(&double_op); |
| 1612 __ sqrtd(D0, D1); | 1637 __ sqrtd(D0, D1); |
| 1613 const Class& double_class = Class::Handle( | 1638 const Class& double_class = |
| 1614 Isolate::Current()->object_store()->double_class()); | 1639 Class::Handle(Isolate::Current()->object_store()->double_class()); |
| 1615 __ TryAllocate(double_class, &fall_through, V0, T1); // Result register. | 1640 __ TryAllocate(double_class, &fall_through, V0, T1); // Result register. |
| 1616 __ swc1(F0, FieldAddress(V0, Double::value_offset())); | 1641 __ swc1(F0, FieldAddress(V0, Double::value_offset())); |
| 1617 __ Ret(); | 1642 __ Ret(); |
| 1618 __ delay_slot()->swc1(F1, | 1643 __ delay_slot()->swc1(F1, |
| 1619 FieldAddress(V0, Double::value_offset() + kWordSize)); | 1644 FieldAddress(V0, Double::value_offset() + kWordSize)); |
| 1620 | 1645 |
| 1621 __ Bind(&is_smi); | 1646 __ Bind(&is_smi); |
| 1622 __ SmiUntag(T0); | 1647 __ SmiUntag(T0); |
| 1623 __ mtc1(T0, F2); | 1648 __ mtc1(T0, F2); |
| 1624 __ b(&double_op); | 1649 __ b(&double_op); |
| 1625 __ delay_slot()->cvtdw(D1, F2); | 1650 __ delay_slot()->cvtdw(D1, F2); |
| 1626 __ Bind(&fall_through); | 1651 __ Bind(&fall_through); |
| 1627 } | 1652 } |
| 1628 | 1653 |
| 1629 | 1654 |
| 1630 // var state = ((_A * (_state[kSTATE_LO])) + _state[kSTATE_HI]) & _MASK_64; | 1655 // var state = ((_A * (_state[kSTATE_LO])) + _state[kSTATE_HI]) & _MASK_64; |
| 1631 // _state[kSTATE_LO] = state & _MASK_32; | 1656 // _state[kSTATE_LO] = state & _MASK_32; |
| 1632 // _state[kSTATE_HI] = state >> 32; | 1657 // _state[kSTATE_HI] = state >> 32; |
| 1633 void Intrinsifier::Random_nextState(Assembler* assembler) { | 1658 void Intrinsifier::Random_nextState(Assembler* assembler) { |
| 1634 const Library& math_lib = Library::Handle(Library::MathLibrary()); | 1659 const Library& math_lib = Library::Handle(Library::MathLibrary()); |
| 1635 ASSERT(!math_lib.IsNull()); | 1660 ASSERT(!math_lib.IsNull()); |
| 1636 const Class& random_class = Class::Handle( | 1661 const Class& random_class = |
| 1637 math_lib.LookupClassAllowPrivate(Symbols::_Random())); | 1662 Class::Handle(math_lib.LookupClassAllowPrivate(Symbols::_Random())); |
| 1638 ASSERT(!random_class.IsNull()); | 1663 ASSERT(!random_class.IsNull()); |
| 1639 const Field& state_field = Field::ZoneHandle( | 1664 const Field& state_field = Field::ZoneHandle( |
| 1640 random_class.LookupInstanceFieldAllowPrivate(Symbols::_state())); | 1665 random_class.LookupInstanceFieldAllowPrivate(Symbols::_state())); |
| 1641 ASSERT(!state_field.IsNull()); | 1666 ASSERT(!state_field.IsNull()); |
| 1642 const Field& random_A_field = Field::ZoneHandle( | 1667 const Field& random_A_field = Field::ZoneHandle( |
| 1643 random_class.LookupStaticFieldAllowPrivate(Symbols::_A())); | 1668 random_class.LookupStaticFieldAllowPrivate(Symbols::_A())); |
| 1644 ASSERT(!random_A_field.IsNull()); | 1669 ASSERT(!random_A_field.IsNull()); |
| 1645 ASSERT(random_A_field.is_const()); | 1670 ASSERT(random_A_field.is_const()); |
| 1646 const Instance& a_value = Instance::Handle(random_A_field.StaticValue()); | 1671 const Instance& a_value = Instance::Handle(random_A_field.StaticValue()); |
| 1647 const int64_t a_int_value = Integer::Cast(a_value).AsInt64Value(); | 1672 const int64_t a_int_value = Integer::Cast(a_value).AsInt64Value(); |
| (...skipping 34 matching lines...) |
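The state update quoted in the comment above `Random_nextState` is a multiply-with-carry step. With native 64-bit arithmetic it reads as follows (a sketch; `a` stands for the `_A` constant fetched from `random_A_field`):

```c++
#include <cstdint>

// state = A * state_lo + state_hi, then split back into two 32-bit words.
static void NextStateSketch(uint64_t a, uint32_t* lo, uint32_t* hi) {
  const uint64_t state = a * (*lo) + (*hi);  // '& _MASK_64' is implicit.
  *lo = static_cast<uint32_t>(state);        // _state[kSTATE_LO].
  *hi = static_cast<uint32_t>(state >> 32);  // _state[kSTATE_HI].
}
```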
| 1682 __ lw(T1, Address(SP, 1 * kWordSize)); | 1707 __ lw(T1, Address(SP, 1 * kWordSize)); |
| 1683 __ beq(T0, T1, &is_true); | 1708 __ beq(T0, T1, &is_true); |
| 1684 __ LoadObject(V0, Bool::False()); | 1709 __ LoadObject(V0, Bool::False()); |
| 1685 __ Ret(); | 1710 __ Ret(); |
| 1686 __ Bind(&is_true); | 1711 __ Bind(&is_true); |
| 1687 __ LoadObject(V0, Bool::True()); | 1712 __ LoadObject(V0, Bool::True()); |
| 1688 __ Ret(); | 1713 __ Ret(); |
| 1689 } | 1714 } |
| 1690 | 1715 |
| 1691 | 1716 |
| 1692 enum RangeCheckCondition { | 1717 enum RangeCheckCondition { kIfNotInRange, kIfInRange }; |
| 1693 kIfNotInRange, kIfInRange | |
| 1694 }; | |
| 1695 | 1718 |
| 1696 | 1719 |
| 1697 static void RangeCheck(Assembler* assembler, | 1720 static void RangeCheck(Assembler* assembler, |
| 1698 Register val, | 1721 Register val, |
| 1699 Register tmp, | 1722 Register tmp, |
| 1700 intptr_t low, | 1723 intptr_t low, |
| 1701 intptr_t high, | 1724 intptr_t high, |
| 1702 RangeCheckCondition cc, | 1725 RangeCheckCondition cc, |
| 1703 Label* target) { | 1726 Label* target) { |
| 1704 __ AddImmediate(tmp, val, -low); | 1727 __ AddImmediate(tmp, val, -low); |
| (...skipping 16 matching lines...) |
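`AddImmediate(tmp, val, -low)` sets up the classic one-branch range check: after subtracting `low`, a single unsigned comparison against `high - low` decides membership in [low, high]. Sketch:

```c++
#include <cstdint>

// cid in [low, high] iff (cid - low) <= (high - low) as unsigned values;
// values below 'low' wrap around to huge unsigned numbers and fail.
static bool InRange(intptr_t val, intptr_t low, intptr_t high) {
  return static_cast<uintptr_t>(val - low) <=
         static_cast<uintptr_t>(high - low);
}
```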
| 1721 | 1744 |
| 1722 static void JumpIfNotInteger(Assembler* assembler, | 1745 static void JumpIfNotInteger(Assembler* assembler, |
| 1723 Register cid, | 1746 Register cid, |
| 1724 Register tmp, | 1747 Register tmp, |
| 1725 Label* target) { | 1748 Label* target) { |
| 1726 RangeCheck(assembler, cid, tmp, kSmiCid, kBigintCid, kIfNotInRange, target); | 1749 RangeCheck(assembler, cid, tmp, kSmiCid, kBigintCid, kIfNotInRange, target); |
| 1727 } | 1750 } |
| 1728 | 1751 |
| 1729 | 1752 |
| 1730 static void JumpIfString(Assembler* assembler, | 1753 static void JumpIfString(Assembler* assembler, |
| 1731 Register cid, | 1754 Register cid, |
| 1732 Register tmp, | 1755 Register tmp, |
| 1733 Label* target) { | 1756 Label* target) { |
| 1734 RangeCheck(assembler, | 1757 RangeCheck(assembler, cid, tmp, kOneByteStringCid, kExternalTwoByteStringCid, |
| 1735 cid, | 1758 kIfInRange, target); |
| 1736 tmp, | |
| 1737 kOneByteStringCid, | |
| 1738 kExternalTwoByteStringCid, | |
| 1739 kIfInRange, | |
| 1740 target); | |
| 1741 } | 1759 } |
| 1742 | 1760 |
| 1743 | 1761 |
| 1744 static void JumpIfNotString(Assembler* assembler, | 1762 static void JumpIfNotString(Assembler* assembler, |
| 1745 Register cid, | 1763 Register cid, |
| 1746 Register tmp, | 1764 Register tmp, |
| 1747 Label* target) { | 1765 Label* target) { |
| 1748 RangeCheck(assembler, | 1766 RangeCheck(assembler, cid, tmp, kOneByteStringCid, kExternalTwoByteStringCid, |
| 1749 cid, | 1767 kIfNotInRange, target); |
| 1750 tmp, | |
| 1751 kOneByteStringCid, | |
| 1752 kExternalTwoByteStringCid, | |
| 1753 kIfNotInRange, | |
| 1754 target); | |
| 1755 } | 1768 } |
| 1756 | 1769 |
| 1757 | 1770 |
| 1758 // Return type quickly for simple types (not parameterized and not signature). | 1771 // Return type quickly for simple types (not parameterized and not signature). |
| 1759 void Intrinsifier::ObjectRuntimeType(Assembler* assembler) { | 1772 void Intrinsifier::ObjectRuntimeType(Assembler* assembler) { |
| 1760 Label fall_through, use_canonical_type, not_integer, not_double; | 1773 Label fall_through, use_canonical_type, not_integer, not_double; |
| 1761 __ lw(T0, Address(SP, 0 * kWordSize)); | 1774 __ lw(T0, Address(SP, 0 * kWordSize)); |
| 1762 __ LoadClassIdMayBeSmi(T1, T0); | 1775 __ LoadClassIdMayBeSmi(T1, T0); |
| 1763 | 1776 |
| 1764 // Closures are handled in the runtime. | 1777 // Closures are handled in the runtime. |
| 1765 __ BranchEqual(T1, Immediate(kClosureCid), &fall_through); | 1778 __ BranchEqual(T1, Immediate(kClosureCid), &fall_through); |
| 1766 | 1779 |
| 1767 __ BranchUnsignedGreaterEqual( | 1780 __ BranchUnsignedGreaterEqual(T1, Immediate(kNumPredefinedCids), |
| 1768 T1, Immediate(kNumPredefinedCids), &use_canonical_type); | 1781 &use_canonical_type); |
| 1769 | 1782 |
| 1770 __ BranchNotEqual(T1, Immediate(kDoubleCid), ¬_double); | 1783 __ BranchNotEqual(T1, Immediate(kDoubleCid), ¬_double); |
| 1771 // Object is a double. | 1784 // Object is a double. |
| 1772 __ LoadIsolate(T1); | 1785 __ LoadIsolate(T1); |
| 1773 __ LoadFromOffset(T1, T1, Isolate::object_store_offset()); | 1786 __ LoadFromOffset(T1, T1, Isolate::object_store_offset()); |
| 1774 __ LoadFromOffset(V0, T1, ObjectStore::double_type_offset()); | 1787 __ LoadFromOffset(V0, T1, ObjectStore::double_type_offset()); |
| 1775 __ Ret(); | 1788 __ Ret(); |
| 1776 | 1789 |
| 1777 __ Bind(&not_double); | 1790 __ Bind(&not_double); |
| 1778 JumpIfNotInteger(assembler, T1, T2, &not_integer); | 1791 JumpIfNotInteger(assembler, T1, T2, &not_integer); |
| (...skipping 48 matching lines...) |
| 1827 __ lhu(T1, FieldAddress(T2, Class::num_type_arguments_offset())); | 1840 __ lhu(T1, FieldAddress(T2, Class::num_type_arguments_offset())); |
| 1828 __ BranchNotEqual(T1, Immediate(0), &fall_through); | 1841 __ BranchNotEqual(T1, Immediate(0), &fall_through); |
| 1829 | 1842 |
| 1830 __ Bind(&equal); | 1843 __ Bind(&equal); |
| 1831 __ LoadObject(V0, Bool::True()); | 1844 __ LoadObject(V0, Bool::True()); |
| 1832 __ Ret(); | 1845 __ Ret(); |
| 1833 | 1846 |
| 1834 // Class ids are different. Check if we are comparing runtime types of | 1847 // Class ids are different. Check if we are comparing runtime types of |
| 1835 // two strings (with different representations) or two integers. | 1848 // two strings (with different representations) or two integers. |
| 1836 __ Bind(&different_cids); | 1849 __ Bind(&different_cids); |
| 1837 __ BranchUnsignedGreaterEqual( | 1850 __ BranchUnsignedGreaterEqual(T1, Immediate(kNumPredefinedCids), &not_equal); |
| 1838 T1, Immediate(kNumPredefinedCids), &not_equal); | |
| 1839 | 1851 |
| 1840 // Check if both are integers. | 1852 // Check if both are integers. |
| 1841 JumpIfNotInteger(assembler, T1, T0, &not_integer); | 1853 JumpIfNotInteger(assembler, T1, T0, &not_integer); |
| 1842 JumpIfInteger(assembler, T2, T0, &equal); | 1854 JumpIfInteger(assembler, T2, T0, &equal); |
| 1843 __ b(&not_equal); | 1855 __ b(&not_equal); |
| 1844 | 1856 |
| 1845 __ Bind(&not_integer); | 1857 __ Bind(&not_integer); |
| 1846 // Check if both are strings. | 1858 // Check if both are strings. |
| 1847 JumpIfNotString(assembler, T1, T0, &not_equal); | 1859 JumpIfNotString(assembler, T1, T0, &not_equal); |
| 1848 JumpIfString(assembler, T2, T0, &equal); | 1860 JumpIfString(assembler, T2, T0, &equal); |
| (...skipping 94 matching lines...) |
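Taken together, the visible branches suggest the following decision procedure for comparing runtime types (a hedged C++ model, not the VM's code, reusing the illustrative IsIntegerCid/IsStringCid helpers sketched earlier; kNumPredefinedCids is likewise an assumed constant):

    constexpr int kNumPredefinedCids = 64;  // assumption, value illustrative

    // Returns +1 for equal, 0 for not equal, -1 to defer to the runtime.
    static int RuntimeTypesEqual(int cid1, int cid2, int num_type_args) {
      if (cid1 == cid2) {
        // Same class: equal runtime types unless the class is generic,
        // in which case the intrinsic falls through to the runtime.
        return (num_type_args == 0) ? +1 : -1;
      }
      if (cid1 >= kNumPredefinedCids) return 0;
      // Different representations of the same type still compare equal:
      if (IsIntegerCid(cid1) && IsIntegerCid(cid2)) return +1;
      if (IsStringCid(cid1) && IsStringCid(cid2)) return +1;
      return 0;
    }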
| 1943 | 1955 |
| 1944 __ andi(CMPRES1, A1, Immediate(kSmiTagMask)); | 1956 __ andi(CMPRES1, A1, Immediate(kSmiTagMask)); |
| 1945 __ bne(CMPRES1, ZR, &fall_through); // 'start' is not a Smi. | 1957 __ bne(CMPRES1, ZR, &fall_through); // 'start' is not a Smi. |
| 1946 | 1958 |
| 1947 __ LoadClassId(CMPRES1, A2); | 1959 __ LoadClassId(CMPRES1, A2); |
| 1948 __ BranchNotEqual(CMPRES1, Immediate(kOneByteStringCid), &fall_through); | 1960 __ BranchNotEqual(CMPRES1, Immediate(kOneByteStringCid), &fall_through); |
| 1949 | 1961 |
| 1950 __ LoadClassId(CMPRES1, A0); | 1962 __ LoadClassId(CMPRES1, A0); |
| 1951 __ BranchNotEqual(CMPRES1, Immediate(kOneByteStringCid), &try_two_byte); | 1963 __ BranchNotEqual(CMPRES1, Immediate(kOneByteStringCid), &try_two_byte); |
| 1952 | 1964 |
| 1953 GenerateSubstringMatchesSpecialization(assembler, | 1965 GenerateSubstringMatchesSpecialization(assembler, kOneByteStringCid, |
| 1954 kOneByteStringCid, | 1966 kOneByteStringCid, &return_true, |
| 1955 kOneByteStringCid, | |
| 1956 &return_true, | |
| 1957 &return_false); | 1967 &return_false); |
| 1958 | 1968 |
| 1959 __ Bind(&try_two_byte); | 1969 __ Bind(&try_two_byte); |
| 1960 __ LoadClassId(CMPRES1, A0); | 1970 __ LoadClassId(CMPRES1, A0); |
| 1961 __ BranchNotEqual(CMPRES1, Immediate(kTwoByteStringCid), &fall_through); | 1971 __ BranchNotEqual(CMPRES1, Immediate(kTwoByteStringCid), &fall_through); |
| 1962 | 1972 |
| 1963 GenerateSubstringMatchesSpecialization(assembler, | 1973 GenerateSubstringMatchesSpecialization(assembler, kTwoByteStringCid, |
| 1964 kTwoByteStringCid, | 1974 kOneByteStringCid, &return_true, |
| 1965 kOneByteStringCid, | |
| 1966 &return_true, | |
| 1967 &return_false); | 1975 &return_false); |
| 1968 | 1976 |
| 1969 __ Bind(&return_true); | 1977 __ Bind(&return_true); |
| 1970 __ LoadObject(V0, Bool::True()); | 1978 __ LoadObject(V0, Bool::True()); |
| 1971 __ Ret(); | 1979 __ Ret(); |
| 1972 | 1980 |
| 1973 __ Bind(&return_false); | 1981 __ Bind(&return_false); |
| 1974 __ LoadObject(V0, Bool::False()); | 1982 __ LoadObject(V0, Bool::False()); |
| 1975 __ Ret(); | 1983 __ Ret(); |
| 1976 | 1984 |
| 1977 __ Bind(&fall_through); | 1985 __ Bind(&fall_through); |
| 1978 } | 1986 } |
| 1979 | 1987 |
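The two specializations emitted above differ only in the receiver's code-unit width; the comparison itself is the obvious one. An illustrative C++ model of what each specialized loop computes (bounds handling in the real intrinsic may differ slightly):

    #include <cstdint>

    // CharT1/CharT2 are uint8_t or uint16_t depending on the string cids.
    template <typename CharT1, typename CharT2>
    static bool SubstringMatches(const CharT1* str, intptr_t len,
                                 intptr_t start, const CharT2* other,
                                 intptr_t other_len) {
      if (start < 0 || start + other_len > len) return false;
      for (intptr_t i = 0; i < other_len; i++) {
        if (str[start + i] != other[i]) return false;
      }
      return true;
    }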
| 1980 | 1988 |
| 1981 void Intrinsifier::StringBaseCharAt(Assembler* assembler) { | 1989 void Intrinsifier::StringBaseCharAt(Assembler* assembler) { |
| 1982 Label fall_through, try_two_byte_string; | 1990 Label fall_through, try_two_byte_string; |
| 1983 | 1991 |
| 1984 __ lw(T1, Address(SP, 0 * kWordSize)); // Index. | 1992 __ lw(T1, Address(SP, 0 * kWordSize)); // Index. |
| 1985 __ lw(T0, Address(SP, 1 * kWordSize)); // String. | 1993 __ lw(T0, Address(SP, 1 * kWordSize)); // String. |
| 1986 | 1994 |
| 1987 // Checks. | 1995 // Checks. |
| 1988 __ andi(CMPRES1, T1, Immediate(kSmiTagMask)); | 1996 __ andi(CMPRES1, T1, Immediate(kSmiTagMask)); |
| 1989 __ bne(CMPRES1, ZR, &fall_through); // Index is not a Smi. | 1997 __ bne(CMPRES1, ZR, &fall_through); // Index is not a Smi. |
| 1990 __ lw(T2, FieldAddress(T0, String::length_offset())); // Range check. | 1998 __ lw(T2, FieldAddress(T0, String::length_offset())); // Range check. |
| 1991 // Runtime throws exception. | 1999 // Runtime throws exception. |
| 1992 __ BranchUnsignedGreaterEqual(T1, T2, &fall_through); | 2000 __ BranchUnsignedGreaterEqual(T1, T2, &fall_through); |
| 1993 __ LoadClassId(CMPRES1, T0); // Class ID check. | 2001 __ LoadClassId(CMPRES1, T0); // Class ID check. |
| 1994 __ BranchNotEqual( | 2002 __ BranchNotEqual(CMPRES1, Immediate(kOneByteStringCid), |
| 1995 CMPRES1, Immediate(kOneByteStringCid), &try_two_byte_string); | 2003 &try_two_byte_string); |
| 1996 | 2004 |
| 1997 // Grab byte and return. | 2005 // Grab byte and return. |
| 1998 __ SmiUntag(T1); | 2006 __ SmiUntag(T1); |
| 1999 __ addu(T2, T0, T1); | 2007 __ addu(T2, T0, T1); |
| 2000 __ lbu(T2, FieldAddress(T2, OneByteString::data_offset())); | 2008 __ lbu(T2, FieldAddress(T2, OneByteString::data_offset())); |
| 2001 __ BranchUnsignedGreaterEqual( | 2009 __ BranchUnsignedGreaterEqual( |
| 2002 T2, Immediate(Symbols::kNumberOfOneCharCodeSymbols), &fall_through); | 2010 T2, Immediate(Symbols::kNumberOfOneCharCodeSymbols), &fall_through); |
| 2003 __ lw(V0, Address(THR, Thread::predefined_symbols_address_offset())); | 2011 __ lw(V0, Address(THR, Thread::predefined_symbols_address_offset())); |
| 2004 __ AddImmediate(V0, Symbols::kNullCharCodeSymbolOffset * kWordSize); | 2012 __ AddImmediate(V0, Symbols::kNullCharCodeSymbolOffset * kWordSize); |
| 2005 __ sll(T2, T2, 2); | 2013 __ sll(T2, T2, 2); |
| (...skipping 142 matching lines...) |
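The tail of StringBaseCharAt above shows the fast path: a character code below Symbols::kNumberOfOneCharCodeSymbols becomes an index into a table of canonical one-character strings reachable from the Thread, so charAt can return a cached object instead of allocating. A hedged C++ model of that lookup (the table layout and both constants are assumptions mirroring the AddImmediate/sll/lw sequence above):

    #include <cstdint>

    struct String;  // opaque stand-in for the VM's string object

    constexpr intptr_t kNullCharCodeSymbolOffset = 1;      // assumption
    constexpr uint32_t kNumberOfOneCharCodeSymbols = 256;  // assumption

    static String* OneCharSymbol(String** table, uint32_t char_code) {
      if (char_code >= kNumberOfOneCharCodeSymbols) return nullptr;  // slow path
      return table[kNullCharCodeSymbolOffset + char_code];
    }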
| 2148 NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, T2, T3, space)); | 2156 NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, T2, T3, space)); |
| 2149 | 2157 |
| 2150 // Initialize the tags. | 2158 // Initialize the tags. |
| 2151 // V0: new object start as a tagged pointer. | 2159 // V0: new object start as a tagged pointer. |
| 2152 // T1: new object end address. | 2160 // T1: new object end address. |
| 2153 // T2: allocation size. | 2161 // T2: allocation size. |
| 2154 { | 2162 { |
| 2155 Label overflow, done; | 2163 Label overflow, done; |
| 2156 const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2; | 2164 const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2; |
| 2157 | 2165 |
| 2158 __ BranchUnsignedGreater( | 2166 __ BranchUnsignedGreater(T2, Immediate(RawObject::SizeTag::kMaxSizeTag), |
| 2159 T2, Immediate(RawObject::SizeTag::kMaxSizeTag), &overflow); | 2167 &overflow); |
| 2160 __ b(&done); | 2168 __ b(&done); |
| 2161 __ delay_slot()->sll(T2, T2, shift); | 2169 __ delay_slot()->sll(T2, T2, shift); |
| 2162 __ Bind(&overflow); | 2170 __ Bind(&overflow); |
| 2163 __ mov(T2, ZR); | 2171 __ mov(T2, ZR); |
| 2164 __ Bind(&done); | 2172 __ Bind(&done); |
| 2165 | 2173 |
| 2166 // Get the class index and insert it into the tags. | 2174 // Get the class index and insert it into the tags. |
| 2167 // T2: size and bit tags. | 2175 // T2: size and bit tags. |
| 2168 __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cid)); | 2176 __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cid)); |
| 2169 __ or_(T2, T2, TMP); | 2177 __ or_(T2, T2, TMP); |
| 2170 __ sw(T2, FieldAddress(V0, String::tags_offset())); // Store tags. | 2178 __ sw(T2, FieldAddress(V0, String::tags_offset())); // Store tags. |
| 2171 } | 2179 } |
| 2172 | 2180 |
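Two things are packed into the tag word here: the allocation size, shifted into its bit field (note the sll rides in the branch delay slot, so it executes whether or not the overflow branch is taken), and the class id. A standalone C++ sketch of the encoding (bit positions and the size limit are illustrative; the real values come from the VM's object layout):

    #include <cstdint>

    constexpr int kSizeTagPos = 8;             // assumption
    constexpr int kObjectAlignmentLog2 = 3;    // assumption
    constexpr uintptr_t kMaxSizeTag = 0xFFF0;  // assumption

    static uintptr_t EncodeTags(uintptr_t alloc_size, uintptr_t cid_bits) {
      uintptr_t size_bits = 0;
      if (alloc_size <= kMaxSizeTag) {
        size_bits = alloc_size << (kSizeTagPos - kObjectAlignmentLog2);
      }
      // On overflow the size bits stay zero and the size is recovered
      // from the class instead; the class id occupies its own field.
      return size_bits | cid_bits;
    }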
| 2173 // Set the length field using the saved length (T6). | 2181 // Set the length field using the saved length (T6). |
| 2174 __ StoreIntoObjectNoBarrier(V0, | 2182 __ StoreIntoObjectNoBarrier(V0, FieldAddress(V0, String::length_offset()), |
| 2175 FieldAddress(V0, String::length_offset()), | |
| 2176 T6); | 2183 T6); |
| 2177 // Clear hash. | 2184 // Clear hash. |
| 2178 __ b(ok); | 2185 __ b(ok); |
| 2179 __ delay_slot()->sw(ZR, FieldAddress(V0, String::hash_offset())); | 2186 __ delay_slot()->sw(ZR, FieldAddress(V0, String::hash_offset())); |
| 2180 } | 2187 } |
| 2181 | 2188 |
| 2182 | 2189 |
| 2183 // Arg0: OneByteString (receiver). | 2190 // Arg0: OneByteString (receiver). |
| 2184 // Arg1: Start index as Smi. | 2191 // Arg1: Start index as Smi. |
| 2185 // Arg2: End index as Smi. | 2192 // Arg2: End index as Smi. |
| (...skipping 82 matching lines...) |
| 2268 Label fall_through, is_true, is_false, loop; | 2275 Label fall_through, is_true, is_false, loop; |
| 2269 __ lw(T0, Address(SP, 1 * kWordSize)); // This. | 2276 __ lw(T0, Address(SP, 1 * kWordSize)); // This. |
| 2270 __ lw(T1, Address(SP, 0 * kWordSize)); // Other. | 2277 __ lw(T1, Address(SP, 0 * kWordSize)); // Other. |
| 2271 | 2278 |
| 2272 // Are identical? | 2279 // Are identical? |
| 2273 __ beq(T0, T1, &is_true); | 2280 __ beq(T0, T1, &is_true); |
| 2274 | 2281 |
| 2275 // Is other OneByteString? | 2282 // Is other OneByteString? |
| 2276 __ andi(CMPRES1, T1, Immediate(kSmiTagMask)); | 2283 __ andi(CMPRES1, T1, Immediate(kSmiTagMask)); |
| 2277 __ beq(CMPRES1, ZR, &fall_through); // Other is Smi. | 2284 __ beq(CMPRES1, ZR, &fall_through); // Other is Smi. |
| 2278 __ LoadClassId(CMPRES1, T1); // Class ID check. | 2285 __ LoadClassId(CMPRES1, T1); // Class ID check. |
| 2279 __ BranchNotEqual(CMPRES1, Immediate(string_cid), &fall_through); | 2286 __ BranchNotEqual(CMPRES1, Immediate(string_cid), &fall_through); |
| 2280 | 2287 |
| 2281 // Have same length? | 2288 // Have same length? |
| 2282 __ lw(T2, FieldAddress(T0, String::length_offset())); | 2289 __ lw(T2, FieldAddress(T0, String::length_offset())); |
| 2283 __ lw(T3, FieldAddress(T1, String::length_offset())); | 2290 __ lw(T3, FieldAddress(T1, String::length_offset())); |
| 2284 __ bne(T2, T3, &is_false); | 2291 __ bne(T2, T3, &is_false); |
| 2285 | 2292 |
| 2286 // Check contents, no fall-through possible. | 2293 // Check contents, no fall-through possible. |
| 2287 ASSERT((string_cid == kOneByteStringCid) || | 2294 ASSERT((string_cid == kOneByteStringCid) || |
| 2288 (string_cid == kTwoByteStringCid)); | 2295 (string_cid == kTwoByteStringCid)); |
| (...skipping 71 matching lines...) |
| 2360 } | 2367 } |
| 2361 | 2368 |
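Once both operands are known to be non-identical strings of the same cid and equal length, the elided loop compares code units one at a time. A plain C++ model of that comparison (CharT is uint8_t for one-byte strings and uint16_t for two-byte strings):

    #include <cstdint>

    template <typename CharT>
    static bool StringContentsEqual(const CharT* a, const CharT* b,
                                    intptr_t len) {
      for (intptr_t i = 0; i < len; i++) {
        if (a[i] != b[i]) return false;
      }
      return true;
    }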
| 2362 | 2369 |
| 2363 // On stack: user tag (+0). | 2370 // On stack: user tag (+0). |
| 2364 void Intrinsifier::UserTag_makeCurrent(Assembler* assembler) { | 2371 void Intrinsifier::UserTag_makeCurrent(Assembler* assembler) { |
| 2365 // T1: Isolate. | 2372 // T1: Isolate. |
| 2366 __ LoadIsolate(T1); | 2373 __ LoadIsolate(T1); |
| 2367 // V0: Current user tag. | 2374 // V0: Current user tag. |
| 2368 __ lw(V0, Address(T1, Isolate::current_tag_offset())); | 2375 __ lw(V0, Address(T1, Isolate::current_tag_offset())); |
| 2369 // T2: UserTag. | 2376 // T2: UserTag. |
| 2370 __ lw(T2, Address(SP, + 0 * kWordSize)); | 2377 __ lw(T2, Address(SP, +0 * kWordSize)); |
| 2371 // Set Isolate::current_tag_. | 2378 // Set Isolate::current_tag_. |
| 2372 __ sw(T2, Address(T1, Isolate::current_tag_offset())); | 2379 __ sw(T2, Address(T1, Isolate::current_tag_offset())); |
| 2373 // T2: UserTag's tag. | 2380 // T2: UserTag's tag. |
| 2374 __ lw(T2, FieldAddress(T2, UserTag::tag_offset())); | 2381 __ lw(T2, FieldAddress(T2, UserTag::tag_offset())); |
| 2375 // Set Isolate::user_tag_. | 2382 // Set Isolate::user_tag_. |
| 2376 __ sw(T2, Address(T1, Isolate::user_tag_offset())); | 2383 __ sw(T2, Address(T1, Isolate::user_tag_offset())); |
| 2377 __ Ret(); | 2384 __ Ret(); |
| 2378 __ delay_slot()->sw(T2, Address(T1, Isolate::user_tag_offset())); | 2385 __ delay_slot()->sw(T2, Address(T1, Isolate::user_tag_offset())); |
| 2379 } | 2386 } |
| 2380 | 2387 |
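At the C++ level the intrinsic amounts to a swap-and-publish: return the old tag object, install the new one, and mirror its unboxed tag value into the isolate. A rough model (the struct fields here are illustrative stand-ins, not the VM's actual types):

    #include <cstdint>

    struct UserTag { uintptr_t tag; };
    struct Isolate { UserTag* current_tag; uintptr_t user_tag; };

    static UserTag* MakeCurrent(Isolate* isolate, UserTag* new_tag) {
      UserTag* old_tag = isolate->current_tag;  // V0: previous tag, returned
      isolate->current_tag = new_tag;           // Isolate::current_tag_
      isolate->user_tag = new_tag->tag;         // Isolate::user_tag_
      return old_tag;
    }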
| (...skipping 24 matching lines...) |
| 2405 __ lw(T0, Address(V0, TimelineStream::enabled_offset())); | 2412 __ lw(T0, Address(V0, TimelineStream::enabled_offset())); |
| 2406 __ LoadObject(V0, Bool::True()); | 2413 __ LoadObject(V0, Bool::True()); |
| 2407 __ LoadObject(V1, Bool::False()); | 2414 __ LoadObject(V1, Bool::False()); |
| 2408 __ Ret(); | 2415 __ Ret(); |
| 2409 __ delay_slot()->movz(V0, V1, T0); // V0 = (T0 == 0) ? V1 : V0. | 2416 __ delay_slot()->movz(V0, V1, T0); // V0 = (T0 == 0) ? V1 : V0. |
| 2410 } | 2417 } |
| 2411 | 2418 |
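The movz in the return delay slot is a branchless select: it overwrites V0 with V1 only when T0 is zero, so the intrinsic preloads Bool::True() and swaps in Bool::False() when the stream's enabled flag reads as zero. Modeled in C++:

    #include <cstdint>

    // V0 = (T0 == 0) ? V1 : V0, with no branch.
    static intptr_t Movz(intptr_t v0, intptr_t v1, intptr_t t0) {
      return (t0 == 0) ? v1 : v0;
    }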
| 2412 } // namespace dart | 2419 } // namespace dart |
| 2413 | 2420 |
| 2414 #endif // defined TARGET_ARCH_MIPS | 2421 #endif // defined TARGET_ARCH_MIPS |