OLD | NEW |
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_MIPS. | 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_MIPS. |
6 #if defined(TARGET_ARCH_MIPS) | 6 #if defined(TARGET_ARCH_MIPS) |
7 | 7 |
8 #include "vm/intrinsifier.h" | 8 #include "vm/intrinsifier.h" |
9 | 9 |
10 #include "vm/assembler.h" | 10 #include "vm/assembler.h" |
(...skipping 13 matching lines...) |
24 const intptr_t kTypeArgumentsOffset = 1 * kWordSize; | 24 const intptr_t kTypeArgumentsOffset = 1 * kWordSize; |
25 const intptr_t kArrayLengthOffset = 0 * kWordSize; | 25 const intptr_t kArrayLengthOffset = 0 * kWordSize; |
26 Label fall_through; | 26 Label fall_through; |
27 | 27 |
28 // Compute the size to be allocated; it is based on the array length | 28 // Compute the size to be allocated; it is based on the array length |
29 // and is computed as: | 29 // and is computed as: |
30 // RoundedAllocationSize((array_length * kWordSize) + sizeof(RawArray)). | 30 // RoundedAllocationSize((array_length * kWordSize) + sizeof(RawArray)). |
31 __ lw(T3, Address(SP, kArrayLengthOffset)); // Array length. | 31 __ lw(T3, Address(SP, kArrayLengthOffset)); // Array length. |
32 | 32 |
33 // Check that length is a positive Smi. | 33 // Check that length is a positive Smi. |
34 __ andi(CMPRES, T3, Immediate(kSmiTagMask)); | 34 __ andi(CMPRES1, T3, Immediate(kSmiTagMask)); |
35 __ bne(CMPRES, ZR, &fall_through); | 35 __ bne(CMPRES1, ZR, &fall_through); |
36 __ bltz(T3, &fall_through); | 36 __ bltz(T3, &fall_through); |
37 | 37 |
38 // Check for maximum allowed length. | 38 // Check for maximum allowed length. |
39 const intptr_t max_len = | 39 const intptr_t max_len = |
40 reinterpret_cast<int32_t>(Smi::New(Array::kMaxElements)); | 40 reinterpret_cast<int32_t>(Smi::New(Array::kMaxElements)); |
41 __ BranchUnsignedGreater(T3, max_len, &fall_through); | 41 __ BranchUnsignedGreater(T3, max_len, &fall_through); |
42 | 42 |
43 const intptr_t fixed_size = sizeof(RawArray) + kObjectAlignment - 1; | 43 const intptr_t fixed_size = sizeof(RawArray) + kObjectAlignment - 1; |
44 __ LoadImmediate(T2, fixed_size); | 44 __ LoadImmediate(T2, fixed_size); |
45 __ sll(T3, T3, 1); // T3 is a Smi. | 45 __ sll(T3, T3, 1); // T3 is a Smi. |
46 __ addu(T2, T2, T3); | 46 __ addu(T2, T2, T3); |
47 ASSERT(kSmiTagShift == 1); | 47 ASSERT(kSmiTagShift == 1); |
48 __ LoadImmediate(T3, ~(kObjectAlignment - 1)); | 48 __ LoadImmediate(T3, ~(kObjectAlignment - 1)); |
49 __ and_(T2, T2, T3); | 49 __ and_(T2, T2, T3); |
50 | 50 |
51 // T2: Allocation size. | 51 // T2: Allocation size. |
52 | 52 |
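Note on the size computation above: a minimal C++ sketch of the rounding it performs, pre-adding (kObjectAlignment - 1) and masking. The helper name and the alignment value are assumptions for illustration, not the VM's actual definitions.

    // Sketch only: round an allocation request up to the object alignment.
    static inline intptr_t RoundedAllocationSize(intptr_t size) {
      const intptr_t kObjectAlignment = 8;  // assumed value for this sketch
      return (size + kObjectAlignment - 1) & ~(kObjectAlignment - 1);
    }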
53 Isolate* isolate = Isolate::Current(); | 53 Isolate* isolate = Isolate::Current(); |
54 Heap* heap = isolate->heap(); | 54 Heap* heap = isolate->heap(); |
55 | 55 |
56 __ LoadImmediate(T3, heap->TopAddress()); | 56 __ LoadImmediate(T3, heap->TopAddress()); |
57 __ lw(T0, Address(T3, 0)); // Potential new object start. | 57 __ lw(T0, Address(T3, 0)); // Potential new object start. |
58 | 58 |
59 __ AdduDetectOverflow(T1, T0, T2, CMPRES); // Potential next object start. | 59 __ AdduDetectOverflow(T1, T0, T2, CMPRES1); // Potential next object start. |
60 __ bltz(CMPRES, &fall_through); // CMPRES < 0 on overflow. | 60 __ bltz(CMPRES1, &fall_through); // CMPRES1 < 0 on overflow. |
61 | 61 |
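A sketch of the overflow rule AdduDetectOverflow relies on (the macro's actual body lives with the assembler; this only shows the underlying bit trick): signed addition overflowed iff both operands share a sign and the sum's sign differs.

    // Illustrative only, not the macro's implementation.
    static inline bool AddOverflows(int32_t a, int32_t b) {
      int32_t sum = static_cast<int32_t>(
          static_cast<uint32_t>(a) + static_cast<uint32_t>(b));
      return ((a ^ sum) & (b ^ sum)) < 0;  // sum's sign differs from both
    }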
62 // Check if the allocation fits into the remaining space. | 62 // Check if the allocation fits into the remaining space. |
63 // T0: potential new object start. | 63 // T0: potential new object start. |
64 // T1: potential next object start. | 64 // T1: potential next object start. |
65 // T2: allocation size. | 65 // T2: allocation size. |
66 __ LoadImmediate(T4, heap->TopAddress()); | 66 __ LoadImmediate(T4, heap->TopAddress()); |
67 __ lw(T4, Address(T4, 0)); | 67 __ lw(T4, Address(T4, 0)); |
68 __ BranchUnsignedGreaterEqual(T1, T4, &fall_through); | 68 __ BranchUnsignedGreaterEqual(T1, T4, &fall_through); |
69 | 69 |
70 // Successfully allocated the object(s), now update top to point to | 70 // Successfully allocated the object(s), now update top to point to |
(...skipping 12 matching lines...) |
83 | 83 |
84 __ BranchUnsignedGreater(T2, RawObject::SizeTag::kMaxSizeTag, &overflow); | 84 __ BranchUnsignedGreater(T2, RawObject::SizeTag::kMaxSizeTag, &overflow); |
85 __ b(&done); | 85 __ b(&done); |
86 __ delay_slot()->sll(T2, T2, shift); | 86 __ delay_slot()->sll(T2, T2, shift); |
87 __ Bind(&overflow); | 87 __ Bind(&overflow); |
88 __ mov(T2, ZR); | 88 __ mov(T2, ZR); |
89 __ Bind(&done); | 89 __ Bind(&done); |
90 | 90 |
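The branch-and-delay-slot pair above encodes the object's size tag; a hedged sketch of that step (constant names mirror RawObject::SizeTag, but the helper itself is illustrative):

    // Sizes too large for the tag field are encoded as 0; the real size is
    // then recovered elsewhere (assumption based on the overflow branch).
    static inline uint32_t EncodeSizeTag(uint32_t size, int shift,
                                         uint32_t max_size_tag) {
      return (size > max_size_tag) ? 0 : (size << shift);
    }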
91 // Get the class index and insert it into the tags. | 91 // Get the class index and insert it into the tags. |
92 // T2: size and bit tags. | 92 // T2: size and bit tags. |
93 __ LoadImmediate(TMP1, RawObject::ClassIdTag::encode(cls.id())); | 93 __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cls.id())); |
94 __ or_(T2, T2, TMP1); | 94 __ or_(T2, T2, TMP); |
95 __ sw(T2, FieldAddress(T0, Array::tags_offset())); // Store tags. | 95 __ sw(T2, FieldAddress(T0, Array::tags_offset())); // Store tags. |
96 } | 96 } |
97 | 97 |
98 // T0: new object start as a tagged pointer. | 98 // T0: new object start as a tagged pointer. |
99 // T1: new object end address. | 99 // T1: new object end address. |
100 // Store the type argument field. | 100 // Store the type argument field. |
101 __ lw(T2, Address(SP, kTypeArgumentsOffset)); // Type argument. | 101 __ lw(T2, Address(SP, kTypeArgumentsOffset)); // Type argument. |
102 __ StoreIntoObjectNoBarrier(T0, | 102 __ StoreIntoObjectNoBarrier(T0, |
103 FieldAddress(T0, Array::type_arguments_offset()), | 103 FieldAddress(T0, Array::type_arguments_offset()), |
104 T2); | 104 T2); |
(...skipping 38 matching lines...) |
143 void Intrinsifier::ImmutableList_getLength(Assembler* assembler) { | 143 void Intrinsifier::ImmutableList_getLength(Assembler* assembler) { |
144 return Array_getLength(assembler); | 144 return Array_getLength(assembler); |
145 } | 145 } |
146 | 146 |
147 | 147 |
148 void Intrinsifier::Array_getIndexed(Assembler* assembler) { | 148 void Intrinsifier::Array_getIndexed(Assembler* assembler) { |
149 Label fall_through; | 149 Label fall_through; |
150 | 150 |
151 __ lw(T0, Address(SP, + 0 * kWordSize)); // Index | 151 __ lw(T0, Address(SP, + 0 * kWordSize)); // Index |
152 | 152 |
153 __ andi(CMPRES, T0, Immediate(kSmiTagMask)); | 153 __ andi(CMPRES1, T0, Immediate(kSmiTagMask)); |
154 __ bne(CMPRES, ZR, &fall_through); // Index is not a Smi, fall through | 154 __ bne(CMPRES1, ZR, &fall_through); // Index is not a Smi, fall through |
155 __ delay_slot()->lw(T1, Address(SP, + 1 * kWordSize)); // Array | 155 __ delay_slot()->lw(T1, Address(SP, + 1 * kWordSize)); // Array |
156 | 156 |
157 // Range check. | 157 // Range check. |
158 __ lw(T2, FieldAddress(T1, Array::length_offset())); | 158 __ lw(T2, FieldAddress(T1, Array::length_offset())); |
159 __ BranchUnsignedGreaterEqual(T0, T2, &fall_through); | 159 __ BranchUnsignedGreaterEqual(T0, T2, &fall_through); |
160 | 160 |
161 ASSERT(kSmiTagShift == 1); | 161 ASSERT(kSmiTagShift == 1); |
162 // array element at T1 + T0*2 + Array::data_offset - 1 | 162 // array element at T1 + T0*2 + Array::data_offset - 1 |
163 __ sll(T2, T0, 1); | 163 __ sll(T2, T0, 1); |
164 __ addu(T2, T1, T2); | 164 __ addu(T2, T1, T2); |
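A sketch of the addressing math above, assuming kSmiTag == 0, kSmiTagShift == 1, kHeapObjectTag == 1, and kWordSize == 4 (the helper is purely illustrative):

    // A Smi index arrives pre-multiplied by 2, so one extra left shift
    // scales it to a byte offset; the -1 removes the heap-object tag.
    static inline int32_t ElementByteOffset(int32_t tagged_index,
                                            int32_t data_offset) {
      return (tagged_index << 1) + data_offset - 1;
    }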
(...skipping 45 matching lines...) |
210 // Check if it's dynamic. | 210 // Check if it's dynamic. |
211 // For now handle only TypeArguments and bail out if InstantiatedTypeArgs. | 211 // For now handle only TypeArguments and bail out if InstantiatedTypeArgs. |
212 __ LoadClassId(CMPRES1, T1); | 212 __ LoadClassId(CMPRES1, T1); |
213 __ BranchNotEqual(CMPRES1, kTypeArgumentsCid, &fall_through); | 213 __ BranchNotEqual(CMPRES1, kTypeArgumentsCid, &fall_through); |
214 | 214 |
215 // Get type at index 0. | 215 // Get type at index 0. |
216 __ lw(T0, FieldAddress(T1, TypeArguments::type_at_offset(0))); | 216 __ lw(T0, FieldAddress(T1, TypeArguments::type_at_offset(0))); |
217 __ BranchEqual(T0, Type::ZoneHandle(Type::DynamicType()), &checked_ok); | 217 __ BranchEqual(T0, Type::ZoneHandle(Type::DynamicType()), &checked_ok); |
218 | 218 |
219 // Check for int and num. | 219 // Check for int and num. |
220 __ andi(CMPRES, T2, Immediate(kSmiTagMask)); | 220 __ andi(CMPRES1, T2, Immediate(kSmiTagMask)); |
221 __ bne(CMPRES, ZR, &fall_through); // Non-smi value. | 221 __ bne(CMPRES1, ZR, &fall_through); // Non-smi value. |
222 | 222 |
223 __ BranchEqual(T0, Type::ZoneHandle(Type::IntType()), &checked_ok); | 223 __ BranchEqual(T0, Type::ZoneHandle(Type::IntType()), &checked_ok); |
224 __ BranchNotEqual(T0, Type::ZoneHandle(Type::Number()), &fall_through); | 224 __ BranchNotEqual(T0, Type::ZoneHandle(Type::Number()), &fall_through); |
225 __ Bind(&checked_ok); | 225 __ Bind(&checked_ok); |
226 } | 226 } |
227 __ lw(T1, Address(SP, 1 * kWordSize)); // Index. | 227 __ lw(T1, Address(SP, 1 * kWordSize)); // Index. |
228 __ andi(CMPRES, T1, Immediate(kSmiTagMask)); | 228 __ andi(CMPRES1, T1, Immediate(kSmiTagMask)); |
229 // Index not Smi. | 229 // Index not Smi. |
230 __ bne(CMPRES, ZR, &fall_through); | 230 __ bne(CMPRES1, ZR, &fall_through); |
231 | 231 |
232 __ lw(T0, Address(SP, 2 * kWordSize)); // Array. | 232 __ lw(T0, Address(SP, 2 * kWordSize)); // Array. |
233 // Range check. | 233 // Range check. |
234 __ lw(T3, FieldAddress(T0, Array::length_offset())); // Array length. | 234 __ lw(T3, FieldAddress(T0, Array::length_offset())); // Array length. |
235 // Runtime throws exception. | 235 // Runtime throws exception. |
236 __ BranchUnsignedGreaterEqual(T1, T3, &fall_through); | 236 __ BranchUnsignedGreaterEqual(T1, T3, &fall_through); |
237 | 237 |
238 // Note that T1 is Smi, i.e., times 2. | 238 // Note that T1 is Smi, i.e., times 2. |
239 ASSERT(kSmiTagShift == 1); | 239 ASSERT(kSmiTagShift == 1); |
240 __ lw(T2, Address(SP, 0 * kWordSize)); // Value. | 240 __ lw(T2, Address(SP, 0 * kWordSize)); // Value. |
(...skipping 89 matching lines...) |
330 __ Ret(); | 330 __ Ret(); |
331 __ delay_slot()->lw(V0, FieldAddress(V0, Array::length_offset())); | 331 __ delay_slot()->lw(V0, FieldAddress(V0, Array::length_offset())); |
332 } | 332 } |
333 | 333 |
334 | 334 |
335 void Intrinsifier::GrowableList_getIndexed(Assembler* assembler) { | 335 void Intrinsifier::GrowableList_getIndexed(Assembler* assembler) { |
336 Label fall_through; | 336 Label fall_through; |
337 | 337 |
338 __ lw(T0, Address(SP, 0 * kWordSize)); // Index | 338 __ lw(T0, Address(SP, 0 * kWordSize)); // Index |
339 | 339 |
340 __ andi(CMPRES, T0, Immediate(kSmiTagMask)); | 340 __ andi(CMPRES1, T0, Immediate(kSmiTagMask)); |
341 __ bne(CMPRES, ZR, &fall_through); // Index is not a Smi, fall through | 341 __ bne(CMPRES1, ZR, &fall_through); // Index is not a Smi, fall through |
342 __ delay_slot()->lw(T1, Address(SP, 1 * kWordSize)); // Array | 342 __ delay_slot()->lw(T1, Address(SP, 1 * kWordSize)); // Array |
343 | 343 |
344 // Range check. | 344 // Range check. |
345 __ lw(T2, FieldAddress(T1, GrowableObjectArray::length_offset())); | 345 __ lw(T2, FieldAddress(T1, GrowableObjectArray::length_offset())); |
346 __ BranchUnsignedGreaterEqual(T0, T2, &fall_through); | 346 __ BranchUnsignedGreaterEqual(T0, T2, &fall_through); |
347 | 347 |
348 __ lw(T2, FieldAddress(T1, GrowableObjectArray::data_offset())); // data | 348 __ lw(T2, FieldAddress(T1, GrowableObjectArray::data_offset())); // data |
349 | 349 |
350 ASSERT(kSmiTagShift == 1); | 350 ASSERT(kSmiTagShift == 1); |
351 // array element at T2 + T0 * 2 + Array::data_offset - 1 | 351 // array element at T2 + T0 * 2 + Array::data_offset - 1 |
352 __ sll(T3, T0, 1); | 352 __ sll(T3, T0, 1); |
353 __ addu(T2, T2, T3); | 353 __ addu(T2, T2, T3); |
354 __ Ret(); | 354 __ Ret(); |
355 __ delay_slot()->lw(V0, FieldAddress(T2, Array::data_offset())); | 355 __ delay_slot()->lw(V0, FieldAddress(T2, Array::data_offset())); |
356 __ Bind(&fall_through); | 356 __ Bind(&fall_through); |
357 } | 357 } |
358 | 358 |
359 | 359 |
360 // Set value into growable object array at specified index. | 360 // Set value into growable object array at specified index. |
361 // On stack: growable array (+2), index (+1), value (+0). | 361 // On stack: growable array (+2), index (+1), value (+0). |
362 void Intrinsifier::GrowableList_setIndexed(Assembler* assembler) { | 362 void Intrinsifier::GrowableList_setIndexed(Assembler* assembler) { |
363 if (FLAG_enable_type_checks) { | 363 if (FLAG_enable_type_checks) { |
364 return; | 364 return; |
365 } | 365 } |
366 Label fall_through; | 366 Label fall_through; |
367 __ lw(T1, Address(SP, 1 * kWordSize)); // Index. | 367 __ lw(T1, Address(SP, 1 * kWordSize)); // Index. |
368 __ andi(CMPRES, T1, Immediate(kSmiTagMask)); | 368 __ andi(CMPRES1, T1, Immediate(kSmiTagMask)); |
369 __ bne(CMPRES, ZR, &fall_through); // Non-smi index. | 369 __ bne(CMPRES1, ZR, &fall_through); // Non-smi index. |
370 __ delay_slot()->lw(T0, Address(SP, 2 * kWordSize)); // GrowableArray. | 370 __ delay_slot()->lw(T0, Address(SP, 2 * kWordSize)); // GrowableArray. |
371 // Range check using _length field. | 371 // Range check using _length field. |
372 __ lw(T2, FieldAddress(T0, GrowableObjectArray::length_offset())); | 372 __ lw(T2, FieldAddress(T0, GrowableObjectArray::length_offset())); |
373 // Runtime throws exception. | 373 // Runtime throws exception. |
374 __ BranchUnsignedGreaterEqual(T1, T2, &fall_through); | 374 __ BranchUnsignedGreaterEqual(T1, T2, &fall_through); |
375 __ lw(T0, FieldAddress(T0, GrowableObjectArray::data_offset())); // data. | 375 __ lw(T0, FieldAddress(T0, GrowableObjectArray::data_offset())); // data. |
376 __ lw(T2, Address(SP, 0 * kWordSize)); // Value. | 376 __ lw(T2, Address(SP, 0 * kWordSize)); // Value. |
377 // Note that T1 is Smi, i.e., times 2. | 377 // Note that T1 is Smi, i.e., times 2. |
378 ASSERT(kSmiTagShift == 1); | 378 ASSERT(kSmiTagShift == 1); |
379 __ sll(T1, T1, 1); | 379 __ sll(T1, T1, 1); |
380 __ addu(T1, T0, T1); | 380 __ addu(T1, T0, T1); |
381 __ StoreIntoObject(T0, | 381 __ StoreIntoObject(T0, |
382 FieldAddress(T1, Array::data_offset()), | 382 FieldAddress(T1, Array::data_offset()), |
383 T2); | 383 T2); |
384 __ Ret(); | 384 __ Ret(); |
385 __ Bind(&fall_through); | 385 __ Bind(&fall_through); |
386 } | 386 } |
387 | 387 |
388 | 388 |
389 // Set length of growable object array. The length cannot | 389 // Set length of growable object array. The length cannot |
390 // be greater than the length of the data container. | 390 // be greater than the length of the data container. |
391 // On stack: growable array (+1), length (+0). | 391 // On stack: growable array (+1), length (+0). |
392 void Intrinsifier::GrowableList_setLength(Assembler* assembler) { | 392 void Intrinsifier::GrowableList_setLength(Assembler* assembler) { |
393 Label fall_through; | 393 Label fall_through; |
394 __ lw(T1, Address(SP, 0 * kWordSize)); // Length value. | 394 __ lw(T1, Address(SP, 0 * kWordSize)); // Length value. |
395 __ andi(CMPRES, T1, Immediate(kSmiTagMask)); | 395 __ andi(CMPRES1, T1, Immediate(kSmiTagMask)); |
396 __ bne(CMPRES, ZR, &fall_through); // Non-smi length. | 396 __ bne(CMPRES1, ZR, &fall_through); // Non-smi length. |
397 __ delay_slot()->lw(T0, Address(SP, 1 * kWordSize)); // Growable array. | 397 __ delay_slot()->lw(T0, Address(SP, 1 * kWordSize)); // Growable array. |
398 __ Ret(); | 398 __ Ret(); |
399 __ delay_slot()->sw(T1, | 399 __ delay_slot()->sw(T1, |
400 FieldAddress(T0, GrowableObjectArray::length_offset())); | 400 FieldAddress(T0, GrowableObjectArray::length_offset())); |
401 __ Bind(&fall_through); | 401 __ Bind(&fall_through); |
402 } | 402 } |
403 | 403 |
404 | 404 |
405 // Set data of growable object array. | 405 // Set data of growable object array. |
406 // On stack: growable array (+1), data (+0). | 406 // On stack: growable array (+1), data (+0). |
407 void Intrinsifier::GrowableList_setData(Assembler* assembler) { | 407 void Intrinsifier::GrowableList_setData(Assembler* assembler) { |
408 if (FLAG_enable_type_checks) { | 408 if (FLAG_enable_type_checks) { |
409 return; | 409 return; |
410 } | 410 } |
411 Label fall_through; | 411 Label fall_through; |
412 __ lw(T1, Address(SP, 0 * kWordSize)); // Data. | 412 __ lw(T1, Address(SP, 0 * kWordSize)); // Data. |
413 // Check that data is an ObjectArray. | 413 // Check that data is an ObjectArray. |
414 __ andi(CMPRES, T1, Immediate(kSmiTagMask)); | 414 __ andi(CMPRES1, T1, Immediate(kSmiTagMask)); |
415 __ beq(CMPRES, ZR, &fall_through); // Data is Smi. | 415 __ beq(CMPRES1, ZR, &fall_through); // Data is Smi. |
416 __ LoadClassId(CMPRES1, T1); | 416 __ LoadClassId(CMPRES1, T1); |
417 __ BranchNotEqual(CMPRES1, kArrayCid, &fall_through); | 417 __ BranchNotEqual(CMPRES1, kArrayCid, &fall_through); |
418 __ lw(T0, Address(SP, 1 * kWordSize)); // Growable array. | 418 __ lw(T0, Address(SP, 1 * kWordSize)); // Growable array. |
419 __ StoreIntoObject(T0, | 419 __ StoreIntoObject(T0, |
420 FieldAddress(T0, GrowableObjectArray::data_offset()), | 420 FieldAddress(T0, GrowableObjectArray::data_offset()), |
421 T1); | 421 T1); |
422 __ Ret(); | 422 __ Ret(); |
423 __ Bind(&fall_through); | 423 __ Bind(&fall_through); |
424 } | 424 } |
425 | 425 |
(...skipping 31 matching lines...) |
457 __ Bind(&fall_through); | 457 __ Bind(&fall_through); |
458 } | 458 } |
459 | 459 |
460 | 460 |
461 #define TYPED_ARRAY_ALLOCATION(type_name, cid, max_len, scale_shift) \ | 461 #define TYPED_ARRAY_ALLOCATION(type_name, cid, max_len, scale_shift) \ |
462 Label fall_through; \ | 462 Label fall_through; \ |
463 const intptr_t kArrayLengthStackOffset = 0 * kWordSize; \ | 463 const intptr_t kArrayLengthStackOffset = 0 * kWordSize; \ |
464 __ lw(T2, Address(SP, kArrayLengthStackOffset)); /* Array length. */ \ | 464 __ lw(T2, Address(SP, kArrayLengthStackOffset)); /* Array length. */ \ |
465 /* Check that length is a positive Smi. */ \ | 465 /* Check that length is a positive Smi. */ \ |
466 /* T2: requested array length argument. */ \ | 466 /* T2: requested array length argument. */ \ |
467 __ andi(CMPRES, T2, Immediate(kSmiTagMask)); \ | 467 __ andi(CMPRES1, T2, Immediate(kSmiTagMask)); \ |
468 __ bne(CMPRES, ZR, &fall_through); \ | 468 __ bne(CMPRES1, ZR, &fall_through); \ |
469 __ BranchSignedLess(T2, 0, &fall_through); \ | 469 __ BranchSignedLess(T2, 0, &fall_through); \ |
470 __ SmiUntag(T2); \ | 470 __ SmiUntag(T2); \ |
471 /* Check for maximum allowed length. */ \ | 471 /* Check for maximum allowed length. */ \ |
472 /* T2: untagged array length. */ \ | 472 /* T2: untagged array length. */ \ |
473 __ BranchSignedGreater(T2, max_len, &fall_through); \ | 473 __ BranchSignedGreater(T2, max_len, &fall_through); \ |
474 __ sll(T2, T2, scale_shift); \ | 474 __ sll(T2, T2, scale_shift); \ |
475 const intptr_t fixed_size = sizeof(Raw##type_name) + kObjectAlignment - 1; \ | 475 const intptr_t fixed_size = sizeof(Raw##type_name) + kObjectAlignment - 1; \ |
476 __ AddImmediate(T2, fixed_size); \ | 476 __ AddImmediate(T2, fixed_size); \ |
477 __ LoadImmediate(TMP, -kObjectAlignment); \ | 477 __ LoadImmediate(TMP, -kObjectAlignment); \ |
478 __ and_(T2, T2, TMP); \ | 478 __ and_(T2, T2, TMP); \ |
479 Heap* heap = Isolate::Current()->heap(); \ | 479 Heap* heap = Isolate::Current()->heap(); \ |
480 \ | 480 \ |
481 __ LoadImmediate(V0, heap->TopAddress()); \ | 481 __ LoadImmediate(V0, heap->TopAddress()); \ |
482 __ lw(V0, Address(V0, 0)); \ | 482 __ lw(V0, Address(V0, 0)); \ |
483 \ | 483 \ |
484 /* T2: allocation size. */ \ | 484 /* T2: allocation size. */ \ |
485 __ AdduDetectOverflow(T1, V0, T2, CMPRES); \ | 485 __ AdduDetectOverflow(T1, V0, T2, CMPRES1); \ |
486 __ bltz(CMPRES, &fall_through); \ | 486 __ bltz(CMPRES1, &fall_through); \ |
487 \ | 487 \ |
488 /* Check if the allocation fits into the remaining space. */ \ | 488 /* Check if the allocation fits into the remaining space. */ \ |
489 /* V0: potential new object start. */ \ | 489 /* V0: potential new object start. */ \ |
490 /* T1: potential next object start. */ \ | 490 /* T1: potential next object start. */ \ |
491 /* T2: allocation size. */ \ | 491 /* T2: allocation size. */ \ |
492 __ LoadImmediate(T3, heap->EndAddress()); \ | 492 __ LoadImmediate(T3, heap->EndAddress()); \ |
493 __ lw(T3, Address(T3, 0)); \ | 493 __ lw(T3, Address(T3, 0)); \ |
494 __ BranchUnsignedGreaterEqual(T1, T3, &fall_through); \ | 494 __ BranchUnsignedGreaterEqual(T1, T3, &fall_through); \ |
495 \ | 495 \ |
496 /* Successfully allocated the object(s), now update top to point to */ \ | 496 /* Successfully allocated the object(s), now update top to point to */ \ |
(...skipping 84 matching lines...) |
581 } | 581 } |
582 CLASS_LIST_TYPED_DATA(TYPED_DATA_ALLOCATOR) | 582 CLASS_LIST_TYPED_DATA(TYPED_DATA_ALLOCATOR) |
583 #undef TYPED_DATA_ALLOCATOR | 583 #undef TYPED_DATA_ALLOCATOR |
584 | 584 |
585 | 585 |
586 // Loads args from stack into T0 and T1. | 586 // Loads args from stack into T0 and T1. |
587 // Tests if they are Smis; jumps to label not_smi if not. | 587 // Tests if they are Smis; jumps to label not_smi if not. |
588 static void TestBothArgumentsSmis(Assembler* assembler, Label* not_smi) { | 588 static void TestBothArgumentsSmis(Assembler* assembler, Label* not_smi) { |
589 __ lw(T0, Address(SP, 0 * kWordSize)); | 589 __ lw(T0, Address(SP, 0 * kWordSize)); |
590 __ lw(T1, Address(SP, 1 * kWordSize)); | 590 __ lw(T1, Address(SP, 1 * kWordSize)); |
591 __ or_(CMPRES, T0, T1); | 591 __ or_(CMPRES1, T0, T1); |
592 __ andi(CMPRES, CMPRES, Immediate(kSmiTagMask)); | 592 __ andi(CMPRES1, CMPRES1, Immediate(kSmiTagMask)); |
593 __ bne(CMPRES, ZR, not_smi); | 593 __ bne(CMPRES1, ZR, not_smi); |
594 return; | 594 return; |
595 } | 595 } |
596 | 596 |
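The or_/andi pair above tests both tags at once; a sketch of why that works, assuming kSmiTag == 0 and kSmiTagMask == 1:

    // OR-ing the two values keeps tag bit 0 clear only if both tags were
    // zero, so a single mask test covers both arguments.
    static inline bool BothAreSmis(int32_t a, int32_t b) {
      const int32_t kSmiTagMask = 1;  // assumed for this sketch
      return ((a | b) & kSmiTagMask) == 0;
    }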
597 | 597 |
598 void Intrinsifier::Integer_addFromInteger(Assembler* assembler) { | 598 void Intrinsifier::Integer_addFromInteger(Assembler* assembler) { |
599 Label fall_through; | 599 Label fall_through; |
600 | 600 |
601 TestBothArgumentsSmis(assembler, &fall_through); // Checks two Smis. | 601 TestBothArgumentsSmis(assembler, &fall_through); // Checks two Smis. |
602 __ AdduDetectOverflow(V0, T0, T1, CMPRES); // Add. | 602 __ AdduDetectOverflow(V0, T0, T1, CMPRES1); // Add. |
603 __ bltz(CMPRES, &fall_through); // Fall through on overflow. | 603 __ bltz(CMPRES1, &fall_through); // Fall through on overflow. |
604 __ Ret(); // Nothing in branch delay slot. | 604 __ Ret(); // Nothing in branch delay slot. |
605 __ Bind(&fall_through); | 605 __ Bind(&fall_through); |
606 } | 606 } |
607 | 607 |
608 | 608 |
609 void Intrinsifier::Integer_add(Assembler* assembler) { | 609 void Intrinsifier::Integer_add(Assembler* assembler) { |
610 return Integer_addFromInteger(assembler); | 610 return Integer_addFromInteger(assembler); |
611 } | 611 } |
612 | 612 |
613 | 613 |
614 void Intrinsifier::Integer_subFromInteger(Assembler* assembler) { | 614 void Intrinsifier::Integer_subFromInteger(Assembler* assembler) { |
615 Label fall_through; | 615 Label fall_through; |
616 | 616 |
617 TestBothArgumentsSmis(assembler, &fall_through); | 617 TestBothArgumentsSmis(assembler, &fall_through); |
618 __ SubuDetectOverflow(V0, T0, T1, CMPRES); // Subtract. | 618 __ SubuDetectOverflow(V0, T0, T1, CMPRES1); // Subtract. |
619 __ bltz(CMPRES, &fall_through); // Fall through on overflow. | 619 __ bltz(CMPRES1, &fall_through); // Fall through on overflow. |
620 __ Ret(); | 620 __ Ret(); |
621 __ Bind(&fall_through); | 621 __ Bind(&fall_through); |
622 } | 622 } |
623 | 623 |
624 | 624 |
625 void Intrinsifier::Integer_sub(Assembler* assembler) { | 625 void Intrinsifier::Integer_sub(Assembler* assembler) { |
626 Label fall_through; | 626 Label fall_through; |
627 | 627 |
628 TestBothArgumentsSmis(assembler, &fall_through); | 628 TestBothArgumentsSmis(assembler, &fall_through); |
629 __ SubuDetectOverflow(V0, T1, T0, CMPRES); // Subtract. | 629 __ SubuDetectOverflow(V0, T1, T0, CMPRES1); // Subtract. |
630 __ bltz(CMPRES, &fall_through); // Fall through on overflow. | 630 __ bltz(CMPRES1, &fall_through); // Fall through on overflow. |
631 __ Ret(); // Nothing in branch delay slot. | 631 __ Ret(); // Nothing in branch delay slot. |
632 __ Bind(&fall_through); | 632 __ Bind(&fall_through); |
633 } | 633 } |
634 | 634 |
635 | 635 |
636 void Intrinsifier::Integer_mulFromInteger(Assembler* assembler) { | 636 void Intrinsifier::Integer_mulFromInteger(Assembler* assembler) { |
637 Label fall_through; | 637 Label fall_through; |
638 | 638 |
639 TestBothArgumentsSmis(assembler, &fall_through); // checks two smis | 639 TestBothArgumentsSmis(assembler, &fall_through); // checks two smis |
640 __ SmiUntag(T0); // Untags T0; only want result shifted by one. | 640 __ SmiUntag(T0); // Untags T0; only want result shifted by one. |
(...skipping 58 matching lines...) |
699 // res = res - right; | 699 // res = res - right; |
700 // } else { | 700 // } else { |
701 // res = res + right; | 701 // res = res + right; |
702 // } | 702 // } |
703 // } | 703 // } |
704 void Intrinsifier::Integer_moduloFromInteger(Assembler* assembler) { | 704 void Intrinsifier::Integer_moduloFromInteger(Assembler* assembler) { |
705 Label fall_through, subtract; | 705 Label fall_through, subtract; |
706 // Test arguments for smi. | 706 // Test arguments for smi. |
707 __ lw(T1, Address(SP, 0 * kWordSize)); | 707 __ lw(T1, Address(SP, 0 * kWordSize)); |
708 __ lw(T0, Address(SP, 1 * kWordSize)); | 708 __ lw(T0, Address(SP, 1 * kWordSize)); |
709 __ or_(CMPRES, T0, T1); | 709 __ or_(CMPRES1, T0, T1); |
710 __ andi(CMPRES, CMPRES, Immediate(kSmiTagMask)); | 710 __ andi(CMPRES1, CMPRES1, Immediate(kSmiTagMask)); |
711 __ bne(CMPRES, ZR, &fall_through); | 711 __ bne(CMPRES1, ZR, &fall_through); |
712 // T1: Tagged left (dividend). | 712 // T1: Tagged left (dividend). |
713 // T0: Tagged right (divisor). | 713 // T0: Tagged right (divisor). |
714 // Check if modulo by zero -> exception thrown in main function. | 714 // Check if modulo by zero -> exception thrown in main function. |
715 __ beq(T0, ZR, &fall_through); | 715 __ beq(T0, ZR, &fall_through); |
716 EmitRemainderOperation(assembler); | 716 EmitRemainderOperation(assembler); |
717 // Untagged right in T0. Untagged remainder result in V0. | 717 // Untagged right in T0. Untagged remainder result in V0. |
718 | 718 |
719 Label done; | 719 Label done; |
720 __ bgez(V0, &done); | 720 __ bgez(V0, &done); |
721 __ bltz(T0, &subtract); | 721 __ bltz(T0, &subtract); |
(...skipping 48 matching lines...) |
770 __ Ret(); | 770 __ Ret(); |
771 __ delay_slot()->SmiTag(V0); | 771 __ delay_slot()->SmiTag(V0); |
772 __ Bind(&fall_through); | 772 __ Bind(&fall_through); |
773 } | 773 } |
774 | 774 |
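For reference, the pseudocode comment before Integer_moduloFromInteger corresponds to this C++ sketch of Dart's non-negative remainder (illustrative; right != 0 is guaranteed by the check before EmitRemainderOperation):

    // Dart's % always yields a non-negative result, unlike C's remainder.
    static int32_t DartModulo(int32_t left, int32_t right) {
      int32_t res = left % right;             // sign follows 'left' in C
      if (res < 0) {
        res += (right < 0) ? -right : right;  // add |right| to make res >= 0
      }
      return res;
    }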
775 | 775 |
776 void Intrinsifier::Integer_negate(Assembler* assembler) { | 776 void Intrinsifier::Integer_negate(Assembler* assembler) { |
777 Label fall_through; | 777 Label fall_through; |
778 | 778 |
779 __ lw(T0, Address(SP, + 0 * kWordSize)); // Grabs first argument. | 779 __ lw(T0, Address(SP, + 0 * kWordSize)); // Grabs first argument. |
780 __ andi(CMPRES, T0, Immediate(kSmiTagMask)); // Test for Smi. | 780 __ andi(CMPRES1, T0, Immediate(kSmiTagMask)); // Test for Smi. |
781 __ bne(CMPRES, ZR, &fall_through); // Fall through if not a Smi. | 781 __ bne(CMPRES1, ZR, &fall_through); // Fall through if not a Smi. |
782 __ SubuDetectOverflow(V0, ZR, T0, CMPRES); | 782 __ SubuDetectOverflow(V0, ZR, T0, CMPRES1); |
783 __ bltz(CMPRES, &fall_through); // There was overflow. | 783 __ bltz(CMPRES1, &fall_through); // There was overflow. |
784 __ Ret(); | 784 __ Ret(); |
785 __ Bind(&fall_through); | 785 __ Bind(&fall_through); |
786 } | 786 } |
787 | 787 |
788 | 788 |
789 void Intrinsifier::Integer_bitAndFromInteger(Assembler* assembler) { | 789 void Intrinsifier::Integer_bitAndFromInteger(Assembler* assembler) { |
790 Label fall_through; | 790 Label fall_through; |
791 | 791 |
792 TestBothArgumentsSmis(assembler, &fall_through); // Checks two smis. | 792 TestBothArgumentsSmis(assembler, &fall_through); // Checks two smis. |
793 __ Ret(); | 793 __ Ret(); |
(...skipping 86 matching lines...) |
880 __ Bind(&fall_through); | 880 __ Bind(&fall_through); |
881 } | 881 } |
882 | 882 |
883 | 883 |
884 static void Get64SmiOrMint(Assembler* assembler, | 884 static void Get64SmiOrMint(Assembler* assembler, |
885 Register res_hi, | 885 Register res_hi, |
886 Register res_lo, | 886 Register res_lo, |
887 Register reg, | 887 Register reg, |
888 Label* not_smi_or_mint) { | 888 Label* not_smi_or_mint) { |
889 Label not_smi, done; | 889 Label not_smi, done; |
890 __ andi(CMPRES, reg, Immediate(kSmiTagMask)); | 890 __ andi(CMPRES1, reg, Immediate(kSmiTagMask)); |
891 __ bne(CMPRES, ZR, ¬_smi); | 891 __ bne(CMPRES1, ZR, ¬_smi); |
892 __ SmiUntag(reg); | 892 __ SmiUntag(reg); |
893 | 893 |
894 // Sign extend to 64 bit | 894 // Sign extend to 64 bit |
895 __ mov(res_lo, reg); | 895 __ mov(res_lo, reg); |
896 __ b(&done); | 896 __ b(&done); |
897 __ delay_slot()->sra(res_hi, reg, 31); | 897 __ delay_slot()->sra(res_hi, reg, 31); |
898 | 898 |
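The mov/sra pair above is a 32-to-64-bit sign extension; a sketch with a hypothetical helper:

    // Arithmetic right shift by 31 replicates the sign bit, giving 0 or -1
    // for the high word of the 64-bit pair.
    static inline void SignExtendTo64(int32_t v, int32_t* hi, int32_t* lo) {
      *lo = v;
      *hi = v >> 31;
    }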
899 __ Bind(¬_smi); | 899 __ Bind(¬_smi); |
900 __ LoadClassId(CMPRES1, reg); | 900 __ LoadClassId(CMPRES1, reg); |
901 __ BranchNotEqual(CMPRES1, kMintCid, not_smi_or_mint); | 901 __ BranchNotEqual(CMPRES1, kMintCid, not_smi_or_mint); |
(...skipping 106 matching lines...) |
1008 // This is called for Smi, Mint and Bigint receivers. The right argument | 1008 // This is called for Smi, Mint and Bigint receivers. The right argument |
1009 // can be Smi, Mint, Bigint or double. | 1009 // can be Smi, Mint, Bigint or double. |
1010 void Intrinsifier::Integer_equalToInteger(Assembler* assembler) { | 1010 void Intrinsifier::Integer_equalToInteger(Assembler* assembler) { |
1011 Label fall_through, true_label, check_for_mint; | 1011 Label fall_through, true_label, check_for_mint; |
1012 // For integer receiver '===' check first. | 1012 // For integer receiver '===' check first. |
1013 __ lw(T0, Address(SP, 0 * kWordSize)); | 1013 __ lw(T0, Address(SP, 0 * kWordSize)); |
1014 __ lw(T1, Address(SP, 1 * kWordSize)); | 1014 __ lw(T1, Address(SP, 1 * kWordSize)); |
1015 __ beq(T0, T1, &true_label); | 1015 __ beq(T0, T1, &true_label); |
1016 | 1016 |
1017 __ or_(T2, T0, T1); | 1017 __ or_(T2, T0, T1); |
1018 __ andi(CMPRES, T2, Immediate(kSmiTagMask)); | 1018 __ andi(CMPRES1, T2, Immediate(kSmiTagMask)); |
1019 // If T0 or T1 is not a smi do Mint checks. | 1019 // If T0 or T1 is not a smi do Mint checks. |
1020 __ bne(CMPRES, ZR, &check_for_mint); | 1020 __ bne(CMPRES1, ZR, &check_for_mint); |
1021 | 1021 |
1022 // Both arguments are smi, '===' is good enough. | 1022 // Both arguments are smi, '===' is good enough. |
1023 __ LoadObject(V0, Bool::False()); | 1023 __ LoadObject(V0, Bool::False()); |
1024 __ Ret(); | 1024 __ Ret(); |
1025 __ Bind(&true_label); | 1025 __ Bind(&true_label); |
1026 __ LoadObject(V0, Bool::True()); | 1026 __ LoadObject(V0, Bool::True()); |
1027 __ Ret(); | 1027 __ Ret(); |
1028 | 1028 |
1029 // At least one of the arguments was not Smi. | 1029 // At least one of the arguments was not Smi. |
1030 Label receiver_not_smi; | 1030 Label receiver_not_smi; |
1031 __ Bind(&check_for_mint); | 1031 __ Bind(&check_for_mint); |
1032 | 1032 |
1033 __ andi(CMPRES, T1, Immediate(kSmiTagMask)); | 1033 __ andi(CMPRES1, T1, Immediate(kSmiTagMask)); |
1034 __ bne(CMPRES, ZR, &receiver_not_smi); // Check receiver. | 1034 __ bne(CMPRES1, ZR, &receiver_not_smi); // Check receiver. |
1035 | 1035 |
1036 // Left (receiver) is Smi, return false if right is not Double. | 1036 // Left (receiver) is Smi, return false if right is not Double. |
1037 // Note that an instance of Mint or Bigint never contains a value that can be | 1037 // Note that an instance of Mint or Bigint never contains a value that can be |
1038 // represented by Smi. | 1038 // represented by Smi. |
1039 | 1039 |
1040 __ LoadClassId(CMPRES1, T0); | 1040 __ LoadClassId(CMPRES1, T0); |
1041 __ BranchEqual(CMPRES1, kDoubleCid, &fall_through); | 1041 __ BranchEqual(CMPRES1, kDoubleCid, &fall_through); |
1042 __ LoadObject(V0, Bool::False()); // Smi == Mint -> false. | 1042 __ LoadObject(V0, Bool::False()); // Smi == Mint -> false. |
1043 __ Ret(); | 1043 __ Ret(); |
1044 | 1044 |
1045 __ Bind(&receiver_not_smi); | 1045 __ Bind(&receiver_not_smi); |
1046 // T1: receiver. | 1046 // T1: receiver. |
1047 | 1047 |
1048 __ LoadClassId(CMPRES1, T1); | 1048 __ LoadClassId(CMPRES1, T1); |
1049 __ BranchNotEqual(CMPRES1, kMintCid, &fall_through); | 1049 __ BranchNotEqual(CMPRES1, kMintCid, &fall_through); |
1050 // Receiver is Mint, return false if right is Smi. | 1050 // Receiver is Mint, return false if right is Smi. |
1051 __ andi(CMPRES, T0, Immediate(kSmiTagMask)); | 1051 __ andi(CMPRES1, T0, Immediate(kSmiTagMask)); |
1052 __ bne(CMPRES, ZR, &fall_through); | 1052 __ bne(CMPRES1, ZR, &fall_through); |
1053 __ LoadObject(V0, Bool::False()); | 1053 __ LoadObject(V0, Bool::False()); |
1054 __ Ret(); | 1054 __ Ret(); |
1055 // TODO(srdjan): Implement Mint == Mint comparison. | 1055 // TODO(srdjan): Implement Mint == Mint comparison. |
1056 | 1056 |
1057 __ Bind(&fall_through); | 1057 __ Bind(&fall_through); |
1058 } | 1058 } |
1059 | 1059 |
1060 | 1060 |
1061 void Intrinsifier::Integer_equal(Assembler* assembler) { | 1061 void Intrinsifier::Integer_equal(Assembler* assembler) { |
1062 return Integer_equalToInteger(assembler); | 1062 return Integer_equalToInteger(assembler); |
1063 } | 1063 } |
1064 | 1064 |
1065 | 1065 |
1066 void Intrinsifier::Integer_sar(Assembler* assembler) { | 1066 void Intrinsifier::Integer_sar(Assembler* assembler) { |
1067 Label fall_through; | 1067 Label fall_through; |
1068 | 1068 |
1069 TestBothArgumentsSmis(assembler, &fall_through); | 1069 TestBothArgumentsSmis(assembler, &fall_through); |
1070 // Shift amount in T0. Value to shift in T1. | 1070 // Shift amount in T0. Value to shift in T1. |
1071 | 1071 |
1072 __ SmiUntag(T0); | 1072 __ SmiUntag(T0); |
1073 __ bltz(T0, &fall_through); | 1073 __ bltz(T0, &fall_through); |
1074 | 1074 |
1075 __ LoadImmediate(T2, 0x1F); | 1075 __ LoadImmediate(T2, 0x1F); |
1076 __ slt(CMPRES, T2, T0); // CMPRES <- 0x1F < T0 ? 1 : 0 | 1076 __ slt(CMPRES1, T2, T0); // CMPRES1 <- 0x1F < T0 ? 1 : 0 |
1077 __ movn(T0, T2, CMPRES); // T0 <- 0x1F < T0 ? 0x1F : T0 | 1077 __ movn(T0, T2, CMPRES1); // T0 <- 0x1F < T0 ? 0x1F : T0 |
1078 | 1078 |
1079 __ SmiUntag(T1); | 1079 __ SmiUntag(T1); |
1080 __ srav(V0, T1, T0); | 1080 __ srav(V0, T1, T0); |
1081 __ Ret(); | 1081 __ Ret(); |
1082 __ delay_slot()->SmiTag(V0); | 1082 __ delay_slot()->SmiTag(V0); |
1083 __ Bind(&fall_through); | 1083 __ Bind(&fall_through); |
1084 } | 1084 } |
1085 | 1085 |
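The slt/movn pair in Integer_sar clamps the shift amount; a sketch of the intended semantics, assuming MIPS srav honors only the low five bits of the count:

    // Counts of 32 or more must be clamped to 31 so large shifts still give
    // the arithmetic-shift result (all sign bits) rather than wrapping.
    static inline int32_t SarClamped(int32_t value, int32_t amount) {
      if (amount > 31) amount = 31;  // mirrors: slt CMPRES1, T2, T0; movn
      return value >> amount;        // arithmetic shift on signed values
    }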
1086 | 1086 |
1087 void Intrinsifier::Smi_bitNegate(Assembler* assembler) { | 1087 void Intrinsifier::Smi_bitNegate(Assembler* assembler) { |
1088 __ lw(T0, Address(SP, 0 * kWordSize)); | 1088 __ lw(T0, Address(SP, 0 * kWordSize)); |
1089 __ nor(V0, T0, ZR); | 1089 __ nor(V0, T0, ZR); |
1090 __ Ret(); | 1090 __ Ret(); |
1091 __ delay_slot()->addiu(V0, V0, Immediate(-1)); // Remove inverted smi-tag. | 1091 __ delay_slot()->addiu(V0, V0, Immediate(-1)); // Remove inverted smi-tag. |
1092 } | 1092 } |
1093 | 1093 |
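Why nor plus addiu(-1) in Smi_bitNegate is a tagged bitwise-not (kSmiTag == 0, kSmiTagShift == 1 assumed): for a tagged value 2n, ~(2n) - 1 == -2n - 2 == 2 * (~n), i.e. the Smi encoding of ~n. A sketch:

    // Bitwise-not of the tagged word sets the tag bit; subtracting 1
    // restores a zero tag while completing ~n on the payload.
    static inline int32_t SmiBitNot(int32_t tagged) {
      return ~tagged - 1;
    }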
1094 | 1094 |
1095 void Intrinsifier::Smi_bitLength(Assembler* assembler) { | 1095 void Intrinsifier::Smi_bitLength(Assembler* assembler) { |
1096 // TODO(sra): Implement. | 1096 // TODO(sra): Implement. |
1097 } | 1097 } |
1098 | 1098 |
1099 | 1099 |
1100 // Check if the last argument is a double; jump to label 'is_smi' if smi | 1100 // Check if the last argument is a double; jump to label 'is_smi' if smi |
1101 // (easy to convert to double), otherwise jump to label 'not_double_smi'. | 1101 // (easy to convert to double), otherwise jump to label 'not_double_smi'. |
1102 // Returns the last argument in T0. | 1102 // Returns the last argument in T0. |
1103 static void TestLastArgumentIsDouble(Assembler* assembler, | 1103 static void TestLastArgumentIsDouble(Assembler* assembler, |
1104 Label* is_smi, | 1104 Label* is_smi, |
1105 Label* not_double_smi) { | 1105 Label* not_double_smi) { |
1106 __ lw(T0, Address(SP, 0 * kWordSize)); | 1106 __ lw(T0, Address(SP, 0 * kWordSize)); |
1107 __ andi(CMPRES, T0, Immediate(kSmiTagMask)); | 1107 __ andi(CMPRES1, T0, Immediate(kSmiTagMask)); |
1108 __ beq(CMPRES, ZR, is_smi); | 1108 __ beq(CMPRES1, ZR, is_smi); |
1109 __ LoadClassId(CMPRES1, T0); | 1109 __ LoadClassId(CMPRES1, T0); |
1110 __ BranchNotEqual(CMPRES1, kDoubleCid, not_double_smi); | 1110 __ BranchNotEqual(CMPRES1, kDoubleCid, not_double_smi); |
1111 // Fall through with Double in T0. | 1111 // Fall through with Double in T0. |
1112 } | 1112 } |
1113 | 1113 |
1114 | 1114 |
1115 // Both arguments on stack, arg0 (left) is a double, arg1 (right) is of unknown | 1115 // Both arguments on stack, arg0 (left) is a double, arg1 (right) is of unknown |
1116 // type. Return true or false object in the register V0. Any NaN argument | 1116 // type. Return true or false object in the register V0. Any NaN argument |
1117 // returns false. Any non-double arg1 causes control flow to fall through to the | 1117 // returns false. Any non-double arg1 causes control flow to fall through to the |
1118 // slow case (compiled method body). | 1118 // slow case (compiled method body). |
(...skipping 120 matching lines...) |
1239 void Intrinsifier::Double_div(Assembler* assembler) { | 1239 void Intrinsifier::Double_div(Assembler* assembler) { |
1240 return DoubleArithmeticOperations(assembler, Token::kDIV); | 1240 return DoubleArithmeticOperations(assembler, Token::kDIV); |
1241 } | 1241 } |
1242 | 1242 |
1243 | 1243 |
1244 // Left is double, right is integer (Bigint, Mint or Smi). | 1244 // Left is double, right is integer (Bigint, Mint or Smi). |
1245 void Intrinsifier::Double_mulFromInteger(Assembler* assembler) { | 1245 void Intrinsifier::Double_mulFromInteger(Assembler* assembler) { |
1246 Label fall_through; | 1246 Label fall_through; |
1247 // Only smis allowed. | 1247 // Only smis allowed. |
1248 __ lw(T0, Address(SP, 0 * kWordSize)); | 1248 __ lw(T0, Address(SP, 0 * kWordSize)); |
1249 __ andi(CMPRES, T0, Immediate(kSmiTagMask)); | 1249 __ andi(CMPRES1, T0, Immediate(kSmiTagMask)); |
1250 __ bne(CMPRES, ZR, &fall_through); | 1250 __ bne(CMPRES1, ZR, &fall_through); |
1251 | 1251 |
1252 // Is Smi. | 1252 // Is Smi. |
1253 __ SmiUntag(T0); | 1253 __ SmiUntag(T0); |
1254 __ mtc1(T0, F4); | 1254 __ mtc1(T0, F4); |
1255 __ cvtdw(D1, F4); | 1255 __ cvtdw(D1, F4); |
1256 | 1256 |
1257 __ lw(T0, Address(SP, 1 * kWordSize)); | 1257 __ lw(T0, Address(SP, 1 * kWordSize)); |
1258 __ lwc1(F0, FieldAddress(T0, Double::value_offset())); | 1258 __ lwc1(F0, FieldAddress(T0, Double::value_offset())); |
1259 __ lwc1(F1, FieldAddress(T0, Double::value_offset() + kWordSize)); | 1259 __ lwc1(F1, FieldAddress(T0, Double::value_offset() + kWordSize)); |
1260 __ muld(D0, D0, D1); | 1260 __ muld(D0, D0, D1); |
1261 const Class& double_class = Class::Handle( | 1261 const Class& double_class = Class::Handle( |
1262 Isolate::Current()->object_store()->double_class()); | 1262 Isolate::Current()->object_store()->double_class()); |
1263 __ TryAllocate(double_class, &fall_through, V0); // Result register. | 1263 __ TryAllocate(double_class, &fall_through, V0); // Result register. |
1264 __ swc1(F0, FieldAddress(V0, Double::value_offset())); | 1264 __ swc1(F0, FieldAddress(V0, Double::value_offset())); |
1265 __ Ret(); | 1265 __ Ret(); |
1266 __ delay_slot()->swc1(F1, | 1266 __ delay_slot()->swc1(F1, |
1267 FieldAddress(V0, Double::value_offset() + kWordSize)); | 1267 FieldAddress(V0, Double::value_offset() + kWordSize)); |
1268 __ Bind(&fall_through); | 1268 __ Bind(&fall_through); |
1269 } | 1269 } |
1270 | 1270 |
1271 | 1271 |
1272 void Intrinsifier::Double_fromInteger(Assembler* assembler) { | 1272 void Intrinsifier::Double_fromInteger(Assembler* assembler) { |
1273 Label fall_through; | 1273 Label fall_through; |
1274 | 1274 |
1275 __ lw(T0, Address(SP, 0 * kWordSize)); | 1275 __ lw(T0, Address(SP, 0 * kWordSize)); |
1276 __ andi(CMPRES, T0, Immediate(kSmiTagMask)); | 1276 __ andi(CMPRES1, T0, Immediate(kSmiTagMask)); |
1277 __ bne(CMPRES, ZR, &fall_through); | 1277 __ bne(CMPRES1, ZR, &fall_through); |
1278 | 1278 |
1279 // Is Smi. | 1279 // Is Smi. |
1280 __ SmiUntag(T0); | 1280 __ SmiUntag(T0); |
1281 __ mtc1(T0, F4); | 1281 __ mtc1(T0, F4); |
1282 __ cvtdw(D0, F4); | 1282 __ cvtdw(D0, F4); |
1283 const Class& double_class = Class::Handle( | 1283 const Class& double_class = Class::Handle( |
1284 Isolate::Current()->object_store()->double_class()); | 1284 Isolate::Current()->object_store()->double_class()); |
1285 __ TryAllocate(double_class, &fall_through, V0); // Result register. | 1285 __ TryAllocate(double_class, &fall_through, V0); // Result register. |
1286 __ swc1(F0, FieldAddress(V0, Double::value_offset())); | 1286 __ swc1(F0, FieldAddress(V0, Double::value_offset())); |
(...skipping 40 matching lines...) |
1327 __ Ret(); | 1327 __ Ret(); |
1328 | 1328 |
1329 __ Bind(&is_false); | 1329 __ Bind(&is_false); |
1330 __ LoadObject(V0, Bool::False()); | 1330 __ LoadObject(V0, Bool::False()); |
1331 __ Ret(); | 1331 __ Ret(); |
1332 | 1332 |
1333 __ Bind(&is_zero); | 1333 __ Bind(&is_zero); |
1334 // Check for negative zero by looking at the sign bit. | 1334 // Check for negative zero by looking at the sign bit. |
1335 __ mfc1(T0, F1); // Moves bits 32...63 of D0 to T0. | 1335 __ mfc1(T0, F1); // Moves bits 32...63 of D0 to T0. |
1336 __ srl(T0, T0, 31); // Get the sign bit down to bit 0 of T0. | 1336 __ srl(T0, T0, 31); // Get the sign bit down to bit 0 of T0. |
1337 __ andi(CMPRES, T0, Immediate(1)); // Check if the bit is set. | 1337 __ andi(CMPRES1, T0, Immediate(1)); // Check if the bit is set. |
1338 __ bne(CMPRES, ZR, &is_true); // Sign bit set. True. | 1338 __ bne(CMPRES1, ZR, &is_true); // Sign bit set. True. |
1339 __ b(&is_false); | 1339 __ b(&is_false); |
1340 } | 1340 } |
1341 | 1341 |
1342 | 1342 |
1343 void Intrinsifier::Double_toInt(Assembler* assembler) { | 1343 void Intrinsifier::Double_toInt(Assembler* assembler) { |
1344 __ lw(T0, Address(SP, 0 * kWordSize)); | 1344 __ lw(T0, Address(SP, 0 * kWordSize)); |
1345 __ LoadDFromOffset(D0, T0, Double::value_offset() - kHeapObjectTag); | 1345 __ LoadDFromOffset(D0, T0, Double::value_offset() - kHeapObjectTag); |
1346 | 1346 |
1347 __ cvtwd(F2, D0); | 1347 __ cvtwd(F2, D0); |
1348 __ mfc1(V0, F2); | 1348 __ mfc1(V0, F2); |
1349 | 1349 |
1350 // Overflow is signaled with minint. | 1350 // Overflow is signaled with minint. |
1351 Label fall_through; | 1351 Label fall_through; |
1352 // Check for overflow and that it fits into Smi. | 1352 // Check for overflow and that it fits into Smi. |
1353 __ LoadImmediate(TMP, 0xC0000000); | 1353 __ LoadImmediate(TMP, 0xC0000000); |
1354 __ subu(CMPRES, V0, TMP); | 1354 __ subu(CMPRES1, V0, TMP); |
1355 __ bltz(CMPRES, &fall_through); | 1355 __ bltz(CMPRES1, &fall_through); |
1356 __ Ret(); | 1356 __ Ret(); |
1357 __ delay_slot()->SmiTag(V0); | 1357 __ delay_slot()->SmiTag(V0); |
1358 __ Bind(&fall_through); | 1358 __ Bind(&fall_through); |
1359 } | 1359 } |
1360 | 1360 |
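The LoadImmediate/subu/bltz sequence in Double_toInt is a combined overflow-and-range test; a sketch, assuming the 32-bit Smi range [-2^30, 2^30):

    // v - 0xC0000000 equals v + 2^30 mod 2^32; that sum is non-negative
    // exactly for valid Smis, and cvtwd's overflow marker (minint) fails it.
    static inline bool FitsInSmi32(int32_t v) {
      return static_cast<int32_t>(static_cast<uint32_t>(v) + 0x40000000u) >= 0;
    }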
1361 | 1361 |
1362 void Intrinsifier::Math_sqrt(Assembler* assembler) { | 1362 void Intrinsifier::Math_sqrt(Assembler* assembler) { |
1363 Label fall_through, is_smi, double_op; | 1363 Label fall_through, is_smi, double_op; |
1364 TestLastArgumentIsDouble(assembler, &is_smi, &fall_through); | 1364 TestLastArgumentIsDouble(assembler, &is_smi, &fall_through); |
1365 // Argument is double and is in T0. | 1365 // Argument is double and is in T0. |
(...skipping 104 matching lines...) |
1470 } | 1470 } |
1471 | 1471 |
1472 | 1472 |
1473 void Intrinsifier::String_codeUnitAt(Assembler* assembler) { | 1473 void Intrinsifier::String_codeUnitAt(Assembler* assembler) { |
1474 Label fall_through, try_two_byte_string; | 1474 Label fall_through, try_two_byte_string; |
1475 | 1475 |
1476 __ lw(T1, Address(SP, 0 * kWordSize)); // Index. | 1476 __ lw(T1, Address(SP, 0 * kWordSize)); // Index. |
1477 __ lw(T0, Address(SP, 1 * kWordSize)); // String. | 1477 __ lw(T0, Address(SP, 1 * kWordSize)); // String. |
1478 | 1478 |
1479 // Checks. | 1479 // Checks. |
1480 __ andi(CMPRES, T1, Immediate(kSmiTagMask)); | 1480 __ andi(CMPRES1, T1, Immediate(kSmiTagMask)); |
1481 __ bne(CMPRES, ZR, &fall_through); // Index is not a Smi. | 1481 __ bne(CMPRES1, ZR, &fall_through); // Index is not a Smi. |
1482 __ lw(T2, FieldAddress(T0, String::length_offset())); // Range check. | 1482 __ lw(T2, FieldAddress(T0, String::length_offset())); // Range check. |
1483 // Runtime throws exception. | 1483 // Runtime throws exception. |
1484 __ BranchUnsignedGreaterEqual(T1, T2, &fall_through); | 1484 __ BranchUnsignedGreaterEqual(T1, T2, &fall_through); |
1485 __ LoadClassId(CMPRES1, T0); // Class ID check. | 1485 __ LoadClassId(CMPRES1, T0); // Class ID check. |
1486 __ BranchNotEqual(CMPRES1, kOneByteStringCid, &try_two_byte_string); | 1486 __ BranchNotEqual(CMPRES1, kOneByteStringCid, &try_two_byte_string); |
1487 | 1487 |
1488 // Grab byte and return. | 1488 // Grab byte and return. |
1489 __ SmiUntag(T1); | 1489 __ SmiUntag(T1); |
1490 __ addu(T2, T0, T1); | 1490 __ addu(T2, T0, T1); |
(...skipping 110 matching lines...) |
1601 __ LoadImmediate(TMP, ~(kObjectAlignment - 1)); | 1601 __ LoadImmediate(TMP, ~(kObjectAlignment - 1)); |
1602 __ and_(length_reg, length_reg, TMP); | 1602 __ and_(length_reg, length_reg, TMP); |
1603 | 1603 |
1604 Isolate* isolate = Isolate::Current(); | 1604 Isolate* isolate = Isolate::Current(); |
1605 Heap* heap = isolate->heap(); | 1605 Heap* heap = isolate->heap(); |
1606 | 1606 |
1607 __ LoadImmediate(T3, heap->TopAddress()); | 1607 __ LoadImmediate(T3, heap->TopAddress()); |
1608 __ lw(V0, Address(T3, 0)); | 1608 __ lw(V0, Address(T3, 0)); |
1609 | 1609 |
1610 // length_reg: allocation size. | 1610 // length_reg: allocation size. |
1611 __ AdduDetectOverflow(T1, V0, length_reg, CMPRES); | 1611 __ AdduDetectOverflow(T1, V0, length_reg, CMPRES1); |
1612 __ bltz(CMPRES, failure); // Fail on overflow. | 1612 __ bltz(CMPRES1, failure); // Fail on overflow. |
1613 | 1613 |
1614 // Check if the allocation fits into the remaining space. | 1614 // Check if the allocation fits into the remaining space. |
1615 // V0: potential new object start. | 1615 // V0: potential new object start. |
1616 // T1: potential next object start. | 1616 // T1: potential next object start. |
1617 // T2: allocation size. | 1617 // T2: allocation size. |
1618 // T3: heap->TopAddress(). | 1618 // T3: heap->TopAddress(). |
1619 __ LoadImmediate(T4, heap->EndAddress()); | 1619 __ LoadImmediate(T4, heap->EndAddress()); |
1620 __ lw(T4, Address(T4, 0)); | 1620 __ lw(T4, Address(T4, 0)); |
1621 __ BranchUnsignedGreaterEqual(T1, T4, failure); | 1621 __ BranchUnsignedGreaterEqual(T1, T4, failure); |
1622 | 1622 |
(...skipping 14 matching lines...) |
1637 | 1637 |
1638 __ BranchUnsignedGreater(T2, RawObject::SizeTag::kMaxSizeTag, &overflow); | 1638 __ BranchUnsignedGreater(T2, RawObject::SizeTag::kMaxSizeTag, &overflow); |
1639 __ b(&done); | 1639 __ b(&done); |
1640 __ delay_slot()->sll(T2, T2, shift); | 1640 __ delay_slot()->sll(T2, T2, shift); |
1641 __ Bind(&overflow); | 1641 __ Bind(&overflow); |
1642 __ mov(T2, ZR); | 1642 __ mov(T2, ZR); |
1643 __ Bind(&done); | 1643 __ Bind(&done); |
1644 | 1644 |
1645 // Get the class index and insert it into the tags. | 1645 // Get the class index and insert it into the tags. |
1646 // T2: size and bit tags. | 1646 // T2: size and bit tags. |
1647 __ LoadImmediate(TMP1, RawObject::ClassIdTag::encode(cls.id())); | 1647 __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cls.id())); |
1648 __ or_(T2, T2, TMP1); | 1648 __ or_(T2, T2, TMP); |
1649 __ sw(T2, FieldAddress(V0, String::tags_offset())); // Store tags. | 1649 __ sw(T2, FieldAddress(V0, String::tags_offset())); // Store tags. |
1650 } | 1650 } |
1651 | 1651 |
1652 // Set the length field using the saved length (T6). | 1652 // Set the length field using the saved length (T6). |
1653 __ StoreIntoObjectNoBarrier(V0, | 1653 __ StoreIntoObjectNoBarrier(V0, |
1654 FieldAddress(V0, String::length_offset()), | 1654 FieldAddress(V0, String::length_offset()), |
1655 T6); | 1655 T6); |
1656 // Clear hash. | 1656 // Clear hash. |
1657 __ b(ok); | 1657 __ b(ok); |
1658 __ delay_slot()->sw(ZR, FieldAddress(V0, String::hash_offset())); | 1658 __ delay_slot()->sw(ZR, FieldAddress(V0, String::hash_offset())); |
1659 } | 1659 } |
1660 | 1660 |
1661 | 1661 |
1662 // Arg0: OneByteString (receiver). | 1662 // Arg0: OneByteString (receiver). |
1663 // Arg1: Start index as Smi. | 1663 // Arg1: Start index as Smi. |
1664 // Arg2: End index as Smi. | 1664 // Arg2: End index as Smi. |
1665 // The indexes must be valid. | 1665 // The indexes must be valid. |
1666 void Intrinsifier::OneByteString_substringUnchecked(Assembler* assembler) { | 1666 void Intrinsifier::OneByteString_substringUnchecked(Assembler* assembler) { |
1667 const intptr_t kStringOffset = 2 * kWordSize; | 1667 const intptr_t kStringOffset = 2 * kWordSize; |
1668 const intptr_t kStartIndexOffset = 1 * kWordSize; | 1668 const intptr_t kStartIndexOffset = 1 * kWordSize; |
1669 const intptr_t kEndIndexOffset = 0 * kWordSize; | 1669 const intptr_t kEndIndexOffset = 0 * kWordSize; |
1670 Label fall_through, ok; | 1670 Label fall_through, ok; |
1671 | 1671 |
1672 __ lw(T2, Address(SP, kEndIndexOffset)); | 1672 __ lw(T2, Address(SP, kEndIndexOffset)); |
1673 __ lw(TMP, Address(SP, kStartIndexOffset)); | 1673 __ lw(TMP, Address(SP, kStartIndexOffset)); |
1674 __ or_(CMPRES, T2, TMP); | 1674 __ or_(CMPRES1, T2, TMP); |
1675 __ andi(CMPRES, CMPRES, Immediate(kSmiTagMask)); | 1675 __ andi(CMPRES1, CMPRES1, Immediate(kSmiTagMask)); |
1676 __ bne(CMPRES, ZR, &fall_through); // 'start', 'end' not Smi. | 1676 __ bne(CMPRES1, ZR, &fall_through); // 'start', 'end' not Smi. |
1677 | 1677 |
1678 __ subu(T2, T2, TMP); | 1678 __ subu(T2, T2, TMP); |
1679 TryAllocateOnebyteString(assembler, &ok, &fall_through); | 1679 TryAllocateOnebyteString(assembler, &ok, &fall_through); |
1680 __ Bind(&ok); | 1680 __ Bind(&ok); |
1681 // V0: new string as tagged pointer. | 1681 // V0: new string as tagged pointer. |
1682 // Copy string. | 1682 // Copy string. |
1683 __ lw(T3, Address(SP, kStringOffset)); | 1683 __ lw(T3, Address(SP, kStringOffset)); |
1684 __ lw(T1, Address(SP, kStartIndexOffset)); | 1684 __ lw(T1, Address(SP, kStartIndexOffset)); |
1685 __ SmiUntag(T1); | 1685 __ SmiUntag(T1); |
1686 __ addu(T3, T3, T1); | 1686 __ addu(T3, T3, T1); |
(...skipping 58 matching lines...) |
1745 // TODO(srdjan): Add combinations (one-byte/two-byte/external strings). | 1745 // TODO(srdjan): Add combinations (one-byte/two-byte/external strings). |
1746 void StringEquality(Assembler* assembler, intptr_t string_cid) { | 1746 void StringEquality(Assembler* assembler, intptr_t string_cid) { |
1747 Label fall_through, is_true, is_false, loop; | 1747 Label fall_through, is_true, is_false, loop; |
1748 __ lw(T0, Address(SP, 1 * kWordSize)); // This. | 1748 __ lw(T0, Address(SP, 1 * kWordSize)); // This. |
1749 __ lw(T1, Address(SP, 0 * kWordSize)); // Other. | 1749 __ lw(T1, Address(SP, 0 * kWordSize)); // Other. |
1750 | 1750 |
1751 // Are identical? | 1751 // Are identical? |
1752 __ beq(T0, T1, &is_true); | 1752 __ beq(T0, T1, &is_true); |
1753 | 1753 |
1754 // Is other OneByteString? | 1754 // Is other OneByteString? |
1755 __ andi(CMPRES, T1, Immediate(kSmiTagMask)); | 1755 __ andi(CMPRES1, T1, Immediate(kSmiTagMask)); |
1756 __ beq(CMPRES, ZR, &fall_through); // Other is Smi. | 1756 __ beq(CMPRES1, ZR, &fall_through); // Other is Smi. |
1757 __ LoadClassId(CMPRES1, T1); // Class ID check. | 1757 __ LoadClassId(CMPRES1, T1); // Class ID check. |
1758 __ BranchNotEqual(CMPRES1, string_cid, &fall_through); | 1758 __ BranchNotEqual(CMPRES1, string_cid, &fall_through); |
1759 | 1759 |
1760 // Have same length? | 1760 // Have same length? |
1761 __ lw(T2, FieldAddress(T0, String::length_offset())); | 1761 __ lw(T2, FieldAddress(T0, String::length_offset())); |
1762 __ lw(T3, FieldAddress(T1, String::length_offset())); | 1762 __ lw(T3, FieldAddress(T1, String::length_offset())); |
1763 __ bne(T2, T3, &is_false); | 1763 __ bne(T2, T3, &is_false); |
1764 | 1764 |
1765 // Check contents, no fall-through possible. | 1765 // Check contents, no fall-through possible. |
1766 ASSERT((string_cid == kOneByteStringCid) || | 1766 ASSERT((string_cid == kOneByteStringCid) || |
(...skipping 34 matching lines...) |
1801 } | 1801 } |
1802 | 1802 |
1803 | 1803 |
1804 void Intrinsifier::TwoByteString_equality(Assembler* assembler) { | 1804 void Intrinsifier::TwoByteString_equality(Assembler* assembler) { |
1805 StringEquality(assembler, kTwoByteStringCid); | 1805 StringEquality(assembler, kTwoByteStringCid); |
1806 } | 1806 } |
1807 | 1807 |
1808 } // namespace dart | 1808 } // namespace dart |
1809 | 1809 |
1810 #endif // defined TARGET_ARCH_MIPS | 1810 #endif // defined TARGET_ARCH_MIPS |