Chromium Code Reviews

Unified diff: runtime/vm/intrinsifier_mips.cc

Issue 59613005: Merge (x & y) == 0 pattern to emit a single test instruction. (Closed) Base URL: http://dart.googlecode.com/svn/branches/bleeding_edge/dart/
Patch Set: Created 7 years, 1 month ago
// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/globals.h"  // Needed here to get TARGET_ARCH_MIPS.
#if defined(TARGET_ARCH_MIPS)

#include "vm/intrinsifier.h"

#include "vm/assembler.h"
(...skipping 13 matching lines...)
  const intptr_t kTypeArgumentsOffset = 1 * kWordSize;
  const intptr_t kArrayLengthOffset = 0 * kWordSize;
  Label fall_through;

  // Compute the size to be allocated: it is based on the array length
  // and is computed as:
  // RoundedAllocationSize((array_length * kWordSize) + sizeof(RawArray)).
  __ lw(T3, Address(SP, kArrayLengthOffset));  // Array length.

  // Check that length is a positive Smi.
-  __ andi(CMPRES, T3, Immediate(kSmiTagMask));
-  __ bne(CMPRES, ZR, &fall_through);
+  __ andi(CMPRES1, T3, Immediate(kSmiTagMask));
+  __ bne(CMPRES1, ZR, &fall_through);
  __ bltz(T3, &fall_through);

  // Check for maximum allowed length.
  const intptr_t max_len =
      reinterpret_cast<int32_t>(Smi::New(Array::kMaxElements));
  __ BranchUnsignedGreater(T3, max_len, &fall_through);

  const intptr_t fixed_size = sizeof(RawArray) + kObjectAlignment - 1;
  __ LoadImmediate(T2, fixed_size);
  __ sll(T3, T3, 1);  // T3 is a Smi.
  __ addu(T2, T2, T3);
  ASSERT(kSmiTagShift == 1);
  __ LoadImmediate(T3, ~(kObjectAlignment - 1));
  __ and_(T2, T2, T3);

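The LoadImmediate/sll/addu/and_ sequence above is the standard align-up idiom. A minimal runnable sketch of the computation, with kWordSize and kObjectAlignment as assumed values for illustration:

  #include <cstdint>

  static const intptr_t kWordSize = 4;         // 32-bit MIPS word (assumed).
  static const intptr_t kObjectAlignment = 8;  // Assumed allocation granularity.

  // Mirrors the emitted code: size = length * kWordSize + header, then add
  // (alignment - 1) and mask off the low bits to round up to the next
  // multiple of kObjectAlignment.
  intptr_t RoundedAllocationSize(intptr_t array_length, intptr_t header_size) {
    intptr_t size = array_length * kWordSize + header_size;
    return (size + kObjectAlignment - 1) & ~(kObjectAlignment - 1);
  }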
  // T2: Allocation size.

  Isolate* isolate = Isolate::Current();
  Heap* heap = isolate->heap();

  __ LoadImmediate(T3, heap->TopAddress());
  __ lw(T0, Address(T3, 0));  // Potential new object start.

-  __ AdduDetectOverflow(T1, T0, T2, CMPRES);  // Potential next object start.
-  __ bltz(CMPRES, &fall_through);  // CMPRES < 0 on overflow.
+  __ AdduDetectOverflow(T1, T0, T2, CMPRES1);  // Potential next object start.
+  __ bltz(CMPRES1, &fall_through);  // CMPRES1 < 0 on overflow.

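AdduDetectOverflow writes a negative value into the detect register on signed overflow, which the bltz then catches. A minimal sketch of the underlying test, not the VM's actual implementation:

  #include <cstdint>

  // res = a + b with wraparound; signed overflow occurred iff a and b share
  // a sign and res has the opposite sign, i.e. (res ^ a) & (res ^ b) has
  // bit 31 set. The caller branches on flag < 0, matching the bltz above.
  int32_t AddDetectOverflow(int32_t a, int32_t b, int32_t* flag) {
    int32_t res = static_cast<int32_t>(
        static_cast<uint32_t>(a) + static_cast<uint32_t>(b));
    *flag = (res ^ a) & (res ^ b);
    return res;
  }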
  // Check if the allocation fits into the remaining space.
  // T0: potential new object start.
  // T1: potential next object start.
  // T2: allocation size.
  __ LoadImmediate(T4, heap->TopAddress());
  __ lw(T4, Address(T4, 0));
  __ BranchUnsignedGreaterEqual(T1, T4, &fall_through);

  // Successfully allocated the object(s), now update top to point to
(...skipping 12 matching lines...)

    __ BranchUnsignedGreater(T2, RawObject::SizeTag::kMaxSizeTag, &overflow);
    __ b(&done);
    __ delay_slot()->sll(T2, T2, shift);
    __ Bind(&overflow);
    __ mov(T2, ZR);
    __ Bind(&done);

    // Get the class index and insert it into the tags.
    // T2: size and bit tags.
-    __ LoadImmediate(TMP1, RawObject::ClassIdTag::encode(cls.id()));
-    __ or_(T2, T2, TMP1);
+    __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cls.id()));
+    __ or_(T2, T2, TMP);
    __ sw(T2, FieldAddress(T0, Array::tags_offset()));  // Store tags.
  }
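The tags word packs the object size and class id into bit fields; a size too large for SizeTag is stored as 0 and recovered elsewhere. A hedged sketch of the encoding, where the field positions below are illustrative assumptions, not the VM's actual layout:

  #include <cstdint>

  static const uint32_t kSizeTagPos = 2;      // Assumed field position.
  static const uint32_t kClassIdTagPos = 16;  // Assumed field position.

  uint32_t EncodeTags(uint32_t size, uint32_t class_id, uint32_t max_size_tag) {
    // Matches the branch above: sizes beyond the field's capacity encode as 0.
    uint32_t size_bits = (size > max_size_tag) ? 0 : (size << kSizeTagPos);
    return size_bits | (class_id << kClassIdTagPos);  // The or_(T2, T2, TMP).
  }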

  // T0: new object start as a tagged pointer.
  // T1: new object end address.
  // Store the type argument field.
  __ lw(T2, Address(SP, kTypeArgumentsOffset));  // Type argument.
  __ StoreIntoObjectNoBarrier(T0,
                              FieldAddress(T0, Array::type_arguments_offset()),
                              T2);
(...skipping 38 matching lines...)
void Intrinsifier::ImmutableList_getLength(Assembler* assembler) {
  return Array_getLength(assembler);
}


void Intrinsifier::Array_getIndexed(Assembler* assembler) {
  Label fall_through;

  __ lw(T0, Address(SP, + 0 * kWordSize));  // Index.

-  __ andi(CMPRES, T0, Immediate(kSmiTagMask));
-  __ bne(CMPRES, ZR, &fall_through);  // Index is not a Smi, fall through.
+  __ andi(CMPRES1, T0, Immediate(kSmiTagMask));
+  __ bne(CMPRES1, ZR, &fall_through);  // Index is not a Smi, fall through.
  __ delay_slot()->lw(T1, Address(SP, + 1 * kWordSize));  // Array.

  // Range check.
  __ lw(T2, FieldAddress(T1, Array::length_offset()));
  __ BranchUnsignedGreaterEqual(T0, T2, &fall_through);

  ASSERT(kSmiTagShift == 1);
  // Array element at T1 + T0*2 + Array::data_offset - 1.
  __ sll(T2, T0, 1);
  __ addu(T2, T1, T2);
(...skipping 45 matching lines...)
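The `T1 + T0*2 + data_offset - 1` addressing relies on Smi tagging: with kSmiTagShift == 1 a Smi holds value << 1 with tag bit 0 clear, so a tagged index is already index * 2, and the trailing -1 cancels the heap-object tag bit of the pointer. A minimal sketch of those conventions (constants taken from the ASSERTs in this file):

  #include <cstdint>

  static const intptr_t kSmiTagMask = 1;   // Bit 0 is the Smi tag.
  static const intptr_t kSmiTagShift = 1;  // Per ASSERT(kSmiTagShift == 1).

  bool IsSmi(intptr_t raw) { return (raw & kSmiTagMask) == 0; }
  intptr_t SmiValue(intptr_t raw) { return raw >> kSmiTagShift; }

  // For 4-byte elements: byte offset = index * 4 = tagged_index * 2, which
  // is why the emitted code shifts the still-tagged index left by one.
  intptr_t ElementByteOffset(intptr_t tagged_index) {
    return tagged_index << 1;
  }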
    // Check if it's dynamic.
    // For now handle only TypeArguments and bail out if InstantiatedTypeArgs.
    __ LoadClassId(CMPRES1, T1);
    __ BranchNotEqual(CMPRES1, kTypeArgumentsCid, &fall_through);

    // Get type at index 0.
    __ lw(T0, FieldAddress(T1, TypeArguments::type_at_offset(0)));
    __ BranchEqual(T0, Type::ZoneHandle(Type::DynamicType()), &checked_ok);

    // Check for int and num.
-    __ andi(CMPRES, T2, Immediate(kSmiTagMask));
-    __ bne(CMPRES, ZR, &fall_through);  // Non-smi value.
+    __ andi(CMPRES1, T2, Immediate(kSmiTagMask));
+    __ bne(CMPRES1, ZR, &fall_through);  // Non-smi value.

    __ BranchEqual(T0, Type::ZoneHandle(Type::IntType()), &checked_ok);
    __ BranchNotEqual(T0, Type::ZoneHandle(Type::Number()), &fall_through);
    __ Bind(&checked_ok);
  }
  __ lw(T1, Address(SP, 1 * kWordSize));  // Index.
-  __ andi(CMPRES, T1, Immediate(kSmiTagMask));
+  __ andi(CMPRES1, T1, Immediate(kSmiTagMask));
  // Index not Smi.
-  __ bne(CMPRES, ZR, &fall_through);
+  __ bne(CMPRES1, ZR, &fall_through);

  __ lw(T0, Address(SP, 2 * kWordSize));  // Array.
  // Range check.
  __ lw(T3, FieldAddress(T0, Array::length_offset()));  // Array length.
  // Runtime throws exception.
  __ BranchUnsignedGreaterEqual(T1, T3, &fall_through);

  // Note that T1 is Smi, i.e., times 2.
  ASSERT(kSmiTagShift == 1);
  __ lw(T2, Address(SP, 0 * kWordSize));  // Value.
(...skipping 89 matching lines...)
  __ Ret();
  __ delay_slot()->lw(V0, FieldAddress(V0, Array::length_offset()));
}


void Intrinsifier::GrowableList_getIndexed(Assembler* assembler) {
  Label fall_through;

  __ lw(T0, Address(SP, 0 * kWordSize));  // Index.

-  __ andi(CMPRES, T0, Immediate(kSmiTagMask));
-  __ bne(CMPRES, ZR, &fall_through);  // Index is not a Smi, fall through.
+  __ andi(CMPRES1, T0, Immediate(kSmiTagMask));
+  __ bne(CMPRES1, ZR, &fall_through);  // Index is not a Smi, fall through.
  __ delay_slot()->lw(T1, Address(SP, 1 * kWordSize));  // Array.

  // Range check.
  __ lw(T2, FieldAddress(T1, GrowableObjectArray::length_offset()));
  __ BranchUnsignedGreaterEqual(T0, T2, &fall_through);

  __ lw(T2, FieldAddress(T1, GrowableObjectArray::data_offset()));  // Data.

  ASSERT(kSmiTagShift == 1);
  // Array element at T2 + T0 * 2 + Array::data_offset - 1.
  __ sll(T3, T0, 1);
  __ addu(T2, T2, T3);
  __ Ret();
  __ delay_slot()->lw(V0, FieldAddress(T2, Array::data_offset()));
  __ Bind(&fall_through);
}


// Set value into growable object array at specified index.
// On stack: growable array (+2), index (+1), value (+0).
void Intrinsifier::GrowableList_setIndexed(Assembler* assembler) {
  if (FLAG_enable_type_checks) {
    return;
  }
  Label fall_through;
  __ lw(T1, Address(SP, 1 * kWordSize));  // Index.
-  __ andi(CMPRES, T1, Immediate(kSmiTagMask));
-  __ bne(CMPRES, ZR, &fall_through);  // Non-smi index.
+  __ andi(CMPRES1, T1, Immediate(kSmiTagMask));
+  __ bne(CMPRES1, ZR, &fall_through);  // Non-smi index.
  __ delay_slot()->lw(T0, Address(SP, 2 * kWordSize));  // GrowableArray.
  // Range check using _length field.
  __ lw(T2, FieldAddress(T0, GrowableObjectArray::length_offset()));
  // Runtime throws exception.
  __ BranchUnsignedGreaterEqual(T1, T2, &fall_through);
  __ lw(T0, FieldAddress(T0, GrowableObjectArray::data_offset()));  // Data.
  __ lw(T2, Address(SP, 0 * kWordSize));  // Value.
  // Note that T1 is Smi, i.e., times 2.
  ASSERT(kSmiTagShift == 1);
  __ sll(T1, T1, 1);
  __ addu(T1, T0, T1);
  __ StoreIntoObject(T0,
                     FieldAddress(T1, Array::data_offset()),
                     T2);
  __ Ret();
  __ Bind(&fall_through);
}


// Set length of growable object array. The length cannot
// be greater than the length of the data container.
// On stack: growable array (+1), length (+0).
void Intrinsifier::GrowableList_setLength(Assembler* assembler) {
  Label fall_through;
  __ lw(T1, Address(SP, 0 * kWordSize));  // Length value.
-  __ andi(CMPRES, T1, Immediate(kSmiTagMask));
-  __ bne(CMPRES, ZR, &fall_through);  // Non-smi length.
+  __ andi(CMPRES1, T1, Immediate(kSmiTagMask));
+  __ bne(CMPRES1, ZR, &fall_through);  // Non-smi length.
  __ delay_slot()->lw(T0, Address(SP, 1 * kWordSize));  // Growable array.
  __ Ret();
  __ delay_slot()->sw(T1,
                      FieldAddress(T0, GrowableObjectArray::length_offset()));
  __ Bind(&fall_through);
}


// Set data of growable object array.
// On stack: growable array (+1), data (+0).
void Intrinsifier::GrowableList_setData(Assembler* assembler) {
  if (FLAG_enable_type_checks) {
    return;
  }
  Label fall_through;
  __ lw(T1, Address(SP, 0 * kWordSize));  // Data.
  // Check that data is an ObjectArray.
-  __ andi(CMPRES, T1, Immediate(kSmiTagMask));
-  __ beq(CMPRES, ZR, &fall_through);  // Data is Smi.
+  __ andi(CMPRES1, T1, Immediate(kSmiTagMask));
+  __ beq(CMPRES1, ZR, &fall_through);  // Data is Smi.
  __ LoadClassId(CMPRES1, T1);
  __ BranchNotEqual(CMPRES1, kArrayCid, &fall_through);
  __ lw(T0, Address(SP, 1 * kWordSize));  // Growable array.
  __ StoreIntoObject(T0,
                     FieldAddress(T0, GrowableObjectArray::data_offset()),
                     T1);
  __ Ret();
  __ Bind(&fall_through);
}

(...skipping 31 matching lines...)
  __ Bind(&fall_through);
}


#define TYPED_ARRAY_ALLOCATION(type_name, cid, max_len, scale_shift)  \
  Label fall_through;  \
  const intptr_t kArrayLengthStackOffset = 0 * kWordSize;  \
  __ lw(T2, Address(SP, kArrayLengthStackOffset));  /* Array length. */  \
  /* Check that length is a positive Smi. */  \
  /* T2: requested array length argument. */  \
-  __ andi(CMPRES, T2, Immediate(kSmiTagMask));  \
-  __ bne(CMPRES, ZR, &fall_through);  \
+  __ andi(CMPRES1, T2, Immediate(kSmiTagMask));  \
+  __ bne(CMPRES1, ZR, &fall_through);  \
  __ BranchSignedLess(T2, 0, &fall_through);  \
  __ SmiUntag(T2);  \
  /* Check for maximum allowed length. */  \
  /* T2: untagged array length. */  \
  __ BranchSignedGreater(T2, max_len, &fall_through);  \
  __ sll(T2, T2, scale_shift);  \
  const intptr_t fixed_size = sizeof(Raw##type_name) + kObjectAlignment - 1;  \
  __ AddImmediate(T2, fixed_size);  \
  __ LoadImmediate(TMP, -kObjectAlignment);  \
  __ and_(T2, T2, TMP);  \
  Heap* heap = Isolate::Current()->heap();  \
  \
  __ LoadImmediate(V0, heap->TopAddress());  \
  __ lw(V0, Address(V0, 0));  \
  \
  /* T2: allocation size. */  \
-  __ AdduDetectOverflow(T1, V0, T2, CMPRES);  \
-  __ bltz(CMPRES, &fall_through);  \
+  __ AdduDetectOverflow(T1, V0, T2, CMPRES1);  \
+  __ bltz(CMPRES1, &fall_through);  \
  \
  /* Check if the allocation fits into the remaining space. */  \
  /* V0: potential new object start. */  \
  /* T1: potential next object start. */  \
  /* T2: allocation size. */  \
  __ LoadImmediate(T3, heap->EndAddress());  \
  __ lw(T3, Address(T3, 0));  \
  __ BranchUnsignedGreaterEqual(T1, T3, &fall_through);  \
  \
  /* Successfully allocated the object(s), now update top to point to */  \
(...skipping 84 matching lines...)
}
CLASS_LIST_TYPED_DATA(TYPED_DATA_ALLOCATOR)
#undef TYPED_DATA_ALLOCATOR


// Loads args from stack into T0 and T1.
// Tests if they are Smis, jumps to label not_smi if not.
static void TestBothArgumentsSmis(Assembler* assembler, Label* not_smi) {
  __ lw(T0, Address(SP, 0 * kWordSize));
  __ lw(T1, Address(SP, 1 * kWordSize));
-  __ or_(CMPRES, T0, T1);
-  __ andi(CMPRES, CMPRES, Immediate(kSmiTagMask));
-  __ bne(CMPRES, ZR, not_smi);
+  __ or_(CMPRES1, T0, T1);
+  __ andi(CMPRES1, CMPRES1, Immediate(kSmiTagMask));
+  __ bne(CMPRES1, ZR, not_smi);
  return;
}
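TestBothArgumentsSmis checks both tags at once: OR-ing the two values and masking bit 0 leaves zero only when both are Smis, the same merged single-test shape this CL's title describes for the (x & y) == 0 pattern. A minimal sketch:

  #include <cstdint>

  static const intptr_t kSmiTagMask = 1;

  // Bit 0 of (a | b) is set iff at least one operand carries a set tag bit,
  // so one andi/bne pair replaces two separate tag checks.
  bool BothAreSmis(intptr_t a, intptr_t b) {
    return ((a | b) & kSmiTagMask) == 0;
  }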


void Intrinsifier::Integer_addFromInteger(Assembler* assembler) {
  Label fall_through;

  TestBothArgumentsSmis(assembler, &fall_through);  // Checks two Smis.
-  __ AdduDetectOverflow(V0, T0, T1, CMPRES);  // Add.
-  __ bltz(CMPRES, &fall_through);  // Fall through on overflow.
+  __ AdduDetectOverflow(V0, T0, T1, CMPRES1);  // Add.
+  __ bltz(CMPRES1, &fall_through);  // Fall through on overflow.
  __ Ret();  // Nothing in branch delay slot.
  __ Bind(&fall_through);
}


void Intrinsifier::Integer_add(Assembler* assembler) {
  return Integer_addFromInteger(assembler);
}


void Intrinsifier::Integer_subFromInteger(Assembler* assembler) {
  Label fall_through;

  TestBothArgumentsSmis(assembler, &fall_through);
-  __ SubuDetectOverflow(V0, T0, T1, CMPRES);  // Subtract.
-  __ bltz(CMPRES, &fall_through);  // Fall through on overflow.
+  __ SubuDetectOverflow(V0, T0, T1, CMPRES1);  // Subtract.
+  __ bltz(CMPRES1, &fall_through);  // Fall through on overflow.
  __ Ret();
  __ Bind(&fall_through);
}


void Intrinsifier::Integer_sub(Assembler* assembler) {
  Label fall_through;

  TestBothArgumentsSmis(assembler, &fall_through);
-  __ SubuDetectOverflow(V0, T1, T0, CMPRES);  // Subtract.
-  __ bltz(CMPRES, &fall_through);  // Fall through on overflow.
+  __ SubuDetectOverflow(V0, T1, T0, CMPRES1);  // Subtract.
+  __ bltz(CMPRES1, &fall_through);  // Fall through on overflow.
  __ Ret();  // Nothing in branch delay slot.
  __ Bind(&fall_through);
}


void Intrinsifier::Integer_mulFromInteger(Assembler* assembler) {
  Label fall_through;

  TestBothArgumentsSmis(assembler, &fall_through);  // Checks two Smis.
  __ SmiUntag(T0);  // Untags T0; we only want the result shifted by one.
(...skipping 58 matching lines...)
//     res = res - right;
//   } else {
//     res = res + right;
//   }
// }
void Intrinsifier::Integer_moduloFromInteger(Assembler* assembler) {
  Label fall_through, subtract;
  // Test arguments for smi.
  __ lw(T1, Address(SP, 0 * kWordSize));
  __ lw(T0, Address(SP, 1 * kWordSize));
-  __ or_(CMPRES, T0, T1);
-  __ andi(CMPRES, CMPRES, Immediate(kSmiTagMask));
-  __ bne(CMPRES, ZR, &fall_through);
+  __ or_(CMPRES1, T0, T1);
+  __ andi(CMPRES1, CMPRES1, Immediate(kSmiTagMask));
+  __ bne(CMPRES1, ZR, &fall_through);
  // T1: Tagged left (dividend).
  // T0: Tagged right (divisor).
  // Check if modulo by zero -> exception thrown in main function.
  __ beq(T0, ZR, &fall_through);
  EmitRemainderOperation(assembler);
  // Untagged right in T0. Untagged remainder result in V0.

  Label done;
  __ bgez(V0, &done);
  __ bltz(T0, &subtract);
(...skipping 30 matching lines...)
  __ Ret();
  __ delay_slot()->SmiTag(V0);
  __ Bind(&fall_through);
}
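The pseudocode above the function adjusts a C-style remainder into a non-negative modulo. A runnable sketch of that fix-up:

  #include <cstdint>

  // MIPS div (like C's %) can return a negative remainder when the dividend
  // is negative; adding or subtracting 'right' shifts it into [0, |right|).
  int32_t NonNegativeModulo(int32_t left, int32_t right) {
    int32_t res = left % right;
    if (res < 0) {
      if (right < 0) {
        res = res - right;  // right < 0: subtracting adds |right|.
      } else {
        res = res + right;
      }
    }
    return res;
  }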


void Intrinsifier::Integer_negate(Assembler* assembler) {
  Label fall_through;

  __ lw(T0, Address(SP, + 0 * kWordSize));  // Grabs first argument.
-  __ andi(CMPRES, T0, Immediate(kSmiTagMask));  // Test for Smi.
-  __ bne(CMPRES, ZR, &fall_through);  // Fall through if not a Smi.
-  __ SubuDetectOverflow(V0, ZR, T0, CMPRES);
-  __ bltz(CMPRES, &fall_through);  // There was overflow.
+  __ andi(CMPRES1, T0, Immediate(kSmiTagMask));  // Test for Smi.
+  __ bne(CMPRES1, ZR, &fall_through);  // Fall through if not a Smi.
+  __ SubuDetectOverflow(V0, ZR, T0, CMPRES1);
+  __ bltz(CMPRES1, &fall_through);  // There was overflow.
  __ Ret();
  __ Bind(&fall_through);
}


void Intrinsifier::Integer_bitAndFromInteger(Assembler* assembler) {
  Label fall_through;

  TestBothArgumentsSmis(assembler, &fall_through);  // Checks two Smis.
  __ Ret();
(...skipping 86 matching lines...)
  __ Bind(&fall_through);
}


static void Get64SmiOrMint(Assembler* assembler,
                           Register res_hi,
                           Register res_lo,
                           Register reg,
                           Label* not_smi_or_mint) {
  Label not_smi, done;
-  __ andi(CMPRES, reg, Immediate(kSmiTagMask));
-  __ bne(CMPRES, ZR, &not_smi);
+  __ andi(CMPRES1, reg, Immediate(kSmiTagMask));
+  __ bne(CMPRES1, ZR, &not_smi);
  __ SmiUntag(reg);

  // Sign extend to 64 bit.
  __ mov(res_lo, reg);
  __ b(&done);
  __ delay_slot()->sra(res_hi, reg, 31);

  __ Bind(&not_smi);
  __ LoadClassId(CMPRES1, reg);
  __ BranchNotEqual(CMPRES1, kMintCid, not_smi_or_mint);
(...skipping 106 matching lines...)
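The mov/sra pair above is plain 64-bit sign extension: the low word is the value itself, and the high word is the sign bit replicated 32 times. A minimal sketch:

  #include <cstdint>

  // sra(res_hi, reg, 31) arithmetic-shifts the sign bit across the word,
  // yielding 0 for non-negative values and -1 (all ones) for negative ones.
  void SignExtendTo64(int32_t value, int32_t* res_lo, int32_t* res_hi) {
    *res_lo = value;        // mov(res_lo, reg)
    *res_hi = value >> 31;  // sra(res_hi, reg, 31)
  }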
// This is called for Smi, Mint and Bigint receivers. The right argument
// can be Smi, Mint, Bigint or double.
void Intrinsifier::Integer_equalToInteger(Assembler* assembler) {
  Label fall_through, true_label, check_for_mint;
  // For integer receiver '===' check first.
  __ lw(T0, Address(SP, 0 * kWordSize));
  __ lw(T1, Address(SP, 1 * kWordSize));
  __ beq(T0, T1, &true_label);

  __ or_(T2, T0, T1);
-  __ andi(CMPRES, T2, Immediate(kSmiTagMask));
+  __ andi(CMPRES1, T2, Immediate(kSmiTagMask));
  // If T0 or T1 is not a smi do Mint checks.
-  __ bne(CMPRES, ZR, &check_for_mint);
+  __ bne(CMPRES1, ZR, &check_for_mint);

  // Both arguments are smi, '===' is good enough.
  __ LoadObject(V0, Bool::False());
  __ Ret();
  __ Bind(&true_label);
  __ LoadObject(V0, Bool::True());
  __ Ret();

  // At least one of the arguments was not Smi.
  Label receiver_not_smi;
  __ Bind(&check_for_mint);

-  __ andi(CMPRES, T1, Immediate(kSmiTagMask));
-  __ bne(CMPRES, ZR, &receiver_not_smi);  // Check receiver.
+  __ andi(CMPRES1, T1, Immediate(kSmiTagMask));
+  __ bne(CMPRES1, ZR, &receiver_not_smi);  // Check receiver.

  // Left (receiver) is Smi, return false if right is not Double.
  // Note that an instance of Mint or Bigint never contains a value that can be
  // represented by Smi.

  __ LoadClassId(CMPRES1, T0);
  __ BranchEqual(CMPRES1, kDoubleCid, &fall_through);
  __ LoadObject(V0, Bool::False());  // Smi == Mint -> false.
  __ Ret();

  __ Bind(&receiver_not_smi);
  // T1: receiver.

  __ LoadClassId(CMPRES1, T1);
  __ BranchNotEqual(CMPRES1, kMintCid, &fall_through);
  // Receiver is Mint, return false if right is Smi.
-  __ andi(CMPRES, T0, Immediate(kSmiTagMask));
-  __ bne(CMPRES, ZR, &fall_through);
+  __ andi(CMPRES1, T0, Immediate(kSmiTagMask));
+  __ bne(CMPRES1, ZR, &fall_through);
  __ LoadObject(V0, Bool::False());
  __ Ret();
  // TODO(srdjan): Implement Mint == Mint comparison.

  __ Bind(&fall_through);
}


void Intrinsifier::Integer_equal(Assembler* assembler) {
  return Integer_equalToInteger(assembler);
}


void Intrinsifier::Integer_sar(Assembler* assembler) {
  Label fall_through;

  TestBothArgumentsSmis(assembler, &fall_through);
  // Shift amount in T0. Value to shift in T1.

  __ SmiUntag(T0);
  __ bltz(T0, &fall_through);

  __ LoadImmediate(T2, 0x1F);
-  __ slt(CMPRES, T2, T0);  // CMPRES <- 0x1F < T0 ? 1 : 0
-  __ movn(T0, T2, CMPRES);  // T0 <- 0x1F < T0 ? 0x1F : T0
+  __ slt(CMPRES1, T2, T0);  // CMPRES1 <- 0x1F < T0 ? 1 : 0
+  __ movn(T0, T2, CMPRES1);  // T0 <- 0x1F < T0 ? 0x1F : T0

  __ SmiUntag(T1);
  __ srav(V0, T1, T0);
  __ Ret();
  __ delay_slot()->SmiTag(V0);
  __ Bind(&fall_through);
}
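The slt/movn pair clamps the shift count to 31 before srav. Because srav uses only the low five bits of the amount, an unclamped count of, say, 40 would actually shift by 8; clamping to 31 preserves arithmetic-shift semantics for large counts, since shifting a 32-bit value right by 31 or more gives the same 0-or-minus-1 result. A sketch:

  #include <cstdint>

  int32_t ShiftArithmeticRight(int32_t value, int32_t amount) {
    if (amount > 0x1F) amount = 0x1F;  // The slt + movn clamp above.
    return value >> amount;            // srav(V0, T1, T0)
  }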


void Intrinsifier::Smi_bitNegate(Assembler* assembler) {
  __ lw(T0, Address(SP, 0 * kWordSize));
  __ nor(V0, T0, ZR);
  __ Ret();
  __ delay_slot()->addiu(V0, V0, Immediate(-1));  // Remove inverted smi-tag.
}
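The nor/addiu pair implements bitwise NOT directly on tagged Smis. With tagged(x) = 2x, the desired result tagged(~x) = 2(-x - 1) = -2x - 2, while ~(2x) = -2x - 1, so subtracting 1 after the nor repairs the tag. A runnable check of the identity, assuming the 2x tagging used throughout this file:

  #include <cassert>
  #include <cstdint>

  int32_t SmiTag(int32_t x) { return x << 1; }

  int32_t TaggedBitNot(int32_t tagged) {
    return ~tagged - 1;  // nor(V0, T0, ZR) then addiu(V0, V0, -1).
  }

  int main() {
    for (int32_t x = -100; x <= 100; ++x) {
      assert(TaggedBitNot(SmiTag(x)) == SmiTag(~x));
    }
    return 0;
  }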


void Intrinsifier::Smi_bitLength(Assembler* assembler) {
  // TODO(sra): Implement.
}


// Check if the last argument is a double; jump to label 'is_smi' if smi
// (easy to convert to double), otherwise jump to label 'not_double_smi'.
// Returns the last argument in T0.
static void TestLastArgumentIsDouble(Assembler* assembler,
                                     Label* is_smi,
                                     Label* not_double_smi) {
  __ lw(T0, Address(SP, 0 * kWordSize));
-  __ andi(CMPRES, T0, Immediate(kSmiTagMask));
-  __ beq(CMPRES, ZR, is_smi);
+  __ andi(CMPRES1, T0, Immediate(kSmiTagMask));
+  __ beq(CMPRES1, ZR, is_smi);
  __ LoadClassId(CMPRES1, T0);
  __ BranchNotEqual(CMPRES1, kDoubleCid, not_double_smi);
  // Fall through with Double in T0.
}


// Both arguments on stack, arg0 (left) is a double, arg1 (right) is of
// unknown type. Return true or false object in the register V0. Any NaN
// argument returns false. Any non-double arg1 causes control flow to fall
// through to the slow case (compiled method body).
(...skipping 120 matching lines...)
void Intrinsifier::Double_div(Assembler* assembler) {
  return DoubleArithmeticOperations(assembler, Token::kDIV);
}


// Left is double, right is integer (Bigint, Mint or Smi).
void Intrinsifier::Double_mulFromInteger(Assembler* assembler) {
  Label fall_through;
  // Only smis allowed.
  __ lw(T0, Address(SP, 0 * kWordSize));
-  __ andi(CMPRES, T0, Immediate(kSmiTagMask));
-  __ bne(CMPRES, ZR, &fall_through);
+  __ andi(CMPRES1, T0, Immediate(kSmiTagMask));
+  __ bne(CMPRES1, ZR, &fall_through);

  // Is Smi.
  __ SmiUntag(T0);
  __ mtc1(T0, F4);
  __ cvtdw(D1, F4);

  __ lw(T0, Address(SP, 1 * kWordSize));
  __ lwc1(F0, FieldAddress(T0, Double::value_offset()));
  __ lwc1(F1, FieldAddress(T0, Double::value_offset() + kWordSize));
  __ muld(D0, D0, D1);
  const Class& double_class = Class::Handle(
      Isolate::Current()->object_store()->double_class());
  __ TryAllocate(double_class, &fall_through, V0);  // Result register.
  __ swc1(F0, FieldAddress(V0, Double::value_offset()));
  __ Ret();
  __ delay_slot()->swc1(F1,
                        FieldAddress(V0, Double::value_offset() + kWordSize));
  __ Bind(&fall_through);
}


void Intrinsifier::Double_fromInteger(Assembler* assembler) {
  Label fall_through;

  __ lw(T0, Address(SP, 0 * kWordSize));
-  __ andi(CMPRES, T0, Immediate(kSmiTagMask));
+  __ andi(CMPRES1, T0, Immediate(kSmiTagMask));
  __ bne(T0, ZR, &fall_through);

  // Is Smi.
  __ SmiUntag(T0);
  __ mtc1(T0, F4);
  __ cvtdw(D0, F4);
  const Class& double_class = Class::Handle(
      Isolate::Current()->object_store()->double_class());
  __ TryAllocate(double_class, &fall_through, V0);  // Result register.
  __ swc1(F0, FieldAddress(V0, Double::value_offset()));
(...skipping 40 matching lines...)
  __ Ret();

  __ Bind(&is_false);
  __ LoadObject(V0, Bool::False());
  __ Ret();

  __ Bind(&is_zero);
  // Check for negative zero by looking at the sign bit.
  __ mfc1(T0, F1);  // Moves bits 32...63 of D0 to T0.
  __ srl(T0, T0, 31);  // Get the sign bit down to bit 0 of T0.
-  __ andi(CMPRES, T0, Immediate(1));  // Check if the bit is set.
+  __ andi(CMPRES1, T0, Immediate(1));  // Check if the bit is set.
  __ bne(T0, ZR, &is_true);  // Sign bit set. True.
  __ b(&is_false);
}
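+0.0 and -0.0 compare equal as doubles, so the is_zero path has to inspect the sign bit of the raw representation (bit 63, which lives in the upper word moved out of F1). A minimal host-side sketch of the same test:

  #include <cstdint>
  #include <cstring>

  bool IsNegativeZero(double d) {
    if (d != 0.0) return false;  // Only reached on the is_zero path.
    uint64_t bits;
    std::memcpy(&bits, &d, sizeof(bits));
    return (bits >> 63) != 0;    // The srl on the high word above.
  }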


void Intrinsifier::Double_toInt(Assembler* assembler) {
  __ lw(T0, Address(SP, 0 * kWordSize));
  __ LoadDFromOffset(D0, T0, Double::value_offset() - kHeapObjectTag);

  __ cvtwd(F2, D0);
  __ mfc1(V0, F2);

  // Overflow is signaled with minint.
  Label fall_through;
  // Check for overflow and that it fits into Smi.
  __ LoadImmediate(TMP, 0xC0000000);
-  __ subu(CMPRES, V0, TMP);
-  __ bltz(CMPRES, &fall_through);
+  __ subu(CMPRES1, V0, TMP);
+  __ bltz(CMPRES1, &fall_through);
  __ Ret();
  __ delay_slot()->SmiTag(V0);
  __ Bind(&fall_through);
}
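The 0xC0000000 subtraction is a compact range check. Untagged Smis span [-2^30, 2^30 - 1]; in 32-bit wraparound arithmetic, v - 0xC0000000 is negative exactly for v in [0x40000000, 0xBFFFFFFF], which is precisely the set of 32-bit values that do not fit in a Smi (including the 0x80000000 overflow sentinel produced by cvtwd). A sketch:

  #include <cstdint>

  bool FitsInSmi(int32_t v) {
    // subu(CMPRES1, V0, TMP) with TMP = 0xC0000000, then bltz on failure.
    int32_t probe =
        static_cast<int32_t>(static_cast<uint32_t>(v) - 0xC0000000u);
    return probe >= 0;
  }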


void Intrinsifier::Math_sqrt(Assembler* assembler) {
  Label fall_through, is_smi, double_op;
  TestLastArgumentIsDouble(assembler, &is_smi, &fall_through);
  // Argument is double and is in T0.
(...skipping 104 matching lines...)
}


void Intrinsifier::String_codeUnitAt(Assembler* assembler) {
  Label fall_through, try_two_byte_string;

  __ lw(T1, Address(SP, 0 * kWordSize));  // Index.
  __ lw(T0, Address(SP, 1 * kWordSize));  // String.

  // Checks.
-  __ andi(CMPRES, T1, Immediate(kSmiTagMask));
+  __ andi(CMPRES1, T1, Immediate(kSmiTagMask));
  __ bne(T1, ZR, &fall_through);  // Index is not a Smi.
  __ lw(T2, FieldAddress(T0, String::length_offset()));  // Range check.
  // Runtime throws exception.
  __ BranchUnsignedGreaterEqual(T1, T2, &fall_through);
  __ LoadClassId(CMPRES1, T0);  // Class ID check.
  __ BranchNotEqual(CMPRES1, kOneByteStringCid, &try_two_byte_string);

  // Grab byte and return.
  __ SmiUntag(T1);
  __ addu(T2, T0, T1);
(...skipping 110 matching lines...)
  __ LoadImmediate(TMP, ~(kObjectAlignment - 1));
  __ and_(length_reg, length_reg, TMP);

  Isolate* isolate = Isolate::Current();
  Heap* heap = isolate->heap();

  __ LoadImmediate(T3, heap->TopAddress());
  __ lw(V0, Address(T3, 0));

  // length_reg: allocation size.
-  __ AdduDetectOverflow(T1, V0, length_reg, CMPRES);
-  __ bltz(CMPRES, failure);  // Fail on overflow.
+  __ AdduDetectOverflow(T1, V0, length_reg, CMPRES1);
+  __ bltz(CMPRES1, failure);  // Fail on overflow.

  // Check if the allocation fits into the remaining space.
  // V0: potential new object start.
  // T1: potential next object start.
  // T2: allocation size.
  // T3: heap->TopAddress().
  __ LoadImmediate(T4, heap->EndAddress());
  __ lw(T4, Address(T4, 0));
  __ BranchUnsignedGreaterEqual(T1, T4, failure);

(...skipping 14 matching lines...)

    __ BranchUnsignedGreater(T2, RawObject::SizeTag::kMaxSizeTag, &overflow);
    __ b(&done);
    __ delay_slot()->sll(T2, T2, shift);
    __ Bind(&overflow);
    __ mov(T2, ZR);
    __ Bind(&done);

    // Get the class index and insert it into the tags.
    // T2: size and bit tags.
-    __ LoadImmediate(TMP1, RawObject::ClassIdTag::encode(cls.id()));
-    __ or_(T2, T2, TMP1);
+    __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cls.id()));
+    __ or_(T2, T2, TMP);
    __ sw(T2, FieldAddress(V0, String::tags_offset()));  // Store tags.
  }

  // Set the length field using the saved length (T6).
  __ StoreIntoObjectNoBarrier(V0,
                              FieldAddress(V0, String::length_offset()),
                              T6);
  // Clear hash.
  __ b(ok);
  __ delay_slot()->sw(ZR, FieldAddress(V0, String::hash_offset()));
}


// Arg0: OneByteString (receiver).
// Arg1: Start index as Smi.
// Arg2: End index as Smi.
// The indexes must be valid.
void Intrinsifier::OneByteString_substringUnchecked(Assembler* assembler) {
  const intptr_t kStringOffset = 2 * kWordSize;
  const intptr_t kStartIndexOffset = 1 * kWordSize;
  const intptr_t kEndIndexOffset = 0 * kWordSize;
  Label fall_through, ok;

  __ lw(T2, Address(SP, kEndIndexOffset));
  __ lw(TMP, Address(SP, kStartIndexOffset));
-  __ or_(CMPRES, T2, TMP);
-  __ andi(CMPRES, CMPRES, Immediate(kSmiTagMask));
-  __ bne(CMPRES, ZR, &fall_through);  // 'start', 'end' not Smi.
+  __ or_(CMPRES1, T2, TMP);
+  __ andi(CMPRES1, CMPRES1, Immediate(kSmiTagMask));
+  __ bne(CMPRES1, ZR, &fall_through);  // 'start', 'end' not Smi.

  __ subu(T2, T2, TMP);
  TryAllocateOnebyteString(assembler, &ok, &fall_through);
  __ Bind(&ok);
  // V0: new string as tagged pointer.
  // Copy string.
  __ lw(T3, Address(SP, kStringOffset));
  __ lw(T1, Address(SP, kStartIndexOffset));
  __ SmiUntag(T1);
  __ addu(T3, T3, T1);
(...skipping 58 matching lines...)
// TODO(srdjan): Add combinations (one-byte/two-byte/external strings).
void StringEquality(Assembler* assembler, intptr_t string_cid) {
  Label fall_through, is_true, is_false, loop;
  __ lw(T0, Address(SP, 1 * kWordSize));  // This.
  __ lw(T1, Address(SP, 0 * kWordSize));  // Other.

  // Are identical?
  __ beq(T0, T1, &is_true);

  // Is other OneByteString?
-  __ andi(CMPRES, T1, Immediate(kSmiTagMask));
-  __ beq(CMPRES, ZR, &fall_through);  // Other is Smi.
+  __ andi(CMPRES1, T1, Immediate(kSmiTagMask));
+  __ beq(CMPRES1, ZR, &fall_through);  // Other is Smi.
  __ LoadClassId(CMPRES1, T1);  // Class ID check.
  __ BranchNotEqual(CMPRES1, string_cid, &fall_through);

  // Have same length?
  __ lw(T2, FieldAddress(T0, String::length_offset()));
  __ lw(T3, FieldAddress(T1, String::length_offset()));
  __ bne(T2, T3, &is_false);

  // Check contents, no fall-through possible.
  ASSERT((string_cid == kOneByteStringCid) ||
(...skipping 34 matching lines...)
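Before the character loop (elided above), StringEquality has already ruled out every cheap case: pointer identity, a Smi 'other', a class-id mismatch, and differing lengths. A condensed host-side sketch of that ordering; the struct is a hypothetical stand-in, not the VM's string layout:

  #include <cstdint>
  #include <cstring>

  struct SimpleString {   // Hypothetical stand-in for the VM's layout.
    intptr_t class_id;
    intptr_t length;      // In characters.
    const uint8_t* data;
  };

  bool StringEquals(const SimpleString* a, const SimpleString* b) {
    if (a == b) return true;                       // beq(T0, T1, &is_true)
    if (a->class_id != b->class_id) return false;  // LoadClassId + BranchNotEqual
    if (a->length != b->length) return false;      // bne(T2, T3, &is_false)
    return std::memcmp(a->data, b->data, a->length) == 0;  // The loop.
  }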
}


void Intrinsifier::TwoByteString_equality(Assembler* assembler) {
  StringEquality(assembler, kTwoByteStringCid);
}

}  // namespace dart

#endif  // defined TARGET_ARCH_MIPS
