Chromium Code Reviews

Diff: runtime/vm/intrinsifier_arm.cc

Issue 2974233002: VM: Re-format to use at most one newline between functions (Closed)
Patch Set: Rebase and merge (created 3 years, 5 months ago)
// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/globals.h"  // Needed here to get TARGET_ARCH_ARM.
#if defined(TARGET_ARCH_ARM)

#include "vm/intrinsifier.h"

#include "vm/assembler.h"
(...skipping 11 matching lines...)
// When entering intrinsics code:
// R4: Arguments descriptor
// LR: Return address
// The R4 register can be destroyed only if there is no slow-path, i.e.
// if the intrinsified method always executes a return.
// The FP register should not be modified, because it is used by the profiler.
// The PP and THR registers (see constants_arm.h) must be preserved.

#define __ assembler->

intptr_t Intrinsifier::ParameterSlotFromSp() {
  return -1;
}

static bool IsABIPreservedRegister(Register reg) {
  return ((1 << reg) & kAbiPreservedCpuRegs) != 0;
}
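
IsABIPreservedRegister is a plain bitmask membership test: bit reg of
kAbiPreservedCpuRegs says whether that register is callee-saved under the ARM
ABI. A minimal standalone sketch of the same pattern (the register numbers and
mask below are made-up stand-ins, not the VM's actual constants):

// Illustrative sketch, not part of this CL.
enum SketchRegister { kR0 = 0, kR4 = 4, kR5 = 5, kR10 = 10 };
constexpr int kPreservedMask = (1 << kR4) | (1 << kR5) | (1 << kR10);

static bool IsPreserved(SketchRegister reg) {
  return ((1 << reg) & kPreservedMask) != 0;  // Bit set => callee-saved.
}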

void Intrinsifier::IntrinsicCallPrologue(Assembler* assembler) {
  ASSERT(IsABIPreservedRegister(CODE_REG));
  ASSERT(IsABIPreservedRegister(ARGS_DESC_REG));
  ASSERT(IsABIPreservedRegister(CALLEE_SAVED_TEMP));

  // Save LR by moving it to a callee saved temporary register.
  assembler->Comment("IntrinsicCallPrologue");
  assembler->mov(CALLEE_SAVED_TEMP, Operand(LR));
}

void Intrinsifier::IntrinsicCallEpilogue(Assembler* assembler) {
  // Restore LR.
  assembler->Comment("IntrinsicCallEpilogue");
  assembler->mov(LR, Operand(CALLEE_SAVED_TEMP));
}

// Intrinsify only for Smi value and index. Non-smi values need a store buffer
// update. Array length is always a Smi.
void Intrinsifier::ObjectArraySetIndexed(Assembler* assembler) {
  if (Isolate::Current()->type_checks()) {
    return;
  }

  Label fall_through;
  __ ldr(R1, Address(SP, 1 * kWordSize));  // Index.
  __ tst(R1, Operand(kSmiTagMask));
(...skipping 10 matching lines...)
  // Note that R1 is a Smi, i.e., times 2.
  ASSERT(kSmiTagShift == 1);
  __ ldr(R2, Address(SP, 0 * kWordSize));  // Value.
  __ add(R1, R0, Operand(R1, LSL, 1));  // R1 is Smi.
  __ StoreIntoObject(R0, FieldAddress(R1, Array::data_offset()), R2);
  // Caller is responsible for preserving the value if necessary.
  __ Ret();
  __ Bind(&fall_through);
}
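
The "times 2" remark is the Smi tagging scheme this whole file leans on
(kSmiTag == 0, kSmiTagShift == 1): a small integer v is stored as v << 1, so a
tagged index already encodes index * 2, and the extra LSL 1 above turns it
into the byte offset index * 4. A hedged sketch of the arithmetic, under those
assumptions:

// Illustrative sketch of 32-bit Smi tagging, not part of this CL.
static inline intptr_t SketchSmiTag(intptr_t value) { return value << 1; }
static inline intptr_t SketchSmiUntag(intptr_t tagged) { return tagged >> 1; }
static inline bool SketchIsSmi(intptr_t tagged) { return (tagged & 1) == 0; }
// A tagged index equals index * 2; one more left shift gives index * 4,
// i.e. index * kWordSize on 32-bit ARM.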

// Allocate a GrowableObjectArray using the backing array specified.
// On stack: type argument (+1), data (+0).
void Intrinsifier::GrowableArray_Allocate(Assembler* assembler) {
  // The newly allocated object is returned in R0.
  const intptr_t kTypeArgumentsOffset = 1 * kWordSize;
  const intptr_t kArrayOffset = 0 * kWordSize;
  Label fall_through;

  // Try allocating in new space.
  const Class& cls = Class::Handle(
(...skipping 14 matching lines...)

  // Set the length field in the growable array object to 0.
  __ LoadImmediate(R1, 0);
  __ StoreIntoObjectNoBarrier(
      R0, FieldAddress(R0, GrowableObjectArray::length_offset()), R1);
  __ Ret();  // Returns the newly allocated object in R0.

  __ Bind(&fall_through);
}

// Add an element to growable array if it doesn't need to grow, otherwise
// call into regular code.
// On stack: growable array (+1), value (+0).
void Intrinsifier::GrowableArray_add(Assembler* assembler) {
  // In checked mode we need to type-check the incoming argument.
  if (Isolate::Current()->type_checks()) {
    return;
  }
  Label fall_through;
  // R0: Array.
(...skipping 14 matching lines...)
      R3);
  __ ldr(R0, Address(SP, 0 * kWordSize));  // Value.
  ASSERT(kSmiTagShift == 1);
  __ add(R1, R2, Operand(R1, LSL, 1));
  __ StoreIntoObject(R2, FieldAddress(R1, Array::data_offset()), R0);
  __ LoadObject(R0, Object::null_object());
  __ Ret();
  __ Bind(&fall_through);
}

#define TYPED_ARRAY_ALLOCATION(type_name, cid, max_len, scale_shift)          \
  Label fall_through;                                                         \
  const intptr_t kArrayLengthStackOffset = 0 * kWordSize;                     \
  NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, R2, &fall_through));            \
  __ ldr(R2, Address(SP, kArrayLengthStackOffset)); /* Array length. */       \
  /* Check that length is a positive Smi. */                                  \
  /* R2: requested array length argument. */                                  \
  __ tst(R2, Operand(kSmiTagMask));                                           \
  __ b(&fall_through, NE);                                                    \
  __ CompareImmediate(R2, 0);                                                 \
(...skipping 71 matching lines...)
  __ AddImmediate(R3, 2 * kWordSize);                                         \
  __ cmp(R3, Operand(R1));                                                    \
  __ strd(R8, R9, R3, -2 * kWordSize, LS);                                    \
  __ b(&init_loop, CC);                                                       \
  __ str(R8, Address(R3, -2 * kWordSize), HI);                                \
                                                                              \
  NOT_IN_PRODUCT(__ IncrementAllocationStatsWithSize(R4, R2, space));         \
  __ Ret();                                                                   \
  __ Bind(&fall_through);

static int GetScaleFactor(intptr_t size) {
  switch (size) {
    case 1:
      return 0;
    case 2:
      return 1;
    case 4:
      return 2;
    case 8:
      return 3;
    case 16:
      return 4;
  }
  UNREACHABLE();
  return -1;
}
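
GetScaleFactor maps an element size in bytes to the LSL shift used when
scaling an index, so size == 1 << shift; for example, an 8-byte element gets
shift 3 and address = base + (index << 3). A quick hedged check of that
invariant:

// Illustrative sketch, not part of this CL.
#include <cassert>
static void CheckScaleFactors() {
  for (intptr_t size : {1, 2, 4, 8, 16}) {
    assert((static_cast<intptr_t>(1) << GetScaleFactor(size)) == size);
  }
}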

#define TYPED_DATA_ALLOCATOR(clazz)                                           \
  void Intrinsifier::TypedData_##clazz##_factory(Assembler* assembler) {      \
    intptr_t size = TypedData::ElementSizeInBytes(kTypedData##clazz##Cid);    \
    intptr_t max_len = TypedData::MaxElements(kTypedData##clazz##Cid);        \
    int shift = GetScaleFactor(size);                                         \
    TYPED_ARRAY_ALLOCATION(TypedData, kTypedData##clazz##Cid, max_len, shift);\
  }
CLASS_LIST_TYPED_DATA(TYPED_DATA_ALLOCATOR)
#undef TYPED_DATA_ALLOCATOR

// Loads args from stack into R0 and R1.
// Tests if they are smis, jumps to label not_smi if not.
static void TestBothArgumentsSmis(Assembler* assembler, Label* not_smi) {
  __ ldr(R0, Address(SP, +0 * kWordSize));
  __ ldr(R1, Address(SP, +1 * kWordSize));
  __ orr(TMP, R0, Operand(R1));
  __ tst(TMP, Operand(kSmiTagMask));
  __ b(not_smi, NE);
  return;
}
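
The orr/tst pair checks both arguments at once: OR-ing the two tagged words
leaves the tag bit set iff at least one operand is not a Smi. The same trick
in portable form (kSmiTagMask assumed to be 1, as elsewhere in this file):

// Illustrative sketch, not part of this CL.
static bool BothSmis(uintptr_t a, uintptr_t b) {
  const uintptr_t kTagMask = 1;
  return ((a | b) & kTagMask) == 0;  // Neither tag bit set.
}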

void Intrinsifier::Integer_addFromInteger(Assembler* assembler) {
  Label fall_through;
  TestBothArgumentsSmis(assembler, &fall_through);  // Checks two smis.
  __ adds(R0, R0, Operand(R1));  // Adds.
  __ bx(LR, VC);  // Return if no overflow.
  // Otherwise fall through.
  __ Bind(&fall_through);
}

void Intrinsifier::Integer_add(Assembler* assembler) {
  Integer_addFromInteger(assembler);
}

void Intrinsifier::Integer_subFromInteger(Assembler* assembler) {
  Label fall_through;
  TestBothArgumentsSmis(assembler, &fall_through);
  __ subs(R0, R0, Operand(R1));  // Subtract.
  __ bx(LR, VC);  // Return if no overflow.
  // Otherwise fall through.
  __ Bind(&fall_through);
}

void Intrinsifier::Integer_sub(Assembler* assembler) {
  Label fall_through;
  TestBothArgumentsSmis(assembler, &fall_through);
  __ subs(R0, R1, Operand(R0));  // Subtract.
  __ bx(LR, VC);  // Return if no overflow.
  // Otherwise fall through.
  __ Bind(&fall_through);
}

void Intrinsifier::Integer_mulFromInteger(Assembler* assembler) {
  Label fall_through;
  TestBothArgumentsSmis(assembler, &fall_through);  // checks two smis
  __ SmiUntag(R0);  // Untags R0. We only want result shifted by one.
  __ smull(R0, IP, R0, R1);  // IP:R0 <- R0 * R1.
  __ cmp(IP, Operand(R0, ASR, 31));
  __ bx(LR, EQ);
  __ Bind(&fall_through);  // Fall through on overflow.
}
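
smull produces the full 64-bit product; it fits in 32 bits exactly when the
high word equals the sign extension of the low word, which is what the cmp
against R0 ASR 31 verifies. A hedged C++ restatement:

// Illustrative sketch, not part of this CL.
#include <cstdint>
static bool MulFits32(int32_t a, int32_t b) {
  int64_t product = static_cast<int64_t>(a) * b;
  int32_t lo = static_cast<int32_t>(product);
  int32_t hi = static_cast<int32_t>(product >> 32);
  return hi == (lo >> 31);  // High word must be the low word's sign.
}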

void Intrinsifier::Integer_mul(Assembler* assembler) {
  Integer_mulFromInteger(assembler);
}

// Optimizations:
// - result is 0 if:
//   - left is 0
//   - left equals right
// - result is left if:
//   - left > 0 && left < right
// R1: Tagged left (dividend).
// R0: Tagged right (divisor).
// Returns:
// R1: Untagged fallthrough result (remainder to be adjusted), or
(...skipping 27 matching lines...)
  // result <- left - right * (left / right)
  __ SmiUntag(left);
  __ SmiUntag(right);

  __ IntegerDivide(tmp, left, right, D1, D0);

  __ mls(result, right, tmp, left);  // result <- left - right * TMP
  return;
}

// Implementation:
//  res = left % right;
//  if (res < 0) {
//    if (right < 0) {
//      res = res - right;
//    } else {
//      res = res + right;
//    }
//  }
void Intrinsifier::Integer_moduloFromInteger(Assembler* assembler) {
(...skipping 22 matching lines...)
  // Result is negative, adjust it.
  __ cmp(R0, Operand(0));
  __ sub(R0, R1, Operand(R0), LT);
  __ add(R0, R1, Operand(R0), GE);
  __ SmiTag(R0);
  __ Ret();

  __ Bind(&fall_through);
}
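
The pseudo code above turns C's truncating remainder into Dart's %, whose
result is always in [0, |right|). A standalone sketch of the same adjustment:

// Illustrative sketch, not part of this CL.
static int EuclideanMod(int left, int right) {
  int res = left % right;  // C-style remainder, may be negative.
  if (res < 0) {
    res = (right < 0) ? res - right : res + right;
  }
  return res;
}
// EuclideanMod(-7, 3) == 2 and EuclideanMod(-7, -3) == 2.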

void Intrinsifier::Integer_truncDivide(Assembler* assembler) {
  if (!TargetCPUFeatures::can_divide()) {
    return;
  }
  // Check to see if we have integer division.
  Label fall_through;

  TestBothArgumentsSmis(assembler, &fall_through);
  __ cmp(R0, Operand(0));
  __ b(&fall_through, EQ);  // If b is 0, fall through.

  __ SmiUntag(R0);
  __ SmiUntag(R1);

  __ IntegerDivide(R0, R1, R0, D1, D0);

  // Check the corner case of dividing 'MIN_SMI' by -1, in which case we
  // cannot tag the result.
  __ CompareImmediate(R0, 0x40000000);
  __ SmiTag(R0, NE);  // Not equal. Okay to tag and return.
  __ bx(LR, NE);  // Return.
  __ Bind(&fall_through);
}
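
On 32-bit ARM a Smi holds 31 bits, so the most negative Smi is -0x40000000.
Dividing it by -1 yields +0x40000000, the one quotient that cannot be
re-tagged, hence the single CompareImmediate before tagging. A hedged
illustration of the range involved:

// Illustrative sketch, not part of this CL.
#include <cstdint>
static bool QuotientFitsInSmi(int32_t quotient) {
  const int32_t kSmiMin = -0x40000000;  // -2^30
  const int32_t kSmiMax = 0x3FFFFFFF;   // 2^30 - 1
  return quotient >= kSmiMin && quotient <= kSmiMax;
}
// (-0x40000000) / -1 == 0x40000000, the one Smi/Smi quotient failing this.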

void Intrinsifier::Integer_negate(Assembler* assembler) {
  Label fall_through;
  __ ldr(R0, Address(SP, +0 * kWordSize));  // Grab first argument.
  __ tst(R0, Operand(kSmiTagMask));  // Test for Smi.
  __ b(&fall_through, NE);
  __ rsbs(R0, R0, Operand(0));  // R0 is a Smi. R0 <- 0 - R0.
  __ bx(LR, VC);  // Return if there wasn't overflow, fall through otherwise.
  // R0 is not a Smi. Fall through.
  __ Bind(&fall_through);
}

void Intrinsifier::Integer_bitAndFromInteger(Assembler* assembler) {
  Label fall_through;

  TestBothArgumentsSmis(assembler, &fall_through);  // checks two smis
  __ and_(R0, R0, Operand(R1));

  __ Ret();
  __ Bind(&fall_through);
}

void Intrinsifier::Integer_bitAnd(Assembler* assembler) {
  Integer_bitAndFromInteger(assembler);
}

void Intrinsifier::Integer_bitOrFromInteger(Assembler* assembler) {
  Label fall_through;

  TestBothArgumentsSmis(assembler, &fall_through);  // checks two smis
  __ orr(R0, R0, Operand(R1));

  __ Ret();
  __ Bind(&fall_through);
}

void Intrinsifier::Integer_bitOr(Assembler* assembler) {
  Integer_bitOrFromInteger(assembler);
}

void Intrinsifier::Integer_bitXorFromInteger(Assembler* assembler) {
  Label fall_through;

  TestBothArgumentsSmis(assembler, &fall_through);  // checks two smis
  __ eor(R0, R0, Operand(R1));

  __ Ret();
  __ Bind(&fall_through);
}

void Intrinsifier::Integer_bitXor(Assembler* assembler) {
  Integer_bitXorFromInteger(assembler);
}

void Intrinsifier::Integer_shl(Assembler* assembler) {
  ASSERT(kSmiTagShift == 1);
  ASSERT(kSmiTag == 0);
  Label fall_through;
  TestBothArgumentsSmis(assembler, &fall_through);
  __ CompareImmediate(R0, Smi::RawValue(Smi::kBits));
  __ b(&fall_through, HI);

  __ SmiUntag(R0);

(...skipping 22 matching lines...)
  __ mov(NOTFP, Operand(NOTFP, LSL, R3));  // NOTFP <- NOTFP << R3
  __ and_(NOTFP, R1, Operand(NOTFP));  // NOTFP <- NOTFP & R1
  __ mov(NOTFP, Operand(NOTFP, LSR, R3));  // NOTFP <- NOTFP >> R3
  // Now NOTFP has the bits that fall off of R1 on a left shift.
  __ mov(R1, Operand(R1, LSL, R0));  // R1 gets the low bits.

  const Class& mint_class =
      Class::Handle(Isolate::Current()->object_store()->mint_class());
  __ TryAllocate(mint_class, &fall_through, R0, R2);
  __ str(R1, FieldAddress(R0, Mint::value_offset()));
  __ str(NOTFP, FieldAddress(R0, Mint::value_offset() + kWordSize));
  __ Ret();
  __ Bind(&fall_through);
}

static void Get64SmiOrMint(Assembler* assembler,
                           Register res_hi,
                           Register res_lo,
                           Register reg,
                           Label* not_smi_or_mint) {
  Label not_smi, done;
  __ tst(reg, Operand(kSmiTagMask));
  __ b(&not_smi, NE);
  __ SmiUntag(reg);

  // Sign extend to 64 bit.
  __ mov(res_lo, Operand(reg));
  __ mov(res_hi, Operand(res_lo, ASR, 31));
  __ b(&done);

  __ Bind(&not_smi);
  __ CompareClassId(reg, kMintCid, res_lo);
  __ b(not_smi_or_mint, NE);

  // Mint.
  __ ldr(res_lo, FieldAddress(reg, Mint::value_offset()));
  __ ldr(res_hi, FieldAddress(reg, Mint::value_offset() + kWordSize));
  __ Bind(&done);
  return;
}

static void CompareIntegers(Assembler* assembler, Condition true_condition) {
  Label try_mint_smi, is_true, is_false, drop_two_fall_through, fall_through;
  TestBothArgumentsSmis(assembler, &try_mint_smi);
  // R0 contains the right argument; R1 contains the left argument.

  __ cmp(R1, Operand(R0));
  __ b(&is_true, true_condition);
  __ Bind(&is_false);
  __ LoadObject(R0, Bool::False());
  __ Ret();
(...skipping 35 matching lines...)
  __ b(&is_false, hi_false_cond);
  __ b(&is_true, hi_true_cond);
  __ cmp(R2, Operand(R8));  // Compare left lo, right lo.
  __ b(&is_false, lo_false_cond);
  // Else is true.
  __ b(&is_true);

  __ Bind(&fall_through);
}

void Intrinsifier::Integer_greaterThanFromInt(Assembler* assembler) {
  CompareIntegers(assembler, LT);
}

void Intrinsifier::Integer_lessThan(Assembler* assembler) {
  Integer_greaterThanFromInt(assembler);
}

void Intrinsifier::Integer_greaterThan(Assembler* assembler) {
  CompareIntegers(assembler, GT);
}

void Intrinsifier::Integer_lessEqualThan(Assembler* assembler) {
  CompareIntegers(assembler, LE);
}

void Intrinsifier::Integer_greaterEqualThan(Assembler* assembler) {
  CompareIntegers(assembler, GE);
}

// This is called for Smi, Mint and Bigint receivers. The right argument
// can be Smi, Mint, Bigint or double.
void Intrinsifier::Integer_equalToInteger(Assembler* assembler) {
  Label fall_through, true_label, check_for_mint;
  // For integer receiver '===' check first.
  __ ldr(R0, Address(SP, 0 * kWordSize));
  __ ldr(R1, Address(SP, 1 * kWordSize));
  __ cmp(R0, Operand(R1));
  __ b(&true_label, EQ);

(...skipping 31 matching lines...)
  __ b(&fall_through, NE);
  // Receiver is Mint, return false if right is Smi.
  __ tst(R0, Operand(kSmiTagMask));
  __ LoadObject(R0, Bool::False(), EQ);
  __ bx(LR, EQ);
  // TODO(srdjan): Implement Mint == Mint comparison.

  __ Bind(&fall_through);
}

void Intrinsifier::Integer_equal(Assembler* assembler) {
  Integer_equalToInteger(assembler);
}

void Intrinsifier::Integer_sar(Assembler* assembler) {
  Label fall_through;

  TestBothArgumentsSmis(assembler, &fall_through);
  // Shift amount in R0. Value to shift in R1.

  // Fall through if shift amount is negative.
  __ SmiUntag(R0);
  __ CompareImmediate(R0, 0);
  __ b(&fall_through, LT);

  // If shift amount is bigger than 31, set to 31.
  __ CompareImmediate(R0, 0x1F);
  __ LoadImmediate(R0, 0x1F, GT);
  __ SmiUntag(R1);
  __ mov(R0, Operand(R1, ASR, R0));
  __ SmiTag(R0);
  __ Ret();
  __ Bind(&fall_through);
}

void Intrinsifier::Smi_bitNegate(Assembler* assembler) {
  __ ldr(R0, Address(SP, 0 * kWordSize));
  __ mvn(R0, Operand(R0));
  __ bic(R0, R0, Operand(kSmiTagMask));  // Remove inverted smi-tag.
  __ Ret();
}
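
mvn/bic works directly on the tagged value: for a Smi stored as v << 1,
~(v << 1) == (~v << 1) | 1, so clearing the inverted tag bit with bic yields
the tagged form of ~v without ever untagging. A quick check of that identity:

// Illustrative sketch, not part of this CL.
static intptr_t SmiBitNegateTagged(intptr_t tagged) {
  return ~tagged & ~static_cast<intptr_t>(1);  // mvn, then bic the tag bit.
}
// SmiBitNegateTagged(5 << 1) == (-6) << 1, the tagged form of ~5.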

void Intrinsifier::Smi_bitLength(Assembler* assembler) {
  __ ldr(R0, Address(SP, 0 * kWordSize));
  __ SmiUntag(R0);
  // XOR with sign bit to complement bits if value is negative.
  __ eor(R0, R0, Operand(R0, ASR, 31));
  __ clz(R0, R0);
  __ rsb(R0, R0, Operand(32));
  __ SmiTag(R0);
  __ Ret();
}
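
bitLength is 32 minus the count of leading zeros, after the ASR-31 XOR folds
negative values onto their one's complement. The same computation in C++
(note ARM's clz returns 32 for zero, while __builtin_clz(0) is undefined, so
the sketch special-cases it):

// Illustrative sketch, not part of this CL.
#include <cstdint>
static int32_t SmiBitLength(int32_t v) {
  uint32_t x = static_cast<uint32_t>(v ^ (v >> 31));  // Complement negatives.
  return x == 0 ? 0 : 32 - __builtin_clz(x);
}
// SmiBitLength(5) == 3, SmiBitLength(-1) == 0, SmiBitLength(-5) == 3.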

void Intrinsifier::Smi_bitAndFromSmi(Assembler* assembler) {
  Integer_bitAndFromInteger(assembler);
}

void Intrinsifier::Bigint_lsh(Assembler* assembler) {
  // static void _lsh(Uint32List x_digits, int x_used, int n,
  //                  Uint32List r_digits)

  // R0 = x_used, R1 = x_digits, x_used > 0, x_used is Smi.
  __ ldrd(R0, R1, SP, 2 * kWordSize);
  // R2 = r_digits, R3 = n, n is Smi, n % _DIGIT_BITS != 0.
  __ ldrd(R2, R3, SP, 0 * kWordSize);
  __ SmiUntag(R3);
  // R4 = n ~/ _DIGIT_BITS
  __ Asr(R4, R3, Operand(5));
  // R8 = &x_digits[0]
  __ add(R8, R1, Operand(TypedData::data_offset() - kHeapObjectTag));
  // NOTFP = &x_digits[x_used]
  __ add(NOTFP, R8, Operand(R0, LSL, 1));
  // R6 = &r_digits[1]
  __ add(R6, R2,
         Operand(TypedData::data_offset() - kHeapObjectTag +
                 Bigint::kBytesPerDigit));
  // R6 = &r_digits[x_used + n ~/ _DIGIT_BITS + 1]
  __ add(R4, R4, Operand(R0, ASR, 1));
  __ add(R6, R6, Operand(R4, LSL, 2));
  // R1 = n % _DIGIT_BITS
  __ and_(R1, R3, Operand(31));
  // R0 = 32 - R1
  __ rsb(R0, R1, Operand(32));
  __ mov(R9, Operand(0));
  Label loop;
  __ Bind(&loop);
  __ ldr(R4, Address(NOTFP, -Bigint::kBytesPerDigit, Address::PreIndex));
  __ orr(R9, R9, Operand(R4, LSR, R0));
  __ str(R9, Address(R6, -Bigint::kBytesPerDigit, Address::PreIndex));
  __ mov(R9, Operand(R4, LSL, R1));
  __ teq(NOTFP, Operand(R8));
  __ b(&loop, NE);
  __ str(R9, Address(R6, -Bigint::kBytesPerDigit, Address::PreIndex));
  // Returning Object::null() is not required, since this method is private.
  __ Ret();
}
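
The loop walks the digits from most to least significant, OR-ing the bits
that spill out of each 32-bit digit into the next result slot. A hedged C++
sketch of the same multi-digit left shift (digits least-significant first,
matching the _lsh signature; n % 32 != 0 is a precondition of the intrinsic):

// Illustrative sketch, not part of this CL.
#include <cstdint>
static void BigintLsh(const uint32_t* x, int x_used, int n, uint32_t* r) {
  const int digit_shift = n / 32;
  const int bit_shift = n % 32;  // Nonzero by precondition.
  uint32_t carry = 0;
  for (int i = x_used - 1; i >= 0; --i) {
    r[i + digit_shift + 1] = carry | (x[i] >> (32 - bit_shift));
    carry = x[i] << bit_shift;
  }
  r[digit_shift] = carry;  // Lowest nonzero result digit.
}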

void Intrinsifier::Bigint_rsh(Assembler* assembler) {
  // static void _rsh(Uint32List x_digits, int x_used, int n,
  //                  Uint32List r_digits)

  // R0 = x_used, R1 = x_digits, x_used > 0, x_used is Smi.
  __ ldrd(R0, R1, SP, 2 * kWordSize);
  // R2 = r_digits, R3 = n, n is Smi, n % _DIGIT_BITS != 0.
  __ ldrd(R2, R3, SP, 0 * kWordSize);
  __ SmiUntag(R3);
  // R4 = n ~/ _DIGIT_BITS
(...skipping 23 matching lines...)
  __ str(R9, Address(R6, Bigint::kBytesPerDigit, Address::PostIndex));
  __ mov(R9, Operand(R4, LSR, R1));
  __ Bind(&loop_entry);
  __ teq(R6, Operand(R8));
  __ b(&loop, NE);
  __ str(R9, Address(R6, 0));
  // Returning Object::null() is not required, since this method is private.
  __ Ret();
}

void Intrinsifier::Bigint_absAdd(Assembler* assembler) {
  // static void _absAdd(Uint32List digits, int used,
  //                     Uint32List a_digits, int a_used,
  //                     Uint32List r_digits)

  // R0 = used, R1 = digits
  __ ldrd(R0, R1, SP, 3 * kWordSize);
  // R1 = &digits[0]
  __ add(R1, R1, Operand(TypedData::data_offset() - kHeapObjectTag));

(...skipping 39 matching lines...)

  __ Bind(&last_carry);
  __ mov(R4, Operand(0));
  __ adc(R4, R4, Operand(0));
  __ str(R4, Address(R8, 0));

  // Returning Object::null() is not required, since this method is private.
  __ Ret();
}

void Intrinsifier::Bigint_absSub(Assembler* assembler) {
  // static void _absSub(Uint32List digits, int used,
  //                     Uint32List a_digits, int a_used,
  //                     Uint32List r_digits)

  // R0 = used, R1 = digits
  __ ldrd(R0, R1, SP, 3 * kWordSize);
  // R1 = &digits[0]
  __ add(R1, R1, Operand(TypedData::data_offset() - kHeapObjectTag));

(...skipping 35 matching lines...)
  __ sbcs(R4, R4, Operand(0));
  __ teq(R1, Operand(R6));  // Does not affect carry flag.
  __ str(R4, Address(R8, Bigint::kBytesPerDigit, Address::PostIndex));
  __ b(&carry_loop, NE);

  __ Bind(&done);
  // Returning Object::null() is not required, since this method is private.
  __ Ret();
}

void Intrinsifier::Bigint_mulAdd(Assembler* assembler) {
  // Pseudo code:
  // static int _mulAdd(Uint32List x_digits, int xi,
  //                    Uint32List m_digits, int i,
  //                    Uint32List a_digits, int j, int n) {
  //   uint32_t x = x_digits[xi >> 1];  // xi is Smi.
  //   if (x == 0 || n == 0) {
  //     return 1;
  //   }
  //   uint32_t* mip = &m_digits[i >> 1];  // i is Smi.
(...skipping 81 matching lines...)
  __ ldr(R0, Address(R9, 0));
  __ adds(R0, R0, Operand(1));
  __ str(R0, Address(R9, Bigint::kBytesPerDigit, Address::PostIndex));
  __ b(&propagate_carry_loop, CS);

  __ Bind(&done);
  __ mov(R0, Operand(Smi::RawValue(1)));  // One digit processed.
  __ Ret();
}
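
The elided body is the classic multiply-accumulate step of schoolbook
multiplication: one digit x times a run of m digits, added into the
accumulator with carry, then the final carry is rippled upward (the
propagate_carry_loop above). A hedged C++ sketch of the pseudo code's core,
leaving out the Smi index handling:

// Illustrative sketch, not part of this CL: a[0..] += m[0..n-1] * x.
#include <cstdint>
static void MulAdd(uint32_t x, const uint32_t* m, uint32_t* a, int n) {
  uint32_t carry = 0;
  for (int k = 0; k < n; ++k) {
    uint64_t t = static_cast<uint64_t>(m[k]) * x + a[k] + carry;
    a[k] = static_cast<uint32_t>(t);         // Low 32 bits stay in place.
    carry = static_cast<uint32_t>(t >> 32);  // High 32 bits move up.
  }
  for (int k = n; carry != 0; ++k) {  // Propagate the last carry.
    uint64_t t = static_cast<uint64_t>(a[k]) + carry;
    a[k] = static_cast<uint32_t>(t);
    carry = static_cast<uint32_t>(t >> 32);
  }
}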

void Intrinsifier::Bigint_sqrAdd(Assembler* assembler) {
  // Pseudo code:
  // static int _sqrAdd(Uint32List x_digits, int i,
  //                    Uint32List a_digits, int used) {
  //   uint32_t* xip = &x_digits[i >> 1];  // i is Smi.
  //   uint32_t x = *xip++;
  //   if (x == 0) return 1;
  //   uint32_t* ajp = &a_digits[i];  // j == 2*i, i is Smi.
  //   uint32_t aj = *ajp;
  //   uint64_t t = x*x + aj;
(...skipping 93 matching lines...)

  // *ajp = low32(t) = R8
  // *(ajp + 1) = high32(t) = R9
  __ strd(R8, R9, NOTFP, 0);

  __ Bind(&x_zero);
  __ mov(R0, Operand(Smi::RawValue(1)));  // One digit processed.
  __ Ret();
}

void Intrinsifier::Bigint_estQuotientDigit(Assembler* assembler) {
  // No unsigned 64-bit / 32-bit divide instruction.
}

void Intrinsifier::Montgomery_mulMod(Assembler* assembler) {
  // Pseudo code:
  // static int _mulMod(Uint32List args, Uint32List digits, int i) {
  //   uint32_t rho = args[_RHO];  // _RHO == 2.
  //   uint32_t d = digits[i >> 1];  // i is Smi.
  //   uint64_t t = rho*d;
  //   args[_MU] = t mod DIGIT_BASE;  // _MU == 4.
  //   return 1;
  // }

(...skipping 13 matching lines...)
  __ umull(R0, R1, R2, R3);

  // args[4] = t mod DIGIT_BASE = low32(t)
  __ str(R0, FieldAddress(
                 R4, TypedData::data_offset() + 4 * Bigint::kBytesPerDigit));

  __ mov(R0, Operand(Smi::RawValue(1)));  // One digit processed.
  __ Ret();
}
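
Because DIGIT_BASE is 2^32, 't mod DIGIT_BASE' is simply the low register of
the umull result; no division is needed. Restated under that assumption:

// Illustrative sketch, not part of this CL.
#include <cstdint>
static uint32_t MulModDigitBase(uint32_t rho, uint32_t d) {
  return static_cast<uint32_t>(static_cast<uint64_t>(rho) * d);  // low32(t)
}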

// Check if the last argument is a double; jump to label 'is_smi' if smi
// (easy to convert to double), otherwise jump to label 'not_double_smi'.
// Returns the last argument in R0.
static void TestLastArgumentIsDouble(Assembler* assembler,
                                     Label* is_smi,
                                     Label* not_double_smi) {
  __ ldr(R0, Address(SP, 0 * kWordSize));
  __ tst(R0, Operand(kSmiTagMask));
  __ b(is_smi, EQ);
  __ CompareClassId(R0, kDoubleCid, R1);
  __ b(not_double_smi, NE);
  // Fall through with Double in R0.
}

// Both arguments on stack, arg0 (left) is a double, arg1 (right) is of unknown
// type. Return true or false object in the register R0. Any NaN argument
// returns false. Any non-double arg1 causes control flow to fall through to the
// slow case (compiled method body).
static void CompareDoubles(Assembler* assembler, Condition true_condition) {
  if (TargetCPUFeatures::vfp_supported()) {
    Label fall_through, is_smi, double_op;

    TestLastArgumentIsDouble(assembler, &is_smi, &fall_through);
    // Both arguments are double, right operand is in R0.
(...skipping 13 matching lines...)

    __ Bind(&is_smi);  // Convert R0 to a double.
    __ SmiUntag(R0);
    __ vmovsr(S0, R0);
    __ vcvtdi(D1, S0);
    __ b(&double_op);  // Then do the comparison.
    __ Bind(&fall_through);
  }
}

void Intrinsifier::Double_greaterThan(Assembler* assembler) {
  CompareDoubles(assembler, HI);
}

void Intrinsifier::Double_greaterEqualThan(Assembler* assembler) {
  CompareDoubles(assembler, CS);
}

void Intrinsifier::Double_lessThan(Assembler* assembler) {
  CompareDoubles(assembler, CC);
}

void Intrinsifier::Double_equal(Assembler* assembler) {
  CompareDoubles(assembler, EQ);
}

void Intrinsifier::Double_lessEqualThan(Assembler* assembler) {
  CompareDoubles(assembler, LS);
}

// Expects left argument to be double (receiver). Right argument is unknown.
// Both arguments are on stack.
static void DoubleArithmeticOperations(Assembler* assembler, Token::Kind kind) {
  if (TargetCPUFeatures::vfp_supported()) {
    Label fall_through, is_smi, double_op;

    TestLastArgumentIsDouble(assembler, &is_smi, &fall_through);
    // Both arguments are double, right operand is in R0.
    __ LoadDFromOffset(D1, R0, Double::value_offset() - kHeapObjectTag);
    __ Bind(&double_op);
(...skipping 22 matching lines...)
    __ Ret();
    __ Bind(&is_smi);  // Convert R0 to a double.
    __ SmiUntag(R0);
    __ vmovsr(S0, R0);
    __ vcvtdi(D1, S0);
    __ b(&double_op);
    __ Bind(&fall_through);
  }
}

void Intrinsifier::Double_add(Assembler* assembler) {
  DoubleArithmeticOperations(assembler, Token::kADD);
}

void Intrinsifier::Double_mul(Assembler* assembler) {
  DoubleArithmeticOperations(assembler, Token::kMUL);
}

void Intrinsifier::Double_sub(Assembler* assembler) {
  DoubleArithmeticOperations(assembler, Token::kSUB);
}

void Intrinsifier::Double_div(Assembler* assembler) {
  DoubleArithmeticOperations(assembler, Token::kDIV);
}

// Left is double, right is integer (Bigint, Mint or Smi).
void Intrinsifier::Double_mulFromInteger(Assembler* assembler) {
  if (TargetCPUFeatures::vfp_supported()) {
    Label fall_through;
    // Only smis allowed.
    __ ldr(R0, Address(SP, 0 * kWordSize));
    __ tst(R0, Operand(kSmiTagMask));
    __ b(&fall_through, NE);
    // Is Smi.
    __ SmiUntag(R0);
    __ vmovsr(S0, R0);
    __ vcvtdi(D1, S0);
    __ ldr(R0, Address(SP, 1 * kWordSize));
    __ LoadDFromOffset(D0, R0, Double::value_offset() - kHeapObjectTag);
    __ vmuld(D0, D0, D1);
    const Class& double_class =
        Class::Handle(Isolate::Current()->object_store()->double_class());
    __ TryAllocate(double_class, &fall_through, R0, R1);  // Result register.
    __ StoreDToOffset(D0, R0, Double::value_offset() - kHeapObjectTag);
    __ Ret();
    __ Bind(&fall_through);
  }
}

void Intrinsifier::DoubleFromInteger(Assembler* assembler) {
  if (TargetCPUFeatures::vfp_supported()) {
    Label fall_through;

    __ ldr(R0, Address(SP, 0 * kWordSize));
    __ tst(R0, Operand(kSmiTagMask));
    __ b(&fall_through, NE);
    // Is Smi.
    __ SmiUntag(R0);
    __ vmovsr(S0, R0);
    __ vcvtdi(D0, S0);
    const Class& double_class =
        Class::Handle(Isolate::Current()->object_store()->double_class());
    __ TryAllocate(double_class, &fall_through, R0, R1);  // Result register.
    __ StoreDToOffset(D0, R0, Double::value_offset() - kHeapObjectTag);
    __ Ret();
    __ Bind(&fall_through);
  }
}

void Intrinsifier::Double_getIsNaN(Assembler* assembler) {
  if (TargetCPUFeatures::vfp_supported()) {
    Label is_true;
    __ ldr(R0, Address(SP, 0 * kWordSize));
    __ LoadDFromOffset(D0, R0, Double::value_offset() - kHeapObjectTag);
    __ vcmpd(D0, D0);
    __ vmstat();
    __ LoadObject(R0, Bool::False(), VC);
    __ LoadObject(R0, Bool::True(), VS);
    __ Ret();
  }
}
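
vcmpd(D0, D0) relies on the IEEE rule that NaN compares unordered even with
itself; after vmstat, VS (the overflow flag) signals the unordered result.
The portable equivalent is the classic self-comparison:

// Illustrative sketch, not part of this CL.
static bool IsNaN(double x) {
  return x != x;  // Only NaN fails to equal itself.
}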

void Intrinsifier::Double_getIsInfinite(Assembler* assembler) {
  if (TargetCPUFeatures::vfp_supported()) {
    __ ldr(R0, Address(SP, 0 * kWordSize));
    // R1 <- value[0:31], R2 <- value[32:63]
    __ LoadFieldFromOffset(kWord, R1, R0, Double::value_offset());
    __ LoadFieldFromOffset(kWord, R2, R0, Double::value_offset() + kWordSize);

    // If the low word isn't 0, then it isn't infinity.
    __ cmp(R1, Operand(0));
    __ LoadObject(R0, Bool::False(), NE);
    __ bx(LR, NE);  // Return if NE.

    // Mask off the sign bit.
    __ AndImmediate(R2, R2, 0x7FFFFFFF);
    // Compare with +infinity.
    __ CompareImmediate(R2, 0x7FF00000);
    __ LoadObject(R0, Bool::False(), NE);
    __ bx(LR, NE);

    __ LoadObject(R0, Bool::True());
    __ Ret();
  }
}
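
An IEEE-754 double is infinite exactly when its mantissa is zero and its
exponent is all ones, i.e. the low word is 0 and the high word, sign
stripped, is 0x7FF00000. A hedged C++ restatement of the same bit test:

// Illustrative sketch, not part of this CL.
#include <cstdint>
#include <cstring>
static bool IsInfinite(double x) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof(bits));  // Portable bit cast.
  const uint32_t lo = static_cast<uint32_t>(bits);
  const uint32_t hi = static_cast<uint32_t>(bits >> 32) & 0x7FFFFFFF;
  return lo == 0 && hi == 0x7FF00000;
}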
1459 1394
1460
1461 void Intrinsifier::Double_getIsNegative(Assembler* assembler) { 1395 void Intrinsifier::Double_getIsNegative(Assembler* assembler) {
1462 if (TargetCPUFeatures::vfp_supported()) { 1396 if (TargetCPUFeatures::vfp_supported()) {
1463 Label is_false, is_true, is_zero; 1397 Label is_false, is_true, is_zero;
1464 __ ldr(R0, Address(SP, 0 * kWordSize)); 1398 __ ldr(R0, Address(SP, 0 * kWordSize));
1465 __ LoadDFromOffset(D0, R0, Double::value_offset() - kHeapObjectTag); 1399 __ LoadDFromOffset(D0, R0, Double::value_offset() - kHeapObjectTag);
1466 __ vcmpdz(D0); 1400 __ vcmpdz(D0);
1467 __ vmstat(); 1401 __ vmstat();
1468 __ b(&is_false, VS); // NaN -> false. 1402 __ b(&is_false, VS); // NaN -> false.
1469 __ b(&is_zero, EQ); // Check for negative zero. 1403 __ b(&is_zero, EQ); // Check for negative zero.
1470 __ b(&is_false, CS); // >= 0 -> false. 1404 __ b(&is_false, CS); // >= 0 -> false.
1471 1405
1472 __ Bind(&is_true); 1406 __ Bind(&is_true);
1473 __ LoadObject(R0, Bool::True()); 1407 __ LoadObject(R0, Bool::True());
1474 __ Ret(); 1408 __ Ret();
1475 1409
1476 __ Bind(&is_false); 1410 __ Bind(&is_false);
1477 __ LoadObject(R0, Bool::False()); 1411 __ LoadObject(R0, Bool::False());
1478 __ Ret(); 1412 __ Ret();
1479 1413
1480 __ Bind(&is_zero); 1414 __ Bind(&is_zero);
1481 // Check for negative zero by looking at the sign bit. 1415 // Check for negative zero by looking at the sign bit.
1482 __ vmovrrd(R0, R1, D0); // R1:R0 <- D0, so sign bit is in bit 31 of R1. 1416 __ vmovrrd(R0, R1, D0); // R1:R0 <- D0, so sign bit is in bit 31 of R1.
1483 __ mov(R1, Operand(R1, LSR, 31)); 1417 __ mov(R1, Operand(R1, LSR, 31));
1484 __ tst(R1, Operand(1)); 1418 __ tst(R1, Operand(1));
1485 __ b(&is_true, NE); // Sign bit set. 1419 __ b(&is_true, NE); // Sign bit set.
1486 __ b(&is_false); 1420 __ b(&is_false);
1487 } 1421 }
1488 } 1422 }
1489 1423
1490
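The branch structure above is the usual three-way split for this predicate: NaN is never negative, a strictly negative value always is, and zero needs a second look because -0.0 compares equal to 0.0 and only its sign bit tells it apart. Roughly, in C++:

    #include <cmath>

    bool IsNegative(double d) {
      if (std::isnan(d)) return false;       // VS branch: NaN -> false
      if (d == 0.0) return std::signbit(d);  // EQ branch: -0.0 has the sign bit set
      return d < 0.0;                        // CS branch handles >= 0
    }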
1491 void Intrinsifier::DoubleToInteger(Assembler* assembler) { 1424 void Intrinsifier::DoubleToInteger(Assembler* assembler) {
1492 if (TargetCPUFeatures::vfp_supported()) { 1425 if (TargetCPUFeatures::vfp_supported()) {
1493 Label fall_through; 1426 Label fall_through;
1494 1427
1495 __ ldr(R0, Address(SP, 0 * kWordSize)); 1428 __ ldr(R0, Address(SP, 0 * kWordSize));
1496 __ LoadDFromOffset(D0, R0, Double::value_offset() - kHeapObjectTag); 1429 __ LoadDFromOffset(D0, R0, Double::value_offset() - kHeapObjectTag);
1497 1430
1498 // Explicit NaN check, since ARM gives an FPU exception if you try to 1431 // Explicit NaN check, since ARM gives an FPU exception if you try to
1499 // convert NaN to an int. 1432 // convert NaN to an int.
1500 __ vcmpd(D0, D0); 1433 __ vcmpd(D0, D0);
1501 __ vmstat(); 1434 __ vmstat();
1502 __ b(&fall_through, VS); 1435 __ b(&fall_through, VS);
1503 1436
1504 __ vcvtid(S0, D0); 1437 __ vcvtid(S0, D0);
1505 __ vmovrs(R0, S0); 1438 __ vmovrs(R0, S0);
1506 // Overflow is signaled with minint. 1439 // Overflow is signaled with minint.
1507 // Check for overflow and that it fits into Smi. 1440 // Check for overflow and that it fits into Smi.
1508 __ CompareImmediate(R0, 0xC0000000); 1441 __ CompareImmediate(R0, 0xC0000000);
1509 __ SmiTag(R0, PL); 1442 __ SmiTag(R0, PL);
1510 __ bx(LR, PL); 1443 __ bx(LR, PL);
1511 __ Bind(&fall_through); 1444 __ Bind(&fall_through);
1512 } 1445 }
1513 } 1446 }
1514 1447
1515
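Two details in DoubleToInteger deserve spelling out. First, vcvtid reports an out-of-range conversion by producing INT32_MIN, so that value must be rejected. Second, a Smi on a 32-bit target keeps only 31 payload bits, so the result must lie in [-2^30, 2^30). Comparing against 0xC0000000 (-2^30 as a signed word) and branching on PL covers both cases at once, since INT32_MIN also fails the test. A sketch in C++ of the predicate the flags encode:

    #include <cstdint>

    bool FitsInSmi32(int32_t v) {
      // (v + 2^30) < 2^31 in unsigned arithmetic <=> -2^30 <= v < 2^30;
      // INT32_MIN (the vcvtid overflow marker) fails this test as well.
      return static_cast<uint32_t>(v) + 0x40000000u < 0x80000000u;
    }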
1516 void Intrinsifier::MathSqrt(Assembler* assembler) { 1448 void Intrinsifier::MathSqrt(Assembler* assembler) {
1517 if (TargetCPUFeatures::vfp_supported()) { 1449 if (TargetCPUFeatures::vfp_supported()) {
1518 Label fall_through, is_smi, double_op; 1450 Label fall_through, is_smi, double_op;
1519 TestLastArgumentIsDouble(assembler, &is_smi, &fall_through); 1451 TestLastArgumentIsDouble(assembler, &is_smi, &fall_through);
1520 // Argument is double and is in R0. 1452 // Argument is double and is in R0.
1521 __ LoadDFromOffset(D1, R0, Double::value_offset() - kHeapObjectTag); 1453 __ LoadDFromOffset(D1, R0, Double::value_offset() - kHeapObjectTag);
1522 __ Bind(&double_op); 1454 __ Bind(&double_op);
1523 __ vsqrtd(D0, D1); 1455 __ vsqrtd(D0, D1);
1524 const Class& double_class = 1456 const Class& double_class =
1525 Class::Handle(Isolate::Current()->object_store()->double_class()); 1457 Class::Handle(Isolate::Current()->object_store()->double_class());
1526 __ TryAllocate(double_class, &fall_through, R0, R1); // Result register. 1458 __ TryAllocate(double_class, &fall_through, R0, R1); // Result register.
1527 __ StoreDToOffset(D0, R0, Double::value_offset() - kHeapObjectTag); 1459 __ StoreDToOffset(D0, R0, Double::value_offset() - kHeapObjectTag);
1528 __ Ret(); 1460 __ Ret();
1529 __ Bind(&is_smi); 1461 __ Bind(&is_smi);
1530 __ SmiUntag(R0); 1462 __ SmiUntag(R0);
1531 __ vmovsr(S0, R0); 1463 __ vmovsr(S0, R0);
1532 __ vcvtdi(D1, S0); 1464 __ vcvtdi(D1, S0);
1533 __ b(&double_op); 1465 __ b(&double_op);
1534 __ Bind(&fall_through); 1466 __ Bind(&fall_through);
1535 } 1467 }
1536 } 1468 }
1537 1469
1538

1539 // var state = ((_A * (_state[kSTATE_LO])) + _state[kSTATE_HI]) & _MASK_64; 1470 // var state = ((_A * (_state[kSTATE_LO])) + _state[kSTATE_HI]) & _MASK_64;
1540 // _state[kSTATE_LO] = state & _MASK_32; 1471 // _state[kSTATE_LO] = state & _MASK_32;
1541 // _state[kSTATE_HI] = state >> 32; 1472 // _state[kSTATE_HI] = state >> 32;
1542 void Intrinsifier::Random_nextState(Assembler* assembler) { 1473 void Intrinsifier::Random_nextState(Assembler* assembler) {
1543 const Library& math_lib = Library::Handle(Library::MathLibrary()); 1474 const Library& math_lib = Library::Handle(Library::MathLibrary());
1544 ASSERT(!math_lib.IsNull()); 1475 ASSERT(!math_lib.IsNull());
1545 const Class& random_class = 1476 const Class& random_class =
1546 Class::Handle(math_lib.LookupClassAllowPrivate(Symbols::_Random())); 1477 Class::Handle(math_lib.LookupClassAllowPrivate(Symbols::_Random()));
1547 ASSERT(!random_class.IsNull()); 1478 ASSERT(!random_class.IsNull());
1548 const Field& state_field = Field::ZoneHandle( 1479 const Field& state_field = Field::ZoneHandle(
(...skipping 28 matching lines...)
1577 __ LoadFromOffset(kWord, R2, R1, disp_0 - kHeapObjectTag); 1508 __ LoadFromOffset(kWord, R2, R1, disp_0 - kHeapObjectTag);
1578 __ LoadFromOffset(kWord, R3, R1, disp_1 - kHeapObjectTag); 1509 __ LoadFromOffset(kWord, R3, R1, disp_1 - kHeapObjectTag);
1579 __ mov(R8, Operand(0)); // Zero extend unsigned _state[kSTATE_HI]. 1510 __ mov(R8, Operand(0)); // Zero extend unsigned _state[kSTATE_HI].
1580 // Unsigned 32-bit multiply and 64-bit accumulate into R8:R3. 1511 // Unsigned 32-bit multiply and 64-bit accumulate into R8:R3.
1581 __ umlal(R3, R8, R0, R2); // R8:R3 <- R8:R3 + R0 * R2. 1512 __ umlal(R3, R8, R0, R2); // R8:R3 <- R8:R3 + R0 * R2.
1582 __ StoreToOffset(kWord, R3, R1, disp_0 - kHeapObjectTag); 1513 __ StoreToOffset(kWord, R3, R1, disp_0 - kHeapObjectTag);
1583 __ StoreToOffset(kWord, R8, R1, disp_1 - kHeapObjectTag); 1514 __ StoreToOffset(kWord, R8, R1, disp_1 - kHeapObjectTag);
1584 __ Ret(); 1515 __ Ret();
1585 } 1516 }
1586 1517
1587
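The pseudocode comment before Random_nextState describes one step of a 64-bit multiply-with-carry style update: the new state is A * lo + hi, and its 32-bit halves are stored back separately. That is exactly what the single umlal (unsigned multiply-accumulate-long) computes into the R8:R3 register pair. A C++ sketch, with the parameter A standing in for the VM's _A constant loaded in the elided lines:

    #include <cstdint>

    void NextState(uint32_t* lo, uint32_t* hi, uint32_t A) {
      uint64_t state = static_cast<uint64_t>(A) * (*lo) + (*hi);  // umlal: R8:R3 += R0 * R2
      *lo = static_cast<uint32_t>(state);        // _state[kSTATE_LO] = state & _MASK_32
      *hi = static_cast<uint32_t>(state >> 32);  // _state[kSTATE_HI] = state >> 32
    }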
1588 void Intrinsifier::ObjectEquals(Assembler* assembler) { 1518 void Intrinsifier::ObjectEquals(Assembler* assembler) {
1589 __ ldr(R0, Address(SP, 0 * kWordSize)); 1519 __ ldr(R0, Address(SP, 0 * kWordSize));
1590 __ ldr(R1, Address(SP, 1 * kWordSize)); 1520 __ ldr(R1, Address(SP, 1 * kWordSize));
1591 __ cmp(R0, Operand(R1)); 1521 __ cmp(R0, Operand(R1));
1592 __ LoadObject(R0, Bool::False(), NE); 1522 __ LoadObject(R0, Bool::False(), NE);
1593 __ LoadObject(R0, Bool::True(), EQ); 1523 __ LoadObject(R0, Bool::True(), EQ);
1594 __ Ret(); 1524 __ Ret();
1595 } 1525 }
1596 1526
1597
1598 static void RangeCheck(Assembler* assembler, 1527 static void RangeCheck(Assembler* assembler,
1599 Register val, 1528 Register val,
1600 Register tmp, 1529 Register tmp,
1601 intptr_t low, 1530 intptr_t low,
1602 intptr_t high, 1531 intptr_t high,
1603 Condition cc, 1532 Condition cc,
1604 Label* target) { 1533 Label* target) {
1605 __ AddImmediate(tmp, val, -low); 1534 __ AddImmediate(tmp, val, -low);
1606 __ CompareImmediate(tmp, high - low); 1535 __ CompareImmediate(tmp, high - low);
1607 __ b(target, cc); 1536 __ b(target, cc);
1608 } 1537 }
1609 1538
1610
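RangeCheck uses the classic unsigned-subtraction trick: after computing val - low, one unsigned comparison against high - low decides whether val lies in [low, high]. Anything below low wraps around to a huge unsigned value and fails, so LS means in range and HI means out of range, with no second branch for the lower bound. The equivalent C++ predicate, as a sketch:

    #include <cstdint>

    bool InRange(intptr_t val, intptr_t low, intptr_t high) {
      // Unsigned wraparound makes one compare cover both bounds.
      return static_cast<uintptr_t>(val) - static_cast<uintptr_t>(low) <=
             static_cast<uintptr_t>(high) - static_cast<uintptr_t>(low);
    }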
1611 const Condition kIfNotInRange = HI; 1539 const Condition kIfNotInRange = HI;
1612 const Condition kIfInRange = LS; 1540 const Condition kIfInRange = LS;
1613 1541
1614
1615 static void JumpIfInteger(Assembler* assembler, 1542 static void JumpIfInteger(Assembler* assembler,
1616 Register cid, 1543 Register cid,
1617 Register tmp, 1544 Register tmp,
1618 Label* target) { 1545 Label* target) {
1619 RangeCheck(assembler, cid, tmp, kSmiCid, kBigintCid, kIfInRange, target); 1546 RangeCheck(assembler, cid, tmp, kSmiCid, kBigintCid, kIfInRange, target);
1620 } 1547 }
1621 1548
1622
1623 static void JumpIfNotInteger(Assembler* assembler, 1549 static void JumpIfNotInteger(Assembler* assembler,
1624 Register cid, 1550 Register cid,
1625 Register tmp, 1551 Register tmp,
1626 Label* target) { 1552 Label* target) {
1627 RangeCheck(assembler, cid, tmp, kSmiCid, kBigintCid, kIfNotInRange, target); 1553 RangeCheck(assembler, cid, tmp, kSmiCid, kBigintCid, kIfNotInRange, target);
1628 } 1554 }
1629 1555
1630
1631 static void JumpIfString(Assembler* assembler, 1556 static void JumpIfString(Assembler* assembler,
1632 Register cid, 1557 Register cid,
1633 Register tmp, 1558 Register tmp,
1634 Label* target) { 1559 Label* target) {
1635 RangeCheck(assembler, cid, tmp, kOneByteStringCid, kExternalTwoByteStringCid, 1560 RangeCheck(assembler, cid, tmp, kOneByteStringCid, kExternalTwoByteStringCid,
1636 kIfInRange, target); 1561 kIfInRange, target);
1637 } 1562 }
1638 1563
1639
1640 static void JumpIfNotString(Assembler* assembler, 1564 static void JumpIfNotString(Assembler* assembler,
1641 Register cid, 1565 Register cid,
1642 Register tmp, 1566 Register tmp,
1643 Label* target) { 1567 Label* target) {
1644 RangeCheck(assembler, cid, tmp, kOneByteStringCid, kExternalTwoByteStringCid, 1568 RangeCheck(assembler, cid, tmp, kOneByteStringCid, kExternalTwoByteStringCid,
1645 kIfNotInRange, target); 1569 kIfNotInRange, target);
1646 } 1570 }
1647 1571
1648
1649 // Return type quickly for simple types (not parameterized and not signature). 1572 // Return type quickly for simple types (not parameterized and not signature).
1650 void Intrinsifier::ObjectRuntimeType(Assembler* assembler) { 1573 void Intrinsifier::ObjectRuntimeType(Assembler* assembler) {
1651 Label fall_through, use_canonical_type, not_double, not_integer; 1574 Label fall_through, use_canonical_type, not_double, not_integer;
1652 __ ldr(R0, Address(SP, 0 * kWordSize)); 1575 __ ldr(R0, Address(SP, 0 * kWordSize));
1653 __ LoadClassIdMayBeSmi(R1, R0); 1576 __ LoadClassIdMayBeSmi(R1, R0);
1654 1577
1655 __ CompareImmediate(R1, kClosureCid); 1578 __ CompareImmediate(R1, kClosureCid);
1656 __ b(&fall_through, EQ); // Instance is a closure. 1579 __ b(&fall_through, EQ); // Instance is a closure.
1657 1580
1658 __ CompareImmediate(R1, kNumPredefinedCids); 1581 __ CompareImmediate(R1, kNumPredefinedCids);
(...skipping 28 matching lines...)
1687 __ b(&fall_through, NE); 1610 __ b(&fall_through, NE);
1688 1611
1689 __ ldr(R0, FieldAddress(R2, Class::canonical_type_offset())); 1612 __ ldr(R0, FieldAddress(R2, Class::canonical_type_offset()));
1690 __ CompareObject(R0, Object::null_object()); 1613 __ CompareObject(R0, Object::null_object());
1691 __ b(&fall_through, EQ); 1614 __ b(&fall_through, EQ);
1692 __ Ret(); 1615 __ Ret();
1693 1616
1694 __ Bind(&fall_through); 1617 __ Bind(&fall_through);
1695 } 1618 }
1696 1619
1697
1698 void Intrinsifier::ObjectHaveSameRuntimeType(Assembler* assembler) { 1620 void Intrinsifier::ObjectHaveSameRuntimeType(Assembler* assembler) {
1699 Label fall_through, different_cids, equal, not_equal, not_integer; 1621 Label fall_through, different_cids, equal, not_equal, not_integer;
1700 __ ldr(R0, Address(SP, 0 * kWordSize)); 1622 __ ldr(R0, Address(SP, 0 * kWordSize));
1701 __ LoadClassIdMayBeSmi(R1, R0); 1623 __ LoadClassIdMayBeSmi(R1, R0);
1702 1624
1703 // Check if the left-hand side is a closure. Closures are handled in the runtime. 1625 // Check if the left-hand side is a closure. Closures are handled in the runtime.
1704 __ CompareImmediate(R1, kClosureCid); 1626 __ CompareImmediate(R1, kClosureCid);
1705 __ b(&fall_through, EQ); 1627 __ b(&fall_through, EQ);
1706 1628
1707 __ ldr(R0, Address(SP, 1 * kWordSize)); 1629 __ ldr(R0, Address(SP, 1 * kWordSize));
(...skipping 34 matching lines...)
1742 JumpIfString(assembler, R2, R0, &equal); 1664 JumpIfString(assembler, R2, R0, &equal);
1743 1665
1744 // Neither strings nor integers, and the class ids differ. 1666 // Neither strings nor integers, and the class ids differ.
1745 __ Bind(&not_equal); 1667 __ Bind(&not_equal);
1746 __ LoadObject(R0, Bool::False()); 1668 __ LoadObject(R0, Bool::False());
1747 __ Ret(); 1669 __ Ret();
1748 1670
1749 __ Bind(&fall_through); 1671 __ Bind(&fall_through);
1750 } 1672 }
1751 1673
1752
1753 void Intrinsifier::String_getHashCode(Assembler* assembler) { 1674 void Intrinsifier::String_getHashCode(Assembler* assembler) {
1754 __ ldr(R0, Address(SP, 0 * kWordSize)); 1675 __ ldr(R0, Address(SP, 0 * kWordSize));
1755 __ ldr(R0, FieldAddress(R0, String::hash_offset())); 1676 __ ldr(R0, FieldAddress(R0, String::hash_offset()));
1756 __ cmp(R0, Operand(0)); 1677 __ cmp(R0, Operand(0));
1757 __ bx(LR, NE); // Return if hash is already computed. 1678 __ bx(LR, NE); // Return if hash is already computed.
1758 } 1679 }
1759 1680
1760
1761 void GenerateSubstringMatchesSpecialization(Assembler* assembler, 1681 void GenerateSubstringMatchesSpecialization(Assembler* assembler,
1762 intptr_t receiver_cid, 1682 intptr_t receiver_cid,
1763 intptr_t other_cid, 1683 intptr_t other_cid,
1764 Label* return_true, 1684 Label* return_true,
1765 Label* return_false) { 1685 Label* return_false) {
1766 __ SmiUntag(R1); 1686 __ SmiUntag(R1);
1767 __ ldr(R8, FieldAddress(R0, String::length_offset())); // this.length 1687 __ ldr(R8, FieldAddress(R0, String::length_offset())); // this.length
1768 __ SmiUntag(R8); 1688 __ SmiUntag(R8);
1769 __ ldr(R9, FieldAddress(R2, String::length_offset())); // other.length 1689 __ ldr(R9, FieldAddress(R2, String::length_offset())); // other.length
1770 __ SmiUntag(R9); 1690 __ SmiUntag(R9);
(...skipping 50 matching lines...)
1821 // i++, while (i < len) 1741 // i++, while (i < len)
1822 __ AddImmediate(R3, 1); 1742 __ AddImmediate(R3, 1);
1823 __ AddImmediate(R0, receiver_cid == kOneByteStringCid ? 1 : 2); 1743 __ AddImmediate(R0, receiver_cid == kOneByteStringCid ? 1 : 2);
1824 __ AddImmediate(R2, other_cid == kOneByteStringCid ? 1 : 2); 1744 __ AddImmediate(R2, other_cid == kOneByteStringCid ? 1 : 2);
1825 __ cmp(R3, Operand(R9)); 1745 __ cmp(R3, Operand(R9));
1826 __ b(&loop, LT); 1746 __ b(&loop, LT);
1827 1747
1828 __ b(return_true); 1748 __ b(return_true);
1829 } 1749 }
1830 1750
1831
1832 // bool _substringMatches(int start, String other) 1751 // bool _substringMatches(int start, String other)
1833 // This intrinsic handles a OneByteString or TwoByteString receiver with a 1752 // This intrinsic handles a OneByteString or TwoByteString receiver with a
1834 // OneByteString other. 1753 // OneByteString other.
1835 void Intrinsifier::StringBaseSubstringMatches(Assembler* assembler) { 1754 void Intrinsifier::StringBaseSubstringMatches(Assembler* assembler) {
1836 Label fall_through, return_true, return_false, try_two_byte; 1755 Label fall_through, return_true, return_false, try_two_byte;
1837 __ ldr(R0, Address(SP, 2 * kWordSize)); // this 1756 __ ldr(R0, Address(SP, 2 * kWordSize)); // this
1838 __ ldr(R1, Address(SP, 1 * kWordSize)); // start 1757 __ ldr(R1, Address(SP, 1 * kWordSize)); // start
1839 __ ldr(R2, Address(SP, 0 * kWordSize)); // other 1758 __ ldr(R2, Address(SP, 0 * kWordSize)); // other
1840 __ Push(R4); // Make ARGS_DESC_REG available. 1759 __ Push(R4); // Make ARGS_DESC_REG available.
1841 1760
(...skipping 25 matching lines...)
1867 1786
1868 __ Bind(&return_false); 1787 __ Bind(&return_false);
1869 __ Pop(R4); 1788 __ Pop(R4);
1870 __ LoadObject(R0, Bool::False()); 1789 __ LoadObject(R0, Bool::False());
1871 __ Ret(); 1790 __ Ret();
1872 1791
1873 __ Bind(&fall_through); 1792 __ Bind(&fall_through);
1874 __ Pop(R4); 1793 __ Pop(R4);
1875 } 1794 }
1876 1795
1877
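GenerateSubstringMatchesSpecialization is the template behind the fast paths above: untag the start index, range-check it against the two lengths, then compare one code unit per iteration, stepping one byte for a OneByteString operand and two for a TwoByteString. A rough C++ sketch of the emitted logic, using raw code-unit arrays purely for illustration:

    #include <cstddef>

    template <typename RecvChar, typename OtherChar>
    bool SubstringMatches(const RecvChar* s, size_t s_len, size_t start,
                          const OtherChar* other, size_t other_len) {
      if (start > s_len || other_len > s_len - start) return false;  // range check
      for (size_t i = 0; i < other_len; i++) {
        if (s[start + i] != other[i]) return false;  // first mismatching code unit
      }
      return true;  // the return_true label
    }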
1878 void Intrinsifier::Object_getHash(Assembler* assembler) { 1796 void Intrinsifier::Object_getHash(Assembler* assembler) {
1879 UNREACHABLE(); 1797 UNREACHABLE();
1880 } 1798 }
1881 1799
1882
1883 void Intrinsifier::Object_setHash(Assembler* assembler) { 1800 void Intrinsifier::Object_setHash(Assembler* assembler) {
1884 UNREACHABLE(); 1801 UNREACHABLE();
1885 } 1802 }
1886 1803
1887
1888 void Intrinsifier::StringBaseCharAt(Assembler* assembler) { 1804 void Intrinsifier::StringBaseCharAt(Assembler* assembler) {
1889 Label fall_through, try_two_byte_string; 1805 Label fall_through, try_two_byte_string;
1890 1806
1891 __ ldr(R1, Address(SP, 0 * kWordSize)); // Index. 1807 __ ldr(R1, Address(SP, 0 * kWordSize)); // Index.
1892 __ ldr(R0, Address(SP, 1 * kWordSize)); // String. 1808 __ ldr(R0, Address(SP, 1 * kWordSize)); // String.
1893 __ tst(R1, Operand(kSmiTagMask)); 1809 __ tst(R1, Operand(kSmiTagMask));
1894 __ b(&fall_through, NE); // Index is not a Smi. 1810 __ b(&fall_through, NE); // Index is not a Smi.
1895 // Range check. 1811 // Range check.
1896 __ ldr(R2, FieldAddress(R0, String::length_offset())); 1812 __ ldr(R2, FieldAddress(R0, String::length_offset()));
1897 __ cmp(R1, Operand(R2)); 1813 __ cmp(R1, Operand(R2));
(...skipping 20 matching lines...)
1918 __ CompareImmediate(R1, Symbols::kNumberOfOneCharCodeSymbols); 1834 __ CompareImmediate(R1, Symbols::kNumberOfOneCharCodeSymbols);
1919 __ b(&fall_through, GE); 1835 __ b(&fall_through, GE);
1920 __ ldr(R0, Address(THR, Thread::predefined_symbols_address_offset())); 1836 __ ldr(R0, Address(THR, Thread::predefined_symbols_address_offset()));
1921 __ AddImmediate(R0, Symbols::kNullCharCodeSymbolOffset * kWordSize); 1837 __ AddImmediate(R0, Symbols::kNullCharCodeSymbolOffset * kWordSize);
1922 __ ldr(R0, Address(R0, R1, LSL, 2)); 1838 __ ldr(R0, Address(R0, R1, LSL, 2));
1923 __ Ret(); 1839 __ Ret();
1924 1840
1925 __ Bind(&fall_through); 1841 __ Bind(&fall_through);
1926 } 1842 }
1927 1843
1928
1929 void Intrinsifier::StringBaseIsEmpty(Assembler* assembler) { 1844 void Intrinsifier::StringBaseIsEmpty(Assembler* assembler) {
1930 __ ldr(R0, Address(SP, 0 * kWordSize)); 1845 __ ldr(R0, Address(SP, 0 * kWordSize));
1931 __ ldr(R0, FieldAddress(R0, String::length_offset())); 1846 __ ldr(R0, FieldAddress(R0, String::length_offset()));
1932 __ cmp(R0, Operand(Smi::RawValue(0))); 1847 __ cmp(R0, Operand(Smi::RawValue(0)));
1933 __ LoadObject(R0, Bool::True(), EQ); 1848 __ LoadObject(R0, Bool::True(), EQ);
1934 __ LoadObject(R0, Bool::False(), NE); 1849 __ LoadObject(R0, Bool::False(), NE);
1935 __ Ret(); 1850 __ Ret();
1936 } 1851 }
1937 1852
1938
1939 void Intrinsifier::OneByteString_getHashCode(Assembler* assembler) { 1853 void Intrinsifier::OneByteString_getHashCode(Assembler* assembler) {
1940 __ ldr(R1, Address(SP, 0 * kWordSize)); 1854 __ ldr(R1, Address(SP, 0 * kWordSize));
1941 __ ldr(R0, FieldAddress(R1, String::hash_offset())); 1855 __ ldr(R0, FieldAddress(R1, String::hash_offset()));
1942 __ cmp(R0, Operand(0)); 1856 __ cmp(R0, Operand(0));
1943 __ bx(LR, NE); // Return if already computed. 1857 __ bx(LR, NE); // Return if already computed.
1944 1858
1945 __ ldr(R2, FieldAddress(R1, String::length_offset())); 1859 __ ldr(R2, FieldAddress(R1, String::length_offset()));
1946 1860
1947 Label done; 1861 Label done;
1948 // If the string is empty, set the hash to 1, and return. 1862 // If the string is empty, set the hash to 1, and return.
(...skipping 38 matching lines...)
1987 __ and_(R0, R0, Operand(R2)); 1901 __ and_(R0, R0, Operand(R2));
1988 __ cmp(R0, Operand(0)); 1902 __ cmp(R0, Operand(0));
1989 // return hash_ == 0 ? 1 : hash_; 1903 // return hash_ == 0 ? 1 : hash_;
1990 __ Bind(&done); 1904 __ Bind(&done);
1991 __ mov(R0, Operand(1), EQ); 1905 __ mov(R0, Operand(1), EQ);
1992 __ SmiTag(R0); 1906 __ SmiTag(R0);
1993 __ StoreIntoSmiField(FieldAddress(R1, String::hash_offset()), R0); 1907 __ StoreIntoSmiField(FieldAddress(R1, String::hash_offset()), R0);
1994 __ Ret(); 1908 __ Ret();
1995 } 1909 }
1996 1910
1997
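The visible tail of the hash routine masks the accumulated value down to the bits that fit in the hash field and then substitutes 1 when the masked result is 0, because a stored hash of 0 is the "not yet computed" sentinel (the same reason the empty string hashes to 1). A C++ sketch of just the finalization, since the accumulation loop sits in the elided lines:

    #include <cstdint>

    uint32_t FinalizeHash(uint32_t hash, uint32_t hash_mask) {
      hash &= hash_mask;            // keep only the bits the hash field can store
      return hash == 0 ? 1 : hash;  // 0 is reserved to mean "no hash cached"
    }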
1998 // Allocates one-byte string of length 'end - start'. The content is not 1911 // Allocates one-byte string of length 'end - start'. The content is not
1999 // initialized. 1912 // initialized.
2000 // 'length-reg' (R2) contains tagged length. 1913 // 'length-reg' (R2) contains tagged length.
2001 // Returns new string as tagged pointer in R0. 1914 // Returns new string as tagged pointer in R0.
2002 static void TryAllocateOnebyteString(Assembler* assembler, 1915 static void TryAllocateOnebyteString(Assembler* assembler,
2003 Label* ok, 1916 Label* ok,
2004 Label* failure) { 1917 Label* failure) {
2005 const Register length_reg = R2; 1918 const Register length_reg = R2;
2006 Label fail; 1919 Label fail;
2007 NOT_IN_PRODUCT(__ MaybeTraceAllocation(kOneByteStringCid, R0, failure)); 1920 NOT_IN_PRODUCT(__ MaybeTraceAllocation(kOneByteStringCid, R0, failure));
(...skipping 55 matching lines...)
2063 __ LoadImmediate(TMP, 0); 1976 __ LoadImmediate(TMP, 0);
2064 __ StoreIntoObjectNoBarrier(R0, FieldAddress(R0, String::hash_offset()), TMP); 1977 __ StoreIntoObjectNoBarrier(R0, FieldAddress(R0, String::hash_offset()), TMP);
2065 1978
2066 NOT_IN_PRODUCT(__ IncrementAllocationStatsWithSize(R4, R2, space)); 1979 NOT_IN_PRODUCT(__ IncrementAllocationStatsWithSize(R4, R2, space));
2067 __ b(ok); 1980 __ b(ok);
2068 1981
2069 __ Bind(&fail); 1982 __ Bind(&fail);
2070 __ b(failure); 1983 __ b(failure);
2071 } 1984 }
2072 1985
2073
2074 // Arg0: OneByteString (receiver). 1986 // Arg0: OneByteString (receiver).
2075 // Arg1: Start index as Smi. 1987 // Arg1: Start index as Smi.
2076 // Arg2: End index as Smi. 1988 // Arg2: End index as Smi.
2077 // The indexes must be valid. 1989 // The indexes must be valid.
2078 void Intrinsifier::OneByteString_substringUnchecked(Assembler* assembler) { 1990 void Intrinsifier::OneByteString_substringUnchecked(Assembler* assembler) {
2079 const intptr_t kStringOffset = 2 * kWordSize; 1991 const intptr_t kStringOffset = 2 * kWordSize;
2080 const intptr_t kStartIndexOffset = 1 * kWordSize; 1992 const intptr_t kStartIndexOffset = 1 * kWordSize;
2081 const intptr_t kEndIndexOffset = 0 * kWordSize; 1993 const intptr_t kEndIndexOffset = 0 * kWordSize;
2082 Label fall_through, ok; 1994 Label fall_through, ok;
2083 1995
(...skipping 39 matching lines...)
2123 __ cmp(R2, Operand(0)); 2035 __ cmp(R2, Operand(0));
2124 __ strb(R1, FieldAddress(NOTFP, OneByteString::data_offset())); 2036 __ strb(R1, FieldAddress(NOTFP, OneByteString::data_offset()));
2125 __ AddImmediate(NOTFP, 1); 2037 __ AddImmediate(NOTFP, 1);
2126 __ b(&loop, GT); 2038 __ b(&loop, GT);
2127 2039
2128 __ Bind(&done); 2040 __ Bind(&done);
2129 __ Ret(); 2041 __ Ret();
2130 __ Bind(&fall_through); 2042 __ Bind(&fall_through);
2131 } 2043 }
2132 2044
2133
2134 void Intrinsifier::OneByteStringSetAt(Assembler* assembler) { 2045 void Intrinsifier::OneByteStringSetAt(Assembler* assembler) {
2135 __ ldr(R2, Address(SP, 0 * kWordSize)); // Value. 2046 __ ldr(R2, Address(SP, 0 * kWordSize)); // Value.
2136 __ ldr(R1, Address(SP, 1 * kWordSize)); // Index. 2047 __ ldr(R1, Address(SP, 1 * kWordSize)); // Index.
2137 __ ldr(R0, Address(SP, 2 * kWordSize)); // OneByteString. 2048 __ ldr(R0, Address(SP, 2 * kWordSize)); // OneByteString.
2138 __ SmiUntag(R1); 2049 __ SmiUntag(R1);
2139 __ SmiUntag(R2); 2050 __ SmiUntag(R2);
2140 __ AddImmediate(R3, R0, OneByteString::data_offset() - kHeapObjectTag); 2051 __ AddImmediate(R3, R0, OneByteString::data_offset() - kHeapObjectTag);
2141 __ strb(R2, Address(R3, R1)); 2052 __ strb(R2, Address(R3, R1));
2142 __ Ret(); 2053 __ Ret();
2143 } 2054 }
2144 2055
2145
2146 void Intrinsifier::OneByteString_allocate(Assembler* assembler) { 2056 void Intrinsifier::OneByteString_allocate(Assembler* assembler) {
2147 __ ldr(R2, Address(SP, 0 * kWordSize)); // Length. 2057 __ ldr(R2, Address(SP, 0 * kWordSize)); // Length.
2148 Label fall_through, ok; 2058 Label fall_through, ok;
2149 TryAllocateOnebyteString(assembler, &ok, &fall_through); 2059 TryAllocateOnebyteString(assembler, &ok, &fall_through);
2150 2060
2151 __ Bind(&ok); 2061 __ Bind(&ok);
2152 __ Ret(); 2062 __ Ret();
2153 2063
2154 __ Bind(&fall_through); 2064 __ Bind(&fall_through);
2155 } 2065 }
2156 2066
2157
2158 // TODO(srdjan): Add combinations (one-byte/two-byte/external strings). 2067 // TODO(srdjan): Add combinations (one-byte/two-byte/external strings).
2159 static void StringEquality(Assembler* assembler, intptr_t string_cid) { 2068 static void StringEquality(Assembler* assembler, intptr_t string_cid) {
2160 Label fall_through, is_true, is_false, loop; 2069 Label fall_through, is_true, is_false, loop;
2161 __ ldr(R0, Address(SP, 1 * kWordSize)); // This. 2070 __ ldr(R0, Address(SP, 1 * kWordSize)); // This.
2162 __ ldr(R1, Address(SP, 0 * kWordSize)); // Other. 2071 __ ldr(R1, Address(SP, 0 * kWordSize)); // Other.
2163 2072
2164 // Are identical? 2073 // Are identical?
2165 __ cmp(R0, Operand(R1)); 2074 __ cmp(R0, Operand(R1));
2166 __ b(&is_true, EQ); 2075 __ b(&is_true, EQ);
2167 2076
(...skipping 44 matching lines...)
2212 __ LoadObject(R0, Bool::True()); 2121 __ LoadObject(R0, Bool::True());
2213 __ Ret(); 2122 __ Ret();
2214 2123
2215 __ Bind(&is_false); 2124 __ Bind(&is_false);
2216 __ LoadObject(R0, Bool::False()); 2125 __ LoadObject(R0, Bool::False());
2217 __ Ret(); 2126 __ Ret();
2218 2127
2219 __ Bind(&fall_through); 2128 __ Bind(&fall_through);
2220 } 2129 }
2221 2130
2222
2223 void Intrinsifier::OneByteString_equality(Assembler* assembler) { 2131 void Intrinsifier::OneByteString_equality(Assembler* assembler) {
2224 StringEquality(assembler, kOneByteStringCid); 2132 StringEquality(assembler, kOneByteStringCid);
2225 } 2133 }
2226 2134
2227
2228 void Intrinsifier::TwoByteString_equality(Assembler* assembler) { 2135 void Intrinsifier::TwoByteString_equality(Assembler* assembler) {
2229 StringEquality(assembler, kTwoByteStringCid); 2136 StringEquality(assembler, kTwoByteStringCid);
2230 } 2137 }
2231 2138
2232
2233 void Intrinsifier::IntrinsifyRegExpExecuteMatch(Assembler* assembler, 2139 void Intrinsifier::IntrinsifyRegExpExecuteMatch(Assembler* assembler,
2234 bool sticky) { 2140 bool sticky) {
2235 if (FLAG_interpret_irregexp) return; 2141 if (FLAG_interpret_irregexp) return;
2236 2142
2237 static const intptr_t kRegExpParamOffset = 2 * kWordSize; 2143 static const intptr_t kRegExpParamOffset = 2 * kWordSize;
2238 static const intptr_t kStringParamOffset = 1 * kWordSize; 2144 static const intptr_t kStringParamOffset = 1 * kWordSize;
2239 // start_index smi is located at offset 0. 2145 // start_index smi is located at offset 0.
2240 2146
2241 // Incoming registers: 2147 // Incoming registers:
2242 // R0: Function. (Will be reloaded with the specialized matcher function.) 2148 // R0: Function. (Will be reloaded with the specialized matcher function.)
(...skipping 13 matching lines...) Expand all
2256 // Registers are now set up for the lazy compile stub. It expects the function 2162 // Registers are now set up for the lazy compile stub. It expects the function
2257 // in R0, the argument descriptor in R4, and IC-Data in R9. 2163 // in R0, the argument descriptor in R4, and IC-Data in R9.
2258 __ eor(R9, R9, Operand(R9)); 2164 __ eor(R9, R9, Operand(R9));
2259 2165
2260 // Tail-call the function. 2166 // Tail-call the function.
2261 __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset())); 2167 __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset()));
2262 __ ldr(R1, FieldAddress(R0, Function::entry_point_offset())); 2168 __ ldr(R1, FieldAddress(R0, Function::entry_point_offset()));
2263 __ bx(R1); 2169 __ bx(R1);
2264 } 2170 }
2265 2171
2266
2267 // On stack: user tag (+0). 2172 // On stack: user tag (+0).
2268 void Intrinsifier::UserTag_makeCurrent(Assembler* assembler) { 2173 void Intrinsifier::UserTag_makeCurrent(Assembler* assembler) {
2269 // R1: Isolate. 2174 // R1: Isolate.
2270 __ LoadIsolate(R1); 2175 __ LoadIsolate(R1);
2271 // R0: Current user tag. 2176 // R0: Current user tag.
2272 __ ldr(R0, Address(R1, Isolate::current_tag_offset())); 2177 __ ldr(R0, Address(R1, Isolate::current_tag_offset()));
2273 // R2: UserTag. 2178 // R2: UserTag.
2274 __ ldr(R2, Address(SP, +0 * kWordSize)); 2179 __ ldr(R2, Address(SP, +0 * kWordSize));
2275 // Set Isolate::current_tag_. 2180 // Set Isolate::current_tag_.
2276 __ str(R2, Address(R1, Isolate::current_tag_offset())); 2181 __ str(R2, Address(R1, Isolate::current_tag_offset()));
2277 // R2: UserTag's tag. 2182 // R2: UserTag's tag.
2278 __ ldr(R2, FieldAddress(R2, UserTag::tag_offset())); 2183 __ ldr(R2, FieldAddress(R2, UserTag::tag_offset()));
2279 // Set Isolate::user_tag_. 2184 // Set Isolate::user_tag_.
2280 __ str(R2, Address(R1, Isolate::user_tag_offset())); 2185 __ str(R2, Address(R1, Isolate::user_tag_offset()));
2281 __ Ret(); 2186 __ Ret();
2282 } 2187 }
2283 2188
2284
2285 void Intrinsifier::UserTag_defaultTag(Assembler* assembler) { 2189 void Intrinsifier::UserTag_defaultTag(Assembler* assembler) {
2286 __ LoadIsolate(R0); 2190 __ LoadIsolate(R0);
2287 __ ldr(R0, Address(R0, Isolate::default_tag_offset())); 2191 __ ldr(R0, Address(R0, Isolate::default_tag_offset()));
2288 __ Ret(); 2192 __ Ret();
2289 } 2193 }
2290 2194
2291
2292 void Intrinsifier::Profiler_getCurrentTag(Assembler* assembler) { 2195 void Intrinsifier::Profiler_getCurrentTag(Assembler* assembler) {
2293 __ LoadIsolate(R0); 2196 __ LoadIsolate(R0);
2294 __ ldr(R0, Address(R0, Isolate::current_tag_offset())); 2197 __ ldr(R0, Address(R0, Isolate::current_tag_offset()));
2295 __ Ret(); 2198 __ Ret();
2296 } 2199 }
2297 2200
2298
2299 void Intrinsifier::Timeline_isDartStreamEnabled(Assembler* assembler) { 2201 void Intrinsifier::Timeline_isDartStreamEnabled(Assembler* assembler) {
2300 if (!FLAG_support_timeline) { 2202 if (!FLAG_support_timeline) {
2301 __ LoadObject(R0, Bool::False()); 2203 __ LoadObject(R0, Bool::False());
2302 __ Ret(); 2204 __ Ret();
2303 return; 2205 return;
2304 } 2206 }
2305 // Load TimelineStream*. 2207 // Load TimelineStream*.
2306 __ ldr(R0, Address(THR, Thread::dart_stream_offset())); 2208 __ ldr(R0, Address(THR, Thread::dart_stream_offset()));
2307 // Load uintptr_t from TimelineStream*. 2209 // Load uintptr_t from TimelineStream*.
2308 __ ldr(R0, Address(R0, TimelineStream::enabled_offset())); 2210 __ ldr(R0, Address(R0, TimelineStream::enabled_offset()));
2309 __ cmp(R0, Operand(0)); 2211 __ cmp(R0, Operand(0));
2310 __ LoadObject(R0, Bool::True(), NE); 2212 __ LoadObject(R0, Bool::True(), NE);
2311 __ LoadObject(R0, Bool::False(), EQ); 2213 __ LoadObject(R0, Bool::False(), EQ);
2312 __ Ret(); 2214 __ Ret();
2313 } 2215 }
2314 2216
2315
2316 void Intrinsifier::ClearAsyncThreadStackTrace(Assembler* assembler) { 2217 void Intrinsifier::ClearAsyncThreadStackTrace(Assembler* assembler) {
2317 __ LoadObject(R0, Object::null_object()); 2218 __ LoadObject(R0, Object::null_object());
2318 __ str(R0, Address(THR, Thread::async_stack_trace_offset())); 2219 __ str(R0, Address(THR, Thread::async_stack_trace_offset()));
2319 __ Ret(); 2220 __ Ret();
2320 } 2221 }
2321 2222
2322
2323 void Intrinsifier::SetAsyncThreadStackTrace(Assembler* assembler) { 2223 void Intrinsifier::SetAsyncThreadStackTrace(Assembler* assembler) {
2324 __ ldr(R0, Address(THR, Thread::async_stack_trace_offset())); 2224 __ ldr(R0, Address(THR, Thread::async_stack_trace_offset()));
2325 __ LoadObject(R0, Object::null_object()); 2225 __ LoadObject(R0, Object::null_object());
2326 __ Ret(); 2226 __ Ret();
2327 } 2227 }
2328 2228
2329 } // namespace dart 2229 } // namespace dart
2330 2230
2331 #endif // defined TARGET_ARCH_ARM 2231 #endif // defined TARGET_ARCH_ARM