| OLD | NEW |
| 1 // Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file |
| 2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
| 3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
| 4 | 4 |
| 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM64. | 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM64. |
| 6 #if defined(TARGET_ARCH_ARM64) | 6 #if defined(TARGET_ARCH_ARM64) |
| 7 | 7 |
| 8 #include "vm/intrinsifier.h" | 8 #include "vm/intrinsifier.h" |
| 9 | 9 |
| 10 #include "vm/assembler.h" | 10 #include "vm/assembler.h" |
| (...skipping 10 matching lines...) |
| 21 // When entering intrinsics code: | 21 // When entering intrinsics code: |
| 22 // R4: Arguments descriptor | 22 // R4: Arguments descriptor |
| 23 // LR: Return address | 23 // LR: Return address |
| 24 // The R4 register can be destroyed only if there is no slow-path, i.e. | 24 // The R4 register can be destroyed only if there is no slow-path, i.e. |
| 25 // if the intrinsified method always executes a return. | 25 // if the intrinsified method always executes a return. |
| 26 // The FP register should not be modified, because it is used by the profiler. | 26 // The FP register should not be modified, because it is used by the profiler. |
| 27 // The PP and THR registers (see constants_arm64.h) must be preserved. | 27 // The PP and THR registers (see constants_arm64.h) must be preserved. |
| 28 | 28 |
| 29 #define __ assembler-> | 29 #define __ assembler-> |
| 30 | 30 |
| 31 | |
| 32 intptr_t Intrinsifier::ParameterSlotFromSp() { | 31 intptr_t Intrinsifier::ParameterSlotFromSp() { |
| 33 return -1; | 32 return -1; |
| 34 } | 33 } |
| 35 | 34 |
| 36 | |
| 37 static bool IsABIPreservedRegister(Register reg) { | 35 static bool IsABIPreservedRegister(Register reg) { |
| 38 return ((1 << reg) & kAbiPreservedCpuRegs) != 0; | 36 return ((1 << reg) & kAbiPreservedCpuRegs) != 0; |
| 39 } | 37 } |
| 40 | 38 |
| 41 | |
| 42 void Intrinsifier::IntrinsicCallPrologue(Assembler* assembler) { | 39 void Intrinsifier::IntrinsicCallPrologue(Assembler* assembler) { |
| 43 ASSERT(IsABIPreservedRegister(CODE_REG)); | 40 ASSERT(IsABIPreservedRegister(CODE_REG)); |
| 44 ASSERT(!IsABIPreservedRegister(ARGS_DESC_REG)); | 41 ASSERT(!IsABIPreservedRegister(ARGS_DESC_REG)); |
| 45 ASSERT(IsABIPreservedRegister(CALLEE_SAVED_TEMP)); | 42 ASSERT(IsABIPreservedRegister(CALLEE_SAVED_TEMP)); |
| 46 ASSERT(IsABIPreservedRegister(CALLEE_SAVED_TEMP2)); | 43 ASSERT(IsABIPreservedRegister(CALLEE_SAVED_TEMP2)); |
| 47 ASSERT(CALLEE_SAVED_TEMP != CODE_REG); | 44 ASSERT(CALLEE_SAVED_TEMP != CODE_REG); |
| 48 ASSERT(CALLEE_SAVED_TEMP != ARGS_DESC_REG); | 45 ASSERT(CALLEE_SAVED_TEMP != ARGS_DESC_REG); |
| 49 ASSERT(CALLEE_SAVED_TEMP2 != CODE_REG); | 46 ASSERT(CALLEE_SAVED_TEMP2 != CODE_REG); |
| 50 ASSERT(CALLEE_SAVED_TEMP2 != ARGS_DESC_REG); | 47 ASSERT(CALLEE_SAVED_TEMP2 != ARGS_DESC_REG); |
| 51 | 48 |
| 52 assembler->Comment("IntrinsicCallPrologue"); | 49 assembler->Comment("IntrinsicCallPrologue"); |
| 53 assembler->mov(CALLEE_SAVED_TEMP, LR); | 50 assembler->mov(CALLEE_SAVED_TEMP, LR); |
| 54 assembler->mov(CALLEE_SAVED_TEMP2, ARGS_DESC_REG); | 51 assembler->mov(CALLEE_SAVED_TEMP2, ARGS_DESC_REG); |
| 55 } | 52 } |
| 56 | 53 |
| 57 | |
| 58 void Intrinsifier::IntrinsicCallEpilogue(Assembler* assembler) { | 54 void Intrinsifier::IntrinsicCallEpilogue(Assembler* assembler) { |
| 59 assembler->Comment("IntrinsicCallEpilogue"); | 55 assembler->Comment("IntrinsicCallEpilogue"); |
| 60 assembler->mov(LR, CALLEE_SAVED_TEMP); | 56 assembler->mov(LR, CALLEE_SAVED_TEMP); |
| 61 assembler->mov(ARGS_DESC_REG, CALLEE_SAVED_TEMP2); | 57 assembler->mov(ARGS_DESC_REG, CALLEE_SAVED_TEMP2); |
| 62 } | 58 } |
| 63 | 59 |
| 64 | |
| 65 // Intrinsify only for Smi value and index. Non-smi values need a store buffer | 60 // Intrinsify only for Smi value and index. Non-smi values need a store buffer |
| 66 // update. Array length is always a Smi. | 61 // update. Array length is always a Smi. |
| 67 void Intrinsifier::ObjectArraySetIndexed(Assembler* assembler) { | 62 void Intrinsifier::ObjectArraySetIndexed(Assembler* assembler) { |
| 68 if (Isolate::Current()->type_checks()) { | 63 if (Isolate::Current()->type_checks()) { |
| 69 return; | 64 return; |
| 70 } | 65 } |
| 71 | 66 |
| 72 Label fall_through; | 67 Label fall_through; |
| 73 __ ldr(R1, Address(SP, 1 * kWordSize)); // Index. | 68 __ ldr(R1, Address(SP, 1 * kWordSize)); // Index. |
| 74 __ tsti(R1, Immediate(kSmiTagMask)); | 69 __ tsti(R1, Immediate(kSmiTagMask)); |
| (...skipping 10 matching lines...) |
| 85 // Note that R1 is Smi, i.e., times 2. | 80 // Note that R1 is Smi, i.e., times 2. |
| 86 ASSERT(kSmiTagShift == 1); | 81 ASSERT(kSmiTagShift == 1); |
| 87 __ ldr(R2, Address(SP, 0 * kWordSize)); // Value. | 82 __ ldr(R2, Address(SP, 0 * kWordSize)); // Value. |
| 88 __ add(R1, R0, Operand(R1, LSL, 2)); // R1 is Smi. | 83 __ add(R1, R0, Operand(R1, LSL, 2)); // R1 is Smi. |
| 89 __ StoreIntoObject(R0, FieldAddress(R1, Array::data_offset()), R2); | 84 __ StoreIntoObject(R0, FieldAddress(R1, Array::data_offset()), R2); |
| 90 // Caller is responsible for preserving the value if necessary. | 85 // Caller is responsible for preserving the value if necessary. |
| 91 __ ret(); | 86 __ ret(); |
| 92 __ Bind(&fall_through); | 87 __ Bind(&fall_through); |
| 93 } | 88 } |
| 94 | 89 |
| 95 | |
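Aside on the scaling above: `LSL 2` can look like an off-by-one for 8-byte array slots, but R1 still holds a tagged Smi index (the value already shifted left by one), so two extra bits of shift scale by eight in total. A minimal C++ sketch of that address arithmetic, assuming kSmiTagShift == 1 and 8-byte words (the helper name is hypothetical, not VM code):

```cpp
#include <cstdint>

// Tagged-index scaling: a Smi stores (index << 1), so shifting the
// *tagged* value left by 2 more bits yields index * 8 in one step.
uintptr_t ElementAddress(uintptr_t array_base, intptr_t tagged_index) {
  // tagged_index == index * 2, hence (tagged_index << 2) == index * 8.
  return array_base + (static_cast<uintptr_t>(tagged_index) << 2);
}
```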
| 96 // Allocate a GrowableObjectArray using the backing array specified. | 90 // Allocate a GrowableObjectArray using the backing array specified. |
| 97 // On stack: type argument (+1), data (+0). | 91 // On stack: type argument (+1), data (+0). |
| 98 void Intrinsifier::GrowableArray_Allocate(Assembler* assembler) { | 92 void Intrinsifier::GrowableArray_Allocate(Assembler* assembler) { |
| 99 // The newly allocated object is returned in R0. | 93 // The newly allocated object is returned in R0. |
| 100 const intptr_t kTypeArgumentsOffset = 1 * kWordSize; | 94 const intptr_t kTypeArgumentsOffset = 1 * kWordSize; |
| 101 const intptr_t kArrayOffset = 0 * kWordSize; | 95 const intptr_t kArrayOffset = 0 * kWordSize; |
| 102 Label fall_through; | 96 Label fall_through; |
| 103 | 97 |
| 104 // Try allocating in new space. | 98 // Try allocating in new space. |
| 105 const Class& cls = Class::Handle( | 99 const Class& cls = Class::Handle( |
| (...skipping 13 matching lines...) |
| 119 R0, FieldAddress(R0, GrowableObjectArray::type_arguments_offset()), R1); | 113 R0, FieldAddress(R0, GrowableObjectArray::type_arguments_offset()), R1); |
| 120 | 114 |
| 121 // Set the length field in the growable array object to 0. | 115 // Set the length field in the growable array object to 0. |
| 122 __ LoadImmediate(R1, 0); | 116 __ LoadImmediate(R1, 0); |
| 123 __ str(R1, FieldAddress(R0, GrowableObjectArray::length_offset())); | 117 __ str(R1, FieldAddress(R0, GrowableObjectArray::length_offset())); |
| 124 __ ret(); // Returns the newly allocated object in R0. | 118 __ ret(); // Returns the newly allocated object in R0. |
| 125 | 119 |
| 126 __ Bind(&fall_through); | 120 __ Bind(&fall_through); |
| 127 } | 121 } |
| 128 | 122 |
| 129 | |
| 130 // Add an element to the growable array if it doesn't need to grow; otherwise | 123 // Add an element to the growable array if it doesn't need to grow; otherwise |
| 131 // call into regular code. | 124 // call into regular code. |
| 132 // On stack: growable array (+1), value (+0). | 125 // On stack: growable array (+1), value (+0). |
| 133 void Intrinsifier::GrowableArray_add(Assembler* assembler) { | 126 void Intrinsifier::GrowableArray_add(Assembler* assembler) { |
| 134 // In checked mode we need to type-check the incoming argument. | 127 // In checked mode we need to type-check the incoming argument. |
| 135 if (Isolate::Current()->type_checks()) { | 128 if (Isolate::Current()->type_checks()) { |
| 136 return; | 129 return; |
| 137 } | 130 } |
| 138 Label fall_through; | 131 Label fall_through; |
| 139 // R0: Array. | 132 // R0: Array. |
| (...skipping 13 matching lines...) |
| 153 __ str(R3, FieldAddress(R0, GrowableObjectArray::length_offset())); | 146 __ str(R3, FieldAddress(R0, GrowableObjectArray::length_offset())); |
| 154 __ ldr(R0, Address(SP, 0 * kWordSize)); // Value. | 147 __ ldr(R0, Address(SP, 0 * kWordSize)); // Value. |
| 155 ASSERT(kSmiTagShift == 1); | 148 ASSERT(kSmiTagShift == 1); |
| 156 __ add(R1, R2, Operand(R1, LSL, 2)); | 149 __ add(R1, R2, Operand(R1, LSL, 2)); |
| 157 __ StoreIntoObject(R2, FieldAddress(R1, Array::data_offset()), R0); | 150 __ StoreIntoObject(R2, FieldAddress(R1, Array::data_offset()), R0); |
| 158 __ LoadObject(R0, Object::null_object()); | 151 __ LoadObject(R0, Object::null_object()); |
| 159 __ ret(); | 152 __ ret(); |
| 160 __ Bind(&fall_through); | 153 __ Bind(&fall_through); |
| 161 } | 154 } |
| 162 | 155 |
| 163 | |
| 164 static int GetScaleFactor(intptr_t size) { | 156 static int GetScaleFactor(intptr_t size) { |
| 165 switch (size) { | 157 switch (size) { |
| 166 case 1: | 158 case 1: |
| 167 return 0; | 159 return 0; |
| 168 case 2: | 160 case 2: |
| 169 return 1; | 161 return 1; |
| 170 case 4: | 162 case 4: |
| 171 return 2; | 163 return 2; |
| 172 case 8: | 164 case 8: |
| 173 return 3; | 165 return 3; |
| 174 case 16: | 166 case 16: |
| 175 return 4; | 167 return 4; |
| 176 } | 168 } |
| 177 UNREACHABLE(); | 169 UNREACHABLE(); |
| 178 return -1; | 170 return -1; |
| 179 } | 171 } |
| 180 | 172 |
| 181 | |
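GetScaleFactor returns log2 of the element size, later used as a shift amount when computing allocation sizes. For power-of-two sizes this is just a count of trailing zero bits; a sketch using the GCC/Clang builtin (the function name and builtin availability are assumptions, not part of the VM):

```cpp
#include <cassert>

// Equivalent scale factor for power-of-two sizes: (1 << shift) == size.
static int GetScaleFactorViaCtz(long size) {
  assert(size > 0 && (size & (size - 1)) == 0 && size <= 16);
  return __builtin_ctzl(static_cast<unsigned long>(size));
}
```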
| 182 #define TYPED_ARRAY_ALLOCATION(type_name, cid, max_len, scale_shift) \ | 173 #define TYPED_ARRAY_ALLOCATION(type_name, cid, max_len, scale_shift) \ |
| 183 Label fall_through; \ | 174 Label fall_through; \ |
| 184 const intptr_t kArrayLengthStackOffset = 0 * kWordSize; \ | 175 const intptr_t kArrayLengthStackOffset = 0 * kWordSize; \ |
| 185 NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, R2, &fall_through)); \ | 176 NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, R2, &fall_through)); \ |
| 186 __ ldr(R2, Address(SP, kArrayLengthStackOffset)); /* Array length. */ \ | 177 __ ldr(R2, Address(SP, kArrayLengthStackOffset)); /* Array length. */ \ |
| 187 /* Check that length is a positive Smi. */ \ | 178 /* Check that length is a positive Smi. */ \ |
| 188 /* R2: requested array length argument. */ \ | 179 /* R2: requested array length argument. */ \ |
| 189 __ tsti(R2, Immediate(kSmiTagMask)); \ | 180 __ tsti(R2, Immediate(kSmiTagMask)); \ |
| 190 __ b(&fall_through, NE); \ | 181 __ b(&fall_through, NE); \ |
| 191 __ CompareRegisters(R2, ZR); \ | 182 __ CompareRegisters(R2, ZR); \ |
| (...skipping 63 matching lines...) |
| 255 __ cmp(R2, Operand(R1)); \ | 246 __ cmp(R2, Operand(R1)); \ |
| 256 __ b(&done, CS); \ | 247 __ b(&done, CS); \ |
| 257 __ str(R3, Address(R2, 0)); \ | 248 __ str(R3, Address(R2, 0)); \ |
| 258 __ add(R2, R2, Operand(kWordSize)); \ | 249 __ add(R2, R2, Operand(kWordSize)); \ |
| 259 __ b(&init_loop); \ | 250 __ b(&init_loop); \ |
| 260 __ Bind(&done); \ | 251 __ Bind(&done); \ |
| 261 \ | 252 \ |
| 262 __ ret(); \ | 253 __ ret(); \ |
| 263 __ Bind(&fall_through); | 254 __ Bind(&fall_through); |
| 264 | 255 |
| 265 | |
| 266 #define TYPED_DATA_ALLOCATOR(clazz) \ | 256 #define TYPED_DATA_ALLOCATOR(clazz) \ |
| 267 void Intrinsifier::TypedData_##clazz##_factory(Assembler* assembler) { \ | 257 void Intrinsifier::TypedData_##clazz##_factory(Assembler* assembler) { \ |
| 268 intptr_t size = TypedData::ElementSizeInBytes(kTypedData##clazz##Cid); \ | 258 intptr_t size = TypedData::ElementSizeInBytes(kTypedData##clazz##Cid); \ |
| 269 intptr_t max_len = TypedData::MaxElements(kTypedData##clazz##Cid); \ | 259 intptr_t max_len = TypedData::MaxElements(kTypedData##clazz##Cid); \ |
| 270 int shift = GetScaleFactor(size); \ | 260 int shift = GetScaleFactor(size); \ |
| 271 TYPED_ARRAY_ALLOCATION(TypedData, kTypedData##clazz##Cid, max_len, shift); \ | 261 TYPED_ARRAY_ALLOCATION(TypedData, kTypedData##clazz##Cid, max_len, shift); \ |
| 272 } | 262 } |
| 273 CLASS_LIST_TYPED_DATA(TYPED_DATA_ALLOCATOR) | 263 CLASS_LIST_TYPED_DATA(TYPED_DATA_ALLOCATOR) |
| 274 #undef TYPED_DATA_ALLOCATOR | 264 #undef TYPED_DATA_ALLOCATOR |
| 275 | 265 |
| 276 | |
| 277 // Loads args from the stack into R0 and R1. | 266 // Loads args from the stack into R0 and R1. |
| 278 // Tests if they are Smis; jumps to label not_smi if not. | 267 // Tests if they are Smis; jumps to label not_smi if not. |
| 279 static void TestBothArgumentsSmis(Assembler* assembler, Label* not_smi) { | 268 static void TestBothArgumentsSmis(Assembler* assembler, Label* not_smi) { |
| 280 __ ldr(R0, Address(SP, +0 * kWordSize)); | 269 __ ldr(R0, Address(SP, +0 * kWordSize)); |
| 281 __ ldr(R1, Address(SP, +1 * kWordSize)); | 270 __ ldr(R1, Address(SP, +1 * kWordSize)); |
| 282 __ orr(TMP, R0, Operand(R1)); | 271 __ orr(TMP, R0, Operand(R1)); |
| 283 __ tsti(TMP, Immediate(kSmiTagMask)); | 272 __ tsti(TMP, Immediate(kSmiTagMask)); |
| 284 __ b(not_smi, NE); | 273 __ b(not_smi, NE); |
| 285 } | 274 } |
| 286 | 275 |
| 287 | |
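The `orr` plus `tsti` pair checks both arguments with one test: with kSmiTag == 0 in the low bit, the OR of two words has its tag bit set iff at least one of them is a non-Smi heap pointer. A C++ model of the predicate (a sketch; kSmiTagMask assumed to be 1):

```cpp
#include <cstdint>

// Combined Smi check: a Smi has a clear low bit, so (a | b) has a set
// low bit exactly when either value carries a heap-object tag.
bool BothAreSmis(uintptr_t a, uintptr_t b) {
  const uintptr_t kSmiTagMask = 1;
  return ((a | b) & kSmiTagMask) == 0;  // mirrors: orr TMP, R0, R1; tsti TMP, #1
}
```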
| 288 void Intrinsifier::Integer_addFromInteger(Assembler* assembler) { | 276 void Intrinsifier::Integer_addFromInteger(Assembler* assembler) { |
| 289 Label fall_through; | 277 Label fall_through; |
| 290 TestBothArgumentsSmis(assembler, &fall_through); // Checks two smis. | 278 TestBothArgumentsSmis(assembler, &fall_through); // Checks two smis. |
| 291 __ adds(R0, R0, Operand(R1)); // Adds. | 279 __ adds(R0, R0, Operand(R1)); // Adds. |
| 292 __ b(&fall_through, VS); // Fall-through on overflow. | 280 __ b(&fall_through, VS); // Fall-through on overflow. |
| 293 __ ret(); | 281 __ ret(); |
| 294 __ Bind(&fall_through); | 282 __ Bind(&fall_through); |
| 295 } | 283 } |
| 296 | 284 |
| 297 | |
| 298 void Intrinsifier::Integer_add(Assembler* assembler) { | 285 void Intrinsifier::Integer_add(Assembler* assembler) { |
| 299 Integer_addFromInteger(assembler); | 286 Integer_addFromInteger(assembler); |
| 300 } | 287 } |
| 301 | 288 |
| 302 | |
| 303 void Intrinsifier::Integer_subFromInteger(Assembler* assembler) { | 289 void Intrinsifier::Integer_subFromInteger(Assembler* assembler) { |
| 304 Label fall_through; | 290 Label fall_through; |
| 305 TestBothArgumentsSmis(assembler, &fall_through); | 291 TestBothArgumentsSmis(assembler, &fall_through); |
| 306 __ subs(R0, R0, Operand(R1)); // Subtract. | 292 __ subs(R0, R0, Operand(R1)); // Subtract. |
| 307 __ b(&fall_through, VS); // Fall-through on overflow. | 293 __ b(&fall_through, VS); // Fall-through on overflow. |
| 308 __ ret(); | 294 __ ret(); |
| 309 __ Bind(&fall_through); | 295 __ Bind(&fall_through); |
| 310 } | 296 } |
| 311 | 297 |
| 312 | |
| 313 void Intrinsifier::Integer_sub(Assembler* assembler) { | 298 void Intrinsifier::Integer_sub(Assembler* assembler) { |
| 314 Label fall_through; | 299 Label fall_through; |
| 315 TestBothArgumentsSmis(assembler, &fall_through); | 300 TestBothArgumentsSmis(assembler, &fall_through); |
| 316 __ subs(R0, R1, Operand(R0)); // Subtract. | 301 __ subs(R0, R1, Operand(R0)); // Subtract. |
| 317 __ b(&fall_through, VS); // Fall-through on overflow. | 302 __ b(&fall_through, VS); // Fall-through on overflow. |
| 318 __ ret(); | 303 __ ret(); |
| 319 __ Bind(&fall_through); | 304 __ Bind(&fall_through); |
| 320 } | 305 } |
| 321 | 306 |
| 322 | |
| 323 void Intrinsifier::Integer_mulFromInteger(Assembler* assembler) { | 307 void Intrinsifier::Integer_mulFromInteger(Assembler* assembler) { |
| 324 Label fall_through; | 308 Label fall_through; |
| 325 | 309 |
| 326 TestBothArgumentsSmis(assembler, &fall_through); // checks two smis | 310 TestBothArgumentsSmis(assembler, &fall_through); // checks two smis |
| 327 __ SmiUntag(R0); // Untags R0. We only want result shifted by one. | 311 __ SmiUntag(R0); // Untags R0. We only want result shifted by one. |
| 328 | 312 |
| 329 __ mul(TMP, R0, R1); | 313 __ mul(TMP, R0, R1); |
| 330 __ smulh(TMP2, R0, R1); | 314 __ smulh(TMP2, R0, R1); |
| 331 // TMP2: result bits 64..127. | 315 // TMP2: result bits 64..127. |
| 332 __ cmp(TMP2, Operand(TMP, ASR, 63)); | 316 __ cmp(TMP2, Operand(TMP, ASR, 63)); |
| 333 __ b(&fall_through, NE); | 317 __ b(&fall_through, NE); |
| 334 __ mov(R0, TMP); | 318 __ mov(R0, TMP); |
| 335 __ ret(); | 319 __ ret(); |
| 336 __ Bind(&fall_through); | 320 __ Bind(&fall_through); |
| 337 } | 321 } |
| 338 | 322 |
| 339 | |
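The mul/smulh pair computes the low and high 64 bits of the full 128-bit product; the product fits in 64 bits exactly when the high half equals the sign extension of the low half, which is what the compare against `Operand(TMP, ASR, 63)` checks. A sketch of the same test in C++ (using the GCC/Clang __int128 extension; not VM code):

```cpp
#include <cstdint>

// The signed 128-bit product fits in 64 bits iff its high half equals
// the low half's sign bit replicated across a full word.
bool SmiMulOverflows(int64_t a, int64_t b) {
  __int128 product = static_cast<__int128>(a) * b;
  int64_t lo = static_cast<int64_t>(product);        // mul
  int64_t hi = static_cast<int64_t>(product >> 64);  // smulh
  return hi != (lo < 0 ? -1 : 0);                    // cmp TMP2, TMP ASR #63
}
```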
| 340 void Intrinsifier::Integer_mul(Assembler* assembler) { | 323 void Intrinsifier::Integer_mul(Assembler* assembler) { |
| 341 Integer_mulFromInteger(assembler); | 324 Integer_mulFromInteger(assembler); |
| 342 } | 325 } |
| 343 | 326 |
| 344 | |
| 345 // Optimizations: | 327 // Optimizations: |
| 346 // - result is 0 if: | 328 // - result is 0 if: |
| 347 // - left is 0 | 329 // - left is 0 |
| 348 // - left equals right | 330 // - left equals right |
| 349 // - result is left if | 331 // - result is left if |
| 350 // - left > 0 && left < right | 332 // - left > 0 && left < right |
| 351 // R1: Tagged left (dividend). | 333 // R1: Tagged left (dividend). |
| 352 // R0: Tagged right (divisor). | 334 // R0: Tagged right (divisor). |
| 353 // Returns: | 335 // Returns: |
| 354 // R1: Untagged fallthrough result (remainder to be adjusted), or | 336 // R1: Untagged fallthrough result (remainder to be adjusted), or |
| (...skipping 28 matching lines...) |
| 383 | 365 |
| 384 __ Bind(&modulo); | 366 __ Bind(&modulo); |
| 385 // result <- left - right * (left / right) | 367 // result <- left - right * (left / right) |
| 386 __ SmiUntag(left); | 368 __ SmiUntag(left); |
| 387 __ SmiUntag(right); | 369 __ SmiUntag(right); |
| 388 | 370 |
| 389 __ sdiv(tmp, left, right); | 371 __ sdiv(tmp, left, right); |
| 390 __ msub(result, right, tmp, left); // result <- left - right * tmp | 372 __ msub(result, right, tmp, left); // result <- left - right * tmp |
| 391 } | 373 } |
| 392 | 374 |
| 393 | |
| 394 // Implementation: | 375 // Implementation: |
| 395 // res = left % right; | 376 // res = left % right; |
| 396 // if (res < 0) { | 377 // if (res < 0) { |
| 397 // if (right < 0) { | 378 // if (right < 0) { |
| 398 // res = res - right; | 379 // res = res - right; |
| 399 // } else { | 380 // } else { |
| 400 // res = res + right; | 381 // res = res + right; |
| 401 // } | 382 // } |
| 402 // } | 383 // } |
| 403 void Intrinsifier::Integer_moduloFromInteger(Assembler* assembler) { | 384 void Intrinsifier::Integer_moduloFromInteger(Assembler* assembler) { |
| (...skipping 22 matching lines...) |
| 426 __ CompareRegisters(R0, ZR); | 407 __ CompareRegisters(R0, ZR); |
| 427 __ sub(TMP, R1, Operand(R0)); | 408 __ sub(TMP, R1, Operand(R0)); |
| 428 __ add(TMP2, R1, Operand(R0)); | 409 __ add(TMP2, R1, Operand(R0)); |
| 429 __ csel(R0, TMP2, TMP, GE); | 410 __ csel(R0, TMP2, TMP, GE); |
| 430 __ SmiTag(R0); | 411 __ SmiTag(R0); |
| 431 __ ret(); | 412 __ ret(); |
| 432 | 413 |
| 433 __ Bind(&fall_through); | 414 __ Bind(&fall_through); |
| 434 } | 415 } |
| 435 | 416 |
| 436 | |
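Dart's `%` always yields a non-negative result, while sdiv/msub produce a truncated remainder whose sign follows the dividend; the csel above picks `res - right` or `res + right` on the sign of the divisor to fix up a negative remainder. A hedged C++ model of the whole operation (assumes right != 0, which the intrinsic's fall-through guarantees, and operands within Smi range):

```cpp
#include <cstdint>

// Truncated remainder (sdiv + msub) corrected to Dart's non-negative modulo.
int64_t DartModulo(int64_t left, int64_t right) {
  int64_t res = left - right * (left / right);  // C++ '/' truncates, like sdiv
  if (res < 0) {
    res = (right < 0) ? res - right : res + right;  // csel on divisor's sign
  }
  return res;
}
```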
| 437 void Intrinsifier::Integer_truncDivide(Assembler* assembler) { | 417 void Intrinsifier::Integer_truncDivide(Assembler* assembler) { |
| 438 // Check to see if we have integer division | 418 // Check to see if we have integer division |
| 439 Label fall_through; | 419 Label fall_through; |
| 440 | 420 |
| 441 TestBothArgumentsSmis(assembler, &fall_through); | 421 TestBothArgumentsSmis(assembler, &fall_through); |
| 442 __ CompareRegisters(R0, ZR); | 422 __ CompareRegisters(R0, ZR); |
| 443 __ b(&fall_through, EQ); // If b is 0, fall through. | 423 __ b(&fall_through, EQ); // If b is 0, fall through. |
| 444 | 424 |
| 445 __ SmiUntag(R0); | 425 __ SmiUntag(R0); |
| 446 __ SmiUntag(R1); | 426 __ SmiUntag(R1); |
| 447 | 427 |
| 448 __ sdiv(R0, R1, R0); | 428 __ sdiv(R0, R1, R0); |
| 449 | 429 |
| 450 // Check the corner case of dividing 'MIN_SMI' by -1, in which case we | 430 // Check the corner case of dividing 'MIN_SMI' by -1, in which case we |
| 451 // cannot tag the result. | 431 // cannot tag the result. |
| 452 __ CompareImmediate(R0, 0x4000000000000000); | 432 __ CompareImmediate(R0, 0x4000000000000000); |
| 453 __ b(&fall_through, EQ); | 433 __ b(&fall_through, EQ); |
| 454 __ SmiTag(R0); // Not equal. Okay to tag and return. | 434 __ SmiTag(R0); // Not equal. Okay to tag and return. |
| 455 __ ret(); // Return. | 435 __ ret(); // Return. |
| 456 __ Bind(&fall_through); | 436 __ Bind(&fall_through); |
| 457 } | 437 } |
| 458 | 438 |
| 459 | |
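The magic constant is 2^62: after untagging, the one quotient that cannot be retagged into a 63-bit Smi is MIN_SMI / -1 == -(2^62) / -1. A one-line model (a sketch, assuming 63-bit Smis on ARM64):

```cpp
#include <cstdint>

// Every int64 quotient of two Smis fits back in a Smi except exactly 2^62.
bool QuotientFitsInSmi(int64_t quotient) {
  return quotient != (INT64_C(1) << 62);  // 0x4000000000000000
}
```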
| 460 void Intrinsifier::Integer_negate(Assembler* assembler) { | 439 void Intrinsifier::Integer_negate(Assembler* assembler) { |
| 461 Label fall_through; | 440 Label fall_through; |
| 462 __ ldr(R0, Address(SP, +0 * kWordSize)); // Grab first argument. | 441 __ ldr(R0, Address(SP, +0 * kWordSize)); // Grab first argument. |
| 463 __ tsti(R0, Immediate(kSmiTagMask)); // Test for Smi. | 442 __ tsti(R0, Immediate(kSmiTagMask)); // Test for Smi. |
| 464 __ b(&fall_through, NE); | 443 __ b(&fall_through, NE); |
| 465 __ negs(R0, R0); | 444 __ negs(R0, R0); |
| 466 __ b(&fall_through, VS); | 445 __ b(&fall_through, VS); |
| 467 __ ret(); | 446 __ ret(); |
| 468 __ Bind(&fall_through); | 447 __ Bind(&fall_through); |
| 469 } | 448 } |
| 470 | 449 |
| 471 | |
| 472 void Intrinsifier::Integer_bitAndFromInteger(Assembler* assembler) { | 450 void Intrinsifier::Integer_bitAndFromInteger(Assembler* assembler) { |
| 473 Label fall_through; | 451 Label fall_through; |
| 474 TestBothArgumentsSmis(assembler, &fall_through); // Checks two smis. | 452 TestBothArgumentsSmis(assembler, &fall_through); // Checks two smis. |
| 475 __ and_(R0, R0, Operand(R1)); | 453 __ and_(R0, R0, Operand(R1)); |
| 476 __ ret(); | 454 __ ret(); |
| 477 __ Bind(&fall_through); | 455 __ Bind(&fall_through); |
| 478 } | 456 } |
| 479 | 457 |
| 480 | |
| 481 void Intrinsifier::Integer_bitAnd(Assembler* assembler) { | 458 void Intrinsifier::Integer_bitAnd(Assembler* assembler) { |
| 482 Integer_bitAndFromInteger(assembler); | 459 Integer_bitAndFromInteger(assembler); |
| 483 } | 460 } |
| 484 | 461 |
| 485 | |
| 486 void Intrinsifier::Integer_bitOrFromInteger(Assembler* assembler) { | 462 void Intrinsifier::Integer_bitOrFromInteger(Assembler* assembler) { |
| 487 Label fall_through; | 463 Label fall_through; |
| 488 TestBothArgumentsSmis(assembler, &fall_through); // Checks two smis. | 464 TestBothArgumentsSmis(assembler, &fall_through); // Checks two smis. |
| 489 __ orr(R0, R0, Operand(R1)); | 465 __ orr(R0, R0, Operand(R1)); |
| 490 __ ret(); | 466 __ ret(); |
| 491 __ Bind(&fall_through); | 467 __ Bind(&fall_through); |
| 492 } | 468 } |
| 493 | 469 |
| 494 | |
| 495 void Intrinsifier::Integer_bitOr(Assembler* assembler) { | 470 void Intrinsifier::Integer_bitOr(Assembler* assembler) { |
| 496 Integer_bitOrFromInteger(assembler); | 471 Integer_bitOrFromInteger(assembler); |
| 497 } | 472 } |
| 498 | 473 |
| 499 | |
| 500 void Intrinsifier::Integer_bitXorFromInteger(Assembler* assembler) { | 474 void Intrinsifier::Integer_bitXorFromInteger(Assembler* assembler) { |
| 501 Label fall_through; | 475 Label fall_through; |
| 502 | 476 |
| 503 TestBothArgumentsSmis(assembler, &fall_through); // Checks two smis. | 477 TestBothArgumentsSmis(assembler, &fall_through); // Checks two smis. |
| 504 __ eor(R0, R0, Operand(R1)); | 478 __ eor(R0, R0, Operand(R1)); |
| 505 __ ret(); | 479 __ ret(); |
| 506 __ Bind(&fall_through); | 480 __ Bind(&fall_through); |
| 507 } | 481 } |
| 508 | 482 |
| 509 | |
| 510 void Intrinsifier::Integer_bitXor(Assembler* assembler) { | 483 void Intrinsifier::Integer_bitXor(Assembler* assembler) { |
| 511 Integer_bitXorFromInteger(assembler); | 484 Integer_bitXorFromInteger(assembler); |
| 512 } | 485 } |
| 513 | 486 |
| 514 | |
| 515 void Intrinsifier::Integer_shl(Assembler* assembler) { | 487 void Intrinsifier::Integer_shl(Assembler* assembler) { |
| 516 ASSERT(kSmiTagShift == 1); | 488 ASSERT(kSmiTagShift == 1); |
| 517 ASSERT(kSmiTag == 0); | 489 ASSERT(kSmiTag == 0); |
| 518 const Register right = R0; | 490 const Register right = R0; |
| 519 const Register left = R1; | 491 const Register left = R1; |
| 520 const Register temp = R2; | 492 const Register temp = R2; |
| 521 const Register result = R0; | 493 const Register result = R0; |
| 522 Label fall_through; | 494 Label fall_through; |
| 523 | 495 |
| 524 TestBothArgumentsSmis(assembler, &fall_through); | 496 TestBothArgumentsSmis(assembler, &fall_through); |
| 525 __ CompareImmediate(right, reinterpret_cast<int64_t>(Smi::New(Smi::kBits))); | 497 __ CompareImmediate(right, reinterpret_cast<int64_t>(Smi::New(Smi::kBits))); |
| 526 __ b(&fall_through, CS); | 498 __ b(&fall_through, CS); |
| 527 | 499 |
| 528 // Left is not a constant. | 500 // Left is not a constant. |
| 529 // Check if count too large for handling it inlined. | 501 // Check if count too large for handling it inlined. |
| 530 __ SmiUntag(TMP, right); // SmiUntag right into TMP. | 502 __ SmiUntag(TMP, right); // SmiUntag right into TMP. |
| 531 // Overflow test (preserve left, right, and TMP); | 503 // Overflow test (preserve left, right, and TMP); |
| 532 __ lslv(temp, left, TMP); | 504 __ lslv(temp, left, TMP); |
| 533 __ asrv(TMP2, temp, TMP); | 505 __ asrv(TMP2, temp, TMP); |
| 534 __ CompareRegisters(left, TMP2); | 506 __ CompareRegisters(left, TMP2); |
| 535 __ b(&fall_through, NE); // Overflow. | 507 __ b(&fall_through, NE); // Overflow. |
| 536 // Shift for result now we know there is no overflow. | 508 // Shift for result now we know there is no overflow. |
| 537 __ lslv(result, left, TMP); | 509 __ lslv(result, left, TMP); |
| 538 __ ret(); | 510 __ ret(); |
| 539 __ Bind(&fall_through); | 511 __ Bind(&fall_through); |
| 540 } | 512 } |
| 541 | 513 |
| 542 | |
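The overflow test shifts left and then arithmetically shifts back: if any significant bits (including the sign) were lost, the round trip does not reproduce the input and the intrinsic falls through. In C++ terms (a sketch; count is in [0, 63] thanks to the Smi::kBits check, and an arithmetic right shift of a signed value is assumed, as on ARM64):

```cpp
#include <cstdint>

// lslv/asrv round trip: the shift overflows iff shifting back differs.
bool ShlOverflows(int64_t value, int64_t count) {
  uint64_t shifted = static_cast<uint64_t>(value) << count;  // lslv
  int64_t back = static_cast<int64_t>(shifted) >> count;     // asrv
  return back != value;
}
```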
| 543 static void CompareIntegers(Assembler* assembler, Condition true_condition) { | 514 static void CompareIntegers(Assembler* assembler, Condition true_condition) { |
| 544 Label fall_through, true_label; | 515 Label fall_through, true_label; |
| 545 TestBothArgumentsSmis(assembler, &fall_through); | 516 TestBothArgumentsSmis(assembler, &fall_through); |
| 546 // R0 contains the right argument, R1 the left. | 517 // R0 contains the right argument, R1 the left. |
| 547 __ CompareRegisters(R1, R0); | 518 __ CompareRegisters(R1, R0); |
| 548 __ LoadObject(R0, Bool::False()); | 519 __ LoadObject(R0, Bool::False()); |
| 549 __ LoadObject(TMP, Bool::True()); | 520 __ LoadObject(TMP, Bool::True()); |
| 550 __ csel(R0, TMP, R0, true_condition); | 521 __ csel(R0, TMP, R0, true_condition); |
| 551 __ ret(); | 522 __ ret(); |
| 552 __ Bind(&fall_through); | 523 __ Bind(&fall_through); |
| 553 } | 524 } |
| 554 | 525 |
| 555 | |
| 556 void Intrinsifier::Integer_greaterThanFromInt(Assembler* assembler) { | 526 void Intrinsifier::Integer_greaterThanFromInt(Assembler* assembler) { |
| 557 CompareIntegers(assembler, LT); | 527 CompareIntegers(assembler, LT); |
| 558 } | 528 } |
| 559 | 529 |
| 560 | |
| 561 void Intrinsifier::Integer_lessThan(Assembler* assembler) { | 530 void Intrinsifier::Integer_lessThan(Assembler* assembler) { |
| 562 Integer_greaterThanFromInt(assembler); | 531 Integer_greaterThanFromInt(assembler); |
| 563 } | 532 } |
| 564 | 533 |
| 565 | |
| 566 void Intrinsifier::Integer_greaterThan(Assembler* assembler) { | 534 void Intrinsifier::Integer_greaterThan(Assembler* assembler) { |
| 567 CompareIntegers(assembler, GT); | 535 CompareIntegers(assembler, GT); |
| 568 } | 536 } |
| 569 | 537 |
| 570 | |
| 571 void Intrinsifier::Integer_lessEqualThan(Assembler* assembler) { | 538 void Intrinsifier::Integer_lessEqualThan(Assembler* assembler) { |
| 572 CompareIntegers(assembler, LE); | 539 CompareIntegers(assembler, LE); |
| 573 } | 540 } |
| 574 | 541 |
| 575 | |
| 576 void Intrinsifier::Integer_greaterEqualThan(Assembler* assembler) { | 542 void Intrinsifier::Integer_greaterEqualThan(Assembler* assembler) { |
| 577 CompareIntegers(assembler, GE); | 543 CompareIntegers(assembler, GE); |
| 578 } | 544 } |
| 579 | 545 |
| 580 | |
| 581 // This is called for Smi, Mint and Bigint receivers. The right argument | 546 // This is called for Smi, Mint and Bigint receivers. The right argument |
| 582 // can be Smi, Mint, Bigint or double. | 547 // can be Smi, Mint, Bigint or double. |
| 583 void Intrinsifier::Integer_equalToInteger(Assembler* assembler) { | 548 void Intrinsifier::Integer_equalToInteger(Assembler* assembler) { |
| 584 Label fall_through, true_label, check_for_mint; | 549 Label fall_through, true_label, check_for_mint; |
| 585 // For integer receiver '===' check first. | 550 // For integer receiver '===' check first. |
| 586 __ ldr(R0, Address(SP, 0 * kWordSize)); | 551 __ ldr(R0, Address(SP, 0 * kWordSize)); |
| 587 __ ldr(R1, Address(SP, 1 * kWordSize)); | 552 __ ldr(R1, Address(SP, 1 * kWordSize)); |
| 588 __ cmp(R0, Operand(R1)); | 553 __ cmp(R0, Operand(R1)); |
| 589 __ b(&true_label, EQ); | 554 __ b(&true_label, EQ); |
| 590 | 555 |
| (...skipping 32 matching lines...) |
| 623 // Receiver is Mint, return false if right is Smi. | 588 // Receiver is Mint, return false if right is Smi. |
| 624 __ tsti(R0, Immediate(kSmiTagMask)); | 589 __ tsti(R0, Immediate(kSmiTagMask)); |
| 625 __ b(&fall_through, NE); | 590 __ b(&fall_through, NE); |
| 626 __ LoadObject(R0, Bool::False()); | 591 __ LoadObject(R0, Bool::False()); |
| 627 __ ret(); | 592 __ ret(); |
| 628 // TODO(srdjan): Implement Mint == Mint comparison. | 593 // TODO(srdjan): Implement Mint == Mint comparison. |
| 629 | 594 |
| 630 __ Bind(&fall_through); | 595 __ Bind(&fall_through); |
| 631 } | 596 } |
| 632 | 597 |
| 633 | |
| 634 void Intrinsifier::Integer_equal(Assembler* assembler) { | 598 void Intrinsifier::Integer_equal(Assembler* assembler) { |
| 635 Integer_equalToInteger(assembler); | 599 Integer_equalToInteger(assembler); |
| 636 } | 600 } |
| 637 | 601 |
| 638 | |
| 639 void Intrinsifier::Integer_sar(Assembler* assembler) { | 602 void Intrinsifier::Integer_sar(Assembler* assembler) { |
| 640 Label fall_through; | 603 Label fall_through; |
| 641 | 604 |
| 642 TestBothArgumentsSmis(assembler, &fall_through); | 605 TestBothArgumentsSmis(assembler, &fall_through); |
| 643 // Shift amount in R0. Value to shift in R1. | 606 // Shift amount in R0. Value to shift in R1. |
| 644 | 607 |
| 645 // Fall through if shift amount is negative. | 608 // Fall through if shift amount is negative. |
| 646 __ SmiUntag(R0); | 609 __ SmiUntag(R0); |
| 647 __ CompareRegisters(R0, ZR); | 610 __ CompareRegisters(R0, ZR); |
| 648 __ b(&fall_through, LT); | 611 __ b(&fall_through, LT); |
| 649 | 612 |
| 650 // If shift amount is bigger than 63, set to 63. | 613 // If shift amount is bigger than 63, set to 63. |
| 651 __ LoadImmediate(TMP, 0x3F); | 614 __ LoadImmediate(TMP, 0x3F); |
| 652 __ CompareRegisters(R0, TMP); | 615 __ CompareRegisters(R0, TMP); |
| 653 __ csel(R0, TMP, R0, GT); | 616 __ csel(R0, TMP, R0, GT); |
| 654 __ SmiUntag(R1); | 617 __ SmiUntag(R1); |
| 655 __ asrv(R0, R1, R0); | 618 __ asrv(R0, R1, R0); |
| 656 __ SmiTag(R0); | 619 __ SmiTag(R0); |
| 657 __ ret(); | 620 __ ret(); |
| 658 __ Bind(&fall_through); | 621 __ Bind(&fall_through); |
| 659 } | 622 } |
| 660 | 623 |
| 661 | |
| 662 void Intrinsifier::Smi_bitNegate(Assembler* assembler) { | 624 void Intrinsifier::Smi_bitNegate(Assembler* assembler) { |
| 663 __ ldr(R0, Address(SP, 0 * kWordSize)); | 625 __ ldr(R0, Address(SP, 0 * kWordSize)); |
| 664 __ mvn(R0, R0); | 626 __ mvn(R0, R0); |
| 665 __ andi(R0, R0, Immediate(~kSmiTagMask)); // Remove inverted smi-tag. | 627 __ andi(R0, R0, Immediate(~kSmiTagMask)); // Remove inverted smi-tag. |
| 666 __ ret(); | 628 __ ret(); |
| 667 } | 629 } |
| 668 | 630 |
| 669 | |
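This works directly on the tagged value: complementing a tagged Smi flips its zero tag bit to one, so clearing that bit again leaves the correctly tagged complement. A C++ model (a sketch; kSmiTagMask assumed 1):

```cpp
#include <cstdint>

// For tagged = value << 1: ~tagged == (~value << 1) | 1, so clearing
// the low bit yields the tagged Smi for ~value.
intptr_t SmiBitNegate(intptr_t tagged) {
  return ~tagged & ~static_cast<intptr_t>(1);  // mvn + andi ~kSmiTagMask
}
```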
| 670 void Intrinsifier::Smi_bitLength(Assembler* assembler) { | 631 void Intrinsifier::Smi_bitLength(Assembler* assembler) { |
| 671 __ ldr(R0, Address(SP, 0 * kWordSize)); | 632 __ ldr(R0, Address(SP, 0 * kWordSize)); |
| 672 __ SmiUntag(R0); | 633 __ SmiUntag(R0); |
| 673 // XOR with sign bit to complement bits if value is negative. | 634 // XOR with sign bit to complement bits if value is negative. |
| 674 __ eor(R0, R0, Operand(R0, ASR, 63)); | 635 __ eor(R0, R0, Operand(R0, ASR, 63)); |
| 675 __ clz(R0, R0); | 636 __ clz(R0, R0); |
| 676 __ LoadImmediate(R1, 64); | 637 __ LoadImmediate(R1, 64); |
| 677 __ sub(R0, R1, Operand(R0)); | 638 __ sub(R0, R1, Operand(R0)); |
| 678 __ SmiTag(R0); | 639 __ SmiTag(R0); |
| 679 __ ret(); | 640 __ ret(); |
| 680 } | 641 } |
| 681 | 642 |
| 682 | |
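For non-negative x the bit length is 64 - clz(x); XORing with the replicated sign bit maps a negative value to its complement first, matching Dart's bitLength definition for negatives. A C++ sketch (__builtin_clzll is a GCC/Clang assumption and is undefined for 0, hence a guard the hardware clz does not need; arithmetic right shift of a signed value is also assumed):

```cpp
#include <cstdint>

// Bit length via count-leading-zeros, negatives complemented first.
int SmiBitLength(int64_t v) {
  uint64_t u = static_cast<uint64_t>(v ^ (v >> 63));  // ~v when v < 0
  return u == 0 ? 0 : 64 - __builtin_clzll(u);        // clz; then 64 - clz
}
```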
| 683 void Intrinsifier::Smi_bitAndFromSmi(Assembler* assembler) { | 643 void Intrinsifier::Smi_bitAndFromSmi(Assembler* assembler) { |
| 684 Integer_bitAndFromInteger(assembler); | 644 Integer_bitAndFromInteger(assembler); |
| 685 } | 645 } |
| 686 | 646 |
| 687 | |
| 688 void Intrinsifier::Bigint_lsh(Assembler* assembler) { | 647 void Intrinsifier::Bigint_lsh(Assembler* assembler) { |
| 689 // static void _lsh(Uint32List x_digits, int x_used, int n, | 648 // static void _lsh(Uint32List x_digits, int x_used, int n, |
| 690 // Uint32List r_digits) | 649 // Uint32List r_digits) |
| 691 | 650 |
| 692 // R2 = x_used, R3 = x_digits, x_used > 0, x_used is Smi. | 651 // R2 = x_used, R3 = x_digits, x_used > 0, x_used is Smi. |
| 693 __ ldp(R2, R3, Address(SP, 2 * kWordSize, Address::PairOffset)); | 652 __ ldp(R2, R3, Address(SP, 2 * kWordSize, Address::PairOffset)); |
| 694 __ add(R2, R2, Operand(2)); // x_used > 0, Smi. R2 = x_used + 1, round up. | 653 __ add(R2, R2, Operand(2)); // x_used > 0, Smi. R2 = x_used + 1, round up. |
| 695 __ AsrImmediate(R2, R2, 2); // R2 = num of digit pairs to read. | 654 __ AsrImmediate(R2, R2, 2); // R2 = num of digit pairs to read. |
| 696 // R4 = r_digits, R5 = n, n is Smi, n % _DIGIT_BITS != 0. | 655 // R4 = r_digits, R5 = n, n is Smi, n % _DIGIT_BITS != 0. |
| 697 __ ldp(R4, R5, Address(SP, 0 * kWordSize, Address::PairOffset)); | 656 __ ldp(R4, R5, Address(SP, 0 * kWordSize, Address::PairOffset)); |
| 698 __ SmiUntag(R5); | 657 __ SmiUntag(R5); |
| 699 // R0 = n ~/ (2*_DIGIT_BITS) | 658 // R0 = n ~/ (2*_DIGIT_BITS) |
| 700 __ AsrImmediate(R0, R5, 6); | 659 __ AsrImmediate(R0, R5, 6); |
| 701 // R6 = &x_digits[0] | 660 // R6 = &x_digits[0] |
| 702 __ add(R6, R3, Operand(TypedData::data_offset() - kHeapObjectTag)); | 661 __ add(R6, R3, Operand(TypedData::data_offset() - kHeapObjectTag)); |
| 703 // R7 = &x_digits[2*R2] | 662 // R7 = &x_digits[2*R2] |
| 704 __ add(R7, R6, Operand(R2, LSL, 3)); | 663 __ add(R7, R6, Operand(R2, LSL, 3)); |
| 705 // R8 = &r_digits[2*1] | 664 // R8 = &r_digits[2*1] |
| 706 __ add(R8, R4, Operand(TypedData::data_offset() - kHeapObjectTag + | 665 __ add(R8, R4, |
| 707 2 * Bigint::kBytesPerDigit)); | 666 Operand(TypedData::data_offset() - kHeapObjectTag + |
| 667 2 * Bigint::kBytesPerDigit)); |
| 708 // R8 = &r_digits[2*(R2 + n ~/ (2*_DIGIT_BITS) + 1)] | 668 // R8 = &r_digits[2*(R2 + n ~/ (2*_DIGIT_BITS) + 1)] |
| 709 __ add(R0, R0, Operand(R2)); | 669 __ add(R0, R0, Operand(R2)); |
| 710 __ add(R8, R8, Operand(R0, LSL, 3)); | 670 __ add(R8, R8, Operand(R0, LSL, 3)); |
| 711 // R3 = n % (2 * _DIGIT_BITS) | 671 // R3 = n % (2 * _DIGIT_BITS) |
| 712 __ AndImmediate(R3, R5, 63); | 672 __ AndImmediate(R3, R5, 63); |
| 713 // R2 = 64 - R3 | 673 // R2 = 64 - R3 |
| 714 __ LoadImmediate(R2, 64); | 674 __ LoadImmediate(R2, 64); |
| 715 __ sub(R2, R2, Operand(R3)); | 675 __ sub(R2, R2, Operand(R3)); |
| 716 __ mov(R1, ZR); | 676 __ mov(R1, ZR); |
| 717 Label loop; | 677 Label loop; |
| 718 __ Bind(&loop); | 678 __ Bind(&loop); |
| 719 __ ldr(R0, Address(R7, -2 * Bigint::kBytesPerDigit, Address::PreIndex)); | 679 __ ldr(R0, Address(R7, -2 * Bigint::kBytesPerDigit, Address::PreIndex)); |
| 720 __ lsrv(R4, R0, R2); | 680 __ lsrv(R4, R0, R2); |
| 721 __ orr(R1, R1, Operand(R4)); | 681 __ orr(R1, R1, Operand(R4)); |
| 722 __ str(R1, Address(R8, -2 * Bigint::kBytesPerDigit, Address::PreIndex)); | 682 __ str(R1, Address(R8, -2 * Bigint::kBytesPerDigit, Address::PreIndex)); |
| 723 __ lslv(R1, R0, R3); | 683 __ lslv(R1, R0, R3); |
| 724 __ cmp(R7, Operand(R6)); | 684 __ cmp(R7, Operand(R6)); |
| 725 __ b(&loop, NE); | 685 __ b(&loop, NE); |
| 726 __ str(R1, Address(R8, -2 * Bigint::kBytesPerDigit, Address::PreIndex)); | 686 __ str(R1, Address(R8, -2 * Bigint::kBytesPerDigit, Address::PreIndex)); |
| 727 // Returning Object::null() is not required, since this method is private. | 687 // Returning Object::null() is not required, since this method is private. |
| 728 __ ret(); | 688 __ ret(); |
| 729 } | 689 } |
| 730 | 690 |
| 731 | |
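The loop walks the 64-bit digit pairs from most to least significant, carrying the bits shifted out of each word into the next output word. Since the caller guarantees n % _DIGIT_BITS != 0, both shift amounts stay in 1..63 and the `>> (64 - s)` never degenerates. A C++ sketch of the same loop over 64-bit words (array names hypothetical; `out` must have room for len + shift_words + 1 words, matching the r_digits addressing above):

```cpp
#include <cstdint>

// Multi-word left shift by n bits, writing from the top. Precondition:
// n % 64 != 0 (mirrors the intrinsic's n % _DIGIT_BITS != 0 guarantee).
void BigintLsh(const uint64_t* in, intptr_t len, int n, uint64_t* out) {
  int s = n & 63;                 // n % (2 * _DIGIT_BITS)
  intptr_t shift_words = n >> 6;  // whole 64-bit words of shift
  uint64_t carry = 0;             // R1 starts as ZR
  for (intptr_t i = len - 1; i >= 0; --i) {
    out[i + shift_words + 1] = carry | (in[i] >> (64 - s));  // lsrv + orr
    carry = in[i] << s;                                      // lslv
  }
  out[shift_words] = carry;  // final str after the loop
}
```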
| 732 void Intrinsifier::Bigint_rsh(Assembler* assembler) { | 691 void Intrinsifier::Bigint_rsh(Assembler* assembler) { |
| 733 // static void _rsh(Uint32List x_digits, int x_used, int n, | 692 // static void _rsh(Uint32List x_digits, int x_used, int n, |
| 734 // Uint32List r_digits) | 693 // Uint32List r_digits) |
| 735 | 694 |
| 736 // R2 = x_used, R3 = x_digits, x_used > 0, x_used is Smi. | 695 // R2 = x_used, R3 = x_digits, x_used > 0, x_used is Smi. |
| 737 __ ldp(R2, R3, Address(SP, 2 * kWordSize, Address::PairOffset)); | 696 __ ldp(R2, R3, Address(SP, 2 * kWordSize, Address::PairOffset)); |
| 738 __ add(R2, R2, Operand(2)); // x_used > 0, Smi. R2 = x_used + 1, round up. | 697 __ add(R2, R2, Operand(2)); // x_used > 0, Smi. R2 = x_used + 1, round up. |
| 739 __ AsrImmediate(R2, R2, 2); // R2 = num of digit pairs to read. | 698 __ AsrImmediate(R2, R2, 2); // R2 = num of digit pairs to read. |
| 740 // R4 = r_digits, R5 = n, n is Smi, n % _DIGIT_BITS != 0. | 699 // R4 = r_digits, R5 = n, n is Smi, n % _DIGIT_BITS != 0. |
| 741 __ ldp(R4, R5, Address(SP, 0 * kWordSize, Address::PairOffset)); | 700 __ ldp(R4, R5, Address(SP, 0 * kWordSize, Address::PairOffset)); |
| (...skipping 27 matching lines...) |
| 769 __ str(R1, Address(R8, 2 * Bigint::kBytesPerDigit, Address::PostIndex)); | 728 __ str(R1, Address(R8, 2 * Bigint::kBytesPerDigit, Address::PostIndex)); |
| 770 __ lsrv(R1, R0, R3); | 729 __ lsrv(R1, R0, R3); |
| 771 __ Bind(&loop_entry); | 730 __ Bind(&loop_entry); |
| 772 __ cmp(R8, Operand(R6)); | 731 __ cmp(R8, Operand(R6)); |
| 773 __ b(&loop, NE); | 732 __ b(&loop, NE); |
| 774 __ str(R1, Address(R8, 0)); | 733 __ str(R1, Address(R8, 0)); |
| 775 // Returning Object::null() is not required, since this method is private. | 734 // Returning Object::null() is not required, since this method is private. |
| 776 __ ret(); | 735 __ ret(); |
| 777 } | 736 } |
| 778 | 737 |
| 779 | |
| 780 void Intrinsifier::Bigint_absAdd(Assembler* assembler) { | 738 void Intrinsifier::Bigint_absAdd(Assembler* assembler) { |
| 781 // static void _absAdd(Uint32List digits, int used, | 739 // static void _absAdd(Uint32List digits, int used, |
| 782 // Uint32List a_digits, int a_used, | 740 // Uint32List a_digits, int a_used, |
| 783 // Uint32List r_digits) | 741 // Uint32List r_digits) |
| 784 | 742 |
| 785 // R2 = used, R3 = digits | 743 // R2 = used, R3 = digits |
| 786 __ ldp(R2, R3, Address(SP, 3 * kWordSize, Address::PairOffset)); | 744 __ ldp(R2, R3, Address(SP, 3 * kWordSize, Address::PairOffset)); |
| 787 __ add(R2, R2, Operand(2)); // used > 0, Smi. R2 = used + 1, round up. | 745 __ add(R2, R2, Operand(2)); // used > 0, Smi. R2 = used + 1, round up. |
| 788 __ add(R2, ZR, Operand(R2, ASR, 2)); // R2 = num of digit pairs to process. | 746 __ add(R2, ZR, Operand(R2, ASR, 2)); // R2 = num of digit pairs to process. |
| 789 // R3 = &digits[0] | 747 // R3 = &digits[0] |
| (...skipping 45 matching lines...) |
| 835 Label done; | 793 Label done; |
| 836 __ b(&done, CC); | 794 __ b(&done, CC); |
| 837 __ LoadImmediate(R0, 1); | 795 __ LoadImmediate(R0, 1); |
| 838 __ str(R0, Address(R6, 0)); | 796 __ str(R0, Address(R6, 0)); |
| 839 | 797 |
| 840 __ Bind(&done); | 798 __ Bind(&done); |
| 841 // Returning Object::null() is not required, since this method is private. | 799 // Returning Object::null() is not required, since this method is private. |
| 842 __ ret(); | 800 __ ret(); |
| 843 } | 801 } |
| 844 | 802 |
| 845 | |
| 846 void Intrinsifier::Bigint_absSub(Assembler* assembler) { | 803 void Intrinsifier::Bigint_absSub(Assembler* assembler) { |
| 847 // static void _absSub(Uint32List digits, int used, | 804 // static void _absSub(Uint32List digits, int used, |
| 848 // Uint32List a_digits, int a_used, | 805 // Uint32List a_digits, int a_used, |
| 849 // Uint32List r_digits) | 806 // Uint32List r_digits) |
| 850 | 807 |
| 851 // R2 = used, R3 = digits | 808 // R2 = used, R3 = digits |
| 852 __ ldp(R2, R3, Address(SP, 3 * kWordSize, Address::PairOffset)); | 809 __ ldp(R2, R3, Address(SP, 3 * kWordSize, Address::PairOffset)); |
| 853 __ add(R2, R2, Operand(2)); // used > 0, Smi. R2 = used + 1, round up. | 810 __ add(R2, R2, Operand(2)); // used > 0, Smi. R2 = used + 1, round up. |
| 854 __ add(R2, ZR, Operand(R2, ASR, 2)); // R2 = num of digit pairs to process. | 811 __ add(R2, ZR, Operand(R2, ASR, 2)); // R2 = num of digit pairs to process. |
| 855 // R3 = &digits[0] | 812 // R3 = &digits[0] |
| (...skipping 39 matching lines...) |
| 895 __ sbcs(R0, R0, ZR); | 852 __ sbcs(R0, R0, ZR); |
| 896 __ sub(R9, R3, Operand(R8)); // Does not affect carry flag. | 853 __ sub(R9, R3, Operand(R8)); // Does not affect carry flag. |
| 897 __ str(R0, Address(R6, 2 * Bigint::kBytesPerDigit, Address::PostIndex)); | 854 __ str(R0, Address(R6, 2 * Bigint::kBytesPerDigit, Address::PostIndex)); |
| 898 __ cbnz(&carry_loop, R9); | 855 __ cbnz(&carry_loop, R9); |
| 899 | 856 |
| 900 __ Bind(&done); | 857 __ Bind(&done); |
| 901 // Returning Object::null() is not required, since this method is private. | 858 // Returning Object::null() is not required, since this method is private. |
| 902 __ ret(); | 859 __ ret(); |
| 903 } | 860 } |
| 904 | 861 |
| 905 | |
| 906 void Intrinsifier::Bigint_mulAdd(Assembler* assembler) { | 862 void Intrinsifier::Bigint_mulAdd(Assembler* assembler) { |
| 907 // Pseudo code: | 863 // Pseudo code: |
| 908 // static int _mulAdd(Uint32List x_digits, int xi, | 864 // static int _mulAdd(Uint32List x_digits, int xi, |
| 909 // Uint32List m_digits, int i, | 865 // Uint32List m_digits, int i, |
| 910 // Uint32List a_digits, int j, int n) { | 866 // Uint32List a_digits, int j, int n) { |
| 911 // uint64_t x = x_digits[xi >> 1 .. (xi >> 1) + 1]; // xi is Smi and even. | 867 // uint64_t x = x_digits[xi >> 1 .. (xi >> 1) + 1]; // xi is Smi and even. |
| 912 // if (x == 0 || n == 0) { | 868 // if (x == 0 || n == 0) { |
| 913 // return 2; | 869 // return 2; |
| 914 // } | 870 // } |
| 915 // uint64_t* mip = &m_digits[i >> 1]; // i is Smi and even. | 871 // uint64_t* mip = &m_digits[i >> 1]; // i is Smi and even. |
| (...skipping 90 matching lines...) |
| 1006 __ ldr(R0, Address(R5, 0)); | 962 __ ldr(R0, Address(R5, 0)); |
| 1007 __ adds(R0, R0, Operand(1)); | 963 __ adds(R0, R0, Operand(1)); |
| 1008 __ str(R0, Address(R5, 2 * Bigint::kBytesPerDigit, Address::PostIndex)); | 964 __ str(R0, Address(R5, 2 * Bigint::kBytesPerDigit, Address::PostIndex)); |
| 1009 __ b(&propagate_carry_loop, CS); | 965 __ b(&propagate_carry_loop, CS); |
| 1010 | 966 |
| 1011 __ Bind(&done); | 967 __ Bind(&done); |
| 1012 __ LoadImmediate(R0, Smi::RawValue(2)); // Two digits processed. | 968 __ LoadImmediate(R0, Smi::RawValue(2)); // Two digits processed. |
| 1013 __ ret(); | 969 __ ret(); |
| 1014 } | 970 } |
| 1015 | 971 |
| 1016 | |
| 1017 void Intrinsifier::Bigint_sqrAdd(Assembler* assembler) { | 972 void Intrinsifier::Bigint_sqrAdd(Assembler* assembler) { |
| 1018 // Pseudo code: | 973 // Pseudo code: |
| 1019 // static int _sqrAdd(Uint32List x_digits, int i, | 974 // static int _sqrAdd(Uint32List x_digits, int i, |
| 1020 // Uint32List a_digits, int used) { | 975 // Uint32List a_digits, int used) { |
| 1021 // uint64_t* xip = &x_digits[i >> 1]; // i is Smi and even. | 976 // uint64_t* xip = &x_digits[i >> 1]; // i is Smi and even. |
| 1022 // uint64_t x = *xip++; | 977 // uint64_t x = *xip++; |
| 1023 // if (x == 0) return 2; | 978 // if (x == 0) return 2; |
| 1024 // uint64_t* ajp = &a_digits[i]; // j == 2*i, i is Smi. | 979 // uint64_t* ajp = &a_digits[i]; // j == 2*i, i is Smi. |
| 1025 // uint64_t aj = *ajp; | 980 // uint64_t aj = *ajp; |
| 1026 // uint128_t t = x*x + aj; | 981 // uint128_t t = x*x + aj; |
| (...skipping 94 matching lines...) |
| 1121 | 1076 |
| 1122 // *ajp = low64(t) = R6 | 1077 // *ajp = low64(t) = R6 |
| 1123 // *(ajp + 1) = high64(t) = R7 | 1078 // *(ajp + 1) = high64(t) = R7 |
| 1124 __ stp(R6, R7, Address(R5, 0, Address::PairOffset)); | 1079 __ stp(R6, R7, Address(R5, 0, Address::PairOffset)); |
| 1125 | 1080 |
| 1126 __ Bind(&x_zero); | 1081 __ Bind(&x_zero); |
| 1127 __ LoadImmediate(R0, Smi::RawValue(2)); // Two digits processed. | 1082 __ LoadImmediate(R0, Smi::RawValue(2)); // Two digits processed. |
| 1128 __ ret(); | 1083 __ ret(); |
| 1129 } | 1084 } |
| 1130 | 1085 |
| 1131 | |
| 1132 void Intrinsifier::Bigint_estQuotientDigit(Assembler* assembler) { | 1086 void Intrinsifier::Bigint_estQuotientDigit(Assembler* assembler) { |
| 1133 // There is no 128-bit by 64-bit division instruction on arm64, so we use two | 1087 // There is no 128-bit by 64-bit division instruction on arm64, so we use two |
| 1134 // 64-bit by 32-bit divisions and two 64-bit by 64-bit multiplications to | 1088 // 64-bit by 32-bit divisions and two 64-bit by 64-bit multiplications to |
| 1135 // adjust the two 32-bit digits of the estimated quotient. | 1089 // adjust the two 32-bit digits of the estimated quotient. |
| 1136 // | 1090 // |
| 1137 // Pseudo code: | 1091 // Pseudo code: |
| 1138 // static int _estQuotientDigit(Uint32List args, Uint32List digits, int i) { | 1092 // static int _estQuotientDigit(Uint32List args, Uint32List digits, int i) { |
| 1139 // uint64_t yt = args[_YT_LO .. _YT]; // _YT_LO == 0, _YT == 1. | 1093 // uint64_t yt = args[_YT_LO .. _YT]; // _YT_LO == 0, _YT == 1. |
| 1140 // uint64_t* dp = &digits[(i >> 1) - 1]; // i is Smi. | 1094 // uint64_t* dp = &digits[(i >> 1) - 1]; // i is Smi. |
| 1141 // uint64_t dh = dp[0]; // dh == digits[(i >> 1) - 1 .. i >> 1]. | 1095 // uint64_t dh = dp[0]; // dh == digits[(i >> 1) - 1 .. i >> 1]. |
| (...skipping 153 matching lines...) |
| 1295 | 1249 |
| 1296 __ Bind(&return_qd); | 1250 __ Bind(&return_qd); |
| 1297 // args[2..3] = qd | 1251 // args[2..3] = qd |
| 1298 __ str(R0, FieldAddress( | 1252 __ str(R0, FieldAddress( |
| 1299 R4, TypedData::data_offset() + 2 * Bigint::kBytesPerDigit)); | 1253 R4, TypedData::data_offset() + 2 * Bigint::kBytesPerDigit)); |
| 1300 | 1254 |
| 1301 __ LoadImmediate(R0, Smi::RawValue(2)); // Two digits processed. | 1255 __ LoadImmediate(R0, Smi::RawValue(2)); // Two digits processed. |
| 1302 __ ret(); | 1256 __ ret(); |
| 1303 } | 1257 } |
| 1304 | 1258 |
| 1305 | |
| 1306 void Intrinsifier::Montgomery_mulMod(Assembler* assembler) { | 1259 void Intrinsifier::Montgomery_mulMod(Assembler* assembler) { |
| 1307 // Pseudo code: | 1260 // Pseudo code: |
| 1308 // static int _mulMod(Uint32List args, Uint32List digits, int i) { | 1261 // static int _mulMod(Uint32List args, Uint32List digits, int i) { |
| 1309 // uint64_t rho = args[_RHO .. _RHO_HI]; // _RHO == 2, _RHO_HI == 3. | 1262 // uint64_t rho = args[_RHO .. _RHO_HI]; // _RHO == 2, _RHO_HI == 3. |
| 1310 // uint64_t d = digits[i >> 1 .. (i >> 1) + 1]; // i is Smi and even. | 1263 // uint64_t d = digits[i >> 1 .. (i >> 1) + 1]; // i is Smi and even. |
| 1311 // uint128_t t = rho*d; | 1264 // uint128_t t = rho*d; |
| 1312 // args[_MU .. _MU_HI] = t mod DIGIT_BASE^2; // _MU == 4, _MU_HI == 5. | 1265 // args[_MU .. _MU_HI] = t mod DIGIT_BASE^2; // _MU == 4, _MU_HI == 5. |
| 1313 // return 2; | 1266 // return 2; |
| 1314 // } | 1267 // } |
| 1315 | 1268 |
| (...skipping 14 matching lines...) |
| 1330 __ mul(R0, R2, R3); // R0 = low64(R2*R3). | 1283 __ mul(R0, R2, R3); // R0 = low64(R2*R3). |
| 1331 | 1284 |
| 1332 // args[4 .. 5] = R0 | 1285 // args[4 .. 5] = R0 |
| 1333 __ str(R0, FieldAddress( | 1286 __ str(R0, FieldAddress( |
| 1334 R4, TypedData::data_offset() + 4 * Bigint::kBytesPerDigit)); | 1287 R4, TypedData::data_offset() + 4 * Bigint::kBytesPerDigit)); |
| 1335 | 1288 |
| 1336 __ LoadImmediate(R0, Smi::RawValue(2)); // Two digits processed. | 1289 __ LoadImmediate(R0, Smi::RawValue(2)); // Two digits processed. |
| 1337 __ ret(); | 1290 __ ret(); |
| 1338 } | 1291 } |
| 1339 | 1292 |
| 1340 | |
| 1341 // Check if the last argument is a double; jump to label 'is_smi' if smi | 1293 // Check if the last argument is a double; jump to label 'is_smi' if smi |
| 1342 // (easy to convert to double), otherwise jump to label 'not_double_smi'. | 1294 // (easy to convert to double), otherwise jump to label 'not_double_smi'. |
| 1343 // Returns the last argument in R0. | 1295 // Returns the last argument in R0. |
| 1344 static void TestLastArgumentIsDouble(Assembler* assembler, | 1296 static void TestLastArgumentIsDouble(Assembler* assembler, |
| 1345 Label* is_smi, | 1297 Label* is_smi, |
| 1346 Label* not_double_smi) { | 1298 Label* not_double_smi) { |
| 1347 __ ldr(R0, Address(SP, 0 * kWordSize)); | 1299 __ ldr(R0, Address(SP, 0 * kWordSize)); |
| 1348 __ tsti(R0, Immediate(kSmiTagMask)); | 1300 __ tsti(R0, Immediate(kSmiTagMask)); |
| 1349 __ b(is_smi, EQ); | 1301 __ b(is_smi, EQ); |
| 1350 __ CompareClassId(R0, kDoubleCid); | 1302 __ CompareClassId(R0, kDoubleCid); |
| 1351 __ b(not_double_smi, NE); | 1303 __ b(not_double_smi, NE); |
| 1352 // Fall through with Double in R0. | 1304 // Fall through with Double in R0. |
| 1353 } | 1305 } |
| 1354 | 1306 |
| 1355 | |
| 1356 // Both arguments on stack, arg0 (left) is a double, arg1 (right) is of unknown | 1307 // Both arguments on stack, arg0 (left) is a double, arg1 (right) is of unknown |
| 1357 // type. Return true or false object in the register R0. Any NaN argument | 1308 // type. Return true or false object in the register R0. Any NaN argument |
| 1358 // returns false. Any non-double arg1 causes control flow to fall through to the | 1309 // returns false. Any non-double arg1 causes control flow to fall through to the |
| 1359 // slow case (compiled method body). | 1310 // slow case (compiled method body). |
| 1360 static void CompareDoubles(Assembler* assembler, Condition true_condition) { | 1311 static void CompareDoubles(Assembler* assembler, Condition true_condition) { |
| 1361 Label fall_through, is_smi, double_op, not_nan; | 1312 Label fall_through, is_smi, double_op, not_nan; |
| 1362 | 1313 |
| 1363 TestLastArgumentIsDouble(assembler, &is_smi, &fall_through); | 1314 TestLastArgumentIsDouble(assembler, &is_smi, &fall_through); |
| 1364 // Both arguments are double, right operand is in R0. | 1315 // Both arguments are double, right operand is in R0. |
| 1365 | 1316 |
| (...skipping 12 matching lines...) |
| 1378 __ csel(R0, TMP, R0, true_condition); | 1329 __ csel(R0, TMP, R0, true_condition); |
| 1379 __ ret(); | 1330 __ ret(); |
| 1380 | 1331 |
| 1381 __ Bind(&is_smi); // Convert R0 to a double. | 1332 __ Bind(&is_smi); // Convert R0 to a double. |
| 1382 __ SmiUntag(R0); | 1333 __ SmiUntag(R0); |
| 1383 __ scvtfdx(V1, R0); | 1334 __ scvtfdx(V1, R0); |
| 1384 __ b(&double_op); // Then do the comparison. | 1335 __ b(&double_op); // Then do the comparison. |
| 1385 __ Bind(&fall_through); | 1336 __ Bind(&fall_through); |
| 1386 } | 1337 } |
| 1387 | 1338 |
| 1388 | |
| 1389 void Intrinsifier::Double_greaterThan(Assembler* assembler) { | 1339 void Intrinsifier::Double_greaterThan(Assembler* assembler) { |
| 1390 CompareDoubles(assembler, HI); | 1340 CompareDoubles(assembler, HI); |
| 1391 } | 1341 } |
| 1392 | 1342 |
| 1393 | |
| 1394 void Intrinsifier::Double_greaterEqualThan(Assembler* assembler) { | 1343 void Intrinsifier::Double_greaterEqualThan(Assembler* assembler) { |
| 1395 CompareDoubles(assembler, CS); | 1344 CompareDoubles(assembler, CS); |
| 1396 } | 1345 } |
| 1397 | 1346 |
| 1398 | |
| 1399 void Intrinsifier::Double_lessThan(Assembler* assembler) { | 1347 void Intrinsifier::Double_lessThan(Assembler* assembler) { |
| 1400 CompareDoubles(assembler, CC); | 1348 CompareDoubles(assembler, CC); |
| 1401 } | 1349 } |
| 1402 | 1350 |
| 1403 | |
| 1404 void Intrinsifier::Double_equal(Assembler* assembler) { | 1351 void Intrinsifier::Double_equal(Assembler* assembler) { |
| 1405 CompareDoubles(assembler, EQ); | 1352 CompareDoubles(assembler, EQ); |
| 1406 } | 1353 } |
| 1407 | 1354 |
| 1408 | |
| 1409 void Intrinsifier::Double_lessEqualThan(Assembler* assembler) { | 1355 void Intrinsifier::Double_lessEqualThan(Assembler* assembler) { |
| 1410 CompareDoubles(assembler, LS); | 1356 CompareDoubles(assembler, LS); |
| 1411 } | 1357 } |
| 1412 | 1358 |
| 1413 | |
| 1414 // Expects left argument to be double (receiver). Right argument is unknown. | 1359 // Expects left argument to be double (receiver). Right argument is unknown. |
| 1415 // Both arguments are on stack. | 1360 // Both arguments are on stack. |
| 1416 static void DoubleArithmeticOperations(Assembler* assembler, Token::Kind kind) { | 1361 static void DoubleArithmeticOperations(Assembler* assembler, Token::Kind kind) { |
| 1417 Label fall_through, is_smi, double_op; | 1362 Label fall_through, is_smi, double_op; |
| 1418 | 1363 |
| 1419 TestLastArgumentIsDouble(assembler, &is_smi, &fall_through); | 1364 TestLastArgumentIsDouble(assembler, &is_smi, &fall_through); |
| 1420 // Both arguments are double, right operand is in R0. | 1365 // Both arguments are double, right operand is in R0. |
| 1421 __ LoadDFieldFromOffset(V1, R0, Double::value_offset()); | 1366 __ LoadDFieldFromOffset(V1, R0, Double::value_offset()); |
| 1422 __ Bind(&double_op); | 1367 __ Bind(&double_op); |
| 1423 __ ldr(R0, Address(SP, 1 * kWordSize)); // Left argument. | 1368 __ ldr(R0, Address(SP, 1 * kWordSize)); // Left argument. |
| (...skipping 21 matching lines...) |
| 1445 __ ret(); | 1390 __ ret(); |
| 1446 | 1391 |
| 1447 __ Bind(&is_smi); // Convert R0 to a double. | 1392 __ Bind(&is_smi); // Convert R0 to a double. |
| 1448 __ SmiUntag(R0); | 1393 __ SmiUntag(R0); |
| 1449 __ scvtfdx(V1, R0); | 1394 __ scvtfdx(V1, R0); |
| 1450 __ b(&double_op); | 1395 __ b(&double_op); |
| 1451 | 1396 |
| 1452 __ Bind(&fall_through); | 1397 __ Bind(&fall_through); |
| 1453 } | 1398 } |
| 1454 | 1399 |
| 1455 | |
| 1456 void Intrinsifier::Double_add(Assembler* assembler) { | 1400 void Intrinsifier::Double_add(Assembler* assembler) { |
| 1457 DoubleArithmeticOperations(assembler, Token::kADD); | 1401 DoubleArithmeticOperations(assembler, Token::kADD); |
| 1458 } | 1402 } |
| 1459 | 1403 |
| 1460 | |
| 1461 void Intrinsifier::Double_mul(Assembler* assembler) { | 1404 void Intrinsifier::Double_mul(Assembler* assembler) { |
| 1462 DoubleArithmeticOperations(assembler, Token::kMUL); | 1405 DoubleArithmeticOperations(assembler, Token::kMUL); |
| 1463 } | 1406 } |
| 1464 | 1407 |
| 1465 | |
| 1466 void Intrinsifier::Double_sub(Assembler* assembler) { | 1408 void Intrinsifier::Double_sub(Assembler* assembler) { |
| 1467 DoubleArithmeticOperations(assembler, Token::kSUB); | 1409 DoubleArithmeticOperations(assembler, Token::kSUB); |
| 1468 } | 1410 } |
| 1469 | 1411 |
| 1470 | |
| 1471 void Intrinsifier::Double_div(Assembler* assembler) { | 1412 void Intrinsifier::Double_div(Assembler* assembler) { |
| 1472 DoubleArithmeticOperations(assembler, Token::kDIV); | 1413 DoubleArithmeticOperations(assembler, Token::kDIV); |
| 1473 } | 1414 } |
| 1474 | 1415 |
| 1475 | |
| 1476 // Left is double, right is integer (Bigint, Mint or Smi). | 1416 // Left is double, right is integer (Bigint, Mint or Smi). |
| 1477 void Intrinsifier::Double_mulFromInteger(Assembler* assembler) { | 1417 void Intrinsifier::Double_mulFromInteger(Assembler* assembler) { |
| 1478 Label fall_through; | 1418 Label fall_through; |
| 1479 // Only smis allowed. | 1419 // Only smis allowed. |
| 1480 __ ldr(R0, Address(SP, 0 * kWordSize)); | 1420 __ ldr(R0, Address(SP, 0 * kWordSize)); |
| 1481 __ tsti(R0, Immediate(kSmiTagMask)); | 1421 __ tsti(R0, Immediate(kSmiTagMask)); |
| 1482 __ b(&fall_through, NE); | 1422 __ b(&fall_through, NE); |
| 1483 // Is Smi. | 1423 // Is Smi. |
| 1484 __ SmiUntag(R0); | 1424 __ SmiUntag(R0); |
| 1485 __ scvtfdx(V1, R0); | 1425 __ scvtfdx(V1, R0); |
| 1486 __ ldr(R0, Address(SP, 1 * kWordSize)); | 1426 __ ldr(R0, Address(SP, 1 * kWordSize)); |
| 1487 __ LoadDFieldFromOffset(V0, R0, Double::value_offset()); | 1427 __ LoadDFieldFromOffset(V0, R0, Double::value_offset()); |
| 1488 __ fmuld(V0, V0, V1); | 1428 __ fmuld(V0, V0, V1); |
| 1489 const Class& double_class = | 1429 const Class& double_class = |
| 1490 Class::Handle(Isolate::Current()->object_store()->double_class()); | 1430 Class::Handle(Isolate::Current()->object_store()->double_class()); |
| 1491 __ TryAllocate(double_class, &fall_through, R0, R1); | 1431 __ TryAllocate(double_class, &fall_through, R0, R1); |
| 1492 __ StoreDFieldToOffset(V0, R0, Double::value_offset()); | 1432 __ StoreDFieldToOffset(V0, R0, Double::value_offset()); |
| 1493 __ ret(); | 1433 __ ret(); |
| 1494 __ Bind(&fall_through); | 1434 __ Bind(&fall_through); |
| 1495 } | 1435 } |
| 1496 | 1436 |
| 1497 | |
| 1498 void Intrinsifier::DoubleFromInteger(Assembler* assembler) { | 1437 void Intrinsifier::DoubleFromInteger(Assembler* assembler) { |
| 1499 Label fall_through; | 1438 Label fall_through; |
| 1500 | 1439 |
| 1501 __ ldr(R0, Address(SP, 0 * kWordSize)); | 1440 __ ldr(R0, Address(SP, 0 * kWordSize)); |
| 1502 __ tsti(R0, Immediate(kSmiTagMask)); | 1441 __ tsti(R0, Immediate(kSmiTagMask)); |
| 1503 __ b(&fall_through, NE); | 1442 __ b(&fall_through, NE); |
| 1504 // Is Smi. | 1443 // Is Smi. |
| 1505 __ SmiUntag(R0); | 1444 __ SmiUntag(R0); |
| 1506 __ scvtfdx(V0, R0); | 1445 __ scvtfdx(V0, R0); |
| 1507 const Class& double_class = | 1446 const Class& double_class = |
| 1508 Class::Handle(Isolate::Current()->object_store()->double_class()); | 1447 Class::Handle(Isolate::Current()->object_store()->double_class()); |
| 1509 __ TryAllocate(double_class, &fall_through, R0, R1); | 1448 __ TryAllocate(double_class, &fall_through, R0, R1); |
| 1510 __ StoreDFieldToOffset(V0, R0, Double::value_offset()); | 1449 __ StoreDFieldToOffset(V0, R0, Double::value_offset()); |
| 1511 __ ret(); | 1450 __ ret(); |
| 1512 __ Bind(&fall_through); | 1451 __ Bind(&fall_through); |
| 1513 } | 1452 } |
| 1514 | 1453 |
| 1515 | |
| 1516 void Intrinsifier::Double_getIsNaN(Assembler* assembler) { | 1454 void Intrinsifier::Double_getIsNaN(Assembler* assembler) { |
| 1517 __ ldr(R0, Address(SP, 0 * kWordSize)); | 1455 __ ldr(R0, Address(SP, 0 * kWordSize)); |
| 1518 __ LoadDFieldFromOffset(V0, R0, Double::value_offset()); | 1456 __ LoadDFieldFromOffset(V0, R0, Double::value_offset()); |
| 1519 __ fcmpd(V0, V0); | 1457 __ fcmpd(V0, V0); |
| 1520 __ LoadObject(TMP, Bool::False()); | 1458 __ LoadObject(TMP, Bool::False()); |
| 1521 __ LoadObject(R0, Bool::True()); | 1459 __ LoadObject(R0, Bool::True()); |
| 1522 __ csel(R0, TMP, R0, VC); | 1460 __ csel(R0, TMP, R0, VC); |
| 1523 __ ret(); | 1461 __ ret(); |
| 1524 } | 1462 } |
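The fcmpd(V0, V0) / csel(..., VC) pair above is the standard self-comparison NaN test: comparing NaN with itself is unordered, which on ARM sets the V flag. A hedged C++ equivalent:

    // NaN is the only double for which (value == value) is false.
    bool DoubleIsNaN(double value) {
      return value != value;  // same predicate as std::isnan(value)
    }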
| 1525 | 1463 |
| 1526 | |
| 1527 void Intrinsifier::Double_getIsInfinite(Assembler* assembler) { | 1464 void Intrinsifier::Double_getIsInfinite(Assembler* assembler) { |
| 1528 __ ldr(R0, Address(SP, 0 * kWordSize)); | 1465 __ ldr(R0, Address(SP, 0 * kWordSize)); |
| 1529 __ LoadFieldFromOffset(R0, R0, Double::value_offset()); | 1466 __ LoadFieldFromOffset(R0, R0, Double::value_offset()); |
| 1530 // Mask off the sign. | 1467 // Mask off the sign. |
| 1531 __ AndImmediate(R0, R0, 0x7FFFFFFFFFFFFFFFLL); | 1468 __ AndImmediate(R0, R0, 0x7FFFFFFFFFFFFFFFLL); |
| 1532 // Compare with +infinity. | 1469 // Compare with +infinity. |
| 1533 __ CompareImmediate(R0, 0x7FF0000000000000LL); | 1470 __ CompareImmediate(R0, 0x7FF0000000000000LL); |
| 1534 __ LoadObject(R0, Bool::False()); | 1471 __ LoadObject(R0, Bool::False()); |
| 1535 __ LoadObject(TMP, Bool::True()); | 1472 __ LoadObject(TMP, Bool::True()); |
| 1536 __ csel(R0, TMP, R0, EQ); | 1473 __ csel(R0, TMP, R0, EQ); |
| 1537 __ ret(); | 1474 __ ret(); |
| 1538 } | 1475 } |
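A hedged C++ sketch of the bit-level test above: clear the sign bit, then compare against the IEEE-754 bit pattern of +infinity (all exponent bits set, zero mantissa).

    #include <cstdint>
    #include <cstring>

    bool DoubleIsInfinite(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));  // well-defined type pun
      bits &= 0x7FFFFFFFFFFFFFFFULL;             // mask off the sign bit
      return bits == 0x7FF0000000000000ULL;      // +infinity bit pattern
    }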
| 1539 | 1476 |
| 1540 | |
| 1541 void Intrinsifier::Double_getIsNegative(Assembler* assembler) { | 1477 void Intrinsifier::Double_getIsNegative(Assembler* assembler) { |
| 1542 const Register false_reg = R0; | 1478 const Register false_reg = R0; |
| 1543 const Register true_reg = R2; | 1479 const Register true_reg = R2; |
| 1544 Label is_false, is_true, is_zero; | 1480 Label is_false, is_true, is_zero; |
| 1545 | 1481 |
| 1546 __ ldr(R0, Address(SP, 0 * kWordSize)); | 1482 __ ldr(R0, Address(SP, 0 * kWordSize)); |
| 1547 __ LoadDFieldFromOffset(V0, R0, Double::value_offset()); | 1483 __ LoadDFieldFromOffset(V0, R0, Double::value_offset()); |
| 1548 __ fcmpdz(V0); | 1484 __ fcmpdz(V0); |
| 1549 __ LoadObject(true_reg, Bool::True()); | 1485 __ LoadObject(true_reg, Bool::True()); |
| 1550 __ LoadObject(false_reg, Bool::False()); | 1486 __ LoadObject(false_reg, Bool::False()); |
| 1551 __ b(&is_false, VS); // NaN -> false. | 1487 __ b(&is_false, VS); // NaN -> false. |
| 1552 __ b(&is_zero, EQ); // Check for negative zero. | 1488 __ b(&is_zero, EQ); // Check for negative zero. |
| 1553 __ b(&is_false, CS); // >= 0 -> false. | 1489 __ b(&is_false, CS); // >= 0 -> false. |
| 1554 | 1490 |
| 1555 __ Bind(&is_true); | 1491 __ Bind(&is_true); |
| 1556 __ mov(R0, true_reg); | 1492 __ mov(R0, true_reg); |
| 1557 | 1493 |
| 1558 __ Bind(&is_false); | 1494 __ Bind(&is_false); |
| 1559 __ ret(); | 1495 __ ret(); |
| 1560 | 1496 |
| 1561 __ Bind(&is_zero); | 1497 __ Bind(&is_zero); |
| 1562 // Check for negative zero by looking at the sign bit. | 1498 // Check for negative zero by looking at the sign bit. |
| 1563 __ fmovrd(R1, V0); | 1499 __ fmovrd(R1, V0); |
| 1564 __ LsrImmediate(R1, R1, 63); | 1500 __ LsrImmediate(R1, R1, 63); |
| 1565 __ tsti(R1, Immediate(1)); | 1501 __ tsti(R1, Immediate(1)); |
| 1566 __ csel(R0, true_reg, false_reg, NE); // Sign bit set. | 1502 __ csel(R0, true_reg, false_reg, NE); // Sign bit set. |
| 1567 __ ret(); | 1503 __ ret(); |
| 1568 } | 1504 } |
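A plain (value < 0.0) test would miss negative zero and must not report NaN, which is why the code above branches to a sign-bit check when the compare against zero reports equality. A hedged C++ sketch of the same semantics:

    #include <cstdint>
    #include <cstring>

    bool DoubleIsNegative(double value) {
      if (value != value) return false;      // NaN -> false
      if (value != 0.0) return value < 0.0;  // ordinary values
      uint64_t bits;                         // +/-0.0: inspect the sign bit
      std::memcpy(&bits, &value, sizeof(bits));
      return (bits >> 63) != 0;              // -0.0 -> true
    }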
| 1569 | 1505 |
| 1570 | |
| 1571 void Intrinsifier::DoubleToInteger(Assembler* assembler) { | 1506 void Intrinsifier::DoubleToInteger(Assembler* assembler) { |
| 1572 Label fall_through; | 1507 Label fall_through; |
| 1573 | 1508 |
| 1574 __ ldr(R0, Address(SP, 0 * kWordSize)); | 1509 __ ldr(R0, Address(SP, 0 * kWordSize)); |
| 1575 __ LoadDFieldFromOffset(V0, R0, Double::value_offset()); | 1510 __ LoadDFieldFromOffset(V0, R0, Double::value_offset()); |
| 1576 | 1511 |
| 1577 // Explicit NaN check, since ARM gives an FPU exception if you try to | 1512 // Explicit NaN check, since ARM gives an FPU exception if you try to |
| 1578 // convert NaN to an int. | 1513 // convert NaN to an int. |
| 1579 __ fcmpd(V0, V0); | 1514 __ fcmpd(V0, V0); |
| 1580 __ b(&fall_through, VS); | 1515 __ b(&fall_through, VS); |
| 1581 | 1516 |
| 1582 __ fcvtzds(R0, V0); | 1517 __ fcvtzds(R0, V0); |
| 1583 // Overflow is signaled with minint. | 1518 // Overflow is signaled with minint. |
| 1584 // Check for overflow and that it fits into Smi. | 1519 // Check for overflow and that it fits into Smi. |
| 1585 __ CompareImmediate(R0, 0xC000000000000000); | 1520 __ CompareImmediate(R0, 0xC000000000000000); |
| 1586 __ b(&fall_through, MI); | 1521 __ b(&fall_through, MI); |
| 1587 __ SmiTag(R0); | 1522 __ SmiTag(R0); |
| 1588 __ ret(); | 1523 __ ret(); |
| 1589 __ Bind(&fall_through); | 1524 __ Bind(&fall_through); |
| 1590 } | 1525 } |
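The range check above is a single compare: on a 64-bit target a Smi holds 63 bits, so valid values lie in [-2^62, 2^62 - 1]. Subtracting 0xC000000000000000 (that is, -2^62) leaves the N flag clear exactly for in-range values, and the saturated fcvtzds overflow result (INT64_MIN) fails the same test. A hedged C++ sketch:

    #include <cstdint>

    bool FitsInSmi64(int64_t value) {
      // Mirrors "cmp value, #0xC000000000000000; b.mi fall_through":
      // value + 2^62 stays non-negative only for -2^62 <= value < 2^62.
      return static_cast<int64_t>(static_cast<uint64_t>(value) +
                                  0x4000000000000000ULL) >= 0;
    }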
| 1591 | 1526 |
| 1592 | |
| 1593 void Intrinsifier::MathSqrt(Assembler* assembler) { | 1527 void Intrinsifier::MathSqrt(Assembler* assembler) { |
| 1594 Label fall_through, is_smi, double_op; | 1528 Label fall_through, is_smi, double_op; |
| 1595 TestLastArgumentIsDouble(assembler, &is_smi, &fall_through); | 1529 TestLastArgumentIsDouble(assembler, &is_smi, &fall_through); |
| 1596 // Argument is double and is in R0. | 1530 // Argument is double and is in R0. |
| 1597 __ LoadDFieldFromOffset(V1, R0, Double::value_offset()); | 1531 __ LoadDFieldFromOffset(V1, R0, Double::value_offset()); |
| 1598 __ Bind(&double_op); | 1532 __ Bind(&double_op); |
| 1599 __ fsqrtd(V0, V1); | 1533 __ fsqrtd(V0, V1); |
| 1600 const Class& double_class = | 1534 const Class& double_class = |
| 1601 Class::Handle(Isolate::Current()->object_store()->double_class()); | 1535 Class::Handle(Isolate::Current()->object_store()->double_class()); |
| 1602 __ TryAllocate(double_class, &fall_through, R0, R1); | 1536 __ TryAllocate(double_class, &fall_through, R0, R1); |
| 1603 __ StoreDFieldToOffset(V0, R0, Double::value_offset()); | 1537 __ StoreDFieldToOffset(V0, R0, Double::value_offset()); |
| 1604 __ ret(); | 1538 __ ret(); |
| 1605 __ Bind(&is_smi); | 1539 __ Bind(&is_smi); |
| 1606 __ SmiUntag(R0); | 1540 __ SmiUntag(R0); |
| 1607 __ scvtfdx(V1, R0); | 1541 __ scvtfdx(V1, R0); |
| 1608 __ b(&double_op); | 1542 __ b(&double_op); |
| 1609 __ Bind(&fall_through); | 1543 __ Bind(&fall_through); |
| 1610 } | 1544 } |
| 1611 | 1545 |
| 1612 | |
| 1613 // var state = ((_A * (_state[kSTATE_LO])) + _state[kSTATE_HI]) & _MASK_64; | 1546 // var state = ((_A * (_state[kSTATE_LO])) + _state[kSTATE_HI]) & _MASK_64; |
| 1614 // _state[kSTATE_LO] = state & _MASK_32; | 1547 // _state[kSTATE_LO] = state & _MASK_32; |
| 1615 // _state[kSTATE_HI] = state >> 32; | 1548 // _state[kSTATE_HI] = state >> 32; |
| 1616 void Intrinsifier::Random_nextState(Assembler* assembler) { | 1549 void Intrinsifier::Random_nextState(Assembler* assembler) { |
| 1617 const Library& math_lib = Library::Handle(Library::MathLibrary()); | 1550 const Library& math_lib = Library::Handle(Library::MathLibrary()); |
| 1618 ASSERT(!math_lib.IsNull()); | 1551 ASSERT(!math_lib.IsNull()); |
| 1619 const Class& random_class = | 1552 const Class& random_class = |
| 1620 Class::Handle(math_lib.LookupClassAllowPrivate(Symbols::_Random())); | 1553 Class::Handle(math_lib.LookupClassAllowPrivate(Symbols::_Random())); |
| 1621 ASSERT(!random_class.IsNull()); | 1554 ASSERT(!random_class.IsNull()); |
| 1622 const Field& state_field = Field::ZoneHandle( | 1555 const Field& state_field = Field::ZoneHandle( |
| (...skipping 23 matching lines...) |
| 1646 __ LoadImmediate(R0, a_int_value); | 1579 __ LoadImmediate(R0, a_int_value); |
| 1647 __ LoadFromOffset(R2, R1, disp); | 1580 __ LoadFromOffset(R2, R1, disp); |
| 1648 __ LsrImmediate(R3, R2, 32); | 1581 __ LsrImmediate(R3, R2, 32); |
| 1649 __ andi(R2, R2, Immediate(0xffffffff)); | 1582 __ andi(R2, R2, Immediate(0xffffffff)); |
| 1650 __ mul(R2, R0, R2); | 1583 __ mul(R2, R0, R2); |
| 1651 __ add(R2, R2, Operand(R3)); | 1584 __ add(R2, R2, Operand(R3)); |
| 1652 __ StoreToOffset(R2, R1, disp); | 1585 __ StoreToOffset(R2, R1, disp); |
| 1653 __ ret(); | 1586 __ ret(); |
| 1654 } | 1587 } |
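A hedged C++ sketch of the multiply-with-carry update described by the pseudocode above. Both 32-bit halves live in one 64-bit field, so "lo" and "hi" are simply the low and high words of the state, and 64-bit wrap-around supplies the & _MASK_64. The real code loads the multiplier from the _A field; the constant below is an assumption for illustration.

    #include <cstdint>

    uint64_t RandomNextState(uint64_t state) {
      const uint64_t kA = 0xffffda61ULL;        // assumed value of _A
      const uint64_t lo = state & 0xffffffffULL;
      const uint64_t hi = state >> 32;
      return kA * lo + hi;  // new hi:lo pair packed into 64 bits
    }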
| 1655 | 1588 |
| 1656 | |
| 1657 void Intrinsifier::ObjectEquals(Assembler* assembler) { | 1589 void Intrinsifier::ObjectEquals(Assembler* assembler) { |
| 1658 __ ldr(R0, Address(SP, 0 * kWordSize)); | 1590 __ ldr(R0, Address(SP, 0 * kWordSize)); |
| 1659 __ ldr(R1, Address(SP, 1 * kWordSize)); | 1591 __ ldr(R1, Address(SP, 1 * kWordSize)); |
| 1660 __ cmp(R0, Operand(R1)); | 1592 __ cmp(R0, Operand(R1)); |
| 1661 __ LoadObject(R0, Bool::False()); | 1593 __ LoadObject(R0, Bool::False()); |
| 1662 __ LoadObject(TMP, Bool::True()); | 1594 __ LoadObject(TMP, Bool::True()); |
| 1663 __ csel(R0, TMP, R0, EQ); | 1595 __ csel(R0, TMP, R0, EQ); |
| 1664 __ ret(); | 1596 __ ret(); |
| 1665 } | 1597 } |
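ObjectEquals is raw identity: the two tagged pointers are compared directly, so equal Smis match (identical tagged bits) while two distinct boxed objects never do. The LoadObject/LoadObject/csel triple is this file's branch-free idiom for materializing a Dart bool from the flags. A hedged C++ analogue:

    #include <cstdint>

    bool ObjectEqualsSemantics(uintptr_t tagged_a, uintptr_t tagged_b) {
      return tagged_a == tagged_b;  // pointer/Smi identity, nothing deeper
    }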
| 1666 | 1598 |
| 1667 | |
| 1668 static void RangeCheck(Assembler* assembler, | 1599 static void RangeCheck(Assembler* assembler, |
| 1669 Register val, | 1600 Register val, |
| 1670 Register tmp, | 1601 Register tmp, |
| 1671 intptr_t low, | 1602 intptr_t low, |
| 1672 intptr_t high, | 1603 intptr_t high, |
| 1673 Condition cc, | 1604 Condition cc, |
| 1674 Label* target) { | 1605 Label* target) { |
| 1675 __ AddImmediate(tmp, val, -low); | 1606 __ AddImmediate(tmp, val, -low); |
| 1676 __ CompareImmediate(tmp, high - low); | 1607 __ CompareImmediate(tmp, high - low); |
| 1677 __ b(target, cc); | 1608 __ b(target, cc); |
| 1678 } | 1609 } |
| 1679 | 1610 |
| 1680 | |
| 1681 const Condition kIfNotInRange = HI; | 1611 const Condition kIfNotInRange = HI; |
| 1682 const Condition kIfInRange = LS; | 1612 const Condition kIfInRange = LS; |
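RangeCheck and the two conditions above encode the classic unsigned range trick: after tmp = val - low, the unsigned comparison tmp <= (high - low) holds exactly when low <= val <= high, so one subtraction plus one compare (branching on HI or LS) replaces a pair of compares. A hedged C++ equivalent:

    #include <cstdint>

    bool CidInRange(intptr_t val, intptr_t low, intptr_t high) {
      // Values below `low` wrap to huge unsigned numbers and fail the test.
      return static_cast<uintptr_t>(val - low) <=
             static_cast<uintptr_t>(high - low);
    }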
| 1683 | 1613 |
| 1684 | |
| 1685 static void JumpIfInteger(Assembler* assembler, | 1614 static void JumpIfInteger(Assembler* assembler, |
| 1686 Register cid, | 1615 Register cid, |
| 1687 Register tmp, | 1616 Register tmp, |
| 1688 Label* target) { | 1617 Label* target) { |
| 1689 RangeCheck(assembler, cid, tmp, kSmiCid, kBigintCid, kIfInRange, target); | 1618 RangeCheck(assembler, cid, tmp, kSmiCid, kBigintCid, kIfInRange, target); |
| 1690 } | 1619 } |
| 1691 | 1620 |
| 1692 | |
| 1693 static void JumpIfNotInteger(Assembler* assembler, | 1621 static void JumpIfNotInteger(Assembler* assembler, |
| 1694 Register cid, | 1622 Register cid, |
| 1695 Register tmp, | 1623 Register tmp, |
| 1696 Label* target) { | 1624 Label* target) { |
| 1697 RangeCheck(assembler, cid, tmp, kSmiCid, kBigintCid, kIfNotInRange, target); | 1625 RangeCheck(assembler, cid, tmp, kSmiCid, kBigintCid, kIfNotInRange, target); |
| 1698 } | 1626 } |
| 1699 | 1627 |
| 1700 | |
| 1701 static void JumpIfString(Assembler* assembler, | 1628 static void JumpIfString(Assembler* assembler, |
| 1702 Register cid, | 1629 Register cid, |
| 1703 Register tmp, | 1630 Register tmp, |
| 1704 Label* target) { | 1631 Label* target) { |
| 1705 RangeCheck(assembler, cid, tmp, kOneByteStringCid, kExternalTwoByteStringCid, | 1632 RangeCheck(assembler, cid, tmp, kOneByteStringCid, kExternalTwoByteStringCid, |
| 1706 kIfInRange, target); | 1633 kIfInRange, target); |
| 1707 } | 1634 } |
| 1708 | 1635 |
| 1709 | |
| 1710 static void JumpIfNotString(Assembler* assembler, | 1636 static void JumpIfNotString(Assembler* assembler, |
| 1711 Register cid, | 1637 Register cid, |
| 1712 Register tmp, | 1638 Register tmp, |
| 1713 Label* target) { | 1639 Label* target) { |
| 1714 RangeCheck(assembler, cid, tmp, kOneByteStringCid, kExternalTwoByteStringCid, | 1640 RangeCheck(assembler, cid, tmp, kOneByteStringCid, kExternalTwoByteStringCid, |
| 1715 kIfNotInRange, target); | 1641 kIfNotInRange, target); |
| 1716 } | 1642 } |
| 1717 | 1643 |
| 1718 | |
| 1719 // Returns the type quickly for simple types (not parameterized and not a signature type). | 1644 // Returns the type quickly for simple types (not parameterized and not a signature type). |
| 1720 void Intrinsifier::ObjectRuntimeType(Assembler* assembler) { | 1645 void Intrinsifier::ObjectRuntimeType(Assembler* assembler) { |
| 1721 Label fall_through, use_canonical_type, not_double, not_integer; | 1646 Label fall_through, use_canonical_type, not_double, not_integer; |
| 1722 __ ldr(R0, Address(SP, 0 * kWordSize)); | 1647 __ ldr(R0, Address(SP, 0 * kWordSize)); |
| 1723 __ LoadClassIdMayBeSmi(R1, R0); | 1648 __ LoadClassIdMayBeSmi(R1, R0); |
| 1724 | 1649 |
| 1725 __ CompareImmediate(R1, kClosureCid); | 1650 __ CompareImmediate(R1, kClosureCid); |
| 1726 __ b(&fall_through, EQ); // Instance is a closure. | 1651 __ b(&fall_through, EQ); // Instance is a closure. |
| 1727 | 1652 |
| 1728 __ CompareImmediate(R1, kNumPredefinedCids); | 1653 __ CompareImmediate(R1, kNumPredefinedCids); |
| (...skipping 28 matching lines...) |
| 1757 __ b(&fall_through, NE); | 1682 __ b(&fall_through, NE); |
| 1758 | 1683 |
| 1759 __ ldr(R0, FieldAddress(R2, Class::canonical_type_offset())); | 1684 __ ldr(R0, FieldAddress(R2, Class::canonical_type_offset())); |
| 1760 __ CompareObject(R0, Object::null_object()); | 1685 __ CompareObject(R0, Object::null_object()); |
| 1761 __ b(&fall_through, EQ); | 1686 __ b(&fall_through, EQ); |
| 1762 __ ret(); | 1687 __ ret(); |
| 1763 | 1688 |
| 1764 __ Bind(&fall_through); | 1689 __ Bind(&fall_through); |
| 1765 } | 1690 } |
| 1766 | 1691 |
| 1767 | |
| 1768 void Intrinsifier::ObjectHaveSameRuntimeType(Assembler* assembler) { | 1692 void Intrinsifier::ObjectHaveSameRuntimeType(Assembler* assembler) { |
| 1769 Label fall_through, different_cids, equal, not_equal, not_integer; | 1693 Label fall_through, different_cids, equal, not_equal, not_integer; |
| 1770 __ ldr(R0, Address(SP, 0 * kWordSize)); | 1694 __ ldr(R0, Address(SP, 0 * kWordSize)); |
| 1771 __ LoadClassIdMayBeSmi(R1, R0); | 1695 __ LoadClassIdMayBeSmi(R1, R0); |
| 1772 | 1696 |
| 1773 // Check if the left-hand side is a closure. Closures are handled in the runtime. | 1697 // Check if the left-hand side is a closure. Closures are handled in the runtime. |
| 1774 __ CompareImmediate(R1, kClosureCid); | 1698 __ CompareImmediate(R1, kClosureCid); |
| 1775 __ b(&fall_through, EQ); | 1699 __ b(&fall_through, EQ); |
| 1776 | 1700 |
| 1777 __ ldr(R0, Address(SP, 1 * kWordSize)); | 1701 __ ldr(R0, Address(SP, 1 * kWordSize)); |
| (...skipping 34 matching lines...) |
| 1812 JumpIfString(assembler, R2, R0, &equal); | 1736 JumpIfString(assembler, R2, R0, &equal); |
| 1813 | 1737 |
| 1814 // Not both strings and not both integers, and the class ids differ. | 1738 // Not both strings and not both integers, and the class ids differ. |
| 1815 __ Bind(¬_equal); | 1739 __ Bind(¬_equal); |
| 1816 __ LoadObject(R0, Bool::False()); | 1740 __ LoadObject(R0, Bool::False()); |
| 1817 __ ret(); | 1741 __ ret(); |
| 1818 | 1742 |
| 1819 __ Bind(&fall_through); | 1743 __ Bind(&fall_through); |
| 1820 } | 1744 } |
| 1821 | 1745 |
| 1822 | |
| 1823 void Intrinsifier::String_getHashCode(Assembler* assembler) { | 1746 void Intrinsifier::String_getHashCode(Assembler* assembler) { |
| 1824 Label fall_through; | 1747 Label fall_through; |
| 1825 __ ldr(R0, Address(SP, 0 * kWordSize)); | 1748 __ ldr(R0, Address(SP, 0 * kWordSize)); |
| 1826 __ ldr(R0, FieldAddress(R0, String::hash_offset()), kUnsignedWord); | 1749 __ ldr(R0, FieldAddress(R0, String::hash_offset()), kUnsignedWord); |
| 1827 __ adds(R0, R0, Operand(R0)); // Smi tag the hash code, setting Z flag. | 1750 __ adds(R0, R0, Operand(R0)); // Smi tag the hash code, setting Z flag. |
| 1828 __ b(&fall_through, EQ); | 1751 __ b(&fall_through, EQ); |
| 1829 __ ret(); | 1752 __ ret(); |
| 1830 // Hash not yet computed. | 1753 // Hash not yet computed. |
| 1831 __ Bind(&fall_through); | 1754 __ Bind(&fall_through); |
| 1832 } | 1755 } |
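The adds(R0, R0, R0) above does double duty: adding the hash to itself Smi-tags it (a Smi is the value shifted left by one bit) and sets the Z flag, so the "hash not yet computed" case (a cached value of 0) branches without a separate compare. A hedged C++ sketch:

    #include <cstdint>

    // Returns false when no hash is cached (stored value 0), mirroring the
    // Z-flag branch; otherwise produces the Smi-tagged hash.
    bool TryGetCachedHash(uint32_t cached, int64_t* tagged_hash) {
      *tagged_hash = static_cast<int64_t>(cached) << 1;  // Smi tag = x2
      return cached != 0;
    }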
| 1833 | 1756 |
| 1834 | |
| 1835 void Intrinsifier::Object_getHash(Assembler* assembler) { | 1757 void Intrinsifier::Object_getHash(Assembler* assembler) { |
| 1836 __ ldr(R0, Address(SP, 0 * kWordSize)); | 1758 __ ldr(R0, Address(SP, 0 * kWordSize)); |
| 1837 __ ldr(R0, FieldAddress(R0, String::hash_offset()), kUnsignedWord); | 1759 __ ldr(R0, FieldAddress(R0, String::hash_offset()), kUnsignedWord); |
| 1838 __ SmiTag(R0); | 1760 __ SmiTag(R0); |
| 1839 __ ret(); | 1761 __ ret(); |
| 1840 } | 1762 } |
| 1841 | 1763 |
| 1842 | |
| 1843 void Intrinsifier::Object_setHash(Assembler* assembler) { | 1764 void Intrinsifier::Object_setHash(Assembler* assembler) { |
| 1844 __ ldr(R0, Address(SP, 1 * kWordSize)); // Object. | 1765 __ ldr(R0, Address(SP, 1 * kWordSize)); // Object. |
| 1845 __ ldr(R1, Address(SP, 0 * kWordSize)); // Value. | 1766 __ ldr(R1, Address(SP, 0 * kWordSize)); // Value. |
| 1846 __ SmiUntag(R1); | 1767 __ SmiUntag(R1); |
| 1847 __ str(R1, FieldAddress(R0, String::hash_offset()), kUnsignedWord); | 1768 __ str(R1, FieldAddress(R0, String::hash_offset()), kUnsignedWord); |
| 1848 __ ret(); | 1769 __ ret(); |
| 1849 } | 1770 } |
| 1850 | 1771 |
| 1851 | |
| 1852 void GenerateSubstringMatchesSpecialization(Assembler* assembler, | 1772 void GenerateSubstringMatchesSpecialization(Assembler* assembler, |
| 1853 intptr_t receiver_cid, | 1773 intptr_t receiver_cid, |
| 1854 intptr_t other_cid, | 1774 intptr_t other_cid, |
| 1855 Label* return_true, | 1775 Label* return_true, |
| 1856 Label* return_false) { | 1776 Label* return_false) { |
| 1857 __ SmiUntag(R1); | 1777 __ SmiUntag(R1); |
| 1858 __ ldr(R8, FieldAddress(R0, String::length_offset())); // this.length | 1778 __ ldr(R8, FieldAddress(R0, String::length_offset())); // this.length |
| 1859 __ SmiUntag(R8); | 1779 __ SmiUntag(R8); |
| 1860 __ ldr(R9, FieldAddress(R2, String::length_offset())); // other.length | 1780 __ ldr(R9, FieldAddress(R2, String::length_offset())); // other.length |
| 1861 __ SmiUntag(R9); | 1781 __ SmiUntag(R9); |
| (...skipping 46 matching lines...) |
| 1908 // i++, while (i < len) | 1828 // i++, while (i < len) |
| 1909 __ add(R3, R3, Operand(1)); | 1829 __ add(R3, R3, Operand(1)); |
| 1910 __ add(R0, R0, Operand(receiver_cid == kOneByteStringCid ? 1 : 2)); | 1830 __ add(R0, R0, Operand(receiver_cid == kOneByteStringCid ? 1 : 2)); |
| 1911 __ add(R2, R2, Operand(other_cid == kOneByteStringCid ? 1 : 2)); | 1831 __ add(R2, R2, Operand(other_cid == kOneByteStringCid ? 1 : 2)); |
| 1912 __ cmp(R3, Operand(R9)); | 1832 __ cmp(R3, Operand(R9)); |
| 1913 __ b(&loop, LT); | 1833 __ b(&loop, LT); |
| 1914 | 1834 |
| 1915 __ b(return_true); | 1835 __ b(return_true); |
| 1916 } | 1836 } |
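A hedged C++ sketch of what each specialization computes: whether other occurs in the receiver starting at start, walking both strings by their element width (1 byte for one-byte strings, 2 bytes for two-byte). The template parameters stand in for the receiver_cid/other_cid pairs, e.g. SubstringMatches<uint16_t, uint8_t> for a two-byte receiver with a one-byte other.

    #include <cstddef>
    #include <cstdint>

    template <typename RecvChar, typename OtherChar>
    bool SubstringMatches(const RecvChar* receiver, size_t recv_len,
                          size_t start, const OtherChar* other,
                          size_t other_len) {
      if (start > recv_len || other_len > recv_len - start) {
        return false;  // `other` must fit in the receiver at `start`
      }
      for (size_t i = 0; i < other_len; i++) {
        // Compare code units widened to a common type.
        if (static_cast<uint32_t>(receiver[start + i]) !=
            static_cast<uint32_t>(other[i])) {
          return false;
        }
      }
      return true;
    }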
| 1917 | 1837 |
| 1918 | |
| 1919 // bool _substringMatches(int start, String other) | 1838 // bool _substringMatches(int start, String other) |
| 1920 // This intrinsic handles a OneByteString or TwoByteString receiver with a | 1839 // This intrinsic handles a OneByteString or TwoByteString receiver with a |
| 1921 // OneByteString other. | 1840 // OneByteString other. |
| 1922 void Intrinsifier::StringBaseSubstringMatches(Assembler* assembler) { | 1841 void Intrinsifier::StringBaseSubstringMatches(Assembler* assembler) { |
| 1923 Label fall_through, return_true, return_false, try_two_byte; | 1842 Label fall_through, return_true, return_false, try_two_byte; |
| 1924 __ ldr(R0, Address(SP, 2 * kWordSize)); // this | 1843 __ ldr(R0, Address(SP, 2 * kWordSize)); // this |
| 1925 __ ldr(R1, Address(SP, 1 * kWordSize)); // start | 1844 __ ldr(R1, Address(SP, 1 * kWordSize)); // start |
| 1926 __ ldr(R2, Address(SP, 0 * kWordSize)); // other | 1845 __ ldr(R2, Address(SP, 0 * kWordSize)); // other |
| 1927 | 1846 |
| 1928 __ tsti(R1, Immediate(kSmiTagMask)); | 1847 __ tsti(R1, Immediate(kSmiTagMask)); |
| (...skipping 21 matching lines...) |
| 1950 __ LoadObject(R0, Bool::True()); | 1869 __ LoadObject(R0, Bool::True()); |
| 1951 __ ret(); | 1870 __ ret(); |
| 1952 | 1871 |
| 1953 __ Bind(&return_false); | 1872 __ Bind(&return_false); |
| 1954 __ LoadObject(R0, Bool::False()); | 1873 __ LoadObject(R0, Bool::False()); |
| 1955 __ ret(); | 1874 __ ret(); |
| 1956 | 1875 |
| 1957 __ Bind(&fall_through); | 1876 __ Bind(&fall_through); |
| 1958 } | 1877 } |
| 1959 | 1878 |
| 1960 | |
| 1961 void Intrinsifier::StringBaseCharAt(Assembler* assembler) { | 1879 void Intrinsifier::StringBaseCharAt(Assembler* assembler) { |
| 1962 Label fall_through, try_two_byte_string; | 1880 Label fall_through, try_two_byte_string; |
| 1963 | 1881 |
| 1964 __ ldr(R1, Address(SP, 0 * kWordSize)); // Index. | 1882 __ ldr(R1, Address(SP, 0 * kWordSize)); // Index. |
| 1965 __ ldr(R0, Address(SP, 1 * kWordSize)); // String. | 1883 __ ldr(R0, Address(SP, 1 * kWordSize)); // String. |
| 1966 __ tsti(R1, Immediate(kSmiTagMask)); | 1884 __ tsti(R1, Immediate(kSmiTagMask)); |
| 1967 __ b(&fall_through, NE); // Index is not a Smi. | 1885 __ b(&fall_through, NE); // Index is not a Smi. |
| 1968 // Range check. | 1886 // Range check. |
| 1969 __ ldr(R2, FieldAddress(R0, String::length_offset())); | 1887 __ ldr(R2, FieldAddress(R0, String::length_offset())); |
| 1970 __ cmp(R1, Operand(R2)); | 1888 __ cmp(R1, Operand(R2)); |
| (...skipping 20 matching lines...) |
| 1991 __ CompareImmediate(R1, Symbols::kNumberOfOneCharCodeSymbols); | 1909 __ CompareImmediate(R1, Symbols::kNumberOfOneCharCodeSymbols); |
| 1992 __ b(&fall_through, GE); | 1910 __ b(&fall_through, GE); |
| 1993 __ ldr(R0, Address(THR, Thread::predefined_symbols_address_offset())); | 1911 __ ldr(R0, Address(THR, Thread::predefined_symbols_address_offset())); |
| 1994 __ AddImmediate(R0, Symbols::kNullCharCodeSymbolOffset * kWordSize); | 1912 __ AddImmediate(R0, Symbols::kNullCharCodeSymbolOffset * kWordSize); |
| 1995 __ ldr(R0, Address(R0, R1, UXTX, Address::Scaled)); | 1913 __ ldr(R0, Address(R0, R1, UXTX, Address::Scaled)); |
| 1996 __ ret(); | 1914 __ ret(); |
| 1997 | 1915 |
| 1998 __ Bind(&fall_through); | 1916 __ Bind(&fall_through); |
| 1999 } | 1917 } |
| 2000 | 1918 |
| 2001 | |
| 2002 void Intrinsifier::StringBaseIsEmpty(Assembler* assembler) { | 1919 void Intrinsifier::StringBaseIsEmpty(Assembler* assembler) { |
| 2003 __ ldr(R0, Address(SP, 0 * kWordSize)); | 1920 __ ldr(R0, Address(SP, 0 * kWordSize)); |
| 2004 __ ldr(R0, FieldAddress(R0, String::length_offset())); | 1921 __ ldr(R0, FieldAddress(R0, String::length_offset())); |
| 2005 __ cmp(R0, Operand(Smi::RawValue(0))); | 1922 __ cmp(R0, Operand(Smi::RawValue(0))); |
| 2006 __ LoadObject(R0, Bool::True()); | 1923 __ LoadObject(R0, Bool::True()); |
| 2007 __ LoadObject(TMP, Bool::False()); | 1924 __ LoadObject(TMP, Bool::False()); |
| 2008 __ csel(R0, TMP, R0, NE); | 1925 __ csel(R0, TMP, R0, NE); |
| 2009 __ ret(); | 1926 __ ret(); |
| 2010 } | 1927 } |
| 2011 | 1928 |
| 2012 | |
| 2013 void Intrinsifier::OneByteString_getHashCode(Assembler* assembler) { | 1929 void Intrinsifier::OneByteString_getHashCode(Assembler* assembler) { |
| 2014 Label compute_hash; | 1930 Label compute_hash; |
| 2015 __ ldr(R1, Address(SP, 0 * kWordSize)); // OneByteString object. | 1931 __ ldr(R1, Address(SP, 0 * kWordSize)); // OneByteString object. |
| 2016 __ ldr(R0, FieldAddress(R1, String::hash_offset()), kUnsignedWord); | 1932 __ ldr(R0, FieldAddress(R1, String::hash_offset()), kUnsignedWord); |
| 2017 __ adds(R0, R0, Operand(R0)); // Smi tag the hash code, setting Z flag. | 1933 __ adds(R0, R0, Operand(R0)); // Smi tag the hash code, setting Z flag. |
| 2018 __ b(&compute_hash, EQ); | 1934 __ b(&compute_hash, EQ); |
| 2019 __ ret(); // Return if already computed. | 1935 __ ret(); // Return if already computed. |
| 2020 | 1936 |
| 2021 __ Bind(&compute_hash); | 1937 __ Bind(&compute_hash); |
| 2022 __ ldr(R2, FieldAddress(R1, String::length_offset())); | 1938 __ ldr(R2, FieldAddress(R1, String::length_offset())); |
| (...skipping 39 matching lines...) |
| 2062 __ AndImmediate(R0, R0, (static_cast<intptr_t>(1) << String::kHashBits) - 1); | 1978 __ AndImmediate(R0, R0, (static_cast<intptr_t>(1) << String::kHashBits) - 1); |
| 2063 __ CompareRegisters(R0, ZR); | 1979 __ CompareRegisters(R0, ZR); |
| 2064 // return hash_ == 0 ? 1 : hash_; | 1980 // return hash_ == 0 ? 1 : hash_; |
| 2065 __ Bind(&done); | 1981 __ Bind(&done); |
| 2066 __ csinc(R0, R0, ZR, NE); // R0 <- (R0 != 0) ? R0 : (ZR + 1). | 1982 __ csinc(R0, R0, ZR, NE); // R0 <- (R0 != 0) ? R0 : (ZR + 1). |
| 2067 __ str(R0, FieldAddress(R1, String::hash_offset()), kUnsignedWord); | 1983 __ str(R0, FieldAddress(R1, String::hash_offset()), kUnsignedWord); |
| 2068 __ SmiTag(R0); | 1984 __ SmiTag(R0); |
| 2069 __ ret(); | 1985 __ ret(); |
| 2070 } | 1986 } |
| 2071 | 1987 |
| 2072 | |
| 2073 // Allocates a one-byte string of length 'end - start'. The content is not | 1988 // Allocates a one-byte string of length 'end - start'. The content is not |
| 2074 // initialized. | 1989 // initialized. |
| 2075 // 'length-reg' (R2) contains tagged length. | 1990 // 'length-reg' (R2) contains tagged length. |
| 2076 // Returns new string as tagged pointer in R0. | 1991 // Returns new string as tagged pointer in R0. |
| 2077 static void TryAllocateOnebyteString(Assembler* assembler, | 1992 static void TryAllocateOnebyteString(Assembler* assembler, |
| 2078 Label* ok, | 1993 Label* ok, |
| 2079 Label* failure) { | 1994 Label* failure) { |
| 2080 const Register length_reg = R2; | 1995 const Register length_reg = R2; |
| 2081 Label fail; | 1996 Label fail; |
| 2082 NOT_IN_PRODUCT(__ MaybeTraceAllocation(kOneByteStringCid, R0, failure)); | 1997 NOT_IN_PRODUCT(__ MaybeTraceAllocation(kOneByteStringCid, R0, failure)); |
| (...skipping 56 matching lines...) |
| 2139 | 2054 |
| 2140 // Set the length field using the saved length (R6). | 2055 // Set the length field using the saved length (R6). |
| 2141 __ StoreIntoObjectNoBarrier(R0, FieldAddress(R0, String::length_offset()), | 2056 __ StoreIntoObjectNoBarrier(R0, FieldAddress(R0, String::length_offset()), |
| 2142 R6); | 2057 R6); |
| 2143 __ b(ok); | 2058 __ b(ok); |
| 2144 | 2059 |
| 2145 __ Bind(&fail); | 2060 __ Bind(&fail); |
| 2146 __ b(failure); | 2061 __ b(failure); |
| 2147 } | 2062 } |
| 2148 | 2063 |
| 2149 | |
| 2150 // Arg0: OneByteString (receiver). | 2064 // Arg0: OneByteString (receiver). |
| 2151 // Arg1: Start index as Smi. | 2065 // Arg1: Start index as Smi. |
| 2152 // Arg2: End index as Smi. | 2066 // Arg2: End index as Smi. |
| 2153 // The indexes must be valid. | 2067 // The indexes must be valid. |
| 2154 void Intrinsifier::OneByteString_substringUnchecked(Assembler* assembler) { | 2068 void Intrinsifier::OneByteString_substringUnchecked(Assembler* assembler) { |
| 2155 const intptr_t kStringOffset = 2 * kWordSize; | 2069 const intptr_t kStringOffset = 2 * kWordSize; |
| 2156 const intptr_t kStartIndexOffset = 1 * kWordSize; | 2070 const intptr_t kStartIndexOffset = 1 * kWordSize; |
| 2157 const intptr_t kEndIndexOffset = 0 * kWordSize; | 2071 const intptr_t kEndIndexOffset = 0 * kWordSize; |
| 2158 Label fall_through, ok; | 2072 Label fall_through, ok; |
| 2159 | 2073 |
| (...skipping 39 matching lines...) |
| 2199 __ cmp(R2, Operand(0)); | 2113 __ cmp(R2, Operand(0)); |
| 2200 __ str(R1, FieldAddress(R7, OneByteString::data_offset()), kUnsignedByte); | 2114 __ str(R1, FieldAddress(R7, OneByteString::data_offset()), kUnsignedByte); |
| 2201 __ AddImmediate(R7, 1); | 2115 __ AddImmediate(R7, 1); |
| 2202 __ b(&loop, GT); | 2116 __ b(&loop, GT); |
| 2203 | 2117 |
| 2204 __ Bind(&done); | 2118 __ Bind(&done); |
| 2205 __ ret(); | 2119 __ ret(); |
| 2206 __ Bind(&fall_through); | 2120 __ Bind(&fall_through); |
| 2207 } | 2121 } |
| 2208 | 2122 |
| 2209 | |
| 2210 void Intrinsifier::OneByteStringSetAt(Assembler* assembler) { | 2123 void Intrinsifier::OneByteStringSetAt(Assembler* assembler) { |
| 2211 __ ldr(R2, Address(SP, 0 * kWordSize)); // Value. | 2124 __ ldr(R2, Address(SP, 0 * kWordSize)); // Value. |
| 2212 __ ldr(R1, Address(SP, 1 * kWordSize)); // Index. | 2125 __ ldr(R1, Address(SP, 1 * kWordSize)); // Index. |
| 2213 __ ldr(R0, Address(SP, 2 * kWordSize)); // OneByteString. | 2126 __ ldr(R0, Address(SP, 2 * kWordSize)); // OneByteString. |
| 2214 __ SmiUntag(R1); | 2127 __ SmiUntag(R1); |
| 2215 __ SmiUntag(R2); | 2128 __ SmiUntag(R2); |
| 2216 __ AddImmediate(R3, R0, OneByteString::data_offset() - kHeapObjectTag); | 2129 __ AddImmediate(R3, R0, OneByteString::data_offset() - kHeapObjectTag); |
| 2217 __ str(R2, Address(R3, R1), kUnsignedByte); | 2130 __ str(R2, Address(R3, R1), kUnsignedByte); |
| 2218 __ ret(); | 2131 __ ret(); |
| 2219 } | 2132 } |
| 2220 | 2133 |
| 2221 | |
| 2222 void Intrinsifier::OneByteString_allocate(Assembler* assembler) { | 2134 void Intrinsifier::OneByteString_allocate(Assembler* assembler) { |
| 2223 Label fall_through, ok; | 2135 Label fall_through, ok; |
| 2224 | 2136 |
| 2225 __ ldr(R2, Address(SP, 0 * kWordSize)); // Length. | 2137 __ ldr(R2, Address(SP, 0 * kWordSize)); // Length. |
| 2226 TryAllocateOnebyteString(assembler, &ok, &fall_through); | 2138 TryAllocateOnebyteString(assembler, &ok, &fall_through); |
| 2227 | 2139 |
| 2228 __ Bind(&ok); | 2140 __ Bind(&ok); |
| 2229 __ ret(); | 2141 __ ret(); |
| 2230 | 2142 |
| 2231 __ Bind(&fall_through); | 2143 __ Bind(&fall_through); |
| 2232 } | 2144 } |
| 2233 | 2145 |
| 2234 | |
| 2235 // TODO(srdjan): Add combinations (one-byte/two-byte/external strings). | 2146 // TODO(srdjan): Add combinations (one-byte/two-byte/external strings). |
| 2236 static void StringEquality(Assembler* assembler, intptr_t string_cid) { | 2147 static void StringEquality(Assembler* assembler, intptr_t string_cid) { |
| 2237 Label fall_through, is_true, is_false, loop; | 2148 Label fall_through, is_true, is_false, loop; |
| 2238 __ ldr(R0, Address(SP, 1 * kWordSize)); // This. | 2149 __ ldr(R0, Address(SP, 1 * kWordSize)); // This. |
| 2239 __ ldr(R1, Address(SP, 0 * kWordSize)); // Other. | 2150 __ ldr(R1, Address(SP, 0 * kWordSize)); // Other. |
| 2240 | 2151 |
| 2241 // Are identical? | 2152 // Are identical? |
| 2242 __ cmp(R0, Operand(R1)); | 2153 __ cmp(R0, Operand(R1)); |
| 2243 __ b(&is_true, EQ); | 2154 __ b(&is_true, EQ); |
| 2244 | 2155 |
| (...skipping 44 matching lines...) |
| 2289 __ LoadObject(R0, Bool::True()); | 2200 __ LoadObject(R0, Bool::True()); |
| 2290 __ ret(); | 2201 __ ret(); |
| 2291 | 2202 |
| 2292 __ Bind(&is_false); | 2203 __ Bind(&is_false); |
| 2293 __ LoadObject(R0, Bool::False()); | 2204 __ LoadObject(R0, Bool::False()); |
| 2294 __ ret(); | 2205 __ ret(); |
| 2295 | 2206 |
| 2296 __ Bind(&fall_through); | 2207 __ Bind(&fall_through); |
| 2297 } | 2208 } |
| 2298 | 2209 |
| 2299 | |
| 2300 void Intrinsifier::OneByteString_equality(Assembler* assembler) { | 2210 void Intrinsifier::OneByteString_equality(Assembler* assembler) { |
| 2301 StringEquality(assembler, kOneByteStringCid); | 2211 StringEquality(assembler, kOneByteStringCid); |
| 2302 } | 2212 } |
| 2303 | 2213 |
| 2304 | |
| 2305 void Intrinsifier::TwoByteString_equality(Assembler* assembler) { | 2214 void Intrinsifier::TwoByteString_equality(Assembler* assembler) { |
| 2306 StringEquality(assembler, kTwoByteStringCid); | 2215 StringEquality(assembler, kTwoByteStringCid); |
| 2307 } | 2216 } |
| 2308 | 2217 |
| 2309 | |
| 2310 void Intrinsifier::IntrinsifyRegExpExecuteMatch(Assembler* assembler, | 2218 void Intrinsifier::IntrinsifyRegExpExecuteMatch(Assembler* assembler, |
| 2311 bool sticky) { | 2219 bool sticky) { |
| 2312 if (FLAG_interpret_irregexp) return; | 2220 if (FLAG_interpret_irregexp) return; |
| 2313 | 2221 |
| 2314 static const intptr_t kRegExpParamOffset = 2 * kWordSize; | 2222 static const intptr_t kRegExpParamOffset = 2 * kWordSize; |
| 2315 static const intptr_t kStringParamOffset = 1 * kWordSize; | 2223 static const intptr_t kStringParamOffset = 1 * kWordSize; |
| 2316 // start_index smi is located at offset 0. | 2224 // start_index smi is located at offset 0. |
| 2317 | 2225 |
| 2318 // Incoming registers: | 2226 // Incoming registers: |
| 2319 // R0: Function. (Will be reloaded with the specialized matcher function.) | 2227 // R0: Function. (Will be reloaded with the specialized matcher function.) |
| (...skipping 13 matching lines...) |
| 2333 // Registers are now set up for the lazy compile stub. It expects the function | 2241 // Registers are now set up for the lazy compile stub. It expects the function |
| 2334 // in R0, the argument descriptor in R4, and IC-Data in R5. | 2242 // in R0, the argument descriptor in R4, and IC-Data in R5. |
| 2335 __ eor(R5, R5, Operand(R5)); | 2243 __ eor(R5, R5, Operand(R5)); |
| 2336 | 2244 |
| 2337 // Tail-call the function. | 2245 // Tail-call the function. |
| 2338 __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset())); | 2246 __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset())); |
| 2339 __ ldr(R1, FieldAddress(R0, Function::entry_point_offset())); | 2247 __ ldr(R1, FieldAddress(R0, Function::entry_point_offset())); |
| 2340 __ br(R1); | 2248 __ br(R1); |
| 2341 } | 2249 } |
| 2342 | 2250 |
| 2343 | |
| 2344 // On stack: user tag (+0). | 2251 // On stack: user tag (+0). |
| 2345 void Intrinsifier::UserTag_makeCurrent(Assembler* assembler) { | 2252 void Intrinsifier::UserTag_makeCurrent(Assembler* assembler) { |
| 2346 // R1: Isolate. | 2253 // R1: Isolate. |
| 2347 __ LoadIsolate(R1); | 2254 __ LoadIsolate(R1); |
| 2348 // R0: Current user tag. | 2255 // R0: Current user tag. |
| 2349 __ ldr(R0, Address(R1, Isolate::current_tag_offset())); | 2256 __ ldr(R0, Address(R1, Isolate::current_tag_offset())); |
| 2350 // R2: UserTag. | 2257 // R2: UserTag. |
| 2351 __ ldr(R2, Address(SP, +0 * kWordSize)); | 2258 __ ldr(R2, Address(SP, +0 * kWordSize)); |
| 2352 // Set Isolate::current_tag_. | 2259 // Set Isolate::current_tag_. |
| 2353 __ str(R2, Address(R1, Isolate::current_tag_offset())); | 2260 __ str(R2, Address(R1, Isolate::current_tag_offset())); |
| 2354 // R2: UserTag's tag. | 2261 // R2: UserTag's tag. |
| 2355 __ ldr(R2, FieldAddress(R2, UserTag::tag_offset())); | 2262 __ ldr(R2, FieldAddress(R2, UserTag::tag_offset())); |
| 2356 // Set Isolate::user_tag_. | 2263 // Set Isolate::user_tag_. |
| 2357 __ str(R2, Address(R1, Isolate::user_tag_offset())); | 2264 __ str(R2, Address(R1, Isolate::user_tag_offset())); |
| 2358 __ ret(); | 2265 __ ret(); |
| 2359 } | 2266 } |
| 2360 | 2267 |
| 2361 | |
| 2362 void Intrinsifier::UserTag_defaultTag(Assembler* assembler) { | 2268 void Intrinsifier::UserTag_defaultTag(Assembler* assembler) { |
| 2363 __ LoadIsolate(R0); | 2269 __ LoadIsolate(R0); |
| 2364 __ ldr(R0, Address(R0, Isolate::default_tag_offset())); | 2270 __ ldr(R0, Address(R0, Isolate::default_tag_offset())); |
| 2365 __ ret(); | 2271 __ ret(); |
| 2366 } | 2272 } |
| 2367 | 2273 |
| 2368 | |
| 2369 void Intrinsifier::Profiler_getCurrentTag(Assembler* assembler) { | 2274 void Intrinsifier::Profiler_getCurrentTag(Assembler* assembler) { |
| 2370 __ LoadIsolate(R0); | 2275 __ LoadIsolate(R0); |
| 2371 __ ldr(R0, Address(R0, Isolate::current_tag_offset())); | 2276 __ ldr(R0, Address(R0, Isolate::current_tag_offset())); |
| 2372 __ ret(); | 2277 __ ret(); |
| 2373 } | 2278 } |
| 2374 | 2279 |
| 2375 | |
| 2376 void Intrinsifier::Timeline_isDartStreamEnabled(Assembler* assembler) { | 2280 void Intrinsifier::Timeline_isDartStreamEnabled(Assembler* assembler) { |
| 2377 if (!FLAG_support_timeline) { | 2281 if (!FLAG_support_timeline) { |
| 2378 __ LoadObject(R0, Bool::False()); | 2282 __ LoadObject(R0, Bool::False()); |
| 2379 __ ret(); | 2283 __ ret(); |
| 2380 return; | 2284 return; |
| 2381 } | 2285 } |
| 2382 // Load TimelineStream*. | 2286 // Load TimelineStream*. |
| 2383 __ ldr(R0, Address(THR, Thread::dart_stream_offset())); | 2287 __ ldr(R0, Address(THR, Thread::dart_stream_offset())); |
| 2384 // Load uintptr_t from TimelineStream*. | 2288 // Load uintptr_t from TimelineStream*. |
| 2385 __ ldr(R0, Address(R0, TimelineStream::enabled_offset())); | 2289 __ ldr(R0, Address(R0, TimelineStream::enabled_offset())); |
| 2386 __ cmp(R0, Operand(0)); | 2290 __ cmp(R0, Operand(0)); |
| 2387 __ LoadObject(R0, Bool::False()); | 2291 __ LoadObject(R0, Bool::False()); |
| 2388 __ LoadObject(TMP, Bool::True()); | 2292 __ LoadObject(TMP, Bool::True()); |
| 2389 __ csel(R0, TMP, R0, NE); | 2293 __ csel(R0, TMP, R0, NE); |
| 2390 __ ret(); | 2294 __ ret(); |
| 2391 } | 2295 } |
| 2392 | 2296 |
| 2393 | |
| 2394 void Intrinsifier::ClearAsyncThreadStackTrace(Assembler* assembler) { | 2297 void Intrinsifier::ClearAsyncThreadStackTrace(Assembler* assembler) { |
| 2395 __ LoadObject(R0, Object::null_object()); | 2298 __ LoadObject(R0, Object::null_object()); |
| 2396 __ str(R0, Address(THR, Thread::async_stack_trace_offset())); | 2299 __ str(R0, Address(THR, Thread::async_stack_trace_offset())); |
| 2397 __ ret(); | 2300 __ ret(); |
| 2398 } | 2301 } |
| 2399 | 2302 |
| 2400 | |
| 2401 void Intrinsifier::SetAsyncThreadStackTrace(Assembler* assembler) { | 2303 void Intrinsifier::SetAsyncThreadStackTrace(Assembler* assembler) { |
| 2402 __ ldr(R0, Address(THR, Thread::async_stack_trace_offset())); | 2304 __ ldr(R0, Address(THR, Thread::async_stack_trace_offset())); |
| 2403 __ LoadObject(R0, Object::null_object()); | 2305 __ LoadObject(R0, Object::null_object()); |
| 2404 __ ret(); | 2306 __ ret(); |
| 2405 } | 2307 } |
| 2406 | 2308 |
| 2407 } // namespace dart | 2309 } // namespace dart |
| 2408 | 2310 |
| 2409 #endif // defined TARGET_ARCH_ARM64 | 2311 #endif // defined TARGET_ARCH_ARM64 |