OLD | NEW |
1 // Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM64. | 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM64. |
6 #if defined(TARGET_ARCH_ARM64) | 6 #if defined(TARGET_ARCH_ARM64) |
7 | 7 |
8 #include "vm/intrinsifier.h" | 8 #include "vm/intrinsifier.h" |
9 | 9 |
10 #include "vm/assembler.h" | 10 #include "vm/assembler.h" |
(...skipping 40 matching lines...)
51 Label fall_through; | 51 Label fall_through; |
52 | 52 |
53 if (Isolate::Current()->flags().type_checks()) { | 53 if (Isolate::Current()->flags().type_checks()) { |
54 const intptr_t type_args_field_offset = | 54 const intptr_t type_args_field_offset = |
55 ComputeObjectArrayTypeArgumentsOffset(); | 55 ComputeObjectArrayTypeArgumentsOffset(); |
56 // Inline simple tests (Smi, null), fallthrough if not positive. | 56 // Inline simple tests (Smi, null), fallthrough if not positive. |
57 Label checked_ok; | 57 Label checked_ok; |
58 __ ldr(R2, Address(SP, 0 * kWordSize)); // Value. | 58 __ ldr(R2, Address(SP, 0 * kWordSize)); // Value. |
59 | 59 |
60 // Null value is valid for any type. | 60 // Null value is valid for any type. |
61 __ CompareObject(R2, Object::null_object(), PP); | 61 __ CompareObject(R2, Object::null_object()); |
62 __ b(&checked_ok, EQ); | 62 __ b(&checked_ok, EQ); |
63 | 63 |
64 __ ldr(R1, Address(SP, 2 * kWordSize)); // Array. | 64 __ ldr(R1, Address(SP, 2 * kWordSize)); // Array. |
65 __ ldr(R1, FieldAddress(R1, type_args_field_offset)); | 65 __ ldr(R1, FieldAddress(R1, type_args_field_offset)); |
66 | 66 |
67 // R1: Type arguments of array. | 67 // R1: Type arguments of array. |
68 __ CompareObject(R1, Object::null_object(), PP); | 68 __ CompareObject(R1, Object::null_object()); |
69 __ b(&checked_ok, EQ); | 69 __ b(&checked_ok, EQ); |
70 | 70 |
71 // Check if it's dynamic. | 71 // Check if it's dynamic. |
72 // Get type at index 0. | 72 // Get type at index 0. |
73 __ ldr(R0, FieldAddress(R1, TypeArguments::type_at_offset(0))); | 73 __ ldr(R0, FieldAddress(R1, TypeArguments::type_at_offset(0))); |
74 __ CompareObject(R0, Type::ZoneHandle(Type::DynamicType()), PP); | 74 __ CompareObject(R0, Type::ZoneHandle(Type::DynamicType())); |
75 __ b(&checked_ok, EQ); | 75 __ b(&checked_ok, EQ); |
76 | 76 |
77 // Check for int and num. | 77 // Check for int and num. |
78 __ tsti(R2, Immediate(kSmiTagMask)); // Value is Smi? | 78 __ tsti(R2, Immediate(kSmiTagMask)); // Value is Smi? |
79 __ b(&fall_through, NE); // Non-smi value. | 79 __ b(&fall_through, NE); // Non-smi value. |
80 __ CompareObject(R0, Type::ZoneHandle(Type::IntType()), PP); | 80 __ CompareObject(R0, Type::ZoneHandle(Type::IntType())); |
81 __ b(&checked_ok, EQ); | 81 __ b(&checked_ok, EQ); |
82 __ CompareObject(R0, Type::ZoneHandle(Type::Number()), PP); | 82 __ CompareObject(R0, Type::ZoneHandle(Type::Number())); |
83 __ b(&fall_through, NE); | 83 __ b(&fall_through, NE); |
84 __ Bind(&checked_ok); | 84 __ Bind(&checked_ok); |
85 } | 85 } |
86 __ ldr(R1, Address(SP, 1 * kWordSize)); // Index. | 86 __ ldr(R1, Address(SP, 1 * kWordSize)); // Index. |
87 __ tsti(R1, Immediate(kSmiTagMask)); | 87 __ tsti(R1, Immediate(kSmiTagMask)); |
88 // Index not Smi. | 88 // Index not Smi. |
89 __ b(&fall_through, NE); | 89 __ b(&fall_through, NE); |
90 __ ldr(R0, Address(SP, 2 * kWordSize)); // Array. | 90 __ ldr(R0, Address(SP, 2 * kWordSize)); // Array. |
91 | 91 |
92 // Range check. | 92 // Range check. |
(...skipping 19 matching lines...)
112 // On stack: type argument (+1), data (+0). | 112 // On stack: type argument (+1), data (+0). |
113 void Intrinsifier::GrowableArray_Allocate(Assembler* assembler) { | 113 void Intrinsifier::GrowableArray_Allocate(Assembler* assembler) { |
114 // The newly allocated object is returned in R0. | 114 // The newly allocated object is returned in R0. |
115 const intptr_t kTypeArgumentsOffset = 1 * kWordSize; | 115 const intptr_t kTypeArgumentsOffset = 1 * kWordSize; |
116 const intptr_t kArrayOffset = 0 * kWordSize; | 116 const intptr_t kArrayOffset = 0 * kWordSize; |
117 Label fall_through; | 117 Label fall_through; |
118 | 118 |
119 // Try allocating in new space. | 119 // Try allocating in new space. |
120 const Class& cls = Class::Handle( | 120 const Class& cls = Class::Handle( |
121 Isolate::Current()->object_store()->growable_object_array_class()); | 121 Isolate::Current()->object_store()->growable_object_array_class()); |
122 __ TryAllocate(cls, &fall_through, R0, R1, kNoPP); | 122 __ TryAllocate(cls, &fall_through, R0, R1); |
123 | 123 |
124 // Store backing array object in growable array object. | 124 // Store backing array object in growable array object. |
125 __ ldr(R1, Address(SP, kArrayOffset)); // Data argument. | 125 __ ldr(R1, Address(SP, kArrayOffset)); // Data argument. |
126 // R0 is new, no barrier needed. | 126 // R0 is new, no barrier needed. |
127 __ StoreIntoObjectNoBarrier( | 127 __ StoreIntoObjectNoBarrier( |
128 R0, | 128 R0, |
129 FieldAddress(R0, GrowableObjectArray::data_offset()), | 129 FieldAddress(R0, GrowableObjectArray::data_offset()), |
130 R1); | 130 R1); |
131 | 131 |
132 // R0: new growable array object start as a tagged pointer. | 132 // R0: new growable array object start as a tagged pointer. |
133 // Store the type argument field in the growable array object. | 133 // Store the type argument field in the growable array object. |
134 __ ldr(R1, Address(SP, kTypeArgumentsOffset)); // Type argument. | 134 __ ldr(R1, Address(SP, kTypeArgumentsOffset)); // Type argument. |
135 __ StoreIntoObjectNoBarrier( | 135 __ StoreIntoObjectNoBarrier( |
136 R0, | 136 R0, |
137 FieldAddress(R0, GrowableObjectArray::type_arguments_offset()), | 137 FieldAddress(R0, GrowableObjectArray::type_arguments_offset()), |
138 R1); | 138 R1); |
139 | 139 |
140 // Set the length field in the growable array object to 0. | 140 // Set the length field in the growable array object to 0. |
141 __ LoadImmediate(R1, 0, kNoPP); | 141 __ LoadImmediate(R1, 0); |
142 __ str(R1, FieldAddress(R0, GrowableObjectArray::length_offset())); | 142 __ str(R1, FieldAddress(R0, GrowableObjectArray::length_offset())); |
143 __ ret(); // Returns the newly allocated object in R0. | 143 __ ret(); // Returns the newly allocated object in R0. |
144 | 144 |
145 __ Bind(&fall_through); | 145 __ Bind(&fall_through); |
146 } | 146 } |
147 | 147 |
148 | 148 |
149 // Add an element to the growable array if it doesn't need to grow; otherwise | 149 // Add an element to the growable array if it doesn't need to grow; otherwise |
150 // call into regular code. | 150 // call into regular code. |
151 // On stack: growable array (+1), value (+0). | 151 // On stack: growable array (+1), value (+0). |
(...skipping 17 matching lines...)
169 const int64_t value_one = reinterpret_cast<int64_t>(Smi::New(1)); | 169 const int64_t value_one = reinterpret_cast<int64_t>(Smi::New(1)); |
170 // len = len + 1; | 170 // len = len + 1; |
171 __ add(R3, R1, Operand(value_one)); | 171 __ add(R3, R1, Operand(value_one)); |
172 __ str(R3, FieldAddress(R0, GrowableObjectArray::length_offset())); | 172 __ str(R3, FieldAddress(R0, GrowableObjectArray::length_offset())); |
173 __ ldr(R0, Address(SP, 0 * kWordSize)); // Value. | 173 __ ldr(R0, Address(SP, 0 * kWordSize)); // Value. |
174 ASSERT(kSmiTagShift == 1); | 174 ASSERT(kSmiTagShift == 1); |
175 __ add(R1, R2, Operand(R1, LSL, 2)); | 175 __ add(R1, R2, Operand(R1, LSL, 2)); |
176 __ StoreIntoObject(R2, | 176 __ StoreIntoObject(R2, |
177 FieldAddress(R1, Array::data_offset()), | 177 FieldAddress(R1, Array::data_offset()), |
178 R0); | 178 R0); |
179 __ LoadObject(R0, Object::null_object(), PP); | 179 __ LoadObject(R0, Object::null_object()); |
180 __ ret(); | 180 __ ret(); |
181 __ Bind(&fall_through); | 181 __ Bind(&fall_through); |
182 } | 182 } |
183 | 183 |
184 | 184 |
185 static int GetScaleFactor(intptr_t size) { | 185 static int GetScaleFactor(intptr_t size) { |
186 switch (size) { | 186 switch (size) { |
187 case 1: return 0; | 187 case 1: return 0; |
188 case 2: return 1; | 188 case 2: return 1; |
189 case 4: return 2; | 189 case 4: return 2; |
190 case 8: return 3; | 190 case 8: return 3; |
191 case 16: return 4; | 191 case 16: return 4; |
192 } | 192 } |
193 UNREACHABLE(); | 193 UNREACHABLE(); |
194 return -1; | 194 return -1; |
195 } | 195 } |
196 | 196 |
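GetScaleFactor returns log2 of the element size; the TYPED_ARRAY_ALLOCATION macro below shifts the untagged length left by it and rounds the total up to the allocation granularity. A minimal standalone C++ sketch of that size computation (kObjectAlignment and the header size are made-up values for illustration, not taken from this CL):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const intptr_t kObjectAlignment = 16;  // assumed granularity
      const intptr_t header_size = 24;       // stands in for sizeof(Raw##type_name)
      const int scale_shift = 3;             // GetScaleFactor(8): 8-byte elements
      intptr_t length = 10;                  // untagged element count
      intptr_t size = (length << scale_shift) + header_size + kObjectAlignment - 1;
      size &= ~(kObjectAlignment - 1);       // the andi: round up to alignment
      std::printf("allocation size = %ld bytes\n", static_cast<long>(size));
      return 0;
    }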
197 | 197 |
198 #define TYPED_ARRAY_ALLOCATION(type_name, cid, max_len, scale_shift) \ | 198 #define TYPED_ARRAY_ALLOCATION(type_name, cid, max_len, scale_shift) \ |
199 Label fall_through; \ | 199 Label fall_through; \ |
200 const intptr_t kArrayLengthStackOffset = 0 * kWordSize; \ | 200 const intptr_t kArrayLengthStackOffset = 0 * kWordSize; \ |
201 __ MaybeTraceAllocation(cid, R2, kNoPP, &fall_through); \ | 201 __ MaybeTraceAllocation(cid, R2, &fall_through); \ |
202 __ ldr(R2, Address(SP, kArrayLengthStackOffset)); /* Array length. */ \ | 202 __ ldr(R2, Address(SP, kArrayLengthStackOffset)); /* Array length. */ \ |
203 /* Check that length is a positive Smi. */ \ | 203 /* Check that length is a positive Smi. */ \ |
204 /* R2: requested array length argument. */ \ | 204 /* R2: requested array length argument. */ \ |
205 __ tsti(R2, Immediate(kSmiTagMask)); \ | 205 __ tsti(R2, Immediate(kSmiTagMask)); \ |
206 __ b(&fall_through, NE); \ | 206 __ b(&fall_through, NE); \ |
207 __ CompareRegisters(R2, ZR); \ | 207 __ CompareRegisters(R2, ZR); \ |
208 __ b(&fall_through, LT); \ | 208 __ b(&fall_through, LT); \ |
209 __ SmiUntag(R2); \ | 209 __ SmiUntag(R2); \ |
210 /* Check for maximum allowed length. */ \ | 210 /* Check for maximum allowed length. */ \ |
211 /* R2: untagged array length. */ \ | 211 /* R2: untagged array length. */ \ |
212 __ CompareImmediate(R2, max_len, kNoPP); \ | 212 __ CompareImmediate(R2, max_len); \ |
213 __ b(&fall_through, GT); \ | 213 __ b(&fall_through, GT); \ |
214 __ LslImmediate(R2, R2, scale_shift); \ | 214 __ LslImmediate(R2, R2, scale_shift); \ |
215 const intptr_t fixed_size = sizeof(Raw##type_name) + kObjectAlignment - 1; \ | 215 const intptr_t fixed_size = sizeof(Raw##type_name) + kObjectAlignment - 1; \ |
216 __ AddImmediate(R2, R2, fixed_size, kNoPP); \ | 216 __ AddImmediate(R2, R2, fixed_size); \ |
217 __ andi(R2, R2, Immediate(~(kObjectAlignment - 1))); \ | 217 __ andi(R2, R2, Immediate(~(kObjectAlignment - 1))); \ |
218 Heap* heap = Isolate::Current()->heap(); \ | 218 Heap* heap = Isolate::Current()->heap(); \ |
219 Heap::Space space = heap->SpaceForAllocation(cid); \ | 219 Heap::Space space = heap->SpaceForAllocation(cid); \ |
220 __ LoadImmediate(R0, heap->TopAddress(space), kNoPP); \ | 220 __ LoadImmediate(R0, heap->TopAddress(space)); \ |
221 __ ldr(R0, Address(R0, 0)); \ | 221 __ ldr(R0, Address(R0, 0)); \ |
222 \ | 222 \ |
223 /* R2: allocation size. */ \ | 223 /* R2: allocation size. */ \ |
224 __ adds(R1, R0, Operand(R2)); \ | 224 __ adds(R1, R0, Operand(R2)); \ |
225 __ b(&fall_through, CS); /* Fail on unsigned overflow. */ \ | 225 __ b(&fall_through, CS); /* Fail on unsigned overflow. */ \ |
226 \ | 226 \ |
227 /* Check if the allocation fits into the remaining space. */ \ | 227 /* Check if the allocation fits into the remaining space. */ \ |
228 /* R0: potential new object start. */ \ | 228 /* R0: potential new object start. */ \ |
229 /* R1: potential next object start. */ \ | 229 /* R1: potential next object start. */ \ |
230 /* R2: allocation size. */ \ | 230 /* R2: allocation size. */ \ |
231 __ LoadImmediate(R3, heap->EndAddress(space), kNoPP); \ | 231 __ LoadImmediate(R3, heap->EndAddress(space)); \ |
232 __ ldr(R3, Address(R3, 0)); \ | 232 __ ldr(R3, Address(R3, 0)); \ |
233 __ cmp(R1, Operand(R3)); \ | 233 __ cmp(R1, Operand(R3)); \ |
234 __ b(&fall_through, CS); \ | 234 __ b(&fall_through, CS); \ |
235 \ | 235 \ |
236 /* Successfully allocated the object(s), now update top to point to */ \ | 236 /* Successfully allocated the object(s), now update top to point to */ \ |
237 /* next object start and initialize the object. */ \ | 237 /* next object start and initialize the object. */ \ |
238 __ LoadImmediate(R3, heap->TopAddress(space), kNoPP); \ | 238 __ LoadImmediate(R3, heap->TopAddress(space)); \ |
239 __ str(R1, Address(R3, 0)); \ | 239 __ str(R1, Address(R3, 0)); \ |
240 __ AddImmediate(R0, R0, kHeapObjectTag, kNoPP); \ | 240 __ AddImmediate(R0, R0, kHeapObjectTag); \ |
241 __ UpdateAllocationStatsWithSize(cid, R2, kNoPP, space); \ | 241 __ UpdateAllocationStatsWithSize(cid, R2, space); \ |
242 /* Initialize the tags. */ \ | 242 /* Initialize the tags. */ \ |
243 /* R0: new object start as a tagged pointer. */ \ | 243 /* R0: new object start as a tagged pointer. */ \ |
244 /* R1: new object end address. */ \ | 244 /* R1: new object end address. */ \ |
245 /* R2: allocation size. */ \ | 245 /* R2: allocation size. */ \ |
246 { \ | 246 { \ |
247 __ CompareImmediate(R2, RawObject::SizeTag::kMaxSizeTag, kNoPP); \ | 247 __ CompareImmediate(R2, RawObject::SizeTag::kMaxSizeTag); \ |
248 __ LslImmediate(R2, R2, RawObject::kSizeTagPos - kObjectAlignmentLog2); \ | 248 __ LslImmediate(R2, R2, RawObject::kSizeTagPos - kObjectAlignmentLog2); \ |
249 __ csel(R2, ZR, R2, HI); \ | 249 __ csel(R2, ZR, R2, HI); \ |
250 \ | 250 \ |
251 /* Get the class index and insert it into the tags. */ \ | 251 /* Get the class index and insert it into the tags. */ \ |
252 __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cid), kNoPP); \ | 252 __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cid)); \ |
253 __ orr(R2, R2, Operand(TMP)); \ | 253 __ orr(R2, R2, Operand(TMP)); \ |
254 __ str(R2, FieldAddress(R0, type_name::tags_offset())); /* Tags. */ \ | 254 __ str(R2, FieldAddress(R0, type_name::tags_offset())); /* Tags. */ \ |
255 } \ | 255 } \ |
256 /* Set the length field. */ \ | 256 /* Set the length field. */ \ |
257 /* R0: new object start as a tagged pointer. */ \ | 257 /* R0: new object start as a tagged pointer. */ \ |
258 /* R1: new object end address. */ \ | 258 /* R1: new object end address. */ \ |
259 __ ldr(R2, Address(SP, kArrayLengthStackOffset)); /* Array length. */ \ | 259 __ ldr(R2, Address(SP, kArrayLengthStackOffset)); /* Array length. */ \ |
260 __ StoreIntoObjectNoBarrier(R0, \ | 260 __ StoreIntoObjectNoBarrier(R0, \ |
261 FieldAddress(R0, type_name::length_offset()), \ | 261 FieldAddress(R0, type_name::length_offset()), \ |
262 R2); \ | 262 R2); \ |
263 /* Initialize all array elements to 0. */ \ | 263 /* Initialize all array elements to 0. */ \ |
264 /* R0: new object start as a tagged pointer. */ \ | 264 /* R0: new object start as a tagged pointer. */ \ |
265 /* R1: new object end address. */ \ | 265 /* R1: new object end address. */ \ |
266 /* R2: iterator which initially points to the start of the variable */ \ | 266 /* R2: iterator which initially points to the start of the variable */ \ |
267 /* data area to be initialized. */ \ | 267 /* data area to be initialized. */ \ |
268 /* R3: scratch register. */ \ | 268 /* R3: scratch register. */ \ |
269 __ mov(R3, ZR); \ | 269 __ mov(R3, ZR); \ |
270 __ AddImmediate(R2, R0, sizeof(Raw##type_name) - 1, kNoPP); \ | 270 __ AddImmediate(R2, R0, sizeof(Raw##type_name) - 1); \ |
271 Label init_loop, done; \ | 271 Label init_loop, done; \ |
272 __ Bind(&init_loop); \ | 272 __ Bind(&init_loop); \ |
273 __ cmp(R2, Operand(R1)); \ | 273 __ cmp(R2, Operand(R1)); \ |
274 __ b(&done, CS); \ | 274 __ b(&done, CS); \ |
275 __ str(R3, Address(R2, 0)); \ | 275 __ str(R3, Address(R2, 0)); \ |
276 __ add(R2, R2, Operand(kWordSize)); \ | 276 __ add(R2, R2, Operand(kWordSize)); \ |
277 __ b(&init_loop); \ | 277 __ b(&init_loop); \ |
278 __ Bind(&done); \ | 278 __ Bind(&done); \ |
279 \ | 279 \ |
280 __ ret(); \ | 280 __ ret(); \ |
(...skipping 185 matching lines...)
466 __ CompareRegisters(R0, ZR); | 466 __ CompareRegisters(R0, ZR); |
467 __ b(&fall_through, EQ); // If b is 0, fall through. | 467 __ b(&fall_through, EQ); // If b is 0, fall through. |
468 | 468 |
469 __ SmiUntag(R0); | 469 __ SmiUntag(R0); |
470 __ SmiUntag(R1); | 470 __ SmiUntag(R1); |
471 | 471 |
472 __ sdiv(R0, R1, R0); | 472 __ sdiv(R0, R1, R0); |
473 | 473 |
474 // Check the corner case of dividing 'MIN_SMI' by -1, in which case we | 474 // Check the corner case of dividing 'MIN_SMI' by -1, in which case we |
475 // cannot tag the result. | 475 // cannot tag the result. |
476 __ CompareImmediate(R0, 0x4000000000000000, kNoPP); | 476 __ CompareImmediate(R0, 0x4000000000000000); |
477 __ b(&fall_through, EQ); | 477 __ b(&fall_through, EQ); |
478 __ SmiTag(R0); // Not equal. Okay to tag and return. | 478 __ SmiTag(R0); // Not equal. Okay to tag and return. |
479 __ ret(); // Return. | 479 __ ret(); // Return. |
480 __ Bind(&fall_through); | 480 __ Bind(&fall_through); |
481 } | 481 } |
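Why 0x4000000000000000 is singled out: Smis on arm64 carry 63 significant bits, so the only quotient that can overflow tagging is MIN_SMI / -1. A standalone sketch of the arithmetic (the Smi range [-2^62, 2^62-1] is an assumption stated here, not spelled out in the diff):

    #include <cassert>
    #include <cstdint>

    int main() {
      const int64_t kSmiMin = -(int64_t{1} << 62);     // smallest 63-bit Smi
      const int64_t kSmiMax = (int64_t{1} << 62) - 1;  // largest 63-bit Smi
      int64_t q = kSmiMin / -1;                        // the one overflowing quotient
      assert(q == int64_t{0x4000000000000000});        // == 1 << 62
      assert(q > kSmiMax);                             // so SmiTag(q) would wrap
      return 0;
    }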
482 | 482 |
483 | 483 |
484 void Intrinsifier::Integer_negate(Assembler* assembler) { | 484 void Intrinsifier::Integer_negate(Assembler* assembler) { |
485 Label fall_through; | 485 Label fall_through; |
486 __ ldr(R0, Address(SP, + 0 * kWordSize)); // Grab first argument. | 486 __ ldr(R0, Address(SP, + 0 * kWordSize)); // Grab first argument. |
(...skipping 53 matching lines...)
540 ASSERT(kSmiTagShift == 1); | 540 ASSERT(kSmiTagShift == 1); |
541 ASSERT(kSmiTag == 0); | 541 ASSERT(kSmiTag == 0); |
542 const Register right = R0; | 542 const Register right = R0; |
543 const Register left = R1; | 543 const Register left = R1; |
544 const Register temp = R2; | 544 const Register temp = R2; |
545 const Register result = R0; | 545 const Register result = R0; |
546 Label fall_through; | 546 Label fall_through; |
547 | 547 |
548 TestBothArgumentsSmis(assembler, &fall_through); | 548 TestBothArgumentsSmis(assembler, &fall_through); |
549 __ CompareImmediate( | 549 __ CompareImmediate( |
550 right, reinterpret_cast<int64_t>(Smi::New(Smi::kBits)), PP); | 550 right, reinterpret_cast<int64_t>(Smi::New(Smi::kBits))); |
551 __ b(&fall_through, CS); | 551 __ b(&fall_through, CS); |
552 | 552 |
553 // Left is not a constant. | 553 // Left is not a constant. |
554 // Check if count too large for handling it inlined. | 554 // Check if count too large for handling it inlined. |
555 __ SmiUntag(TMP, right); // SmiUntag right into TMP. | 555 __ SmiUntag(TMP, right); // SmiUntag right into TMP. |
556 // Overflow test (preserve left, right, and TMP); | 556 // Overflow test (preserve left, right, and TMP); |
557 __ lslv(temp, left, TMP); | 557 __ lslv(temp, left, TMP); |
558 __ asrv(TMP2, temp, TMP); | 558 __ asrv(TMP2, temp, TMP); |
559 __ CompareRegisters(left, TMP2); | 559 __ CompareRegisters(left, TMP2); |
560 __ b(&fall_through, NE); // Overflow. | 560 __ b(&fall_through, NE); // Overflow. |
561 // Shift for the result now that we know there is no overflow. | 561 // Shift for the result now that we know there is no overflow. |
562 __ lslv(result, left, TMP); | 562 __ lslv(result, left, TMP); |
563 __ ret(); | 563 __ ret(); |
564 __ Bind(&fall_through); | 564 __ Bind(&fall_through); |
565 } | 565 } |
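The overflow test in this shift-left intrinsic relies on lslv/asrv being inverses exactly when no significant bits are lost. A minimal C++ rendering of the lslv/asrv/compare sequence (plain int64_t stands in for untagged Smi values; illustration only):

    #include <cassert>
    #include <cstdint>

    int main() {
      auto shl_no_overflow = [](int64_t x, unsigned n, int64_t* out) {
        int64_t shifted =
            static_cast<int64_t>(static_cast<uint64_t>(x) << n);  // lslv
        if ((shifted >> n) != x) return false;  // asrv + CompareRegisters
        *out = shifted;                         // commit only when lossless
        return true;
      };
      int64_t r = 0;
      assert(shl_no_overflow(3, 2, &r) && r == 12);
      assert(!shl_no_overflow(int64_t{1} << 62, 2, &r));  // bits would be lost
      return 0;
    }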
566 | 566 |
567 | 567 |
568 static void CompareIntegers(Assembler* assembler, Condition true_condition) { | 568 static void CompareIntegers(Assembler* assembler, Condition true_condition) { |
569 Label fall_through, true_label; | 569 Label fall_through, true_label; |
570 TestBothArgumentsSmis(assembler, &fall_through); | 570 TestBothArgumentsSmis(assembler, &fall_through); |
571 // R0 contains the right argument, R1 the left. | 571 // R0 contains the right argument, R1 the left. |
572 __ CompareRegisters(R1, R0); | 572 __ CompareRegisters(R1, R0); |
573 __ LoadObject(R0, Bool::False(), PP); | 573 __ LoadObject(R0, Bool::False()); |
574 __ LoadObject(TMP, Bool::True(), PP); | 574 __ LoadObject(TMP, Bool::True()); |
575 __ csel(R0, TMP, R0, true_condition); | 575 __ csel(R0, TMP, R0, true_condition); |
576 __ ret(); | 576 __ ret(); |
577 __ Bind(&fall_through); | 577 __ Bind(&fall_through); |
578 } | 578 } |
579 | 579 |
580 | 580 |
581 void Intrinsifier::Integer_greaterThanFromInt(Assembler* assembler) { | 581 void Intrinsifier::Integer_greaterThanFromInt(Assembler* assembler) { |
582 CompareIntegers(assembler, LT); | 582 CompareIntegers(assembler, LT); |
583 } | 583 } |
584 | 584 |
(...skipping 26 matching lines...)
611 __ ldr(R0, Address(SP, 0 * kWordSize)); | 611 __ ldr(R0, Address(SP, 0 * kWordSize)); |
612 __ ldr(R1, Address(SP, 1 * kWordSize)); | 612 __ ldr(R1, Address(SP, 1 * kWordSize)); |
613 __ cmp(R0, Operand(R1)); | 613 __ cmp(R0, Operand(R1)); |
614 __ b(&true_label, EQ); | 614 __ b(&true_label, EQ); |
615 | 615 |
616 __ orr(R2, R0, Operand(R1)); | 616 __ orr(R2, R0, Operand(R1)); |
617 __ tsti(R2, Immediate(kSmiTagMask)); | 617 __ tsti(R2, Immediate(kSmiTagMask)); |
618 __ b(&check_for_mint, NE); // If R0 or R1 is not a smi do Mint checks. | 618 __ b(&check_for_mint, NE); // If R0 or R1 is not a smi do Mint checks. |
619 | 619 |
620 // Both arguments are smi, '===' is good enough. | 620 // Both arguments are smi, '===' is good enough. |
621 __ LoadObject(R0, Bool::False(), PP); | 621 __ LoadObject(R0, Bool::False()); |
622 __ ret(); | 622 __ ret(); |
623 __ Bind(&true_label); | 623 __ Bind(&true_label); |
624 __ LoadObject(R0, Bool::True(), PP); | 624 __ LoadObject(R0, Bool::True()); |
625 __ ret(); | 625 __ ret(); |
626 | 626 |
627 // At least one of the arguments was not Smi. | 627 // At least one of the arguments was not Smi. |
628 Label receiver_not_smi; | 628 Label receiver_not_smi; |
629 __ Bind(&check_for_mint); | 629 __ Bind(&check_for_mint); |
630 | 630 |
631 __ tsti(R1, Immediate(kSmiTagMask)); // Check receiver. | 631 __ tsti(R1, Immediate(kSmiTagMask)); // Check receiver. |
632 __ b(&receiver_not_smi, NE); | 632 __ b(&receiver_not_smi, NE); |
633 | 633 |
634 // Left (receiver) is Smi, return false if right is not Double. | 634 // Left (receiver) is Smi, return false if right is not Double. |
635 // Note that an instance of Mint or Bigint never contains a value that can be | 635 // Note that an instance of Mint or Bigint never contains a value that can be |
636 // represented by Smi. | 636 // represented by Smi. |
637 | 637 |
638 __ CompareClassId(R0, kDoubleCid, kNoPP); | 638 __ CompareClassId(R0, kDoubleCid); |
639 __ b(&fall_through, EQ); | 639 __ b(&fall_through, EQ); |
640 __ LoadObject(R0, Bool::False(), PP); // Smi == Mint -> false. | 640 __ LoadObject(R0, Bool::False()); // Smi == Mint -> false. |
641 __ ret(); | 641 __ ret(); |
642 | 642 |
643 __ Bind(&receiver_not_smi); | 643 __ Bind(&receiver_not_smi); |
644 // R1: receiver. | 644 // R1: receiver. |
645 | 645 |
646 __ CompareClassId(R1, kMintCid, kNoPP); | 646 __ CompareClassId(R1, kMintCid); |
647 __ b(&fall_through, NE); | 647 __ b(&fall_through, NE); |
648 // Receiver is Mint, return false if right is Smi. | 648 // Receiver is Mint, return false if right is Smi. |
649 __ tsti(R0, Immediate(kSmiTagMask)); | 649 __ tsti(R0, Immediate(kSmiTagMask)); |
650 __ b(&fall_through, NE); | 650 __ b(&fall_through, NE); |
651 __ LoadObject(R0, Bool::False(), PP); | 651 __ LoadObject(R0, Bool::False()); |
652 __ ret(); | 652 __ ret(); |
653 // TODO(srdjan): Implement Mint == Mint comparison. | 653 // TODO(srdjan): Implement Mint == Mint comparison. |
654 | 654 |
655 __ Bind(&fall_through); | 655 __ Bind(&fall_through); |
656 } | 656 } |
657 | 657 |
658 | 658 |
659 void Intrinsifier::Integer_equal(Assembler* assembler) { | 659 void Intrinsifier::Integer_equal(Assembler* assembler) { |
660 Integer_equalToInteger(assembler); | 660 Integer_equalToInteger(assembler); |
661 } | 661 } |
662 | 662 |
663 | 663 |
664 void Intrinsifier::Integer_sar(Assembler* assembler) { | 664 void Intrinsifier::Integer_sar(Assembler* assembler) { |
665 Label fall_through; | 665 Label fall_through; |
666 | 666 |
667 TestBothArgumentsSmis(assembler, &fall_through); | 667 TestBothArgumentsSmis(assembler, &fall_through); |
668 // Shift amount in R0. Value to shift in R1. | 668 // Shift amount in R0. Value to shift in R1. |
669 | 669 |
670 // Fall through if shift amount is negative. | 670 // Fall through if shift amount is negative. |
671 __ SmiUntag(R0); | 671 __ SmiUntag(R0); |
672 __ CompareRegisters(R0, ZR); | 672 __ CompareRegisters(R0, ZR); |
673 __ b(&fall_through, LT); | 673 __ b(&fall_through, LT); |
674 | 674 |
675 // If shift amount is bigger than 63, set to 63. | 675 // If shift amount is bigger than 63, set to 63. |
676 __ LoadImmediate(TMP, 0x3F, kNoPP); | 676 __ LoadImmediate(TMP, 0x3F); |
677 __ CompareRegisters(R0, TMP); | 677 __ CompareRegisters(R0, TMP); |
678 __ csel(R0, TMP, R0, GT); | 678 __ csel(R0, TMP, R0, GT); |
679 __ SmiUntag(R1); | 679 __ SmiUntag(R1); |
680 __ asrv(R0, R1, R0); | 680 __ asrv(R0, R1, R0); |
681 __ SmiTag(R0); | 681 __ SmiTag(R0); |
682 __ ret(); | 682 __ ret(); |
683 __ Bind(&fall_through); | 683 __ Bind(&fall_through); |
684 } | 684 } |
685 | 685 |
686 | 686 |
687 void Intrinsifier::Smi_bitNegate(Assembler* assembler) { | 687 void Intrinsifier::Smi_bitNegate(Assembler* assembler) { |
688 __ ldr(R0, Address(SP, 0 * kWordSize)); | 688 __ ldr(R0, Address(SP, 0 * kWordSize)); |
689 __ mvn(R0, R0); | 689 __ mvn(R0, R0); |
690 __ andi(R0, R0, Immediate(~kSmiTagMask)); // Remove inverted smi-tag. | 690 __ andi(R0, R0, Immediate(~kSmiTagMask)); // Remove inverted smi-tag. |
691 __ ret(); | 691 __ ret(); |
692 } | 692 } |
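Smi_bitNegate works because mvn also inverts the (zero) smi-tag bit, and clearing that inverted bit yields exactly the tagged form of the inverted value. A sketch of the identity, assuming the standard encoding tag(v) == v << 1:

    #include <cassert>
    #include <cstdint>

    int main() {
      auto tag = [](int64_t v) { return v << 1; };
      for (int64_t v :
           {int64_t{0}, int64_t{1}, int64_t{-1}, int64_t{42}, int64_t{-1000}}) {
        int64_t inverted = ~tag(v);               // mvn: ~(v << 1) == -2v - 1
        int64_t result = inverted & ~int64_t{1};  // andi: clear inverted smi-tag
        assert(result == tag(~v));                // == (-v - 1) << 1
      }
      return 0;
    }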
693 | 693 |
694 | 694 |
695 void Intrinsifier::Smi_bitLength(Assembler* assembler) { | 695 void Intrinsifier::Smi_bitLength(Assembler* assembler) { |
696 __ ldr(R0, Address(SP, 0 * kWordSize)); | 696 __ ldr(R0, Address(SP, 0 * kWordSize)); |
697 __ SmiUntag(R0); | 697 __ SmiUntag(R0); |
698 // XOR with sign bit to complement bits if value is negative. | 698 // XOR with sign bit to complement bits if value is negative. |
699 __ eor(R0, R0, Operand(R0, ASR, 63)); | 699 __ eor(R0, R0, Operand(R0, ASR, 63)); |
700 __ clz(R0, R0); | 700 __ clz(R0, R0); |
701 __ LoadImmediate(R1, 64, kNoPP); | 701 __ LoadImmediate(R1, 64); |
702 __ sub(R0, R1, Operand(R0)); | 702 __ sub(R0, R1, Operand(R0)); |
703 __ SmiTag(R0); | 703 __ SmiTag(R0); |
704 __ ret(); | 704 __ ret(); |
705 } | 705 } |
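Smi_bitLength computes 64 - clz(x ^ (x >> 63)): the XOR with the sign mask complements negative values, so both signs reduce to counting significant bits of a non-negative word. A sketch using the GCC/Clang builtin (on ARM64 clz(0) is 64, so the intrinsic needs no special case, but __builtin_clzll(0) is undefined in C++, hence the guard):

    #include <cassert>
    #include <cstdint>

    int main() {
      auto bit_length = [](int64_t x) {
        uint64_t u = static_cast<uint64_t>(x ^ (x >> 63));  // eor with sign mask
        return u == 0 ? 0 : 64 - __builtin_clzll(u);
      };
      assert(bit_length(0) == 0);
      assert(bit_length(1) == 1);
      assert(bit_length(-1) == 0);    // complements to 0
      assert(bit_length(255) == 8);
      assert(bit_length(-256) == 8);  // complements to 255
      return 0;
    }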
706 | 706 |
707 | 707 |
708 void Intrinsifier::Bigint_lsh(Assembler* assembler) { | 708 void Intrinsifier::Bigint_lsh(Assembler* assembler) { |
709 // static void _lsh(Uint32List x_digits, int x_used, int n, | 709 // static void _lsh(Uint32List x_digits, int x_used, int n, |
710 // Uint32List r_digits) | 710 // Uint32List r_digits) |
711 | 711 |
(...skipping 10 matching lines...)
722 __ add(R6, R3, Operand(TypedData::data_offset() - kHeapObjectTag)); | 722 __ add(R6, R3, Operand(TypedData::data_offset() - kHeapObjectTag)); |
723 // R7 = &x_digits[2*R2] | 723 // R7 = &x_digits[2*R2] |
724 __ add(R7, R6, Operand(R2, LSL, 3)); | 724 __ add(R7, R6, Operand(R2, LSL, 3)); |
725 // R8 = &r_digits[2*1] | 725 // R8 = &r_digits[2*1] |
726 __ add(R8, R4, Operand(TypedData::data_offset() - kHeapObjectTag + | 726 __ add(R8, R4, Operand(TypedData::data_offset() - kHeapObjectTag + |
727 2 * Bigint::kBytesPerDigit)); | 727 2 * Bigint::kBytesPerDigit)); |
728 // R8 = &r_digits[2*(R2 + n ~/ (2*_DIGIT_BITS) + 1)] | 728 // R8 = &r_digits[2*(R2 + n ~/ (2*_DIGIT_BITS) + 1)] |
729 __ add(R0, R0, Operand(R2)); | 729 __ add(R0, R0, Operand(R2)); |
730 __ add(R8, R8, Operand(R0, LSL, 3)); | 730 __ add(R8, R8, Operand(R0, LSL, 3)); |
731 // R3 = n % (2 * _DIGIT_BITS) | 731 // R3 = n % (2 * _DIGIT_BITS) |
732 __ AndImmediate(R3, R5, 63, kNoPP); | 732 __ AndImmediate(R3, R5, 63); |
733 // R2 = 64 - R3 | 733 // R2 = 64 - R3 |
734 __ LoadImmediate(R2, 64, kNoPP); | 734 __ LoadImmediate(R2, 64); |
735 __ sub(R2, R2, Operand(R3)); | 735 __ sub(R2, R2, Operand(R3)); |
736 __ mov(R1, ZR); | 736 __ mov(R1, ZR); |
737 Label loop; | 737 Label loop; |
738 __ Bind(&loop); | 738 __ Bind(&loop); |
739 __ ldr(R0, Address(R7, -2 * Bigint::kBytesPerDigit, Address::PreIndex)); | 739 __ ldr(R0, Address(R7, -2 * Bigint::kBytesPerDigit, Address::PreIndex)); |
740 __ lsrv(R4, R0, R2); | 740 __ lsrv(R4, R0, R2); |
741 __ orr(R1, R1, Operand(R4)); | 741 __ orr(R1, R1, Operand(R4)); |
742 __ str(R1, Address(R8, -2 * Bigint::kBytesPerDigit, Address::PreIndex)); | 742 __ str(R1, Address(R8, -2 * Bigint::kBytesPerDigit, Address::PreIndex)); |
743 __ lslv(R1, R0, R3); | 743 __ lslv(R1, R0, R3); |
744 __ cmp(R7, Operand(R6)); | 744 __ cmp(R7, Operand(R6)); |
(...skipping 20 matching lines...)
765 // R8 = &r_digits[0] | 765 // R8 = &r_digits[0] |
766 __ add(R8, R4, Operand(TypedData::data_offset() - kHeapObjectTag)); | 766 __ add(R8, R4, Operand(TypedData::data_offset() - kHeapObjectTag)); |
767 // R7 = &x_digits[2*(n ~/ (2*_DIGIT_BITS))] | 767 // R7 = &x_digits[2*(n ~/ (2*_DIGIT_BITS))] |
768 __ add(R7, R3, Operand(TypedData::data_offset() - kHeapObjectTag)); | 768 __ add(R7, R3, Operand(TypedData::data_offset() - kHeapObjectTag)); |
769 __ add(R7, R7, Operand(R0, LSL, 3)); | 769 __ add(R7, R7, Operand(R0, LSL, 3)); |
770 // R6 = &r_digits[2*(R2 - n ~/ (2*_DIGIT_BITS) - 1)] | 770 // R6 = &r_digits[2*(R2 - n ~/ (2*_DIGIT_BITS) - 1)] |
771 __ add(R0, R0, Operand(1)); | 771 __ add(R0, R0, Operand(1)); |
772 __ sub(R0, R2, Operand(R0)); | 772 __ sub(R0, R2, Operand(R0)); |
773 __ add(R6, R8, Operand(R0, LSL, 3)); | 773 __ add(R6, R8, Operand(R0, LSL, 3)); |
774 // R3 = n % (2*_DIGIT_BITS) | 774 // R3 = n % (2*_DIGIT_BITS) |
775 __ AndImmediate(R3, R5, 63, kNoPP); | 775 __ AndImmediate(R3, R5, 63); |
776 // R2 = 64 - R3 | 776 // R2 = 64 - R3 |
777 __ LoadImmediate(R2, 64, kNoPP); | 777 __ LoadImmediate(R2, 64); |
778 __ sub(R2, R2, Operand(R3)); | 778 __ sub(R2, R2, Operand(R3)); |
779 // R1 = x_digits[n ~/ (2*_DIGIT_BITS)] >> (n % (2*_DIGIT_BITS)) | 779 // R1 = x_digits[n ~/ (2*_DIGIT_BITS)] >> (n % (2*_DIGIT_BITS)) |
780 __ ldr(R1, Address(R7, 2 * Bigint::kBytesPerDigit, Address::PostIndex)); | 780 __ ldr(R1, Address(R7, 2 * Bigint::kBytesPerDigit, Address::PostIndex)); |
781 __ lsrv(R1, R1, R3); | 781 __ lsrv(R1, R1, R3); |
782 Label loop_entry; | 782 Label loop_entry; |
783 __ b(&loop_entry); | 783 __ b(&loop_entry); |
784 Label loop; | 784 Label loop; |
785 __ Bind(&loop); | 785 __ Bind(&loop); |
786 __ ldr(R0, Address(R7, 2 * Bigint::kBytesPerDigit, Address::PostIndex)); | 786 __ ldr(R0, Address(R7, 2 * Bigint::kBytesPerDigit, Address::PostIndex)); |
787 __ lslv(R4, R0, R2); | 787 __ lslv(R4, R0, R2); |
(...skipping 59 matching lines...)
847 // Loop (used+1)/2 - (a_used+1)/2 times, used - a_used > 0. | 847 // Loop (used+1)/2 - (a_used+1)/2 times, used - a_used > 0. |
848 __ ldr(R0, Address(R3, 2*Bigint::kBytesPerDigit, Address::PostIndex)); | 848 __ ldr(R0, Address(R3, 2*Bigint::kBytesPerDigit, Address::PostIndex)); |
849 __ adcs(R0, R0, ZR); | 849 __ adcs(R0, R0, ZR); |
850 __ sub(R9, R3, Operand(R8)); // Does not affect carry flag. | 850 __ sub(R9, R3, Operand(R8)); // Does not affect carry flag. |
851 __ str(R0, Address(R6, 2*Bigint::kBytesPerDigit, Address::PostIndex)); | 851 __ str(R0, Address(R6, 2*Bigint::kBytesPerDigit, Address::PostIndex)); |
852 __ cbnz(&carry_loop, R9); | 852 __ cbnz(&carry_loop, R9); |
853 | 853 |
854 __ Bind(&last_carry); | 854 __ Bind(&last_carry); |
855 Label done; | 855 Label done; |
856 __ b(&done, CC); | 856 __ b(&done, CC); |
857 __ LoadImmediate(R0, 1, kNoPP); | 857 __ LoadImmediate(R0, 1); |
858 __ str(R0, Address(R6, 0)); | 858 __ str(R0, Address(R6, 0)); |
859 | 859 |
860 __ Bind(&done); | 860 __ Bind(&done); |
861 // Returning Object::null() is not required, since this method is private. | 861 // Returning Object::null() is not required, since this method is private. |
862 __ ret(); | 862 __ ret(); |
863 } | 863 } |
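The adcs/cbnz loops in _absAdd thread the carry flag through 64-bit limb additions (two 32-bit digits per load), and the final conditional store materializes a leftover carry into r_digits. A limb-level sketch in portable C++, with a manual carry standing in for the flags register:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t a[] = {~0ull, ~0ull};  // operand with every bit set
      const uint64_t b[] = {1, 0};
      uint64_t r[3] = {0, 0, 0};
      unsigned carry = 0;
      for (int i = 0; i < 2; ++i) {  // the add_loop / carry_loop body
        uint64_t s = a[i] + carry;
        unsigned c1 = (s < carry);   // carry out of the first add
        r[i] = s + b[i];
        carry = c1 | (r[i] < s);     // carry out of either add
      }
      r[2] = carry;                  // "last_carry": store 1 if still set
      assert(r[0] == 0 && r[1] == 0 && r[2] == 1);
      return 0;
    }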
864 | 864 |
865 | 865 |
866 void Intrinsifier::Bigint_absSub(Assembler* assembler) { | 866 void Intrinsifier::Bigint_absSub(Assembler* assembler) { |
867 // static void _absSub(Uint32List digits, int used, | 867 // static void _absSub(Uint32List digits, int used, |
(...skipping 154 matching lines...)
1022 __ b(&done, CC); | 1022 __ b(&done, CC); |
1023 | 1023 |
1024 Label propagate_carry_loop; | 1024 Label propagate_carry_loop; |
1025 __ Bind(&propagate_carry_loop); | 1025 __ Bind(&propagate_carry_loop); |
1026 __ ldr(R0, Address(R5, 0)); | 1026 __ ldr(R0, Address(R5, 0)); |
1027 __ adds(R0, R0, Operand(1)); | 1027 __ adds(R0, R0, Operand(1)); |
1028 __ str(R0, Address(R5, 2*Bigint::kBytesPerDigit, Address::PostIndex)); | 1028 __ str(R0, Address(R5, 2*Bigint::kBytesPerDigit, Address::PostIndex)); |
1029 __ b(&propagate_carry_loop, CS); | 1029 __ b(&propagate_carry_loop, CS); |
1030 | 1030 |
1031 __ Bind(&done); | 1031 __ Bind(&done); |
1032 __ LoadImmediate(R0, Smi::RawValue(2), kNoPP); // Two digits processed. | 1032 __ LoadImmediate(R0, Smi::RawValue(2)); // Two digits processed. |
1033 __ ret(); | 1033 __ ret(); |
1034 } | 1034 } |
1035 | 1035 |
1036 | 1036 |
1037 void Intrinsifier::Bigint_sqrAdd(Assembler* assembler) { | 1037 void Intrinsifier::Bigint_sqrAdd(Assembler* assembler) { |
1038 // Pseudo code: | 1038 // Pseudo code: |
1039 // static int _sqrAdd(Uint32List x_digits, int i, | 1039 // static int _sqrAdd(Uint32List x_digits, int i, |
1040 // Uint32List a_digits, int used) { | 1040 // Uint32List a_digits, int used) { |
1041 // uint64_t* xip = &x_digits[i >> 1]; // i is Smi and even. | 1041 // uint64_t* xip = &x_digits[i >> 1]; // i is Smi and even. |
1042 // uint64_t x = *xip++; | 1042 // uint64_t x = *xip++; |
(...skipping 94 matching lines...)
1137 | 1137 |
1138 // uint128_t t = aj + c | 1138 // uint128_t t = aj + c |
1139 __ adds(R6, R6, Operand(R0)); | 1139 __ adds(R6, R6, Operand(R0)); |
1140 __ adc(R7, R7, ZR); | 1140 __ adc(R7, R7, ZR); |
1141 | 1141 |
1142 // *ajp = low64(t) = R6 | 1142 // *ajp = low64(t) = R6 |
1143 // *(ajp + 1) = high64(t) = R7 | 1143 // *(ajp + 1) = high64(t) = R7 |
1144 __ stp(R6, R7, Address(R5, 0, Address::PairOffset)); | 1144 __ stp(R6, R7, Address(R5, 0, Address::PairOffset)); |
1145 | 1145 |
1146 __ Bind(&x_zero); | 1146 __ Bind(&x_zero); |
1147 __ LoadImmediate(R0, Smi::RawValue(2), kNoPP); // Two digits processed. | 1147 __ LoadImmediate(R0, Smi::RawValue(2)); // Two digits processed. |
1148 __ ret(); | 1148 __ ret(); |
1149 } | 1149 } |
1150 | 1150 |
1151 | 1151 |
1152 void Intrinsifier::Bigint_estQuotientDigit(Assembler* assembler) { | 1152 void Intrinsifier::Bigint_estQuotientDigit(Assembler* assembler) { |
1153 // There is no 128-bit by 64-bit division instruction on arm64, so we use two | 1153 // There is no 128-bit by 64-bit division instruction on arm64, so we use two |
1154 // 64-bit by 32-bit divisions and two 64-bit by 64-bit multiplications to | 1154 // 64-bit by 32-bit divisions and two 64-bit by 64-bit multiplications to |
1155 // adjust the two 32-bit digits of the estimated quotient. | 1155 // adjust the two 32-bit digits of the estimated quotient. |
1156 // | 1156 // |
1157 // Pseudo code: | 1157 // Pseudo code: |
(...skipping 153 matching lines...)
1311 | 1311 |
1312 __ Bind(&ql_ok); | 1312 __ Bind(&ql_ok); |
1313 // qd |= ql; | 1313 // qd |= ql; |
1314 __ orr(R0, R0, Operand(R6)); | 1314 __ orr(R0, R0, Operand(R6)); |
1315 | 1315 |
1316 __ Bind(&return_qd); | 1316 __ Bind(&return_qd); |
1317 // args[2..3] = qd | 1317 // args[2..3] = qd |
1318 __ str(R0, | 1318 __ str(R0, |
1319 FieldAddress(R4, TypedData::data_offset() + 2*Bigint::kBytesPerDigit)); | 1319 FieldAddress(R4, TypedData::data_offset() + 2*Bigint::kBytesPerDigit)); |
1320 | 1320 |
1321 __ LoadImmediate(R0, Smi::RawValue(2), kNoPP); // Two digits processed. | 1321 __ LoadImmediate(R0, Smi::RawValue(2)); // Two digits processed. |
1322 __ ret(); | 1322 __ ret(); |
1323 } | 1323 } |
1324 | 1324 |
1325 | 1325 |
1326 void Intrinsifier::Montgomery_mulMod(Assembler* assembler) { | 1326 void Intrinsifier::Montgomery_mulMod(Assembler* assembler) { |
1327 // Pseudo code: | 1327 // Pseudo code: |
1328 // static int _mulMod(Uint32List args, Uint32List digits, int i) { | 1328 // static int _mulMod(Uint32List args, Uint32List digits, int i) { |
1329 // uint64_t rho = args[_RHO .. _RHO_HI]; // _RHO == 2, _RHO_HI == 3. | 1329 // uint64_t rho = args[_RHO .. _RHO_HI]; // _RHO == 2, _RHO_HI == 3. |
1330 // uint64_t d = digits[i >> 1 .. (i >> 1) + 1]; // i is Smi and even. | 1330 // uint64_t d = digits[i >> 1 .. (i >> 1) + 1]; // i is Smi and even. |
1331 // uint128_t t = rho*d; | 1331 // uint128_t t = rho*d; |
(...skipping 14 matching lines...)
1346 __ add(R1, R1, Operand(R0, LSL, 1)); | 1346 __ add(R1, R1, Operand(R0, LSL, 1)); |
1347 __ ldr(R2, FieldAddress(R1, TypedData::data_offset())); | 1347 __ ldr(R2, FieldAddress(R1, TypedData::data_offset())); |
1348 | 1348 |
1349 // R0 = rho*d mod DIGIT_BASE | 1349 // R0 = rho*d mod DIGIT_BASE |
1350 __ mul(R0, R2, R3); // R0 = low64(R2*R3). | 1350 __ mul(R0, R2, R3); // R0 = low64(R2*R3). |
1351 | 1351 |
1352 // args[4 .. 5] = R0 | 1352 // args[4 .. 5] = R0 |
1353 __ str(R0, | 1353 __ str(R0, |
1354 FieldAddress(R4, TypedData::data_offset() + 4*Bigint::kBytesPerDigit)); | 1354 FieldAddress(R4, TypedData::data_offset() + 4*Bigint::kBytesPerDigit)); |
1355 | 1355 |
1356 __ LoadImmediate(R0, Smi::RawValue(2), kNoPP); // Two digits processed. | 1356 __ LoadImmediate(R0, Smi::RawValue(2)); // Two digits processed. |
1357 __ ret(); | 1357 __ ret(); |
1358 } | 1358 } |
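The mul instruction keeps only low64(rho*d), i.e. the product mod DIGIT_BASE^2 (DIGIT_BASE == 2^32, two digits at a time). Unsigned C++ multiplication wraps the same way; this sketch cross-checks against a 32-bit schoolbook multiply (the operand values are arbitrary examples):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t rho = 0x9E3779B97F4A7C15ull;  // arbitrary example
      const uint64_t d = 0x0123456789ABCDEFull;    // arbitrary example
      uint64_t t_lo = rho * d;  // wraps mod 2^64, matching __ mul(R0, R2, R3)
      // Schoolbook cross-check: the rh*dh term falls outside the low 64 bits.
      uint64_t rl = rho & 0xffffffffull, rh = rho >> 32;
      uint64_t dl = d & 0xffffffffull, dh = d >> 32;
      assert(t_lo == rl * dl + ((rl * dh + rh * dl) << 32));
      return 0;
    }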
1359 | 1359 |
1360 | 1360 |
1361 // Check if the last argument is a double, jump to label 'is_smi' if smi | 1361 // Check if the last argument is a double, jump to label 'is_smi' if smi |
1362 // (easy to convert to double); otherwise jump to label 'not_double_smi'. | 1362 // (easy to convert to double); otherwise jump to label 'not_double_smi'. |
1363 // Returns the last argument in R0. | 1363 // Returns the last argument in R0. |
1364 static void TestLastArgumentIsDouble(Assembler* assembler, | 1364 static void TestLastArgumentIsDouble(Assembler* assembler, |
1365 Label* is_smi, | 1365 Label* is_smi, |
1366 Label* not_double_smi) { | 1366 Label* not_double_smi) { |
1367 __ ldr(R0, Address(SP, 0 * kWordSize)); | 1367 __ ldr(R0, Address(SP, 0 * kWordSize)); |
1368 __ tsti(R0, Immediate(kSmiTagMask)); | 1368 __ tsti(R0, Immediate(kSmiTagMask)); |
1369 __ b(is_smi, EQ); | 1369 __ b(is_smi, EQ); |
1370 __ CompareClassId(R0, kDoubleCid, kNoPP); | 1370 __ CompareClassId(R0, kDoubleCid); |
1371 __ b(not_double_smi, NE); | 1371 __ b(not_double_smi, NE); |
1372 // Fall through with Double in R0. | 1372 // Fall through with Double in R0. |
1373 } | 1373 } |
1374 | 1374 |
1375 | 1375 |
1376 // Both arguments on stack, arg0 (left) is a double, arg1 (right) is of unknown | 1376 // Both arguments on stack, arg0 (left) is a double, arg1 (right) is of unknown |
1377 // type. Return true or false object in the register R0. Any NaN argument | 1377 // type. Return true or false object in the register R0. Any NaN argument |
1378 // returns false. Any non-double arg1 causes control flow to fall through to the | 1378 // returns false. Any non-double arg1 causes control flow to fall through to the |
1379 // slow case (compiled method body). | 1379 // slow case (compiled method body). |
1380 static void CompareDoubles(Assembler* assembler, Condition true_condition) { | 1380 static void CompareDoubles(Assembler* assembler, Condition true_condition) { |
1381 Label fall_through, is_smi, double_op, not_nan; | 1381 Label fall_through, is_smi, double_op, not_nan; |
1382 | 1382 |
1383 TestLastArgumentIsDouble(assembler, &is_smi, &fall_through); | 1383 TestLastArgumentIsDouble(assembler, &is_smi, &fall_through); |
1384 // Both arguments are double, right operand is in R0. | 1384 // Both arguments are double, right operand is in R0. |
1385 | 1385 |
1386 __ LoadDFieldFromOffset(V1, R0, Double::value_offset(), kNoPP); | 1386 __ LoadDFieldFromOffset(V1, R0, Double::value_offset()); |
1387 __ Bind(&double_op); | 1387 __ Bind(&double_op); |
1388 __ ldr(R0, Address(SP, 1 * kWordSize)); // Left argument. | 1388 __ ldr(R0, Address(SP, 1 * kWordSize)); // Left argument. |
1389 __ LoadDFieldFromOffset(V0, R0, Double::value_offset(), kNoPP); | 1389 __ LoadDFieldFromOffset(V0, R0, Double::value_offset()); |
1390 | 1390 |
1391 __ fcmpd(V0, V1); | 1391 __ fcmpd(V0, V1); |
1392 __ LoadObject(R0, Bool::False(), PP); | 1392 __ LoadObject(R0, Bool::False()); |
1393 // Return false if D0 or D1 was NaN before checking true condition. | 1393 // Return false if D0 or D1 was NaN before checking true condition. |
1394 __ b(&not_nan, VC); | 1394 __ b(&not_nan, VC); |
1395 __ ret(); | 1395 __ ret(); |
1396 __ Bind(&not_nan); | 1396 __ Bind(&not_nan); |
1397 __ LoadObject(TMP, Bool::True(), PP); | 1397 __ LoadObject(TMP, Bool::True()); |
1398 __ csel(R0, TMP, R0, true_condition); | 1398 __ csel(R0, TMP, R0, true_condition); |
1399 __ ret(); | 1399 __ ret(); |
1400 | 1400 |
1401 __ Bind(&is_smi); // Convert R0 to a double. | 1401 __ Bind(&is_smi); // Convert R0 to a double. |
1402 __ SmiUntag(R0); | 1402 __ SmiUntag(R0); |
1403 __ scvtfdx(V1, R0); | 1403 __ scvtfdx(V1, R0); |
1404 __ b(&double_op); // Then do the comparison. | 1404 __ b(&double_op); // Then do the comparison. |
1405 __ Bind(&fall_through); | 1405 __ Bind(&fall_through); |
1406 } | 1406 } |
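The False-first pattern in CompareDoubles: fcmpd on an unordered pair (any NaN operand) sets the V flag, so the VC branch is not taken and the False already loaded into R0 is returned before true_condition is consulted. That matches IEEE-754 semantics, where every ordered comparison involving NaN is false, as this spot-check shows:

    #include <cassert>
    #include <cmath>

    int main() {
      const double nan = std::nan("");
      assert(!(nan < 1.0) && !(nan > 1.0) && !(nan <= 1.0) && !(nan >= 1.0));
      assert(!(nan == nan));  // NaN compares unequal even to itself
      return 0;
    }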
1407 | 1407 |
(...skipping 23 matching lines...)
1431 } | 1431 } |
1432 | 1432 |
1433 | 1433 |
1434 // Expects left argument to be double (receiver). Right argument is unknown. | 1434 // Expects left argument to be double (receiver). Right argument is unknown. |
1435 // Both arguments are on stack. | 1435 // Both arguments are on stack. |
1436 static void DoubleArithmeticOperations(Assembler* assembler, Token::Kind kind) { | 1436 static void DoubleArithmeticOperations(Assembler* assembler, Token::Kind kind) { |
1437 Label fall_through; | 1437 Label fall_through; |
1438 | 1438 |
1439 TestLastArgumentIsDouble(assembler, &fall_through, &fall_through); | 1439 TestLastArgumentIsDouble(assembler, &fall_through, &fall_through); |
1440 // Both arguments are double, right operand is in R0. | 1440 // Both arguments are double, right operand is in R0. |
1441 __ LoadDFieldFromOffset(V1, R0, Double::value_offset(), kNoPP); | 1441 __ LoadDFieldFromOffset(V1, R0, Double::value_offset()); |
1442 __ ldr(R0, Address(SP, 1 * kWordSize)); // Left argument. | 1442 __ ldr(R0, Address(SP, 1 * kWordSize)); // Left argument. |
1443 __ LoadDFieldFromOffset(V0, R0, Double::value_offset(), kNoPP); | 1443 __ LoadDFieldFromOffset(V0, R0, Double::value_offset()); |
1444 switch (kind) { | 1444 switch (kind) { |
1445 case Token::kADD: __ faddd(V0, V0, V1); break; | 1445 case Token::kADD: __ faddd(V0, V0, V1); break; |
1446 case Token::kSUB: __ fsubd(V0, V0, V1); break; | 1446 case Token::kSUB: __ fsubd(V0, V0, V1); break; |
1447 case Token::kMUL: __ fmuld(V0, V0, V1); break; | 1447 case Token::kMUL: __ fmuld(V0, V0, V1); break; |
1448 case Token::kDIV: __ fdivd(V0, V0, V1); break; | 1448 case Token::kDIV: __ fdivd(V0, V0, V1); break; |
1449 default: UNREACHABLE(); | 1449 default: UNREACHABLE(); |
1450 } | 1450 } |
1451 const Class& double_class = Class::Handle( | 1451 const Class& double_class = Class::Handle( |
1452 Isolate::Current()->object_store()->double_class()); | 1452 Isolate::Current()->object_store()->double_class()); |
1453 __ TryAllocate(double_class, &fall_through, R0, R1, kNoPP); | 1453 __ TryAllocate(double_class, &fall_through, R0, R1); |
1454 __ StoreDFieldToOffset(V0, R0, Double::value_offset(), kNoPP); | 1454 __ StoreDFieldToOffset(V0, R0, Double::value_offset()); |
1455 __ ret(); | 1455 __ ret(); |
1456 __ Bind(&fall_through); | 1456 __ Bind(&fall_through); |
1457 } | 1457 } |
1458 | 1458 |
1459 | 1459 |
1460 void Intrinsifier::Double_add(Assembler* assembler) { | 1460 void Intrinsifier::Double_add(Assembler* assembler) { |
1461 DoubleArithmeticOperations(assembler, Token::kADD); | 1461 DoubleArithmeticOperations(assembler, Token::kADD); |
1462 } | 1462 } |
1463 | 1463 |
1464 | 1464 |
(...skipping 16 matching lines...)
1481 void Intrinsifier::Double_mulFromInteger(Assembler* assembler) { | 1481 void Intrinsifier::Double_mulFromInteger(Assembler* assembler) { |
1482 Label fall_through; | 1482 Label fall_through; |
1483 // Only smis allowed. | 1483 // Only smis allowed. |
1484 __ ldr(R0, Address(SP, 0 * kWordSize)); | 1484 __ ldr(R0, Address(SP, 0 * kWordSize)); |
1485 __ tsti(R0, Immediate(kSmiTagMask)); | 1485 __ tsti(R0, Immediate(kSmiTagMask)); |
1486 __ b(&fall_through, NE); | 1486 __ b(&fall_through, NE); |
1487 // Is Smi. | 1487 // Is Smi. |
1488 __ SmiUntag(R0); | 1488 __ SmiUntag(R0); |
1489 __ scvtfdx(V1, R0); | 1489 __ scvtfdx(V1, R0); |
1490 __ ldr(R0, Address(SP, 1 * kWordSize)); | 1490 __ ldr(R0, Address(SP, 1 * kWordSize)); |
1491 __ LoadDFieldFromOffset(V0, R0, Double::value_offset(), kNoPP); | 1491 __ LoadDFieldFromOffset(V0, R0, Double::value_offset()); |
1492 __ fmuld(V0, V0, V1); | 1492 __ fmuld(V0, V0, V1); |
1493 const Class& double_class = Class::Handle( | 1493 const Class& double_class = Class::Handle( |
1494 Isolate::Current()->object_store()->double_class()); | 1494 Isolate::Current()->object_store()->double_class()); |
1495 __ TryAllocate(double_class, &fall_through, R0, R1, kNoPP); | 1495 __ TryAllocate(double_class, &fall_through, R0, R1); |
1496 __ StoreDFieldToOffset(V0, R0, Double::value_offset(), kNoPP); | 1496 __ StoreDFieldToOffset(V0, R0, Double::value_offset()); |
1497 __ ret(); | 1497 __ ret(); |
1498 __ Bind(&fall_through); | 1498 __ Bind(&fall_through); |
1499 } | 1499 } |
1500 | 1500 |
1501 | 1501 |
1502 void Intrinsifier::DoubleFromInteger(Assembler* assembler) { | 1502 void Intrinsifier::DoubleFromInteger(Assembler* assembler) { |
1503 Label fall_through; | 1503 Label fall_through; |
1504 | 1504 |
1505 __ ldr(R0, Address(SP, 0 * kWordSize)); | 1505 __ ldr(R0, Address(SP, 0 * kWordSize)); |
1506 __ tsti(R0, Immediate(kSmiTagMask)); | 1506 __ tsti(R0, Immediate(kSmiTagMask)); |
1507 __ b(&fall_through, NE); | 1507 __ b(&fall_through, NE); |
1508 // Is Smi. | 1508 // Is Smi. |
1509 __ SmiUntag(R0); | 1509 __ SmiUntag(R0); |
1510 __ scvtfdx(V0, R0); | 1510 __ scvtfdx(V0, R0); |
1511 const Class& double_class = Class::Handle( | 1511 const Class& double_class = Class::Handle( |
1512 Isolate::Current()->object_store()->double_class()); | 1512 Isolate::Current()->object_store()->double_class()); |
1513 __ TryAllocate(double_class, &fall_through, R0, R1, kNoPP); | 1513 __ TryAllocate(double_class, &fall_through, R0, R1); |
1514 __ StoreDFieldToOffset(V0, R0, Double::value_offset(), kNoPP); | 1514 __ StoreDFieldToOffset(V0, R0, Double::value_offset()); |
1515 __ ret(); | 1515 __ ret(); |
1516 __ Bind(&fall_through); | 1516 __ Bind(&fall_through); |
1517 } | 1517 } |
1518 | 1518 |
1519 | 1519 |
1520 void Intrinsifier::Double_getIsNaN(Assembler* assembler) { | 1520 void Intrinsifier::Double_getIsNaN(Assembler* assembler) { |
1521 Label is_true; | 1521 Label is_true; |
1522 __ ldr(R0, Address(SP, 0 * kWordSize)); | 1522 __ ldr(R0, Address(SP, 0 * kWordSize)); |
1523 __ LoadDFieldFromOffset(V0, R0, Double::value_offset(), kNoPP); | 1523 __ LoadDFieldFromOffset(V0, R0, Double::value_offset()); |
1524 __ fcmpd(V0, V0); | 1524 __ fcmpd(V0, V0); |
1525 __ LoadObject(TMP, Bool::False(), PP); | 1525 __ LoadObject(TMP, Bool::False()); |
1526 __ LoadObject(R0, Bool::True(), PP); | 1526 __ LoadObject(R0, Bool::True()); |
1527 __ csel(R0, TMP, R0, VC); | 1527 __ csel(R0, TMP, R0, VC); |
1528 __ ret(); | 1528 __ ret(); |
1529 } | 1529 } |
1530 | 1530 |
1531 | 1531 |
1532 void Intrinsifier::Double_getIsNegative(Assembler* assembler) { | 1532 void Intrinsifier::Double_getIsNegative(Assembler* assembler) { |
1533 const Register false_reg = R0; | 1533 const Register false_reg = R0; |
1534 const Register true_reg = R2; | 1534 const Register true_reg = R2; |
1535 Label is_false, is_true, is_zero; | 1535 Label is_false, is_true, is_zero; |
1536 | 1536 |
1537 __ ldr(R0, Address(SP, 0 * kWordSize)); | 1537 __ ldr(R0, Address(SP, 0 * kWordSize)); |
1538 __ LoadDFieldFromOffset(V0, R0, Double::value_offset(), kNoPP); | 1538 __ LoadDFieldFromOffset(V0, R0, Double::value_offset()); |
1539 __ fcmpdz(V0); | 1539 __ fcmpdz(V0); |
1540 __ LoadObject(true_reg, Bool::True(), PP); | 1540 __ LoadObject(true_reg, Bool::True()); |
1541 __ LoadObject(false_reg, Bool::False(), PP); | 1541 __ LoadObject(false_reg, Bool::False()); |
1542 __ b(&is_false, VS); // NaN -> false. | 1542 __ b(&is_false, VS); // NaN -> false. |
1543 __ b(&is_zero, EQ); // Check for negative zero. | 1543 __ b(&is_zero, EQ); // Check for negative zero. |
1544 __ b(&is_false, CS); // >= 0 -> false. | 1544 __ b(&is_false, CS); // >= 0 -> false. |
1545 | 1545 |
1546 __ Bind(&is_true); | 1546 __ Bind(&is_true); |
1547 __ mov(R0, true_reg); | 1547 __ mov(R0, true_reg); |
1548 | 1548 |
1549 __ Bind(&is_false); | 1549 __ Bind(&is_false); |
1550 __ ret(); | 1550 __ ret(); |
1551 | 1551 |
1552 __ Bind(&is_zero); | 1552 __ Bind(&is_zero); |
1553 // Check for negative zero by looking at the sign bit. | 1553 // Check for negative zero by looking at the sign bit. |
1554 __ fmovrd(R1, V0); | 1554 __ fmovrd(R1, V0); |
1555 __ LsrImmediate(R1, R1, 63); | 1555 __ LsrImmediate(R1, R1, 63); |
1556 __ tsti(R1, Immediate(1)); | 1556 __ tsti(R1, Immediate(1)); |
1557 __ csel(R0, true_reg, false_reg, NE); // Sign bit set. | 1557 __ csel(R0, true_reg, false_reg, NE); // Sign bit set. |
1558 __ ret(); | 1558 __ ret(); |
1559 } | 1559 } |
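-0.0 compares equal to zero, so the is_zero path must inspect the IEEE-754 sign bit directly, which is what the fmovrd/LsrImmediate/tsti sequence does. A portable sketch of the same decision:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      const double z = -0.0;
      assert(z == 0.0);  // fcmpdz sets EQ, so magnitude alone can't decide
      uint64_t bits;
      std::memcpy(&bits, &z, sizeof(bits));  // the fmovrd equivalent
      assert((bits >> 63) == 1);             // sign bit set => isNegative
      return 0;
    }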
1560 | 1560 |
1561 | 1561 |
1562 void Intrinsifier::DoubleToInteger(Assembler* assembler) { | 1562 void Intrinsifier::DoubleToInteger(Assembler* assembler) { |
1563 Label fall_through; | 1563 Label fall_through; |
1564 | 1564 |
1565 __ ldr(R0, Address(SP, 0 * kWordSize)); | 1565 __ ldr(R0, Address(SP, 0 * kWordSize)); |
1566 __ LoadDFieldFromOffset(V0, R0, Double::value_offset(), kNoPP); | 1566 __ LoadDFieldFromOffset(V0, R0, Double::value_offset()); |
1567 | 1567 |
1568 // Explicit NaN check, since ARM gives an FPU exception if you try to | 1568 // Explicit NaN check, since ARM gives an FPU exception if you try to |
1569 // convert NaN to an int. | 1569 // convert NaN to an int. |
1570 __ fcmpd(V0, V0); | 1570 __ fcmpd(V0, V0); |
1571 __ b(&fall_through, VS); | 1571 __ b(&fall_through, VS); |
1572 | 1572 |
1573 __ fcvtzds(R0, V0); | 1573 __ fcvtzds(R0, V0); |
1574 // Overflow is signaled with minint. | 1574 // Overflow is signaled with minint. |
1575 // Check for overflow and that it fits into Smi. | 1575 // Check for overflow and that it fits into Smi. |
1576 __ CompareImmediate(R0, 0xC000000000000000, kNoPP); | 1576 __ CompareImmediate(R0, 0xC000000000000000); |
1577 __ b(&fall_through, MI); | 1577 __ b(&fall_through, MI); |
1578 __ SmiTag(R0); | 1578 __ SmiTag(R0); |
1579 __ ret(); | 1579 __ ret(); |
1580 __ Bind(&fall_through); | 1580 __ Bind(&fall_through); |
1581 } | 1581 } |
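The single CompareImmediate does double duty: subtracting 0xC000000000000000 (== -2^62, the smallest Smi) wraps to a negative result exactly when the converted value lies outside the 63-bit Smi range, which also catches the INT64_MIN that fcvtzds produces on overflow. A sketch of the check (the Smi range [-2^62, 2^62-1] is an assumption stated here):

    #include <cassert>
    #include <cstdint>

    int main() {
      auto fits_smi = [](int64_t r) {
        // CMP computes r - (-2^62) with wraparound; MI means "out of range".
        int64_t diff = static_cast<int64_t>(
            static_cast<uint64_t>(r) - 0xC000000000000000ull);
        return diff >= 0;  // the "not MI" case that falls through to SmiTag
      };
      assert(fits_smi(0));
      assert(fits_smi((int64_t{1} << 62) - 1));  // largest Smi
      assert(fits_smi(-(int64_t{1} << 62)));     // smallest Smi
      assert(!fits_smi(int64_t{1} << 62));       // one past the top
      assert(!fits_smi(INT64_MIN));              // fcvtzds overflow sentinel
      return 0;
    }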
1582 | 1582 |
1583 | 1583 |
1584 void Intrinsifier::MathSqrt(Assembler* assembler) { | 1584 void Intrinsifier::MathSqrt(Assembler* assembler) { |
1585 Label fall_through, is_smi, double_op; | 1585 Label fall_through, is_smi, double_op; |
1586 TestLastArgumentIsDouble(assembler, &is_smi, &fall_through); | 1586 TestLastArgumentIsDouble(assembler, &is_smi, &fall_through); |
1587 // Argument is double and is in R0. | 1587 // Argument is double and is in R0. |
1588 __ LoadDFieldFromOffset(V1, R0, Double::value_offset(), kNoPP); | 1588 __ LoadDFieldFromOffset(V1, R0, Double::value_offset()); |
1589 __ Bind(&double_op); | 1589 __ Bind(&double_op); |
1590 __ fsqrtd(V0, V1); | 1590 __ fsqrtd(V0, V1); |
1591 const Class& double_class = Class::Handle( | 1591 const Class& double_class = Class::Handle( |
1592 Isolate::Current()->object_store()->double_class()); | 1592 Isolate::Current()->object_store()->double_class()); |
1593 __ TryAllocate(double_class, &fall_through, R0, R1, kNoPP); | 1593 __ TryAllocate(double_class, &fall_through, R0, R1); |
1594 __ StoreDFieldToOffset(V0, R0, Double::value_offset(), kNoPP); | 1594 __ StoreDFieldToOffset(V0, R0, Double::value_offset()); |
1595 __ ret(); | 1595 __ ret(); |
1596 __ Bind(&is_smi); | 1596 __ Bind(&is_smi); |
1597 __ SmiUntag(R0); | 1597 __ SmiUntag(R0); |
1598 __ scvtfdx(V1, R0); | 1598 __ scvtfdx(V1, R0); |
1599 __ b(&double_op); | 1599 __ b(&double_op); |
1600 __ Bind(&fall_through); | 1600 __ Bind(&fall_through); |
1601 } | 1601 } |
1602 | 1602 |
1603 | 1603 |
1604 // var state = ((_A * (_state[kSTATE_LO])) + _state[kSTATE_HI]) & _MASK_64; | 1604 // var state = ((_A * (_state[kSTATE_LO])) + _state[kSTATE_HI]) & _MASK_64; |
(...skipping 15 matching lines...) Expand all Loading... |
1620 const Instance& a_value = Instance::Handle(random_A_field.value()); | 1620 const Instance& a_value = Instance::Handle(random_A_field.value()); |
1621 const int64_t a_int_value = Integer::Cast(a_value).AsInt64Value(); | 1621 const int64_t a_int_value = Integer::Cast(a_value).AsInt64Value(); |
1622 | 1622 |
1623 __ ldr(R0, Address(SP, 0 * kWordSize)); // Receiver. | 1623 __ ldr(R0, Address(SP, 0 * kWordSize)); // Receiver. |
1624 __ ldr(R1, FieldAddress(R0, state_field.Offset())); // Field '_state'. | 1624 __ ldr(R1, FieldAddress(R0, state_field.Offset())); // Field '_state'. |
1625 | 1625 |
1626 // Addresses of _state[0]. | 1626 // Addresses of _state[0]. |
1627 const int64_t disp = | 1627 const int64_t disp = |
1628 Instance::DataOffsetFor(kTypedDataUint32ArrayCid) - kHeapObjectTag; | 1628 Instance::DataOffsetFor(kTypedDataUint32ArrayCid) - kHeapObjectTag; |
1629 | 1629 |
1630 __ LoadImmediate(R0, a_int_value, kNoPP); | 1630 __ LoadImmediate(R0, a_int_value); |
1631 __ LoadFromOffset(R2, R1, disp, kNoPP); | 1631 __ LoadFromOffset(R2, R1, disp); |
1632 __ LsrImmediate(R3, R2, 32); | 1632 __ LsrImmediate(R3, R2, 32); |
1633 __ andi(R2, R2, Immediate(0xffffffff)); | 1633 __ andi(R2, R2, Immediate(0xffffffff)); |
1634 __ mul(R2, R0, R2); | 1634 __ mul(R2, R0, R2); |
1635 __ add(R2, R2, Operand(R3)); | 1635 __ add(R2, R2, Operand(R3)); |
1636 __ StoreToOffset(R2, R1, disp, kNoPP); | 1636 __ StoreToOffset(R2, R1, disp); |
1637 __ ret(); | 1637 __ ret(); |
1638 } | 1638 } |
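The update is one multiply-with-carry step on the packed 64-bit state word: state = A * low32(state) + high32(state), everything mod 2^64. A sketch of the same step (the multiplier value is an assumption based on Dart's _Random implementation, not taken from this CL):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t A = 0xffffda61ull;     // assumed _A value, illustration only
      uint64_t state = 42;                  // example seed
      uint64_t lo = state & 0xffffffffull;  // andi(R2, R2, Immediate(0xffffffff))
      uint64_t hi = state >> 32;            // LsrImmediate(R3, R2, 32)
      state = A * lo + hi;                  // mul + add, wraps mod 2^64
      std::printf("next state: %llu\n", static_cast<unsigned long long>(state));
      return 0;
    }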
1639 | 1639 |
1640 | 1640 |
1641 void Intrinsifier::ObjectEquals(Assembler* assembler) { | 1641 void Intrinsifier::ObjectEquals(Assembler* assembler) { |
1642 __ ldr(R0, Address(SP, 0 * kWordSize)); | 1642 __ ldr(R0, Address(SP, 0 * kWordSize)); |
1643 __ ldr(R1, Address(SP, 1 * kWordSize)); | 1643 __ ldr(R1, Address(SP, 1 * kWordSize)); |
1644 __ cmp(R0, Operand(R1)); | 1644 __ cmp(R0, Operand(R1)); |
1645 __ LoadObject(R0, Bool::False(), PP); | 1645 __ LoadObject(R0, Bool::False()); |
1646 __ LoadObject(TMP, Bool::True(), PP); | 1646 __ LoadObject(TMP, Bool::True()); |
1647 __ csel(R0, TMP, R0, EQ); | 1647 __ csel(R0, TMP, R0, EQ); |
1648 __ ret(); | 1648 __ ret(); |
1649 } | 1649 } |
1650 | 1650 |
1651 | 1651 |
1652 // Return type quickly for simple types (not parameterized and not signature). | 1652 // Return type quickly for simple types (not parameterized and not signature). |
1653 void Intrinsifier::ObjectRuntimeType(Assembler* assembler) { | 1653 void Intrinsifier::ObjectRuntimeType(Assembler* assembler) { |
1654 Label fall_through; | 1654 Label fall_through; |
1655 __ ldr(R0, Address(SP, 0 * kWordSize)); | 1655 __ ldr(R0, Address(SP, 0 * kWordSize)); |
1656 __ LoadClassIdMayBeSmi(R1, R0); | 1656 __ LoadClassIdMayBeSmi(R1, R0); |
1657 __ LoadClassById(R2, R1, PP); | 1657 __ LoadClassById(R2, R1); |
1658 // R2: class of instance (R0). | 1658 // R2: class of instance (R0). |
1659 __ ldr(R3, FieldAddress(R2, Class::signature_function_offset())); | 1659 __ ldr(R3, FieldAddress(R2, Class::signature_function_offset())); |
1660 __ CompareObject(R3, Object::null_object(), PP); | 1660 __ CompareObject(R3, Object::null_object()); |
1661 __ b(&fall_through, NE); | 1661 __ b(&fall_through, NE); |
1662 | 1662 |
1663 __ ldr(R3, FieldAddress(R2, Class::num_type_arguments_offset()), kHalfword); | 1663 __ ldr(R3, FieldAddress(R2, Class::num_type_arguments_offset()), kHalfword); |
1664 __ CompareImmediate(R3, 0, kNoPP); | 1664 __ CompareImmediate(R3, 0); |
1665 __ b(&fall_through, NE); | 1665 __ b(&fall_through, NE); |
1666 | 1666 |
1667 __ ldr(R0, FieldAddress(R2, Class::canonical_types_offset())); | 1667 __ ldr(R0, FieldAddress(R2, Class::canonical_types_offset())); |
1668 __ CompareObject(R0, Object::null_object(), PP); | 1668 __ CompareObject(R0, Object::null_object()); |
1669 __ b(&fall_through, EQ); | 1669 __ b(&fall_through, EQ); |
1670 __ ret(); | 1670 __ ret(); |
1671 | 1671 |
1672 __ Bind(&fall_through); | 1672 __ Bind(&fall_through); |
1673 } | 1673 } |
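A hedged C++ sketch of the guard sequence above; ClsInfo and its fields are hypothetical stand-ins for the raw class-field loads in the assembly, and a NULL result means "fall through to the runtime".

#include <stddef.h>

struct ClsInfo {
  void* signature_function;  // non-null only for signature (closure) classes
  int num_type_arguments;    // non-zero only for parameterized classes
  void* canonical_type;      // cached canonical type, or null
};

void* RuntimeTypeFast(const ClsInfo& cls) {
  if (cls.signature_function != NULL) return NULL;  // signature class
  if (cls.num_type_arguments != 0) return NULL;     // parameterized class
  return cls.canonical_type;  // NULL here also defers to the runtime
}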
1674 | 1674 |
1675 | 1675 |
1676 void Intrinsifier::String_getHashCode(Assembler* assembler) { | 1676 void Intrinsifier::String_getHashCode(Assembler* assembler) { |
1677 Label fall_through; | 1677 Label fall_through; |
1678 __ ldr(R0, Address(SP, 0 * kWordSize)); | 1678 __ ldr(R0, Address(SP, 0 * kWordSize)); |
(...skipping 10 matching lines...) |
1689 Label fall_through, try_two_byte_string; | 1689 Label fall_through, try_two_byte_string; |
1690 | 1690 |
1691 __ ldr(R1, Address(SP, 0 * kWordSize)); // Index. | 1691 __ ldr(R1, Address(SP, 0 * kWordSize)); // Index. |
1692 __ ldr(R0, Address(SP, 1 * kWordSize)); // String. | 1692 __ ldr(R0, Address(SP, 1 * kWordSize)); // String. |
1693 __ tsti(R1, Immediate(kSmiTagMask)); | 1693 __ tsti(R1, Immediate(kSmiTagMask)); |
1694 __ b(&fall_through, NE); // Index is not a Smi. | 1694 __ b(&fall_through, NE); // Index is not a Smi. |
1695 // Range check. | 1695 // Range check. |
1696 __ ldr(R2, FieldAddress(R0, String::length_offset())); | 1696 __ ldr(R2, FieldAddress(R0, String::length_offset())); |
1697 __ cmp(R1, Operand(R2)); | 1697 __ cmp(R1, Operand(R2)); |
1698 __ b(&fall_through, CS); // Runtime throws exception. | 1698 __ b(&fall_through, CS); // Runtime throws exception. |
1699 __ CompareClassId(R0, kOneByteStringCid, kNoPP); | 1699 __ CompareClassId(R0, kOneByteStringCid); |
1700 __ b(&try_two_byte_string, NE); | 1700 __ b(&try_two_byte_string, NE); |
1701 __ SmiUntag(R1); | 1701 __ SmiUntag(R1); |
1702 __ AddImmediate(R0, R0, OneByteString::data_offset() - kHeapObjectTag, kNoPP); | 1702 __ AddImmediate(R0, R0, OneByteString::data_offset() - kHeapObjectTag); |
1703 __ ldr(R0, Address(R0, R1), kUnsignedByte); | 1703 __ ldr(R0, Address(R0, R1), kUnsignedByte); |
1704 __ SmiTag(R0); | 1704 __ SmiTag(R0); |
1705 __ ret(); | 1705 __ ret(); |
1706 | 1706 |
1707 __ Bind(&try_two_byte_string); | 1707 __ Bind(&try_two_byte_string); |
1708 __ CompareClassId(R0, kTwoByteStringCid, kNoPP); | 1708 __ CompareClassId(R0, kTwoByteStringCid); |
1709 __ b(&fall_through, NE); | 1709 __ b(&fall_through, NE); |
1710 ASSERT(kSmiTagShift == 1); | 1710 ASSERT(kSmiTagShift == 1); |
1711 __ AddImmediate(R0, R0, TwoByteString::data_offset() - kHeapObjectTag, kNoPP); | 1711 __ AddImmediate(R0, R0, TwoByteString::data_offset() - kHeapObjectTag); |
1712 __ ldr(R0, Address(R0, R1), kUnsignedHalfword); | 1712 __ ldr(R0, Address(R0, R1), kUnsignedHalfword); |
1713 __ SmiTag(R0); | 1713 __ SmiTag(R0); |
1714 __ ret(); | 1714 __ ret(); |
1715 | 1715 |
1716 __ Bind(&fall_through); | 1716 __ Bind(&fall_through); |
1717 } | 1717 } |
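In C++ terms, the codeUnitAt fast path above reduces to an element load sized by class id. A sketch under stated assumptions: the cid constants are placeholders (the real values come from the VM headers), and 'data' points at the string's first code unit.

#include <stdint.h>

enum { kOneByteCid = 1, kTwoByteCid = 2 };  // placeholders for the VM cids

// The Smi and range checks have already routed bad indices to the runtime.
intptr_t CodeUnitAtFast(const void* data, intptr_t cid, intptr_t index) {
  if (cid == kOneByteCid)
    return static_cast<const uint8_t*>(data)[index];   // kUnsignedByte load
  if (cid == kTwoByteCid)
    return static_cast<const uint16_t*>(data)[index];  // kUnsignedHalfword load
  return -1;  // any other class falls through to the runtime
}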
1718 | 1718 |
1719 | 1719 |
1720 void Intrinsifier::StringBaseCharAt(Assembler* assembler) { | 1720 void Intrinsifier::StringBaseCharAt(Assembler* assembler) { |
1721 Label fall_through, try_two_byte_string; | 1721 Label fall_through, try_two_byte_string; |
1722 | 1722 |
1723 __ ldr(R1, Address(SP, 0 * kWordSize)); // Index. | 1723 __ ldr(R1, Address(SP, 0 * kWordSize)); // Index. |
1724 __ ldr(R0, Address(SP, 1 * kWordSize)); // String. | 1724 __ ldr(R0, Address(SP, 1 * kWordSize)); // String. |
1725 __ tsti(R1, Immediate(kSmiTagMask)); | 1725 __ tsti(R1, Immediate(kSmiTagMask)); |
1726 __ b(&fall_through, NE); // Index is not a Smi. | 1726 __ b(&fall_through, NE); // Index is not a Smi. |
1727 // Range check. | 1727 // Range check. |
1728 __ ldr(R2, FieldAddress(R0, String::length_offset())); | 1728 __ ldr(R2, FieldAddress(R0, String::length_offset())); |
1729 __ cmp(R1, Operand(R2)); | 1729 __ cmp(R1, Operand(R2)); |
1730 __ b(&fall_through, CS); // Runtime throws exception. | 1730 __ b(&fall_through, CS); // Runtime throws exception. |
1731 | 1731 |
1732 __ CompareClassId(R0, kOneByteStringCid, kNoPP); | 1732 __ CompareClassId(R0, kOneByteStringCid); |
1733 __ b(&try_two_byte_string, NE); | 1733 __ b(&try_two_byte_string, NE); |
1734 __ SmiUntag(R1); | 1734 __ SmiUntag(R1); |
1735 __ AddImmediate(R0, R0, OneByteString::data_offset() - kHeapObjectTag, kNoPP); | 1735 __ AddImmediate(R0, R0, OneByteString::data_offset() - kHeapObjectTag); |
1736 __ ldr(R1, Address(R0, R1), kUnsignedByte); | 1736 __ ldr(R1, Address(R0, R1), kUnsignedByte); |
1737 __ CompareImmediate(R1, Symbols::kNumberOfOneCharCodeSymbols, kNoPP); | 1737 __ CompareImmediate(R1, Symbols::kNumberOfOneCharCodeSymbols); |
1738 __ b(&fall_through, GE); | 1738 __ b(&fall_through, GE); |
1739 const ExternalLabel symbols_label( | 1739 const ExternalLabel symbols_label( |
1740 reinterpret_cast<uword>(Symbols::PredefinedAddress())); | 1740 reinterpret_cast<uword>(Symbols::PredefinedAddress())); |
1741 __ TagAndPushPP(); | 1741 __ TagAndPushPP(); |
1742 __ LoadPoolPointer(PP); | 1742 __ LoadPoolPointer(); |
1743 assembler->set_constant_pool_allowed(true); | 1743 __ LoadExternalLabel(R0, &symbols_label); |
1744 __ LoadExternalLabel(R0, &symbols_label, kNotPatchable, PP); | |
1745 assembler->set_constant_pool_allowed(false); | |
1746 __ PopAndUntagPP(); | 1744 __ PopAndUntagPP(); |
1747 __ AddImmediate( | 1745 __ AddImmediate( |
1748 R0, R0, Symbols::kNullCharCodeSymbolOffset * kWordSize, kNoPP); | 1746 R0, R0, Symbols::kNullCharCodeSymbolOffset * kWordSize); |
1749 __ ldr(R0, Address(R0, R1, UXTX, Address::Scaled)); | 1747 __ ldr(R0, Address(R0, R1, UXTX, Address::Scaled)); |
1750 __ ret(); | 1748 __ ret(); |
1751 | 1749 |
1752 __ Bind(&try_two_byte_string); | 1750 __ Bind(&try_two_byte_string); |
1753 __ CompareClassId(R0, kTwoByteStringCid, kNoPP); | 1751 __ CompareClassId(R0, kTwoByteStringCid); |
1754 __ b(&fall_through, NE); | 1752 __ b(&fall_through, NE); |
1755 ASSERT(kSmiTagShift == 1); | 1753 ASSERT(kSmiTagShift == 1); |
1756 __ AddImmediate(R0, R0, TwoByteString::data_offset() - kHeapObjectTag, kNoPP); | 1754 __ AddImmediate(R0, R0, TwoByteString::data_offset() - kHeapObjectTag); |
1757 __ ldr(R1, Address(R0, R1), kUnsignedHalfword); | 1755 __ ldr(R1, Address(R0, R1), kUnsignedHalfword); |
1758 __ CompareImmediate(R1, Symbols::kNumberOfOneCharCodeSymbols, kNoPP); | 1756 __ CompareImmediate(R1, Symbols::kNumberOfOneCharCodeSymbols); |
1759 __ b(&fall_through, GE); | 1757 __ b(&fall_through, GE); |
1760 __ TagAndPushPP(); | 1758 __ TagAndPushPP(); |
1761 __ LoadPoolPointer(PP); | 1759 __ LoadPoolPointer(); |
1762 assembler->set_constant_pool_allowed(true); | 1760 __ LoadExternalLabel(R0, &symbols_label); |
1763 __ LoadExternalLabel(R0, &symbols_label, kNotPatchable, PP); | |
1764 assembler->set_constant_pool_allowed(false); | |
1765 __ PopAndUntagPP(); | 1761 __ PopAndUntagPP(); |
1766 __ AddImmediate( | 1762 __ AddImmediate( |
1767 R0, R0, Symbols::kNullCharCodeSymbolOffset * kWordSize, kNoPP); | 1763 R0, R0, Symbols::kNullCharCodeSymbolOffset * kWordSize); |
1768 __ ldr(R0, Address(R0, R1, UXTX, Address::Scaled)); | 1764 __ ldr(R0, Address(R0, R1, UXTX, Address::Scaled)); |
1769 __ ret(); | 1765 __ ret(); |
1770 | 1766 |
1771 __ Bind(&fall_through); | 1767 __ Bind(&fall_through); |
1772 } | 1768 } |
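The charAt fast path above returns a canonical one-character symbol instead of allocating a new string. A sketch, where 'predefined' stands for the Symbols::PredefinedAddress() table and the two constants are placeholders for the ones named in the code (real values live in symbols.h).

#include <stdint.h>

const intptr_t kNumberOfOneCharCodeSymbols = 256;  // placeholder value
const intptr_t kNullCharCodeSymbolOffset = 0;      // placeholder value

void* CharAtFast(uint16_t code_unit, void** predefined) {
  if (code_unit >= kNumberOfOneCharCodeSymbols) return 0;  // runtime path
  // Scaled, word-sized load from the predefined-symbols table.
  return predefined[kNullCharCodeSymbolOffset + code_unit];
}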
1773 | 1769 |
1774 | 1770 |
1775 void Intrinsifier::StringBaseIsEmpty(Assembler* assembler) { | 1771 void Intrinsifier::StringBaseIsEmpty(Assembler* assembler) { |
1776 __ ldr(R0, Address(SP, 0 * kWordSize)); | 1772 __ ldr(R0, Address(SP, 0 * kWordSize)); |
1777 __ ldr(R0, FieldAddress(R0, String::length_offset())); | 1773 __ ldr(R0, FieldAddress(R0, String::length_offset())); |
1778 __ cmp(R0, Operand(Smi::RawValue(0))); | 1774 __ cmp(R0, Operand(Smi::RawValue(0))); |
1779 __ LoadObject(R0, Bool::True(), PP); | 1775 __ LoadObject(R0, Bool::True()); |
1780 __ LoadObject(TMP, Bool::False(), PP); | 1776 __ LoadObject(TMP, Bool::False()); |
1781 __ csel(R0, TMP, R0, NE); | 1777 __ csel(R0, TMP, R0, NE); |
1782 __ ret(); | 1778 __ ret(); |
1783 } | 1779 } |
1784 | 1780 |
1785 | 1781 |
1786 void Intrinsifier::OneByteString_getHashCode(Assembler* assembler) { | 1782 void Intrinsifier::OneByteString_getHashCode(Assembler* assembler) { |
1787 Label compute_hash; | 1783 Label compute_hash; |
1788 __ ldr(R1, Address(SP, 0 * kWordSize)); // OneByteString object. | 1784 __ ldr(R1, Address(SP, 0 * kWordSize)); // OneByteString object. |
1789 __ ldr(R0, FieldAddress(R1, String::hash_offset())); | 1785 __ ldr(R0, FieldAddress(R1, String::hash_offset())); |
1790 __ CompareRegisters(R0, ZR); | 1786 __ CompareRegisters(R0, ZR); |
1791 __ b(&compute_hash, EQ); | 1787 __ b(&compute_hash, EQ); |
1792 __ ret(); // Return if already computed. | 1788 __ ret(); // Return if already computed. |
1793 | 1789 |
1794 __ Bind(&compute_hash); | 1790 __ Bind(&compute_hash); |
1795 __ ldr(R2, FieldAddress(R1, String::length_offset())); | 1791 __ ldr(R2, FieldAddress(R1, String::length_offset())); |
1796 __ SmiUntag(R2); | 1792 __ SmiUntag(R2); |
1797 | 1793 |
1798 Label done; | 1794 Label done; |
1799 // If the string is empty, set the hash to 1, and return. | 1795 // If the string is empty, set the hash to 1, and return. |
1800 __ CompareRegisters(R2, ZR); | 1796 __ CompareRegisters(R2, ZR); |
1801 __ b(&done, EQ); | 1797 __ b(&done, EQ); |
1802 | 1798 |
1803 __ mov(R3, ZR); | 1799 __ mov(R3, ZR); |
1804 __ AddImmediate(R6, R1, OneByteString::data_offset() - kHeapObjectTag, kNoPP); | 1800 __ AddImmediate(R6, R1, OneByteString::data_offset() - kHeapObjectTag); |
1805 // R1: Instance of OneByteString. | 1801 // R1: Instance of OneByteString. |
1806 // R2: String length, untagged integer. | 1802 // R2: String length, untagged integer. |
1807 // R3: Loop counter, untagged integer. | 1803 // R3: Loop counter, untagged integer. |
1808 // R6: String data. | 1804 // R6: String data. |
1809 // R0: Hash code, untagged integer. | 1805 // R0: Hash code, untagged integer. |
1810 | 1806 |
1811 Label loop; | 1807 Label loop; |
1812 // Add each character to the hash code (hash_ is a uint32): | 1808 // Add each character to the hash code (hash_ is a uint32): |
1813 // hash_ += ch; | 1809 // hash_ += ch; |
1814 // hash_ += hash_ << 10; | 1810 // hash_ += hash_ << 10; |
(...skipping 11 matching lines...) |
1826 | 1822 |
1827 // Finalize. | 1823 // Finalize. |
1828 // hash_ += hash_ << 3; | 1824 // hash_ += hash_ << 3; |
1829 // hash_ ^= hash_ >> 11; | 1825 // hash_ ^= hash_ >> 11; |
1830 // hash_ += hash_ << 15; | 1826 // hash_ += hash_ << 15; |
1831 __ addw(R0, R0, Operand(R0, LSL, 3)); | 1827 __ addw(R0, R0, Operand(R0, LSL, 3)); |
1832 __ eorw(R0, R0, Operand(R0, LSR, 11)); | 1828 __ eorw(R0, R0, Operand(R0, LSR, 11)); |
1833 __ addw(R0, R0, Operand(R0, LSL, 15)); | 1829 __ addw(R0, R0, Operand(R0, LSL, 15)); |
1834 // hash_ = hash_ & ((static_cast<intptr_t>(1) << bits) - 1); | 1830 // hash_ = hash_ & ((static_cast<intptr_t>(1) << bits) - 1); |
1835 __ AndImmediate( | 1831 __ AndImmediate( |
1836 R0, R0, (static_cast<intptr_t>(1) << String::kHashBits) - 1, kNoPP); | 1832 R0, R0, (static_cast<intptr_t>(1) << String::kHashBits) - 1); |
1837 __ CompareRegisters(R0, ZR); | 1833 __ CompareRegisters(R0, ZR); |
1838 // return hash_ == 0 ? 1 : hash_; | 1834 // return hash_ == 0 ? 1 : hash_; |
1839 __ Bind(&done); | 1835 __ Bind(&done); |
1840 __ csinc(R0, R0, ZR, NE); // R0 <- (R0 != 0) ? R0 : (ZR + 1). | 1836 __ csinc(R0, R0, ZR, NE); // R0 <- (R0 != 0) ? R0 : (ZR + 1). |
1841 __ SmiTag(R0); | 1837 __ SmiTag(R0); |
1842 __ str(R0, FieldAddress(R1, String::hash_offset())); | 1838 __ str(R0, FieldAddress(R1, String::hash_offset())); |
1843 __ ret(); | 1839 __ ret(); |
1844 } | 1840 } |
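The elided loop body is the classic one-at-a-time mixing step. A hedged C++ reconstruction of the whole computation follows; the `hash ^= hash >> 6` line is assumed from the elided comments, and 'hash_bits' stands for String::kHashBits.

#include <stdint.h>

uint32_t OneByteStringHash(const uint8_t* data, intptr_t length,
                           int hash_bits) {  // hash_bits < 32 assumed
  uint32_t hash = 0;
  for (intptr_t i = 0; i < length; i++) {
    hash += data[i];     // hash_ += ch;
    hash += hash << 10;  // hash_ += hash_ << 10;
    hash ^= hash >> 6;   // assumed mixing step (elided above)
  }
  hash += hash << 3;     // finalization, per the comments above
  hash ^= hash >> 11;
  hash += hash << 15;
  hash &= (static_cast<uint32_t>(1) << hash_bits) - 1;
  return (hash == 0) ? 1 : hash;  // the csinc: a zero hash becomes 1
}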
1845 | 1841 |
1846 | 1842 |
1847 // Allocates a one-byte string of length 'end - start'. The content is not | 1843 // Allocates a one-byte string of length 'end - start'. The content is not |
1848 // initialized. | 1844 // initialized. |
1849 // 'length_reg' (R2) contains the tagged length. | 1845 // 'length_reg' (R2) contains the tagged length. |
1850 // Returns new string as tagged pointer in R0. | 1846 // Returns new string as tagged pointer in R0. |
1851 static void TryAllocateOnebyteString(Assembler* assembler, | 1847 static void TryAllocateOnebyteString(Assembler* assembler, |
1852 Label* ok, | 1848 Label* ok, |
1853 Label* failure) { | 1849 Label* failure) { |
1854 const Register length_reg = R2; | 1850 const Register length_reg = R2; |
1855 Label fail; | 1851 Label fail; |
1856 __ MaybeTraceAllocation(kOneByteStringCid, R0, kNoPP, failure); | 1852 __ MaybeTraceAllocation(kOneByteStringCid, R0, failure); |
1857 __ mov(R6, length_reg); // Save the length register. | 1853 __ mov(R6, length_reg); // Save the length register. |
1858 // TODO(koda): Protect against negative length and overflow here. | 1854 // TODO(koda): Protect against negative length and overflow here. |
1859 __ SmiUntag(length_reg); | 1855 __ SmiUntag(length_reg); |
1860 const intptr_t fixed_size = sizeof(RawString) + kObjectAlignment - 1; | 1856 const intptr_t fixed_size = sizeof(RawString) + kObjectAlignment - 1; |
1861 __ AddImmediate(length_reg, length_reg, fixed_size, kNoPP); | 1857 __ AddImmediate(length_reg, length_reg, fixed_size); |
1862 __ andi(length_reg, length_reg, Immediate(~(kObjectAlignment - 1))); | 1858 __ andi(length_reg, length_reg, Immediate(~(kObjectAlignment - 1))); |
1863 | 1859 |
1864 Isolate* isolate = Isolate::Current(); | 1860 Isolate* isolate = Isolate::Current(); |
1865 Heap* heap = isolate->heap(); | 1861 Heap* heap = isolate->heap(); |
1866 const intptr_t cid = kOneByteStringCid; | 1862 const intptr_t cid = kOneByteStringCid; |
1867 Heap::Space space = heap->SpaceForAllocation(cid); | 1863 Heap::Space space = heap->SpaceForAllocation(cid); |
1868 __ LoadImmediate(R3, heap->TopAddress(space), kNoPP); | 1864 __ LoadImmediate(R3, heap->TopAddress(space)); |
1869 __ ldr(R0, Address(R3)); | 1865 __ ldr(R0, Address(R3)); |
1870 | 1866 |
1871 // length_reg: allocation size. | 1867 // length_reg: allocation size. |
1872 __ adds(R1, R0, Operand(length_reg)); | 1868 __ adds(R1, R0, Operand(length_reg)); |
1873 __ b(&fail, CS); // Fail on unsigned overflow. | 1869 __ b(&fail, CS); // Fail on unsigned overflow. |
1874 | 1870 |
1875 // Check if the allocation fits into the remaining space. | 1871 // Check if the allocation fits into the remaining space. |
1876 // R0: potential new object start. | 1872 // R0: potential new object start. |
1877 // R1: potential next object start. | 1873 // R1: potential next object start. |
1878 // R2: allocation size. | 1874 // R2: allocation size. |
1879 // R3: heap->TopAddress(space). | 1875 // R3: heap->TopAddress(space). |
1880 __ LoadImmediate(R7, heap->EndAddress(space), kNoPP); | 1876 __ LoadImmediate(R7, heap->EndAddress(space)); |
1881 __ ldr(R7, Address(R7)); | 1877 __ ldr(R7, Address(R7)); |
1882 __ cmp(R1, Operand(R7)); | 1878 __ cmp(R1, Operand(R7)); |
1883 __ b(&fail, CS); | 1879 __ b(&fail, CS); |
1884 | 1880 |
1885 // Successfully allocated the object(s), now update top to point to | 1881 // Successfully allocated the object(s), now update top to point to |
1886 // next object start and initialize the object. | 1882 // next object start and initialize the object. |
1887 __ str(R1, Address(R3)); | 1883 __ str(R1, Address(R3)); |
1888 __ AddImmediate(R0, R0, kHeapObjectTag, kNoPP); | 1884 __ AddImmediate(R0, R0, kHeapObjectTag); |
1889 __ UpdateAllocationStatsWithSize(cid, R2, kNoPP, space); | 1885 __ UpdateAllocationStatsWithSize(cid, R2, space); |
1890 | 1886 |
1891 // Initialize the tags. | 1887 // Initialize the tags. |
1892 // R0: new object start as a tagged pointer. | 1888 // R0: new object start as a tagged pointer. |
1893 // R1: new object end address. | 1889 // R1: new object end address. |
1894 // R2: allocation size. | 1890 // R2: allocation size. |
1895 { | 1891 { |
1896 const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2; | 1892 const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2; |
1897 | 1893 |
1898 __ CompareImmediate(R2, RawObject::SizeTag::kMaxSizeTag, kNoPP); | 1894 __ CompareImmediate(R2, RawObject::SizeTag::kMaxSizeTag); |
1899 __ LslImmediate(R2, R2, shift); | 1895 __ LslImmediate(R2, R2, shift); |
1900 __ csel(R2, R2, ZR, LS); | 1896 __ csel(R2, R2, ZR, LS); |
1901 | 1897 |
1902 // Get the class index and insert it into the tags. | 1898 // Get the class index and insert it into the tags. |
1903 // R2: size and bit tags. | 1899 // R2: size and bit tags. |
1904 __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cid), kNoPP); | 1900 __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cid)); |
1905 __ orr(R2, R2, Operand(TMP)); | 1901 __ orr(R2, R2, Operand(TMP)); |
1906 __ str(R2, FieldAddress(R0, String::tags_offset())); // Store tags. | 1902 __ str(R2, FieldAddress(R0, String::tags_offset())); // Store tags. |
1907 } | 1903 } |
1908 | 1904 |
1909 // Set the length field using the saved length (R6). | 1905 // Set the length field using the saved length (R6). |
1910 __ StoreIntoObjectNoBarrier(R0, | 1906 __ StoreIntoObjectNoBarrier(R0, |
1911 FieldAddress(R0, String::length_offset()), | 1907 FieldAddress(R0, String::length_offset()), |
1912 R6); | 1908 R6); |
1913 // Clear hash. | 1909 // Clear hash. |
1914 __ mov(TMP, ZR); | 1910 __ mov(TMP, ZR); |
(...skipping 24 matching lines...) |
1939 __ sub(R2, R2, Operand(TMP)); | 1935 __ sub(R2, R2, Operand(TMP)); |
1940 TryAllocateOnebyteString(assembler, &ok, &fall_through); | 1936 TryAllocateOnebyteString(assembler, &ok, &fall_through); |
1941 __ Bind(&ok); | 1937 __ Bind(&ok); |
1942 // R0: new string as tagged pointer. | 1938 // R0: new string as tagged pointer. |
1943 // Copy string. | 1939 // Copy string. |
1944 __ ldr(R3, Address(SP, kStringOffset)); | 1940 __ ldr(R3, Address(SP, kStringOffset)); |
1945 __ ldr(R1, Address(SP, kStartIndexOffset)); | 1941 __ ldr(R1, Address(SP, kStartIndexOffset)); |
1946 __ SmiUntag(R1); | 1942 __ SmiUntag(R1); |
1947 __ add(R3, R3, Operand(R1)); | 1943 __ add(R3, R3, Operand(R1)); |
1948 // Compute the start address and remove the heap-object tag (- 1). | 1944 // Compute the start address and remove the heap-object tag (- 1). |
1949 __ AddImmediate(R3, R3, OneByteString::data_offset() - 1, kNoPP); | 1945 __ AddImmediate(R3, R3, OneByteString::data_offset() - 1); |
1950 | 1946 |
1951 // R3: Start address to copy from (untagged). | 1947 // R3: Start address to copy from (untagged). |
1952 // R1: Untagged start index. | 1948 // R1: Untagged start index. |
1953 __ ldr(R2, Address(SP, kEndIndexOffset)); | 1949 __ ldr(R2, Address(SP, kEndIndexOffset)); |
1954 __ SmiUntag(R2); | 1950 __ SmiUntag(R2); |
1955 __ sub(R2, R2, Operand(R1)); | 1951 __ sub(R2, R2, Operand(R1)); |
1956 | 1952 |
1957 // R3: Start address to copy from (untagged). | 1953 // R3: Start address to copy from (untagged). |
1958 // R2: Untagged number of bytes to copy. | 1954 // R2: Untagged number of bytes to copy. |
1959 // R0: Tagged result string. | 1955 // R0: Tagged result string. |
1960 // R6: Running pointer into the source string (starts at R3). | 1956 // R6: Running pointer into the source string (starts at R3). |
1961 // R7: Running pointer into the result string (starts at R0). | 1957 // R7: Running pointer into the result string (starts at R0). |
1962 // R1: Scratch register. | 1958 // R1: Scratch register. |
1963 Label loop, done; | 1959 Label loop, done; |
1964 __ cmp(R2, Operand(0)); | 1960 __ cmp(R2, Operand(0)); |
1965 __ b(&done, LE); | 1961 __ b(&done, LE); |
1966 __ mov(R6, R3); | 1962 __ mov(R6, R3); |
1967 __ mov(R7, R0); | 1963 __ mov(R7, R0); |
1968 __ Bind(&loop); | 1964 __ Bind(&loop); |
1969 __ ldr(R1, Address(R6), kUnsignedByte); | 1965 __ ldr(R1, Address(R6), kUnsignedByte); |
1970 __ AddImmediate(R6, R6, 1, kNoPP); | 1966 __ AddImmediate(R6, R6, 1); |
1971 __ sub(R2, R2, Operand(1)); | 1967 __ sub(R2, R2, Operand(1)); |
1972 __ cmp(R2, Operand(0)); | 1968 __ cmp(R2, Operand(0)); |
1973 __ str(R1, FieldAddress(R7, OneByteString::data_offset()), kUnsignedByte); | 1969 __ str(R1, FieldAddress(R7, OneByteString::data_offset()), kUnsignedByte); |
1974 __ AddImmediate(R7, R7, 1, kNoPP); | 1970 __ AddImmediate(R7, R7, 1); |
1975 __ b(&loop, GT); | 1971 __ b(&loop, GT); |
1976 | 1972 |
1977 __ Bind(&done); | 1973 __ Bind(&done); |
1978 __ ret(); | 1974 __ ret(); |
1979 __ Bind(&fall_through); | 1975 __ Bind(&fall_through); |
1980 } | 1976 } |
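For reference, the size computation inside TryAllocateOnebyteString above rounds the header-plus-payload size up to the object alignment. A one-function sketch, with 'header_size' standing for sizeof(RawString) and 'alignment' for kObjectAlignment (both taken from the VM headers):

#include <stdint.h>

// Mirrors the AddImmediate/andi pair in TryAllocateOnebyteString.
intptr_t AllocationSize(intptr_t length, intptr_t header_size,
                        intptr_t alignment) {
  return (length + header_size + alignment - 1) & ~(alignment - 1);
}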
1981 | 1977 |
1982 | 1978 |
1983 void Intrinsifier::OneByteStringSetAt(Assembler* assembler) { | 1979 void Intrinsifier::OneByteStringSetAt(Assembler* assembler) { |
1984 __ ldr(R2, Address(SP, 0 * kWordSize)); // Value. | 1980 __ ldr(R2, Address(SP, 0 * kWordSize)); // Value. |
1985 __ ldr(R1, Address(SP, 1 * kWordSize)); // Index. | 1981 __ ldr(R1, Address(SP, 1 * kWordSize)); // Index. |
1986 __ ldr(R0, Address(SP, 2 * kWordSize)); // OneByteString. | 1982 __ ldr(R0, Address(SP, 2 * kWordSize)); // OneByteString. |
1987 __ SmiUntag(R1); | 1983 __ SmiUntag(R1); |
1988 __ SmiUntag(R2); | 1984 __ SmiUntag(R2); |
1989 __ AddImmediate(R3, R0, OneByteString::data_offset() - kHeapObjectTag, kNoPP); | 1985 __ AddImmediate(R3, R0, OneByteString::data_offset() - kHeapObjectTag); |
1990 __ str(R2, Address(R3, R1), kUnsignedByte); | 1986 __ str(R2, Address(R3, R1), kUnsignedByte); |
1991 __ ret(); | 1987 __ ret(); |
1992 } | 1988 } |
1993 | 1989 |
1994 | 1990 |
1995 void Intrinsifier::OneByteString_allocate(Assembler* assembler) { | 1991 void Intrinsifier::OneByteString_allocate(Assembler* assembler) { |
1996 Label fall_through, ok; | 1992 Label fall_through, ok; |
1997 | 1993 |
1998 __ ldr(R2, Address(SP, 0 * kWordSize)); // Length. | 1994 __ ldr(R2, Address(SP, 0 * kWordSize)); // Length. |
1999 TryAllocateOnebyteString(assembler, &ok, &fall_through); | 1995 TryAllocateOnebyteString(assembler, &ok, &fall_through); |
(...skipping 11 matching lines...) |
2011 __ ldr(R0, Address(SP, 1 * kWordSize)); // This. | 2007 __ ldr(R0, Address(SP, 1 * kWordSize)); // This. |
2012 __ ldr(R1, Address(SP, 0 * kWordSize)); // Other. | 2008 __ ldr(R1, Address(SP, 0 * kWordSize)); // Other. |
2013 | 2009 |
2014 // Are they identical? | 2010 // Are they identical? |
2015 __ cmp(R0, Operand(R1)); | 2011 __ cmp(R0, Operand(R1)); |
2016 __ b(&is_true, EQ); | 2012 __ b(&is_true, EQ); |
2017 | 2013 |
2018 // Is the other object a string with class id 'string_cid'? | 2014 // Is the other object a string with class id 'string_cid'? |
2019 __ tsti(R1, Immediate(kSmiTagMask)); | 2015 __ tsti(R1, Immediate(kSmiTagMask)); |
2020 __ b(&fall_through, EQ); | 2016 __ b(&fall_through, EQ); |
2021 __ CompareClassId(R1, string_cid, kNoPP); | 2017 __ CompareClassId(R1, string_cid); |
2022 __ b(&fall_through, NE); | 2018 __ b(&fall_through, NE); |
2023 | 2019 |
2024 // Have same length? | 2020 // Have same length? |
2025 __ ldr(R2, FieldAddress(R0, String::length_offset())); | 2021 __ ldr(R2, FieldAddress(R0, String::length_offset())); |
2026 __ ldr(R3, FieldAddress(R1, String::length_offset())); | 2022 __ ldr(R3, FieldAddress(R1, String::length_offset())); |
2027 __ cmp(R2, Operand(R3)); | 2023 __ cmp(R2, Operand(R3)); |
2028 __ b(&is_false, NE); | 2024 __ b(&is_false, NE); |
2029 | 2025 |
2030 // Check contents, no fall-through possible. | 2026 // Check contents, no fall-through possible. |
2031 // TODO(zra): try out other sequences. | 2027 // TODO(zra): try out other sequences. |
2032 ASSERT((string_cid == kOneByteStringCid) || | 2028 ASSERT((string_cid == kOneByteStringCid) || |
2033 (string_cid == kTwoByteStringCid)); | 2029 (string_cid == kTwoByteStringCid)); |
2034 const intptr_t offset = (string_cid == kOneByteStringCid) ? | 2030 const intptr_t offset = (string_cid == kOneByteStringCid) ? |
2035 OneByteString::data_offset() : TwoByteString::data_offset(); | 2031 OneByteString::data_offset() : TwoByteString::data_offset(); |
2036 __ AddImmediate(R0, R0, offset - kHeapObjectTag, kNoPP); | 2032 __ AddImmediate(R0, R0, offset - kHeapObjectTag); |
2037 __ AddImmediate(R1, R1, offset - kHeapObjectTag, kNoPP); | 2033 __ AddImmediate(R1, R1, offset - kHeapObjectTag); |
2038 __ SmiUntag(R2); | 2034 __ SmiUntag(R2); |
2039 __ Bind(&loop); | 2035 __ Bind(&loop); |
2040 __ AddImmediate(R2, R2, -1, kNoPP); | 2036 __ AddImmediate(R2, R2, -1); |
2041 __ CompareRegisters(R2, ZR); | 2037 __ CompareRegisters(R2, ZR); |
2042 __ b(&is_true, LT); | 2038 __ b(&is_true, LT); |
2043 if (string_cid == kOneByteStringCid) { | 2039 if (string_cid == kOneByteStringCid) { |
2044 __ ldr(R3, Address(R0), kUnsignedByte); | 2040 __ ldr(R3, Address(R0), kUnsignedByte); |
2045 __ ldr(R4, Address(R1), kUnsignedByte); | 2041 __ ldr(R4, Address(R1), kUnsignedByte); |
2046 __ AddImmediate(R0, R0, 1, kNoPP); | 2042 __ AddImmediate(R0, R0, 1); |
2047 __ AddImmediate(R1, R1, 1, kNoPP); | 2043 __ AddImmediate(R1, R1, 1); |
2048 } else if (string_cid == kTwoByteStringCid) { | 2044 } else if (string_cid == kTwoByteStringCid) { |
2049 __ ldr(R3, Address(R0), kUnsignedHalfword); | 2045 __ ldr(R3, Address(R0), kUnsignedHalfword); |
2050 __ ldr(R4, Address(R1), kUnsignedHalfword); | 2046 __ ldr(R4, Address(R1), kUnsignedHalfword); |
2051 __ AddImmediate(R0, R0, 2, kNoPP); | 2047 __ AddImmediate(R0, R0, 2); |
2052 __ AddImmediate(R1, R1, 2, kNoPP); | 2048 __ AddImmediate(R1, R1, 2); |
2053 } else { | 2049 } else { |
2054 UNIMPLEMENTED(); | 2050 UNIMPLEMENTED(); |
2055 } | 2051 } |
2056 __ cmp(R3, Operand(R4)); | 2052 __ cmp(R3, Operand(R4)); |
2057 __ b(&is_false, NE); | 2053 __ b(&is_false, NE); |
2058 __ b(&loop); | 2054 __ b(&loop); |
2059 | 2055 |
2060 __ Bind(&is_true); | 2056 __ Bind(&is_true); |
2061 __ LoadObject(R0, Bool::True(), PP); | 2057 __ LoadObject(R0, Bool::True()); |
2062 __ ret(); | 2058 __ ret(); |
2063 | 2059 |
2064 __ Bind(&is_false); | 2060 __ Bind(&is_false); |
2065 __ LoadObject(R0, Bool::False(), PP); | 2061 __ LoadObject(R0, Bool::False()); |
2066 __ ret(); | 2062 __ ret(); |
2067 | 2063 |
2068 __ Bind(&fall_through); | 2064 __ Bind(&fall_through); |
2069 } | 2065 } |
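The comparison loop above is, in effect, an element-wise memcmp. A sketch for the one-byte case (the two-byte case is identical with uint16_t elements):

#include <stdint.h>

// Identity, class-id, and length checks have already been performed.
bool StringContentsEqual(const uint8_t* a, const uint8_t* b,
                         intptr_t length) {
  for (intptr_t i = 0; i < length; i++) {
    if (a[i] != b[i]) return false;
  }
  return true;
}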
2070 | 2066 |
2071 | 2067 |
2072 void Intrinsifier::OneByteString_equality(Assembler* assembler) { | 2068 void Intrinsifier::OneByteString_equality(Assembler* assembler) { |
2073 StringEquality(assembler, kOneByteStringCid); | 2069 StringEquality(assembler, kOneByteStringCid); |
2074 } | 2070 } |
2075 | 2071 |
(...skipping 12 matching lines...) |
2088 | 2084 |
2089 // Incoming registers: | 2085 // Incoming registers: |
2090 // R0: Function. (Will be reloaded with the specialized matcher function.) | 2086 // R0: Function. (Will be reloaded with the specialized matcher function.) |
2091 // R4: Arguments descriptor. (Will be preserved.) | 2087 // R4: Arguments descriptor. (Will be preserved.) |
2092 // R5: Unknown. (Must be GC safe on tail call.) | 2088 // R5: Unknown. (Must be GC safe on tail call.) |
2093 | 2089 |
2094 // Load the specialized function pointer into R0. This leverages the fact | 2090 // Load the specialized function pointer into R0. This leverages the fact |
2095 // that the string CIDs and the stored function pointers are in sequence. | 2091 // that the string CIDs and the stored function pointers are in sequence. |
2096 __ ldr(R2, Address(SP, kRegExpParamOffset)); | 2092 __ ldr(R2, Address(SP, kRegExpParamOffset)); |
2097 __ ldr(R1, Address(SP, kStringParamOffset)); | 2093 __ ldr(R1, Address(SP, kStringParamOffset)); |
2098 __ LoadClassId(R1, R1, kNoPP); | 2094 __ LoadClassId(R1, R1); |
2099 __ AddImmediate(R1, R1, -kOneByteStringCid, kNoPP); | 2095 __ AddImmediate(R1, R1, -kOneByteStringCid); |
2100 __ add(R1, R2, Operand(R1, LSL, kWordSizeLog2)); | 2096 __ add(R1, R2, Operand(R1, LSL, kWordSizeLog2)); |
2101 __ ldr(R0, FieldAddress(R1, JSRegExp::function_offset(kOneByteStringCid))); | 2097 __ ldr(R0, FieldAddress(R1, JSRegExp::function_offset(kOneByteStringCid))); |
2102 | 2098 |
2103 // Registers are now set up for the lazy compile stub. It expects the function | 2099 // Registers are now set up for the lazy compile stub. It expects the function |
2104 // in R0, the argument descriptor in R4, and IC-Data in R5. | 2100 // in R0, the argument descriptor in R4, and IC-Data in R5. |
2105 static const intptr_t arg_count = RegExpMacroAssembler::kParamCount; | 2101 static const intptr_t arg_count = RegExpMacroAssembler::kParamCount; |
2106 __ LoadObject(R4, Array::Handle(ArgumentsDescriptor::New(arg_count)), kNoPP); | 2102 __ LoadObject(R4, Array::Handle(ArgumentsDescriptor::New(arg_count))); |
2107 __ eor(R5, R5, Operand(R5)); | 2103 __ eor(R5, R5, Operand(R5)); |
2108 | 2104 |
2109 // Tail-call the function. | 2105 // Tail-call the function. |
2110 __ ldr(R1, FieldAddress(R0, Function::instructions_offset())); | 2106 __ ldr(R1, FieldAddress(R0, Function::instructions_offset())); |
2111 __ AddImmediate(R1, R1, Instructions::HeaderSize() - kHeapObjectTag, kNoPP); | 2107 __ AddImmediate(R1, R1, Instructions::HeaderSize() - kHeapObjectTag); |
2112 __ br(R1); | 2108 __ br(R1); |
2113 } | 2109 } |
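The lookup above works because the specialized matcher fields are laid out contiguously, in the same order as the string class ids. Schematically (a sketch, with 'functions' standing for the block of fields that begins at JSRegExp::function_offset(kOneByteStringCid)):

#include <stdint.h>

// Subtracting the base cid turns the subject string's class id into a
// direct index into the matcher-function slots.
void* MatcherFor(void** functions, intptr_t string_cid,
                 intptr_t one_byte_string_cid) {
  return functions[string_cid - one_byte_string_cid];
}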
2114 | 2110 |
2115 | 2111 |
2116 // On stack: user tag (+0). | 2112 // On stack: user tag (+0). |
2117 void Intrinsifier::UserTag_makeCurrent(Assembler* assembler) { | 2113 void Intrinsifier::UserTag_makeCurrent(Assembler* assembler) { |
2118 // R1: Isolate. | 2114 // R1: Isolate. |
2119 __ LoadIsolate(R1); | 2115 __ LoadIsolate(R1); |
2120 // R0: Current user tag. | 2116 // R0: Current user tag. |
2121 __ ldr(R0, Address(R1, Isolate::current_tag_offset())); | 2117 __ ldr(R0, Address(R1, Isolate::current_tag_offset())); |
(...skipping 18 matching lines...) |
2140 | 2136 |
2141 void Intrinsifier::Profiler_getCurrentTag(Assembler* assembler) { | 2137 void Intrinsifier::Profiler_getCurrentTag(Assembler* assembler) { |
2142 __ LoadIsolate(R0); | 2138 __ LoadIsolate(R0); |
2143 __ ldr(R0, Address(R0, Isolate::current_tag_offset())); | 2139 __ ldr(R0, Address(R0, Isolate::current_tag_offset())); |
2144 __ ret(); | 2140 __ ret(); |
2145 } | 2141 } |
2146 | 2142 |
2147 } // namespace dart | 2143 } // namespace dart |
2148 | 2144 |
2149 #endif // defined TARGET_ARCH_ARM64 | 2145 #endif // defined TARGET_ARCH_ARM64 |