OLD | NEW |
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM. | 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM. |
6 #if defined(TARGET_ARCH_ARM) | 6 #if defined(TARGET_ARCH_ARM) |
7 | 7 |
8 #include "vm/intrinsifier.h" | 8 #include "vm/intrinsifier.h" |
9 | 9 |
10 #include "vm/assembler.h" | 10 #include "vm/assembler.h" |
(...skipping 12 matching lines...) |
23 // R4: Arguments descriptor | 23 // R4: Arguments descriptor |
24 // LR: Return address | 24 // LR: Return address |
25 // The R4 register can be destroyed only if there is no slow-path, i.e. | 25 // The R4 register can be destroyed only if there is no slow-path, i.e. |
26 // if the intrinsified method always executes a return. | 26 // if the intrinsified method always executes a return. |
27 // The FP register should not be modified, because it is used by the profiler. | 27 // The FP register should not be modified, because it is used by the profiler. |
28 // The PP and THR registers (see constants_arm.h) must be preserved. | 28 // The PP and THR registers (see constants_arm.h) must be preserved. |
29 | 29 |
30 #define __ assembler-> | 30 #define __ assembler-> |
31 | 31 |
32 | 32 |
33 intptr_t Intrinsifier::ParameterSlotFromSp() { return -1; } | 33 intptr_t Intrinsifier::ParameterSlotFromSp() { |
| 34 return -1; |
| 35 } |
34 | 36 |
35 | 37 |
36 static bool IsABIPreservedRegister(Register reg) { | 38 static bool IsABIPreservedRegister(Register reg) { |
37 return ((1 << reg) & kAbiPreservedCpuRegs) != 0; | 39 return ((1 << reg) & kAbiPreservedCpuRegs) != 0; |
38 } | 40 } |
39 | 41 |
40 | 42 |
41 void Intrinsifier::IntrinsicCallPrologue(Assembler* assembler) { | 43 void Intrinsifier::IntrinsicCallPrologue(Assembler* assembler) { |
42 ASSERT(IsABIPreservedRegister(CODE_REG)); | 44 ASSERT(IsABIPreservedRegister(CODE_REG)); |
43 ASSERT(IsABIPreservedRegister(ARGS_DESC_REG)); | 45 ASSERT(IsABIPreservedRegister(ARGS_DESC_REG)); |
(...skipping 28 matching lines...) |
72 | 74 |
73 // Range check. | 75 // Range check. |
74 __ ldr(R3, FieldAddress(R0, Array::length_offset())); // Array length. | 76 __ ldr(R3, FieldAddress(R0, Array::length_offset())); // Array length. |
75 __ cmp(R1, Operand(R3)); | 77 __ cmp(R1, Operand(R3)); |
76 // Runtime throws exception. | 78 // Runtime throws exception. |
77 __ b(&fall_through, CS); | 79 __ b(&fall_through, CS); |
78 | 80 |
79 // Note that R1 is Smi, i.e., times 2. | 81 // Note that R1 is Smi, i.e., times 2. |
80 ASSERT(kSmiTagShift == 1); | 82 ASSERT(kSmiTagShift == 1); |
81 __ ldr(R2, Address(SP, 0 * kWordSize)); // Value. | 83 __ ldr(R2, Address(SP, 0 * kWordSize)); // Value. |
82 __ add(R1, R0, Operand(R1, LSL, 1)); // R1 is Smi. | 84 __ add(R1, R0, Operand(R1, LSL, 1)); // R1 is Smi. |
83 __ StoreIntoObject(R0, FieldAddress(R1, Array::data_offset()), R2); | 85 __ StoreIntoObject(R0, FieldAddress(R1, Array::data_offset()), R2); |
84 // Caller is responsible for preserving the value if necessary. | 86 // Caller is responsible for preserving the value if necessary. |
85 __ Ret(); | 87 __ Ret(); |
86 __ Bind(&fall_through); | 88 __ Bind(&fall_through); |
87 } | 89 } |
88 | 90 |
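The store sequence above leans on the Smi representation: kSmiTagShift == 1, so a tagged small integer is the value shifted left once with a zero tag bit. A tagged index is therefore already index*2, and one further LSL #1 turns it into a 4-byte word offset. A minimal sketch of that arithmetic, assuming a 32-bit target and hypothetical helper names:

    #include <cstdint>

    constexpr uint32_t kSmiTagMask = 1;  // Low bit clear == Smi.
    constexpr int kSmiTagShift = 1;

    static inline bool IsSmi(uint32_t v) { return (v & kSmiTagMask) == 0; }
    static inline int32_t SmiValue(uint32_t v) {
      return static_cast<int32_t>(v) >> kSmiTagShift;
    }

    // A tagged index equals value*2, so one more left shift yields value*4:
    // the byte offset of a 4-byte element, matching
    // add(R1, R0, Operand(R1, LSL, 1)) above.
    static inline uint32_t WordByteOffset(uint32_t tagged_index) {
      return tagged_index << 1;  // == SmiValue(tagged_index) * kWordSize
    }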
89 | 91 |
90 // Allocate a GrowableObjectArray using the backing array specified. | 92 // Allocate a GrowableObjectArray using the backing array specified. |
91 // On stack: type argument (+1), data (+0). | 93 // On stack: type argument (+1), data (+0). |
92 void Intrinsifier::GrowableArray_Allocate(Assembler* assembler) { | 94 void Intrinsifier::GrowableArray_Allocate(Assembler* assembler) { |
93 // The newly allocated object is returned in R0. | 95 // The newly allocated object is returned in R0. |
94 const intptr_t kTypeArgumentsOffset = 1 * kWordSize; | 96 const intptr_t kTypeArgumentsOffset = 1 * kWordSize; |
95 const intptr_t kArrayOffset = 0 * kWordSize; | 97 const intptr_t kArrayOffset = 0 * kWordSize; |
96 Label fall_through; | 98 Label fall_through; |
97 | 99 |
98 // Try allocating in new space. | 100 // Try allocating in new space. |
99 const Class& cls = Class::Handle( | 101 const Class& cls = Class::Handle( |
100 Isolate::Current()->object_store()->growable_object_array_class()); | 102 Isolate::Current()->object_store()->growable_object_array_class()); |
101 __ TryAllocate(cls, &fall_through, R0, R1); | 103 __ TryAllocate(cls, &fall_through, R0, R1); |
102 | 104 |
103 // Store backing array object in growable array object. | 105 // Store backing array object in growable array object. |
104 __ ldr(R1, Address(SP, kArrayOffset)); // Data argument. | 106 __ ldr(R1, Address(SP, kArrayOffset)); // Data argument. |
105 // R0 is new, no barrier needed. | 107 // R0 is new, no barrier needed. |
106 __ StoreIntoObjectNoBarrier( | 108 __ StoreIntoObjectNoBarrier( |
107 R0, | 109 R0, FieldAddress(R0, GrowableObjectArray::data_offset()), R1); |
108 FieldAddress(R0, GrowableObjectArray::data_offset()), | |
109 R1); | |
110 | 110 |
111 // R0: new growable array object start as a tagged pointer. | 111 // R0: new growable array object start as a tagged pointer. |
112 // Store the type argument field in the growable array object. | 112 // Store the type argument field in the growable array object. |
113 __ ldr(R1, Address(SP, kTypeArgumentsOffset)); // Type argument. | 113 __ ldr(R1, Address(SP, kTypeArgumentsOffset)); // Type argument. |
114 __ StoreIntoObjectNoBarrier( | 114 __ StoreIntoObjectNoBarrier( |
115 R0, | 115 R0, FieldAddress(R0, GrowableObjectArray::type_arguments_offset()), R1); |
116 FieldAddress(R0, GrowableObjectArray::type_arguments_offset()), | |
117 R1); | |
118 | 116 |
119 // Set the length field in the growable array object to 0. | 117 // Set the length field in the growable array object to 0. |
120 __ LoadImmediate(R1, 0); | 118 __ LoadImmediate(R1, 0); |
121 __ StoreIntoObjectNoBarrier( | 119 __ StoreIntoObjectNoBarrier( |
122 R0, | 120 R0, FieldAddress(R0, GrowableObjectArray::length_offset()), R1); |
123 FieldAddress(R0, GrowableObjectArray::length_offset()), | |
124 R1); | |
125 __ Ret(); // Returns the newly allocated object in R0. | 121 __ Ret(); // Returns the newly allocated object in R0. |
126 | 122 |
127 __ Bind(&fall_through); | 123 __ Bind(&fall_through); |
128 } | 124 } |
129 | 125 |
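All three stores use StoreIntoObjectNoBarrier because TryAllocate just placed R0 in new space. A generational write barrier only has to record stores of new-space pointers into old-space containers; a new-space container is scavenged anyway. A rough sketch of that filter (illustrative predicate, not the VM's actual API):

    // Illustrative only: the condition a generational write barrier records.
    bool StoreNeedsBarrier(bool container_is_old, bool value_is_new_space) {
      // A freshly allocated container is in new space, so the scavenger
      // will visit its fields regardless; no remembered-set entry needed.
      return container_is_old && value_is_new_space;
    }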
130 | 126 |
131 // Add an element to the growable array if it doesn't need to grow, otherwise | 127 // Add an element to the growable array if it doesn't need to grow, otherwise |
132 // call into regular code. | 128 // call into regular code. |
133 // On stack: growable array (+1), value (+0). | 129 // On stack: growable array (+1), value (+0). |
134 void Intrinsifier::GrowableArray_add(Assembler* assembler) { | 130 void Intrinsifier::GrowableArray_add(Assembler* assembler) { |
(...skipping 25 matching lines...) |
160 __ LoadObject(R0, Object::null_object()); | 156 __ LoadObject(R0, Object::null_object()); |
161 __ Ret(); | 157 __ Ret(); |
162 __ Bind(&fall_through); | 158 __ Bind(&fall_through); |
163 } | 159 } |
164 | 160 |
165 | 161 |
166 #define TYPED_ARRAY_ALLOCATION(type_name, cid, max_len, scale_shift) \ | 162 #define TYPED_ARRAY_ALLOCATION(type_name, cid, max_len, scale_shift) \ |
167 Label fall_through; \ | 163 Label fall_through; \ |
168 const intptr_t kArrayLengthStackOffset = 0 * kWordSize; \ | 164 const intptr_t kArrayLengthStackOffset = 0 * kWordSize; \ |
169 NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, R2, &fall_through)); \ | 165 NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, R2, &fall_through)); \ |
170 __ ldr(R2, Address(SP, kArrayLengthStackOffset)); /* Array length. */ \ | 166 __ ldr(R2, Address(SP, kArrayLengthStackOffset)); /* Array length. */ \ |
171 /* Check that length is a positive Smi. */ \ | 167 /* Check that length is a positive Smi. */ \ |
172 /* R2: requested array length argument. */ \ | 168 /* R2: requested array length argument. */ \ |
173 __ tst(R2, Operand(kSmiTagMask)); \ | 169 __ tst(R2, Operand(kSmiTagMask)); \ |
174 __ b(&fall_through, NE); \ | 170 __ b(&fall_through, NE); \ |
175 __ CompareImmediate(R2, 0); \ | 171 __ CompareImmediate(R2, 0); \ |
176 __ b(&fall_through, LT); \ | 172 __ b(&fall_through, LT); \ |
177 __ SmiUntag(R2); \ | 173 __ SmiUntag(R2); \ |
178 /* Check for maximum allowed length. */ \ | 174 /* Check for maximum allowed length. */ \ |
179 /* R2: untagged array length. */ \ | 175 /* R2: untagged array length. */ \ |
180 __ CompareImmediate(R2, max_len); \ | 176 __ CompareImmediate(R2, max_len); \ |
181 __ b(&fall_through, GT); \ | 177 __ b(&fall_through, GT); \ |
182 __ mov(R2, Operand(R2, LSL, scale_shift)); \ | 178 __ mov(R2, Operand(R2, LSL, scale_shift)); \ |
183 const intptr_t fixed_size = sizeof(Raw##type_name) + kObjectAlignment - 1; \ | 179 const intptr_t fixed_size = sizeof(Raw##type_name) + kObjectAlignment - 1; \ |
184 __ AddImmediate(R2, fixed_size); \ | 180 __ AddImmediate(R2, fixed_size); \ |
185 __ bic(R2, R2, Operand(kObjectAlignment - 1)); \ | 181 __ bic(R2, R2, Operand(kObjectAlignment - 1)); \ |
186 Heap::Space space = Heap::kNew; \ | 182 Heap::Space space = Heap::kNew; \ |
187 __ ldr(R3, Address(THR, Thread::heap_offset())); \ | 183 __ ldr(R3, Address(THR, Thread::heap_offset())); \ |
188 __ ldr(R0, Address(R3, Heap::TopOffset(space))); \ | 184 __ ldr(R0, Address(R3, Heap::TopOffset(space))); \ |
189 \ | 185 \ |
190 /* R2: allocation size. */ \ | 186 /* R2: allocation size. */ \ |
191 __ adds(R1, R0, Operand(R2)); \ | 187 __ adds(R1, R0, Operand(R2)); \ |
192 __ b(&fall_through, CS); /* Fail on unsigned overflow. */ \ | 188 __ b(&fall_through, CS); /* Fail on unsigned overflow. */ \ |
193 \ | 189 \ |
194 /* Check if the allocation fits into the remaining space. */ \ | 190 /* Check if the allocation fits into the remaining space. */ \ |
195 /* R0: potential new object start. */ \ | 191 /* R0: potential new object start. */ \ |
196 /* R1: potential next object start. */ \ | 192 /* R1: potential next object start. */ \ |
197 /* R2: allocation size. */ \ | 193 /* R2: allocation size. */ \ |
198 /* R3: heap. */ \ | 194 /* R3: heap. */ \ |
199 __ ldr(IP, Address(R3, Heap::EndOffset(space))); \ | 195 __ ldr(IP, Address(R3, Heap::EndOffset(space))); \ |
200 __ cmp(R1, Operand(IP)); \ | 196 __ cmp(R1, Operand(IP)); \ |
201 __ b(&fall_through, CS); \ | 197 __ b(&fall_through, CS); \ |
202 \ | 198 \ |
203 /* Successfully allocated the object(s), now update top to point to */ \ | 199 /* Successfully allocated the object(s), now update top to point to */ \ |
204 /* next object start and initialize the object. */ \ | 200 /* next object start and initialize the object. */ \ |
205 NOT_IN_PRODUCT(__ LoadAllocationStatsAddress(R4, cid)); \ | 201 NOT_IN_PRODUCT(__ LoadAllocationStatsAddress(R4, cid)); \ |
206 __ str(R1, Address(R3, Heap::TopOffset(space))); \ | 202 __ str(R1, Address(R3, Heap::TopOffset(space))); \ |
207 __ AddImmediate(R0, kHeapObjectTag); \ | 203 __ AddImmediate(R0, kHeapObjectTag); \ |
208 /* Initialize the tags. */ \ | 204 /* Initialize the tags. */ \ |
209 /* R0: new object start as a tagged pointer. */ \ | 205 /* R0: new object start as a tagged pointer. */ \ |
210 /* R1: new object end address. */ \ | 206 /* R1: new object end address. */ \ |
211 /* R2: allocation size. */ \ | 207 /* R2: allocation size. */ \ |
212 /* R4: allocation stats address */ \ | 208 /* R4: allocation stats address */ \ |
213 { \ | 209 { \ |
214 __ CompareImmediate(R2, RawObject::SizeTag::kMaxSizeTag); \ | 210 __ CompareImmediate(R2, RawObject::SizeTag::kMaxSizeTag); \ |
215 __ mov(R3, Operand(R2, LSL, \ | 211 __ mov(R3, \ |
216 RawObject::kSizeTagPos - kObjectAlignmentLog2), LS); \ | 212 Operand(R2, LSL, RawObject::kSizeTagPos - kObjectAlignmentLog2), \ |
| 213 LS); \ |
217 __ mov(R3, Operand(0), HI); \ | 214 __ mov(R3, Operand(0), HI); \ |
218 \ | 215 \ |
219 /* Get the class index and insert it into the tags. */ \ | 216 /* Get the class index and insert it into the tags. */ \ |
220 __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cid)); \ | 217 __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cid)); \ |
221 __ orr(R3, R3, Operand(TMP)); \ | 218 __ orr(R3, R3, Operand(TMP)); \ |
222 __ str(R3, FieldAddress(R0, type_name::tags_offset())); /* Tags. */ \ | 219 __ str(R3, FieldAddress(R0, type_name::tags_offset())); /* Tags. */ \ |
223 } \ | 220 } \ |
224 /* Set the length field. */ \ | 221 /* Set the length field. */ \ |
225 /* R0: new object start as a tagged pointer. */ \ | 222 /* R0: new object start as a tagged pointer. */ \ |
226 /* R1: new object end address. */ \ | 223 /* R1: new object end address. */ \ |
227 /* R2: allocation size. */ \ | 224 /* R2: allocation size. */ \ |
228 /* R4: allocation stats address. */ \ | 225 /* R4: allocation stats address. */ \ |
229 __ ldr(R3, Address(SP, kArrayLengthStackOffset)); /* Array length. */ \ | 226 __ ldr(R3, Address(SP, kArrayLengthStackOffset)); /* Array length. */ \ |
230 __ StoreIntoObjectNoBarrier(R0, \ | 227 __ StoreIntoObjectNoBarrier( \ |
231 FieldAddress(R0, type_name::length_offset()), \ | 228 R0, FieldAddress(R0, type_name::length_offset()), R3); \ |
232 R3); \ | |
233 /* Initialize all array elements to 0. */ \ | 229 /* Initialize all array elements to 0. */ \ |
234 /* R0: new object start as a tagged pointer. */ \ | 230 /* R0: new object start as a tagged pointer. */ \ |
235 /* R1: new object end address. */ \ | 231 /* R1: new object end address. */ \ |
236 /* R2: allocation size. */ \ | 232 /* R2: allocation size. */ \ |
237 /* R3: iterator which initially points to the start of the variable */ \ | 233 /* R3: iterator which initially points to the start of the variable */ \ |
238 /* R4: allocation stats address */ \ | 234 /* R4: allocation stats address */ \ |
239 /* R8, R9: zero. */ \ | 235 /* R8, R9: zero. */ \ |
240 /* data area to be initialized. */ \ | 236 /* data area to be initialized. */ \ |
241 __ LoadImmediate(R8, 0); \ | 237 __ LoadImmediate(R8, 0); \ |
242 __ mov(R9, Operand(R8)); \ | 238 __ mov(R9, Operand(R8)); \ |
243 __ AddImmediate(R3, R0, sizeof(Raw##type_name) - 1); \ | 239 __ AddImmediate(R3, R0, sizeof(Raw##type_name) - 1); \ |
244 Label init_loop; \ | 240 Label init_loop; \ |
245 __ Bind(&init_loop); \ | 241 __ Bind(&init_loop); \ |
246 __ AddImmediate(R3, 2 * kWordSize); \ | 242 __ AddImmediate(R3, 2 * kWordSize); \ |
247 __ cmp(R3, Operand(R1)); \ | 243 __ cmp(R3, Operand(R1)); \ |
248 __ strd(R8, R9, R3, -2 * kWordSize, LS); \ | 244 __ strd(R8, R9, R3, -2 * kWordSize, LS); \ |
249 __ b(&init_loop, CC); \ | 245 __ b(&init_loop, CC); \ |
250 __ str(R8, Address(R3, -2 * kWordSize), HI); \ | 246 __ str(R8, Address(R3, -2 * kWordSize), HI); \ |
251 \ | 247 \ |
252 NOT_IN_PRODUCT(__ IncrementAllocationStatsWithSize(R4, R2, space)); \ | 248 NOT_IN_PRODUCT(__ IncrementAllocationStatsWithSize(R4, R2, space)); \ |
253 __ Ret(); \ | 249 __ Ret(); \ |
254 __ Bind(&fall_through); \ | 250 __ Bind(&fall_through); |
255 | 251 |
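The size computation in TYPED_ARRAY_ALLOCATION scales the untagged length, adds the fixed header size, and rounds up to the object alignment with bic. The same arithmetic in C++, a sketch in which fixed_size stands in for sizeof(Raw##type_name) and an 8-byte kObjectAlignment is assumed:

    #include <cstdint>

    constexpr intptr_t kObjectAlignment = 8;  // Assumed for the sketch.

    intptr_t AllocationSize(intptr_t length, int scale_shift,
                            intptr_t fixed_size) {
      intptr_t size = (length << scale_shift)   // Payload bytes.
                      + fixed_size              // Header.
                      + (kObjectAlignment - 1); // Round up ...
      return size & ~(kObjectAlignment - 1);    // ... bic clears the low bits.
    }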
256 | 252 |
257 static int GetScaleFactor(intptr_t size) { | 253 static int GetScaleFactor(intptr_t size) { |
258 switch (size) { | 254 switch (size) { |
259 case 1: return 0; | 255 case 1: |
260 case 2: return 1; | 256 return 0; |
261 case 4: return 2; | 257 case 2: |
262 case 8: return 3; | 258 return 1; |
263 case 16: return 4; | 259 case 4: |
| 260 return 2; |
| 261 case 8: |
| 262 return 3; |
| 263 case 16: |
| 264 return 4; |
264 } | 265 } |
265 UNREACHABLE(); | 266 UNREACHABLE(); |
266 return -1; | 267 return -1; |
267 } | 268 } |
268 | 269 |
269 | 270 |
270 #define TYPED_DATA_ALLOCATOR(clazz) \ | 271 #define TYPED_DATA_ALLOCATOR(clazz) \ |
271 void Intrinsifier::TypedData_##clazz##_factory(Assembler* assembler) { \ | 272 void Intrinsifier::TypedData_##clazz##_factory(Assembler* assembler) { \ |
272 intptr_t size = TypedData::ElementSizeInBytes(kTypedData##clazz##Cid); \ | 273 intptr_t size = TypedData::ElementSizeInBytes(kTypedData##clazz##Cid); \ |
273 intptr_t max_len = TypedData::MaxElements(kTypedData##clazz##Cid); \ | 274 intptr_t max_len = TypedData::MaxElements(kTypedData##clazz##Cid); \ |
274 int shift = GetScaleFactor(size); \ | 275 int shift = GetScaleFactor(size); \ |
275 TYPED_ARRAY_ALLOCATION(TypedData, kTypedData##clazz##Cid, max_len, shift); \ | 276 TYPED_ARRAY_ALLOCATION(TypedData, kTypedData##clazz##Cid, max_len, shift); \ |
276 } | 277 } |
277 CLASS_LIST_TYPED_DATA(TYPED_DATA_ALLOCATOR) | 278 CLASS_LIST_TYPED_DATA(TYPED_DATA_ALLOCATOR) |
278 #undef TYPED_DATA_ALLOCATOR | 279 #undef TYPED_DATA_ALLOCATOR |
279 | 280 |
280 | 281 |
281 // Loads args from stack into R0 and R1 | 282 // Loads args from stack into R0 and R1 |
282 // Tests if they are smis, jumps to label not_smi if not. | 283 // Tests if they are smis, jumps to label not_smi if not. |
283 static void TestBothArgumentsSmis(Assembler* assembler, Label* not_smi) { | 284 static void TestBothArgumentsSmis(Assembler* assembler, Label* not_smi) { |
284 __ ldr(R0, Address(SP, + 0 * kWordSize)); | 285 __ ldr(R0, Address(SP, +0 * kWordSize)); |
285 __ ldr(R1, Address(SP, + 1 * kWordSize)); | 286 __ ldr(R1, Address(SP, +1 * kWordSize)); |
286 __ orr(TMP, R0, Operand(R1)); | 287 __ orr(TMP, R0, Operand(R1)); |
287 __ tst(TMP, Operand(kSmiTagMask)); | 288 __ tst(TMP, Operand(kSmiTagMask)); |
288 __ b(not_smi, NE); | 289 __ b(not_smi, NE); |
289 return; | 290 return; |
290 } | 291 } |
291 | 292 |
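The orr/tst pair checks both arguments with one test: the Smi tag bit is 0, so OR-ing the two tagged words sets the low bit exactly when at least one argument is not a Smi. An equivalent sketch:

    #include <cstdint>

    // Both-Smi test in one compare, mirroring orr(TMP, R0, R1) + tst.
    static inline bool BothSmis(uint32_t a, uint32_t b) {
      return ((a | b) & 1) == 0;  // 1 == kSmiTagMask
    }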
292 | 293 |
293 void Intrinsifier::Integer_addFromInteger(Assembler* assembler) { | 294 void Intrinsifier::Integer_addFromInteger(Assembler* assembler) { |
294 Label fall_through; | 295 Label fall_through; |
295 TestBothArgumentsSmis(assembler, &fall_through); // Checks two smis. | 296 TestBothArgumentsSmis(assembler, &fall_through); // Checks two smis. |
296 __ adds(R0, R0, Operand(R1)); // Adds. | 297 __ adds(R0, R0, Operand(R1)); // Adds. |
297 __ bx(LR, VC); // Return if no overflow. | 298 __ bx(LR, VC); // Return if no overflow. |
298 // Otherwise fall through. | 299 // Otherwise fall through. |
299 __ Bind(&fall_through); | 300 __ Bind(&fall_through); |
300 } | 301 } |
301 | 302 |
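Because the tag bits are zero, adding two tagged Smis adds their payloads directly; the only failure mode is signed 32-bit overflow, which ARM reports in the V flag (hence bx(LR, VC)). A sketch of the same contract, using a GCC/Clang builtin:

    #include <cstdint>

    // Tagged Smi addition; returns false on overflow (the fall-through path).
    bool SmiAdd(int32_t a_tagged, int32_t b_tagged, int32_t* result_tagged) {
      return !__builtin_add_overflow(a_tagged, b_tagged, result_tagged);
    }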
302 | 303 |
303 void Intrinsifier::Integer_add(Assembler* assembler) { | 304 void Intrinsifier::Integer_add(Assembler* assembler) { |
304 Integer_addFromInteger(assembler); | 305 Integer_addFromInteger(assembler); |
305 } | 306 } |
306 | 307 |
307 | 308 |
308 void Intrinsifier::Integer_subFromInteger(Assembler* assembler) { | 309 void Intrinsifier::Integer_subFromInteger(Assembler* assembler) { |
309 Label fall_through; | 310 Label fall_through; |
310 TestBothArgumentsSmis(assembler, &fall_through); | 311 TestBothArgumentsSmis(assembler, &fall_through); |
311 __ subs(R0, R0, Operand(R1)); // Subtract. | 312 __ subs(R0, R0, Operand(R1)); // Subtract. |
312 __ bx(LR, VC); // Return if no overflow. | 313 __ bx(LR, VC); // Return if no overflow. |
313 // Otherwise fall through. | 314 // Otherwise fall through. |
314 __ Bind(&fall_through); | 315 __ Bind(&fall_through); |
315 } | 316 } |
316 | 317 |
317 | 318 |
318 void Intrinsifier::Integer_sub(Assembler* assembler) { | 319 void Intrinsifier::Integer_sub(Assembler* assembler) { |
319 Label fall_through; | 320 Label fall_through; |
320 TestBothArgumentsSmis(assembler, &fall_through); | 321 TestBothArgumentsSmis(assembler, &fall_through); |
321 __ subs(R0, R1, Operand(R0)); // Subtract. | 322 __ subs(R0, R1, Operand(R0)); // Subtract. |
322 __ bx(LR, VC); // Return if no overflow. | 323 __ bx(LR, VC); // Return if no overflow. |
323 // Otherwise fall through. | 324 // Otherwise fall through. |
324 __ Bind(&fall_through); | 325 __ Bind(&fall_through); |
325 } | 326 } |
326 | 327 |
327 | 328 |
328 void Intrinsifier::Integer_mulFromInteger(Assembler* assembler) { | 329 void Intrinsifier::Integer_mulFromInteger(Assembler* assembler) { |
329 Label fall_through; | 330 Label fall_through; |
330 TestBothArgumentsSmis(assembler, &fall_through); // checks two smis | 331 TestBothArgumentsSmis(assembler, &fall_through); // checks two smis |
331 __ SmiUntag(R0); // Untags R0. We only want result shifted by one. | 332 __ SmiUntag(R0); // Untags R0. We only want result shifted by one. |
332 __ smull(R0, IP, R0, R1); // IP:R0 <- R0 * R1. | 333 __ smull(R0, IP, R0, R1); // IP:R0 <- R0 * R1. |
333 __ cmp(IP, Operand(R0, ASR, 31)); | 334 __ cmp(IP, Operand(R0, ASR, 31)); |
334 __ bx(LR, EQ); | 335 __ bx(LR, EQ); |
335 __ Bind(&fall_through); // Fall through on overflow. | 336 __ Bind(&fall_through); // Fall through on overflow. |
336 } | 337 } |
337 | 338 |
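The multiply intrinsic untags one operand so the 64-bit product is already Smi-tagged, then smull plus cmp(IP, Operand(R0, ASR, 31)) verifies that the high word is the sign extension of the low word, i.e. that the product fits in 32 bits. The same check with 64-bit arithmetic, as a sketch:

    #include <cstdint>

    // Mirrors SmiUntag + smull + the sign-extension compare above.
    bool SmiMul(int32_t a_tagged, int32_t b_tagged, int32_t* result_tagged) {
      int64_t product = static_cast<int64_t>(a_tagged >> 1) * b_tagged;
      int32_t lo = static_cast<int32_t>(product);
      int32_t hi = static_cast<int32_t>(product >> 32);
      if (hi != (lo >> 31)) return false;  // Overflow: fall through.
      *result_tagged = lo;  // Already tagged, since one factor was untagged.
      return true;
    }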
338 | 339 |
339 void Intrinsifier::Integer_mul(Assembler* assembler) { | 340 void Intrinsifier::Integer_mul(Assembler* assembler) { |
340 Integer_mulFromInteger(assembler); | 341 Integer_mulFromInteger(assembler); |
341 } | 342 } |
(...skipping 55 matching lines...) |
397 // } else { | 398 // } else { |
398 // res = res + right; | 399 // res = res + right; |
399 // } | 400 // } |
400 // } | 401 // } |
401 void Intrinsifier::Integer_moduloFromInteger(Assembler* assembler) { | 402 void Intrinsifier::Integer_moduloFromInteger(Assembler* assembler) { |
402 if (!TargetCPUFeatures::can_divide()) { | 403 if (!TargetCPUFeatures::can_divide()) { |
403 return; | 404 return; |
404 } | 405 } |
405 // Check to see if we have integer division | 406 // Check to see if we have integer division |
406 Label fall_through; | 407 Label fall_through; |
407 __ ldr(R1, Address(SP, + 0 * kWordSize)); | 408 __ ldr(R1, Address(SP, +0 * kWordSize)); |
408 __ ldr(R0, Address(SP, + 1 * kWordSize)); | 409 __ ldr(R0, Address(SP, +1 * kWordSize)); |
409 __ orr(TMP, R0, Operand(R1)); | 410 __ orr(TMP, R0, Operand(R1)); |
410 __ tst(TMP, Operand(kSmiTagMask)); | 411 __ tst(TMP, Operand(kSmiTagMask)); |
411 __ b(&fall_through, NE); | 412 __ b(&fall_through, NE); |
412 // R1: Tagged left (dividend). | 413 // R1: Tagged left (dividend). |
413 // R0: Tagged right (divisor). | 414 // R0: Tagged right (divisor). |
414 // Check if modulo by zero -> exception thrown in main function. | 415 // Check if modulo by zero -> exception thrown in main function. |
415 __ cmp(R0, Operand(0)); | 416 __ cmp(R0, Operand(0)); |
416 __ b(&fall_through, EQ); | 417 __ b(&fall_through, EQ); |
417 EmitRemainderOperation(assembler); | 418 EmitRemainderOperation(assembler); |
418 // Untagged right in R0. Untagged remainder result in R1. | 419 // Untagged right in R0. Untagged remainder result in R1. |
(...skipping 26 matching lines...) |
445 | 446 |
446 __ SmiUntag(R0); | 447 __ SmiUntag(R0); |
447 __ SmiUntag(R1); | 448 __ SmiUntag(R1); |
448 | 449 |
449 __ IntegerDivide(R0, R1, R0, D1, D0); | 450 __ IntegerDivide(R0, R1, R0, D1, D0); |
450 | 451 |
451 // Check the corner case of dividing the 'MIN_SMI' by -1, in which case we | 452 // Check the corner case of dividing the 'MIN_SMI' by -1, in which case we |
452 // cannot tag the result. | 453 // cannot tag the result. |
453 __ CompareImmediate(R0, 0x40000000); | 454 __ CompareImmediate(R0, 0x40000000); |
454 __ SmiTag(R0, NE); // Not equal. Okay to tag and return. | 455 __ SmiTag(R0, NE); // Not equal. Okay to tag and return. |
455 __ bx(LR, NE); // Return. | 456 __ bx(LR, NE); // Return. |
456 __ Bind(&fall_through); | 457 __ Bind(&fall_through); |
457 } | 458 } |
458 | 459 |
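The 0x40000000 guard covers the single quotient that cannot be re-tagged: -0x40000000 / -1 yields 0x40000000, and tagging it (one left shift) lands on INT32_MIN. Spelled out as a sketch:

    #include <cstdint>

    // Mirrors CompareImmediate(R0, 0x40000000) + the conditional SmiTag/bx.
    bool TagQuotient(int32_t untagged, int32_t* tagged) {
      if (untagged == 0x40000000) return false;  // MIN_SMI / -1: bail out.
      *tagged = static_cast<int32_t>(static_cast<uint32_t>(untagged) << 1);
      return true;
    }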
459 | 460 |
460 void Intrinsifier::Integer_negate(Assembler* assembler) { | 461 void Intrinsifier::Integer_negate(Assembler* assembler) { |
461 Label fall_through; | 462 Label fall_through; |
462 __ ldr(R0, Address(SP, + 0 * kWordSize)); // Grab first argument. | 463 __ ldr(R0, Address(SP, +0 * kWordSize)); // Grab first argument. |
463 __ tst(R0, Operand(kSmiTagMask)); // Test for Smi. | 464 __ tst(R0, Operand(kSmiTagMask)); // Test for Smi. |
464 __ b(&fall_through, NE); | 465 __ b(&fall_through, NE); |
465 __ rsbs(R0, R0, Operand(0)); // R0 is a Smi. R0 <- 0 - R0. | 466 __ rsbs(R0, R0, Operand(0)); // R0 is a Smi. R0 <- 0 - R0. |
466 __ bx(LR, VC); // Return if there wasn't overflow, fall through otherwise. | 467 __ bx(LR, VC); // Return if there wasn't overflow, fall through otherwise. |
467 // R0 is not a Smi. Fall through. | 468 // R0 is not a Smi. Fall through. |
468 __ Bind(&fall_through); | 469 __ Bind(&fall_through); |
469 } | 470 } |
470 | 471 |
471 | 472 |
472 void Intrinsifier::Integer_bitAndFromInteger(Assembler* assembler) { | 473 void Intrinsifier::Integer_bitAndFromInteger(Assembler* assembler) { |
473 Label fall_through; | 474 Label fall_through; |
(...skipping 66 matching lines...) |
540 __ CompareImmediate(R1, 0); | 541 __ CompareImmediate(R1, 0); |
541 __ b(&fall_through, LT); | 542 __ b(&fall_through, LT); |
542 __ SmiUntag(R1); | 543 __ SmiUntag(R1); |
543 | 544 |
544 // Pull off high bits that will be shifted off of R1 by making a mask | 545 // Pull off high bits that will be shifted off of R1 by making a mask |
545 // ((1 << R0) - 1), shifting it to the left, masking R1, then shifting back. | 546 // ((1 << R0) - 1), shifting it to the left, masking R1, then shifting back. |
546 // high bits = (((1 << R0) - 1) << (32 - R0)) & R1) >> (32 - R0) | 547 // high bits = (((1 << R0) - 1) << (32 - R0)) & R1) >> (32 - R0) |
547 // lo bits = R1 << R0 | 548 // lo bits = R1 << R0 |
548 __ LoadImmediate(NOTFP, 1); | 549 __ LoadImmediate(NOTFP, 1); |
549 __ mov(NOTFP, Operand(NOTFP, LSL, R0)); // NOTFP <- 1 << R0 | 550 __ mov(NOTFP, Operand(NOTFP, LSL, R0)); // NOTFP <- 1 << R0 |
550 __ sub(NOTFP, NOTFP, Operand(1)); // NOTFP <- NOTFP - 1 | 551 __ sub(NOTFP, NOTFP, Operand(1)); // NOTFP <- NOTFP - 1 |
551 __ rsb(R3, R0, Operand(32)); // R3 <- 32 - R0 | 552 __ rsb(R3, R0, Operand(32)); // R3 <- 32 - R0 |
552 __ mov(NOTFP, Operand(NOTFP, LSL, R3)); // NOTFP <- NOTFP << R3 | 553 __ mov(NOTFP, Operand(NOTFP, LSL, R3)); // NOTFP <- NOTFP << R3 |
553 __ and_(NOTFP, R1, Operand(NOTFP)); // NOTFP <- NOTFP & R1 | 554 __ and_(NOTFP, R1, Operand(NOTFP)); // NOTFP <- NOTFP & R1 |
554 __ mov(NOTFP, Operand(NOTFP, LSR, R3)); // NOTFP <- NOTFP >> R3 | 555 __ mov(NOTFP, Operand(NOTFP, LSR, R3)); // NOTFP <- NOTFP >> R3 |
555 // Now NOTFP has the bits that fall off of R1 on a left shift. | 556 // Now NOTFP has the bits that fall off of R1 on a left shift. |
556 __ mov(R1, Operand(R1, LSL, R0)); // R1 gets the low bits. | 557 __ mov(R1, Operand(R1, LSL, R0)); // R1 gets the low bits. |
557 | 558 |
558 const Class& mint_class = Class::Handle( | 559 const Class& mint_class = |
559 Isolate::Current()->object_store()->mint_class()); | 560 Class::Handle(Isolate::Current()->object_store()->mint_class()); |
560 __ TryAllocate(mint_class, &fall_through, R0, R2); | 561 __ TryAllocate(mint_class, &fall_through, R0, R2); |
561 | 562 |
562 | 563 |
563 __ str(R1, FieldAddress(R0, Mint::value_offset())); | 564 __ str(R1, FieldAddress(R0, Mint::value_offset())); |
564 __ str(NOTFP, FieldAddress(R0, Mint::value_offset() + kWordSize)); | 565 __ str(NOTFP, FieldAddress(R0, Mint::value_offset() + kWordSize)); |
565 __ Ret(); | 566 __ Ret(); |
566 __ Bind(&fall_through); | 567 __ Bind(&fall_through); |
567 } | 568 } |
568 | 569 |
569 | 570 |
(...skipping 325 matching lines...) |
895 // Loop a_used times, a_used > 0. | 896 // Loop a_used times, a_used > 0. |
896 __ ldr(R4, Address(R1, Bigint::kBytesPerDigit, Address::PostIndex)); | 897 __ ldr(R4, Address(R1, Bigint::kBytesPerDigit, Address::PostIndex)); |
897 __ ldr(R9, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex)); | 898 __ ldr(R9, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex)); |
898 __ adcs(R4, R4, Operand(R9)); | 899 __ adcs(R4, R4, Operand(R9)); |
899 __ teq(R1, Operand(NOTFP)); // Does not affect carry flag. | 900 __ teq(R1, Operand(NOTFP)); // Does not affect carry flag. |
900 __ str(R4, Address(R8, Bigint::kBytesPerDigit, Address::PostIndex)); | 901 __ str(R4, Address(R8, Bigint::kBytesPerDigit, Address::PostIndex)); |
901 __ b(&add_loop, NE); | 902 __ b(&add_loop, NE); |
902 | 903 |
903 Label last_carry; | 904 Label last_carry; |
904 __ teq(R1, Operand(R6)); // Does not affect carry flag. | 905 __ teq(R1, Operand(R6)); // Does not affect carry flag. |
905 __ b(&last_carry, EQ); // If used - a_used == 0. | 906 __ b(&last_carry, EQ); // If used - a_used == 0. |
906 | 907 |
907 Label carry_loop; | 908 Label carry_loop; |
908 __ Bind(&carry_loop); | 909 __ Bind(&carry_loop); |
909 // Loop used - a_used times, used - a_used > 0. | 910 // Loop used - a_used times, used - a_used > 0. |
910 __ ldr(R4, Address(R1, Bigint::kBytesPerDigit, Address::PostIndex)); | 911 __ ldr(R4, Address(R1, Bigint::kBytesPerDigit, Address::PostIndex)); |
911 __ adcs(R4, R4, Operand(0)); | 912 __ adcs(R4, R4, Operand(0)); |
912 __ teq(R1, Operand(R6)); // Does not affect carry flag. | 913 __ teq(R1, Operand(R6)); // Does not affect carry flag. |
913 __ str(R4, Address(R8, Bigint::kBytesPerDigit, Address::PostIndex)); | 914 __ str(R4, Address(R8, Bigint::kBytesPerDigit, Address::PostIndex)); |
914 __ b(&carry_loop, NE); | 915 __ b(&carry_loop, NE); |
915 | 916 |
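Both loops keep the carry live in the flags: adcs consumes and produces it, and teq is used for the loop bound precisely because it leaves the carry flag untouched. The whole digit-wise addition, written as a sketch with an explicit carry:

    #include <cstdint>

    // Sketch of the add_loop / carry_loop pair over raw 32-bit digit arrays;
    // r must have room for used + 1 digits (the final carry digit).
    void BigintAddDigits(const uint32_t* x, intptr_t used,
                         const uint32_t* a, intptr_t a_used, uint32_t* r) {
      uint64_t carry = 0;
      for (intptr_t i = 0; i < a_used; i++) {  // add_loop
        uint64_t t = static_cast<uint64_t>(x[i]) + a[i] + carry;
        r[i] = static_cast<uint32_t>(t);
        carry = t >> 32;
      }
      for (intptr_t i = a_used; i < used; i++) {  // carry_loop
        uint64_t t = static_cast<uint64_t>(x[i]) + carry;
        r[i] = static_cast<uint32_t>(t);
        carry = t >> 32;
      }
      r[used] = static_cast<uint32_t>(carry);  // last_carry (in elided lines)
    }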
(...skipping 39 matching lines...) |
955 // Loop a_used times, a_used > 0. | 956 // Loop a_used times, a_used > 0. |
956 __ ldr(R4, Address(R1, Bigint::kBytesPerDigit, Address::PostIndex)); | 957 __ ldr(R4, Address(R1, Bigint::kBytesPerDigit, Address::PostIndex)); |
957 __ ldr(R9, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex)); | 958 __ ldr(R9, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex)); |
958 __ sbcs(R4, R4, Operand(R9)); | 959 __ sbcs(R4, R4, Operand(R9)); |
959 __ teq(R1, Operand(NOTFP)); // Does not affect carry flag. | 960 __ teq(R1, Operand(NOTFP)); // Does not affect carry flag. |
960 __ str(R4, Address(R8, Bigint::kBytesPerDigit, Address::PostIndex)); | 961 __ str(R4, Address(R8, Bigint::kBytesPerDigit, Address::PostIndex)); |
961 __ b(&sub_loop, NE); | 962 __ b(&sub_loop, NE); |
962 | 963 |
963 Label done; | 964 Label done; |
964 __ teq(R1, Operand(R6)); // Does not affect carry flag. | 965 __ teq(R1, Operand(R6)); // Does not affect carry flag. |
965 __ b(&done, EQ); // If used - a_used == 0. | 966 __ b(&done, EQ); // If used - a_used == 0. |
966 | 967 |
967 Label carry_loop; | 968 Label carry_loop; |
968 __ Bind(&carry_loop); | 969 __ Bind(&carry_loop); |
969 // Loop used - a_used times, used - a_used > 0. | 970 // Loop used - a_used times, used - a_used > 0. |
970 __ ldr(R4, Address(R1, Bigint::kBytesPerDigit, Address::PostIndex)); | 971 __ ldr(R4, Address(R1, Bigint::kBytesPerDigit, Address::PostIndex)); |
971 __ sbcs(R4, R4, Operand(0)); | 972 __ sbcs(R4, R4, Operand(0)); |
972 __ teq(R1, Operand(R6)); // Does not affect carry flag. | 973 __ teq(R1, Operand(R6)); // Does not affect carry flag. |
973 __ str(R4, Address(R8, Bigint::kBytesPerDigit, Address::PostIndex)); | 974 __ str(R4, Address(R8, Bigint::kBytesPerDigit, Address::PostIndex)); |
974 __ b(&carry_loop, NE); | 975 __ b(&carry_loop, NE); |
975 | 976 |
(...skipping 138 matching lines...) |
1114 __ add(R4, R3, Operand(TypedData::data_offset() - kHeapObjectTag)); | 1115 __ add(R4, R3, Operand(TypedData::data_offset() - kHeapObjectTag)); |
1115 | 1116 |
1116 // R3 = x = *xip++, return if x == 0 | 1117 // R3 = x = *xip++, return if x == 0 |
1117 Label x_zero; | 1118 Label x_zero; |
1118 __ ldr(R3, Address(R4, Bigint::kBytesPerDigit, Address::PostIndex)); | 1119 __ ldr(R3, Address(R4, Bigint::kBytesPerDigit, Address::PostIndex)); |
1119 __ tst(R3, Operand(R3)); | 1120 __ tst(R3, Operand(R3)); |
1120 __ b(&x_zero, EQ); | 1121 __ b(&x_zero, EQ); |
1121 | 1122 |
1122 // NOTFP = ajp = &a_digits[i] | 1123 // NOTFP = ajp = &a_digits[i] |
1123 __ ldr(R1, Address(SP, 1 * kWordSize)); // a_digits | 1124 __ ldr(R1, Address(SP, 1 * kWordSize)); // a_digits |
1124 __ add(R1, R1, Operand(R2, LSL, 2)); // j == 2*i, i is Smi. | 1125 __ add(R1, R1, Operand(R2, LSL, 2)); // j == 2*i, i is Smi. |
1125 __ add(NOTFP, R1, Operand(TypedData::data_offset() - kHeapObjectTag)); | 1126 __ add(NOTFP, R1, Operand(TypedData::data_offset() - kHeapObjectTag)); |
1126 | 1127 |
1127 // R8:R0 = t = x*x + *ajp | 1128 // R8:R0 = t = x*x + *ajp |
1128 __ ldr(R0, Address(NOTFP, 0)); | 1129 __ ldr(R0, Address(NOTFP, 0)); |
1129 __ mov(R8, Operand(0)); | 1130 __ mov(R8, Operand(0)); |
1130 __ umaal(R0, R8, R3, R3); // R8:R0 = R3*R3 + R8 + R0. | 1131 __ umaal(R0, R8, R3, R3); // R8:R0 = R3*R3 + R8 + R0. |
1131 | 1132 |
1132 // *ajp++ = low32(t) = R0 | 1133 // *ajp++ = low32(t) = R0 |
1133 __ str(R0, Address(NOTFP, Bigint::kBytesPerDigit, Address::PostIndex)); | 1134 __ str(R0, Address(NOTFP, Bigint::kBytesPerDigit, Address::PostIndex)); |
1134 | 1135 |
(...skipping 22 matching lines...) |
1157 __ ldr(R2, Address(R4, Bigint::kBytesPerDigit, Address::PostIndex)); | 1158 __ ldr(R2, Address(R4, Bigint::kBytesPerDigit, Address::PostIndex)); |
1158 | 1159 |
1159 // uint96_t t = R9:R8:R0 = 2*x*xi + aj + c | 1160 // uint96_t t = R9:R8:R0 = 2*x*xi + aj + c |
1160 __ umull(R0, R1, R2, R3); // R1:R0 = R2*R3. | 1161 __ umull(R0, R1, R2, R3); // R1:R0 = R2*R3. |
1161 __ adds(R0, R0, Operand(R0)); | 1162 __ adds(R0, R0, Operand(R0)); |
1162 __ adcs(R1, R1, Operand(R1)); | 1163 __ adcs(R1, R1, Operand(R1)); |
1163 __ mov(R2, Operand(0)); | 1164 __ mov(R2, Operand(0)); |
1164 __ adc(R2, R2, Operand(0)); // R2:R1:R0 = 2*x*xi. | 1165 __ adc(R2, R2, Operand(0)); // R2:R1:R0 = 2*x*xi. |
1165 __ adds(R0, R0, Operand(R8)); | 1166 __ adds(R0, R0, Operand(R8)); |
1166 __ adcs(R1, R1, Operand(R9)); | 1167 __ adcs(R1, R1, Operand(R9)); |
1167 __ adc(R2, R2, Operand(0)); // R2:R1:R0 = 2*x*xi + c. | 1168 __ adc(R2, R2, Operand(0)); // R2:R1:R0 = 2*x*xi + c. |
1168 __ ldr(R8, Address(NOTFP, 0)); // R8 = aj = *ajp. | 1169 __ ldr(R8, Address(NOTFP, 0)); // R8 = aj = *ajp. |
1169 __ adds(R0, R0, Operand(R8)); | 1170 __ adds(R0, R0, Operand(R8)); |
1170 __ adcs(R8, R1, Operand(0)); | 1171 __ adcs(R8, R1, Operand(0)); |
1171 __ adc(R9, R2, Operand(0)); // R9:R8:R0 = 2*x*xi + c + aj. | 1172 __ adc(R9, R2, Operand(0)); // R9:R8:R0 = 2*x*xi + c + aj. |
1172 | 1173 |
1173 // *ajp++ = low32(t) = R0 | 1174 // *ajp++ = low32(t) = R0 |
1174 __ str(R0, Address(NOTFP, Bigint::kBytesPerDigit, Address::PostIndex)); | 1175 __ str(R0, Address(NOTFP, Bigint::kBytesPerDigit, Address::PostIndex)); |
1175 | 1176 |
1176 // while (--n >= 0) | 1177 // while (--n >= 0) |
1177 __ subs(R6, R6, Operand(1)); // --n | 1178 __ subs(R6, R6, Operand(1)); // --n |
(...skipping 29 matching lines...) |
1207 // uint32_t d = digits[i >> 1]; // i is Smi. | 1208 // uint32_t d = digits[i >> 1]; // i is Smi. |
1208 // uint64_t t = rho*d; | 1209 // uint64_t t = rho*d; |
1209 // args[_MU] = t mod DIGIT_BASE; // _MU == 4. | 1210 // args[_MU] = t mod DIGIT_BASE; // _MU == 4. |
1210 // return 1; | 1211 // return 1; |
1211 // } | 1212 // } |
1212 | 1213 |
1213 // R4 = args | 1214 // R4 = args |
1214 __ ldr(R4, Address(SP, 2 * kWordSize)); // args | 1215 __ ldr(R4, Address(SP, 2 * kWordSize)); // args |
1215 | 1216 |
1216 // R3 = rho = args[2] | 1217 // R3 = rho = args[2] |
1217 __ ldr(R3, FieldAddress(R4, | 1218 __ ldr(R3, FieldAddress( |
1218 TypedData::data_offset() + 2*Bigint::kBytesPerDigit)); | 1219 R4, TypedData::data_offset() + 2 * Bigint::kBytesPerDigit)); |
1219 | 1220 |
1220 // R2 = digits[i >> 1] | 1221 // R2 = digits[i >> 1] |
1221 __ ldrd(R0, R1, SP, 0 * kWordSize); // R0 = i as Smi, R1 = digits | 1222 __ ldrd(R0, R1, SP, 0 * kWordSize); // R0 = i as Smi, R1 = digits |
1222 __ add(R1, R1, Operand(R0, LSL, 1)); | 1223 __ add(R1, R1, Operand(R0, LSL, 1)); |
1223 __ ldr(R2, FieldAddress(R1, TypedData::data_offset())); | 1224 __ ldr(R2, FieldAddress(R1, TypedData::data_offset())); |
1224 | 1225 |
1225 // R1:R0 = t = rho*d | 1226 // R1:R0 = t = rho*d |
1226 __ umull(R0, R1, R2, R3); | 1227 __ umull(R0, R1, R2, R3); |
1227 | 1228 |
1228 // args[4] = t mod DIGIT_BASE = low32(t) | 1229 // args[4] = t mod DIGIT_BASE = low32(t) |
1229 __ str(R0, | 1230 __ str(R0, FieldAddress( |
1230 FieldAddress(R4, TypedData::data_offset() + 4*Bigint::kBytesPerDigit)); | 1231 R4, TypedData::data_offset() + 4 * Bigint::kBytesPerDigit)); |
1231 | 1232 |
1232 __ mov(R0, Operand(Smi::RawValue(1))); // One digit processed. | 1233 __ mov(R0, Operand(Smi::RawValue(1))); // One digit processed. |
1233 __ Ret(); | 1234 __ Ret(); |
1234 } | 1235 } |
1235 | 1236 |
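_mulMod computes the per-digit Montgomery multiplier: with a one-digit base b == 2^32 (conventionally rho == -n^-1 mod b in Montgomery reduction), the multiplier is just the low half of rho*d. The umull/str pair above reduces to this sketch:

    #include <cstdint>

    // t = rho * d; args[_MU] = t mod DIGIT_BASE (the low 32 bits).
    uint32_t MontgomeryMu(uint32_t rho, uint32_t digit) {
      uint64_t t = static_cast<uint64_t>(rho) * digit;  // umull
      return static_cast<uint32_t>(t);                  // low32(t)
    }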
1236 | 1237 |
1237 // Check if the last argument is a double, jump to label 'is_smi' if smi | 1238 // Check if the last argument is a double, jump to label 'is_smi' if smi |
1238 // (easy to convert to double), otherwise jump to label 'not_double_smi', | 1239 // (easy to convert to double), otherwise jump to label 'not_double_smi', |
1239 // Returns the last argument in R0. | 1240 // Returns the last argument in R0. |
1240 static void TestLastArgumentIsDouble(Assembler* assembler, | 1241 static void TestLastArgumentIsDouble(Assembler* assembler, |
(...skipping 73 matching lines...) |
1314 if (TargetCPUFeatures::vfp_supported()) { | 1315 if (TargetCPUFeatures::vfp_supported()) { |
1315 Label fall_through, is_smi, double_op; | 1316 Label fall_through, is_smi, double_op; |
1316 | 1317 |
1317 TestLastArgumentIsDouble(assembler, &is_smi, &fall_through); | 1318 TestLastArgumentIsDouble(assembler, &is_smi, &fall_through); |
1318 // Both arguments are double, right operand is in R0. | 1319 // Both arguments are double, right operand is in R0. |
1319 __ LoadDFromOffset(D1, R0, Double::value_offset() - kHeapObjectTag); | 1320 __ LoadDFromOffset(D1, R0, Double::value_offset() - kHeapObjectTag); |
1320 __ Bind(&double_op); | 1321 __ Bind(&double_op); |
1321 __ ldr(R0, Address(SP, 1 * kWordSize)); // Left argument. | 1322 __ ldr(R0, Address(SP, 1 * kWordSize)); // Left argument. |
1322 __ LoadDFromOffset(D0, R0, Double::value_offset() - kHeapObjectTag); | 1323 __ LoadDFromOffset(D0, R0, Double::value_offset() - kHeapObjectTag); |
1323 switch (kind) { | 1324 switch (kind) { |
1324 case Token::kADD: __ vaddd(D0, D0, D1); break; | 1325 case Token::kADD: |
1325 case Token::kSUB: __ vsubd(D0, D0, D1); break; | 1326 __ vaddd(D0, D0, D1); |
1326 case Token::kMUL: __ vmuld(D0, D0, D1); break; | 1327 break; |
1327 case Token::kDIV: __ vdivd(D0, D0, D1); break; | 1328 case Token::kSUB: |
1328 default: UNREACHABLE(); | 1329 __ vsubd(D0, D0, D1); |
| 1330 break; |
| 1331 case Token::kMUL: |
| 1332 __ vmuld(D0, D0, D1); |
| 1333 break; |
| 1334 case Token::kDIV: |
| 1335 __ vdivd(D0, D0, D1); |
| 1336 break; |
| 1337 default: |
| 1338 UNREACHABLE(); |
1329 } | 1339 } |
1330 const Class& double_class = Class::Handle( | 1340 const Class& double_class = |
1331 Isolate::Current()->object_store()->double_class()); | 1341 Class::Handle(Isolate::Current()->object_store()->double_class()); |
1332 __ TryAllocate(double_class, &fall_through, R0, R1); // Result register. | 1342 __ TryAllocate(double_class, &fall_through, R0, R1); // Result register. |
1333 __ StoreDToOffset(D0, R0, Double::value_offset() - kHeapObjectTag); | 1343 __ StoreDToOffset(D0, R0, Double::value_offset() - kHeapObjectTag); |
1334 __ Ret(); | 1344 __ Ret(); |
1335 __ Bind(&is_smi); // Convert R0 to a double. | 1345 __ Bind(&is_smi); // Convert R0 to a double. |
1336 __ SmiUntag(R0); | 1346 __ SmiUntag(R0); |
1337 __ vmovsr(S0, R0); | 1347 __ vmovsr(S0, R0); |
1338 __ vcvtdi(D1, S0); | 1348 __ vcvtdi(D1, S0); |
1339 __ b(&double_op); | 1349 __ b(&double_op); |
1340 __ Bind(&fall_through); | 1350 __ Bind(&fall_through); |
1341 } | 1351 } |
(...skipping 28 matching lines...) |
1370 __ ldr(R0, Address(SP, 0 * kWordSize)); | 1380 __ ldr(R0, Address(SP, 0 * kWordSize)); |
1371 __ tst(R0, Operand(kSmiTagMask)); | 1381 __ tst(R0, Operand(kSmiTagMask)); |
1372 __ b(&fall_through, NE); | 1382 __ b(&fall_through, NE); |
1373 // Is Smi. | 1383 // Is Smi. |
1374 __ SmiUntag(R0); | 1384 __ SmiUntag(R0); |
1375 __ vmovsr(S0, R0); | 1385 __ vmovsr(S0, R0); |
1376 __ vcvtdi(D1, S0); | 1386 __ vcvtdi(D1, S0); |
1377 __ ldr(R0, Address(SP, 1 * kWordSize)); | 1387 __ ldr(R0, Address(SP, 1 * kWordSize)); |
1378 __ LoadDFromOffset(D0, R0, Double::value_offset() - kHeapObjectTag); | 1388 __ LoadDFromOffset(D0, R0, Double::value_offset() - kHeapObjectTag); |
1379 __ vmuld(D0, D0, D1); | 1389 __ vmuld(D0, D0, D1); |
1380 const Class& double_class = Class::Handle( | 1390 const Class& double_class = |
1381 Isolate::Current()->object_store()->double_class()); | 1391 Class::Handle(Isolate::Current()->object_store()->double_class()); |
1382 __ TryAllocate(double_class, &fall_through, R0, R1); // Result register. | 1392 __ TryAllocate(double_class, &fall_through, R0, R1); // Result register. |
1383 __ StoreDToOffset(D0, R0, Double::value_offset() - kHeapObjectTag); | 1393 __ StoreDToOffset(D0, R0, Double::value_offset() - kHeapObjectTag); |
1384 __ Ret(); | 1394 __ Ret(); |
1385 __ Bind(&fall_through); | 1395 __ Bind(&fall_through); |
1386 } | 1396 } |
1387 } | 1397 } |
1388 | 1398 |
1389 | 1399 |
1390 void Intrinsifier::DoubleFromInteger(Assembler* assembler) { | 1400 void Intrinsifier::DoubleFromInteger(Assembler* assembler) { |
1391 if (TargetCPUFeatures::vfp_supported()) { | 1401 if (TargetCPUFeatures::vfp_supported()) { |
1392 Label fall_through; | 1402 Label fall_through; |
1393 | 1403 |
1394 __ ldr(R0, Address(SP, 0 * kWordSize)); | 1404 __ ldr(R0, Address(SP, 0 * kWordSize)); |
1395 __ tst(R0, Operand(kSmiTagMask)); | 1405 __ tst(R0, Operand(kSmiTagMask)); |
1396 __ b(&fall_through, NE); | 1406 __ b(&fall_through, NE); |
1397 // Is Smi. | 1407 // Is Smi. |
1398 __ SmiUntag(R0); | 1408 __ SmiUntag(R0); |
1399 __ vmovsr(S0, R0); | 1409 __ vmovsr(S0, R0); |
1400 __ vcvtdi(D0, S0); | 1410 __ vcvtdi(D0, S0); |
1401 const Class& double_class = Class::Handle( | 1411 const Class& double_class = |
1402 Isolate::Current()->object_store()->double_class()); | 1412 Class::Handle(Isolate::Current()->object_store()->double_class()); |
1403 __ TryAllocate(double_class, &fall_through, R0, R1); // Result register. | 1413 __ TryAllocate(double_class, &fall_through, R0, R1); // Result register. |
1404 __ StoreDToOffset(D0, R0, Double::value_offset() - kHeapObjectTag); | 1414 __ StoreDToOffset(D0, R0, Double::value_offset() - kHeapObjectTag); |
1405 __ Ret(); | 1415 __ Ret(); |
1406 __ Bind(&fall_through); | 1416 __ Bind(&fall_through); |
1407 } | 1417 } |
1408 } | 1418 } |
1409 | 1419 |
1410 | 1420 |
1411 void Intrinsifier::Double_getIsNaN(Assembler* assembler) { | 1421 void Intrinsifier::Double_getIsNaN(Assembler* assembler) { |
1412 if (TargetCPUFeatures::vfp_supported()) { | 1422 if (TargetCPUFeatures::vfp_supported()) { |
(...skipping 35 matching lines...) |
1448 | 1458 |
1449 | 1459 |
1450 void Intrinsifier::Double_getIsNegative(Assembler* assembler) { | 1460 void Intrinsifier::Double_getIsNegative(Assembler* assembler) { |
1451 if (TargetCPUFeatures::vfp_supported()) { | 1461 if (TargetCPUFeatures::vfp_supported()) { |
1452 Label is_false, is_true, is_zero; | 1462 Label is_false, is_true, is_zero; |
1453 __ ldr(R0, Address(SP, 0 * kWordSize)); | 1463 __ ldr(R0, Address(SP, 0 * kWordSize)); |
1454 __ LoadDFromOffset(D0, R0, Double::value_offset() - kHeapObjectTag); | 1464 __ LoadDFromOffset(D0, R0, Double::value_offset() - kHeapObjectTag); |
1455 __ vcmpdz(D0); | 1465 __ vcmpdz(D0); |
1456 __ vmstat(); | 1466 __ vmstat(); |
1457 __ b(&is_false, VS); // NaN -> false. | 1467 __ b(&is_false, VS); // NaN -> false. |
1458 __ b(&is_zero, EQ); // Check for negative zero. | 1468 __ b(&is_zero, EQ); // Check for negative zero. |
1459 __ b(&is_false, CS); // >= 0 -> false. | 1469 __ b(&is_false, CS); // >= 0 -> false. |
1460 | 1470 |
1461 __ Bind(&is_true); | 1471 __ Bind(&is_true); |
1462 __ LoadObject(R0, Bool::True()); | 1472 __ LoadObject(R0, Bool::True()); |
1463 __ Ret(); | 1473 __ Ret(); |
1464 | 1474 |
1465 __ Bind(&is_false); | 1475 __ Bind(&is_false); |
1466 __ LoadObject(R0, Bool::False()); | 1476 __ LoadObject(R0, Bool::False()); |
1467 __ Ret(); | 1477 __ Ret(); |
1468 | 1478 |
(...skipping 34 matching lines...) |
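Double_getIsNegative above needs three outcomes from one compare against zero: NaN is never negative (the VS branch), exact zero must be inspected for the sign bit to classify -0.0 (the EQ path, whose body falls in the elided lines and is assumed here to test the sign bit), and any other value answers with an ordinary signed compare. A sketch of the equivalent logic:

    #include <cmath>

    // Flag tests from the intrinsic: VS -> false, EQ -> sign-bit check,
    // CS (>= 0) -> false, otherwise true.
    bool DoubleIsNegative(double d) {
      if (std::isnan(d)) return false;
      if (d == 0.0) return std::signbit(d);  // -0.0 compares equal to 0.0.
      return d < 0.0;
    }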
1503 | 1513 |
1504 | 1514 |
1505 void Intrinsifier::MathSqrt(Assembler* assembler) { | 1515 void Intrinsifier::MathSqrt(Assembler* assembler) { |
1506 if (TargetCPUFeatures::vfp_supported()) { | 1516 if (TargetCPUFeatures::vfp_supported()) { |
1507 Label fall_through, is_smi, double_op; | 1517 Label fall_through, is_smi, double_op; |
1508 TestLastArgumentIsDouble(assembler, &is_smi, &fall_through); | 1518 TestLastArgumentIsDouble(assembler, &is_smi, &fall_through); |
1509 // Argument is double and is in R0. | 1519 // Argument is double and is in R0. |
1510 __ LoadDFromOffset(D1, R0, Double::value_offset() - kHeapObjectTag); | 1520 __ LoadDFromOffset(D1, R0, Double::value_offset() - kHeapObjectTag); |
1511 __ Bind(&double_op); | 1521 __ Bind(&double_op); |
1512 __ vsqrtd(D0, D1); | 1522 __ vsqrtd(D0, D1); |
1513 const Class& double_class = Class::Handle( | 1523 const Class& double_class = |
1514 Isolate::Current()->object_store()->double_class()); | 1524 Class::Handle(Isolate::Current()->object_store()->double_class()); |
1515 __ TryAllocate(double_class, &fall_through, R0, R1); // Result register. | 1525 __ TryAllocate(double_class, &fall_through, R0, R1); // Result register. |
1516 __ StoreDToOffset(D0, R0, Double::value_offset() - kHeapObjectTag); | 1526 __ StoreDToOffset(D0, R0, Double::value_offset() - kHeapObjectTag); |
1517 __ Ret(); | 1527 __ Ret(); |
1518 __ Bind(&is_smi); | 1528 __ Bind(&is_smi); |
1519 __ SmiUntag(R0); | 1529 __ SmiUntag(R0); |
1520 __ vmovsr(S0, R0); | 1530 __ vmovsr(S0, R0); |
1521 __ vcvtdi(D1, S0); | 1531 __ vcvtdi(D1, S0); |
1522 __ b(&double_op); | 1532 __ b(&double_op); |
1523 __ Bind(&fall_through); | 1533 __ Bind(&fall_through); |
1524 } | 1534 } |
1525 } | 1535 } |
1526 | 1536 |
1527 | 1537 |
1528 // var state = ((_A * (_state[kSTATE_LO])) + _state[kSTATE_HI]) & _MASK_64; | 1538 // var state = ((_A * (_state[kSTATE_LO])) + _state[kSTATE_HI]) & _MASK_64; |
1529 // _state[kSTATE_LO] = state & _MASK_32; | 1539 // _state[kSTATE_LO] = state & _MASK_32; |
1530 // _state[kSTATE_HI] = state >> 32; | 1540 // _state[kSTATE_HI] = state >> 32; |
1531 void Intrinsifier::Random_nextState(Assembler* assembler) { | 1541 void Intrinsifier::Random_nextState(Assembler* assembler) { |
1532 const Library& math_lib = Library::Handle(Library::MathLibrary()); | 1542 const Library& math_lib = Library::Handle(Library::MathLibrary()); |
1533 ASSERT(!math_lib.IsNull()); | 1543 ASSERT(!math_lib.IsNull()); |
1534 const Class& random_class = Class::Handle( | 1544 const Class& random_class = |
1535 math_lib.LookupClassAllowPrivate(Symbols::_Random())); | 1545 Class::Handle(math_lib.LookupClassAllowPrivate(Symbols::_Random())); |
1536 ASSERT(!random_class.IsNull()); | 1546 ASSERT(!random_class.IsNull()); |
1537 const Field& state_field = Field::ZoneHandle( | 1547 const Field& state_field = Field::ZoneHandle( |
1538 random_class.LookupInstanceFieldAllowPrivate(Symbols::_state())); | 1548 random_class.LookupInstanceFieldAllowPrivate(Symbols::_state())); |
1539 ASSERT(!state_field.IsNull()); | 1549 ASSERT(!state_field.IsNull()); |
1540 const Field& random_A_field = Field::ZoneHandle( | 1550 const Field& random_A_field = Field::ZoneHandle( |
1541 random_class.LookupStaticFieldAllowPrivate(Symbols::_A())); | 1551 random_class.LookupStaticFieldAllowPrivate(Symbols::_A())); |
1542 ASSERT(!random_A_field.IsNull()); | 1552 ASSERT(!random_A_field.IsNull()); |
1543 ASSERT(random_A_field.is_const()); | 1553 ASSERT(random_A_field.is_const()); |
1544 const Instance& a_value = Instance::Handle(random_A_field.StaticValue()); | 1554 const Instance& a_value = Instance::Handle(random_A_field.StaticValue()); |
1545 const int64_t a_int_value = Integer::Cast(a_value).AsInt64Value(); | 1555 const int64_t a_int_value = Integer::Cast(a_value).AsInt64Value(); |
1546 // 'a_int_value' is a mask. | 1556 // 'a_int_value' is a mask. |
1547 ASSERT(Utils::IsUint(32, a_int_value)); | 1557 ASSERT(Utils::IsUint(32, a_int_value)); |
1548 int32_t a_int32_value = static_cast<int32_t>(a_int_value); | 1558 int32_t a_int32_value = static_cast<int32_t>(a_int_value); |
1549 | 1559 |
1550 // Receiver. | 1560 // Receiver. |
1551 __ ldr(R0, Address(SP, 0 * kWordSize)); | 1561 __ ldr(R0, Address(SP, 0 * kWordSize)); |
1552 // Field '_state'. | 1562 // Field '_state'. |
1553 __ ldr(R1, FieldAddress(R0, state_field.Offset())); | 1563 __ ldr(R1, FieldAddress(R0, state_field.Offset())); |
1554 // Addresses of _state[0] and _state[1]. | 1564 // Addresses of _state[0] and _state[1]. |
1555 | 1565 |
1556 const int64_t disp_0 = Instance::DataOffsetFor(kTypedDataUint32ArrayCid); | 1566 const int64_t disp_0 = Instance::DataOffsetFor(kTypedDataUint32ArrayCid); |
1557 const int64_t disp_1 = disp_0 + | 1567 const int64_t disp_1 = |
1558 Instance::ElementSizeFor(kTypedDataUint32ArrayCid); | 1568 disp_0 + Instance::ElementSizeFor(kTypedDataUint32ArrayCid); |
1559 | 1569 |
1560 __ LoadImmediate(R0, a_int32_value); | 1570 __ LoadImmediate(R0, a_int32_value); |
1561 __ LoadFromOffset(kWord, R2, R1, disp_0 - kHeapObjectTag); | 1571 __ LoadFromOffset(kWord, R2, R1, disp_0 - kHeapObjectTag); |
1562 __ LoadFromOffset(kWord, R3, R1, disp_1 - kHeapObjectTag); | 1572 __ LoadFromOffset(kWord, R3, R1, disp_1 - kHeapObjectTag); |
1563 __ mov(R8, Operand(0)); // Zero extend unsigned _state[kSTATE_HI]. | 1573 __ mov(R8, Operand(0)); // Zero extend unsigned _state[kSTATE_HI]. |
1564 // Unsigned 32-bit multiply and 64-bit accumulate into R8:R3. | 1574 // Unsigned 32-bit multiply and 64-bit accumulate into R8:R3. |
1565 __ umlal(R3, R8, R0, R2); // R8:R3 <- R8:R3 + R0 * R2. | 1575 __ umlal(R3, R8, R0, R2); // R8:R3 <- R8:R3 + R0 * R2. |
1566 __ StoreToOffset(kWord, R3, R1, disp_0 - kHeapObjectTag); | 1576 __ StoreToOffset(kWord, R3, R1, disp_0 - kHeapObjectTag); |
1567 __ StoreToOffset(kWord, R8, R1, disp_1 - kHeapObjectTag); | 1577 __ StoreToOffset(kWord, R8, R1, disp_1 - kHeapObjectTag); |
1568 __ Ret(); | 1578 __ Ret(); |
(...skipping 37 matching lines...) |
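The commented algorithm in Random_nextState is a 64-bit multiply-accumulate step, which umlal performs in one instruction: R8:R3 starts as 0:state_hi and accumulates A * state_lo. In plain C++, as a sketch:

    #include <cstdint>

    // state = _A * _state[kSTATE_LO] + _state[kSTATE_HI], then split back.
    void RandomNextState(uint32_t* state_lo, uint32_t* state_hi, uint32_t a) {
      uint64_t state = static_cast<uint64_t>(a) * (*state_lo) + (*state_hi);
      *state_lo = static_cast<uint32_t>(state);        // state & _MASK_32
      *state_hi = static_cast<uint32_t>(state >> 32);  // state >> 32
    }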
1606 | 1616 |
1607 static void JumpIfNotInteger(Assembler* assembler, | 1617 static void JumpIfNotInteger(Assembler* assembler, |
1608 Register cid, | 1618 Register cid, |
1609 Register tmp, | 1619 Register tmp, |
1610 Label* target) { | 1620 Label* target) { |
1611 RangeCheck(assembler, cid, tmp, kSmiCid, kBigintCid, kIfNotInRange, target); | 1621 RangeCheck(assembler, cid, tmp, kSmiCid, kBigintCid, kIfNotInRange, target); |
1612 } | 1622 } |
1613 | 1623 |
1614 | 1624 |
1615 static void JumpIfString(Assembler* assembler, | 1625 static void JumpIfString(Assembler* assembler, |
1616 Register cid, | 1626 Register cid, |
1617 Register tmp, | 1627 Register tmp, |
1618 Label* target) { | 1628 Label* target) { |
1619 RangeCheck(assembler, | 1629 RangeCheck(assembler, cid, tmp, kOneByteStringCid, kExternalTwoByteStringCid, |
1620 cid, | 1630 kIfInRange, target); |
1621 tmp, | |
1622 kOneByteStringCid, | |
1623 kExternalTwoByteStringCid, | |
1624 kIfInRange, | |
1625 target); | |
1626 } | 1631 } |
1627 | 1632 |
1628 | 1633 |
1629 static void JumpIfNotString(Assembler* assembler, | 1634 static void JumpIfNotString(Assembler* assembler, |
1630 Register cid, | 1635 Register cid, |
1631 Register tmp, | 1636 Register tmp, |
1632 Label* target) { | 1637 Label* target) { |
1633 RangeCheck(assembler, | 1638 RangeCheck(assembler, cid, tmp, kOneByteStringCid, kExternalTwoByteStringCid, |
1634 cid, | 1639 kIfNotInRange, target); |
1635 tmp, | |
1636 kOneByteStringCid, | |
1637 kExternalTwoByteStringCid, | |
1638 kIfNotInRange, | |
1639 target); | |
1640 } | 1640 } |
1641 | 1641 |
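JumpIfString and JumpIfNotString assume all string class ids (one- and two-byte, internal and external) are laid out contiguously, so classification is a single range check. RangeCheck's likely shape, using the standard unsigned-compare trick (a sketch, not the VM's actual helper):

    #include <cstdint>

    // In-range test with one unsigned comparison: cid - first wraps below
    // zero to a huge unsigned value, failing the <= test.
    bool InCidRange(intptr_t cid, intptr_t first, intptr_t last) {
      return static_cast<uintptr_t>(cid - first) <=
             static_cast<uintptr_t>(last - first);
    }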
1642 | 1642 |
1643 // Return type quickly for simple types (not parameterized and not signature). | 1643 // Return type quickly for simple types (not parameterized and not signature). |
1644 void Intrinsifier::ObjectRuntimeType(Assembler* assembler) { | 1644 void Intrinsifier::ObjectRuntimeType(Assembler* assembler) { |
1645 Label fall_through, use_canonical_type, not_double, not_integer; | 1645 Label fall_through, use_canonical_type, not_double, not_integer; |
1646 __ ldr(R0, Address(SP, 0 * kWordSize)); | 1646 __ ldr(R0, Address(SP, 0 * kWordSize)); |
1647 __ LoadClassIdMayBeSmi(R1, R0); | 1647 __ LoadClassIdMayBeSmi(R1, R0); |
1648 | 1648 |
1649 __ CompareImmediate(R1, kClosureCid); | 1649 __ CompareImmediate(R1, kClosureCid); |
(...skipping 143 matching lines...) |
1793 } | 1793 } |
1794 | 1794 |
1795 // i = 0 | 1795 // i = 0 |
1796 __ LoadImmediate(R3, 0); | 1796 __ LoadImmediate(R3, 0); |
1797 | 1797 |
1798 // do | 1798 // do |
1799 Label loop; | 1799 Label loop; |
1800 __ Bind(&loop); | 1800 __ Bind(&loop); |
1801 | 1801 |
1802 if (receiver_cid == kOneByteStringCid) { | 1802 if (receiver_cid == kOneByteStringCid) { |
1803 __ ldrb(R4, Address(R0, 0)); // this.codeUnitAt(i + start) | 1803 __ ldrb(R4, Address(R0, 0)); // this.codeUnitAt(i + start) |
1804 } else { | 1804 } else { |
1805 __ ldrh(R4, Address(R0, 0)); // this.codeUnitAt(i + start) | 1805 __ ldrh(R4, Address(R0, 0)); // this.codeUnitAt(i + start) |
1806 } | 1806 } |
1807 if (other_cid == kOneByteStringCid) { | 1807 if (other_cid == kOneByteStringCid) { |
1808 __ ldrb(NOTFP, Address(R2, 0)); // other.codeUnitAt(i) | 1808 __ ldrb(NOTFP, Address(R2, 0)); // other.codeUnitAt(i) |
1809 } else { | 1809 } else { |
1810 __ ldrh(NOTFP, Address(R2, 0)); // other.codeUnitAt(i) | 1810 __ ldrh(NOTFP, Address(R2, 0)); // other.codeUnitAt(i) |
1811 } | 1811 } |
1812 __ cmp(R4, Operand(NOTFP)); | 1812 __ cmp(R4, Operand(NOTFP)); |
1813 __ b(return_false, NE); | 1813 __ b(return_false, NE); |
1814 | 1814 |
1815 // i++, while (i < len) | 1815 // i++, while (i < len) |
1816 __ AddImmediate(R3, R3, 1); | 1816 __ AddImmediate(R3, R3, 1); |
1817 __ AddImmediate(R0, R0, receiver_cid == kOneByteStringCid ? 1 : 2); | 1817 __ AddImmediate(R0, R0, receiver_cid == kOneByteStringCid ? 1 : 2); |
1818 __ AddImmediate(R2, R2, other_cid == kOneByteStringCid ? 1 : 2); | 1818 __ AddImmediate(R2, R2, other_cid == kOneByteStringCid ? 1 : 2); |
1819 __ cmp(R3, Operand(R9)); | 1819 __ cmp(R3, Operand(R9)); |
1820 __ b(&loop, LT); | 1820 __ b(&loop, LT); |
1821 | 1821 |
1822 __ b(return_true); | 1822 __ b(return_true); |
1823 } | 1823 } |
1824 | 1824 |
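The generated loop is an ordinary code-unit comparison; the two cid parameters only select 1- or 2-byte loads (ldrb vs ldrh) and the matching pointer increments. As a sketch, templated over the element widths:

    #include <cstdint>

    // this.codeUnitAt(start + i) vs other.codeUnitAt(i), for len units.
    template <typename RecvChar, typename OtherChar>
    bool SubstringMatches(const RecvChar* recv, intptr_t start,
                          const OtherChar* other, intptr_t len) {
      for (intptr_t i = 0; i < len; i++) {
        if (recv[start + i] != other[i]) return false;  // b(return_false, NE)
      }
      return true;
    }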
1825 | 1825 |
1826 // bool _substringMatches(int start, String other) | 1826 // bool _substringMatches(int start, String other) |
1827 // This intrinsic handles a OneByteString or TwoByteString receiver with a | 1827 // This intrinsic handles a OneByteString or TwoByteString receiver with a |
1828 // OneByteString other. | 1828 // OneByteString other. |
1829 void Intrinsifier::StringBaseSubstringMatches(Assembler* assembler) { | 1829 void Intrinsifier::StringBaseSubstringMatches(Assembler* assembler) { |
1830 Label fall_through, return_true, return_false, try_two_byte; | 1830 Label fall_through, return_true, return_false, try_two_byte; |
1831 __ ldr(R0, Address(SP, 2 * kWordSize)); // this | 1831 __ ldr(R0, Address(SP, 2 * kWordSize)); // this |
1832 __ ldr(R1, Address(SP, 1 * kWordSize)); // start | 1832 __ ldr(R1, Address(SP, 1 * kWordSize)); // start |
1833 __ ldr(R2, Address(SP, 0 * kWordSize)); // other | 1833 __ ldr(R2, Address(SP, 0 * kWordSize)); // other |
1834 __ Push(R4); // Make ARGS_DESC_REG available. | 1834 __ Push(R4); // Make ARGS_DESC_REG available. |
1835 | 1835 |
1836 __ tst(R1, Operand(kSmiTagMask)); | 1836 __ tst(R1, Operand(kSmiTagMask)); |
1837 __ b(&fall_through, NE); // 'start' is not a Smi. | 1837 __ b(&fall_through, NE); // 'start' is not a Smi. |
1838 | 1838 |
1839 __ CompareClassId(R2, kOneByteStringCid, R3); | 1839 __ CompareClassId(R2, kOneByteStringCid, R3); |
1840 __ b(&fall_through, NE); | 1840 __ b(&fall_through, NE); |
1841 | 1841 |
1842 __ CompareClassId(R0, kOneByteStringCid, R3); | 1842 __ CompareClassId(R0, kOneByteStringCid, R3); |
1843 __ b(&try_two_byte, NE); | 1843 __ b(&try_two_byte, NE); |
1844 | 1844 |
1845 GenerateSubstringMatchesSpecialization(assembler, | 1845 GenerateSubstringMatchesSpecialization(assembler, kOneByteStringCid, |
1846 kOneByteStringCid, | 1846 kOneByteStringCid, &return_true, |
1847 kOneByteStringCid, | |
1848 &return_true, | |
1849 &return_false); | 1847 &return_false); |
1850 | 1848 |
1851 __ Bind(&try_two_byte); | 1849 __ Bind(&try_two_byte); |
1852 __ CompareClassId(R0, kTwoByteStringCid, R3); | 1850 __ CompareClassId(R0, kTwoByteStringCid, R3); |
1853 __ b(&fall_through, NE); | 1851 __ b(&fall_through, NE); |
1854 | 1852 |
1855 GenerateSubstringMatchesSpecialization(assembler, | 1853 GenerateSubstringMatchesSpecialization(assembler, kTwoByteStringCid, |
1856 kTwoByteStringCid, | 1854 kOneByteStringCid, &return_true, |
1857 kOneByteStringCid, | |
1858 &return_true, | |
1859 &return_false); | 1855 &return_false); |
1860 | 1856 |
1861 __ Bind(&return_true); | 1857 __ Bind(&return_true); |
1862 __ Pop(R4); | 1858 __ Pop(R4); |
1863 __ LoadObject(R0, Bool::True()); | 1859 __ LoadObject(R0, Bool::True()); |
1864 __ Ret(); | 1860 __ Ret(); |
1865 | 1861 |
1866 __ Bind(&return_false); | 1862 __ Bind(&return_false); |
1867 __ Pop(R4); | 1863 __ Pop(R4); |
1868 __ LoadObject(R0, Bool::False()); | 1864 __ LoadObject(R0, Bool::False()); |
(...skipping 168 matching lines...)
2037 __ mov(R3, Operand(0), HI); | 2033 __ mov(R3, Operand(0), HI); |
2038 | 2034 |
2039 // Get the class index and insert it into the tags. | 2035 // Get the class index and insert it into the tags. |
2040 // R3: size and bit tags. | 2036 // R3: size and bit tags. |
2041 __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cid)); | 2037 __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cid)); |
2042 __ orr(R3, R3, Operand(TMP)); | 2038 __ orr(R3, R3, Operand(TMP)); |
2043 __ str(R3, FieldAddress(R0, String::tags_offset())); // Store tags. | 2039 __ str(R3, FieldAddress(R0, String::tags_offset())); // Store tags. |
2044 } | 2040 } |
2045 | 2041 |
2046 // Set the length field using the saved length (R8). | 2042 // Set the length field using the saved length (R8). |
2047 __ StoreIntoObjectNoBarrier(R0, | 2043 __ StoreIntoObjectNoBarrier(R0, FieldAddress(R0, String::length_offset()), |
2048 FieldAddress(R0, String::length_offset()), | |
2049 R8); | 2044 R8); |
2050 // Clear hash. | 2045 // Clear hash. |
2051 __ LoadImmediate(TMP, 0); | 2046 __ LoadImmediate(TMP, 0); |
2052 __ StoreIntoObjectNoBarrier(R0, | 2047 __ StoreIntoObjectNoBarrier(R0, FieldAddress(R0, String::hash_offset()), TMP); |
2053 FieldAddress(R0, String::hash_offset()), | |
2054 TMP); | |
2055 | 2048 |
2056 NOT_IN_PRODUCT(__ IncrementAllocationStatsWithSize(R4, R2, space)); | 2049 NOT_IN_PRODUCT(__ IncrementAllocationStatsWithSize(R4, R2, space)); |
2057 __ b(ok); | 2050 __ b(ok); |
2058 | 2051 |
2059 __ Bind(&fail); | 2052 __ Bind(&fail); |
2060 __ b(failure); | 2053 __ b(failure); |
2061 } | 2054 } |
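// The tag word stored above packs the allocation size and the class id into
// a single machine word, in effect (a sketch; the exact bit-field layout is
// owned by RawObject and may change between VM versions):
//
//   tags = SizeTag::encode(size) | ClassIdTag::encode(cid);
//
// When the size exceeds what SizeTag can represent, the HI-predicated mov
// records 0 instead, and the size is later derived from the length field.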
2062 | 2055 |
2063 | 2056 |
2064 // Arg0: OneByteString (receiver). | 2057 // Arg0: OneByteString (receiver). |
2065 // Arg1: Start index as Smi. | 2058 // Arg1: Start index as Smi. |
2066 // Arg2: End index as Smi. | 2059 // Arg2: End index as Smi. |
2067 // The indexes must be valid. | 2060 // The indexes must be valid. |
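// Roughly equivalent Dart, assuming (as stated) that both indexes are valid
// Smis; _allocate and _setAt are hypothetical names for the allocation and
// character-store helpers:
//
//   String _substringUnchecked(int start, int end) {
//     final result = _allocate(end - start);
//     for (int i = 0; i < end - start; i++) {
//       result._setAt(i, this.codeUnitAt(start + i));
//     }
//     return result;
//   }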
2068 void Intrinsifier::OneByteString_substringUnchecked(Assembler* assembler) { | 2061 void Intrinsifier::OneByteString_substringUnchecked(Assembler* assembler) { |
2069 const intptr_t kStringOffset = 2 * kWordSize; | 2062 const intptr_t kStringOffset = 2 * kWordSize; |
2070 const intptr_t kStartIndexOffset = 1 * kWordSize; | 2063 const intptr_t kStartIndexOffset = 1 * kWordSize; |
2071 const intptr_t kEndIndexOffset = 0 * kWordSize; | 2064 const intptr_t kEndIndexOffset = 0 * kWordSize; |
2072 Label fall_through, ok; | 2065 Label fall_through, ok; |
2073 | 2066 |
2074 __ ldr(R2, Address(SP, kEndIndexOffset)); | 2067 __ ldr(R2, Address(SP, kEndIndexOffset)); |
2075 __ ldr(TMP, Address(SP, kStartIndexOffset)); | 2068 __ ldr(TMP, Address(SP, kStartIndexOffset)); |
2076 __ orr(R3, R2, Operand(TMP)); | 2069 __ orr(R3, R2, Operand(TMP)); |
2077 __ tst(R3, Operand(kSmiTagMask)); | 2070 __ tst(R3, Operand(kSmiTagMask)); |
2078 __ b(&fall_through, NE); // 'start' or 'end' is not a Smi. | 2071 __ b(&fall_through, NE); // 'start' or 'end' is not a Smi. |
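// OR-ing 'start' and 'end' first lets a single bit test cover both values:
// the low bit of the OR is 0 only when both Smi tag bits are 0.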
2079 | 2072 |
2080 __ sub(R2, R2, Operand(TMP)); | 2073 __ sub(R2, R2, Operand(TMP)); |
2081 TryAllocateOnebyteString(assembler, &ok, &fall_through); | 2074 TryAllocateOnebyteString(assembler, &ok, &fall_through); |
2082 __ Bind(&ok); | 2075 __ Bind(&ok); |
2083 // R0: new string as tagged pointer. | 2076 // R0: new string as tagged pointer. |
2084 // Copy string. | 2077 // Copy string. |
2085 __ ldr(R3, Address(SP, kStringOffset)); | 2078 __ ldr(R3, Address(SP, kStringOffset)); |
2086 __ ldr(R1, Address(SP, kStartIndexOffset)); | 2079 __ ldr(R1, Address(SP, kStartIndexOffset)); |
(...skipping 77 matching lines...)
2164 // Have same length? | 2157 // Have same length? |
2165 __ ldr(R2, FieldAddress(R0, String::length_offset())); | 2158 __ ldr(R2, FieldAddress(R0, String::length_offset())); |
2166 __ ldr(R3, FieldAddress(R1, String::length_offset())); | 2159 __ ldr(R3, FieldAddress(R1, String::length_offset())); |
2167 __ cmp(R2, Operand(R3)); | 2160 __ cmp(R2, Operand(R3)); |
2168 __ b(&is_false, NE); | 2161 __ b(&is_false, NE); |
2169 | 2162 |
2170 // Check contents, no fall-through possible. | 2163 // Check contents, no fall-through possible. |
2171 // TODO(zra): try out other sequences. | 2164 // TODO(zra): try out other sequences. |
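// Dart-level effect of the comparison below (a sketch; R2 counts down while
// the two data pointers advance):
//
//   for (int i = 0; i < length; i++) {
//     if (this.codeUnitAt(i) != other.codeUnitAt(i)) return false;
//   }
//   return true;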
2172 ASSERT((string_cid == kOneByteStringCid) || | 2165 ASSERT((string_cid == kOneByteStringCid) || |
2173 (string_cid == kTwoByteStringCid)); | 2166 (string_cid == kTwoByteStringCid)); |
2174 const intptr_t offset = (string_cid == kOneByteStringCid) ? | 2167 const intptr_t offset = (string_cid == kOneByteStringCid) |
2175 OneByteString::data_offset() : TwoByteString::data_offset(); | 2168 ? OneByteString::data_offset() |
| 2169 : TwoByteString::data_offset(); |
2176 __ AddImmediate(R0, offset - kHeapObjectTag); | 2170 __ AddImmediate(R0, offset - kHeapObjectTag); |
2177 __ AddImmediate(R1, offset - kHeapObjectTag); | 2171 __ AddImmediate(R1, offset - kHeapObjectTag); |
2178 __ SmiUntag(R2); | 2172 __ SmiUntag(R2); |
2179 __ Bind(&loop); | 2173 __ Bind(&loop); |
2180 __ AddImmediate(R2, -1); | 2174 __ AddImmediate(R2, -1); |
2181 __ cmp(R2, Operand(0)); | 2175 __ cmp(R2, Operand(0)); |
2182 __ b(&is_true, LT); | 2176 __ b(&is_true, LT); |
2183 if (string_cid == kOneByteStringCid) { | 2177 if (string_cid == kOneByteStringCid) { |
2184 __ ldrb(R3, Address(R0)); | 2178 __ ldrb(R3, Address(R0)); |
2185 __ ldrb(R4, Address(R1)); | 2179 __ ldrb(R4, Address(R1)); |
(...skipping 65 matching lines...)
2251 } | 2245 } |
2252 | 2246 |
2253 | 2247 |
2254 // On stack: user tag (+0). | 2248 // On stack: user tag (+0). |
2255 void Intrinsifier::UserTag_makeCurrent(Assembler* assembler) { | 2249 void Intrinsifier::UserTag_makeCurrent(Assembler* assembler) { |
2256 // R1: Isolate. | 2250 // R1: Isolate. |
2257 __ LoadIsolate(R1); | 2251 __ LoadIsolate(R1); |
2258 // R0: Current user tag. | 2252 // R0: Current user tag. |
2259 __ ldr(R0, Address(R1, Isolate::current_tag_offset())); | 2253 __ ldr(R0, Address(R1, Isolate::current_tag_offset())); |
2260 // R2: UserTag. | 2254 // R2: UserTag. |
2261 __ ldr(R2, Address(SP, + 0 * kWordSize)); | 2255 __ ldr(R2, Address(SP, +0 * kWordSize)); |
2262 // Set Isolate::current_tag_. | 2256 // Set Isolate::current_tag_. |
2263 __ str(R2, Address(R1, Isolate::current_tag_offset())); | 2257 __ str(R2, Address(R1, Isolate::current_tag_offset())); |
2264 // R2: UserTag's tag. | 2258 // R2: UserTag's tag. |
2265 __ ldr(R2, FieldAddress(R2, UserTag::tag_offset())); | 2259 __ ldr(R2, FieldAddress(R2, UserTag::tag_offset())); |
2266 // Set Isolate::user_tag_. | 2260 // Set Isolate::user_tag_. |
2267 __ str(R2, Address(R1, Isolate::user_tag_offset())); | 2261 __ str(R2, Address(R1, Isolate::user_tag_offset())); |
2268 __ Ret(); | 2262 __ Ret(); |
2269 } | 2263 } |
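// This implements UserTag.makeCurrent() from dart:developer, which installs
// this tag on the isolate and returns the previously current tag. Roughly
// (a sketch; the isolate fields are VM-internal and the names hypothetical):
//
//   UserTag makeCurrent() {
//     final previous = _currentTag;
//     _currentTag = this;
//     _userTagValue = this.tag;
//     return previous;
//   }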
2270 | 2264 |
2271 | 2265 |
(...skipping 23 matching lines...)
2295 __ ldr(R0, Address(R0, TimelineStream::enabled_offset())); | 2289 __ ldr(R0, Address(R0, TimelineStream::enabled_offset())); |
2296 __ cmp(R0, Operand(0)); | 2290 __ cmp(R0, Operand(0)); |
2297 __ LoadObject(R0, Bool::True(), NE); | 2291 __ LoadObject(R0, Bool::True(), NE); |
2298 __ LoadObject(R0, Bool::False(), EQ); | 2292 __ LoadObject(R0, Bool::False(), EQ); |
2299 __ Ret(); | 2293 __ Ret(); |
2300 } | 2294 } |
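// The two predicated LoadObject instructions above select the boolean result
// without branching. In Dart terms the check is roughly (a sketch; the
// intrinsified method's signature is elided above):
//
//   bool _isDartStreamEnabled() => _dartTimelineStream.enabled != 0;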
2301 | 2295 |
2302 } // namespace dart | 2296 } // namespace dart |
2303 | 2297 |
2304 #endif // defined TARGET_ARCH_ARM | 2298 #endif // defined TARGET_ARCH_ARM |