OLD | NEW |
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_X64. | 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_X64. |
6 #if defined(TARGET_ARCH_X64) | 6 #if defined(TARGET_ARCH_X64) |
7 | 7 |
8 #include "vm/intrinsifier.h" | 8 #include "vm/intrinsifier.h" |
9 | 9 |
10 #include "vm/assembler.h" | 10 #include "vm/assembler.h" |
(...skipping 11 matching lines...)
22 // R10: Arguments descriptor | 22 // R10: Arguments descriptor |
23 // TOS: Return address | 23 // TOS: Return address |
24 // The R10 register can be destroyed only if there is no slow-path, i.e. | 24 // The R10 register can be destroyed only if there is no slow-path, i.e. |
25 // if the intrinsified method always executes a return. | 25 // if the intrinsified method always executes a return. |
26 // The RBP register should not be modified, because it is used by the profiler. | 26 // The RBP register should not be modified, because it is used by the profiler. |
27 // The PP and THR registers (see constants_x64.h) must be preserved. | 27 // The PP and THR registers (see constants_x64.h) must be preserved. |
28 | 28 |
29 #define __ assembler-> | 29 #define __ assembler-> |
30 | 30 |
31 | 31 |
32 intptr_t Intrinsifier::ParameterSlotFromSp() { return 0; } | 32 intptr_t Intrinsifier::ParameterSlotFromSp() { |
| 33 return 0; |
| 34 } |
33 | 35 |
34 | 36 |
35 static bool IsABIPreservedRegister(Register reg) { | 37 static bool IsABIPreservedRegister(Register reg) { |
36 return ((1 << reg) & CallingConventions::kCalleeSaveCpuRegisters) != 0; | 38 return ((1 << reg) & CallingConventions::kCalleeSaveCpuRegisters) != 0; |
37 } | 39 } |
38 | 40 |
39 | 41 |
40 void Intrinsifier::IntrinsicCallPrologue(Assembler* assembler) { | 42 void Intrinsifier::IntrinsicCallPrologue(Assembler* assembler) { |
41 ASSERT(IsABIPreservedRegister(CODE_REG)); | 43 ASSERT(IsABIPreservedRegister(CODE_REG)); |
42 ASSERT(!IsABIPreservedRegister(ARGS_DESC_REG)); | 44 ASSERT(!IsABIPreservedRegister(ARGS_DESC_REG)); |
(...skipping 11 matching lines...)
54 assembler->movq(ARGS_DESC_REG, CALLEE_SAVED_TEMP); | 56 assembler->movq(ARGS_DESC_REG, CALLEE_SAVED_TEMP); |
55 } | 57 } |
56 | 58 |
57 | 59 |
58 void Intrinsifier::ObjectArraySetIndexed(Assembler* assembler) { | 60 void Intrinsifier::ObjectArraySetIndexed(Assembler* assembler) { |
59 if (Isolate::Current()->type_checks()) { | 61 if (Isolate::Current()->type_checks()) { |
60 return; | 62 return; |
61 } | 63 } |
62 | 64 |
63 Label fall_through; | 65 Label fall_through; |
64 __ movq(RDX, Address(RSP, + 1 * kWordSize)); // Value. | 66 __ movq(RDX, Address(RSP, +1 * kWordSize)); // Value. |
65 __ movq(RCX, Address(RSP, + 2 * kWordSize)); // Index. | 67 __ movq(RCX, Address(RSP, +2 * kWordSize)); // Index. |
66 __ movq(RAX, Address(RSP, + 3 * kWordSize)); // Array. | 68 __ movq(RAX, Address(RSP, +3 * kWordSize)); // Array. |
67 __ testq(RCX, Immediate(kSmiTagMask)); | 69 __ testq(RCX, Immediate(kSmiTagMask)); |
68 __ j(NOT_ZERO, &fall_through); | 70 __ j(NOT_ZERO, &fall_through); |
69 // Range check. | 71 // Range check. |
70 __ cmpq(RCX, FieldAddress(RAX, Array::length_offset())); | 72 __ cmpq(RCX, FieldAddress(RAX, Array::length_offset())); |
71 // Runtime throws exception. | 73 // Runtime throws exception. |
72 __ j(ABOVE_EQUAL, &fall_through); | 74 __ j(ABOVE_EQUAL, &fall_through); |
73 // Note that RCX is a Smi, i.e., times 2. | 75 // Note that RCX is a Smi, i.e., times 2. |
74 ASSERT(kSmiTagShift == 1); | 76 ASSERT(kSmiTagShift == 1); |
75 // Destroy RCX (ic data) as we will not continue in the function. | 77 // Destroy RCX (ic data) as we will not continue in the function. |
76 __ StoreIntoObject(RAX, | 78 __ StoreIntoObject(RAX, FieldAddress(RAX, RCX, TIMES_4, Array::data_offset()), |
77 FieldAddress(RAX, RCX, TIMES_4, Array::data_offset()), | |
78 RDX); | 79 RDX); |
79 // Caller is responsible for preserving the value if necessary. | 80 // Caller is responsible for preserving the value if necessary. |
80 __ ret(); | 81 __ ret(); |
81 __ Bind(&fall_through); | 82 __ Bind(&fall_through); |
82 } | 83 } |
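The TIMES_4 scale works because the index in RCX is still a tagged Smi: with kSmiTagShift == 1 the tagged value is the index shifted left by one, so scaling it by 4 equals scaling the untagged index by 8, i.e. by kWordSize on x64. A minimal standalone check (hypothetical values, not VM code):

#include <cstdint>
int main() {
  const int64_t index = 7;            // untagged index
  const int64_t tagged = index << 1;  // Smi representation (kSmiTagShift == 1)
  // TIMES_4 on the tagged index == TIMES_8 (kWordSize) on the untagged index.
  return (tagged * 4 == index * 8) ? 0 : 1;
}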
83 | 84 |
84 | 85 |
85 // Allocate a GrowableObjectArray using the backing array specified. | 86 // Allocate a GrowableObjectArray using the backing array specified. |
86 // On stack: type argument (+2), data (+1), return-address (+0). | 87 // On stack: type argument (+2), data (+1), return-address (+0). |
87 void Intrinsifier::GrowableArray_Allocate(Assembler* assembler) { | 88 void Intrinsifier::GrowableArray_Allocate(Assembler* assembler) { |
88 // This snippet of inlined code uses the following registers: | 89 // This snippet of inlined code uses the following registers: |
89 // RAX, RCX, R13 | 90 // RAX, RCX, R13 |
90 // and the newly allocated object is returned in RAX. | 91 // and the newly allocated object is returned in RAX. |
91 const intptr_t kTypeArgumentsOffset = 2 * kWordSize; | 92 const intptr_t kTypeArgumentsOffset = 2 * kWordSize; |
92 const intptr_t kArrayOffset = 1 * kWordSize; | 93 const intptr_t kArrayOffset = 1 * kWordSize; |
93 Label fall_through; | 94 Label fall_through; |
94 | 95 |
95 // Try allocating in new space. | 96 // Try allocating in new space. |
96 const Class& cls = Class::Handle( | 97 const Class& cls = Class::Handle( |
97 Isolate::Current()->object_store()->growable_object_array_class()); | 98 Isolate::Current()->object_store()->growable_object_array_class()); |
98 __ TryAllocate(cls, &fall_through, Assembler::kFarJump, RAX, R13); | 99 __ TryAllocate(cls, &fall_through, Assembler::kFarJump, RAX, R13); |
99 | 100 |
100 // Store backing array object in growable array object. | 101 // Store backing array object in growable array object. |
101 __ movq(RCX, Address(RSP, kArrayOffset)); // data argument. | 102 __ movq(RCX, Address(RSP, kArrayOffset)); // data argument. |
102 // RAX is new, no barrier needed. | 103 // RAX is new, no barrier needed. |
103 __ StoreIntoObjectNoBarrier( | 104 __ StoreIntoObjectNoBarrier( |
104 RAX, | 105 RAX, FieldAddress(RAX, GrowableObjectArray::data_offset()), RCX); |
105 FieldAddress(RAX, GrowableObjectArray::data_offset()), | |
106 RCX); | |
107 | 106 |
108 // RAX: new growable array object start as a tagged pointer. | 107 // RAX: new growable array object start as a tagged pointer. |
109 // Store the type argument field in the growable array object. | 108 // Store the type argument field in the growable array object. |
110 __ movq(RCX, Address(RSP, kTypeArgumentsOffset)); // type argument. | 109 __ movq(RCX, Address(RSP, kTypeArgumentsOffset)); // type argument. |
111 __ StoreIntoObjectNoBarrier( | 110 __ StoreIntoObjectNoBarrier( |
112 RAX, | 111 RAX, FieldAddress(RAX, GrowableObjectArray::type_arguments_offset()), |
113 FieldAddress(RAX, GrowableObjectArray::type_arguments_offset()), | |
114 RCX); | 112 RCX); |
115 | 113 |
116 // Set the length field in the growable array object to 0. | 114 // Set the length field in the growable array object to 0. |
117 __ ZeroInitSmiField(FieldAddress(RAX, GrowableObjectArray::length_offset())); | 115 __ ZeroInitSmiField(FieldAddress(RAX, GrowableObjectArray::length_offset())); |
118 __ ret(); // returns the newly allocated object in RAX. | 116 __ ret(); // returns the newly allocated object in RAX. |
119 | 117 |
120 __ Bind(&fall_through); | 118 __ Bind(&fall_through); |
121 } | 119 } |
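Both stores above may skip the write barrier because RAX was just allocated in new space: a freshly allocated object cannot yet be referenced from old space through these fields, so no remembered-set update is needed. The "RAX is new, no barrier needed" comment applies equally to the type-argument store.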
122 | 120 |
123 | 121 |
124 // Add an element to growable array if it doesn't need to grow, otherwise | 122 // Add an element to growable array if it doesn't need to grow, otherwise |
125 // call into regular code. | 123 // call into regular code. |
126 // On stack: growable array (+2), value (+1), return-address (+0). | 124 // On stack: growable array (+2), value (+1), return-address (+0). |
127 void Intrinsifier::GrowableArray_add(Assembler* assembler) { | 125 void Intrinsifier::GrowableArray_add(Assembler* assembler) { |
128 // In checked mode we need to check the incoming argument. | 126 // In checked mode we need to check the incoming argument. |
129 if (Isolate::Current()->type_checks()) return; | 127 if (Isolate::Current()->type_checks()) return; |
130 Label fall_through; | 128 Label fall_through; |
131 __ movq(RAX, Address(RSP, + 2 * kWordSize)); // Array. | 129 __ movq(RAX, Address(RSP, +2 * kWordSize)); // Array. |
132 __ movq(RCX, FieldAddress(RAX, GrowableObjectArray::length_offset())); | 130 __ movq(RCX, FieldAddress(RAX, GrowableObjectArray::length_offset())); |
133 // RCX: length. | 131 // RCX: length. |
134 __ movq(RDX, FieldAddress(RAX, GrowableObjectArray::data_offset())); | 132 __ movq(RDX, FieldAddress(RAX, GrowableObjectArray::data_offset())); |
135 // RDX: data. | 133 // RDX: data. |
136 // Compare length with capacity. | 134 // Compare length with capacity. |
137 __ cmpq(RCX, FieldAddress(RDX, Array::length_offset())); | 135 __ cmpq(RCX, FieldAddress(RDX, Array::length_offset())); |
138 __ j(EQUAL, &fall_through); // Must grow data. | 136 __ j(EQUAL, &fall_through); // Must grow data. |
139 // len = len + 1; | 137 // len = len + 1; |
140 __ IncrementSmiField(FieldAddress(RAX, GrowableObjectArray::length_offset()), | 138 __ IncrementSmiField(FieldAddress(RAX, GrowableObjectArray::length_offset()), |
141 1); | 139 1); |
142 __ movq(RAX, Address(RSP, + 1 * kWordSize)); // Value | 140 __ movq(RAX, Address(RSP, +1 * kWordSize)); // Value |
143 ASSERT(kSmiTagShift == 1); | 141 ASSERT(kSmiTagShift == 1); |
144 __ StoreIntoObject(RDX, | 142 __ StoreIntoObject(RDX, FieldAddress(RDX, RCX, TIMES_4, Array::data_offset()), |
145 FieldAddress(RDX, RCX, TIMES_4, Array::data_offset()), | |
146 RAX); | 143 RAX); |
147 __ LoadObject(RAX, Object::null_object()); | 144 __ LoadObject(RAX, Object::null_object()); |
148 __ ret(); | 145 __ ret(); |
149 __ Bind(&fall_through); | 146 __ Bind(&fall_through); |
150 } | 147 } |
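What the fast path implements, roughly, as a comment-form sketch (growing the backing array falls through to the regular Dart code):

// if (array.length == array.data.length) goto fall_through;  // At capacity.
// array.length += 1;
// array.data[array.length - 1] = value;  // Barrier needed: data may be old.
// return null;

Note that the backing array's own length doubles as the capacity, which is why the capacity check compares against Array::length_offset() of the data array.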
151 | 148 |
152 | 149 |
153 #define TYPED_ARRAY_ALLOCATION(type_name, cid, max_len, scale_factor) \ | 150 #define TYPED_ARRAY_ALLOCATION(type_name, cid, max_len, scale_factor) \ |
154 Label fall_through; \ | 151 Label fall_through; \ |
155 const intptr_t kArrayLengthStackOffset = 1 * kWordSize; \ | 152 const intptr_t kArrayLengthStackOffset = 1 * kWordSize; \ |
156 NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, &fall_through, false)); \ | 153 NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, &fall_through, false)); \ |
157 __ movq(RDI, Address(RSP, kArrayLengthStackOffset)); /* Array length. */ \ | 154 __ movq(RDI, Address(RSP, kArrayLengthStackOffset)); /* Array length. */ \ |
158 /* Check that length is a positive Smi. */ \ | 155 /* Check that length is a positive Smi. */ \ |
159 /* RDI: requested array length argument. */ \ | 156 /* RDI: requested array length argument. */ \ |
160 __ testq(RDI, Immediate(kSmiTagMask)); \ | 157 __ testq(RDI, Immediate(kSmiTagMask)); \ |
161 __ j(NOT_ZERO, &fall_through); \ | 158 __ j(NOT_ZERO, &fall_through); \ |
162 __ cmpq(RDI, Immediate(0)); \ | 159 __ cmpq(RDI, Immediate(0)); \ |
163 __ j(LESS, &fall_through); \ | 160 __ j(LESS, &fall_through); \ |
164 __ SmiUntag(RDI); \ | 161 __ SmiUntag(RDI); \ |
165 /* Check for maximum allowed length. */ \ | 162 /* Check for maximum allowed length. */ \ |
166 /* RDI: untagged array length. */ \ | 163 /* RDI: untagged array length. */ \ |
167 __ cmpq(RDI, Immediate(max_len)); \ | 164 __ cmpq(RDI, Immediate(max_len)); \ |
(...skipping 41 matching lines...)
209 __ j(ABOVE, &size_tag_overflow, Assembler::kNearJump); \ | 206 __ j(ABOVE, &size_tag_overflow, Assembler::kNearJump); \ |
210 __ shlq(RDI, Immediate(RawObject::kSizeTagPos - kObjectAlignmentLog2)); \ | 207 __ shlq(RDI, Immediate(RawObject::kSizeTagPos - kObjectAlignmentLog2)); \ |
211 __ jmp(&done, Assembler::kNearJump); \ | 208 __ jmp(&done, Assembler::kNearJump); \ |
212 \ | 209 \ |
213 __ Bind(&size_tag_overflow); \ | 210 __ Bind(&size_tag_overflow); \ |
214 __ movq(RDI, Immediate(0)); \ | 211 __ movq(RDI, Immediate(0)); \ |
215 __ Bind(&done); \ | 212 __ Bind(&done); \ |
216 \ | 213 \ |
217 /* Get the class index and insert it into the tags. */ \ | 214 /* Get the class index and insert it into the tags. */ \ |
218 __ orq(RDI, Immediate(RawObject::ClassIdTag::encode(cid))); \ | 215 __ orq(RDI, Immediate(RawObject::ClassIdTag::encode(cid))); \ |
219 __ movq(FieldAddress(RAX, type_name::tags_offset()), RDI); /* Tags. */ \ | 216 __ movq(FieldAddress(RAX, type_name::tags_offset()), RDI); /* Tags. */ \ |
220 } \ | 217 } \ |
221 /* Set the length field. */ \ | 218 /* Set the length field. */ \ |
222 /* RAX: new object start as a tagged pointer. */ \ | 219 /* RAX: new object start as a tagged pointer. */ \ |
223 /* RCX: new object end address. */ \ | 220 /* RCX: new object end address. */ \ |
224 __ movq(RDI, Address(RSP, kArrayLengthStackOffset)); /* Array length. */ \ | 221 __ movq(RDI, Address(RSP, kArrayLengthStackOffset)); /* Array length. */ \ |
225 __ StoreIntoObjectNoBarrier(RAX, \ | 222 __ StoreIntoObjectNoBarrier( \ |
226 FieldAddress(RAX, type_name::length_offset()), \ | 223 RAX, FieldAddress(RAX, type_name::length_offset()), RDI); \ |
227 RDI); \ | |
228 /* Initialize all array elements to 0. */ \ | 224 /* Initialize all array elements to 0. */ \ |
229 /* RAX: new object start as a tagged pointer. */ \ | 225 /* RAX: new object start as a tagged pointer. */ \ |
230 /* RCX: new object end address. */ \ | 226 /* RCX: new object end address. */ \ |
231 /* RDI: iterator which initially points to the start of the variable */ \ | 227 /* RDI: iterator which initially points to the start of the variable */ \ |
232 /* data area to be initialized. */ \ | 228 /* data area to be initialized. */ \ |
233 /* RBX: scratch register. */ \ | 229 /* RBX: scratch register. */ \ |
234 __ xorq(RBX, RBX); /* Zero. */ \ | 230 __ xorq(RBX, RBX); /* Zero. */ \ |
235 __ leaq(RDI, FieldAddress(RAX, sizeof(Raw##type_name))); \ | 231 __ leaq(RDI, FieldAddress(RAX, sizeof(Raw##type_name))); \ |
236 Label done, init_loop; \ | 232 Label done, init_loop; \ |
237 __ Bind(&init_loop); \ | 233 __ Bind(&init_loop); \ |
238 __ cmpq(RDI, RCX); \ | 234 __ cmpq(RDI, RCX); \ |
239 __ j(ABOVE_EQUAL, &done, Assembler::kNearJump); \ | 235 __ j(ABOVE_EQUAL, &done, Assembler::kNearJump); \ |
240 __ movq(Address(RDI, 0), RBX); \ | 236 __ movq(Address(RDI, 0), RBX); \ |
241 __ addq(RDI, Immediate(kWordSize)); \ | 237 __ addq(RDI, Immediate(kWordSize)); \ |
242 __ jmp(&init_loop, Assembler::kNearJump); \ | 238 __ jmp(&init_loop, Assembler::kNearJump); \ |
243 __ Bind(&done); \ | 239 __ Bind(&done); \ |
244 \ | 240 \ |
245 __ ret(); \ | 241 __ ret(); \ |
246 __ Bind(&fall_through); \ | 242 __ Bind(&fall_through); |
247 | 243 |
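A note on the size_tag_overflow path: the size field of the tag word can only encode sizes up to a fixed limit, so for larger instances the intrinsic stores 0 there and the VM recovers the actual size elsewhere (for typed data, from the length field). Writing Immediate(0) is the conventional "too big for the size tag" marker, not a bug.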
248 | 244 |
249 static ScaleFactor GetScaleFactor(intptr_t size) { | 245 static ScaleFactor GetScaleFactor(intptr_t size) { |
250 switch (size) { | 246 switch (size) { |
251 case 1: return TIMES_1; | 247 case 1: |
252 case 2: return TIMES_2; | 248 return TIMES_1; |
253 case 4: return TIMES_4; | 249 case 2: |
254 case 8: return TIMES_8; | 250 return TIMES_2; |
255 case 16: return TIMES_16; | 251 case 4: |
| 252 return TIMES_4; |
| 253 case 8: |
| 254 return TIMES_8; |
| 255 case 16: |
| 256 return TIMES_16; |
256 } | 257 } |
257 UNREACHABLE(); | 258 UNREACHABLE(); |
258 return static_cast<ScaleFactor>(0); | 259 return static_cast<ScaleFactor>(0); |
259 } | 260 } |
260 | 261 |
261 | 262 |
262 #define TYPED_DATA_ALLOCATOR(clazz) \ | 263 #define TYPED_DATA_ALLOCATOR(clazz) \ |
263 void Intrinsifier::TypedData_##clazz##_factory(Assembler* assembler) { \ | 264 void Intrinsifier::TypedData_##clazz##_factory(Assembler* assembler) { \ |
264 intptr_t size = TypedData::ElementSizeInBytes(kTypedData##clazz##Cid); \ | 265 intptr_t size = TypedData::ElementSizeInBytes(kTypedData##clazz##Cid); \ |
265 intptr_t max_len = TypedData::MaxElements(kTypedData##clazz##Cid); \ | 266 intptr_t max_len = TypedData::MaxElements(kTypedData##clazz##Cid); \ |
266 ScaleFactor scale = GetScaleFactor(size); \ | 267 ScaleFactor scale = GetScaleFactor(size); \ |
267 TYPED_ARRAY_ALLOCATION(TypedData, kTypedData##clazz##Cid, max_len, scale); \ | 268 TYPED_ARRAY_ALLOCATION(TypedData, kTypedData##clazz##Cid, max_len, scale); \ |
268 } | 269 } |
269 CLASS_LIST_TYPED_DATA(TYPED_DATA_ALLOCATOR) | 270 CLASS_LIST_TYPED_DATA(TYPED_DATA_ALLOCATOR) |
270 #undef TYPED_DATA_ALLOCATOR | 271 #undef TYPED_DATA_ALLOCATOR |
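CLASS_LIST_TYPED_DATA is an X-macro that stamps out one factory per typed-data class. For Uint8Array, for example, the expansion is roughly (a sketch of the generated code, not its verbatim output):

void Intrinsifier::TypedData_Uint8Array_factory(Assembler* assembler) {
  intptr_t size = TypedData::ElementSizeInBytes(kTypedDataUint8ArrayCid);  // 1.
  intptr_t max_len = TypedData::MaxElements(kTypedDataUint8ArrayCid);
  ScaleFactor scale = GetScaleFactor(size);  // TIMES_1.
  TYPED_ARRAY_ALLOCATION(TypedData, kTypedDataUint8ArrayCid, max_len, scale);
}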
271 | 272 |
272 | 273 |
273 // Tests if the two topmost arguments are Smis; jumps to label not_smi if not. | 274 // Tests if the two topmost arguments are Smis; jumps to label not_smi if not. |
274 // Topmost argument is in RAX. | 275 // Topmost argument is in RAX. |
275 static void TestBothArgumentsSmis(Assembler* assembler, Label* not_smi) { | 276 static void TestBothArgumentsSmis(Assembler* assembler, Label* not_smi) { |
276 __ movq(RAX, Address(RSP, + 1 * kWordSize)); | 277 __ movq(RAX, Address(RSP, +1 * kWordSize)); |
277 __ movq(RCX, Address(RSP, + 2 * kWordSize)); | 278 __ movq(RCX, Address(RSP, +2 * kWordSize)); |
278 __ orq(RCX, RAX); | 279 __ orq(RCX, RAX); |
279 __ testq(RCX, Immediate(kSmiTagMask)); | 280 __ testq(RCX, Immediate(kSmiTagMask)); |
280 __ j(NOT_ZERO, not_smi); | 281 __ j(NOT_ZERO, not_smi); |
281 } | 282 } |
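The orq/testq pair checks both tags with a single test: a value is a Smi iff bit 0 is clear, and ((a | b) & kSmiTagMask) == 0 exactly when bit 0 is clear in both a and b. A non-Smi in either slot sets the bit and takes the not_smi branch.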
282 | 283 |
283 | 284 |
284 void Intrinsifier::Integer_addFromInteger(Assembler* assembler) { | 285 void Intrinsifier::Integer_addFromInteger(Assembler* assembler) { |
285 Label fall_through; | 286 Label fall_through; |
286 TestBothArgumentsSmis(assembler, &fall_through); | 287 TestBothArgumentsSmis(assembler, &fall_through); |
287 // RAX contains right argument. | 288 // RAX contains right argument. |
288 __ addq(RAX, Address(RSP, + 2 * kWordSize)); | 289 __ addq(RAX, Address(RSP, +2 * kWordSize)); |
289 __ j(OVERFLOW, &fall_through, Assembler::kNearJump); | 290 __ j(OVERFLOW, &fall_through, Assembler::kNearJump); |
290 // Result is in RAX. | 291 // Result is in RAX. |
291 __ ret(); | 292 __ ret(); |
292 __ Bind(&fall_through); | 293 __ Bind(&fall_through); |
293 } | 294 } |
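Addition works directly on the tagged values: with kSmiTagShift == 1, (a << 1) + (b << 1) == (a + b) << 1, so the sum is already a correctly tagged Smi and no untagging is needed; the OVERFLOW branch catches results outside the 63-bit Smi range.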
294 | 295 |
295 | 296 |
296 void Intrinsifier::Integer_add(Assembler* assembler) { | 297 void Intrinsifier::Integer_add(Assembler* assembler) { |
297 Integer_addFromInteger(assembler); | 298 Integer_addFromInteger(assembler); |
298 } | 299 } |
299 | 300 |
300 | 301 |
301 void Intrinsifier::Integer_subFromInteger(Assembler* assembler) { | 302 void Intrinsifier::Integer_subFromInteger(Assembler* assembler) { |
302 Label fall_through; | 303 Label fall_through; |
303 TestBothArgumentsSmis(assembler, &fall_through); | 304 TestBothArgumentsSmis(assembler, &fall_through); |
304 // RAX contains the right argument, which is the actual minuend of the subtraction. | 305 // RAX contains the right argument, which is the actual minuend of the subtraction. |
305 __ subq(RAX, Address(RSP, + 2 * kWordSize)); | 306 __ subq(RAX, Address(RSP, +2 * kWordSize)); |
306 __ j(OVERFLOW, &fall_through, Assembler::kNearJump); | 307 __ j(OVERFLOW, &fall_through, Assembler::kNearJump); |
307 // Result is in RAX. | 308 // Result is in RAX. |
308 __ ret(); | 309 __ ret(); |
309 __ Bind(&fall_through); | 310 __ Bind(&fall_through); |
310 } | 311 } |
311 | 312 |
312 | 313 |
313 void Intrinsifier::Integer_sub(Assembler* assembler) { | 314 void Intrinsifier::Integer_sub(Assembler* assembler) { |
314 Label fall_through; | 315 Label fall_through; |
315 TestBothArgumentsSmis(assembler, &fall_through); | 316 TestBothArgumentsSmis(assembler, &fall_through); |
316 // RAX contains the right argument, which is the actual subtrahend of the subtraction. | 317 // RAX contains the right argument, which is the actual subtrahend of the subtraction. |
317 __ movq(RCX, RAX); | 318 __ movq(RCX, RAX); |
318 __ movq(RAX, Address(RSP, + 2 * kWordSize)); | 319 __ movq(RAX, Address(RSP, +2 * kWordSize)); |
319 __ subq(RAX, RCX); | 320 __ subq(RAX, RCX); |
320 __ j(OVERFLOW, &fall_through, Assembler::kNearJump); | 321 __ j(OVERFLOW, &fall_through, Assembler::kNearJump); |
321 // Result is in RAX. | 322 // Result is in RAX. |
322 __ ret(); | 323 __ ret(); |
323 __ Bind(&fall_through); | 324 __ Bind(&fall_through); |
324 } | 325 } |
325 | 326 |
326 | 327 |
327 | |
328 void Intrinsifier::Integer_mulFromInteger(Assembler* assembler) { | 328 void Intrinsifier::Integer_mulFromInteger(Assembler* assembler) { |
329 Label fall_through; | 329 Label fall_through; |
330 TestBothArgumentsSmis(assembler, &fall_through); | 330 TestBothArgumentsSmis(assembler, &fall_through); |
331 // RAX is the right argument. | 331 // RAX is the right argument. |
332 ASSERT(kSmiTag == 0); // Adjust code below if not the case. | 332 ASSERT(kSmiTag == 0); // Adjust code below if not the case. |
333 __ SmiUntag(RAX); | 333 __ SmiUntag(RAX); |
334 __ imulq(RAX, Address(RSP, + 2 * kWordSize)); | 334 __ imulq(RAX, Address(RSP, +2 * kWordSize)); |
335 __ j(OVERFLOW, &fall_through, Assembler::kNearJump); | 335 __ j(OVERFLOW, &fall_through, Assembler::kNearJump); |
336 // Result is in RAX. | 336 // Result is in RAX. |
337 __ ret(); | 337 __ ret(); |
338 __ Bind(&fall_through); | 338 __ Bind(&fall_through); |
339 } | 339 } |
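Unlike addition, multiplication needs one operand untagged: (a << 1) * (b << 1) == (a * b) << 2, one tag shift too many. Untagging RAX first gives a * (b << 1) == (a * b) << 1, which is exactly the tagged product.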
340 | 340 |
341 | 341 |
342 void Intrinsifier::Integer_mul(Assembler* assembler) { | 342 void Intrinsifier::Integer_mul(Assembler* assembler) { |
343 Integer_mulFromInteger(assembler); | 343 Integer_mulFromInteger(assembler); |
344 } | 344 } |
(...skipping 16 matching lines...)
361 __ cmpq(RAX, Immediate(0)); | 361 __ cmpq(RAX, Immediate(0)); |
362 __ j(EQUAL, &return_zero, Assembler::kNearJump); | 362 __ j(EQUAL, &return_zero, Assembler::kNearJump); |
363 __ cmpq(RAX, RCX); | 363 __ cmpq(RAX, RCX); |
364 __ j(EQUAL, &return_zero, Assembler::kNearJump); | 364 __ j(EQUAL, &return_zero, Assembler::kNearJump); |
365 | 365 |
366 // Check if result equals left. | 366 // Check if result equals left. |
367 __ cmpq(RAX, Immediate(0)); | 367 __ cmpq(RAX, Immediate(0)); |
368 __ j(LESS, &try_modulo, Assembler::kNearJump); | 368 __ j(LESS, &try_modulo, Assembler::kNearJump); |
369 // left is positive. | 369 // left is positive. |
370 __ cmpq(RAX, RCX); | 370 __ cmpq(RAX, RCX); |
371 __ j(GREATER, &try_modulo, Assembler::kNearJump); | 371 __ j(GREATER, &try_modulo, Assembler::kNearJump); |
372 // left is less than right, result is left (RAX). | 372 // left is less than right, result is left (RAX). |
373 __ ret(); | 373 __ ret(); |
374 | 374 |
375 __ Bind(&return_zero); | 375 __ Bind(&return_zero); |
376 __ xorq(RAX, RAX); | 376 __ xorq(RAX, RAX); |
377 __ ret(); | 377 __ ret(); |
378 | 378 |
379 __ Bind(&try_modulo); | 379 __ Bind(&try_modulo); |
380 | 380 |
381 // Check if both operands fit into 32 bits, as idiv with 64-bit operands | 381 // Check if both operands fit into 32 bits, as idiv with 64-bit operands |
(...skipping 31 matching lines...)
413 // if (res < 0) { | 413 // if (res < 0) { |
414 // if (right < 0) { | 414 // if (right < 0) { |
415 // res = res - right; | 415 // res = res - right; |
416 // } else { | 416 // } else { |
417 // res = res + right; | 417 // res = res + right; |
418 // } | 418 // } |
419 // } | 419 // } |
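// Worked example of the fix-up (Dart's % always yields a non-negative result):
//   -5 % 3:  idiv remainder res = -2; right > 0, so res = -2 + 3 = 1.
//   -5 % -3: idiv remainder res = -2; right < 0, so res = -2 - (-3) = 1.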
420 void Intrinsifier::Integer_moduloFromInteger(Assembler* assembler) { | 420 void Intrinsifier::Integer_moduloFromInteger(Assembler* assembler) { |
421 Label fall_through, negative_result; | 421 Label fall_through, negative_result; |
422 TestBothArgumentsSmis(assembler, &fall_through); | 422 TestBothArgumentsSmis(assembler, &fall_through); |
423 __ movq(RCX, Address(RSP, + 2 * kWordSize)); | 423 __ movq(RCX, Address(RSP, +2 * kWordSize)); |
424 // RAX: Tagged left (dividend). | 424 // RAX: Tagged left (dividend). |
425 // RCX: Tagged right (divisor). | 425 // RCX: Tagged right (divisor). |
426 __ cmpq(RCX, Immediate(0)); | 426 __ cmpq(RCX, Immediate(0)); |
427 __ j(EQUAL, &fall_through); | 427 __ j(EQUAL, &fall_through); |
428 EmitRemainderOperation(assembler); | 428 EmitRemainderOperation(assembler); |
429 // Untagged remainder result in RAX. | 429 // Untagged remainder result in RAX. |
430 __ cmpq(RAX, Immediate(0)); | 430 __ cmpq(RAX, Immediate(0)); |
431 __ j(LESS, &negative_result, Assembler::kNearJump); | 431 __ j(LESS, &negative_result, Assembler::kNearJump); |
432 __ SmiTag(RAX); | 432 __ SmiTag(RAX); |
433 __ ret(); | 433 __ ret(); |
(...skipping 17 matching lines...)
451 } | 451 } |
452 | 452 |
453 | 453 |
454 void Intrinsifier::Integer_truncDivide(Assembler* assembler) { | 454 void Intrinsifier::Integer_truncDivide(Assembler* assembler) { |
455 Label fall_through, not_32bit; | 455 Label fall_through, not_32bit; |
456 TestBothArgumentsSmis(assembler, &fall_through); | 456 TestBothArgumentsSmis(assembler, &fall_through); |
457 // RAX: right argument (divisor) | 457 // RAX: right argument (divisor) |
458 __ cmpq(RAX, Immediate(0)); | 458 __ cmpq(RAX, Immediate(0)); |
459 __ j(EQUAL, &fall_through, Assembler::kNearJump); | 459 __ j(EQUAL, &fall_through, Assembler::kNearJump); |
460 __ movq(RCX, RAX); | 460 __ movq(RCX, RAX); |
461 __ movq(RAX, Address(RSP, + 2 * kWordSize)); // Left argument (dividend). | 461 __ movq(RAX, Address(RSP, +2 * kWordSize)); // Left argument (dividend). |
462 | 462 |
463 // Check if both operands fit into 32 bits, as idiv with 64-bit operands | 463 // Check if both operands fit into 32 bits, as idiv with 64-bit operands |
464 // requires twice as many cycles and has much higher latency. We check this | 464 // requires twice as many cycles and has much higher latency. We check this |
465 // before untagging them to avoid the corner case of dividing INT_MIN by -1, | 465 // before untagging them to avoid the corner case of dividing INT_MIN by -1, |
466 // which raises an exception because the quotient does not fit in 32 bits. | 466 // which raises an exception because the quotient does not fit in 32 bits. |
467 __ movsxd(RBX, RAX); | 467 __ movsxd(RBX, RAX); |
468 __ cmpq(RBX, RAX); | 468 __ cmpq(RBX, RAX); |
469 __ j(NOT_EQUAL, ¬_32bit); | 469 __ j(NOT_EQUAL, ¬_32bit); |
470 __ movsxd(RBX, RCX); | 470 __ movsxd(RBX, RCX); |
471 __ cmpq(RBX, RCX); | 471 __ cmpq(RBX, RCX); |
(...skipping 21 matching lines...)
493 __ cmpq(RAX, Immediate(0x4000000000000000)); | 493 __ cmpq(RAX, Immediate(0x4000000000000000)); |
494 __ j(EQUAL, &fall_through); | 494 __ j(EQUAL, &fall_through); |
495 __ SmiTag(RAX); | 495 __ SmiTag(RAX); |
496 __ ret(); | 496 __ ret(); |
497 __ Bind(&fall_through); | 497 __ Bind(&fall_through); |
498 } | 498 } |
499 | 499 |
500 | 500 |
501 void Intrinsifier::Integer_negate(Assembler* assembler) { | 501 void Intrinsifier::Integer_negate(Assembler* assembler) { |
502 Label fall_through; | 502 Label fall_through; |
503 __ movq(RAX, Address(RSP, + 1 * kWordSize)); | 503 __ movq(RAX, Address(RSP, +1 * kWordSize)); |
504 __ testq(RAX, Immediate(kSmiTagMask)); | 504 __ testq(RAX, Immediate(kSmiTagMask)); |
505 __ j(NOT_ZERO, &fall_through, Assembler::kNearJump); // Non-smi value. | 505 __ j(NOT_ZERO, &fall_through, Assembler::kNearJump); // Non-smi value. |
506 __ negq(RAX); | 506 __ negq(RAX); |
507 __ j(OVERFLOW, &fall_through, Assembler::kNearJump); | 507 __ j(OVERFLOW, &fall_through, Assembler::kNearJump); |
508 // Result is in RAX. | 508 // Result is in RAX. |
509 __ ret(); | 509 __ ret(); |
510 __ Bind(&fall_through); | 510 __ Bind(&fall_through); |
511 } | 511 } |
512 | 512 |
513 | 513 |
514 void Intrinsifier::Integer_bitAndFromInteger(Assembler* assembler) { | 514 void Intrinsifier::Integer_bitAndFromInteger(Assembler* assembler) { |
515 Label fall_through; | 515 Label fall_through; |
516 TestBothArgumentsSmis(assembler, &fall_through); | 516 TestBothArgumentsSmis(assembler, &fall_through); |
517 // RAX is the right argument. | 517 // RAX is the right argument. |
518 __ andq(RAX, Address(RSP, + 2 * kWordSize)); | 518 __ andq(RAX, Address(RSP, +2 * kWordSize)); |
519 // Result is in RAX. | 519 // Result is in RAX. |
520 __ ret(); | 520 __ ret(); |
521 __ Bind(&fall_through); | 521 __ Bind(&fall_through); |
522 } | 522 } |
523 | 523 |
524 | 524 |
525 void Intrinsifier::Integer_bitAnd(Assembler* assembler) { | 525 void Intrinsifier::Integer_bitAnd(Assembler* assembler) { |
526 Integer_bitAndFromInteger(assembler); | 526 Integer_bitAndFromInteger(assembler); |
527 } | 527 } |
528 | 528 |
529 | 529 |
530 void Intrinsifier::Integer_bitOrFromInteger(Assembler* assembler) { | 530 void Intrinsifier::Integer_bitOrFromInteger(Assembler* assembler) { |
531 Label fall_through; | 531 Label fall_through; |
532 TestBothArgumentsSmis(assembler, &fall_through); | 532 TestBothArgumentsSmis(assembler, &fall_through); |
533 // RAX is the right argument. | 533 // RAX is the right argument. |
534 __ orq(RAX, Address(RSP, + 2 * kWordSize)); | 534 __ orq(RAX, Address(RSP, +2 * kWordSize)); |
535 // Result is in RAX. | 535 // Result is in RAX. |
536 __ ret(); | 536 __ ret(); |
537 __ Bind(&fall_through); | 537 __ Bind(&fall_through); |
538 } | 538 } |
539 | 539 |
540 | 540 |
541 void Intrinsifier::Integer_bitOr(Assembler* assembler) { | 541 void Intrinsifier::Integer_bitOr(Assembler* assembler) { |
542 Integer_bitOrFromInteger(assembler); | 542 Integer_bitOrFromInteger(assembler); |
543 } | 543 } |
544 | 544 |
545 | 545 |
546 void Intrinsifier::Integer_bitXorFromInteger(Assembler* assembler) { | 546 void Intrinsifier::Integer_bitXorFromInteger(Assembler* assembler) { |
547 Label fall_through; | 547 Label fall_through; |
548 TestBothArgumentsSmis(assembler, &fall_through); | 548 TestBothArgumentsSmis(assembler, &fall_through); |
549 // RAX is the right argument. | 549 // RAX is the right argument. |
550 __ xorq(RAX, Address(RSP, + 2 * kWordSize)); | 550 __ xorq(RAX, Address(RSP, +2 * kWordSize)); |
551 // Result is in RAX. | 551 // Result is in RAX. |
552 __ ret(); | 552 __ ret(); |
553 __ Bind(&fall_through); | 553 __ Bind(&fall_through); |
554 } | 554 } |
555 | 555 |
556 | 556 |
557 void Intrinsifier::Integer_bitXor(Assembler* assembler) { | 557 void Intrinsifier::Integer_bitXor(Assembler* assembler) { |
558 Integer_bitXorFromInteger(assembler); | 558 Integer_bitXorFromInteger(assembler); |
559 } | 559 } |
560 | 560 |
561 | 561 |
562 void Intrinsifier::Integer_shl(Assembler* assembler) { | 562 void Intrinsifier::Integer_shl(Assembler* assembler) { |
563 ASSERT(kSmiTagShift == 1); | 563 ASSERT(kSmiTagShift == 1); |
564 ASSERT(kSmiTag == 0); | 564 ASSERT(kSmiTag == 0); |
565 Label fall_through, overflow; | 565 Label fall_through, overflow; |
566 TestBothArgumentsSmis(assembler, &fall_through); | 566 TestBothArgumentsSmis(assembler, &fall_through); |
567 // Shift value is in RAX. Compare with tagged Smi. | 567 // Shift value is in RAX. Compare with tagged Smi. |
568 __ cmpq(RAX, Immediate(Smi::RawValue(Smi::kBits))); | 568 __ cmpq(RAX, Immediate(Smi::RawValue(Smi::kBits))); |
569 __ j(ABOVE_EQUAL, &fall_through, Assembler::kNearJump); | 569 __ j(ABOVE_EQUAL, &fall_through, Assembler::kNearJump); |
570 | 570 |
571 __ SmiUntag(RAX); | 571 __ SmiUntag(RAX); |
572 __ movq(RCX, RAX); // Shift amount must be in RCX. | 572 __ movq(RCX, RAX); // Shift amount must be in RCX. |
573 __ movq(RAX, Address(RSP, + 2 * kWordSize)); // Value. | 573 __ movq(RAX, Address(RSP, +2 * kWordSize)); // Value. |
574 | 574 |
575 // Overflow test - all the shifted-out bits must be same as the sign bit. | 575 // Overflow test - all the shifted-out bits must be same as the sign bit. |
576 __ movq(RDI, RAX); | 576 __ movq(RDI, RAX); |
577 __ shlq(RAX, RCX); | 577 __ shlq(RAX, RCX); |
578 __ sarq(RAX, RCX); | 578 __ sarq(RAX, RCX); |
579 __ cmpq(RAX, RDI); | 579 __ cmpq(RAX, RDI); |
580 __ j(NOT_EQUAL, &overflow, Assembler::kNearJump); | 580 __ j(NOT_EQUAL, &overflow, Assembler::kNearJump); |
581 | 581 |
582 __ shlq(RAX, RCX); // Shift for result now we know there is no overflow. | 582 __ shlq(RAX, RCX); // Shift for result now we know there is no overflow. |
583 | 583 |
584 // RAX is a correctly tagged Smi. | 584 // RAX is a correctly tagged Smi. |
585 __ ret(); | 585 __ ret(); |
586 | 586 |
587 __ Bind(&overflow); | 587 __ Bind(&overflow); |
588 // Mint is rarely used on x64 (only for integers requiring 64 bit instead of | 588 // Mint is rarely used on x64 (only for integers requiring 64 bit instead of |
589 // 63 bits as represented by Smi). | 589 // 63 bits as represented by Smi). |
590 __ Bind(&fall_through); | 590 __ Bind(&fall_through); |
591 } | 591 } |
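The overflow test relies on shl-then-sar by the same count being the identity exactly when no significant bits are shifted out. For example, RAX = 0x4000000000000000 shifted left by 1 gives 0x8000000000000000, and sarq brings back 0xC000000000000000 != RAX, so the overflow path is taken; for in-range values the round trip restores RAX bit for bit.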
592 | 592 |
593 | 593 |
594 static void CompareIntegers(Assembler* assembler, Condition true_condition) { | 594 static void CompareIntegers(Assembler* assembler, Condition true_condition) { |
595 Label fall_through, true_label; | 595 Label fall_through, true_label; |
596 TestBothArgumentsSmis(assembler, &fall_through); | 596 TestBothArgumentsSmis(assembler, &fall_through); |
597 // RAX contains the right argument. | 597 // RAX contains the right argument. |
598 __ cmpq(Address(RSP, + 2 * kWordSize), RAX); | 598 __ cmpq(Address(RSP, +2 * kWordSize), RAX); |
599 __ j(true_condition, &true_label, Assembler::kNearJump); | 599 __ j(true_condition, &true_label, Assembler::kNearJump); |
600 __ LoadObject(RAX, Bool::False()); | 600 __ LoadObject(RAX, Bool::False()); |
601 __ ret(); | 601 __ ret(); |
602 __ Bind(&true_label); | 602 __ Bind(&true_label); |
603 __ LoadObject(RAX, Bool::True()); | 603 __ LoadObject(RAX, Bool::True()); |
604 __ ret(); | 604 __ ret(); |
605 __ Bind(&fall_through); | 605 __ Bind(&fall_through); |
606 } | 606 } |
607 | 607 |
608 | 608 |
(...skipping 23 matching lines...)
632 | 632 |
633 | 633 |
634 // This is called for Smi, Mint and Bigint receivers. The right argument | 634 // This is called for Smi, Mint and Bigint receivers. The right argument |
635 // can be Smi, Mint, Bigint or double. | 635 // can be Smi, Mint, Bigint or double. |
636 void Intrinsifier::Integer_equalToInteger(Assembler* assembler) { | 636 void Intrinsifier::Integer_equalToInteger(Assembler* assembler) { |
637 Label fall_through, true_label, check_for_mint; | 637 Label fall_through, true_label, check_for_mint; |
638 const intptr_t kReceiverOffset = 2; | 638 const intptr_t kReceiverOffset = 2; |
639 const intptr_t kArgumentOffset = 1; | 639 const intptr_t kArgumentOffset = 1; |
640 | 640 |
641 // For integer receiver '===' check first. | 641 // For integer receiver '===' check first. |
642 __ movq(RAX, Address(RSP, + kArgumentOffset * kWordSize)); | 642 __ movq(RAX, Address(RSP, +kArgumentOffset * kWordSize)); |
643 __ movq(RCX, Address(RSP, + kReceiverOffset * kWordSize)); | 643 __ movq(RCX, Address(RSP, +kReceiverOffset * kWordSize)); |
644 __ cmpq(RAX, RCX); | 644 __ cmpq(RAX, RCX); |
645 __ j(EQUAL, &true_label, Assembler::kNearJump); | 645 __ j(EQUAL, &true_label, Assembler::kNearJump); |
646 __ orq(RAX, RCX); | 646 __ orq(RAX, RCX); |
647 __ testq(RAX, Immediate(kSmiTagMask)); | 647 __ testq(RAX, Immediate(kSmiTagMask)); |
648 __ j(NOT_ZERO, &check_for_mint, Assembler::kNearJump); | 648 __ j(NOT_ZERO, &check_for_mint, Assembler::kNearJump); |
649 // Both arguments are smi, '===' is good enough. | 649 // Both arguments are smi, '===' is good enough. |
650 __ LoadObject(RAX, Bool::False()); | 650 __ LoadObject(RAX, Bool::False()); |
651 __ ret(); | 651 __ ret(); |
652 __ Bind(&true_label); | 652 __ Bind(&true_label); |
653 __ LoadObject(RAX, Bool::True()); | 653 __ LoadObject(RAX, Bool::True()); |
654 __ ret(); | 654 __ ret(); |
655 | 655 |
656 // At least one of the arguments was not Smi. | 656 // At least one of the arguments was not Smi. |
657 Label receiver_not_smi; | 657 Label receiver_not_smi; |
658 __ Bind(&check_for_mint); | 658 __ Bind(&check_for_mint); |
659 __ movq(RAX, Address(RSP, + kReceiverOffset * kWordSize)); | 659 __ movq(RAX, Address(RSP, +kReceiverOffset * kWordSize)); |
660 __ testq(RAX, Immediate(kSmiTagMask)); | 660 __ testq(RAX, Immediate(kSmiTagMask)); |
661 __ j(NOT_ZERO, &receiver_not_smi); | 661 __ j(NOT_ZERO, &receiver_not_smi); |
662 | 662 |
663 // Left (receiver) is Smi, return false if right is not Double. | 663 // Left (receiver) is Smi, return false if right is not Double. |
664 // Note that an instance of Mint or Bigint never contains a value that can be | 664 // Note that an instance of Mint or Bigint never contains a value that can be |
665 // represented by Smi. | 665 // represented by Smi. |
666 __ movq(RAX, Address(RSP, + kArgumentOffset * kWordSize)); | 666 __ movq(RAX, Address(RSP, +kArgumentOffset * kWordSize)); |
667 __ CompareClassId(RAX, kDoubleCid); | 667 __ CompareClassId(RAX, kDoubleCid); |
668 __ j(EQUAL, &fall_through); | 668 __ j(EQUAL, &fall_through); |
669 __ LoadObject(RAX, Bool::False()); | 669 __ LoadObject(RAX, Bool::False()); |
670 __ ret(); | 670 __ ret(); |
671 | 671 |
672 __ Bind(&receiver_not_smi); | 672 __ Bind(&receiver_not_smi); |
673 // RAX: receiver. | 673 // RAX: receiver. |
674 __ CompareClassId(RAX, kMintCid); | 674 __ CompareClassId(RAX, kMintCid); |
675 __ j(NOT_EQUAL, &fall_through); | 675 __ j(NOT_EQUAL, &fall_through); |
676 // Receiver is Mint, return false if right is Smi. | 676 // Receiver is Mint, return false if right is Smi. |
677 __ movq(RAX, Address(RSP, + kArgumentOffset * kWordSize)); | 677 __ movq(RAX, Address(RSP, +kArgumentOffset * kWordSize)); |
678 __ testq(RAX, Immediate(kSmiTagMask)); | 678 __ testq(RAX, Immediate(kSmiTagMask)); |
679 __ j(NOT_ZERO, &fall_through); | 679 __ j(NOT_ZERO, &fall_through); |
680 // Smi == Mint -> false. | 680 // Smi == Mint -> false. |
681 __ LoadObject(RAX, Bool::False()); | 681 __ LoadObject(RAX, Bool::False()); |
682 __ ret(); | 682 __ ret(); |
683 // TODO(srdjan): Implement Mint == Mint comparison. | 683 // TODO(srdjan): Implement Mint == Mint comparison. |
684 | 684 |
685 __ Bind(&fall_through); | 685 __ Bind(&fall_through); |
686 } | 686 } |
687 | 687 |
(...skipping 11 matching lines...)
699 // When shifting a Smi right, the result is the same for all shift counts | 699 // When shifting a Smi right, the result is the same for all shift counts |
700 // >= count_limit. | 700 // >= count_limit. |
701 __ SmiUntag(RAX); | 701 __ SmiUntag(RAX); |
702 // Negative counts throw exception. | 702 // Negative counts throw exception. |
703 __ cmpq(RAX, Immediate(0)); | 703 __ cmpq(RAX, Immediate(0)); |
704 __ j(LESS, &fall_through, Assembler::kNearJump); | 704 __ j(LESS, &fall_through, Assembler::kNearJump); |
705 __ cmpq(RAX, count_limit); | 705 __ cmpq(RAX, count_limit); |
706 __ j(LESS_EQUAL, &shift_count_ok, Assembler::kNearJump); | 706 __ j(LESS_EQUAL, &shift_count_ok, Assembler::kNearJump); |
707 __ movq(RAX, count_limit); | 707 __ movq(RAX, count_limit); |
708 __ Bind(&shift_count_ok); | 708 __ Bind(&shift_count_ok); |
709 __ movq(RCX, RAX); // Shift amount must be in RCX. | 709 __ movq(RCX, RAX); // Shift amount must be in RCX. |
710 __ movq(RAX, Address(RSP, + 2 * kWordSize)); // Value. | 710 __ movq(RAX, Address(RSP, +2 * kWordSize)); // Value. |
711 __ SmiUntag(RAX); // Value. | 711 __ SmiUntag(RAX); // Value. |
712 __ sarq(RAX, RCX); | 712 __ sarq(RAX, RCX); |
713 __ SmiTag(RAX); | 713 __ SmiTag(RAX); |
714 __ ret(); | 714 __ ret(); |
715 __ Bind(&fall_through); | 715 __ Bind(&fall_through); |
716 } | 716 } |
717 | 717 |
718 | 718 |
719 // Argument is Smi (receiver). | 719 // Argument is Smi (receiver). |
720 void Intrinsifier::Smi_bitNegate(Assembler* assembler) { | 720 void Intrinsifier::Smi_bitNegate(Assembler* assembler) { |
721 __ movq(RAX, Address(RSP, + 1 * kWordSize)); // Value. | 721 __ movq(RAX, Address(RSP, +1 * kWordSize)); // Value. |
722 __ notq(RAX); | 722 __ notq(RAX); |
723 __ andq(RAX, Immediate(~kSmiTagMask)); // Remove inverted smi-tag. | 723 __ andq(RAX, Immediate(~kSmiTagMask)); // Remove inverted smi-tag. |
724 __ ret(); | 724 __ ret(); |
725 } | 725 } |
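The andq works because complementing a tagged Smi also complements the tag bit: for t == v << 1, ~t == ((~v) << 1) | 1, so clearing bit 0 yields the correctly tagged ~v. A standalone check (hypothetical values, not VM code):

#include <cstdint>
int main() {
  const int64_t v = 5;
  const int64_t t = v << 1;  // Tagged Smi.
  // Clearing the inverted tag bit re-tags the complement.
  return ((~t & ~int64_t{1}) == ((~v) << 1)) ? 0 : 1;
}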
726 | 726 |
727 | 727 |
728 void Intrinsifier::Smi_bitLength(Assembler* assembler) { | 728 void Intrinsifier::Smi_bitLength(Assembler* assembler) { |
729 ASSERT(kSmiTagShift == 1); | 729 ASSERT(kSmiTagShift == 1); |
730 __ movq(RAX, Address(RSP, + 1 * kWordSize)); // Value. | 730 __ movq(RAX, Address(RSP, +1 * kWordSize)); // Value. |
731 // XOR with sign bit to complement bits if value is negative. | 731 // XOR with sign bit to complement bits if value is negative. |
732 __ movq(RCX, RAX); | 732 __ movq(RCX, RAX); |
733 __ sarq(RCX, Immediate(63)); // All 0 or all 1. | 733 __ sarq(RCX, Immediate(63)); // All 0 or all 1. |
734 __ xorq(RAX, RCX); | 734 __ xorq(RAX, RCX); |
735 // BSR does not write the destination register if source is zero. Put a 1 in | 735 // BSR does not write the destination register if source is zero. Put a 1 in |
736 // the Smi tag bit to ensure BSR writes to destination register. | 736 // the Smi tag bit to ensure BSR writes to destination register. |
737 __ orq(RAX, Immediate(kSmiTagMask)); | 737 __ orq(RAX, Immediate(kSmiTagMask)); |
738 __ bsrq(RAX, RAX); | 738 __ bsrq(RAX, RAX); |
739 __ SmiTag(RAX); | 739 __ SmiTag(RAX); |
740 __ ret(); | 740 __ ret(); |
741 } | 741 } |
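Operating on the tagged value is what makes bsrq return the bit length directly: the tagged Smi is v << 1, so the highest significant bit of v lands at index bitLength(v) after the shift; the sign-XOR maps a negative v to its complement, and OR-ing in the tag bit only guarantees a nonzero BSR input without disturbing that index. For example, v = 5: tagged 0b1010, OR 1 gives 0b1011, and bsrq returns 3 == (5).bitLength.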
742 | 742 |
743 | 743 |
744 void Intrinsifier::Smi_bitAndFromSmi(Assembler* assembler) { | 744 void Intrinsifier::Smi_bitAndFromSmi(Assembler* assembler) { |
745 Integer_bitAndFromInteger(assembler); | 745 Integer_bitAndFromInteger(assembler); |
746 } | 746 } |
747 | 747 |
748 | 748 |
749 void Intrinsifier::Bigint_lsh(Assembler* assembler) { | 749 void Intrinsifier::Bigint_lsh(Assembler* assembler) { |
750 // static void _lsh(Uint32List x_digits, int x_used, int n, | 750 // static void _lsh(Uint32List x_digits, int x_used, int n, |
751 // Uint32List r_digits) | 751 // Uint32List r_digits) |
752 | 752 |
753 __ movq(RDI, Address(RSP, 4 * kWordSize)); // x_digits | 753 __ movq(RDI, Address(RSP, 4 * kWordSize)); // x_digits |
754 __ movq(R8, Address(RSP, 3 * kWordSize)); // x_used is Smi | 754 __ movq(R8, Address(RSP, 3 * kWordSize)); // x_used is Smi |
755 __ subq(R8, Immediate(2)); // x_used > 0, Smi. R8 = x_used - 1, round up. | 755 __ subq(R8, Immediate(2)); // x_used > 0, Smi. R8 = x_used - 1, round up. |
756 __ sarq(R8, Immediate(2)); // R8 + 1 = number of digit pairs to read. | 756 __ sarq(R8, Immediate(2)); // R8 + 1 = number of digit pairs to read. |
757 __ movq(RCX, Address(RSP, 2 * kWordSize)); // n is Smi | 757 __ movq(RCX, Address(RSP, 2 * kWordSize)); // n is Smi |
758 __ SmiUntag(RCX); | 758 __ SmiUntag(RCX); |
759 __ movq(RBX, Address(RSP, 1 * kWordSize)); // r_digits | 759 __ movq(RBX, Address(RSP, 1 * kWordSize)); // r_digits |
760 __ movq(RSI, RCX); | 760 __ movq(RSI, RCX); |
761 __ sarq(RSI, Immediate(6)); // RSI = n ~/ (2*_DIGIT_BITS). | 761 __ sarq(RSI, Immediate(6)); // RSI = n ~/ (2*_DIGIT_BITS). |
762 __ leaq(RBX, FieldAddress(RBX, RSI, TIMES_8, TypedData::data_offset())); | 762 __ leaq(RBX, FieldAddress(RBX, RSI, TIMES_8, TypedData::data_offset())); |
763 __ xorq(RAX, RAX); // RAX = 0. | 763 __ xorq(RAX, RAX); // RAX = 0. |
764 __ movq(RDX, FieldAddress(RDI, R8, TIMES_8, TypedData::data_offset())); | 764 __ movq(RDX, FieldAddress(RDI, R8, TIMES_8, TypedData::data_offset())); |
765 __ shldq(RAX, RDX, RCX); | 765 __ shldq(RAX, RDX, RCX); |
766 __ movq(Address(RBX, R8, TIMES_8, 2 * Bigint::kBytesPerDigit), RAX); | 766 __ movq(Address(RBX, R8, TIMES_8, 2 * Bigint::kBytesPerDigit), RAX); |
767 Label last; | 767 Label last; |
768 __ cmpq(R8, Immediate(0)); | 768 __ cmpq(R8, Immediate(0)); |
769 __ j(EQUAL, &last, Assembler::kNearJump); | 769 __ j(EQUAL, &last, Assembler::kNearJump); |
770 Label loop; | 770 Label loop; |
771 __ Bind(&loop); | 771 __ Bind(&loop); |
772 __ movq(RAX, RDX); | 772 __ movq(RAX, RDX); |
773 __ movq(RDX, | 773 __ movq(RDX, FieldAddress(RDI, R8, TIMES_8, TypedData::data_offset() - |
774 FieldAddress(RDI, R8, TIMES_8, | 774 2 * Bigint::kBytesPerDigit)); |
775 TypedData::data_offset() - 2 * Bigint::kBytesPerDigit)); | |
776 __ shldq(RAX, RDX, RCX); | 775 __ shldq(RAX, RDX, RCX); |
777 __ movq(Address(RBX, R8, TIMES_8, 0), RAX); | 776 __ movq(Address(RBX, R8, TIMES_8, 0), RAX); |
778 __ decq(R8); | 777 __ decq(R8); |
779 __ j(NOT_ZERO, &loop, Assembler::kNearJump); | 778 __ j(NOT_ZERO, &loop, Assembler::kNearJump); |
780 __ Bind(&last); | 779 __ Bind(&last); |
781 __ shldq(RDX, R8, RCX); // R8 == 0. | 780 __ shldq(RDX, R8, RCX); // R8 == 0. |
782 __ movq(Address(RBX, 0), RDX); | 781 __ movq(Address(RBX, 0), RDX); |
783 // Returning Object::null() is not required, since this method is private. | 782 // Returning Object::null() is not required, since this method is private. |
784 __ ret(); | 783 __ ret(); |
785 } | 784 } |
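Two details worth spelling out: first, the digit-pair arithmetic is done on the Smi directly; with x_used tagged as 2 * x_used, subq(2) then sarq(2) computes (x_used - 1) >> 1, so R8 + 1 == ceil(x_used / 2) 64-bit digit pairs (e.g. x_used = 5 gives R8 = 2, i.e. three pairs). Second, shldq(RAX, RDX, RCX) shifts RAX left by CL while filling the vacated low bits from the high bits of RDX, which is what stitches adjacent digit pairs together across the shift.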
786 | 785 |
787 | 786 |
788 void Intrinsifier::Bigint_rsh(Assembler* assembler) { | 787 void Intrinsifier::Bigint_rsh(Assembler* assembler) { |
789 // static void _rsh(Uint32List x_digits, int x_used, int n, | 788 // static void _rsh(Uint32List x_digits, int x_used, int n, |
790 // Uint32List r_digits) | 789 // Uint32List r_digits) |
791 | 790 |
792 __ movq(RDI, Address(RSP, 4 * kWordSize)); // x_digits | 791 __ movq(RDI, Address(RSP, 4 * kWordSize)); // x_digits |
793 __ movq(RCX, Address(RSP, 2 * kWordSize)); // n is Smi | 792 __ movq(RCX, Address(RSP, 2 * kWordSize)); // n is Smi |
794 __ SmiUntag(RCX); | 793 __ SmiUntag(RCX); |
795 __ movq(RBX, Address(RSP, 1 * kWordSize)); // r_digits | 794 __ movq(RBX, Address(RSP, 1 * kWordSize)); // r_digits |
796 __ movq(RDX, RCX); | 795 __ movq(RDX, RCX); |
797 __ sarq(RDX, Immediate(6)); // RDX = n ~/ (2*_DIGIT_BITS). | 796 __ sarq(RDX, Immediate(6)); // RDX = n ~/ (2*_DIGIT_BITS). |
798 __ movq(RSI, Address(RSP, 3 * kWordSize)); // x_used is Smi | 797 __ movq(RSI, Address(RSP, 3 * kWordSize)); // x_used is Smi |
799 __ subq(RSI, Immediate(2)); // x_used > 0, Smi. RSI = x_used - 1, round up. | 798 __ subq(RSI, Immediate(2)); // x_used > 0, Smi. RSI = x_used - 1, round up. |
800 __ sarq(RSI, Immediate(2)); | 799 __ sarq(RSI, Immediate(2)); |
801 __ leaq(RDI, FieldAddress(RDI, RSI, TIMES_8, TypedData::data_offset())); | 800 __ leaq(RDI, FieldAddress(RDI, RSI, TIMES_8, TypedData::data_offset())); |
802 __ subq(RSI, RDX); // RSI + 1 = number of digit pairs to read. | 801 __ subq(RSI, RDX); // RSI + 1 = number of digit pairs to read. |
803 __ leaq(RBX, FieldAddress(RBX, RSI, TIMES_8, TypedData::data_offset())); | 802 __ leaq(RBX, FieldAddress(RBX, RSI, TIMES_8, TypedData::data_offset())); |
804 __ negq(RSI); | 803 __ negq(RSI); |
805 __ movq(RDX, Address(RDI, RSI, TIMES_8, 0)); | 804 __ movq(RDX, Address(RDI, RSI, TIMES_8, 0)); |
806 Label last; | 805 Label last; |
807 __ cmpq(RSI, Immediate(0)); | 806 __ cmpq(RSI, Immediate(0)); |
(...skipping 13 matching lines...)
821 __ ret(); | 820 __ ret(); |
822 } | 821 } |
823 | 822 |
824 | 823 |
825 void Intrinsifier::Bigint_absAdd(Assembler* assembler) { | 824 void Intrinsifier::Bigint_absAdd(Assembler* assembler) { |
826 // static void _absAdd(Uint32List digits, int used, | 825 // static void _absAdd(Uint32List digits, int used, |
827 // Uint32List a_digits, int a_used, | 826 // Uint32List a_digits, int a_used, |
828 // Uint32List r_digits) | 827 // Uint32List r_digits) |
829 | 828 |
830 __ movq(RDI, Address(RSP, 5 * kWordSize)); // digits | 829 __ movq(RDI, Address(RSP, 5 * kWordSize)); // digits |
831 __ movq(R8, Address(RSP, 4 * kWordSize)); // used is Smi | 830 __ movq(R8, Address(RSP, 4 * kWordSize)); // used is Smi |
832 __ addq(R8, Immediate(2)); // used > 0, Smi. R8 = used + 1, round up. | 831 __ addq(R8, Immediate(2)); // used > 0, Smi. R8 = used + 1, round up. |
833 __ sarq(R8, Immediate(2)); // R8 = number of digit pairs to process. | 832 __ sarq(R8, Immediate(2)); // R8 = number of digit pairs to process. |
834 __ movq(RSI, Address(RSP, 3 * kWordSize)); // a_digits | 833 __ movq(RSI, Address(RSP, 3 * kWordSize)); // a_digits |
835 __ movq(RCX, Address(RSP, 2 * kWordSize)); // a_used is Smi | 834 __ movq(RCX, Address(RSP, 2 * kWordSize)); // a_used is Smi |
836 __ addq(RCX, Immediate(2)); // a_used > 0, Smi. RCX = a_used + 1, round up. | 835 __ addq(RCX, Immediate(2)); // a_used > 0, Smi. RCX = a_used + 1, round up. |
837 __ sarq(RCX, Immediate(2)); // RCX = number of digit pairs to process. | 836 __ sarq(RCX, Immediate(2)); // RCX = number of digit pairs to process. |
838 __ movq(RBX, Address(RSP, 1 * kWordSize)); // r_digits | 837 __ movq(RBX, Address(RSP, 1 * kWordSize)); // r_digits |
839 | 838 |
840 // Precompute 'used - a_used' now so that carry flag is not lost later. | 839 // Precompute 'used - a_used' now so that carry flag is not lost later. |
841 __ subq(R8, RCX); | 840 __ subq(R8, RCX); |
842 __ incq(R8); // To account for the extra test between loops. | 841 __ incq(R8); // To account for the extra test between loops. |
843 | 842 |
844 __ xorq(RDX, RDX); // RDX = 0, carry flag = 0. | 843 __ xorq(RDX, RDX); // RDX = 0, carry flag = 0. |
845 Label add_loop; | 844 Label add_loop; |
846 __ Bind(&add_loop); | 845 __ Bind(&add_loop); |
847 // Loop (a_used+1)/2 times, RCX > 0. | 846 // Loop (a_used+1)/2 times, RCX > 0. |
848 __ movq(RAX, FieldAddress(RDI, RDX, TIMES_8, TypedData::data_offset())); | 847 __ movq(RAX, FieldAddress(RDI, RDX, TIMES_8, TypedData::data_offset())); |
849 __ adcq(RAX, FieldAddress(RSI, RDX, TIMES_8, TypedData::data_offset())); | 848 __ adcq(RAX, FieldAddress(RSI, RDX, TIMES_8, TypedData::data_offset())); |
850 __ movq(FieldAddress(RBX, RDX, TIMES_8, TypedData::data_offset()), RAX); | 849 __ movq(FieldAddress(RBX, RDX, TIMES_8, TypedData::data_offset()), RAX); |
851 __ incq(RDX); // Does not affect carry flag. | 850 __ incq(RDX); // Does not affect carry flag. |
852 __ decq(RCX); // Does not affect carry flag. | 851 __ decq(RCX); // Does not affect carry flag. |
853 __ j(NOT_ZERO, &add_loop, Assembler::kNearJump); | 852 __ j(NOT_ZERO, &add_loop, Assembler::kNearJump); |
854 | 853 |
855 Label last_carry; | 854 Label last_carry; |
856 __ decq(R8); // Does not affect carry flag. | 855 __ decq(R8); // Does not affect carry flag. |
857 __ j(ZERO, &last_carry, Assembler::kNearJump); // If used - a_used == 0. | 856 __ j(ZERO, &last_carry, Assembler::kNearJump); // If used - a_used == 0. |
858 | 857 |
859 Label carry_loop; | 858 Label carry_loop; |
860 __ Bind(&carry_loop); | 859 __ Bind(&carry_loop); |
861 // Loop (used+1)/2 - (a_used+1)/2 times, R8 > 0. | 860 // Loop (used+1)/2 - (a_used+1)/2 times, R8 > 0. |
862 __ movq(RAX, FieldAddress(RDI, RDX, TIMES_8, TypedData::data_offset())); | 861 __ movq(RAX, FieldAddress(RDI, RDX, TIMES_8, TypedData::data_offset())); |
863 __ adcq(RAX, Immediate(0)); | 862 __ adcq(RAX, Immediate(0)); |
864 __ movq(FieldAddress(RBX, RDX, TIMES_8, TypedData::data_offset()), RAX); | 863 __ movq(FieldAddress(RBX, RDX, TIMES_8, TypedData::data_offset()), RAX); |
865 __ incq(RDX); // Does not affect carry flag. | 864 __ incq(RDX); // Does not affect carry flag. |
866 __ decq(R8); // Does not affect carry flag. | 865 __ decq(R8); // Does not affect carry flag. |
867 __ j(NOT_ZERO, &carry_loop, Assembler::kNearJump); | 866 __ j(NOT_ZERO, &carry_loop, Assembler::kNearJump); |
868 | 867 |
869 __ Bind(&last_carry); | 868 __ Bind(&last_carry); |
870 Label done; | 869 Label done; |
871 __ j(NOT_CARRY, &done); | 870 __ j(NOT_CARRY, &done); |
872 __ movq(FieldAddress(RBX, RDX, TIMES_8, TypedData::data_offset()), | 871 __ movq(FieldAddress(RBX, RDX, TIMES_8, TypedData::data_offset()), |
873 Immediate(1)); | 872 Immediate(1)); |
874 | 873 |
875 __ Bind(&done); | 874 __ Bind(&done); |
876 // Returning Object::null() is not required, since this method is private. | 875 // Returning Object::null() is not required, since this method is private. |
877 __ ret(); | 876 __ ret(); |
878 } | 877 } |
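The loop bookkeeping is deliberate: incq and decq update ZF but leave the carry flag untouched (unlike addq/subq), so the adcq carry chain survives across iterations and into carry_loop; the "Does not affect carry flag" comments mark exactly the instructions chosen for that property.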
879 | 878 |
880 | 879 |
881 void Intrinsifier::Bigint_absSub(Assembler* assembler) { | 880 void Intrinsifier::Bigint_absSub(Assembler* assembler) { |
882 // static void _absSub(Uint32List digits, int used, | 881 // static void _absSub(Uint32List digits, int used, |
883 // Uint32List a_digits, int a_used, | 882 // Uint32List a_digits, int a_used, |
884 // Uint32List r_digits) | 883 // Uint32List r_digits) |
885 | 884 |
886 __ movq(RDI, Address(RSP, 5 * kWordSize)); // digits | 885 __ movq(RDI, Address(RSP, 5 * kWordSize)); // digits |
887 __ movq(R8, Address(RSP, 4 * kWordSize)); // used is Smi | 886 __ movq(R8, Address(RSP, 4 * kWordSize)); // used is Smi |
888 __ addq(R8, Immediate(2)); // used > 0, Smi. R8 = used + 1, round up. | 887 __ addq(R8, Immediate(2)); // used > 0, Smi. R8 = used + 1, round up. |
889 __ sarq(R8, Immediate(2)); // R8 = number of digit pairs to process. | 888 __ sarq(R8, Immediate(2)); // R8 = number of digit pairs to process. |
890 __ movq(RSI, Address(RSP, 3 * kWordSize)); // a_digits | 889 __ movq(RSI, Address(RSP, 3 * kWordSize)); // a_digits |
891 __ movq(RCX, Address(RSP, 2 * kWordSize)); // a_used is Smi | 890 __ movq(RCX, Address(RSP, 2 * kWordSize)); // a_used is Smi |
892 __ addq(RCX, Immediate(2)); // a_used > 0, Smi. RCX = a_used + 1, round up. | 891 __ addq(RCX, Immediate(2)); // a_used > 0, Smi. RCX = a_used + 1, round up. |
893 __ sarq(RCX, Immediate(2)); // RCX = number of digit pairs to process. | 892 __ sarq(RCX, Immediate(2)); // RCX = number of digit pairs to process. |
894 __ movq(RBX, Address(RSP, 1 * kWordSize)); // r_digits | 893 __ movq(RBX, Address(RSP, 1 * kWordSize)); // r_digits |
895 | 894 |
896 // Precompute 'used - a_used' now so that carry flag is not lost later. | 895 // Precompute 'used - a_used' now so that carry flag is not lost later. |
897 __ subq(R8, RCX); | 896 __ subq(R8, RCX); |
898 __ incq(R8); // To account for the extra test between loops. | 897 __ incq(R8); // To account for the extra test between loops. |
899 | 898 |
900 __ xorq(RDX, RDX); // RDX = 0, carry flag = 0. | 899 __ xorq(RDX, RDX); // RDX = 0, carry flag = 0. |
901 Label sub_loop; | 900 Label sub_loop; |
902 __ Bind(&sub_loop); | 901 __ Bind(&sub_loop); |
903 // Loop (a_used+1)/2 times, RCX > 0. | 902 // Loop (a_used+1)/2 times, RCX > 0. |
904 __ movq(RAX, FieldAddress(RDI, RDX, TIMES_8, TypedData::data_offset())); | 903 __ movq(RAX, FieldAddress(RDI, RDX, TIMES_8, TypedData::data_offset())); |
905 __ sbbq(RAX, FieldAddress(RSI, RDX, TIMES_8, TypedData::data_offset())); | 904 __ sbbq(RAX, FieldAddress(RSI, RDX, TIMES_8, TypedData::data_offset())); |
906 __ movq(FieldAddress(RBX, RDX, TIMES_8, TypedData::data_offset()), RAX); | 905 __ movq(FieldAddress(RBX, RDX, TIMES_8, TypedData::data_offset()), RAX); |
907 __ incq(RDX); // Does not affect carry flag. | 906 __ incq(RDX); // Does not affect carry flag. |
908 __ decq(RCX); // Does not affect carry flag. | 907 __ decq(RCX); // Does not affect carry flag. |
909 __ j(NOT_ZERO, &sub_loop, Assembler::kNearJump); | 908 __ j(NOT_ZERO, &sub_loop, Assembler::kNearJump); |
910 | 909 |
911 Label done; | 910 Label done; |
912 __ decq(R8); // Does not affect carry flag. | 911 __ decq(R8); // Does not affect carry flag. |
913 __ j(ZERO, &done, Assembler::kNearJump); // If used - a_used == 0. | 912 __ j(ZERO, &done, Assembler::kNearJump); // If used - a_used == 0. |
914 | 913 |
915 Label carry_loop; | 914 Label carry_loop; |
916 __ Bind(&carry_loop); | 915 __ Bind(&carry_loop); |
917 // Loop (used+1)/2 - (a_used+1)/2 times, R8 > 0. | 916 // Loop (used+1)/2 - (a_used+1)/2 times, R8 > 0. |
918 __ movq(RAX, FieldAddress(RDI, RDX, TIMES_8, TypedData::data_offset())); | 917 __ movq(RAX, FieldAddress(RDI, RDX, TIMES_8, TypedData::data_offset())); |
919 __ sbbq(RAX, Immediate(0)); | 918 __ sbbq(RAX, Immediate(0)); |
920 __ movq(FieldAddress(RBX, RDX, TIMES_8, TypedData::data_offset()), RAX); | 919 __ movq(FieldAddress(RBX, RDX, TIMES_8, TypedData::data_offset()), RAX); |
921 __ incq(RDX); // Does not affect carry flag. | 920 __ incq(RDX); // Does not affect carry flag. |
922 __ decq(R8); // Does not affect carry flag. | 921 __ decq(R8); // Does not affect carry flag. |
923 __ j(NOT_ZERO, &carry_loop, Assembler::kNearJump); | 922 __ j(NOT_ZERO, &carry_loop, Assembler::kNearJump); |
924 | 923 |
925 __ Bind(&done); | 924 __ Bind(&done); |
926 // Returning Object::null() is not required, since this method is private. | 925 // Returning Object::null() is not required, since this method is private. |
927 __ ret(); | 926 __ ret(); |
928 } | 927 } |
929 | 928 |
930 | 929 |
931 void Intrinsifier::Bigint_mulAdd(Assembler* assembler) { | 930 void Intrinsifier::Bigint_mulAdd(Assembler* assembler) { |
932 // Pseudo code: | 931 // Pseudo code: |
(...skipping 55 matching lines...)
988 __ Bind(&muladd_loop); | 987 __ Bind(&muladd_loop); |
989 // x: RBX | 988 // x: RBX |
990 // mip: RDI | 989 // mip: RDI |
991 // ajp: RSI | 990 // ajp: RSI |
992 // c: RCX | 991 // c: RCX |
993 // t: RDX:RAX (not live at loop entry) | 992 // t: RDX:RAX (not live at loop entry) |
994 // n: R8 | 993 // n: R8 |
995 | 994 |
996 // uint64_t mi = *mip++ | 995 // uint64_t mi = *mip++ |
997 __ movq(RAX, Address(RDI, 0)); | 996 __ movq(RAX, Address(RDI, 0)); |
998 __ addq(RDI, Immediate(2*Bigint::kBytesPerDigit)); | 997 __ addq(RDI, Immediate(2 * Bigint::kBytesPerDigit)); |
999 | 998 |
1000 // uint128_t t = x*mi | 999 // uint128_t t = x*mi |
1001 __ mulq(RBX); // t = RDX:RAX = RAX * RBX, 64-bit * 64-bit -> 64-bit | 1000 __ mulq(RBX); // t = RDX:RAX = RAX * RBX, 64-bit * 64-bit -> 64-bit |
1002 __ addq(RAX, RCX); // t += c | 1001 __ addq(RAX, RCX); // t += c |
1003 __ adcq(RDX, Immediate(0)); | 1002 __ adcq(RDX, Immediate(0)); |
1004 | 1003 |
1005 // uint64_t aj = *ajp; t += aj | 1004 // uint64_t aj = *ajp; t += aj |
1006 __ addq(RAX, Address(RSI, 0)); | 1005 __ addq(RAX, Address(RSI, 0)); |
1007 __ adcq(RDX, Immediate(0)); | 1006 __ adcq(RDX, Immediate(0)); |
1008 | 1007 |
1009 // *ajp++ = low64(t) | 1008 // *ajp++ = low64(t) |
1010 __ movq(Address(RSI, 0), RAX); | 1009 __ movq(Address(RSI, 0), RAX); |
1011 __ addq(RSI, Immediate(2*Bigint::kBytesPerDigit)); | 1010 __ addq(RSI, Immediate(2 * Bigint::kBytesPerDigit)); |
1012 | 1011 |
1013 // c = high64(t) | 1012 // c = high64(t) |
1014 __ movq(RCX, RDX); | 1013 __ movq(RCX, RDX); |
1015 | 1014 |
1016 // while (--n > 0) | 1015 // while (--n > 0) |
1017 __ decq(R8); // --n | 1016 __ decq(R8); // --n |
1018 __ j(NOT_ZERO, &muladd_loop, Assembler::kNearJump); | 1017 __ j(NOT_ZERO, &muladd_loop, Assembler::kNearJump); |
1019 | 1018 |
1020 __ testq(RCX, RCX); | 1019 __ testq(RCX, RCX); |
1021 __ j(ZERO, &done, Assembler::kNearJump); | 1020 __ j(ZERO, &done, Assembler::kNearJump); |
1022 | 1021 |
1023 // *ajp += c | 1022 // *ajp += c |
1024 __ addq(Address(RSI, 0), RCX); | 1023 __ addq(Address(RSI, 0), RCX); |
1025 __ j(NOT_CARRY, &done, Assembler::kNearJump); | 1024 __ j(NOT_CARRY, &done, Assembler::kNearJump); |
1026 | 1025 |
1027 Label propagate_carry_loop; | 1026 Label propagate_carry_loop; |
1028 __ Bind(&propagate_carry_loop); | 1027 __ Bind(&propagate_carry_loop); |
1029 __ addq(RSI, Immediate(2*Bigint::kBytesPerDigit)); | 1028 __ addq(RSI, Immediate(2 * Bigint::kBytesPerDigit)); |
1030 __ incq(Address(RSI, 0)); // c == 0 or 1 | 1029 __ incq(Address(RSI, 0)); // c == 0 or 1 |
1031 __ j(CARRY, &propagate_carry_loop, Assembler::kNearJump); | 1030 __ j(CARRY, &propagate_carry_loop, Assembler::kNearJump); |
1032 | 1031 |
1033 __ Bind(&done); | 1032 __ Bind(&done); |
1034 __ movq(RAX, Immediate(Smi::RawValue(2))); // Two digits processed. | 1033 __ movq(RAX, Immediate(Smi::RawValue(2))); // Two digits processed. |
1035 __ ret(); | 1034 __ ret(); |
1036 } | 1035 } |
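
// A hedged C++ sketch of the multiply-accumulate loop above, assuming a
// compiler with the unsigned __int128 extension (GCC/Clang). Register map in
// the assembly: x=RBX, mip=RDI, ajp=RSI, c=RCX, n=R8.
#include <stdint.h>

static void MulAdd64(uint64_t x, const uint64_t* mip, uint64_t* ajp,
                     uint64_t n) {  // n > 0 on entry, as in the intrinsic.
  uint64_t c = 0;
  do {
    unsigned __int128 t = (unsigned __int128)x * *mip++;  // t = x * mi
    t += c;                                               // t += c
    t += *ajp;                                            // t += aj
    *ajp++ = (uint64_t)t;                                 // *ajp++ = low64(t)
    c = (uint64_t)(t >> 64);                              // c = high64(t)
  } while (--n > 0);
  while (c != 0) {          // Final carry: the first add may be large, the
    uint64_t s = *ajp + c;  // rest increment by 1, as in the carry loop.
    c = (s < c);
    *ajp++ = s;
  }
}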
1037 | 1036 |
1038 | 1037 |
1039 void Intrinsifier::Bigint_sqrAdd(Assembler* assembler) { | 1038 void Intrinsifier::Bigint_sqrAdd(Assembler* assembler) { |
(...skipping 26 matching lines...)
1066 // RDI = xip = &x_digits[i >> 1] | 1065 // RDI = xip = &x_digits[i >> 1] |
1067 __ movq(RDI, Address(RSP, 4 * kWordSize)); // x_digits | 1066 __ movq(RDI, Address(RSP, 4 * kWordSize)); // x_digits |
1068 __ movq(RAX, Address(RSP, 3 * kWordSize)); // i is Smi | 1067 __ movq(RAX, Address(RSP, 3 * kWordSize)); // i is Smi |
1069 __ leaq(RDI, FieldAddress(RDI, RAX, TIMES_2, TypedData::data_offset())); | 1068 __ leaq(RDI, FieldAddress(RDI, RAX, TIMES_2, TypedData::data_offset())); |
1070 | 1069 |
1071 // RBX = x = *xip++, return if x == 0 | 1070 // RBX = x = *xip++, return if x == 0 |
1072 Label x_zero; | 1071 Label x_zero; |
1073 __ movq(RBX, Address(RDI, 0)); | 1072 __ movq(RBX, Address(RDI, 0)); |
1074 __ cmpq(RBX, Immediate(0)); | 1073 __ cmpq(RBX, Immediate(0)); |
1075 __ j(EQUAL, &x_zero); | 1074 __ j(EQUAL, &x_zero); |
1076 __ addq(RDI, Immediate(2*Bigint::kBytesPerDigit)); | 1075 __ addq(RDI, Immediate(2 * Bigint::kBytesPerDigit)); |
1077 | 1076 |
1078 // RSI = ajp = &a_digits[i] | 1077 // RSI = ajp = &a_digits[i] |
1079 __ movq(RSI, Address(RSP, 2 * kWordSize)); // a_digits | 1078 __ movq(RSI, Address(RSP, 2 * kWordSize)); // a_digits |
1080 __ leaq(RSI, FieldAddress(RSI, RAX, TIMES_4, TypedData::data_offset())); | 1079 __ leaq(RSI, FieldAddress(RSI, RAX, TIMES_4, TypedData::data_offset())); |
1081 | 1080 |
1082 // RDX:RAX = t = x*x + *ajp | 1081 // RDX:RAX = t = x*x + *ajp |
1083 __ movq(RAX, RBX); | 1082 __ movq(RAX, RBX); |
1084 __ mulq(RBX); | 1083 __ mulq(RBX); |
1085 __ addq(RAX, Address(RSI, 0)); | 1084 __ addq(RAX, Address(RSI, 0)); |
1086 __ adcq(RDX, Immediate(0)); | 1085 __ adcq(RDX, Immediate(0)); |
1087 | 1086 |
1088 // *ajp++ = low64(t) | 1087 // *ajp++ = low64(t) |
1089 __ movq(Address(RSI, 0), RAX); | 1088 __ movq(Address(RSI, 0), RAX); |
1090 __ addq(RSI, Immediate(2*Bigint::kBytesPerDigit)); | 1089 __ addq(RSI, Immediate(2 * Bigint::kBytesPerDigit)); |
1091 | 1090 |
1092 // int n = (used - i + 1)/2 - 1 | 1091 // int n = (used - i + 1)/2 - 1 |
1093 __ movq(R8, Address(RSP, 1 * kWordSize)); // used is Smi | 1092 __ movq(R8, Address(RSP, 1 * kWordSize)); // used is Smi |
1094 __ subq(R8, Address(RSP, 3 * kWordSize)); // i is Smi | 1093 __ subq(R8, Address(RSP, 3 * kWordSize)); // i is Smi |
1095 __ addq(R8, Immediate(2)); | 1094 __ addq(R8, Immediate(2)); |
1096 __ sarq(R8, Immediate(2)); | 1095 __ sarq(R8, Immediate(2)); |
1097 __ decq(R8); // R8 = number of digit pairs to process. | 1096 __ decq(R8); // R8 = number of digit pairs to process. |
1098 | 1097 |
1099 // uint128_t c = high64(t) | 1098 // uint128_t c = high64(t) |
1100 __ xorq(R13, R13); // R13 = high64(c) == 0 | 1099 __ xorq(R13, R13); // R13 = high64(c) == 0 |
1101 __ movq(R12, RDX); // R12 = low64(c) == high64(t) | 1100 __ movq(R12, RDX); // R12 = low64(c) == high64(t) |
1102 | 1101 |
1103 Label loop, done; | 1102 Label loop, done; |
1104 __ Bind(&loop); | 1103 __ Bind(&loop); |
1105 // x: RBX | 1104 // x: RBX |
1106 // xip: RDI | 1105 // xip: RDI |
1107 // ajp: RSI | 1106 // ajp: RSI |
1108 // c: R13:R12 | 1107 // c: R13:R12 |
1109 // t: RCX:RDX:RAX (not live at loop entry) | 1108 // t: RCX:RDX:RAX (not live at loop entry) |
1110 // n: R8 | 1109 // n: R8 |
1111 | 1110 |
1112 // while (--n >= 0) | 1111 // while (--n >= 0) |
1113 __ decq(R8); // --n | 1112 __ decq(R8); // --n |
1114 __ j(NEGATIVE, &done, Assembler::kNearJump); | 1113 __ j(NEGATIVE, &done, Assembler::kNearJump); |
1115 | 1114 |
1116 // uint64_t xi = *xip++ | 1115 // uint64_t xi = *xip++ |
1117 __ movq(RAX, Address(RDI, 0)); | 1116 __ movq(RAX, Address(RDI, 0)); |
1118 __ addq(RDI, Immediate(2*Bigint::kBytesPerDigit)); | 1117 __ addq(RDI, Immediate(2 * Bigint::kBytesPerDigit)); |
1119 | 1118 |
1120 // uint192_t t = RCX:RDX:RAX = 2*x*xi + aj + c | 1119 // uint192_t t = RCX:RDX:RAX = 2*x*xi + aj + c |
1121 __ mulq(RBX); // RDX:RAX = RAX * RBX | 1120 __ mulq(RBX); // RDX:RAX = RAX * RBX |
1122 __ xorq(RCX, RCX); // RCX = 0 | 1121 __ xorq(RCX, RCX); // RCX = 0 |
1123 __ shldq(RCX, RDX, Immediate(1)); | 1122 __ shldq(RCX, RDX, Immediate(1)); |
1124 __ shldq(RDX, RAX, Immediate(1)); | 1123 __ shldq(RDX, RAX, Immediate(1)); |
1125 __ shlq(RAX, Immediate(1)); // RCX:RDX:RAX <<= 1 | 1124 __ shlq(RAX, Immediate(1)); // RCX:RDX:RAX <<= 1 |
1126 __ addq(RAX, Address(RSI, 0)); // t += aj | 1125 __ addq(RAX, Address(RSI, 0)); // t += aj |
1127 __ adcq(RDX, Immediate(0)); | 1126 __ adcq(RDX, Immediate(0)); |
1128 __ adcq(RCX, Immediate(0)); | 1127 __ adcq(RCX, Immediate(0)); |
1129 __ addq(RAX, R12); // t += low64(c) | 1128 __ addq(RAX, R12); // t += low64(c) |
1130 __ adcq(RDX, R13); // t += high64(c) << 64 | 1129 __ adcq(RDX, R13); // t += high64(c) << 64 |
1131 __ adcq(RCX, Immediate(0)); | 1130 __ adcq(RCX, Immediate(0)); |
1132 | 1131 |
1133 // *ajp++ = low64(t) | 1132 // *ajp++ = low64(t) |
1134 __ movq(Address(RSI, 0), RAX); | 1133 __ movq(Address(RSI, 0), RAX); |
1135 __ addq(RSI, Immediate(2*Bigint::kBytesPerDigit)); | 1134 __ addq(RSI, Immediate(2 * Bigint::kBytesPerDigit)); |
1136 | 1135 |
1137 // c = high128(t) | 1136 // c = high128(t) |
1138 __ movq(R12, RDX); | 1137 __ movq(R12, RDX); |
1139 __ movq(R13, RCX); | 1138 __ movq(R13, RCX); |
1140 | 1139 |
1141 __ jmp(&loop, Assembler::kNearJump); | 1140 __ jmp(&loop, Assembler::kNearJump); |
1142 | 1141 |
1143 __ Bind(&done); | 1142 __ Bind(&done); |
1144 // uint128_t t = aj + c | 1143 // uint128_t t = aj + c |
1145 __ addq(R12, Address(RSI, 0)); // t = c, t += *ajp | 1144 __ addq(R12, Address(RSI, 0)); // t = c, t += *ajp |
1146 __ adcq(R13, Immediate(0)); | 1145 __ adcq(R13, Immediate(0)); |
1147 | 1146 |
1148 // *ajp++ = low64(t) | 1147 // *ajp++ = low64(t) |
1149 // *ajp = high64(t) | 1148 // *ajp = high64(t) |
1150 __ movq(Address(RSI, 0), R12); | 1149 __ movq(Address(RSI, 0), R12); |
1151 __ movq(Address(RSI, 2*Bigint::kBytesPerDigit), R13); | 1150 __ movq(Address(RSI, 2 * Bigint::kBytesPerDigit), R13); |
1152 | 1151 |
1153 __ Bind(&x_zero); | 1152 __ Bind(&x_zero); |
1154 __ movq(RAX, Immediate(Smi::RawValue(2))); // Two digits processed. | 1153 __ movq(RAX, Immediate(Smi::RawValue(2))); // Two digits processed. |
1155 __ ret(); | 1154 __ ret(); |
1156 } | 1155 } |
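
// A hedged C++ sketch of the squaring step above (the 128-bit carry c lives
// in R13:R12, the 192-bit t in RCX:RDX:RAX), again assuming the unsigned
// __int128 extension.
#include <stdint.h>

typedef unsigned __int128 u128;

static void SqrAdd64(const uint64_t* xip, uint64_t* ajp, int64_t n) {
  uint64_t x = *xip++;                  // RBX = x = *xip++
  if (x == 0) return;                   // return if x == 0
  u128 t = (u128)x * x + *ajp;          // t = x*x + *ajp
  *ajp++ = (uint64_t)t;                 // *ajp++ = low64(t)
  u128 c = t >> 64;                     // c = high64(t)
  while (--n >= 0) {
    u128 p = (u128)x * *xip++;          // p = x * xi
    uint64_t hi = (uint64_t)(p >> 127);  // Bit shifted into the third limb.
    u128 lo = p << 1;                   // Low 128 bits of 2*p (shld/shl pair).
    u128 s = lo + *ajp;                 // t += aj ...
    hi += (s < lo);
    s += c;                             // ... t += c; carries land in hi.
    hi += (s < c);
    *ajp++ = (uint64_t)s;               // *ajp++ = low64(t)
    c = (s >> 64) | ((u128)hi << 64);   // c = high128(t)
  }
  u128 f = c + *ajp;                    // t = aj + c
  ajp[0] = (uint64_t)f;                 // *ajp++ = low64(t)
  ajp[1] = (uint64_t)(f >> 64);         // *ajp = high64(t)
}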
1157 | 1156 |
1158 | 1157 |
1159 void Intrinsifier::Bigint_estQuotientDigit(Assembler* assembler) { | 1158 void Intrinsifier::Bigint_estQuotientDigit(Assembler* assembler) { |
1160 // Pseudo code: | 1159 // Pseudo code: |
1161 // static int _estQuotientDigit(Uint32List args, Uint32List digits, int i) { | 1160 // static int _estQuotientDigit(Uint32List args, Uint32List digits, int i) { |
(...skipping 28 matching lines...) Expand all Loading... |
1190 | 1189 |
1191 // RAX = qd = (DIGIT_MASK << 32) | DIGIT_MASK = -1 | 1190 // RAX = qd = (DIGIT_MASK << 32) | DIGIT_MASK = -1 |
1192 __ movq(RAX, Immediate(-1)); | 1191 __ movq(RAX, Immediate(-1)); |
1193 | 1192 |
1194 // Return qd if dh == yt | 1193 // Return qd if dh == yt |
1195 Label return_qd; | 1194 Label return_qd; |
1196 __ cmpq(RDX, RCX); | 1195 __ cmpq(RDX, RCX); |
1197 __ j(EQUAL, &return_qd, Assembler::kNearJump); | 1196 __ j(EQUAL, &return_qd, Assembler::kNearJump); |
1198 | 1197 |
1199 // RAX = dl = dp[-1] | 1198 // RAX = dl = dp[-1] |
1200 __ movq(RAX, Address(RBX, -2*Bigint::kBytesPerDigit)); | 1199 __ movq(RAX, Address(RBX, -2 * Bigint::kBytesPerDigit)); |
1201 | 1200 |
1202 // RAX = qd = dh:dl / yt = RDX:RAX / RCX | 1201 // RAX = qd = dh:dl / yt = RDX:RAX / RCX |
1203 __ divq(RCX); | 1202 __ divq(RCX); |
1204 | 1203 |
1205 __ Bind(&return_qd); | 1204 __ Bind(&return_qd); |
1206 // args[2..3] = qd | 1205 // args[2..3] = qd |
1207 __ movq(FieldAddress(RDI, | 1206 __ movq( |
1208 TypedData::data_offset() + 2*Bigint::kBytesPerDigit), | 1207 FieldAddress(RDI, TypedData::data_offset() + 2 * Bigint::kBytesPerDigit), |
1209 RAX); | 1208 RAX); |
1210 | 1209 |
1211 __ movq(RAX, Immediate(Smi::RawValue(2))); // Two digits processed. | 1210 __ movq(RAX, Immediate(Smi::RawValue(2))); // Two digits processed. |
1212 __ ret(); | 1211 __ ret(); |
1213 } | 1212 } |
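
// A hedged C++ sketch of the estimate above, assuming the caller's
// precondition dh <= yt (normalized divisor). The dh == yt special case
// returns all ones and avoids the divide-overflow fault divq would raise
// when the 128-by-64-bit quotient does not fit in 64 bits.
#include <stdint.h>

static uint64_t EstQuotientDigit(uint64_t dh, uint64_t dl, uint64_t yt) {
  if (dh == yt) {
    return ~(uint64_t)0;  // qd = (DIGIT_MASK << 32) | DIGIT_MASK = -1.
  }
  unsigned __int128 d = ((unsigned __int128)dh << 64) | dl;
  return (uint64_t)(d / yt);  // dh < yt, so the quotient fits in 64 bits.
}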
1214 | 1213 |
1215 | 1214 |
1216 void Intrinsifier::Montgomery_mulMod(Assembler* assembler) { | 1215 void Intrinsifier::Montgomery_mulMod(Assembler* assembler) { |
1217 // Pseudo code: | 1216 // Pseudo code: |
1218 // static int _mulMod(Uint32List args, Uint32List digits, int i) { | 1217 // static int _mulMod(Uint32List args, Uint32List digits, int i) { |
1219 // uint64_t rho = args[_RHO .. _RHO_HI]; // _RHO == 2, _RHO_HI == 3. | 1218 // uint64_t rho = args[_RHO .. _RHO_HI]; // _RHO == 2, _RHO_HI == 3. |
1220 // uint64_t d = digits[i >> 1 .. (i >> 1) + 1]; // i is Smi and even. | 1219 // uint64_t d = digits[i >> 1 .. (i >> 1) + 1]; // i is Smi and even. |
1221 // uint128_t t = rho*d; | 1220 // uint128_t t = rho*d; |
1222 // args[_MU .. _MU_HI] = t mod DIGIT_BASE^2; // _MU == 4, _MU_HI == 5. | 1221 // args[_MU .. _MU_HI] = t mod DIGIT_BASE^2; // _MU == 4, _MU_HI == 5. |
1223 // return 2; | 1222 // return 2; |
1224 // } | 1223 // } |
1225 | 1224 |
1226 // RDI = args | 1225 // RDI = args |
1227 __ movq(RDI, Address(RSP, 3 * kWordSize)); // args | 1226 __ movq(RDI, Address(RSP, 3 * kWordSize)); // args |
1228 | 1227 |
1229 // RCX = rho = args[2 .. 3] | 1228 // RCX = rho = args[2 .. 3] |
1230 __ movq(RCX, | 1229 __ movq(RCX, FieldAddress( |
1231 FieldAddress(RDI, | 1230 RDI, TypedData::data_offset() + 2 * Bigint::kBytesPerDigit)); |
1232 TypedData::data_offset() + 2*Bigint::kBytesPerDigit)); | |
1233 | 1231 |
1234 // RAX = digits[i >> 1 .. (i >> 1) + 1] | 1232 // RAX = digits[i >> 1 .. (i >> 1) + 1] |
1235 __ movq(RBX, Address(RSP, 2 * kWordSize)); // digits | 1233 __ movq(RBX, Address(RSP, 2 * kWordSize)); // digits |
1236 __ movq(RAX, Address(RSP, 1 * kWordSize)); // i is Smi | 1234 __ movq(RAX, Address(RSP, 1 * kWordSize)); // i is Smi |
1237 __ movq(RAX, FieldAddress(RBX, RAX, TIMES_2, TypedData::data_offset())); | 1235 __ movq(RAX, FieldAddress(RBX, RAX, TIMES_2, TypedData::data_offset())); |
1238 | 1236 |
1239 // RDX:RAX = t = rho*d | 1237 // RDX:RAX = t = rho*d |
1240 __ mulq(RCX); | 1238 __ mulq(RCX); |
1241 | 1239 |
1242 // args[4 .. 5] = t mod DIGIT_BASE^2 = low64(t) | 1240 // args[4 .. 5] = t mod DIGIT_BASE^2 = low64(t) |
1243 __ movq(FieldAddress(RDI, | 1241 __ movq( |
1244 TypedData::data_offset() + 4*Bigint::kBytesPerDigit), | 1242 FieldAddress(RDI, TypedData::data_offset() + 4 * Bigint::kBytesPerDigit), |
1245 RAX); | 1243 RAX); |
1246 | 1244 |
1247 __ movq(RAX, Immediate(Smi::RawValue(2))); // Two digits processed. | 1245 __ movq(RAX, Immediate(Smi::RawValue(2))); // Two digits processed. |
1248 __ ret(); | 1246 __ ret(); |
1249 } | 1247 } |
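
// What the Montgomery step above computes, as a one-line C++ sketch: only
// the low 64 bits of rho*d survive, i.e. the product mod DIGIT_BASE^2.
#include <stdint.h>

static uint64_t MulMod64(uint64_t rho, uint64_t d) {
  return rho * d;  // Unsigned multiply wraps, giving low64(rho * d).
}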
1250 | 1248 |
1251 | 1249 |
1252 // Check if the last argument is a double, jump to label 'is_smi' if smi | 1250 // Check if the last argument is a double, jump to label 'is_smi' if smi |
1253 // (easy to convert to double), otherwise jump to label 'not_double_smi'. | 1251 // (easy to convert to double), otherwise jump to label 'not_double_smi'. |
1254 // Returns the last argument in RAX. | 1252 // Returns the last argument in RAX. |
1255 static void TestLastArgumentIsDouble(Assembler* assembler, | 1253 static void TestLastArgumentIsDouble(Assembler* assembler, |
1256 Label* is_smi, | 1254 Label* is_smi, |
1257 Label* not_double_smi) { | 1255 Label* not_double_smi) { |
1258 __ movq(RAX, Address(RSP, + 1 * kWordSize)); | 1256 __ movq(RAX, Address(RSP, +1 * kWordSize)); |
1259 __ testq(RAX, Immediate(kSmiTagMask)); | 1257 __ testq(RAX, Immediate(kSmiTagMask)); |
1260 __ j(ZERO, is_smi); // Jump if Smi. | 1258 __ j(ZERO, is_smi); // Jump if Smi. |
1261 __ CompareClassId(RAX, kDoubleCid); | 1259 __ CompareClassId(RAX, kDoubleCid); |
1262 __ j(NOT_EQUAL, not_double_smi); | 1260 __ j(NOT_EQUAL, not_double_smi); |
1263 // Fall through if double. | 1261 // Fall through if double. |
1264 } | 1262 } |
1265 | 1263 |
1266 | 1264 |
1267 // Both arguments on stack, left argument is a double, right argument is of | 1265 // Both arguments on stack, left argument is a double, right argument is of |
1268 // unknown type. Return true or false object in RAX. Any NaN argument | 1266 // unknown type. Return true or false object in RAX. Any NaN argument |
1269 // returns false. Any non-double argument causes control flow to fall through | 1267 // returns false. Any non-double argument causes control flow to fall through |
1270 // to the slow case (compiled method body). | 1268 // to the slow case (compiled method body). |
1271 static void CompareDoubles(Assembler* assembler, Condition true_condition) { | 1269 static void CompareDoubles(Assembler* assembler, Condition true_condition) { |
1272 Label fall_through, is_false, is_true, is_smi, double_op; | 1270 Label fall_through, is_false, is_true, is_smi, double_op; |
1273 TestLastArgumentIsDouble(assembler, &is_smi, &fall_through); | 1271 TestLastArgumentIsDouble(assembler, &is_smi, &fall_through); |
1274 // Both arguments are double, right operand is in RAX. | 1272 // Both arguments are double, right operand is in RAX. |
1275 __ movsd(XMM1, FieldAddress(RAX, Double::value_offset())); | 1273 __ movsd(XMM1, FieldAddress(RAX, Double::value_offset())); |
1276 __ Bind(&double_op); | 1274 __ Bind(&double_op); |
1277 __ movq(RAX, Address(RSP, + 2 * kWordSize)); // Left argument. | 1275 __ movq(RAX, Address(RSP, +2 * kWordSize)); // Left argument. |
1278 __ movsd(XMM0, FieldAddress(RAX, Double::value_offset())); | 1276 __ movsd(XMM0, FieldAddress(RAX, Double::value_offset())); |
1279 __ comisd(XMM0, XMM1); | 1277 __ comisd(XMM0, XMM1); |
1280 __ j(PARITY_EVEN, &is_false, Assembler::kNearJump); // NaN -> false; | 1278 __ j(PARITY_EVEN, &is_false, Assembler::kNearJump); // NaN -> false; |
1281 __ j(true_condition, &is_true, Assembler::kNearJump); | 1279 __ j(true_condition, &is_true, Assembler::kNearJump); |
1282 // Fall through false. | 1280 // Fall through false. |
1283 __ Bind(&is_false); | 1281 __ Bind(&is_false); |
1284 __ LoadObject(RAX, Bool::False()); | 1282 __ LoadObject(RAX, Bool::False()); |
1285 __ ret(); | 1283 __ ret(); |
1286 __ Bind(&is_true); | 1284 __ Bind(&is_true); |
1287 __ LoadObject(RAX, Bool::True()); | 1285 __ LoadObject(RAX, Bool::True()); |
(...skipping 32 matching lines...)
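
// A hedged C++ sketch of the comparison contract implemented above: any NaN
// operand yields false, matching the PARITY_EVEN branch (comisd sets the
// parity flag on an unordered result). true_condition varies per comparison
// intrinsic; less-than is shown here as a representative example.
#include <cmath>

static bool CompareDoublesLT(double left, double right) {
  if (std::isnan(left) || std::isnan(right)) return false;  // NaN -> false
  return left < right;
}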
1320 | 1318 |
1321 | 1319 |
1322 // Expects left argument to be double (receiver). Right argument is unknown. | 1320 // Expects left argument to be double (receiver). Right argument is unknown. |
1323 // Both arguments are on stack. | 1321 // Both arguments are on stack. |
1324 static void DoubleArithmeticOperations(Assembler* assembler, Token::Kind kind) { | 1322 static void DoubleArithmeticOperations(Assembler* assembler, Token::Kind kind) { |
1325 Label fall_through, is_smi, double_op; | 1323 Label fall_through, is_smi, double_op; |
1326 TestLastArgumentIsDouble(assembler, &is_smi, &fall_through); | 1324 TestLastArgumentIsDouble(assembler, &is_smi, &fall_through); |
1327 // Both arguments are double, right operand is in RAX. | 1325 // Both arguments are double, right operand is in RAX. |
1328 __ movsd(XMM1, FieldAddress(RAX, Double::value_offset())); | 1326 __ movsd(XMM1, FieldAddress(RAX, Double::value_offset())); |
1329 __ Bind(&double_op); | 1327 __ Bind(&double_op); |
1330 __ movq(RAX, Address(RSP, + 2 * kWordSize)); // Left argument. | 1328 __ movq(RAX, Address(RSP, +2 * kWordSize)); // Left argument. |
1331 __ movsd(XMM0, FieldAddress(RAX, Double::value_offset())); | 1329 __ movsd(XMM0, FieldAddress(RAX, Double::value_offset())); |
1332 switch (kind) { | 1330 switch (kind) { |
1333 case Token::kADD: __ addsd(XMM0, XMM1); break; | 1331 case Token::kADD: |
1334 case Token::kSUB: __ subsd(XMM0, XMM1); break; | 1332 __ addsd(XMM0, XMM1); |
1335 case Token::kMUL: __ mulsd(XMM0, XMM1); break; | 1333 break; |
1336 case Token::kDIV: __ divsd(XMM0, XMM1); break; | 1334 case Token::kSUB: |
1337 default: UNREACHABLE(); | 1335 __ subsd(XMM0, XMM1); |
| 1336 break; |
| 1337 case Token::kMUL: |
| 1338 __ mulsd(XMM0, XMM1); |
| 1339 break; |
| 1340 case Token::kDIV: |
| 1341 __ divsd(XMM0, XMM1); |
| 1342 break; |
| 1343 default: |
| 1344 UNREACHABLE(); |
1338 } | 1345 } |
1339 const Class& double_class = Class::Handle( | 1346 const Class& double_class = |
1340 Isolate::Current()->object_store()->double_class()); | 1347 Class::Handle(Isolate::Current()->object_store()->double_class()); |
1341 __ TryAllocate(double_class, | 1348 __ TryAllocate(double_class, &fall_through, Assembler::kFarJump, |
1342 &fall_through, | |
1343 Assembler::kFarJump, | |
1344 RAX, // Result register. | 1349 RAX, // Result register. |
1345 R13); | 1350 R13); |
1346 __ movsd(FieldAddress(RAX, Double::value_offset()), XMM0); | 1351 __ movsd(FieldAddress(RAX, Double::value_offset()), XMM0); |
1347 __ ret(); | 1352 __ ret(); |
1348 __ Bind(&is_smi); | 1353 __ Bind(&is_smi); |
1349 __ SmiUntag(RAX); | 1354 __ SmiUntag(RAX); |
1350 __ cvtsi2sdq(XMM1, RAX); | 1355 __ cvtsi2sdq(XMM1, RAX); |
1351 __ jmp(&double_op); | 1356 __ jmp(&double_op); |
1352 __ Bind(&fall_through); | 1357 __ Bind(&fall_through); |
1353 } | 1358 } |
(...skipping 15 matching lines...)
1369 | 1374 |
1370 | 1375 |
1371 void Intrinsifier::Double_div(Assembler* assembler) { | 1376 void Intrinsifier::Double_div(Assembler* assembler) { |
1372 DoubleArithmeticOperations(assembler, Token::kDIV); | 1377 DoubleArithmeticOperations(assembler, Token::kDIV); |
1373 } | 1378 } |
1374 | 1379 |
1375 | 1380 |
1376 void Intrinsifier::Double_mulFromInteger(Assembler* assembler) { | 1381 void Intrinsifier::Double_mulFromInteger(Assembler* assembler) { |
1377 Label fall_through; | 1382 Label fall_through; |
1378 // Only smis allowed. | 1383 // Only smis allowed. |
1379 __ movq(RAX, Address(RSP, + 1 * kWordSize)); | 1384 __ movq(RAX, Address(RSP, +1 * kWordSize)); |
1380 __ testq(RAX, Immediate(kSmiTagMask)); | 1385 __ testq(RAX, Immediate(kSmiTagMask)); |
1381 __ j(NOT_ZERO, &fall_through); | 1386 __ j(NOT_ZERO, &fall_through); |
1382 // Is Smi. | 1387 // Is Smi. |
1383 __ SmiUntag(RAX); | 1388 __ SmiUntag(RAX); |
1384 __ cvtsi2sdq(XMM1, RAX); | 1389 __ cvtsi2sdq(XMM1, RAX); |
1385 __ movq(RAX, Address(RSP, + 2 * kWordSize)); | 1390 __ movq(RAX, Address(RSP, +2 * kWordSize)); |
1386 __ movsd(XMM0, FieldAddress(RAX, Double::value_offset())); | 1391 __ movsd(XMM0, FieldAddress(RAX, Double::value_offset())); |
1387 __ mulsd(XMM0, XMM1); | 1392 __ mulsd(XMM0, XMM1); |
1388 const Class& double_class = Class::Handle( | 1393 const Class& double_class = |
1389 Isolate::Current()->object_store()->double_class()); | 1394 Class::Handle(Isolate::Current()->object_store()->double_class()); |
1390 __ TryAllocate(double_class, | 1395 __ TryAllocate(double_class, &fall_through, Assembler::kFarJump, |
1391 &fall_through, | |
1392 Assembler::kFarJump, | |
1393 RAX, // Result register. | 1396 RAX, // Result register. |
1394 R13); | 1397 R13); |
1395 __ movsd(FieldAddress(RAX, Double::value_offset()), XMM0); | 1398 __ movsd(FieldAddress(RAX, Double::value_offset()), XMM0); |
1396 __ ret(); | 1399 __ ret(); |
1397 __ Bind(&fall_through); | 1400 __ Bind(&fall_through); |
1398 } | 1401 } |
1399 | 1402 |
1400 | 1403 |
1401 // Left is double, right is integer (Bigint, Mint or Smi). | 1404 // Left is double, right is integer (Bigint, Mint or Smi). |
1402 void Intrinsifier::DoubleFromInteger(Assembler* assembler) { | 1405 void Intrinsifier::DoubleFromInteger(Assembler* assembler) { |
1403 Label fall_through; | 1406 Label fall_through; |
1404 __ movq(RAX, Address(RSP, +1 * kWordSize)); | 1407 __ movq(RAX, Address(RSP, +1 * kWordSize)); |
1405 __ testq(RAX, Immediate(kSmiTagMask)); | 1408 __ testq(RAX, Immediate(kSmiTagMask)); |
1406 __ j(NOT_ZERO, &fall_through); | 1409 __ j(NOT_ZERO, &fall_through); |
1407 // Is Smi. | 1410 // Is Smi. |
1408 __ SmiUntag(RAX); | 1411 __ SmiUntag(RAX); |
1409 __ cvtsi2sdq(XMM0, RAX); | 1412 __ cvtsi2sdq(XMM0, RAX); |
1410 const Class& double_class = Class::Handle( | 1413 const Class& double_class = |
1411 Isolate::Current()->object_store()->double_class()); | 1414 Class::Handle(Isolate::Current()->object_store()->double_class()); |
1412 __ TryAllocate(double_class, | 1415 __ TryAllocate(double_class, &fall_through, Assembler::kFarJump, |
1413 &fall_through, | |
1414 Assembler::kFarJump, | |
1415 RAX, // Result register. | 1416 RAX, // Result register. |
1416 R13); | 1417 R13); |
1417 __ movsd(FieldAddress(RAX, Double::value_offset()), XMM0); | 1418 __ movsd(FieldAddress(RAX, Double::value_offset()), XMM0); |
1418 __ ret(); | 1419 __ ret(); |
1419 __ Bind(&fall_through); | 1420 __ Bind(&fall_through); |
1420 } | 1421 } |
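
// A sketch of the Smi fast path shared by the conversion intrinsics above:
// a tagged Smi is value << 1 (kSmiTagShift == 1, asserted elsewhere in this
// file), so untagging is one arithmetic shift before the int64-to-double
// convert (cvtsi2sdq).
#include <stdint.h>

static double DoubleFromSmi(intptr_t tagged) {
  intptr_t value = tagged >> 1;  // SmiUntag.
  return (double)value;          // cvtsi2sdq.
}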
1421 | 1422 |
1422 | 1423 |
1423 void Intrinsifier::Double_getIsNaN(Assembler* assembler) { | 1424 void Intrinsifier::Double_getIsNaN(Assembler* assembler) { |
1424 Label is_true; | 1425 Label is_true; |
(...skipping 69 matching lines...)
1494 } | 1495 } |
1495 | 1496 |
1496 | 1497 |
1497 void Intrinsifier::MathSqrt(Assembler* assembler) { | 1498 void Intrinsifier::MathSqrt(Assembler* assembler) { |
1498 Label fall_through, is_smi, double_op; | 1499 Label fall_through, is_smi, double_op; |
1499 TestLastArgumentIsDouble(assembler, &is_smi, &fall_through); | 1500 TestLastArgumentIsDouble(assembler, &is_smi, &fall_through); |
1500 // Argument is double and is in RAX. | 1501 // Argument is double and is in RAX. |
1501 __ movsd(XMM1, FieldAddress(RAX, Double::value_offset())); | 1502 __ movsd(XMM1, FieldAddress(RAX, Double::value_offset())); |
1502 __ Bind(&double_op); | 1503 __ Bind(&double_op); |
1503 __ sqrtsd(XMM0, XMM1); | 1504 __ sqrtsd(XMM0, XMM1); |
1504 const Class& double_class = Class::Handle( | 1505 const Class& double_class = |
1505 Isolate::Current()->object_store()->double_class()); | 1506 Class::Handle(Isolate::Current()->object_store()->double_class()); |
1506 __ TryAllocate(double_class, | 1507 __ TryAllocate(double_class, &fall_through, Assembler::kFarJump, |
1507 &fall_through, | |
1508 Assembler::kFarJump, | |
1509 RAX, // Result register. | 1508 RAX, // Result register. |
1510 R13); | 1509 R13); |
1511 __ movsd(FieldAddress(RAX, Double::value_offset()), XMM0); | 1510 __ movsd(FieldAddress(RAX, Double::value_offset()), XMM0); |
1512 __ ret(); | 1511 __ ret(); |
1513 __ Bind(&is_smi); | 1512 __ Bind(&is_smi); |
1514 __ SmiUntag(RAX); | 1513 __ SmiUntag(RAX); |
1515 __ cvtsi2sdq(XMM1, RAX); | 1514 __ cvtsi2sdq(XMM1, RAX); |
1516 __ jmp(&double_op); | 1515 __ jmp(&double_op); |
1517 __ Bind(&fall_through); | 1516 __ Bind(&fall_through); |
1518 } | 1517 } |
1519 | 1518 |
1520 | 1519 |
1521 // var state = ((_A * (_state[kSTATE_LO])) + _state[kSTATE_HI]) & _MASK_64; | 1520 // var state = ((_A * (_state[kSTATE_LO])) + _state[kSTATE_HI]) & _MASK_64; |
1522 // _state[kSTATE_LO] = state & _MASK_32; | 1521 // _state[kSTATE_LO] = state & _MASK_32; |
1523 // _state[kSTATE_HI] = state >> 32; | 1522 // _state[kSTATE_HI] = state >> 32; |
1524 void Intrinsifier::Random_nextState(Assembler* assembler) { | 1523 void Intrinsifier::Random_nextState(Assembler* assembler) { |
1525 const Library& math_lib = Library::Handle(Library::MathLibrary()); | 1524 const Library& math_lib = Library::Handle(Library::MathLibrary()); |
1526 ASSERT(!math_lib.IsNull()); | 1525 ASSERT(!math_lib.IsNull()); |
1527 const Class& random_class = Class::Handle( | 1526 const Class& random_class = |
1528 math_lib.LookupClassAllowPrivate(Symbols::_Random())); | 1527 Class::Handle(math_lib.LookupClassAllowPrivate(Symbols::_Random())); |
1529 ASSERT(!random_class.IsNull()); | 1528 ASSERT(!random_class.IsNull()); |
1530 const Field& state_field = Field::ZoneHandle( | 1529 const Field& state_field = Field::ZoneHandle( |
1531 random_class.LookupInstanceFieldAllowPrivate(Symbols::_state())); | 1530 random_class.LookupInstanceFieldAllowPrivate(Symbols::_state())); |
1532 ASSERT(!state_field.IsNull()); | 1531 ASSERT(!state_field.IsNull()); |
1533 const Field& random_A_field = Field::ZoneHandle( | 1532 const Field& random_A_field = Field::ZoneHandle( |
1534 random_class.LookupStaticFieldAllowPrivate(Symbols::_A())); | 1533 random_class.LookupStaticFieldAllowPrivate(Symbols::_A())); |
1535 ASSERT(!random_A_field.IsNull()); | 1534 ASSERT(!random_A_field.IsNull()); |
1536 ASSERT(random_A_field.is_const()); | 1535 ASSERT(random_A_field.is_const()); |
1537 const Instance& a_value = Instance::Handle(random_A_field.StaticValue()); | 1536 const Instance& a_value = Instance::Handle(random_A_field.StaticValue()); |
1538 const int64_t a_int_value = Integer::Cast(a_value).AsInt64Value(); | 1537 const int64_t a_int_value = Integer::Cast(a_value).AsInt64Value(); |
1539 // Receiver. | 1538 // Receiver. |
1540 __ movq(RAX, Address(RSP, + 1 * kWordSize)); | 1539 __ movq(RAX, Address(RSP, +1 * kWordSize)); |
1541 // Field '_state'. | 1540 // Field '_state'. |
1542 __ movq(RBX, FieldAddress(RAX, state_field.Offset())); | 1541 __ movq(RBX, FieldAddress(RAX, state_field.Offset())); |
1543 // Addresses of _state[0] and _state[1]. | 1542 // Addresses of _state[0] and _state[1]. |
1544 const intptr_t scale = Instance::ElementSizeFor(kTypedDataUint32ArrayCid); | 1543 const intptr_t scale = Instance::ElementSizeFor(kTypedDataUint32ArrayCid); |
1545 const intptr_t offset = Instance::DataOffsetFor(kTypedDataUint32ArrayCid); | 1544 const intptr_t offset = Instance::DataOffsetFor(kTypedDataUint32ArrayCid); |
1546 Address addr_0 = FieldAddress(RBX, 0 * scale + offset); | 1545 Address addr_0 = FieldAddress(RBX, 0 * scale + offset); |
1547 Address addr_1 = FieldAddress(RBX, 1 * scale + offset); | 1546 Address addr_1 = FieldAddress(RBX, 1 * scale + offset); |
1548 __ movq(RAX, Immediate(a_int_value)); | 1547 __ movq(RAX, Immediate(a_int_value)); |
1549 __ movl(RCX, addr_0); | 1548 __ movl(RCX, addr_0); |
1550 __ imulq(RCX, RAX); | 1549 __ imulq(RCX, RAX); |
1551 __ movl(RDX, addr_1); | 1550 __ movl(RDX, addr_1); |
1552 __ addq(RDX, RCX); | 1551 __ addq(RDX, RCX); |
1553 __ movl(addr_0, RDX); | 1552 __ movl(addr_0, RDX); |
1554 __ shrq(RDX, Immediate(32)); | 1553 __ shrq(RDX, Immediate(32)); |
1555 __ movl(addr_1, RDX); | 1554 __ movl(addr_1, RDX); |
1556 __ ret(); | 1555 __ ret(); |
1557 } | 1556 } |
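
// A C++ sketch of the multiply-with-carry step generated above, mirroring
// the pseudo code in the comment before Random_nextState.
#include <stdint.h>

static void NextState(uint32_t state[2], uint64_t a) {
  uint64_t s = a * state[0] + state[1];  // (_A * state_lo + state_hi), mod 2^64.
  state[0] = (uint32_t)s;                // state & _MASK_32.
  state[1] = (uint32_t)(s >> 32);        // state >> 32.
}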
1558 | 1557 |
1559 // Identity comparison. | 1558 // Identity comparison. |
1560 void Intrinsifier::ObjectEquals(Assembler* assembler) { | 1559 void Intrinsifier::ObjectEquals(Assembler* assembler) { |
1561 Label is_true; | 1560 Label is_true; |
1562 const intptr_t kReceiverOffset = 2; | 1561 const intptr_t kReceiverOffset = 2; |
1563 const intptr_t kArgumentOffset = 1; | 1562 const intptr_t kArgumentOffset = 1; |
1564 | 1563 |
1565 __ movq(RAX, Address(RSP, + kArgumentOffset * kWordSize)); | 1564 __ movq(RAX, Address(RSP, +kArgumentOffset * kWordSize)); |
1566 __ cmpq(RAX, Address(RSP, + kReceiverOffset * kWordSize)); | 1565 __ cmpq(RAX, Address(RSP, +kReceiverOffset * kWordSize)); |
1567 __ j(EQUAL, &is_true, Assembler::kNearJump); | 1566 __ j(EQUAL, &is_true, Assembler::kNearJump); |
1568 __ LoadObject(RAX, Bool::False()); | 1567 __ LoadObject(RAX, Bool::False()); |
1569 __ ret(); | 1568 __ ret(); |
1570 __ Bind(&is_true); | 1569 __ Bind(&is_true); |
1571 __ LoadObject(RAX, Bool::True()); | 1570 __ LoadObject(RAX, Bool::True()); |
1572 __ ret(); | 1571 __ ret(); |
1573 } | 1572 } |
1574 | 1573 |
1575 | 1574 |
1576 static void RangeCheck(Assembler* assembler, | 1575 static void RangeCheck(Assembler* assembler, |
1577 Register reg, | 1576 Register reg, |
1578 intptr_t low, | 1577 intptr_t low, |
1579 intptr_t high, | 1578 intptr_t high, |
1580 Condition cc, | 1579 Condition cc, |
1581 Label* target) { | 1580 Label* target) { |
1582 __ subq(reg, Immediate(low)); | 1581 __ subq(reg, Immediate(low)); |
1583 __ cmpq(reg, Immediate(high - low)); | 1582 __ cmpq(reg, Immediate(high - low)); |
1584 __ j(cc, target); | 1583 __ j(cc, target); |
1585 } | 1584 } |
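
// RangeCheck relies on the classic unsigned range-check trick: after
// subtracting 'low', a single unsigned comparison tests both bounds, because
// values below 'low' wrap around to large unsigned numbers. A sketch:
#include <stdint.h>

static bool InRange(intptr_t value, intptr_t low, intptr_t high) {
  return (uintptr_t)(value - low) <= (uintptr_t)(high - low);
}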
1586 | 1585 |
1587 | 1586 |
1588 const Condition kIfNotInRange = ABOVE; | 1587 const Condition kIfNotInRange = ABOVE; |
1589 const Condition kIfInRange = BELOW_EQUAL; | 1588 const Condition kIfInRange = BELOW_EQUAL; |
1590 | 1589 |
1591 | 1590 |
1592 static void JumpIfInteger(Assembler* assembler, | 1591 static void JumpIfInteger(Assembler* assembler, Register cid, Label* target) { |
1593 Register cid, | |
1594 Label* target) { | |
1595 RangeCheck(assembler, cid, kSmiCid, kBigintCid, kIfInRange, target); | 1592 RangeCheck(assembler, cid, kSmiCid, kBigintCid, kIfInRange, target); |
1596 } | 1593 } |
1597 | 1594 |
1598 | 1595 |
1599 static void JumpIfNotInteger(Assembler* assembler, | 1596 static void JumpIfNotInteger(Assembler* assembler, |
1600 Register cid, | 1597 Register cid, |
1601 Label* target) { | 1598 Label* target) { |
1602 RangeCheck(assembler, cid, kSmiCid, kBigintCid, kIfNotInRange, target); | 1599 RangeCheck(assembler, cid, kSmiCid, kBigintCid, kIfNotInRange, target); |
1603 } | 1600 } |
1604 | 1601 |
1605 | 1602 |
1606 static void JumpIfString(Assembler* assembler, | 1603 static void JumpIfString(Assembler* assembler, Register cid, Label* target) { |
1607 Register cid, | 1604 RangeCheck(assembler, cid, kOneByteStringCid, kExternalTwoByteStringCid, |
1608 Label* target) { | 1605 kIfInRange, target); |
1609 RangeCheck(assembler, | |
1610 cid, | |
1611 kOneByteStringCid, | |
1612 kExternalTwoByteStringCid, | |
1613 kIfInRange, | |
1614 target); | |
1615 } | 1606 } |
1616 | 1607 |
1617 | 1608 |
1618 static void JumpIfNotString(Assembler* assembler, | 1609 static void JumpIfNotString(Assembler* assembler, Register cid, Label* target) { |
1619 Register cid, | 1610 RangeCheck(assembler, cid, kOneByteStringCid, kExternalTwoByteStringCid, |
1620 Label* target) { | 1611 kIfNotInRange, target); |
1621 RangeCheck(assembler, | |
1622 cid, | |
1623 kOneByteStringCid, | |
1624 kExternalTwoByteStringCid, | |
1625 kIfNotInRange, | |
1626 target); | |
1627 } | 1612 } |
1628 | 1613 |
1629 | 1614 |
1630 // Return type quickly for simple types (not parameterized and not signature). | 1615 // Return type quickly for simple types (not parameterized and not signature). |
1631 void Intrinsifier::ObjectRuntimeType(Assembler* assembler) { | 1616 void Intrinsifier::ObjectRuntimeType(Assembler* assembler) { |
1632 Label fall_through, use_canonical_type, not_integer, not_double; | 1617 Label fall_through, use_canonical_type, not_integer, not_double; |
1633 __ movq(RAX, Address(RSP, + 1 * kWordSize)); | 1618 __ movq(RAX, Address(RSP, +1 * kWordSize)); |
1634 __ LoadClassIdMayBeSmi(RCX, RAX); | 1619 __ LoadClassIdMayBeSmi(RCX, RAX); |
1635 | 1620 |
1636 // RCX: untagged cid of instance (RAX). | 1621 // RCX: untagged cid of instance (RAX). |
1637 __ cmpq(RCX, Immediate(kClosureCid)); | 1622 __ cmpq(RCX, Immediate(kClosureCid)); |
1638 __ j(EQUAL, &fall_through); // Instance is a closure. | 1623 __ j(EQUAL, &fall_through); // Instance is a closure. |
1639 | 1624 |
1640 __ cmpl(RCX, Immediate(kNumPredefinedCids)); | 1625 __ cmpl(RCX, Immediate(kNumPredefinedCids)); |
1641 __ j(ABOVE, &use_canonical_type); | 1626 __ j(ABOVE, &use_canonical_type); |
1642 | 1627 |
1643 // If object is an instance of _Double, return double type. | 1628 // If object is an instance of _Double, return double type. |
(...skipping 37 matching lines...)
1681 __ j(EQUAL, &fall_through, Assembler::kNearJump); // Not yet set. | 1666 __ j(EQUAL, &fall_through, Assembler::kNearJump); // Not yet set. |
1682 __ ret(); | 1667 __ ret(); |
1683 | 1668 |
1684 __ Bind(&fall_through); | 1669 __ Bind(&fall_through); |
1685 } | 1670 } |
1686 | 1671 |
1687 | 1672 |
1688 void Intrinsifier::ObjectHaveSameRuntimeType(Assembler* assembler) { | 1673 void Intrinsifier::ObjectHaveSameRuntimeType(Assembler* assembler) { |
1689 Label fall_through, different_cids, equal, not_equal, not_integer; | 1674 Label fall_through, different_cids, equal, not_equal, not_integer; |
1690 | 1675 |
1691 __ movq(RAX, Address(RSP, + 1 * kWordSize)); | 1676 __ movq(RAX, Address(RSP, +1 * kWordSize)); |
1692 __ LoadClassIdMayBeSmi(RCX, RAX); | 1677 __ LoadClassIdMayBeSmi(RCX, RAX); |
1693 | 1678 |
1694 // Check if left-hand side is a closure. Closures are handled in the runtime. | 1679 // Check if left-hand side is a closure. Closures are handled in the runtime. |
1695 __ cmpq(RCX, Immediate(kClosureCid)); | 1680 __ cmpq(RCX, Immediate(kClosureCid)); |
1696 __ j(EQUAL, &fall_through); | 1681 __ j(EQUAL, &fall_through); |
1697 | 1682 |
1698 __ movq(RAX, Address(RSP, + 2 * kWordSize)); | 1683 __ movq(RAX, Address(RSP, +2 * kWordSize)); |
1699 __ LoadClassIdMayBeSmi(RDX, RAX); | 1684 __ LoadClassIdMayBeSmi(RDX, RAX); |
1700 | 1685 |
1701 // Check whether class ids match. If class ids don't match, objects can still | 1686 // Check whether class ids match. If class ids don't match, objects can still |
1702 // have the same runtime type (e.g. multiple string implementation classes | 1687 // have the same runtime type (e.g. multiple string implementation classes |
1703 // map to a single String type). | 1688 // map to a single String type). |
1704 __ cmpq(RCX, RDX); | 1689 __ cmpq(RCX, RDX); |
1705 __ j(NOT_EQUAL, &different_cids); | 1690 __ j(NOT_EQUAL, &different_cids); |
1706 | 1691 |
1707 // Objects have the same class and neither is a closure. | 1692 // Objects have the same class and neither is a closure. |
1708 // Check if there are no type arguments. In this case we can return true. | 1693 // Check if there are no type arguments. In this case we can return true. |
(...skipping 35 matching lines...)
1744 __ Bind(¬_equal); | 1729 __ Bind(¬_equal); |
1745 __ LoadObject(RAX, Bool::False()); | 1730 __ LoadObject(RAX, Bool::False()); |
1746 __ ret(); | 1731 __ ret(); |
1747 | 1732 |
1748 __ Bind(&fall_through); | 1733 __ Bind(&fall_through); |
1749 } | 1734 } |
1750 | 1735 |
1751 | 1736 |
1752 void Intrinsifier::String_getHashCode(Assembler* assembler) { | 1737 void Intrinsifier::String_getHashCode(Assembler* assembler) { |
1753 Label fall_through; | 1738 Label fall_through; |
1754 __ movq(RAX, Address(RSP, + 1 * kWordSize)); // String object. | 1739 __ movq(RAX, Address(RSP, +1 * kWordSize)); // String object. |
1755 __ movq(RAX, FieldAddress(RAX, String::hash_offset())); | 1740 __ movq(RAX, FieldAddress(RAX, String::hash_offset())); |
1756 __ cmpq(RAX, Immediate(0)); | 1741 __ cmpq(RAX, Immediate(0)); |
1757 __ j(EQUAL, &fall_through, Assembler::kNearJump); | 1742 __ j(EQUAL, &fall_through, Assembler::kNearJump); |
1758 __ ret(); | 1743 __ ret(); |
1759 __ Bind(&fall_through); | 1744 __ Bind(&fall_through); |
1760 // Hash not yet computed. | 1745 // Hash not yet computed. |
1761 } | 1746 } |
1762 | 1747 |
1763 | 1748 |
1764 void GenerateSubstringMatchesSpecialization(Assembler* assembler, | 1749 void GenerateSubstringMatchesSpecialization(Assembler* assembler, |
(...skipping 11 matching lines...)
1776 // if (start < 0) return false; | 1761 // if (start < 0) return false; |
1777 __ testq(RBX, RBX); | 1762 __ testq(RBX, RBX); |
1778 __ j(SIGN, return_false); | 1763 __ j(SIGN, return_false); |
1779 | 1764 |
1780 // if (start + other.length > this.length) return false; | 1765 // if (start + other.length > this.length) return false; |
1781 __ movq(R11, RBX); | 1766 __ movq(R11, RBX); |
1782 __ addq(R11, R9); | 1767 __ addq(R11, R9); |
1783 __ cmpq(R11, R8); | 1768 __ cmpq(R11, R8); |
1784 __ j(GREATER, return_false); | 1769 __ j(GREATER, return_false); |
1785 | 1770 |
1786 __ SmiUntag(RBX); // start | 1771 __ SmiUntag(RBX); // start |
1787 __ SmiUntag(R9); // other.length | 1772 __ SmiUntag(R9); // other.length |
1788 __ movq(R11, Immediate(0)); // i = 0 | 1773 __ movq(R11, Immediate(0)); // i = 0 |
1789 | 1774 |
1790 // do | 1775 // do |
1791 Label loop; | 1776 Label loop; |
1792 __ Bind(&loop); | 1777 __ Bind(&loop); |
1793 | 1778 |
1794 // this.codeUnitAt(i + start) | 1779 // this.codeUnitAt(i + start) |
1795 // clobbering this.length | 1780 // clobbering this.length |
1796 __ movq(R8, R11); | 1781 __ movq(R8, R11); |
1797 __ addq(R8, RBX); | 1782 __ addq(R8, RBX); |
(...skipping 24 matching lines...)
1822 | 1807 |
1823 __ jmp(return_true); | 1808 __ jmp(return_true); |
1824 } | 1809 } |
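
// A C++ sketch of the Dart-level check the specialization above compiles,
// shown for a two-byte receiver and a one-byte 'other' (the one-byte
// receiver variant only changes the receiver's code-unit width).
#include <stddef.h>
#include <stdint.h>

static bool SubstringMatches(const uint16_t* s, size_t s_len,
                             ptrdiff_t start,
                             const uint8_t* other, size_t other_len) {
  if (start < 0) return false;                          // if (start < 0)
  if ((size_t)start + other_len > s_len) return false;  // start + other.length
  for (size_t i = 0; i < other_len; i++) {              // do ... while loop.
    if (s[start + i] != other[i]) return false;         // codeUnitAt compare.
  }
  return true;
}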
1825 | 1810 |
1826 | 1811 |
1827 // bool _substringMatches(int start, String other) | 1812 // bool _substringMatches(int start, String other) |
1828 // This intrinsic handles a OneByteString or TwoByteString receiver with a | 1813 // This intrinsic handles a OneByteString or TwoByteString receiver with a |
1829 // OneByteString other. | 1814 // OneByteString other. |
1830 void Intrinsifier::StringBaseSubstringMatches(Assembler* assembler) { | 1815 void Intrinsifier::StringBaseSubstringMatches(Assembler* assembler) { |
1831 Label fall_through, return_true, return_false, try_two_byte; | 1816 Label fall_through, return_true, return_false, try_two_byte; |
1832 __ movq(RAX, Address(RSP, + 3 * kWordSize)); // receiver | 1817 __ movq(RAX, Address(RSP, +3 * kWordSize)); // receiver |
1833 __ movq(RBX, Address(RSP, + 2 * kWordSize)); // start | 1818 __ movq(RBX, Address(RSP, +2 * kWordSize)); // start |
1834 __ movq(RCX, Address(RSP, + 1 * kWordSize)); // other | 1819 __ movq(RCX, Address(RSP, +1 * kWordSize)); // other |
1835 | 1820 |
1836 __ testq(RBX, Immediate(kSmiTagMask)); | 1821 __ testq(RBX, Immediate(kSmiTagMask)); |
1837 __ j(NOT_ZERO, &fall_through); // 'start' is not Smi. | 1822 __ j(NOT_ZERO, &fall_through); // 'start' is not Smi. |
1838 | 1823 |
1839 __ CompareClassId(RCX, kOneByteStringCid); | 1824 __ CompareClassId(RCX, kOneByteStringCid); |
1840 __ j(NOT_EQUAL, &fall_through); | 1825 __ j(NOT_EQUAL, &fall_through); |
1841 | 1826 |
1842 __ CompareClassId(RAX, kOneByteStringCid); | 1827 __ CompareClassId(RAX, kOneByteStringCid); |
1843 __ j(NOT_EQUAL, &try_two_byte); | 1828 __ j(NOT_EQUAL, &try_two_byte); |
1844 | 1829 |
1845 GenerateSubstringMatchesSpecialization(assembler, | 1830 GenerateSubstringMatchesSpecialization(assembler, kOneByteStringCid, |
1846 kOneByteStringCid, | 1831 kOneByteStringCid, &return_true, |
1847 kOneByteStringCid, | |
1848 &return_true, | |
1849 &return_false); | 1832 &return_false); |
1850 | 1833 |
1851 __ Bind(&try_two_byte); | 1834 __ Bind(&try_two_byte); |
1852 __ CompareClassId(RAX, kTwoByteStringCid); | 1835 __ CompareClassId(RAX, kTwoByteStringCid); |
1853 __ j(NOT_EQUAL, &fall_through); | 1836 __ j(NOT_EQUAL, &fall_through); |
1854 | 1837 |
1855 GenerateSubstringMatchesSpecialization(assembler, | 1838 GenerateSubstringMatchesSpecialization(assembler, kTwoByteStringCid, |
1856 kTwoByteStringCid, | 1839 kOneByteStringCid, &return_true, |
1857 kOneByteStringCid, | |
1858 &return_true, | |
1859 &return_false); | 1840 &return_false); |
1860 | 1841 |
1861 __ Bind(&return_true); | 1842 __ Bind(&return_true); |
1862 __ LoadObject(RAX, Bool::True()); | 1843 __ LoadObject(RAX, Bool::True()); |
1863 __ ret(); | 1844 __ ret(); |
1864 | 1845 |
1865 __ Bind(&return_false); | 1846 __ Bind(&return_false); |
1866 __ LoadObject(RAX, Bool::False()); | 1847 __ LoadObject(RAX, Bool::False()); |
1867 __ ret(); | 1848 __ ret(); |
1868 | 1849 |
1869 __ Bind(&fall_through); | 1850 __ Bind(&fall_through); |
1870 } | 1851 } |
1871 | 1852 |
1872 | 1853 |
1873 void Intrinsifier::StringBaseCharAt(Assembler* assembler) { | 1854 void Intrinsifier::StringBaseCharAt(Assembler* assembler) { |
1874 Label fall_through, try_two_byte_string; | 1855 Label fall_through, try_two_byte_string; |
1875 __ movq(RCX, Address(RSP, + 1 * kWordSize)); // Index. | 1856 __ movq(RCX, Address(RSP, +1 * kWordSize)); // Index. |
1876 __ movq(RAX, Address(RSP, + 2 * kWordSize)); // String. | 1857 __ movq(RAX, Address(RSP, +2 * kWordSize)); // String. |
1877 __ testq(RCX, Immediate(kSmiTagMask)); | 1858 __ testq(RCX, Immediate(kSmiTagMask)); |
1878 __ j(NOT_ZERO, &fall_through); // Non-smi index. | 1859 __ j(NOT_ZERO, &fall_through); // Non-smi index. |
1879 // Range check. | 1860 // Range check. |
1880 __ cmpq(RCX, FieldAddress(RAX, String::length_offset())); | 1861 __ cmpq(RCX, FieldAddress(RAX, String::length_offset())); |
1881 // Runtime throws exception. | 1862 // Runtime throws exception. |
1882 __ j(ABOVE_EQUAL, &fall_through); | 1863 __ j(ABOVE_EQUAL, &fall_through); |
1883 __ CompareClassId(RAX, kOneByteStringCid); | 1864 __ CompareClassId(RAX, kOneByteStringCid); |
1884 __ j(NOT_EQUAL, &try_two_byte_string, Assembler::kNearJump); | 1865 __ j(NOT_EQUAL, &try_two_byte_string, Assembler::kNearJump); |
1885 __ SmiUntag(RCX); | 1866 __ SmiUntag(RCX); |
1886 __ movzxb(RCX, FieldAddress(RAX, RCX, TIMES_1, OneByteString::data_offset())); | 1867 __ movzxb(RCX, FieldAddress(RAX, RCX, TIMES_1, OneByteString::data_offset())); |
1887 __ cmpq(RCX, Immediate(Symbols::kNumberOfOneCharCodeSymbols)); | 1868 __ cmpq(RCX, Immediate(Symbols::kNumberOfOneCharCodeSymbols)); |
1888 __ j(GREATER_EQUAL, &fall_through); | 1869 __ j(GREATER_EQUAL, &fall_through); |
1889 __ movq(RAX, Address(THR, Thread::predefined_symbols_address_offset())); | 1870 __ movq(RAX, Address(THR, Thread::predefined_symbols_address_offset())); |
1890 __ movq(RAX, Address(RAX, | 1871 __ movq(RAX, Address(RAX, RCX, TIMES_8, |
1891 RCX, | |
1892 TIMES_8, | |
1893 Symbols::kNullCharCodeSymbolOffset * kWordSize)); | 1872 Symbols::kNullCharCodeSymbolOffset * kWordSize)); |
1894 __ ret(); | 1873 __ ret(); |
1895 | 1874 |
1896 __ Bind(&try_two_byte_string); | 1875 __ Bind(&try_two_byte_string); |
1897 __ CompareClassId(RAX, kTwoByteStringCid); | 1876 __ CompareClassId(RAX, kTwoByteStringCid); |
1898 __ j(NOT_EQUAL, &fall_through); | 1877 __ j(NOT_EQUAL, &fall_through); |
1899 ASSERT(kSmiTagShift == 1); | 1878 ASSERT(kSmiTagShift == 1); |
1900 __ movzxw(RCX, FieldAddress(RAX, RCX, TIMES_1, OneByteString::data_offset())); | 1879 __ movzxw(RCX, FieldAddress(RAX, RCX, TIMES_1, OneByteString::data_offset())); |
1901 __ cmpq(RCX, Immediate(Symbols::kNumberOfOneCharCodeSymbols)); | 1880 __ cmpq(RCX, Immediate(Symbols::kNumberOfOneCharCodeSymbols)); |
1902 __ j(GREATER_EQUAL, &fall_through); | 1881 __ j(GREATER_EQUAL, &fall_through); |
1903 __ movq(RAX, Address(THR, Thread::predefined_symbols_address_offset())); | 1882 __ movq(RAX, Address(THR, Thread::predefined_symbols_address_offset())); |
1904 __ movq(RAX, Address(RAX, | 1883 __ movq(RAX, Address(RAX, RCX, TIMES_8, |
1905 RCX, | |
1906 TIMES_8, | |
1907 Symbols::kNullCharCodeSymbolOffset * kWordSize)); | 1884 Symbols::kNullCharCodeSymbolOffset * kWordSize)); |
1908 __ ret(); | 1885 __ ret(); |
1909 | 1886 |
1910 __ Bind(&fall_through); | 1887 __ Bind(&fall_through); |
1911 } | 1888 } |
1912 | 1889 |
1913 | 1890 |
1914 void Intrinsifier::StringBaseIsEmpty(Assembler* assembler) { | 1891 void Intrinsifier::StringBaseIsEmpty(Assembler* assembler) { |
1915 Label is_true; | 1892 Label is_true; |
1916 // Get length. | 1893 // Get length. |
1917 __ movq(RAX, Address(RSP, + 1 * kWordSize)); // String object. | 1894 __ movq(RAX, Address(RSP, +1 * kWordSize)); // String object. |
1918 __ movq(RAX, FieldAddress(RAX, String::length_offset())); | 1895 __ movq(RAX, FieldAddress(RAX, String::length_offset())); |
1919 __ cmpq(RAX, Immediate(Smi::RawValue(0))); | 1896 __ cmpq(RAX, Immediate(Smi::RawValue(0))); |
1920 __ j(EQUAL, &is_true, Assembler::kNearJump); | 1897 __ j(EQUAL, &is_true, Assembler::kNearJump); |
1921 __ LoadObject(RAX, Bool::False()); | 1898 __ LoadObject(RAX, Bool::False()); |
1922 __ ret(); | 1899 __ ret(); |
1923 __ Bind(&is_true); | 1900 __ Bind(&is_true); |
1924 __ LoadObject(RAX, Bool::True()); | 1901 __ LoadObject(RAX, Bool::True()); |
1925 __ ret(); | 1902 __ ret(); |
1926 } | 1903 } |
1927 | 1904 |
1928 | 1905 |
1929 void Intrinsifier::OneByteString_getHashCode(Assembler* assembler) { | 1906 void Intrinsifier::OneByteString_getHashCode(Assembler* assembler) { |
1930 Label compute_hash; | 1907 Label compute_hash; |
1931 __ movq(RBX, Address(RSP, + 1 * kWordSize)); // OneByteString object. | 1908 __ movq(RBX, Address(RSP, +1 * kWordSize)); // OneByteString object. |
1932 __ movq(RAX, FieldAddress(RBX, String::hash_offset())); | 1909 __ movq(RAX, FieldAddress(RBX, String::hash_offset())); |
1933 __ cmpq(RAX, Immediate(0)); | 1910 __ cmpq(RAX, Immediate(0)); |
1934 __ j(EQUAL, &compute_hash, Assembler::kNearJump); | 1911 __ j(EQUAL, &compute_hash, Assembler::kNearJump); |
1935 __ ret(); | 1912 __ ret(); |
1936 | 1913 |
1937 __ Bind(&compute_hash); | 1914 __ Bind(&compute_hash); |
1938 // Hash not yet computed, use algorithm of class StringHasher. | 1915 // Hash not yet computed, use algorithm of class StringHasher. |
1939 __ movq(RCX, FieldAddress(RBX, String::length_offset())); | 1916 __ movq(RCX, FieldAddress(RBX, String::length_offset())); |
1940 __ SmiUntag(RCX); | 1917 __ SmiUntag(RCX); |
1941 __ xorq(RAX, RAX); | 1918 __ xorq(RAX, RAX); |
(...skipping 33 matching lines...)
1975 __ shll(RDX, Immediate(3)); | 1952 __ shll(RDX, Immediate(3)); |
1976 __ addl(RAX, RDX); | 1953 __ addl(RAX, RDX); |
1977 __ movq(RDX, RAX); | 1954 __ movq(RDX, RAX); |
1978 __ shrl(RDX, Immediate(11)); | 1955 __ shrl(RDX, Immediate(11)); |
1979 __ xorl(RAX, RDX); | 1956 __ xorl(RAX, RDX); |
1980 __ movq(RDX, RAX); | 1957 __ movq(RDX, RAX); |
1981 __ shll(RDX, Immediate(15)); | 1958 __ shll(RDX, Immediate(15)); |
1982 __ addl(RAX, RDX); | 1959 __ addl(RAX, RDX); |
1983 // hash_ = hash_ & ((static_cast<intptr_t>(1) << bits) - 1); | 1960 // hash_ = hash_ & ((static_cast<intptr_t>(1) << bits) - 1); |
1984 __ andl(RAX, | 1961 __ andl(RAX, |
1985 Immediate(((static_cast<intptr_t>(1) << String::kHashBits) - 1))); | 1962 Immediate(((static_cast<intptr_t>(1) << String::kHashBits) - 1))); |
1986 | 1963 |
1987 // return hash_ == 0 ? 1 : hash_; | 1964 // return hash_ == 0 ? 1 : hash_; |
1988 __ cmpq(RAX, Immediate(0)); | 1965 __ cmpq(RAX, Immediate(0)); |
1989 __ j(NOT_EQUAL, &set_hash_code, Assembler::kNearJump); | 1966 __ j(NOT_EQUAL, &set_hash_code, Assembler::kNearJump); |
1990 __ incq(RAX); | 1967 __ incq(RAX); |
1991 __ Bind(&set_hash_code); | 1968 __ Bind(&set_hash_code); |
1992 __ SmiTag(RAX); | 1969 __ SmiTag(RAX); |
1993 __ StoreIntoSmiField(FieldAddress(RBX, String::hash_offset()), RAX); | 1970 __ StoreIntoSmiField(FieldAddress(RBX, String::hash_offset()), RAX); |
1994 __ ret(); | 1971 __ ret(); |
1995 } | 1972 } |
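
// A C++ sketch of the StringHasher finalization emitted above (the
// per-character accumulation loop is in the elided lines); kHashBits stands
// in for String::kHashBits.
#include <stdint.h>

static uint32_t FinalizeHash(uint32_t hash, int kHashBits) {
  hash += hash << 3;
  hash ^= hash >> 11;
  hash += hash << 15;
  hash &= (1u << kHashBits) - 1;  // hash_ & ((1 << bits) - 1).
  return (hash == 0) ? 1 : hash;  // 0 is reserved for "not yet computed".
}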
(...skipping 18 matching lines...)
2014 __ andq(RDI, Immediate(-kObjectAlignment)); | 1991 __ andq(RDI, Immediate(-kObjectAlignment)); |
2015 | 1992 |
2016 const intptr_t cid = kOneByteStringCid; | 1993 const intptr_t cid = kOneByteStringCid; |
2017 Heap::Space space = Heap::kNew; | 1994 Heap::Space space = Heap::kNew; |
2018 __ movq(R13, Address(THR, Thread::heap_offset())); | 1995 __ movq(R13, Address(THR, Thread::heap_offset())); |
2019 __ movq(RAX, Address(R13, Heap::TopOffset(space))); | 1996 __ movq(RAX, Address(R13, Heap::TopOffset(space))); |
2020 | 1997 |
2021 // RDI: allocation size. | 1998 // RDI: allocation size. |
2022 __ movq(RCX, RAX); | 1999 __ movq(RCX, RAX); |
2023 __ addq(RCX, RDI); | 2000 __ addq(RCX, RDI); |
2024 __ j(CARRY, &pop_and_fail); | 2001 __ j(CARRY, &pop_and_fail); |
2025 | 2002 |
2026 // Check if the allocation fits into the remaining space. | 2003 // Check if the allocation fits into the remaining space. |
2027 // RAX: potential new object start. | 2004 // RAX: potential new object start. |
2028 // RCX: potential next object start. | 2005 // RCX: potential next object start. |
2029 // RDI: allocation size. | 2006 // RDI: allocation size. |
2030 // R13: heap. | 2007 // R13: heap. |
2031 __ cmpq(RCX, Address(R13, Heap::EndOffset(space))); | 2008 __ cmpq(RCX, Address(R13, Heap::EndOffset(space))); |
2032 __ j(ABOVE_EQUAL, &pop_and_fail); | 2009 __ j(ABOVE_EQUAL, &pop_and_fail); |
2033 | 2010 |
2034 // Successfully allocated the object(s), now update top to point to | 2011 // Successfully allocated the object(s), now update top to point to |
(...skipping 16 matching lines...) Expand all Loading... |
2051 __ xorq(RDI, RDI); | 2028 __ xorq(RDI, RDI); |
2052 __ Bind(&done); | 2029 __ Bind(&done); |
2053 | 2030 |
2054 // Get the class index and insert it into the tags. | 2031 // Get the class index and insert it into the tags. |
2055 __ orq(RDI, Immediate(RawObject::ClassIdTag::encode(cid))); | 2032 __ orq(RDI, Immediate(RawObject::ClassIdTag::encode(cid))); |
2056 __ movq(FieldAddress(RAX, String::tags_offset()), RDI); // Tags. | 2033 __ movq(FieldAddress(RAX, String::tags_offset()), RDI); // Tags. |
2057 } | 2034 } |
2058 | 2035 |
2059 // Set the length field. | 2036 // Set the length field. |
2060 __ popq(RDI); | 2037 __ popq(RDI); |
2061 __ StoreIntoObjectNoBarrier(RAX, | 2038 __ StoreIntoObjectNoBarrier(RAX, FieldAddress(RAX, String::length_offset()), |
2062 FieldAddress(RAX, String::length_offset()), | |
2063 RDI); | 2039 RDI); |
2064 // Clear hash. | 2040 // Clear hash. |
2065 __ ZeroInitSmiField(FieldAddress(RAX, String::hash_offset())); | 2041 __ ZeroInitSmiField(FieldAddress(RAX, String::hash_offset())); |
2066 __ jmp(ok, Assembler::kNearJump); | 2042 __ jmp(ok, Assembler::kNearJump); |
2067 | 2043 |
2068 __ Bind(&pop_and_fail); | 2044 __ Bind(&pop_and_fail); |
2069 __ popq(RDI); | 2045 __ popq(RDI); |
2070 __ jmp(failure); | 2046 __ jmp(failure); |
2071 } | 2047 } |
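
// A hedged C++ sketch of the bump-pointer fast path above: reserve
// [top, top + size) if it fits below 'end', else bail out to the runtime.
// The struct layout here is a simplification, not the VM's Heap class.
#include <stddef.h>
#include <stdint.h>

struct BumpHeap { uintptr_t top; uintptr_t end; };

static void* TryBumpAllocate(BumpHeap* heap, size_t size) {
  uintptr_t start = heap->top;
  uintptr_t next = start + size;
  if (next < start) return nullptr;       // Overflow: the j(CARRY) branch.
  if (next >= heap->end) return nullptr;  // No room: j(ABOVE_EQUAL) branch.
  heap->top = next;                       // Publish the new top.
  return (void*)start;                    // Caller sets tags, length, hash.
}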
2072 | 2048 |
2073 | 2049 |
2074 // Arg0: OneByteString (receiver). | 2050 // Arg0: OneByteString (receiver). |
2075 // Arg1: Start index as Smi. | 2051 // Arg1: Start index as Smi. |
2076 // Arg2: End index as Smi. | 2052 // Arg2: End index as Smi. |
2077 // The indexes must be valid. | 2053 // The indexes must be valid. |
2078 void Intrinsifier::OneByteString_substringUnchecked(Assembler* assembler) { | 2054 void Intrinsifier::OneByteString_substringUnchecked(Assembler* assembler) { |
2079 const intptr_t kStringOffset = 3 * kWordSize; | 2055 const intptr_t kStringOffset = 3 * kWordSize; |
2080 const intptr_t kStartIndexOffset = 2 * kWordSize; | 2056 const intptr_t kStartIndexOffset = 2 * kWordSize; |
2081 const intptr_t kEndIndexOffset = 1 * kWordSize; | 2057 const intptr_t kEndIndexOffset = 1 * kWordSize; |
2082 Label fall_through, ok; | 2058 Label fall_through, ok; |
2083 __ movq(RSI, Address(RSP, + kStartIndexOffset)); | 2059 __ movq(RSI, Address(RSP, +kStartIndexOffset)); |
2084 __ movq(RDI, Address(RSP, + kEndIndexOffset)); | 2060 __ movq(RDI, Address(RSP, +kEndIndexOffset)); |
2085 __ orq(RSI, RDI); | 2061 __ orq(RSI, RDI); |
2086 __ testq(RSI, Immediate(kSmiTagMask)); | 2062 __ testq(RSI, Immediate(kSmiTagMask)); |
2087 __ j(NOT_ZERO, &fall_through); // 'start', 'end' not Smi. | 2063 __ j(NOT_ZERO, &fall_through); // 'start', 'end' not Smi. |
2088 | 2064 |
2089 __ subq(RDI, Address(RSP, + kStartIndexOffset)); | 2065 __ subq(RDI, Address(RSP, +kStartIndexOffset)); |
2090 TryAllocateOnebyteString(assembler, &ok, &fall_through, RDI); | 2066 TryAllocateOnebyteString(assembler, &ok, &fall_through, RDI); |
2091 __ Bind(&ok); | 2067 __ Bind(&ok); |
2092 // RAX: new string as tagged pointer. | 2068 // RAX: new string as tagged pointer. |
2093 // Copy string. | 2069 // Copy string. |
2094 __ movq(RSI, Address(RSP, + kStringOffset)); | 2070 __ movq(RSI, Address(RSP, +kStringOffset)); |
2095 __ movq(RBX, Address(RSP, + kStartIndexOffset)); | 2071 __ movq(RBX, Address(RSP, +kStartIndexOffset)); |
2096 __ SmiUntag(RBX); | 2072 __ SmiUntag(RBX); |
2097 __ leaq(RSI, FieldAddress(RSI, RBX, TIMES_1, OneByteString::data_offset())); | 2073 __ leaq(RSI, FieldAddress(RSI, RBX, TIMES_1, OneByteString::data_offset())); |
2098 // RSI: Start address to copy from (untagged). | 2074 // RSI: Start address to copy from (untagged). |
2099 // RBX: Untagged start index. | 2075 // RBX: Untagged start index. |
2100 __ movq(RCX, Address(RSP, + kEndIndexOffset)); | 2076 __ movq(RCX, Address(RSP, +kEndIndexOffset)); |
2101 __ SmiUntag(RCX); | 2077 __ SmiUntag(RCX); |
2102 __ subq(RCX, RBX); | 2078 __ subq(RCX, RBX); |
2103 __ xorq(RDX, RDX); | 2079 __ xorq(RDX, RDX); |
2104 // RSI: Start address to copy from (untagged). | 2080 // RSI: Start address to copy from (untagged). |
2105 // RCX: Untagged number of bytes to copy. | 2081 // RCX: Untagged number of bytes to copy. |
2106 // RAX: Tagged result string | 2082 // RAX: Tagged result string |
2107 // RDX: Loop counter. | 2083 // RDX: Loop counter. |
2108 // RBX: Scratch register. | 2084 // RBX: Scratch register. |
2109 Label loop, check; | 2085 Label loop, check; |
2110 __ jmp(&check, Assembler::kNearJump); | 2086 __ jmp(&check, Assembler::kNearJump); |
2111 __ Bind(&loop); | 2087 __ Bind(&loop); |
2112 __ movzxb(RBX, Address(RSI, RDX, TIMES_1, 0)); | 2088 __ movzxb(RBX, Address(RSI, RDX, TIMES_1, 0)); |
2113 __ movb(FieldAddress(RAX, RDX, TIMES_1, OneByteString::data_offset()), RBX); | 2089 __ movb(FieldAddress(RAX, RDX, TIMES_1, OneByteString::data_offset()), RBX); |
2114 __ incq(RDX); | 2090 __ incq(RDX); |
2115 __ Bind(&check); | 2091 __ Bind(&check); |
2116 __ cmpq(RDX, RCX); | 2092 __ cmpq(RDX, RCX); |
2117 __ j(LESS, &loop, Assembler::kNearJump); | 2093 __ j(LESS, &loop, Assembler::kNearJump); |
2118 __ ret(); | 2094 __ ret(); |
2119 __ Bind(&fall_through); | 2095 __ Bind(&fall_through); |
2120 } | 2096 } |
2121 | 2097 |
2122 | 2098 |
2123 void Intrinsifier::OneByteStringSetAt(Assembler* assembler) { | 2099 void Intrinsifier::OneByteStringSetAt(Assembler* assembler) { |
2124 __ movq(RCX, Address(RSP, + 1 * kWordSize)); // Value. | 2100 __ movq(RCX, Address(RSP, +1 * kWordSize)); // Value. |
2125 __ movq(RBX, Address(RSP, + 2 * kWordSize)); // Index. | 2101 __ movq(RBX, Address(RSP, +2 * kWordSize)); // Index. |
2126 __ movq(RAX, Address(RSP, + 3 * kWordSize)); // OneByteString. | 2102 __ movq(RAX, Address(RSP, +3 * kWordSize)); // OneByteString. |
2127 __ SmiUntag(RBX); | 2103 __ SmiUntag(RBX); |
2128 __ SmiUntag(RCX); | 2104 __ SmiUntag(RCX); |
2129 __ movb(FieldAddress(RAX, RBX, TIMES_1, OneByteString::data_offset()), RCX); | 2105 __ movb(FieldAddress(RAX, RBX, TIMES_1, OneByteString::data_offset()), RCX); |
2130 __ ret(); | 2106 __ ret(); |
2131 } | 2107 } |
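// Both the index and the value above arrive as Smis: the payload is stored
// shifted left by one with a zero tag bit, so SmiUntag is a single
// arithmetic shift right (e.g. the Smi encoding of 3 is 0x6). The net
// effect, as a C sketch with illustrative names:
//
//   string_data[index] = static_cast<uint8_t>(value);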
2132 | 2108 |
2133 | 2109 |
2134 void Intrinsifier::OneByteString_allocate(Assembler* assembler) { | 2110 void Intrinsifier::OneByteString_allocate(Assembler* assembler) { |
2135 __ movq(RDI, Address(RSP, + 1 * kWordSize)); // Length. | 2111 __ movq(RDI, Address(RSP, +1 * kWordSize)); // Length. |
2136 Label fall_through, ok; | 2112 Label fall_through, ok; |
2137 TryAllocateOnebyteString(assembler, &ok, &fall_through, RDI); | 2113 TryAllocateOnebyteString(assembler, &ok, &fall_through, RDI); |
2138 // On success, RAX contains the new string (tagged). | 2114 // On success, RAX contains the new string (tagged). |
2139 | 2115 |
2140 __ Bind(&ok); | 2116 __ Bind(&ok); |
2141 __ ret(); | 2117 __ ret(); |
2142 | 2118 |
2143 __ Bind(&fall_through); | 2119 __ Bind(&fall_through); |
2144 } | 2120 } |
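// The intrinsic above is only a fast path: TryAllocateOnebyteString attempts
// an inline new-space allocation, leaving the result in RAX on success, and
// jumps to fall_through on failure so the regular runtime allocation runs.
// A C++ sketch of the shape, with a hypothetical helper name:
//
//   if (RawOneByteString* s = TryAllocateOneByteInNewSpace(length)) {
//     return s;  // the &ok path
//   }
//   // otherwise fall through to the general allocation path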
2145 | 2121 |
2146 | 2122 |
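// The equality intrinsic below short-circuits on identity, rejects Smis,
// defers mismatched representations to Dart code, and otherwise compares
// lengths and then characters back-to-front. C sketch (illustrative names):
//
//   if (a == b) return true;                      // identical
//   if (IsSmi(b)) return false;                   // a Smi is never a string
//   if (ClassId(b) != string_cid) goto fallback;  // let Dart code decide
//   if (a->length != b->length) return false;
//   for (intptr_t i = Untag(a->length) - 1; i >= 0; i--) {
//     if (a->data[i] != b->data[i]) return false;
//   }
//   return true;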
2147 // TODO(srdjan): Add combinations (one-byte/two-byte/external strings). | 2123 // TODO(srdjan): Add combinations (one-byte/two-byte/external strings). |
2148 static void StringEquality(Assembler* assembler, intptr_t string_cid) { | 2124 static void StringEquality(Assembler* assembler, intptr_t string_cid) { |
2149 Label fall_through, is_true, is_false, loop; | 2125 Label fall_through, is_true, is_false, loop; |
2150 __ movq(RAX, Address(RSP, + 2 * kWordSize)); // This. | 2126 __ movq(RAX, Address(RSP, +2 * kWordSize)); // This. |
2151 __ movq(RCX, Address(RSP, + 1 * kWordSize)); // Other. | 2127 __ movq(RCX, Address(RSP, +1 * kWordSize)); // Other. |
2152 | 2128 |
2153 // Are they identical? | 2129 // Are they identical? |
2154 __ cmpq(RAX, RCX); | 2130 __ cmpq(RAX, RCX); |
2155 __ j(EQUAL, &is_true, Assembler::kNearJump); | 2131 __ j(EQUAL, &is_true, Assembler::kNearJump); |
2156 | 2132 |
2157 // Is other OneByteString? | 2133 // Is other OneByteString? |
2158 __ testq(RCX, Immediate(kSmiTagMask)); | 2134 __ testq(RCX, Immediate(kSmiTagMask)); |
2159 __ j(ZERO, &is_false); // Smi => not a string. | 2135 __ j(ZERO, &is_false); // Smi => not a string. |
2160 __ CompareClassId(RCX, string_cid); | 2136 __ CompareClassId(RCX, string_cid); |
2161 __ j(NOT_EQUAL, &fall_through, Assembler::kNearJump); | 2137 __ j(NOT_EQUAL, &fall_through, Assembler::kNearJump); |
2162 | 2138 |
2163 // Have same length? | 2139 // Have same length? |
2164 __ movq(RDI, FieldAddress(RAX, String::length_offset())); | 2140 __ movq(RDI, FieldAddress(RAX, String::length_offset())); |
2165 __ cmpq(RDI, FieldAddress(RCX, String::length_offset())); | 2141 __ cmpq(RDI, FieldAddress(RCX, String::length_offset())); |
2166 __ j(NOT_EQUAL, &is_false, Assembler::kNearJump); | 2142 __ j(NOT_EQUAL, &is_false, Assembler::kNearJump); |
2167 | 2143 |
2168 // Check contents, no fall-through possible. | 2144 // Check contents, no fall-through possible. |
2169 // TODO(srdjan): write a faster check. | 2145 // TODO(srdjan): write a faster check. |
2170 __ SmiUntag(RDI); | 2146 __ SmiUntag(RDI); |
2171 __ Bind(&loop); | 2147 __ Bind(&loop); |
2172 __ decq(RDI); | 2148 __ decq(RDI); |
2173 __ cmpq(RDI, Immediate(0)); | 2149 __ cmpq(RDI, Immediate(0)); |
2174 __ j(LESS, &is_true, Assembler::kNearJump); | 2150 __ j(LESS, &is_true, Assembler::kNearJump); |
2175 if (string_cid == kOneByteStringCid) { | 2151 if (string_cid == kOneByteStringCid) { |
2176 __ movzxb(RBX, | 2152 __ movzxb(RBX, |
2177 FieldAddress(RAX, RDI, TIMES_1, OneByteString::data_offset())); | 2153 FieldAddress(RAX, RDI, TIMES_1, OneByteString::data_offset())); |
2178 __ movzxb(RDX, | 2154 __ movzxb(RDX, |
2179 FieldAddress(RCX, RDI, TIMES_1, OneByteString::data_offset())); | 2155 FieldAddress(RCX, RDI, TIMES_1, OneByteString::data_offset())); |
2180 } else if (string_cid == kTwoByteStringCid) { | 2156 } else if (string_cid == kTwoByteStringCid) { |
2181 __ movzxw(RBX, | 2157 __ movzxw(RBX, |
2182 FieldAddress(RAX, RDI, TIMES_2, TwoByteString::data_offset())); | 2158 FieldAddress(RAX, RDI, TIMES_2, TwoByteString::data_offset())); |
2183 __ movzxw(RDX, | 2159 __ movzxw(RDX, |
2184 FieldAddress(RCX, RDI, TIMES_2, TwoByteString::data_offset())); | 2160 FieldAddress(RCX, RDI, TIMES_2, TwoByteString::data_offset())); |
2185 } else { | 2161 } else { |
2186 UNIMPLEMENTED(); | 2162 UNIMPLEMENTED(); |
2187 } | 2163 } |
2188 __ cmpq(RBX, RDX); | 2164 __ cmpq(RBX, RDX); |
2189 __ j(NOT_EQUAL, &is_false, Assembler::kNearJump); | 2165 __ j(NOT_EQUAL, &is_false, Assembler::kNearJump); |
2190 __ jmp(&loop, Assembler::kNearJump); | 2166 __ jmp(&loop, Assembler::kNearJump); |
2191 | 2167 |
2192 __ Bind(&is_true); | 2168 __ Bind(&is_true); |
2193 __ LoadObject(RAX, Bool::True()); | 2169 __ LoadObject(RAX, Bool::True()); |
2194 __ ret(); | 2170 __ ret(); |
(...skipping 48 matching lines...) |
2243 } | 2219 } |
2244 | 2220 |
2245 | 2221 |
2246 // On stack: user tag (+1), return-address (+0). | 2222 // On stack: user tag (+1), return-address (+0). |
2247 void Intrinsifier::UserTag_makeCurrent(Assembler* assembler) { | 2223 void Intrinsifier::UserTag_makeCurrent(Assembler* assembler) { |
2248 // RBX: Isolate. | 2224 // RBX: Isolate. |
2249 __ LoadIsolate(RBX); | 2225 __ LoadIsolate(RBX); |
2250 // RAX: Current user tag. | 2226 // RAX: Current user tag. |
2251 __ movq(RAX, Address(RBX, Isolate::current_tag_offset())); | 2227 __ movq(RAX, Address(RBX, Isolate::current_tag_offset())); |
2252 // R10: UserTag. | 2228 // R10: UserTag. |
2253 __ movq(R10, Address(RSP, + 1 * kWordSize)); | 2229 __ movq(R10, Address(RSP, +1 * kWordSize)); |
2254 // Set Isolate::current_tag_. | 2230 // Set Isolate::current_tag_. |
2255 __ movq(Address(RBX, Isolate::current_tag_offset()), R10); | 2231 __ movq(Address(RBX, Isolate::current_tag_offset()), R10); |
2256 // R10: UserTag's tag. | 2232 // R10: UserTag's tag. |
2257 __ movq(R10, FieldAddress(R10, UserTag::tag_offset())); | 2233 __ movq(R10, FieldAddress(R10, UserTag::tag_offset())); |
2258 // Set Isolate::user_tag_. | 2234 // Set Isolate::user_tag_. |
2259 __ movq(Address(RBX, Isolate::user_tag_offset()), R10); | 2235 __ movq(Address(RBX, Isolate::user_tag_offset()), R10); |
2260 __ ret(); | 2236 __ ret(); |
2261 } | 2237 } |
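// Net effect (C++ sketch; the accessor names are illustrative): makeCurrent
// swaps the isolate's current tag and returns the previous one, which the
// code above has already left in RAX, the return register.
//
//   UserTag* previous = isolate->current_tag();
//   isolate->set_current_tag(new_tag);
//   isolate->set_user_tag(new_tag->tag());
//   return previous;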
2262 | 2238 |
2263 | 2239 |
(...skipping 31 matching lines...) |
2295 __ Bind(&true_label); | 2271 __ Bind(&true_label); |
2296 __ LoadObject(RAX, Bool::True()); | 2272 __ LoadObject(RAX, Bool::True()); |
2297 __ ret(); | 2273 __ ret(); |
2298 } | 2274 } |
2299 | 2275 |
2300 #undef __ | 2276 #undef __ |
2301 | 2277 |
2302 } // namespace dart | 2278 } // namespace dart |
2303 | 2279 |
2304 #endif // defined(TARGET_ARCH_X64) | 2280 #endif // defined(TARGET_ARCH_X64) |