Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(116)

Side by Side Diff: runtime/vm/intrinsifier_arm64.cc

Issue 293993013: Begins adding SIMD support to arm64. (Closed) Base URL: http://dart.googlecode.com/svn/branches/bleeding_edge/dart/
Patch Set: Created 6 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « runtime/vm/intermediate_language_x64.cc ('k') | runtime/vm/simulator_arm64.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file 1 // Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a 2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file. 3 // BSD-style license that can be found in the LICENSE file.
4 4
5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM64. 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM64.
6 #if defined(TARGET_ARCH_ARM64) 6 #if defined(TARGET_ARCH_ARM64)
7 7
8 #include "vm/intrinsifier.h" 8 #include "vm/intrinsifier.h"
9 9
10 #include "vm/assembler.h" 10 #include "vm/assembler.h"
(...skipping 179 matching lines...) Expand 10 before | Expand all | Expand 10 after
190 // Store the type argument field in the growable array object. 190 // Store the type argument field in the growable array object.
191 __ ldr(R1, Address(SP, kTypeArgumentsOffset)); // Type argument. 191 __ ldr(R1, Address(SP, kTypeArgumentsOffset)); // Type argument.
192 __ StoreIntoObjectNoBarrier( 192 __ StoreIntoObjectNoBarrier(
193 R0, 193 R0,
194 FieldAddress(R0, GrowableObjectArray::type_arguments_offset()), 194 FieldAddress(R0, GrowableObjectArray::type_arguments_offset()),
195 R1); 195 R1);
196 196
197 // Set the length field in the growable array object to 0. 197 // Set the length field in the growable array object to 0.
198 __ LoadImmediate(R1, 0, kNoPP); 198 __ LoadImmediate(R1, 0, kNoPP);
199 __ str(R1, FieldAddress(R0, GrowableObjectArray::length_offset())); 199 __ str(R1, FieldAddress(R0, GrowableObjectArray::length_offset()));
200 __ UpdateAllocationStats(kGrowableObjectArrayCid, R1, kNoPP); 200 __ UpdateAllocationStats(kGrowableObjectArrayCid, kNoPP);
201 __ ret(); // Returns the newly allocated object in R0. 201 __ ret(); // Returns the newly allocated object in R0.
202 202
203 __ Bind(&fall_through); 203 __ Bind(&fall_through);
204 } 204 }
205 205
206 206
207 void Intrinsifier::GrowableList_getLength(Assembler* assembler) { 207 void Intrinsifier::GrowableList_getLength(Assembler* assembler) {
208 __ ldr(R0, Address(SP, 0 * kWordSize)); 208 __ ldr(R0, Address(SP, 0 * kWordSize));
209 __ ldr(R0, FieldAddress(R0, GrowableObjectArray::length_offset())); 209 __ ldr(R0, FieldAddress(R0, GrowableObjectArray::length_offset()));
210 __ ret(); 210 __ ret();
(...skipping 191 matching lines...) Expand 10 before | Expand all | Expand 10 after
402 __ LoadImmediate(R3, heap->EndAddress(), kNoPP); \ 402 __ LoadImmediate(R3, heap->EndAddress(), kNoPP); \
403 __ ldr(R3, Address(R3, 0)); \ 403 __ ldr(R3, Address(R3, 0)); \
404 __ cmp(R1, Operand(R3)); \ 404 __ cmp(R1, Operand(R3)); \
405 __ b(&fall_through, CS); \ 405 __ b(&fall_through, CS); \
406 \ 406 \
407 /* Successfully allocated the object(s), now update top to point to */ \ 407 /* Successfully allocated the object(s), now update top to point to */ \
408 /* next object start and initialize the object. */ \ 408 /* next object start and initialize the object. */ \
409 __ LoadImmediate(R3, heap->TopAddress(), kNoPP); \ 409 __ LoadImmediate(R3, heap->TopAddress(), kNoPP); \
410 __ str(R1, Address(R3, 0)); \ 410 __ str(R1, Address(R3, 0)); \
411 __ AddImmediate(R0, R0, kHeapObjectTag, kNoPP); \ 411 __ AddImmediate(R0, R0, kHeapObjectTag, kNoPP); \
412 __ UpdateAllocationStatsWithSize(cid, R2, R4, kNoPP); \ 412 __ UpdateAllocationStatsWithSize(cid, R2, kNoPP); \
413 /* Initialize the tags. */ \ 413 /* Initialize the tags. */ \
414 /* R0: new object start as a tagged pointer. */ \ 414 /* R0: new object start as a tagged pointer. */ \
415 /* R1: new object end address. */ \ 415 /* R1: new object end address. */ \
416 /* R2: allocation size. */ \ 416 /* R2: allocation size. */ \
417 { \ 417 { \
418 __ CompareImmediate(R2, RawObject::SizeTag::kMaxSizeTag, kNoPP); \ 418 __ CompareImmediate(R2, RawObject::SizeTag::kMaxSizeTag, kNoPP); \
419 __ Lsl(R2, R2, RawObject::kSizeTagPos - kObjectAlignmentLog2); \ 419 __ Lsl(R2, R2, RawObject::kSizeTagPos - kObjectAlignmentLog2); \
420 __ csel(R2, ZR, R2, HI); \ 420 __ csel(R2, ZR, R2, HI); \
421 \ 421 \
422 /* Get the class index and insert it into the tags. */ \ 422 /* Get the class index and insert it into the tags. */ \
(...skipping 529 matching lines...) Expand 10 before | Expand all | Expand 10 after
952 __ LoadDFieldFromOffset(V0, R0, Double::value_offset(), kNoPP); 952 __ LoadDFieldFromOffset(V0, R0, Double::value_offset(), kNoPP);
953 switch (kind) { 953 switch (kind) {
954 case Token::kADD: __ faddd(V0, V0, V1); break; 954 case Token::kADD: __ faddd(V0, V0, V1); break;
955 case Token::kSUB: __ fsubd(V0, V0, V1); break; 955 case Token::kSUB: __ fsubd(V0, V0, V1); break;
956 case Token::kMUL: __ fmuld(V0, V0, V1); break; 956 case Token::kMUL: __ fmuld(V0, V0, V1); break;
957 case Token::kDIV: __ fdivd(V0, V0, V1); break; 957 case Token::kDIV: __ fdivd(V0, V0, V1); break;
958 default: UNREACHABLE(); 958 default: UNREACHABLE();
959 } 959 }
960 const Class& double_class = Class::Handle( 960 const Class& double_class = Class::Handle(
961 Isolate::Current()->object_store()->double_class()); 961 Isolate::Current()->object_store()->double_class());
962 __ TryAllocate(double_class, &fall_through, R0, R1, kNoPP); 962 __ TryAllocate(double_class, &fall_through, R0, kNoPP);
963 __ StoreDFieldToOffset(V0, R0, Double::value_offset(), kNoPP); 963 __ StoreDFieldToOffset(V0, R0, Double::value_offset(), kNoPP);
964 __ ret(); 964 __ ret();
965 __ Bind(&fall_through); 965 __ Bind(&fall_through);
966 } 966 }
967 967
968 968
969 void Intrinsifier::Double_add(Assembler* assembler) { 969 void Intrinsifier::Double_add(Assembler* assembler) {
970 DoubleArithmeticOperations(assembler, Token::kADD); 970 DoubleArithmeticOperations(assembler, Token::kADD);
971 } 971 }
972 972
(...skipping 21 matching lines...) Expand all
994 __ tsti(R0, kSmiTagMask); 994 __ tsti(R0, kSmiTagMask);
995 __ b(&fall_through, NE); 995 __ b(&fall_through, NE);
996 // Is Smi. 996 // Is Smi.
997 __ SmiUntag(R0); 997 __ SmiUntag(R0);
998 __ scvtfd(V1, R0); 998 __ scvtfd(V1, R0);
999 __ ldr(R0, Address(SP, 1 * kWordSize)); 999 __ ldr(R0, Address(SP, 1 * kWordSize));
1000 __ LoadDFieldFromOffset(V0, R0, Double::value_offset(), kNoPP); 1000 __ LoadDFieldFromOffset(V0, R0, Double::value_offset(), kNoPP);
1001 __ fmuld(V0, V0, V1); 1001 __ fmuld(V0, V0, V1);
1002 const Class& double_class = Class::Handle( 1002 const Class& double_class = Class::Handle(
1003 Isolate::Current()->object_store()->double_class()); 1003 Isolate::Current()->object_store()->double_class());
1004 __ TryAllocate(double_class, &fall_through, R0, R1, kNoPP); 1004 __ TryAllocate(double_class, &fall_through, R0, kNoPP);
1005 __ StoreDFieldToOffset(V0, R0, Double::value_offset(), kNoPP); 1005 __ StoreDFieldToOffset(V0, R0, Double::value_offset(), kNoPP);
1006 __ ret(); 1006 __ ret();
1007 __ Bind(&fall_through); 1007 __ Bind(&fall_through);
1008 } 1008 }
1009 1009
1010 1010
1011 void Intrinsifier::Double_fromInteger(Assembler* assembler) { 1011 void Intrinsifier::Double_fromInteger(Assembler* assembler) {
1012 Label fall_through; 1012 Label fall_through;
1013 1013
1014 __ ldr(R0, Address(SP, 0 * kWordSize)); 1014 __ ldr(R0, Address(SP, 0 * kWordSize));
1015 __ tsti(R0, kSmiTagMask); 1015 __ tsti(R0, kSmiTagMask);
1016 __ b(&fall_through, NE); 1016 __ b(&fall_through, NE);
1017 // Is Smi. 1017 // Is Smi.
1018 __ SmiUntag(R0); 1018 __ SmiUntag(R0);
1019 __ scvtfd(V0, R0); 1019 __ scvtfd(V0, R0);
1020 const Class& double_class = Class::Handle( 1020 const Class& double_class = Class::Handle(
1021 Isolate::Current()->object_store()->double_class()); 1021 Isolate::Current()->object_store()->double_class());
1022 __ TryAllocate(double_class, &fall_through, R0, R1, kNoPP); 1022 __ TryAllocate(double_class, &fall_through, R0, kNoPP);
1023 __ StoreDFieldToOffset(V0, R0, Double::value_offset(), kNoPP); 1023 __ StoreDFieldToOffset(V0, R0, Double::value_offset(), kNoPP);
1024 __ ret(); 1024 __ ret();
1025 __ Bind(&fall_through); 1025 __ Bind(&fall_through);
1026 } 1026 }
1027 1027
1028 1028
1029 void Intrinsifier::Double_getIsNaN(Assembler* assembler) { 1029 void Intrinsifier::Double_getIsNaN(Assembler* assembler) {
1030 Label is_true; 1030 Label is_true;
1031 __ ldr(R0, Address(SP, 0 * kWordSize)); 1031 __ ldr(R0, Address(SP, 0 * kWordSize));
1032 __ LoadDFieldFromOffset(V0, R0, Double::value_offset(), kNoPP); 1032 __ LoadDFieldFromOffset(V0, R0, Double::value_offset(), kNoPP);
(...skipping 59 matching lines...) Expand 10 before | Expand all | Expand 10 after
1092 1092
1093 void Intrinsifier::Math_sqrt(Assembler* assembler) { 1093 void Intrinsifier::Math_sqrt(Assembler* assembler) {
1094 Label fall_through, is_smi, double_op; 1094 Label fall_through, is_smi, double_op;
1095 TestLastArgumentIsDouble(assembler, &is_smi, &fall_through); 1095 TestLastArgumentIsDouble(assembler, &is_smi, &fall_through);
1096 // Argument is double and is in R0. 1096 // Argument is double and is in R0.
1097 __ LoadDFieldFromOffset(V1, R0, Double::value_offset(), kNoPP); 1097 __ LoadDFieldFromOffset(V1, R0, Double::value_offset(), kNoPP);
1098 __ Bind(&double_op); 1098 __ Bind(&double_op);
1099 __ fsqrtd(V0, V1); 1099 __ fsqrtd(V0, V1);
1100 const Class& double_class = Class::Handle( 1100 const Class& double_class = Class::Handle(
1101 Isolate::Current()->object_store()->double_class()); 1101 Isolate::Current()->object_store()->double_class());
1102 __ TryAllocate(double_class, &fall_through, R0, R1, kNoPP); 1102 __ TryAllocate(double_class, &fall_through, R0, kNoPP);
1103 __ StoreDFieldToOffset(V0, R0, Double::value_offset(), kNoPP); 1103 __ StoreDFieldToOffset(V0, R0, Double::value_offset(), kNoPP);
1104 __ ret(); 1104 __ ret();
1105 __ Bind(&is_smi); 1105 __ Bind(&is_smi);
1106 __ SmiUntag(R0); 1106 __ SmiUntag(R0);
1107 __ scvtfd(V1, R0); 1107 __ scvtfd(V1, R0);
1108 __ b(&double_op); 1108 __ b(&double_op);
1109 __ Bind(&fall_through); 1109 __ Bind(&fall_through);
1110 } 1110 }
1111 1111
1112 1112
(...skipping 202 matching lines...) Expand 10 before | Expand all | Expand 10 after
1315 // R3: heap->Top->Address(). 1315 // R3: heap->Top->Address().
1316 __ LoadImmediate(R7, heap->EndAddress(), kNoPP); 1316 __ LoadImmediate(R7, heap->EndAddress(), kNoPP);
1317 __ ldr(R7, Address(R7)); 1317 __ ldr(R7, Address(R7));
1318 __ cmp(R1, Operand(R7)); 1318 __ cmp(R1, Operand(R7));
1319 __ b(&fail, CS); 1319 __ b(&fail, CS);
1320 1320
1321 // Successfully allocated the object(s), now update top to point to 1321 // Successfully allocated the object(s), now update top to point to
1322 // next object start and initialize the object. 1322 // next object start and initialize the object.
1323 __ str(R1, Address(R3)); 1323 __ str(R1, Address(R3));
1324 __ AddImmediate(R0, R0, kHeapObjectTag, kNoPP); 1324 __ AddImmediate(R0, R0, kHeapObjectTag, kNoPP);
1325 __ UpdateAllocationStatsWithSize(kOneByteStringCid, R2, R3, kNoPP); 1325 __ UpdateAllocationStatsWithSize(kOneByteStringCid, R2, kNoPP);
1326 1326
1327 // Initialize the tags. 1327 // Initialize the tags.
1328 // R0: new object start as a tagged pointer. 1328 // R0: new object start as a tagged pointer.
1329 // R1: new object end address. 1329 // R1: new object end address.
1330 // R2: allocation size. 1330 // R2: allocation size.
1331 { 1331 {
1332 const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2; 1332 const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2;
1333 const Class& cls = 1333 const Class& cls =
1334 Class::Handle(isolate->object_store()->one_byte_string_class()); 1334 Class::Handle(isolate->object_store()->one_byte_string_class());
1335 1335
(...skipping 216 matching lines...) Expand 10 before | Expand all | Expand 10 after
1552 Isolate* isolate = Isolate::Current(); 1552 Isolate* isolate = Isolate::Current();
1553 __ LoadImmediate(R1, reinterpret_cast<uword>(isolate), kNoPP); 1553 __ LoadImmediate(R1, reinterpret_cast<uword>(isolate), kNoPP);
1554 // Set return value to Isolate::current_tag_. 1554 // Set return value to Isolate::current_tag_.
1555 __ ldr(R0, Address(R1, Isolate::current_tag_offset())); 1555 __ ldr(R0, Address(R1, Isolate::current_tag_offset()));
1556 __ ret(); 1556 __ ret();
1557 } 1557 }
1558 1558
1559 } // namespace dart 1559 } // namespace dart
1560 1560
1561 #endif // defined TARGET_ARCH_ARM64 1561 #endif // defined TARGET_ARCH_ARM64
OLDNEW
« no previous file with comments | « runtime/vm/intermediate_language_x64.cc ('k') | runtime/vm/simulator_arm64.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698