Chromium Code Reviews

Side by Side Diff: src/arm/stub-cache-arm.cc

Issue 6597029: [Isolates] Merge r6300:6500 from bleeding_edge to isolates. (Closed) Base URL: http://v8.googlecode.com/svn/branches/experimental/isolates/
Patch Set: Created 9 years, 10 months ago
1 // Copyright 2006-2009 the V8 project authors. All rights reserved. 1 // Copyright 2006-2009 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 355 matching lines...)
366 366
367 367
368 // Generate code to load the length from a string object and return the length. 368 // Generate code to load the length from a string object and return the length.
369 // If the receiver object is not a string or a wrapped string object the 369 // If the receiver object is not a string or a wrapped string object the
370 // execution continues at the miss label. The register containing the 370 // execution continues at the miss label. The register containing the
371 // receiver is potentially clobbered. 371 // receiver is potentially clobbered.
372 void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm, 372 void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
373 Register receiver, 373 Register receiver,
374 Register scratch1, 374 Register scratch1,
375 Register scratch2, 375 Register scratch2,
376 Label* miss) { 376 Label* miss,
377 bool support_wrappers) {
377 Label check_wrapper; 378 Label check_wrapper;
378 379
379 // Check if the object is a string leaving the instance type in the 380 // Check if the object is a string leaving the instance type in the
380 // scratch1 register. 381 // scratch1 register.
381 GenerateStringCheck(masm, receiver, scratch1, scratch2, miss, &check_wrapper); 382 GenerateStringCheck(masm, receiver, scratch1, scratch2, miss,
383 support_wrappers ? &check_wrapper : miss);
382 384
383 // Load length directly from the string. 385 // Load length directly from the string.
384 __ ldr(r0, FieldMemOperand(receiver, String::kLengthOffset)); 386 __ ldr(r0, FieldMemOperand(receiver, String::kLengthOffset));
385 __ Ret(); 387 __ Ret();
386 388
387 // Check if the object is a JSValue wrapper. 389 if (support_wrappers) {
388 __ bind(&check_wrapper); 390 // Check if the object is a JSValue wrapper.
389 __ cmp(scratch1, Operand(JS_VALUE_TYPE)); 391 __ bind(&check_wrapper);
390 __ b(ne, miss); 392 __ cmp(scratch1, Operand(JS_VALUE_TYPE));
393 __ b(ne, miss);
391 394
392 // Unwrap the value and check if the wrapped value is a string. 395 // Unwrap the value and check if the wrapped value is a string.
393 __ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset)); 396 __ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
394 GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss); 397 GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
395 __ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset)); 398 __ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset));
396 __ Ret(); 399 __ Ret();
400 }
397 } 401 }
398 402
399 403
400 void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm, 404 void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
401 Register receiver, 405 Register receiver,
402 Register scratch1, 406 Register scratch1,
403 Register scratch2, 407 Register scratch2,
404 Label* miss_label) { 408 Label* miss_label) {
405 __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label); 409 __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
406 __ mov(r0, scratch1); 410 __ mov(r0, scratch1);
(...skipping 110 matching lines...)
517 static void GenerateCallFunction(MacroAssembler* masm, 521 static void GenerateCallFunction(MacroAssembler* masm,
518 Object* object, 522 Object* object,
519 const ParameterCount& arguments, 523 const ParameterCount& arguments,
520 Label* miss) { 524 Label* miss) {
521 // ----------- S t a t e ------------- 525 // ----------- S t a t e -------------
522 // -- r0: receiver 526 // -- r0: receiver
523 // -- r1: function to call 527 // -- r1: function to call
524 // ----------------------------------- 528 // -----------------------------------
525 529
526 // Check that the function really is a function. 530 // Check that the function really is a function.
527 __ BranchOnSmi(r1, miss); 531 __ JumpIfSmi(r1, miss);
528 __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE); 532 __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
529 __ b(ne, miss); 533 __ b(ne, miss);
530 534
531 // Patch the receiver on the stack with the global proxy if 535 // Patch the receiver on the stack with the global proxy if
532 // necessary. 536 // necessary.
533 if (object->IsGlobalObject()) { 537 if (object->IsGlobalObject()) {
534 __ ldr(r3, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset)); 538 __ ldr(r3, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
535 __ str(r3, MemOperand(sp, arguments.immediate() * kPointerSize)); 539 __ str(r3, MemOperand(sp, arguments.immediate() * kPointerSize));
536 } 540 }
537 541
(...skipping 118 matching lines...)
656 LookupResult* lookup, 660 LookupResult* lookup,
657 Register receiver, 661 Register receiver,
658 Register scratch1, 662 Register scratch1,
659 Register scratch2, 663 Register scratch2,
660 Register scratch3, 664 Register scratch3,
661 Label* miss) { 665 Label* miss) {
662 ASSERT(holder->HasNamedInterceptor()); 666 ASSERT(holder->HasNamedInterceptor());
663 ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined()); 667 ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
664 668
665 // Check that the receiver isn't a smi. 669 // Check that the receiver isn't a smi.
666 __ BranchOnSmi(receiver, miss); 670 __ JumpIfSmi(receiver, miss);
667 671
668 CallOptimization optimization(lookup); 672 CallOptimization optimization(lookup);
669 673
670 if (optimization.is_constant_call()) { 674 if (optimization.is_constant_call()) {
671 CompileCacheable(masm, 675 CompileCacheable(masm,
672 object, 676 object,
673 receiver, 677 receiver,
674 scratch1, 678 scratch1,
675 scratch2, 679 scratch2,
676 scratch3, 680 scratch3,
(...skipping 221 matching lines...)
898 miss); 902 miss);
899 if (result->IsFailure()) return result; 903 if (result->IsFailure()) return result;
900 } 904 }
901 ASSERT(current->IsJSObject()); 905 ASSERT(current->IsJSObject());
902 current = JSObject::cast(current->GetPrototype()); 906 current = JSObject::cast(current->GetPrototype());
903 } 907 }
904 return NULL; 908 return NULL;
905 } 909 }
906 910
907 911
912 // Convert the int passed in register ival to an IEEE 754 single precision
913 // floating point value and store it at memory location (dst + 4 * wordoffset).
914 // If VFP3 is available, use it for the conversion.
915 static void StoreIntAsFloat(MacroAssembler* masm,
916 Register dst,
917 Register wordoffset,
918 Register ival,
919 Register fval,
920 Register scratch1,
921 Register scratch2) {
922 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
923 CpuFeatures::Scope scope(VFP3);
924 __ vmov(s0, ival);
925 __ add(scratch1, dst, Operand(wordoffset, LSL, 2));
926 __ vcvt_f32_s32(s0, s0);
927 __ vstr(s0, scratch1, 0);
928 } else {
929 Label not_special, done;
930 // Move the sign bit from the source to the destination. This works because
931 // the sign bit of the binary32 value has the same position and polarity as
932 // the 2's complement sign bit of the 32-bit integer.
933 ASSERT(kBinary32SignMask == 0x80000000u);
934
935 __ and_(fval, ival, Operand(kBinary32SignMask), SetCC);
936 // Negate value if it is negative.
937 __ rsb(ival, ival, Operand(0, RelocInfo::NONE), LeaveCC, ne);
938
939 // We have -1, 0 or 1, which we treat specially. Register ival contains
940 // absolute value: it is either equal to 1 (special case of -1 and 1),
941 // greater than 1 (not a special case) or less than 1 (special case of 0).
942 __ cmp(ival, Operand(1));
943 __ b(gt, &not_special);
944
945 // For 1 or -1 we need to or in the 0 exponent (biased).
946 static const uint32_t exponent_word_for_1 =
947 kBinary32ExponentBias << kBinary32ExponentShift;
948
949 __ orr(fval, fval, Operand(exponent_word_for_1), LeaveCC, eq);
950 __ b(&done);
951
952 __ bind(&not_special);
953 // Count leading zeros.
954 // Gets the wrong answer for 0, but we already checked for that case above.
955 Register zeros = scratch2;
956 __ CountLeadingZeros(zeros, ival, scratch1);
957
958 // Compute exponent and or it into the exponent register.
959 __ rsb(scratch1,
960 zeros,
961 Operand((kBitsPerInt - 1) + kBinary32ExponentBias));
962
963 __ orr(fval,
964 fval,
965 Operand(scratch1, LSL, kBinary32ExponentShift));
966
967 // Shift up the source chopping the top bit off.
968 __ add(zeros, zeros, Operand(1));
969 // This wouldn't work for 1 and -1, as the shift would be 32, which means 0.
970 __ mov(ival, Operand(ival, LSL, zeros));
971 // Or in the top mantissa bits (top 23 bits).
972 __ orr(fval,
973 fval,
974 Operand(ival, LSR, kBitsPerInt - kBinary32MantissaBits));
975
976 __ bind(&done);
977 __ str(fval, MemOperand(dst, wordoffset, LSL, 2));
978 }
979 }
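
The non-VFP3 branch above assembles the binary32 bit pattern by hand. For reference, the same computation can be written as the standalone sketch below; the constants are inlined from the kBinary32* definitions, __builtin_clz (GCC/Clang) stands in for the CountLeadingZeros macro, and, like the stub, it truncates mantissa bits that do not fit.

    #include <cstdint>

    // Build an IEEE 754 binary32 bit pattern from a signed 32-bit integer.
    static uint32_t IntToBinary32(int32_t ival) {
      const uint32_t kSignMask = 0x80000000u;
      const uint32_t kExponentBias = 127;
      const int kExponentShift = 23;
      const int kMantissaBits = 23;

      uint32_t fval = static_cast<uint32_t>(ival) & kSignMask;  // sign bit
      uint32_t mag = static_cast<uint32_t>(ival);
      if (ival < 0) mag = 0u - mag;  // |ival|, well-defined even for INT32_MIN
      if (mag == 0) return fval;                                      // +/-0
      if (mag == 1) return fval | (kExponentBias << kExponentShift);  // +/-1

      int zeros = __builtin_clz(mag);                          // leading zeros
      fval |= (31 - zeros + kExponentBias) << kExponentShift;  // exponent
      mag <<= zeros + 1;                        // drop the implicit leading 1
      fval |= mag >> (32 - kMantissaBits);      // top 23 bits of the mantissa
      return fval;
    }
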
980
981
982 // Convert an unsigned integer with a specified number of leading zeroes in
983 // its binary representation to an IEEE 754 double.
984 // The integer to convert is passed in register hiword.
985 // The resulting double is returned in registers hiword:loword.
986 // This function does not work correctly for 0.
987 static void GenerateUInt2Double(MacroAssembler* masm,
988 Register hiword,
989 Register loword,
990 Register scratch,
991 int leading_zeroes) {
992 const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
993 const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
994
995 const int mantissa_shift_for_hi_word =
996 meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
997
998 const int mantissa_shift_for_lo_word =
999 kBitsPerInt - mantissa_shift_for_hi_word;
1000
1001 __ mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
1002 if (mantissa_shift_for_hi_word > 0) {
1003 __ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
1004 __ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word));
1005 } else {
1006 __ mov(loword, Operand(0, RelocInfo::NONE));
1007 __ orr(hiword, scratch, Operand(hiword, LSL, mantissa_shift_for_hi_word));
1008 }
1009
1010 // If the least significant bit of the biased exponent was not 1, it was
1011 // corrupted by the most significant bit of the mantissa, so fix that.
1012 if (!(biased_exponent & 1)) {
1013 __ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
1014 }
1015 }
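
GenerateUInt2Double performs the same word-level construction as this standalone sketch. The constants are inlined from the HeapNumber definitions; like the stub, it is incorrect for 0 and trusts the caller's leading-zero count.

    #include <cstdint>

    // Build the hi/lo words of an IEEE 754 double from a nonzero uint32
    // whose number of leading zeros is known.
    static void UInt2Double(uint32_t value, int leading_zeros,
                            uint32_t* hiword, uint32_t* loword) {
      const uint32_t kExponentBias = 1023;
      const int kExponentShift = 20;  // exponent LSB is bit 20 of the hi word
      const int kMantissaBitsInTopWord = 20;

      const int meaningful_bits = 32 - leading_zeros - 1;
      const uint32_t biased_exponent = kExponentBias + meaningful_bits;
      const int shift_hi = meaningful_bits - kMantissaBitsInTopWord;

      if (shift_hi > 0) {
        *loword = value << (32 - shift_hi);
        *hiword = (biased_exponent << kExponentShift) | (value >> shift_hi);
      } else {
        *loword = 0;
        *hiword = (biased_exponent << kExponentShift) | (value << -shift_hi);
      }
      // The implicit leading 1 of the value lands exactly on the exponent's
      // low bit; clear it again when that bit of the biased exponent is 0.
      if (!(biased_exponent & 1)) *hiword &= ~(1u << kExponentShift);
    }
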
1016
908 1017
909 #undef __ 1018 #undef __
910 #define __ ACCESS_MASM(masm()) 1019 #define __ ACCESS_MASM(masm())
911 1020
912 1021
913 Register StubCompiler::CheckPrototypes(JSObject* object, 1022 Register StubCompiler::CheckPrototypes(JSObject* object,
914 Register object_reg, 1023 Register object_reg,
915 JSObject* holder, 1024 JSObject* holder,
916 Register holder_reg, 1025 Register holder_reg,
917 Register scratch1, 1026 Register scratch1,
(...skipping 167 matching lines...)
1085 Register reg = 1194 Register reg =
1086 CheckPrototypes(object, receiver, holder, 1195 CheckPrototypes(object, receiver, holder,
1087 scratch1, scratch2, scratch3, name, miss); 1196 scratch1, scratch2, scratch3, name, miss);
1088 1197
1089 // Return the constant value. 1198 // Return the constant value.
1090 __ mov(r0, Operand(Handle<Object>(value))); 1199 __ mov(r0, Operand(Handle<Object>(value)));
1091 __ Ret(); 1200 __ Ret();
1092 } 1201 }
1093 1202
1094 1203
1095 bool StubCompiler::GenerateLoadCallback(JSObject* object, 1204 MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
1096 JSObject* holder, 1205 JSObject* holder,
1097 Register receiver, 1206 Register receiver,
1098 Register name_reg, 1207 Register name_reg,
1099 Register scratch1, 1208 Register scratch1,
1100 Register scratch2, 1209 Register scratch2,
1101 Register scratch3, 1210 Register scratch3,
1102 AccessorInfo* callback, 1211 AccessorInfo* callback,
1103 String* name, 1212 String* name,
1104 Label* miss, 1213 Label* miss) {
1105 Failure** failure) {
1106 // Check that the receiver isn't a smi. 1214 // Check that the receiver isn't a smi.
1107 __ tst(receiver, Operand(kSmiTagMask)); 1215 __ tst(receiver, Operand(kSmiTagMask));
1108 __ b(eq, miss); 1216 __ b(eq, miss);
1109 1217
1110 // Check that the maps haven't changed. 1218 // Check that the maps haven't changed.
1111 Register reg = 1219 Register reg =
1112 CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3, 1220 CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3,
1113 name, miss); 1221 name, miss);
1114 1222
1115 // Push the arguments on the JS stack of the caller. 1223 // Push the arguments on the JS stack of the caller.
1116 __ push(receiver); // Receiver. 1224 __ push(receiver); // Receiver.
1117 __ mov(scratch3, Operand(Handle<AccessorInfo>(callback))); // callback data 1225 __ mov(scratch3, Operand(Handle<AccessorInfo>(callback))); // callback data
1118 __ ldr(ip, FieldMemOperand(scratch3, AccessorInfo::kDataOffset)); 1226 __ ldr(ip, FieldMemOperand(scratch3, AccessorInfo::kDataOffset));
1119 __ Push(reg, ip, scratch3, name_reg); 1227 __ Push(reg, ip, scratch3, name_reg);
1120 1228
1121 // Do tail-call to the runtime system. 1229 // Do tail-call to the runtime system.
1122 ExternalReference load_callback_property = 1230 ExternalReference load_callback_property =
1123 ExternalReference(IC_Utility(IC::kLoadCallbackProperty)); 1231 ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
1124 __ TailCallExternalReference(load_callback_property, 5, 1); 1232 __ TailCallExternalReference(load_callback_property, 5, 1);
1125 1233
1126 return true; 1234 return HEAP->undefined_value(); // Success.
1127 } 1235 }
1128 1236
1129 1237
1130 void StubCompiler::GenerateLoadInterceptor(JSObject* object, 1238 void StubCompiler::GenerateLoadInterceptor(JSObject* object,
1131 JSObject* interceptor_holder, 1239 JSObject* interceptor_holder,
1132 LookupResult* lookup, 1240 LookupResult* lookup,
1133 Register receiver, 1241 Register receiver,
1134 Register name_reg, 1242 Register name_reg,
1135 Register scratch1, 1243 Register scratch1,
1136 Register scratch2, 1244 Register scratch2,
1137 Register scratch3, 1245 Register scratch3,
1138 String* name, 1246 String* name,
1139 Label* miss) { 1247 Label* miss) {
1140 ASSERT(interceptor_holder->HasNamedInterceptor()); 1248 ASSERT(interceptor_holder->HasNamedInterceptor());
1141 ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined()); 1249 ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
1142 1250
1143 // Check that the receiver isn't a smi. 1251 // Check that the receiver isn't a smi.
1144 __ BranchOnSmi(receiver, miss); 1252 __ JumpIfSmi(receiver, miss);
1145 1253
1146 // So far the most popular follow-ups for interceptor loads are FIELD 1254
1147 // and CALLBACKS, so inline only them; other cases may be added 1255
1148 // later. 1256 // later.
1149 bool compile_followup_inline = false; 1257 bool compile_followup_inline = false;
1150 if (lookup->IsProperty() && lookup->IsCacheable()) { 1258 if (lookup->IsProperty() && lookup->IsCacheable()) {
1151 if (lookup->type() == FIELD) { 1259 if (lookup->type() == FIELD) {
1152 compile_followup_inline = true; 1260 compile_followup_inline = true;
1153 } else if (lookup->type() == CALLBACKS && 1261 } else if (lookup->type() == CALLBACKS &&
1154 lookup->GetCallbackObject()->IsAccessorInfo() && 1262 lookup->GetCallbackObject()->IsAccessorInfo() &&
(...skipping 173 matching lines...)
1328 __ cmp(r4, r3); 1436 __ cmp(r4, r3);
1329 __ b(ne, miss); 1437 __ b(ne, miss);
1330 } else { 1438 } else {
1331 __ cmp(r1, Operand(Handle<JSFunction>(function))); 1439 __ cmp(r1, Operand(Handle<JSFunction>(function)));
1332 __ b(ne, miss); 1440 __ b(ne, miss);
1333 } 1441 }
1334 } 1442 }
1335 1443
1336 1444
1337 MaybeObject* CallStubCompiler::GenerateMissBranch() { 1445 MaybeObject* CallStubCompiler::GenerateMissBranch() {
1446 MaybeObject* maybe_obj = Isolate::Current()->stub_cache()->ComputeCallMiss(
1447 arguments().immediate(), kind_);
1338 Object* obj; 1448 Object* obj;
1339 { MaybeObject* maybe_obj = Isolate::Current()->stub_cache()->ComputeCallMiss( 1449 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
1340 arguments().immediate(), kind_);
1341 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
1342 }
1343 __ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET); 1450 __ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
1344 return obj; 1451 return obj;
1345 } 1452 }
1346 1453
1347 1454
1348 MaybeObject* CallStubCompiler::CompileCallField(JSObject* object, 1455 MaybeObject* CallStubCompiler::CompileCallField(JSObject* object,
1349 JSObject* holder, 1456 JSObject* holder,
1350 int index, 1457 int index,
1351 String* name) { 1458 String* name) {
1352 // ----------- S t a t e ------------- 1459 // ----------- S t a t e -------------
(...skipping 50 matching lines...)
1403 1510
1404 GenerateNameCheck(name, &miss); 1511 GenerateNameCheck(name, &miss);
1405 1512
1406 Register receiver = r1; 1513 Register receiver = r1;
1407 1514
1408 // Get the receiver from the stack 1515 // Get the receiver from the stack
1409 const int argc = arguments().immediate(); 1516 const int argc = arguments().immediate();
1410 __ ldr(receiver, MemOperand(sp, argc * kPointerSize)); 1517 __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
1411 1518
1412 // Check that the receiver isn't a smi. 1519 // Check that the receiver isn't a smi.
1413 __ BranchOnSmi(receiver, &miss); 1520 __ JumpIfSmi(receiver, &miss);
1414 1521
1415 // Check that the maps haven't changed. 1522 // Check that the maps haven't changed.
1416 CheckPrototypes(JSObject::cast(object), receiver, 1523 CheckPrototypes(JSObject::cast(object), receiver,
1417 holder, r3, r0, r4, name, &miss); 1524 holder, r3, r0, r4, name, &miss);
1418 1525
1419 if (argc == 0) { 1526 if (argc == 0) {
1420 // Nothing to do, just return the length. 1527 // Nothing to do, just return the length.
1421 __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); 1528 __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
1422 __ Drop(argc + 1); 1529 __ Drop(argc + 1);
1423 __ Ret(); 1530 __ Ret();
(...skipping 33 matching lines...)
1457 __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize)); 1564 __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
1458 // We may need a register containing the address end_elements below, 1565 // We may need a register containing the address end_elements below,
1459 // so write back the value in end_elements. 1566 // so write back the value in end_elements.
1460 __ add(end_elements, elements, 1567 __ add(end_elements, elements,
1461 Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize)); 1568 Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
1462 const int kEndElementsOffset = 1569 const int kEndElementsOffset =
1463 FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize; 1570 FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
1464 __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex)); 1571 __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
1465 1572
1466 // Check for a smi. 1573 // Check for a smi.
1467 __ BranchOnNotSmi(r4, &with_write_barrier); 1574 __ JumpIfNotSmi(r4, &with_write_barrier);
1468 __ bind(&exit); 1575 __ bind(&exit);
1469 __ Drop(argc + 1); 1576 __ Drop(argc + 1);
1470 __ Ret(); 1577 __ Ret();
1471 1578
1472 __ bind(&with_write_barrier); 1579 __ bind(&with_write_barrier);
1473 __ InNewSpace(elements, r4, eq, &exit); 1580 __ InNewSpace(elements, r4, eq, &exit);
1474 __ RecordWriteHelper(elements, end_elements, r4); 1581 __ RecordWriteHelper(elements, end_elements, r4);
1475 __ Drop(argc + 1); 1582 __ Drop(argc + 1);
1476 __ Ret(); 1583 __ Ret();
1477 1584
(...skipping 86 matching lines...)
1564 Register receiver = r1; 1671 Register receiver = r1;
1565 Register elements = r3; 1672 Register elements = r3;
1566 1673
1567 GenerateNameCheck(name, &miss); 1674 GenerateNameCheck(name, &miss);
1568 1675
1569 // Get the receiver from the stack 1676 // Get the receiver from the stack
1570 const int argc = arguments().immediate(); 1677 const int argc = arguments().immediate();
1571 __ ldr(receiver, MemOperand(sp, argc * kPointerSize)); 1678 __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
1572 1679
1573 // Check that the receiver isn't a smi. 1680 // Check that the receiver isn't a smi.
1574 __ BranchOnSmi(receiver, &miss); 1681 __ JumpIfSmi(receiver, &miss);
1575 1682
1576 // Check that the maps haven't changed. 1683 // Check that the maps haven't changed.
1577 CheckPrototypes(JSObject::cast(object), 1684 CheckPrototypes(JSObject::cast(object),
1578 receiver, holder, elements, r4, r0, name, &miss); 1685 receiver, holder, elements, r4, r0, name, &miss);
1579 1686
1580 // Get the elements array of the object. 1687 // Get the elements array of the object.
1581 __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset)); 1688 __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
1582 1689
1583 // Check that the elements are in fast mode and writable. 1690 // Check that the elements are in fast mode and writable.
1584 __ CheckMap(elements, r0, Heap::kFixedArrayMapRootIndex, &call_builtin, true); 1691 __ CheckMap(elements, r0, Heap::kFixedArrayMapRootIndex, &call_builtin, true);
(...skipping 57 matching lines...)
1642 // -- ... 1749 // -- ...
1643 // -- sp[argc * 4] : receiver 1750 // -- sp[argc * 4] : receiver
1644 // ----------------------------------- 1751 // -----------------------------------
1645 1752
1646 // If object is not a string, bail out to regular call. 1753 // If object is not a string, bail out to regular call.
1647 if (!object->IsString() || cell != NULL) return HEAP->undefined_value(); 1754 if (!object->IsString() || cell != NULL) return HEAP->undefined_value();
1648 1755
1649 const int argc = arguments().immediate(); 1756 const int argc = arguments().immediate();
1650 1757
1651 Label miss; 1758 Label miss;
1759 Label name_miss;
1652 Label index_out_of_range; 1760 Label index_out_of_range;
1653 GenerateNameCheck(name, &miss); 1761 Label* index_out_of_range_label = &index_out_of_range;
1762
1763 if (kind_ == Code::CALL_IC && extra_ic_state_ == DEFAULT_STRING_STUB) {
1764 index_out_of_range_label = &miss;
1765 }
1766
1767 GenerateNameCheck(name, &name_miss);
1654 1768
1655 // Check that the maps starting from the prototype haven't changed. 1769 // Check that the maps starting from the prototype haven't changed.
1656 GenerateDirectLoadGlobalFunctionPrototype(masm(), 1770 GenerateDirectLoadGlobalFunctionPrototype(masm(),
1657 Context::STRING_FUNCTION_INDEX, 1771 Context::STRING_FUNCTION_INDEX,
1658 r0, 1772 r0,
1659 &miss); 1773 &miss);
1660 ASSERT(object != holder); 1774 ASSERT(object != holder);
1661 CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, 1775 CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder,
1662 r1, r3, r4, name, &miss); 1776 r1, r3, r4, name, &miss);
1663 1777
1664 Register receiver = r1; 1778 Register receiver = r1;
1665 Register index = r4; 1779 Register index = r4;
1666 Register scratch = r3; 1780 Register scratch = r3;
1667 Register result = r0; 1781 Register result = r0;
1668 __ ldr(receiver, MemOperand(sp, argc * kPointerSize)); 1782 __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
1669 if (argc > 0) { 1783 if (argc > 0) {
1670 __ ldr(index, MemOperand(sp, (argc - 1) * kPointerSize)); 1784 __ ldr(index, MemOperand(sp, (argc - 1) * kPointerSize));
1671 } else { 1785 } else {
1672 __ LoadRoot(index, Heap::kUndefinedValueRootIndex); 1786 __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
1673 } 1787 }
1674 1788
1675 StringCharCodeAtGenerator char_code_at_generator(receiver, 1789 StringCharCodeAtGenerator char_code_at_generator(receiver,
1676 index, 1790 index,
1677 scratch, 1791 scratch,
1678 result, 1792 result,
1679 &miss, // When not a string. 1793 &miss, // When not a string.
1680 &miss, // When not a number. 1794 &miss, // When not a number.
1681 &index_out_of_range, 1795 index_out_of_range_label,
1682 STRING_INDEX_IS_NUMBER); 1796 STRING_INDEX_IS_NUMBER);
1683 char_code_at_generator.GenerateFast(masm()); 1797 char_code_at_generator.GenerateFast(masm());
1684 __ Drop(argc + 1); 1798 __ Drop(argc + 1);
1685 __ Ret(); 1799 __ Ret();
1686 1800
1687 StubRuntimeCallHelper call_helper; 1801 StubRuntimeCallHelper call_helper;
1688 char_code_at_generator.GenerateSlow(masm(), call_helper); 1802 char_code_at_generator.GenerateSlow(masm(), call_helper);
1689 1803
1690 __ bind(&index_out_of_range); 1804 if (index_out_of_range.is_linked()) {
1691 __ LoadRoot(r0, Heap::kNanValueRootIndex); 1805 __ bind(&index_out_of_range);
1692 __ Drop(argc + 1); 1806 __ LoadRoot(r0, Heap::kNanValueRootIndex);
1693 __ Ret(); 1807 __ Drop(argc + 1);
1808 __ Ret();
1809 }
1694 1810
1695 __ bind(&miss); 1811 __ bind(&miss);
1812 // Restore function name in r2.
1813 __ Move(r2, Handle<String>(name));
1814 __ bind(&name_miss);
1696 Object* obj; 1815 Object* obj;
1697 { MaybeObject* maybe_obj = GenerateMissBranch(); 1816 { MaybeObject* maybe_obj = GenerateMissBranch();
1698 if (!maybe_obj->ToObject(&obj)) return maybe_obj; 1817 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
1699 } 1818 }
1700 1819
1701 // Return the generated code. 1820 // Return the generated code.
1702 return GetCode(function); 1821 return GetCode(function);
1703 } 1822 }
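
Both string call stubs in this change (charCodeAt above, charAt below) split the miss path in two, because the shared call-miss code expects the function name in r2 while the stub body is free to clobber it. The shape of the pattern, as a sketch in the stub compiler's own idiom:

    Label miss;       // reached after the name check passed; r2 may be clobbered
    Label name_miss;  // reached when the name check itself fails; r2 intact

    GenerateNameCheck(name, &name_miss);
    // ... stub body ...

    __ bind(&miss);
    __ Move(r2, Handle<String>(name));  // restore what the miss code expects
    __ bind(&name_miss);
    // fall into GenerateMissBranch(), which jumps to the call-miss stub
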
1704 1823
1705 1824
(...skipping 10 matching lines...)
1716 // -- ... 1835 // -- ...
1717 // -- sp[argc * 4] : receiver 1836 // -- sp[argc * 4] : receiver
1718 // ----------------------------------- 1837 // -----------------------------------
1719 1838
1720 // If object is not a string, bail out to regular call. 1839 // If object is not a string, bail out to regular call.
1721 if (!object->IsString() || cell != NULL) return HEAP->undefined_value(); 1840 if (!object->IsString() || cell != NULL) return HEAP->undefined_value();
1722 1841
1723 const int argc = arguments().immediate(); 1842 const int argc = arguments().immediate();
1724 1843
1725 Label miss; 1844 Label miss;
1845 Label name_miss;
1726 Label index_out_of_range; 1846 Label index_out_of_range;
1847 Label* index_out_of_range_label = &index_out_of_range;
1727 1848
1728 GenerateNameCheck(name, &miss); 1849 if (kind_ == Code::CALL_IC && extra_ic_state_ == DEFAULT_STRING_STUB) {
1850 index_out_of_range_label = &miss;
1851 }
1852
1853 GenerateNameCheck(name, &name_miss);
1729 1854
1730 // Check that the maps starting from the prototype haven't changed. 1855 // Check that the maps starting from the prototype haven't changed.
1731 GenerateDirectLoadGlobalFunctionPrototype(masm(), 1856 GenerateDirectLoadGlobalFunctionPrototype(masm(),
1732 Context::STRING_FUNCTION_INDEX, 1857 Context::STRING_FUNCTION_INDEX,
1733 r0, 1858 r0,
1734 &miss); 1859 &miss);
1735 ASSERT(object != holder); 1860 ASSERT(object != holder);
1736 CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, 1861 CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder,
1737 r1, r3, r4, name, &miss); 1862 r1, r3, r4, name, &miss);
1738 1863
1739 Register receiver = r0; 1864 Register receiver = r0;
1740 Register index = r4; 1865 Register index = r4;
1741 Register scratch1 = r1; 1866 Register scratch1 = r1;
1742 Register scratch2 = r3; 1867 Register scratch2 = r3;
1743 Register result = r0; 1868 Register result = r0;
1744 __ ldr(receiver, MemOperand(sp, argc * kPointerSize)); 1869 __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
1745 if (argc > 0) { 1870 if (argc > 0) {
1746 __ ldr(index, MemOperand(sp, (argc - 1) * kPointerSize)); 1871 __ ldr(index, MemOperand(sp, (argc - 1) * kPointerSize));
1747 } else { 1872 } else {
1748 __ LoadRoot(index, Heap::kUndefinedValueRootIndex); 1873 __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
1749 } 1874 }
1750 1875
1751 StringCharAtGenerator char_at_generator(receiver, 1876 StringCharAtGenerator char_at_generator(receiver,
1752 index, 1877 index,
1753 scratch1, 1878 scratch1,
1754 scratch2, 1879 scratch2,
1755 result, 1880 result,
1756 &miss, // When not a string. 1881 &miss, // When not a string.
1757 &miss, // When not a number. 1882 &miss, // When not a number.
1758 &index_out_of_range, 1883 index_out_of_range_label,
1759 STRING_INDEX_IS_NUMBER); 1884 STRING_INDEX_IS_NUMBER);
1760 char_at_generator.GenerateFast(masm()); 1885 char_at_generator.GenerateFast(masm());
1761 __ Drop(argc + 1); 1886 __ Drop(argc + 1);
1762 __ Ret(); 1887 __ Ret();
1763 1888
1764 StubRuntimeCallHelper call_helper; 1889 StubRuntimeCallHelper call_helper;
1765 char_at_generator.GenerateSlow(masm(), call_helper); 1890 char_at_generator.GenerateSlow(masm(), call_helper);
1766 1891
1767 __ bind(&index_out_of_range); 1892 if (index_out_of_range.is_linked()) {
1768 __ LoadRoot(r0, Heap::kEmptyStringRootIndex); 1893 __ bind(&index_out_of_range);
1769 __ Drop(argc + 1); 1894 __ LoadRoot(r0, Heap::kEmptyStringRootIndex);
1770 __ Ret(); 1895 __ Drop(argc + 1);
1896 __ Ret();
1897 }
1771 1898
1772 __ bind(&miss); 1899 __ bind(&miss);
1900 // Restore function name in r2.
1901 __ Move(r2, Handle<String>(name));
1902 __ bind(&name_miss);
1773 Object* obj; 1903 Object* obj;
1774 { MaybeObject* maybe_obj = GenerateMissBranch(); 1904 { MaybeObject* maybe_obj = GenerateMissBranch();
1775 if (!maybe_obj->ToObject(&obj)) return maybe_obj; 1905 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
1776 } 1906 }
1777 1907
1778 // Return the generated code. 1908 // Return the generated code.
1779 return GetCode(function); 1909 return GetCode(function);
1780 } 1910 }
1781 1911
1782 1912
(...skipping 97 matching lines...)
1880 // arguments, bail out to the regular call. 2010 // arguments, bail out to the regular call.
1881 if (!object->IsJSObject() || argc != 1) return HEAP->undefined_value(); 2011 if (!object->IsJSObject() || argc != 1) return HEAP->undefined_value();
1882 2012
1883 Label miss, slow; 2013 Label miss, slow;
1884 GenerateNameCheck(name, &miss); 2014 GenerateNameCheck(name, &miss);
1885 2015
1886 if (cell == NULL) { 2016 if (cell == NULL) {
1887 __ ldr(r1, MemOperand(sp, 1 * kPointerSize)); 2017 __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
1888 2018
1889 STATIC_ASSERT(kSmiTag == 0); 2019 STATIC_ASSERT(kSmiTag == 0);
1890 __ BranchOnSmi(r1, &miss); 2020 __ JumpIfSmi(r1, &miss);
1891 2021
1892 CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name, 2022 CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
1893 &miss); 2023 &miss);
1894 } else { 2024 } else {
1895 ASSERT(cell->value() == function); 2025 ASSERT(cell->value() == function);
1896 GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss); 2026 GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
1897 GenerateLoadFunctionFromCell(cell, function, &miss); 2027 GenerateLoadFunctionFromCell(cell, function, &miss);
1898 } 2028 }
1899 2029
1900 // Load the (only) argument into r0. 2030 // Load the (only) argument into r0.
(...skipping 49 matching lines...)
1950 // They are invariant through a Math.Floor call, so just 2080 // They are invariant through a Math.Floor call, so just
1951 // return the original argument. 2081 // return the original argument.
1952 __ sub(r7, r6, Operand(HeapNumber::kExponentMask 2082 __ sub(r7, r6, Operand(HeapNumber::kExponentMask
1953 >> HeapNumber::kMantissaBitsInTopWord), SetCC); 2083 >> HeapNumber::kMantissaBitsInTopWord), SetCC);
1954 __ b(&restore_fpscr_and_return, eq); 2084 __ b(&restore_fpscr_and_return, eq);
1955 // We had an overflow or underflow in the conversion. Check if we 2085 // We had an overflow or underflow in the conversion. Check if we
1956 // have a big exponent. 2086 // have a big exponent.
1957 __ cmp(r7, Operand(HeapNumber::kMantissaBits)); 2087 __ cmp(r7, Operand(HeapNumber::kMantissaBits));
1958 // If greater or equal, the argument is already round and in r0. 2088 // If greater or equal, the argument is already round and in r0.
1959 __ b(&restore_fpscr_and_return, ge); 2089 __ b(&restore_fpscr_and_return, ge);
1960 __ b(&slow); 2090 __ b(&wont_fit_smi);
1961 2091
1962 __ bind(&no_vfp_exception); 2092 __ bind(&no_vfp_exception);
1963 // Move the result back to general purpose register r0. 2093 // Move the result back to general purpose register r0.
1964 __ vmov(r0, s0); 2094 __ vmov(r0, s0);
1965 // Check if the result fits into a smi. 2095 // Check if the result fits into a smi.
1966 __ add(r1, r0, Operand(0x40000000), SetCC); 2096 __ add(r1, r0, Operand(0x40000000), SetCC);
1967 __ b(&wont_fit_smi, mi); 2097 __ b(&wont_fit_smi, mi);
1968 // Tag the result. 2098 // Tag the result.
1969 STATIC_ASSERT(kSmiTag == 0); 2099 STATIC_ASSERT(kSmiTag == 0);
1970 __ mov(r0, Operand(r0, LSL, kSmiTagSize)); 2100 __ mov(r0, Operand(r0, LSL, kSmiTagSize));
1971 2101
1972 // Check for -0. 2102 // Check for -0.
1973 __ cmp(r0, Operand(0, RelocInfo::NONE)); 2103 __ cmp(r0, Operand(0, RelocInfo::NONE));
1974 __ b(&restore_fpscr_and_return, ne); 2104 __ b(&restore_fpscr_and_return, ne);
1975 // r5 already holds the HeapNumber exponent. 2105 // r5 already holds the HeapNumber exponent.
1976 __ tst(r5, Operand(HeapNumber::kSignMask)); 2106 __ tst(r5, Operand(HeapNumber::kSignMask));
1977 // If our HeapNumber is negative it was -0, so load its address and return. 2107 // If our HeapNumber is negative it was -0, so load its address and return.
1978 // Else r0 is loaded with 0, so we can also just return. 2108 // Else r0 is loaded with 0, so we can also just return.
1979 __ ldr(r0, MemOperand(sp, 0 * kPointerSize), ne); 2109 __ ldr(r0, MemOperand(sp, 0 * kPointerSize), ne);
1980 2110
1981 __ bind(&restore_fpscr_and_return); 2111 __ bind(&restore_fpscr_and_return);
1982 // Restore FPSCR and return. 2112 // Restore FPSCR and return.
1983 __ vmsr(r3); 2113 __ vmsr(r3);
1984 __ Drop(argc + 1); 2114 __ Drop(argc + 1);
1985 __ Ret(); 2115 __ Ret();
1986 2116
1987 __ bind(&wont_fit_smi); 2117 __ bind(&wont_fit_smi);
1988 __ bind(&slow);
1989 // Restore FPCSR and fall to slow case. 2118 // Restore FPCSR and fall to slow case.
1990 __ vmsr(r3); 2119 __ vmsr(r3);
1991 2120
2121 __ bind(&slow);
1992 // Tail call the full function. We do not have to patch the receiver 2122 // Tail call the full function. We do not have to patch the receiver
1993 // because the function makes no use of it. 2123 // because the function makes no use of it.
1994 __ InvokeFunction(function, arguments(), JUMP_FUNCTION); 2124 __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
1995 2125
1996 __ bind(&miss); 2126 __ bind(&miss);
1997 // r2: function name. 2127 // r2: function name.
1998 MaybeObject* obj = GenerateMissBranch(); 2128 MaybeObject* obj = GenerateMissBranch();
1999 if (obj->IsFailure()) return obj; 2129 if (obj->IsFailure()) return obj;
2000 2130
2001 // Return the generated code. 2131 // Return the generated code.
(...skipping 37 matching lines...)
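
Stepping back, the fast path of the Math.floor stub above amounts to the following standalone sketch; the smi range and the -0 rule are taken from the stub, and the helper name is purely illustrative.

    #include <cmath>
    #include <cstdint>

    // Returns true and writes the floored value when it fits in a smi.
    // Returns false for -0 (a smi cannot represent it), for NaN, and for
    // results outside the smi range [-2^30, 2^30 - 1]; the stub handles
    // those cases on its slow path or returns the HeapNumber unchanged.
    static bool FloorToSmi(double x, int32_t* out) {
      double f = std::floor(x);
      if (f == 0.0 && std::signbit(x)) return false;  // -0
      if (!(f >= -1073741824.0 && f <= 1073741823.0)) return false;
      *out = static_cast<int32_t>(f);
      return true;
    }
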
2039 GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss); 2169 GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
2040 GenerateLoadFunctionFromCell(cell, function, &miss); 2170 GenerateLoadFunctionFromCell(cell, function, &miss);
2041 } 2171 }
2042 2172
2043 // Load the (only) argument into r0. 2173 // Load the (only) argument into r0.
2044 __ ldr(r0, MemOperand(sp, 0 * kPointerSize)); 2174 __ ldr(r0, MemOperand(sp, 0 * kPointerSize));
2045 2175
2046 // Check if the argument is a smi. 2176 // Check if the argument is a smi.
2047 Label not_smi; 2177 Label not_smi;
2048 STATIC_ASSERT(kSmiTag == 0); 2178 STATIC_ASSERT(kSmiTag == 0);
2049 __ BranchOnNotSmi(r0, &not_smi); 2179 __ JumpIfNotSmi(r0, &not_smi);
2050 2180
2051 // Do bitwise not or do nothing depending on the sign of the 2181 // Do bitwise not or do nothing depending on the sign of the
2052 // argument. 2182 // argument.
2053 __ eor(r1, r0, Operand(r0, ASR, kBitsPerInt - 1)); 2183 __ eor(r1, r0, Operand(r0, ASR, kBitsPerInt - 1));
2054 2184
2055 // Add 1 or do nothing depending on the sign of the argument. 2185 // Add 1 or do nothing depending on the sign of the argument.
2056 __ sub(r0, r1, Operand(r0, ASR, kBitsPerInt - 1), SetCC); 2186 __ sub(r0, r1, Operand(r0, ASR, kBitsPerInt - 1), SetCC);
2057 2187
2058 // If the result is still negative, go to the slow case. 2188 // If the result is still negative, go to the slow case.
2059 // This only happens for the most negative smi. 2189 // This only happens for the most negative smi.
(...skipping 553 matching lines...)
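
The eor/sub pair in the Math.abs stub above is the classic branchless absolute value: with mask = x >> 31 (arithmetic shift, so 0 for non-negative x and -1 for negative x), abs(x) = (x ^ mask) - mask. It works directly on the tagged smi since the tag bit is 0, and for the most negative smi the result is still negative, which is exactly why the stub falls through to the slow case afterwards. A host-side sketch:

    #include <cstdint>

    // Branchless abs, mirroring the stub's eor/sub sequence.
    static int32_t BranchlessAbs(int32_t x) {
      int32_t mask = x >> 31;    // 0 if x >= 0, -1 if x < 0
      return (x ^ mask) - mask;  // flip bits and add 1 only when negative
    }
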
2613 JSObject* object, 2743 JSObject* object,
2614 JSObject* holder, 2744 JSObject* holder,
2615 AccessorInfo* callback) { 2745 AccessorInfo* callback) {
2616 // ----------- S t a t e ------------- 2746 // ----------- S t a t e -------------
2617 // -- r0 : receiver 2747 // -- r0 : receiver
2618 // -- r2 : name 2748 // -- r2 : name
2619 // -- lr : return address 2749 // -- lr : return address
2620 // ----------------------------------- 2750 // -----------------------------------
2621 Label miss; 2751 Label miss;
2622 2752
2623 Failure* failure = Failure::InternalError(); 2753 MaybeObject* result = GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4,
2624 bool success = GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4, 2754 callback, name, &miss);
2625 callback, name, &miss, &failure); 2755 if (result->IsFailure()) {
2626 if (!success) {
2627 miss.Unuse(); 2756 miss.Unuse();
2628 return failure; 2757 return result;
2629 } 2758 }
2630 2759
2631 __ bind(&miss); 2760 __ bind(&miss);
2632 GenerateLoadMiss(masm(), Code::LOAD_IC); 2761 GenerateLoadMiss(masm(), Code::LOAD_IC);
2633 2762
2634 // Return the generated code. 2763 // Return the generated code.
2635 return GetCode(CALLBACKS, name); 2764 return GetCode(CALLBACKS, name);
2636 } 2765 }
2637 2766
2638 2767
(...skipping 126 matching lines...)
2765 // -- lr : return address 2894 // -- lr : return address
2766 // -- r0 : key 2895 // -- r0 : key
2767 // -- r1 : receiver 2896 // -- r1 : receiver
2768 // ----------------------------------- 2897 // -----------------------------------
2769 Label miss; 2898 Label miss;
2770 2899
2771 // Check the key is the cached one. 2900 // Check the key is the cached one.
2772 __ cmp(r0, Operand(Handle<String>(name))); 2901 __ cmp(r0, Operand(Handle<String>(name)));
2773 __ b(ne, &miss); 2902 __ b(ne, &miss);
2774 2903
2775 Failure* failure = Failure::InternalError(); 2904 MaybeObject* result = GenerateLoadCallback(receiver, holder, r1, r0, r2, r3,
2776 bool success = GenerateLoadCallback(receiver, holder, r1, r0, r2, r3, r4, 2905 r4, callback, name, &miss);
2777 callback, name, &miss, &failure); 2906 if (result->IsFailure()) {
2778 if (!success) {
2779 miss.Unuse(); 2907 miss.Unuse();
2780 return failure; 2908 return result;
2781 } 2909 }
2782 2910
2783 __ bind(&miss); 2911 __ bind(&miss);
2784 GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); 2912 GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
2785 2913
2786 return GetCode(CALLBACKS, name); 2914 return GetCode(CALLBACKS, name);
2787 } 2915 }
2788 2916
2789 2917
2790 MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name, 2918 MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
(...skipping 79 matching lines...)
2870 // -- r0 : key 2998 // -- r0 : key
2871 // -- r1 : receiver 2999 // -- r1 : receiver
2872 // ----------------------------------- 3000 // -----------------------------------
2873 Label miss; 3001 Label miss;
2874 __ IncrementCounter(COUNTERS->keyed_load_string_length(), 1, r2, r3); 3002 __ IncrementCounter(COUNTERS->keyed_load_string_length(), 1, r2, r3);
2875 3003
2876 // Check the key is the cached one. 3004 // Check the key is the cached one.
2877 __ cmp(r0, Operand(Handle<String>(name))); 3005 __ cmp(r0, Operand(Handle<String>(name)));
2878 __ b(ne, &miss); 3006 __ b(ne, &miss);
2879 3007
2880 GenerateLoadStringLength(masm(), r1, r2, r3, &miss); 3008 GenerateLoadStringLength(masm(), r1, r2, r3, &miss, true);
2881 __ bind(&miss); 3009 __ bind(&miss);
2882 __ DecrementCounter(COUNTERS->keyed_load_string_length(), 1, r2, r3); 3010 __ DecrementCounter(COUNTERS->keyed_load_string_length(), 1, r2, r3);
2883 3011
2884 GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); 3012 GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
2885 3013
2886 return GetCode(CALLBACKS, name); 3014 return GetCode(CALLBACKS, name);
2887 } 3015 }
2888 3016
2889 3017
2890 MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) { 3018 MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
(...skipping 316 matching lines...)
3207 Code* code = Isolate::Current()->builtins()->builtin( 3335 Code* code = Isolate::Current()->builtins()->builtin(
3208 Builtins::JSConstructStubGeneric); 3336 Builtins::JSConstructStubGeneric);
3209 Handle<Code> generic_construct_stub(code); 3337 Handle<Code> generic_construct_stub(code);
3210 __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET); 3338 __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
3211 3339
3212 // Return the generated code. 3340 // Return the generated code.
3213 return GetCode(); 3341 return GetCode();
3214 } 3342 }
3215 3343
3216 3344
3345 static bool IsElementTypeSigned(ExternalArrayType array_type) {
3346 switch (array_type) {
3347 case kExternalByteArray:
3348 case kExternalShortArray:
3349 case kExternalIntArray:
3350 return true;
3351
3352 case kExternalUnsignedByteArray:
3353 case kExternalUnsignedShortArray:
3354 case kExternalUnsignedIntArray:
3355 return false;
3356
3357 default:
3358 UNREACHABLE();
3359 return false;
3360 }
3361 }
3362
3363
3364 MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
3365 ExternalArrayType array_type, Code::Flags flags) {
3366 // ---------- S t a t e --------------
3367 // -- lr : return address
3368 // -- r0 : key
3369 // -- r1 : receiver
3370 // -----------------------------------
3371 Label slow, failed_allocation;
3372
3373 Register key = r0;
3374 Register receiver = r1;
3375
3376 // Check that the object isn't a smi
3377 __ JumpIfSmi(receiver, &slow);
3378
3379 // Check that the key is a smi.
3380 __ JumpIfNotSmi(key, &slow);
3381
3382 // Check that the object is a JS object. Load map into r2.
3383 __ CompareObjectType(receiver, r2, r3, FIRST_JS_OBJECT_TYPE);
3384 __ b(lt, &slow);
3385
3386 // Check that the receiver does not require access checks. We need
3387 // to check this explicitly since this generic stub does not perform
3388 // map checks.
3389 __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
3390 __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
3391 __ b(ne, &slow);
3392
3393 // Check that the elements array is the appropriate type of
3394 // ExternalArray.
3395 __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
3396 __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
3397 __ LoadRoot(ip, HEAP->RootIndexForExternalArrayType(array_type));
3398 __ cmp(r2, ip);
3399 __ b(ne, &slow);
3400
3401 // Check that the index is in range.
3402 __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
3403 __ cmp(ip, Operand(key, ASR, kSmiTagSize));
3404 // Unsigned comparison catches both negative and too-large values.
3405 __ b(lo, &slow);
3406
3407 // r3: elements array
3408 __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
3409 // r3: base pointer of external storage
3410
3411 // We do not untag the smi key; instead we work with it
3412 // as if it were premultiplied by 2.
3413 ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
3414
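 // (Sketch: with kSmiTagSize == 1 the smi key equals index << 1, and the
 //  tag is folded straight into the addressing modes used below:
 //    byte elements:  base + (key >> 1)   -> MemOperand(r3, key, LSR, 1)
 //    short elements: base + key          -> MemOperand(r3, key, LSL, 0)
 //    word elements:  base + (key << 1)   -> MemOperand(r3, key, LSL, 1).)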
3415 Register value = r2;
3416 switch (array_type) {
3417 case kExternalByteArray:
3418 __ ldrsb(value, MemOperand(r3, key, LSR, 1));
3419 break;
3420 case kExternalUnsignedByteArray:
3421 __ ldrb(value, MemOperand(r3, key, LSR, 1));
3422 break;
3423 case kExternalShortArray:
3424 __ ldrsh(value, MemOperand(r3, key, LSL, 0));
3425 break;
3426 case kExternalUnsignedShortArray:
3427 __ ldrh(value, MemOperand(r3, key, LSL, 0));
3428 break;
3429 case kExternalIntArray:
3430 case kExternalUnsignedIntArray:
3431 __ ldr(value, MemOperand(r3, key, LSL, 1));
3432 break;
3433 case kExternalFloatArray:
3434 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
3435 CpuFeatures::Scope scope(VFP3);
3436 __ add(r2, r3, Operand(key, LSL, 1));
3437 __ vldr(s0, r2, 0);
3438 } else {
3439 __ ldr(value, MemOperand(r3, key, LSL, 1));
3440 }
3441 break;
3442 default:
3443 UNREACHABLE();
3444 break;
3445 }
3446
3447 // For integer array types:
3448 // r2: value
3449 // For the floating-point array type:
3450 // s0: value (if VFP3 is supported)
3451 // r2: value (if VFP3 is not supported)
3452
3453 if (array_type == kExternalIntArray) {
3454 // For the Int and UnsignedInt array types, we need to see whether
3455 // the value can be represented in a Smi. If not, we need to convert
3456 // it to a HeapNumber.
3457 Label box_int;
3458 __ cmp(value, Operand(0xC0000000));
3459 __ b(mi, &box_int);
3460 // Tag integer as smi and return it.
3461 __ mov(r0, Operand(value, LSL, kSmiTagSize));
3462 __ Ret();
3463
3464 __ bind(&box_int);
3465 // Allocate a HeapNumber for the result and perform int-to-double
3466 // conversion. Don't touch r0 or r1 as they are needed if allocation
3467 // fails.
3468 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
3469 __ AllocateHeapNumber(r5, r3, r4, r6, &slow);
3470 // Now we can use r0 for the result as key is not needed any more.
3471 __ mov(r0, r5);
3472
3473 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
3474 CpuFeatures::Scope scope(VFP3);
3475 __ vmov(s0, value);
3476 __ vcvt_f64_s32(d0, s0);
3477 __ sub(r3, r0, Operand(kHeapObjectTag));
3478 __ vstr(d0, r3, HeapNumber::kValueOffset);
3479 __ Ret();
3480 } else {
3481 WriteInt32ToHeapNumberStub stub(value, r0, r3);
3482 __ TailCallStub(&stub);
3483 }
3484 } else if (array_type == kExternalUnsignedIntArray) {
3485 // The test is different for unsigned int values. Since we need
3486 // the value to be in the range of a positive smi, we can't
3487 // handle either of the top two bits being set in the value.
3488 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
3489 CpuFeatures::Scope scope(VFP3);
3490 Label box_int, done;
3491 __ tst(value, Operand(0xC0000000));
3492 __ b(ne, &box_int);
3493 // Tag integer as smi and return it.
3494 __ mov(r0, Operand(value, LSL, kSmiTagSize));
3495 __ Ret();
3496
3497 __ bind(&box_int);
3498 __ vmov(s0, value);
3499 // Allocate a HeapNumber for the result and perform int-to-double
3500 // conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all
3501 // registers - also when jumping due to exhausted young space.
3502 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
3503 __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
3504
3505 __ vcvt_f64_u32(d0, s0);
3506 __ sub(r1, r2, Operand(kHeapObjectTag));
3507 __ vstr(d0, r1, HeapNumber::kValueOffset);
3508
3509 __ mov(r0, r2);
3510 __ Ret();
3511 } else {
3512 // Check whether unsigned integer fits into smi.
3513 Label box_int_0, box_int_1, done;
3514 __ tst(value, Operand(0x80000000));
3515 __ b(ne, &box_int_0);
3516 __ tst(value, Operand(0x40000000));
3517 __ b(ne, &box_int_1);
3518 // Tag integer as smi and return it.
3519 __ mov(r0, Operand(value, LSL, kSmiTagSize));
3520 __ Ret();
3521
3522 Register hiword = value; // r2.
3523 Register loword = r3;
3524
3525 __ bind(&box_int_0);
3526 // Integer does not have leading zeros.
3527 GenerateUInt2Double(masm(), hiword, loword, r4, 0);
3528 __ b(&done);
3529
3530 __ bind(&box_int_1);
3531 // Integer has one leading zero.
3532 GenerateUInt2Double(masm(), hiword, loword, r4, 1);
3533
3534
3535 __ bind(&done);
3536 // Integer was converted to double in registers hiword:loword.
3537 // Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber
3538 // clobbers all registers - also when jumping due to exhausted young
3539 // space.
3540 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
3541 __ AllocateHeapNumber(r4, r5, r7, r6, &slow);
3542
3543 __ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset));
3544 __ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
3545
3546 __ mov(r0, r4);
3547 __ Ret();
3548 }
3549 } else if (array_type == kExternalFloatArray) {
3550 // For the floating-point array type, we need to always allocate a
3551 // HeapNumber.
3552 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
3553 CpuFeatures::Scope scope(VFP3);
3554 // Allocate a HeapNumber for the result. Don't use r0 and r1 as
3555 // AllocateHeapNumber clobbers all registers - also when jumping due to
3556 // exhausted young space.
3557 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
3558 __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
3559 __ vcvt_f64_f32(d0, s0);
3560 __ sub(r1, r2, Operand(kHeapObjectTag));
3561 __ vstr(d0, r1, HeapNumber::kValueOffset);
3562
3563 __ mov(r0, r2);
3564 __ Ret();
3565 } else {
3566 // Allocate a HeapNumber for the result. Don't use r0 and r1 as
3567 // AllocateHeapNumber clobbers all registers - also when jumping due to
3568 // exhausted young space.
3569 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
3570 __ AllocateHeapNumber(r3, r4, r5, r6, &slow);
3571 // VFP is not available, do manual single to double conversion.
3572
3573 // r2: floating point value (binary32)
3574 // r3: heap number for result
3575
3576 // Extract mantissa to r0. OK to clobber r0 now as there are no jumps to
3577 // the slow case from here.
3578 __ and_(r0, value, Operand(kBinary32MantissaMask));
3579
3580 // Extract exponent to r1. OK to clobber r1 now as there are no jumps to
3581 // the slow case from here.
3582 __ mov(r1, Operand(value, LSR, kBinary32MantissaBits));
3583 __ and_(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
3584
3585 Label exponent_rebiased;
3586 __ teq(r1, Operand(0x00));
3587 __ b(eq, &exponent_rebiased);
3588
3589 __ teq(r1, Operand(0xff));
3590 __ mov(r1, Operand(0x7ff), LeaveCC, eq);
3591 __ b(eq, &exponent_rebiased);
3592
3593 // Rebias exponent.
3594 __ add(r1,
3595 r1,
3596 Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
3597
3598 __ bind(&exponent_rebiased);
3599 __ and_(r2, value, Operand(kBinary32SignMask));
3600 value = no_reg;
3601 __ orr(r2, r2, Operand(r1, LSL, HeapNumber::kMantissaBitsInTopWord));
3602
3603 // Shift mantissa.
3604 static const int kMantissaShiftForHiWord =
3605 kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
3606
3607 static const int kMantissaShiftForLoWord =
3608 kBitsPerInt - kMantissaShiftForHiWord;
3609
3610 __ orr(r2, r2, Operand(r0, LSR, kMantissaShiftForHiWord));
3611 __ mov(r0, Operand(r0, LSL, kMantissaShiftForLoWord));
3612
3613 __ str(r2, FieldMemOperand(r3, HeapNumber::kExponentOffset));
3614 __ str(r0, FieldMemOperand(r3, HeapNumber::kMantissaOffset));
3615
3616 __ mov(r0, r3);
3617 __ Ret();
3618 }
3619
3620 } else {
3621 // Tag integer as smi and return it.
3622 __ mov(r0, Operand(value, LSL, kSmiTagSize));
3623 __ Ret();
3624 }
3625
3626 // Slow case, key and receiver still in r0 and r1.
3627 __ bind(&slow);
3628 __ IncrementCounter(COUNTERS->keyed_load_external_array_slow(), 1, r2, r3);
3629
3630 // ---------- S t a t e --------------
3631 // -- lr : return address
3632 // -- r0 : key
3633 // -- r1 : receiver
3634 // -----------------------------------
3635
3636 __ Push(r1, r0);
3637
3638 __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
3639
3640 return GetCode(flags);
3641 }
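
The manual single-to-double widening in the non-VFP branch of the load stub above reduces to the following sketch. The constants are inlined; exponent 0 is passed through unchanged, matching the stub, so subnormal inputs are not specially handled.

    #include <cstdint>

    // Widen an IEEE 754 binary32 bit pattern into the hi/lo words of a
    // binary64: keep the sign, rebias the exponent from 127 to 1023 (an
    // all-ones exponent stays all ones for NaN/Infinity), and split the
    // 23 mantissa bits 20/3 across the two words.
    static void Binary32ToBinary64(uint32_t f, uint32_t* hi, uint32_t* lo) {
      const uint32_t mantissa = f & 0x007fffffu;  // kBinary32MantissaMask
      uint32_t exponent = (f >> 23) & 0xffu;
      if (exponent == 0xffu) {
        exponent = 0x7ffu;                        // NaN / Infinity
      } else if (exponent != 0) {
        exponent += 1023 - 127;                   // rebias
      }
      *hi = (f & 0x80000000u)                     // sign
          | (exponent << 20)                      // kMantissaBitsInTopWord
          | (mantissa >> 3);                      // top 20 mantissa bits
      *lo = mantissa << 29;                       // remaining 3 bits
    }
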
3642
3643
3644 MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
3645 ExternalArrayType array_type, Code::Flags flags) {
3646 // ---------- S t a t e --------------
3647 // -- r0 : value
3648 // -- r1 : key
3649 // -- r2 : receiver
3650 // -- lr : return address
3651 // -----------------------------------
3652 Label slow, check_heap_number;
3653
3654 // Register usage.
3655 Register value = r0;
3656 Register key = r1;
3657 Register receiver = r2;
3658 // r3 mostly holds the elements array or the destination external array.
3659
3660 // Check that the object isn't a smi.
3661 __ JumpIfSmi(receiver, &slow);
3662
3663 // Check that the object is a JS object. Load map into r3.
3664 __ CompareObjectType(receiver, r3, r4, FIRST_JS_OBJECT_TYPE);
3665 __ b(le, &slow);
3666
3667 // Check that the receiver does not require access checks. We need
3668 // to do this because this generic stub does not perform map checks.
3669 __ ldrb(ip, FieldMemOperand(r3, Map::kBitFieldOffset));
3670 __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
3671 __ b(ne, &slow);
3672
3673 // Check that the key is a smi.
3674 __ JumpIfNotSmi(key, &slow);
3675
3676 // Check that the elements array is the appropriate type of ExternalArray.
3677 __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
3678 __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
3679 __ LoadRoot(ip, HEAP->RootIndexForExternalArrayType(array_type));
3680 __ cmp(r4, ip);
3681 __ b(ne, &slow);
3682
3683 // Check that the index is in range.
3684 __ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag the index.
3685 __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
3686 __ cmp(r4, ip);
3687 // Unsigned comparison catches both negative and too-large values.
3688 __ b(hs, &slow);
3689
3690 // Handle both smis and HeapNumbers in the fast path. Go to the
3691 // runtime for all other kinds of values.
3692 // r3: external array.
3693 // r4: key (integer).
3694 __ JumpIfNotSmi(value, &check_heap_number);
3695 __ mov(r5, Operand(value, ASR, kSmiTagSize)); // Untag the value.
3696 __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
3697
3698 // r3: base pointer of external storage.
3699 // r4: key (integer).
3700 // r5: value (integer).
3701 switch (array_type) {
3702 case kExternalByteArray:
3703 case kExternalUnsignedByteArray:
3704 __ strb(r5, MemOperand(r3, r4, LSL, 0));
3705 break;
3706 case kExternalShortArray:
3707 case kExternalUnsignedShortArray:
3708 __ strh(r5, MemOperand(r3, r4, LSL, 1));
3709 break;
3710 case kExternalIntArray:
3711 case kExternalUnsignedIntArray:
3712 __ str(r5, MemOperand(r3, r4, LSL, 2));
3713 break;
3714 case kExternalFloatArray:
3715 // Perform int-to-float conversion and store to memory.
3716 StoreIntAsFloat(masm(), r3, r4, r5, r6, r7, r9);
3717 break;
3718 default:
3719 UNREACHABLE();
3720 break;
3721 }
3722
3723 // Entry registers are intact; r0 holds the value, which is the return value.
3724 __ Ret();
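[Editor's note] These fast-path stores truncate to the element width rather than clamping. A sketch of the byte case (StoreByteTruncating is a hypothetical helper):

  #include <cstdint>
  // strb writes only the low 8 bits, so an out-of-range smi value wraps
  // modulo 256 instead of being clamped to [0, 255].
  void StoreByteTruncating(uint8_t* base, uint32_t index, int32_t value) {
    base[index] = static_cast<uint8_t>(value);
  }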
3725
3726
3727 // r3: external array.
3728 // r4: index (integer).
3729 __ bind(&check_heap_number);
3730 __ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE);
3731 __ b(ne, &slow);
3732
3733 __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
3734
3735 // r3: base pointer of external storage.
3736 // r4: key (integer).
3737
3738 // The WebGL specification leaves the behavior of storing NaN and
3739 // +/-Infinity into integer arrays basically undefined. For more
3740 // reproducible behavior, convert these to zero.
3741 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
3742 CpuFeatures::Scope scope(VFP3);
3743
3744
3745 if (array_type == kExternalFloatArray) {
3746 // vldr requires the offset to be a multiple of 4, so we cannot
3747 // include -kHeapObjectTag in it.
3748 __ sub(r5, r0, Operand(kHeapObjectTag));
3749 __ vldr(d0, r5, HeapNumber::kValueOffset);
3750 __ add(r5, r3, Operand(r4, LSL, 2));
3751 __ vcvt_f32_f64(s0, d0);
3752 __ vstr(s0, r5, 0);
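[Editor's note] The vcvt_f32_f64/vstr pair above is an ordinary IEEE-754 double-to-single narrowing before the 4-byte store. In portable terms (a sketch; vcvt here uses the VFP default round-to-nearest mode):

  // Narrow the heap number's double value to binary32 for the float array.
  float NarrowToFloat(double value) {
    return static_cast<float>(value);
  }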
3753 } else {
3754 // Need to perform float-to-int conversion.
3755 // Test for NaN or infinity (both give zero).
3756 __ ldr(r6, FieldMemOperand(value, HeapNumber::kExponentOffset));
3757
3758 // Hoisted load: vldr requires the offset to be a multiple of 4, so we
3759 // cannot include -kHeapObjectTag in it.
3760 __ sub(r5, value, Operand(kHeapObjectTag));
3761 __ vldr(d0, r5, HeapNumber::kValueOffset);
3762
3763 __ Sbfx(r6, r6, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
3764 // NaNs and Infinities have all-ones exponents, so the extracted field sign-extends to -1.
3765 __ cmp(r6, Operand(-1));
3766 __ mov(r5, Operand(0), LeaveCC, eq);
3767
3768 // Not infinity or NaN: simply convert to int.
3769 if (IsElementTypeSigned(array_type)) {
3770 __ vcvt_s32_f64(s0, d0, Assembler::RoundToZero, ne);
3771 } else {
3772 __ vcvt_u32_f64(s0, d0, Assembler::RoundToZero, ne);
3773 }
3774 __ vmov(r5, s0, ne);
3775
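[Editor's note] A hedged sketch of the test above: Sbfx extracts the 11 exponent bits as a signed field, so an all-ones exponent (NaN or +/-Infinity) compares equal to -1 and forces a zero result. A portable equivalent for the signed case (hypothetical helper; vcvt saturates out-of-range values, which a plain C++ cast does not, so this only mirrors in-range conversions):

  #include <cstdint>
  #include <cstring>
  // Returns 0 for NaN and +/-Infinity, otherwise truncates toward zero,
  // matching Assembler::RoundToZero. The stub picks vcvt_s32_f64 or
  // vcvt_u32_f64 depending on whether the element type is signed.
  int32_t TruncateCheckingSpecials(double value) {
    uint64_t bits;
    std::memcpy(&bits, &value, sizeof bits);
    int32_t exponent = static_cast<int32_t>((bits >> 52) & 0x7FF);
    if (exponent == 0x7FF) return 0;  // all-ones exponent: NaN or Infinity
    return static_cast<int32_t>(value);
  }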
3776 switch (array_type) {
3777 case kExternalByteArray:
3778 case kExternalUnsignedByteArray:
3779 __ strb(r5, MemOperand(r3, r4, LSL, 0));
3780 break;
3781 case kExternalShortArray:
3782 case kExternalUnsignedShortArray:
3783 __ strh(r5, MemOperand(r3, r4, LSL, 1));
3784 break;
3785 case kExternalIntArray:
3786 case kExternalUnsignedIntArray:
3787 __ str(r5, MemOperand(r3, r4, LSL, 2));
3788 break;
3789 default:
3790 UNREACHABLE();
3791 break;
3792 }
3793 }
3794
3795 // Entry registers are intact; r0 holds the value, which is the return value.
3796 __ Ret();
3797 } else {
3798 // VFP3 is not available; do manual conversions.
3799 __ ldr(r5, FieldMemOperand(value, HeapNumber::kExponentOffset));
3800 __ ldr(r6, FieldMemOperand(value, HeapNumber::kMantissaOffset));
3801
3802 if (array_type == kExternalFloatArray) {
3803 Label done, nan_or_infinity_or_zero;
3804 static const int kMantissaInHiWordShift =
3805 kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
3806
3807 static const int kMantissaInLoWordShift =
3808 kBitsPerInt - kMantissaInHiWordShift;
3809
3810 // Test for all special exponent values: zeros, subnormal numbers, NaNs
3811 // and infinities. All these should be converted to 0.
3812 __ mov(r7, Operand(HeapNumber::kExponentMask));
3813 __ and_(r9, r5, Operand(r7), SetCC);
3814 __ b(eq, &nan_or_infinity_or_zero);
3815
3816 __ teq(r9, Operand(r7));
3817 __ mov(r9, Operand(kBinary32ExponentMask), LeaveCC, eq);
3818 __ b(eq, &nan_or_infinity_or_zero);
3819
3820 // Rebias exponent.
3821 __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
3822 __ add(r9,
3823 r9,
3824 Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));
3825
3826 __ cmp(r9, Operand(kBinary32MaxExponent));
3827 __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, gt);
3828 __ orr(r5, r5, Operand(kBinary32ExponentMask), LeaveCC, gt);
3829 __ b(gt, &done);
3830
3831 __ cmp(r9, Operand(kBinary32MinExponent));
3832 __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, lt);
3833 __ b(lt, &done);
3834
3835 __ and_(r7, r5, Operand(HeapNumber::kSignMask));
3836 __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
3837 __ orr(r7, r7, Operand(r5, LSL, kMantissaInHiWordShift));
3838 __ orr(r7, r7, Operand(r6, LSR, kMantissaInLoWordShift));
3839 __ orr(r5, r7, Operand(r9, LSL, kBinary32ExponentShift));
3840
3841 __ bind(&done);
3842 __ str(r5, MemOperand(r3, r4, LSL, 2));
3843 // Entry registers are intact; r0 holds the value, which is the return
3844 // value.
3845 __ Ret();
3846
3847 __ bind(&nan_or_infinity_or_zero);
3848 __ and_(r7, r5, Operand(HeapNumber::kSignMask));
3849 __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
3850 __ orr(r9, r9, r7);
3851 __ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift));
3852 __ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift));
3853 __ b(&done);
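[Editor's note] The normal-number branch above packs the sign, the rebiased exponent, and the top 23 of the 52 mantissa bits into a binary32 word. The same repacking in 64-bit arithmetic (a hedged sketch; the gt/lt branches above handle binary32 overflow and underflow, and the NaN/Infinity/zero cases take the separate label):

  #include <cstdint>
  // Valid only for normal doubles whose rebiased exponent fits [1, 254].
  uint32_t RepackNormalDoubleAsFloat(uint64_t d_bits) {
    uint32_t sign = static_cast<uint32_t>(d_bits >> 63) << 31;
    int32_t exponent = static_cast<int32_t>((d_bits >> 52) & 0x7FF)
                       - 1023 + 127;  // HeapNumber bias -> binary32 bias
    // Top 23 of the 52 mantissa bits; the discarded low bits are truncated,
    // not rounded, exactly as in the shift/orr sequence above.
    uint32_t mantissa = static_cast<uint32_t>((d_bits >> 29) & 0x7FFFFF);
    return sign | (static_cast<uint32_t>(exponent) << 23) | mantissa;
  }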
3854 } else {
3855 bool is_signed_type = IsElementTypeSigned(array_type);
3856 int meaningful_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
3857 int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;
3858
3859 Label done, sign;
3860
3861 // Test for all special exponent values: zeros, subnormal numbers, NaNs
3862 // and infinities. All these should be converted to 0.
3863 __ mov(r7, Operand(HeapNumber::kExponentMask));
3864 __ and_(r9, r5, Operand(r7), SetCC);
3865 __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
3866 __ b(eq, &done);
3867
3868 __ teq(r9, Operand(r7));
3869 __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
3870 __ b(eq, &done);
3871
3872 // Unbias exponent.
3873 __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
3874 __ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC);
3875 // If the exponent is negative, the result is 0.
3876 __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, mi);
3877 __ b(mi, &done);
3878
3879 // If the exponent is too big, the result is the minimal value.
3880 __ cmp(r9, Operand(meaningful_bits - 1));
3881 __ mov(r5, Operand(min_value), LeaveCC, ge);
3882 __ b(ge, &done);
3883
3884 __ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC);
3885 __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
3886 __ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
3887
3888 __ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
3889 __ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl);
3890 __ b(pl, &sign);
3891
3892 __ rsb(r9, r9, Operand(0, RelocInfo::NONE));
3893 __ mov(r5, Operand(r5, LSL, r9));
3894 __ rsb(r9, r9, Operand(meaningful_bits));
3895 __ orr(r5, r5, Operand(r6, LSR, r9));
3896
3897 __ bind(&sign);
3898 __ teq(r7, Operand(0, RelocInfo::NONE));
3899 __ rsb(r5, r5, Operand(0, RelocInfo::NONE), LeaveCC, ne);
3900
3901 __ bind(&done);
3902 switch (array_type) {
3903 case kExternalByteArray:
3904 case kExternalUnsignedByteArray:
3905 __ strb(r5, MemOperand(r3, r4, LSL, 0));
3906 break;
3907 case kExternalShortArray:
3908 case kExternalUnsignedShortArray:
3909 __ strh(r5, MemOperand(r3, r4, LSL, 1));
3910 break;
3911 case kExternalIntArray:
3912 case kExternalUnsignedIntArray:
3913 __ str(r5, MemOperand(r3, r4, LSL, 2));
3914 break;
3915 default:
3916 UNREACHABLE();
3917 break;
3918 }
3919 }
3920 }
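[Editor's note] To summarize the non-VFP3 integer path, a hedged 64-bit sketch of the whole truncation (hypothetical helper; the stub performs the same shifts on the split high/low words, and negative values are negated unconditionally, so unsigned arrays receive the wrapped two's-complement pattern):

  #include <cstdint>
  // Specials and tiny values -> 0, overflow -> min_value, otherwise shift
  // the implicit-1-extended mantissa by the unbiased exponent and apply
  // the sign, mirroring the branch structure above.
  int32_t ManualTruncate(uint64_t d_bits, bool is_signed) {
    int meaningful_bits = is_signed ? 31 : 32;
    int32_t min_value = is_signed ? INT32_MIN : 0;
    int32_t exponent = static_cast<int32_t>((d_bits >> 52) & 0x7FF);
    if (exponent == 0 || exponent == 0x7FF) return 0;  // zero/denormal/NaN/Inf
    exponent -= 1023;                                  // unbias
    if (exponent < 0) return 0;                        // |value| < 1
    if (exponent >= meaningful_bits - 1) return min_value;
    uint64_t mantissa = (d_bits & ((1ULL << 52) - 1)) | (1ULL << 52);
    int32_t magnitude = static_cast<int32_t>(mantissa >> (52 - exponent));
    return (d_bits >> 63) ? -magnitude : magnitude;
  }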
3921
3922 // Slow case: call runtime.
3923 __ bind(&slow);
3924
3925 // Entry registers are intact.
3926 // ---------- S t a t e --------------
3927 // -- r0 : value
3928 // -- r1 : key
3929 // -- r2 : receiver
3930 // -- lr : return address
3931 // -----------------------------------
3932
3933 // Push receiver, key and value for runtime call.
3934 __ Push(r2, r1, r0);
3935
3936 __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
3937
3938 return GetCode(flags);
3939 }
3940
3941
3942 #undef __
3943 
3944 } }  // namespace v8::internal
3945 
3946 #endif  // V8_TARGET_ARCH_ARM