| OLD | NEW |
| 1 // Copyright 2006-2008 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2008 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 77 matching lines...) |
| 88 | 88 |
| 89 // If this assert fails, we have to check upper bound too. | 89 // If this assert fails, we have to check upper bound too. |
| 90 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE); | 90 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE); |
| 91 | 91 |
| 92 GenerateGlobalInstanceTypeCheck(masm, t1, miss); | 92 GenerateGlobalInstanceTypeCheck(masm, t1, miss); |
| 93 | 93 |
| 94 // Check that the global object does not require access checks. | 94 // Check that the global object does not require access checks. |
| 95 __ ldrb(t1, FieldMemOperand(t0, Map::kBitFieldOffset)); | 95 __ ldrb(t1, FieldMemOperand(t0, Map::kBitFieldOffset)); |
| 96 __ tst(t1, Operand((1 << Map::kIsAccessCheckNeeded) | | 96 __ tst(t1, Operand((1 << Map::kIsAccessCheckNeeded) | |
| 97 (1 << Map::kHasNamedInterceptor))); | 97 (1 << Map::kHasNamedInterceptor))); |
| 98 __ b(nz, miss); | 98 __ b(ne, miss); |
| 99 | 99 |
| 100 __ ldr(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); | 100 __ ldr(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); |
| 101 __ ldr(t1, FieldMemOperand(elements, HeapObject::kMapOffset)); | 101 __ ldr(t1, FieldMemOperand(elements, HeapObject::kMapOffset)); |
| 102 __ LoadRoot(ip, Heap::kHashTableMapRootIndex); | 102 __ LoadRoot(ip, Heap::kHashTableMapRootIndex); |
| 103 __ cmp(t1, ip); | 103 __ cmp(t1, ip); |
| 104 __ b(nz, miss); | 104 __ b(ne, miss); |
| 105 } | 105 } |
| 106 | 106 |
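The `tst` above folds two receiver checks into a single bitmask test: either bit being set sends the generated code to the miss label. A host-side sketch of the same predicate; the bit positions here are illustrative assumptions, not V8's actual `Map` bit-field layout:

```cpp
#include <cstdint>

// Hypothetical bit positions, for illustration only; the real values come
// from Map's bit-field definitions.
const int kIsAccessCheckNeeded = 1;
const int kHasNamedInterceptor = 2;

// True when the receiver needs the slow path: either bit set means miss.
bool RequiresMiss(uint8_t bit_field) {
  return (bit_field & ((1 << kIsAccessCheckNeeded) |
                       (1 << kHasNamedInterceptor))) != 0;
}
```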
| 107 | 107 |
| 108 // Probe the string dictionary in the |elements| register. Jump to the | 108 // Probe the string dictionary in the |elements| register. Jump to the |
| 109 // |done| label if a property with the given name is found. Jump to | 109 // |done| label if a property with the given name is found. Jump to |
| 110 // the |miss| label otherwise. | 110 // the |miss| label otherwise. |
| 111 static void GenerateStringDictionaryProbes(MacroAssembler* masm, | 111 static void GenerateStringDictionaryProbes(MacroAssembler* masm, |
| 112 Label* miss, | 112 Label* miss, |
| 113 Label* done, | 113 Label* done, |
| 114 Register elements, | 114 Register elements, |
| 115 Register name, | 115 Register name, |
| 116 Register scratch1, | 116 Register scratch1, |
| 117 Register scratch2) { | 117 Register scratch2) { |
| 118 // Assert that name contains a string. |
| 119 if (FLAG_debug_code) __ AbortIfNotString(name); |
| 120 |
| 118 // Compute the capacity mask. | 121 // Compute the capacity mask. |
| 119 const int kCapacityOffset = StringDictionary::kHeaderSize + | 122 const int kCapacityOffset = StringDictionary::kHeaderSize + |
| 120 StringDictionary::kCapacityIndex * kPointerSize; | 123 StringDictionary::kCapacityIndex * kPointerSize; |
| 121 __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset)); | 124 __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset)); |
| 122 __ mov(scratch1, Operand(scratch1, ASR, kSmiTagSize)); // convert smi to int | 125 __ mov(scratch1, Operand(scratch1, ASR, kSmiTagSize)); // convert smi to int |
| 123 __ sub(scratch1, scratch1, Operand(1)); | 126 __ sub(scratch1, scratch1, Operand(1)); |
| 124 | 127 |
| 125 const int kElementsStartOffset = StringDictionary::kHeaderSize + | 128 const int kElementsStartOffset = StringDictionary::kHeaderSize + |
| 126 StringDictionary::kElementsStartIndex * kPointerSize; | 129 StringDictionary::kElementsStartIndex * kPointerSize; |
| 127 | 130 |
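The capacity mask and element-start offset computed here feed the probe loop in the elided hunk below. A minimal host-side sketch of the lookup shape those constants support, assuming simple linear probing over a power-of-two table (V8's StringDictionary uses precomputed probe offsets, but the mask-and-retry structure is the same):

```cpp
#include <cstdint>
#include <cstring>

// Returns the entry index for `name`, or -1 on a miss. `keys` stands in for
// the dictionary's key slots; a NULL slot means the bucket is empty.
int FindEntry(const char** keys, uint32_t capacity, const char* name,
              uint32_t hash) {
  uint32_t mask = capacity - 1;  // Valid because capacity is a power of two.
  for (uint32_t attempt = 0; attempt < capacity; attempt++) {
    uint32_t index = (hash + attempt) & mask;  // Wrap with the mask.
    if (keys[index] == NULL) return -1;        // Empty bucket: miss.
    if (strcmp(keys[index], name) == 0) return static_cast<int>(index);
  }
  return -1;
}
```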
| (...skipping 246 matching lines...) |
| 374 // -- sp[0] : receiver | 377 // -- sp[0] : receiver |
| 375 // ----------------------------------- | 378 // ----------------------------------- |
| 376 Label miss; | 379 Label miss; |
| 377 | 380 |
| 378 StubCompiler::GenerateLoadArrayLength(masm, r0, r3, &miss); | 381 StubCompiler::GenerateLoadArrayLength(masm, r0, r3, &miss); |
| 379 __ bind(&miss); | 382 __ bind(&miss); |
| 380 StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC); | 383 StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC); |
| 381 } | 384 } |
| 382 | 385 |
| 383 | 386 |
| 384 void LoadIC::GenerateStringLength(MacroAssembler* masm) { | 387 void LoadIC::GenerateStringLength(MacroAssembler* masm, bool support_wrappers) { |
| 385 // ----------- S t a t e ------------- | 388 // ----------- S t a t e ------------- |
| 386 // -- r2 : name | 389 // -- r2 : name |
| 387 // -- lr : return address | 390 // -- lr : return address |
| 388 // -- r0 : receiver | 391 // -- r0 : receiver |
| 389 // -- sp[0] : receiver | 392 // -- sp[0] : receiver |
| 390 // ----------------------------------- | 393 // ----------------------------------- |
| 391 Label miss; | 394 Label miss; |
| 392 | 395 |
| 393 StubCompiler::GenerateLoadStringLength(masm, r0, r1, r3, &miss); | 396 StubCompiler::GenerateLoadStringLength(masm, r0, r1, r3, &miss, |
| 397 support_wrappers); |
| 394 // Cache miss: Jump to runtime. | 398 // Cache miss: Jump to runtime. |
| 395 __ bind(&miss); | 399 __ bind(&miss); |
| 396 StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC); | 400 StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC); |
| 397 } | 401 } |
| 398 | 402 |
| 399 | 403 |
| 400 void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) { | 404 void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) { |
| 401 // ----------- S t a t e ------------- | 405 // ----------- S t a t e ------------- |
| 402 // -- r2 : name | 406 // -- r2 : name |
| 403 // -- lr : return address | 407 // -- lr : return address |
| (...skipping 10 matching lines...) |
| 414 | 418 |
| 415 // Checks the receiver for special cases (value type, slow case bits). | 419 // Checks the receiver for special cases (value type, slow case bits). |
| 416 // Falls through for regular JS object. | 420 // Falls through for regular JS object. |
| 417 static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm, | 421 static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm, |
| 418 Register receiver, | 422 Register receiver, |
| 419 Register map, | 423 Register map, |
| 420 Register scratch, | 424 Register scratch, |
| 421 int interceptor_bit, | 425 int interceptor_bit, |
| 422 Label* slow) { | 426 Label* slow) { |
| 423 // Check that the object isn't a smi. | 427 // Check that the object isn't a smi. |
| 424 __ BranchOnSmi(receiver, slow); | 428 __ JumpIfSmi(receiver, slow); |
| 425 // Get the map of the receiver. | 429 // Get the map of the receiver. |
| 426 __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset)); | 430 __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
| 427 // Check bit field. | 431 // Check bit field. |
| 428 __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset)); | 432 __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset)); |
| 429 __ tst(scratch, | 433 __ tst(scratch, |
| 430 Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit))); | 434 Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit))); |
| 431 __ b(nz, slow); | 435 __ b(ne, slow); |
| 432 // Check that the object is some kind of JS object EXCEPT JS Value type. | 436 // Check that the object is some kind of JS object EXCEPT JS Value type. |
| 433 // In the case that the object is a value-wrapper object, | 437 // In the case that the object is a value-wrapper object, |
| 434 // we enter the runtime system to make sure that indexing into string | 438 // we enter the runtime system to make sure that indexing into string |
| 435 // objects works as intended. | 439 // objects works as intended. |
| 436 ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE); | 440 ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE); |
| 437 __ ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset)); | 441 __ ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
| 438 __ cmp(scratch, Operand(JS_OBJECT_TYPE)); | 442 __ cmp(scratch, Operand(JS_OBJECT_TYPE)); |
| 439 __ b(lt, slow); | 443 __ b(lt, slow); |
| 440 } | 444 } |
| 441 | 445 |
| (...skipping 95 matching lines...) |
| 537 static void GenerateMonomorphicCacheProbe(MacroAssembler* masm, | 541 static void GenerateMonomorphicCacheProbe(MacroAssembler* masm, |
| 538 int argc, | 542 int argc, |
| 539 Code::Kind kind) { | 543 Code::Kind kind) { |
| 540 // ----------- S t a t e ------------- | 544 // ----------- S t a t e ------------- |
| 541 // -- r1 : receiver | 545 // -- r1 : receiver |
| 542 // -- r2 : name | 546 // -- r2 : name |
| 543 // ----------------------------------- | 547 // ----------------------------------- |
| 544 Label number, non_number, non_string, boolean, probe, miss; | 548 Label number, non_number, non_string, boolean, probe, miss; |
| 545 | 549 |
| 546 // Probe the stub cache. | 550 // Probe the stub cache. |
| 547 Code::Flags flags = | 551 Code::Flags flags = Code::ComputeFlags(kind, |
| 548 Code::ComputeFlags(kind, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc); | 552 NOT_IN_LOOP, |
| 553 MONOMORPHIC, |
| 554 Code::kNoExtraICState, |
| 555 NORMAL, |
| 556 argc); |
| 549 StubCache::GenerateProbe(masm, flags, r1, r2, r3, r4, r5); | 557 StubCache::GenerateProbe(masm, flags, r1, r2, r3, r4, r5); |
| 550 | 558 |
| 551 // If the stub cache probing failed, the receiver might be a value. | 559 // If the stub cache probing failed, the receiver might be a value. |
| 552 // For value objects, we use the map of the prototype objects for | 560 // For value objects, we use the map of the prototype objects for |
| 553 // the corresponding JSValue for the cache and that is what we need | 561 // the corresponding JSValue for the cache and that is what we need |
| 554 // to probe. | 562 // to probe. |
| 555 // | 563 // |
| 556 // Check for number. | 564 // Check for number. |
| 557 __ tst(r1, Operand(kSmiTagMask)); | 565 __ tst(r1, Operand(kSmiTagMask)); |
| 558 __ b(eq, &number); | 566 __ b(eq, &number); |
| (...skipping 181 matching lines...) |
| 740 // ----------------------------------- | 748 // ----------------------------------- |
| 741 | 749 |
| 742 // Get the receiver of the function from the stack into r1. | 750 // Get the receiver of the function from the stack into r1. |
| 743 __ ldr(r1, MemOperand(sp, argc * kPointerSize)); | 751 __ ldr(r1, MemOperand(sp, argc * kPointerSize)); |
| 744 | 752 |
| 745 Label do_call, slow_call, slow_load, slow_reload_receiver; | 753 Label do_call, slow_call, slow_load, slow_reload_receiver; |
| 746 Label check_number_dictionary, check_string, lookup_monomorphic_cache; | 754 Label check_number_dictionary, check_string, lookup_monomorphic_cache; |
| 747 Label index_smi, index_string; | 755 Label index_smi, index_string; |
| 748 | 756 |
| 749 // Check that the key is a smi. | 757 // Check that the key is a smi. |
| 750 __ BranchOnNotSmi(r2, &check_string); | 758 __ JumpIfNotSmi(r2, &check_string); |
| 751 __ bind(&index_smi); | 759 __ bind(&index_smi); |
| 752 // Now the key is known to be a smi. This place is also jumped to from below | 760 // Now the key is known to be a smi. This place is also jumped to from below |
| 753 // where a numeric string is converted to a smi. | 761 // where a numeric string is converted to a smi. |
| 754 | 762 |
| 755 GenerateKeyedLoadReceiverCheck( | 763 GenerateKeyedLoadReceiverCheck( |
| 756 masm, r1, r0, r3, Map::kHasIndexedInterceptor, &slow_call); | 764 masm, r1, r0, r3, Map::kHasIndexedInterceptor, &slow_call); |
| 757 | 765 |
| 758 GenerateFastArrayLoad( | 766 GenerateFastArrayLoad( |
| 759 masm, r1, r2, r4, r3, r0, r1, &check_number_dictionary, &slow_load); | 767 masm, r1, r2, r4, r3, r0, r1, &check_number_dictionary, &slow_load); |
| 760 __ IncrementCounter(&Counters::keyed_call_generic_smi_fast, 1, r0, r3); | 768 __ IncrementCounter(&Counters::keyed_call_generic_smi_fast, 1, r0, r3); |
| (...skipping 72 matching lines...) |
| 833 __ jmp(&index_smi); | 841 __ jmp(&index_smi); |
| 834 } | 842 } |
| 835 | 843 |
| 836 | 844 |
| 837 void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) { | 845 void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) { |
| 838 // ----------- S t a t e ------------- | 846 // ----------- S t a t e ------------- |
| 839 // -- r2 : name | 847 // -- r2 : name |
| 840 // -- lr : return address | 848 // -- lr : return address |
| 841 // ----------------------------------- | 849 // ----------------------------------- |
| 842 | 850 |
| 851 // Check if the name is a string. |
| 852 Label miss; |
| 853 __ tst(r2, Operand(kSmiTagMask)); |
| 854 __ b(eq, &miss); |
| 855 __ IsObjectJSStringType(r2, r0, &miss); |
| 856 |
| 843 GenerateCallNormal(masm, argc); | 857 GenerateCallNormal(masm, argc); |
| 858 __ bind(&miss); |
| 844 GenerateMiss(masm, argc); | 859 GenerateMiss(masm, argc); |
| 845 } | 860 } |
| 846 | 861 |
| 847 | 862 |
| 848 // Defined in ic.cc. | 863 // Defined in ic.cc. |
| 849 Object* LoadIC_Miss(Arguments args); | 864 Object* LoadIC_Miss(Arguments args); |
| 850 | 865 |
| 851 void LoadIC::GenerateMegamorphic(MacroAssembler* masm) { | 866 void LoadIC::GenerateMegamorphic(MacroAssembler* masm) { |
| 852 // ----------- S t a t e ------------- | 867 // ----------- S t a t e ------------- |
| 853 // -- r2 : name | 868 // -- r2 : name |
| (...skipping 304 matching lines...) |
| 1158 // -- r0 : key | 1173 // -- r0 : key |
| 1159 // -- r1 : receiver | 1174 // -- r1 : receiver |
| 1160 // ----------------------------------- | 1175 // ----------------------------------- |
| 1161 Label slow, check_string, index_smi, index_string, property_array_property; | 1176 Label slow, check_string, index_smi, index_string, property_array_property; |
| 1162 Label check_pixel_array, probe_dictionary, check_number_dictionary; | 1177 Label check_pixel_array, probe_dictionary, check_number_dictionary; |
| 1163 | 1178 |
| 1164 Register key = r0; | 1179 Register key = r0; |
| 1165 Register receiver = r1; | 1180 Register receiver = r1; |
| 1166 | 1181 |
| 1167 // Check that the key is a smi. | 1182 // Check that the key is a smi. |
| 1168 __ BranchOnNotSmi(key, &check_string); | 1183 __ JumpIfNotSmi(key, &check_string); |
| 1169 __ bind(&index_smi); | 1184 __ bind(&index_smi); |
| 1170 // Now the key is known to be a smi. This place is also jumped to from below | 1185 // Now the key is known to be a smi. This place is also jumped to from below |
| 1171 // where a numeric string is converted to a smi. | 1186 // where a numeric string is converted to a smi. |
| 1172 | 1187 |
| 1173 GenerateKeyedLoadReceiverCheck( | 1188 GenerateKeyedLoadReceiverCheck( |
| 1174 masm, receiver, r2, r3, Map::kHasIndexedInterceptor, &slow); | 1189 masm, receiver, r2, r3, Map::kHasIndexedInterceptor, &slow); |
| 1175 | 1190 |
| 1176 // Check the "has fast elements" bit in the receiver's map which is | 1191 // Check the "has fast elements" bit in the receiver's map which is |
| 1177 // now in r2. | 1192 // now in r2. |
| 1178 __ ldrb(r3, FieldMemOperand(r2, Map::kBitField2Offset)); | 1193 __ ldrb(r3, FieldMemOperand(r2, Map::kBitField2Offset)); |
| 1179 __ tst(r3, Operand(1 << Map::kHasFastElements)); | 1194 __ tst(r3, Operand(1 << Map::kHasFastElements)); |
| 1180 __ b(eq, &check_pixel_array); | 1195 __ b(eq, &check_pixel_array); |
| 1181 | 1196 |
| 1182 GenerateFastArrayLoad( | 1197 GenerateFastArrayLoad( |
| 1183 masm, receiver, key, r4, r3, r2, r0, NULL, &slow); | 1198 masm, receiver, key, r4, r3, r2, r0, NULL, &slow); |
| 1184 __ IncrementCounter(&Counters::keyed_load_generic_smi, 1, r2, r3); | 1199 __ IncrementCounter(&Counters::keyed_load_generic_smi, 1, r2, r3); |
| 1185 __ Ret(); | 1200 __ Ret(); |
| 1186 | 1201 |
| 1187 // Check whether the elements is a pixel array. | 1202 // Check whether the elements is a pixel array. |
| 1188 // r0: key | 1203 // r0: key |
| 1189 // r1: receiver | 1204 // r1: receiver |
| 1190 __ bind(&check_pixel_array); | 1205 __ bind(&check_pixel_array); |
| 1191 __ ldr(r4, FieldMemOperand(r1, JSObject::kElementsOffset)); | 1206 |
| 1192 __ ldr(r3, FieldMemOperand(r4, HeapObject::kMapOffset)); | 1207 GenerateFastPixelArrayLoad(masm, |
| 1193 __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex); | 1208 r1, |
| 1194 __ cmp(r3, ip); | 1209 r0, |
| 1195 __ b(ne, &check_number_dictionary); | 1210 r3, |
| 1196 __ ldr(ip, FieldMemOperand(r4, PixelArray::kLengthOffset)); | 1211 r4, |
| 1197 __ mov(r2, Operand(key, ASR, kSmiTagSize)); | 1212 r2, |
| 1198 __ cmp(r2, ip); | 1213 r5, |
| 1199 __ b(hs, &slow); | 1214 r0, |
| 1200 __ ldr(ip, FieldMemOperand(r4, PixelArray::kExternalPointerOffset)); | 1215 &check_number_dictionary, |
| 1201 __ ldrb(r2, MemOperand(ip, r2)); | 1216 NULL, |
| 1202 __ mov(r0, Operand(r2, LSL, kSmiTagSize)); // Tag result as smi. | 1217 &slow); |
| 1203 __ Ret(); | |
| 1204 | 1218 |
| 1205 __ bind(&check_number_dictionary); | 1219 __ bind(&check_number_dictionary); |
| 1206 // Check whether the elements is a number dictionary. | 1220 // Check whether the elements is a number dictionary. |
| 1207 // r0: key | 1221 // r0: key |
| 1208 // r3: elements map | 1222 // r3: elements map |
| 1209 // r4: elements | 1223 // r4: elements |
| 1210 __ LoadRoot(ip, Heap::kHashTableMapRootIndex); | 1224 __ LoadRoot(ip, Heap::kHashTableMapRootIndex); |
| 1211 __ cmp(r3, ip); | 1225 __ cmp(r3, ip); |
| 1212 __ b(ne, &slow); | 1226 __ b(ne, &slow); |
| 1213 __ mov(r2, Operand(r0, ASR, kSmiTagSize)); | 1227 __ mov(r2, Operand(r0, ASR, kSmiTagSize)); |
| (...skipping 116 matching lines...) |
| 1330 __ Ret(); | 1344 __ Ret(); |
| 1331 | 1345 |
| 1332 StubRuntimeCallHelper call_helper; | 1346 StubRuntimeCallHelper call_helper; |
| 1333 char_at_generator.GenerateSlow(masm, call_helper); | 1347 char_at_generator.GenerateSlow(masm, call_helper); |
| 1334 | 1348 |
| 1335 __ bind(&miss); | 1349 __ bind(&miss); |
| 1336 GenerateMiss(masm); | 1350 GenerateMiss(masm); |
| 1337 } | 1351 } |
| 1338 | 1352 |
| 1339 | 1353 |
| 1340 // Convert unsigned integer with specified number of leading zeroes in binary | |
| 1341 // representation to IEEE 754 double. | |
| 1342 // Integer to convert is passed in register hiword. | |
| 1343 // Resulting double is returned in registers hiword:loword. | |
| 1344 // This function does not work correctly for 0. | |
| 1345 static void GenerateUInt2Double(MacroAssembler* masm, | |
| 1346 Register hiword, | |
| 1347 Register loword, | |
| 1348 Register scratch, | |
| 1349 int leading_zeroes) { | |
| 1350 const int meaningful_bits = kBitsPerInt - leading_zeroes - 1; | |
| 1351 const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits; | |
| 1352 | |
| 1353 const int mantissa_shift_for_hi_word = | |
| 1354 meaningful_bits - HeapNumber::kMantissaBitsInTopWord; | |
| 1355 | |
| 1356 const int mantissa_shift_for_lo_word = | |
| 1357 kBitsPerInt - mantissa_shift_for_hi_word; | |
| 1358 | |
| 1359 __ mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift)); | |
| 1360 if (mantissa_shift_for_hi_word > 0) { | |
| 1361 __ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word)); | |
| 1362 __ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word)); | |
| 1363 } else { | |
| 1364 __ mov(loword, Operand(0, RelocInfo::NONE)); | |
| 1365 __ orr(hiword, scratch, Operand(hiword, LSL, mantissa_shift_for_hi_word)); | |
| 1366 } | |
| 1367 | |
| 1368 // If the least significant bit of the biased exponent was not 1, it was | |
| 1369 // corrupted by the most significant bit of the mantissa, so fix that here. | |
| 1370 if (!(biased_exponent & 1)) { | |
| 1371 __ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift)); | |
| 1372 } | |
| 1373 } | |
| 1374 | |
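For exposition, a host-side sketch that computes the same IEEE 754 double bit pattern GenerateUInt2Double assembles into hiword:loword, under the same preconditions (nonzero input whose leading-zero count is known). It masks off the implicit leading 1 up front, so it needs no separate fix-up step; like the stub, it does not handle 0:

```cpp
#include <cstdint>

// Build the double bits for a nonzero uint32 with a known leading-zero count.
uint64_t UInt2DoubleBits(uint32_t value, int leading_zeroes) {
  const int kExponentBias = 1023;
  const int meaningful_bits = 32 - leading_zeroes - 1;
  uint64_t exponent = static_cast<uint64_t>(kExponentBias + meaningful_bits);
  // Strip the implicit leading 1, then left-align into the 52-bit mantissa.
  uint64_t mantissa = value & ((1u << meaningful_bits) - 1);
  mantissa <<= (52 - meaningful_bits);
  return (exponent << 52) | mantissa;
}
```

For example, value 1 (31 leading zeros) yields 0x3FF0000000000000, i.e. 1.0.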
| 1375 | |
| 1376 void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm, | |
| 1377 ExternalArrayType array_type) { | |
| 1378 // ---------- S t a t e -------------- | |
| 1379 // -- lr : return address | |
| 1380 // -- r0 : key | |
| 1381 // -- r1 : receiver | |
| 1382 // ----------------------------------- | |
| 1383 Label slow, failed_allocation; | |
| 1384 | |
| 1385 Register key = r0; | |
| 1386 Register receiver = r1; | |
| 1387 | |
| 1388 // Check that the object isn't a smi | |
| 1389 __ BranchOnSmi(receiver, &slow); | |
| 1390 | |
| 1391 // Check that the key is a smi. | |
| 1392 __ BranchOnNotSmi(key, &slow); | |
| 1393 | |
| 1394 // Check that the object is a JS object. Load map into r2. | |
| 1395 __ CompareObjectType(receiver, r2, r3, FIRST_JS_OBJECT_TYPE); | |
| 1396 __ b(lt, &slow); | |
| 1397 | |
| 1398 // Check that the receiver does not require access checks. We need | |
| 1399 // to check this explicitly since this generic stub does not perform | |
| 1400 // map checks. | |
| 1401 __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset)); | |
| 1402 __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded)); | |
| 1403 __ b(ne, &slow); | |
| 1404 | |
| 1405 // Check that the elements array is the appropriate type of | |
| 1406 // ExternalArray. | |
| 1407 __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset)); | |
| 1408 __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset)); | |
| 1409 __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type)); | |
| 1410 __ cmp(r2, ip); | |
| 1411 __ b(ne, &slow); | |
| 1412 | |
| 1413 // Check that the index is in range. | |
| 1414 __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset)); | |
| 1415 __ cmp(ip, Operand(key, ASR, kSmiTagSize)); | |
| 1416 // Unsigned comparison catches both negative and too-large values. | |
| 1417 __ b(lo, &slow); | |
| 1418 | |
| 1419 // r3: elements array | |
| 1420 __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset)); | |
| 1421 // r3: base pointer of external storage | |
| 1422 | |
| 1423 // We do not untag the smi key; instead we work with it | |
| 1424 // as if it were premultiplied by 2. | |
| 1425 ASSERT((kSmiTag == 0) && (kSmiTagSize == 1)); | |
| 1426 | |
| 1427 Register value = r2; | |
| 1428 switch (array_type) { | |
| 1429 case kExternalByteArray: | |
| 1430 __ ldrsb(value, MemOperand(r3, key, LSR, 1)); | |
| 1431 break; | |
| 1432 case kExternalUnsignedByteArray: | |
| 1433 __ ldrb(value, MemOperand(r3, key, LSR, 1)); | |
| 1434 break; | |
| 1435 case kExternalShortArray: | |
| 1436 __ ldrsh(value, MemOperand(r3, key, LSL, 0)); | |
| 1437 break; | |
| 1438 case kExternalUnsignedShortArray: | |
| 1439 __ ldrh(value, MemOperand(r3, key, LSL, 0)); | |
| 1440 break; | |
| 1441 case kExternalIntArray: | |
| 1442 case kExternalUnsignedIntArray: | |
| 1443 __ ldr(value, MemOperand(r3, key, LSL, 1)); | |
| 1444 break; | |
| 1445 case kExternalFloatArray: | |
| 1446 if (CpuFeatures::IsSupported(VFP3)) { | |
| 1447 CpuFeatures::Scope scope(VFP3); | |
| 1448 __ add(r2, r3, Operand(key, LSL, 1)); | |
| 1449 __ vldr(s0, r2, 0); | |
| 1450 } else { | |
| 1451 __ ldr(value, MemOperand(r3, key, LSL, 1)); | |
| 1452 } | |
| 1453 break; | |
| 1454 default: | |
| 1455 UNREACHABLE(); | |
| 1456 break; | |
| 1457 } | |
| 1458 | |
| 1459 // For integer array types: | |
| 1460 // r2: value | |
| 1461 // For floating-point array type | |
| 1462 // s0: value (if VFP3 is supported) | |
| 1463 // r2: value (if VFP3 is not supported) | |
| 1464 | |
| 1465 if (array_type == kExternalIntArray) { | |
| 1466 // For the Int and UnsignedInt array types, we need to see whether | |
| 1467 // the value can be represented in a Smi. If not, we need to convert | |
| 1468 // it to a HeapNumber. | |
| 1469 Label box_int; | |
| 1470 __ cmp(value, Operand(0xC0000000)); | |
| 1471 __ b(mi, &box_int); | |
| 1472 // Tag integer as smi and return it. | |
| 1473 __ mov(r0, Operand(value, LSL, kSmiTagSize)); | |
| 1474 __ Ret(); | |
| 1475 | |
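Why comparing against 0xC0000000 and branching on mi detects values that need boxing: an ARM smi carries a 31-bit payload, so only integers in [-2^30, 2^30) can be tagged, and subtracting 0xC0000000 is the same as adding 2^30, which leaves the N flag clear exactly for that range. Host-side equivalent of the predicate:

```cpp
#include <cstdint>

// A 32-bit integer fits in an ARM smi (31-bit payload) iff it lies in
// [-2^30, 2^30). The stub's cmp/mi pair tests exactly this.
bool FitsInSmi(int32_t value) {
  return value >= -(1 << 30) && value < (1 << 30);
}
```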
| 1476 __ bind(&box_int); | |
| 1477 // Allocate a HeapNumber for the result and perform int-to-double | |
| 1478 // conversion. Don't touch r0 or r1 as they are needed if allocation | |
| 1479 // fails. | |
| 1480 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); | |
| 1481 __ AllocateHeapNumber(r5, r3, r4, r6, &slow); | |
| 1482 // Now we can use r0 for the result as key is not needed any more. | |
| 1483 __ mov(r0, r5); | |
| 1484 | |
| 1485 if (CpuFeatures::IsSupported(VFP3)) { | |
| 1486 CpuFeatures::Scope scope(VFP3); | |
| 1487 __ vmov(s0, value); | |
| 1488 __ vcvt_f64_s32(d0, s0); | |
| 1489 __ sub(r3, r0, Operand(kHeapObjectTag)); | |
| 1490 __ vstr(d0, r3, HeapNumber::kValueOffset); | |
| 1491 __ Ret(); | |
| 1492 } else { | |
| 1493 WriteInt32ToHeapNumberStub stub(value, r0, r3); | |
| 1494 __ TailCallStub(&stub); | |
| 1495 } | |
| 1496 } else if (array_type == kExternalUnsignedIntArray) { | |
| 1497 // The test is different for unsigned int values. Since we need | |
| 1498 // the value to be in the range of a positive smi, we can't | |
| 1499 // handle either of the top two bits being set in the value. | |
| 1500 if (CpuFeatures::IsSupported(VFP3)) { | |
| 1501 CpuFeatures::Scope scope(VFP3); | |
| 1502 Label box_int, done; | |
| 1503 __ tst(value, Operand(0xC0000000)); | |
| 1504 __ b(ne, &box_int); | |
| 1505 // Tag integer as smi and return it. | |
| 1506 __ mov(r0, Operand(value, LSL, kSmiTagSize)); | |
| 1507 __ Ret(); | |
| 1508 | |
| 1509 __ bind(&box_int); | |
| 1510 __ vmov(s0, value); | |
| 1511 // Allocate a HeapNumber for the result and perform int-to-double | |
| 1512 // conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all | |
| 1513 // registers - also when jumping due to exhausted young space. | |
| 1514 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); | |
| 1515 __ AllocateHeapNumber(r2, r3, r4, r6, &slow); | |
| 1516 | |
| 1517 __ vcvt_f64_u32(d0, s0); | |
| 1518 __ sub(r1, r2, Operand(kHeapObjectTag)); | |
| 1519 __ vstr(d0, r1, HeapNumber::kValueOffset); | |
| 1520 | |
| 1521 __ mov(r0, r2); | |
| 1522 __ Ret(); | |
| 1523 } else { | |
| 1524 // Check whether unsigned integer fits into smi. | |
| 1525 Label box_int_0, box_int_1, done; | |
| 1526 __ tst(value, Operand(0x80000000)); | |
| 1527 __ b(ne, &box_int_0); | |
| 1528 __ tst(value, Operand(0x40000000)); | |
| 1529 __ b(ne, &box_int_1); | |
| 1530 // Tag integer as smi and return it. | |
| 1531 __ mov(r0, Operand(value, LSL, kSmiTagSize)); | |
| 1532 __ Ret(); | |
| 1533 | |
| 1534 Register hiword = value; // r2. | |
| 1535 Register loword = r3; | |
| 1536 | |
| 1537 __ bind(&box_int_0); | |
| 1538 // Integer does not have leading zeros. | |
| 1539 GenerateUInt2Double(masm, hiword, loword, r4, 0); | |
| 1540 __ b(&done); | |
| 1541 | |
| 1542 __ bind(&box_int_1); | |
| 1543 // Integer has one leading zero. | |
| 1544 GenerateUInt2Double(masm, hiword, loword, r4, 1); | |
| 1545 | |
| 1546 | |
| 1547 __ bind(&done); | |
| 1548 // Integer was converted to double in registers hiword:loword. | |
| 1549 // Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber | |
| 1550 // clobbers all registers - also when jumping due to exhausted young | |
| 1551 // space. | |
| 1552 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); | |
| 1553 __ AllocateHeapNumber(r4, r5, r7, r6, &slow); | |
| 1554 | |
| 1555 __ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset)); | |
| 1556 __ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset)); | |
| 1557 | |
| 1558 __ mov(r0, r4); | |
| 1559 __ Ret(); | |
| 1560 } | |
| 1561 } else if (array_type == kExternalFloatArray) { | |
| 1562 // For the floating-point array type, we need to always allocate a | |
| 1563 // HeapNumber. | |
| 1564 if (CpuFeatures::IsSupported(VFP3)) { | |
| 1565 CpuFeatures::Scope scope(VFP3); | |
| 1566 // Allocate a HeapNumber for the result. Don't use r0 and r1 as | |
| 1567 // AllocateHeapNumber clobbers all registers - also when jumping due to | |
| 1568 // exhausted young space. | |
| 1569 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); | |
| 1570 __ AllocateHeapNumber(r2, r3, r4, r6, &slow); | |
| 1571 __ vcvt_f64_f32(d0, s0); | |
| 1572 __ sub(r1, r2, Operand(kHeapObjectTag)); | |
| 1573 __ vstr(d0, r1, HeapNumber::kValueOffset); | |
| 1574 | |
| 1575 __ mov(r0, r2); | |
| 1576 __ Ret(); | |
| 1577 } else { | |
| 1578 // Allocate a HeapNumber for the result. Don't use r0 and r1 as | |
| 1579 // AllocateHeapNumber clobbers all registers - also when jumping due to | |
| 1580 // exhausted young space. | |
| 1581 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); | |
| 1582 __ AllocateHeapNumber(r3, r4, r5, r6, &slow); | |
| 1583 // VFP is not available, do manual single to double conversion. | |
| 1584 | |
| 1585 // r2: floating point value (binary32) | |
| 1586 // r3: heap number for result | |
| 1587 | |
| 1588 // Extract mantissa to r0. OK to clobber r0 now as there are no jumps to | |
| 1589 // the slow case from here. | |
| 1590 __ and_(r0, value, Operand(kBinary32MantissaMask)); | |
| 1591 | |
| 1592 // Extract exponent to r1. OK to clobber r1 now as there are no jumps to | |
| 1593 // the slow case from here. | |
| 1594 __ mov(r1, Operand(value, LSR, kBinary32MantissaBits)); | |
| 1595 __ and_(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits)); | |
| 1596 | |
| 1597 Label exponent_rebiased; | |
| 1598 __ teq(r1, Operand(0x00)); | |
| 1599 __ b(eq, &exponent_rebiased); | |
| 1600 | |
| 1601 __ teq(r1, Operand(0xff)); | |
| 1602 __ mov(r1, Operand(0x7ff), LeaveCC, eq); | |
| 1603 __ b(eq, &exponent_rebiased); | |
| 1604 | |
| 1605 // Rebias exponent. | |
| 1606 __ add(r1, | |
| 1607 r1, | |
| 1608 Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias)); | |
| 1609 | |
| 1610 __ bind(&exponent_rebiased); | |
| 1611 __ and_(r2, value, Operand(kBinary32SignMask)); | |
| 1612 value = no_reg; | |
| 1613 __ orr(r2, r2, Operand(r1, LSL, HeapNumber::kMantissaBitsInTopWord)); | |
| 1614 | |
| 1615 // Shift mantissa. | |
| 1616 static const int kMantissaShiftForHiWord = | |
| 1617 kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord; | |
| 1618 | |
| 1619 static const int kMantissaShiftForLoWord = | |
| 1620 kBitsPerInt - kMantissaShiftForHiWord; | |
| 1621 | |
| 1622 __ orr(r2, r2, Operand(r0, LSR, kMantissaShiftForHiWord)); | |
| 1623 __ mov(r0, Operand(r0, LSL, kMantissaShiftForLoWord)); | |
| 1624 | |
| 1625 __ str(r2, FieldMemOperand(r3, HeapNumber::kExponentOffset)); | |
| 1626 __ str(r0, FieldMemOperand(r3, HeapNumber::kMantissaOffset)); | |
| 1627 | |
| 1628 __ mov(r0, r3); | |
| 1629 __ Ret(); | |
| 1630 } | |
| 1631 | |
| 1632 } else { | |
| 1633 // Tag integer as smi and return it. | |
| 1634 __ mov(r0, Operand(value, LSL, kSmiTagSize)); | |
| 1635 __ Ret(); | |
| 1636 } | |
| 1637 | |
| 1638 // Slow case, key and receiver still in r0 and r1. | |
| 1639 __ bind(&slow); | |
| 1640 __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1, r2, r3); | |
| 1641 GenerateRuntimeGetProperty(masm); | |
| 1642 } | |
| 1643 | |
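A compact host-side sketch of the manual binary32-to-binary64 widening in the non-VFP branch above: forward zero and all-ones exponent fields unchanged (zeros/denormals and NaN/infinity), rebias everything else from 127 to 1023, and left-align the mantissa. Like the stub, subnormal inputs are not expanded precisely:

```cpp
#include <cstdint>

uint64_t Float32BitsToFloat64Bits(uint32_t f) {
  uint64_t sign = static_cast<uint64_t>(f >> 31) << 63;
  uint32_t exponent = (f >> 23) & 0xFF;
  uint64_t mantissa = static_cast<uint64_t>(f & 0x7FFFFF) << (52 - 23);
  uint64_t exponent64;
  if (exponent == 0) {
    exponent64 = 0;      // Zero (and, imprecisely, denormals).
  } else if (exponent == 0xFF) {
    exponent64 = 0x7FF;  // NaN and infinity keep an all-ones exponent.
  } else {
    exponent64 = exponent - 127 + 1023;  // Rebias binary32 -> binary64.
  }
  return sign | (exponent64 << 52) | mantissa;
}
```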
| 1644 | |
| 1645 void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) { | 1354 void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) { |
| 1646 // ---------- S t a t e -------------- | 1355 // ---------- S t a t e -------------- |
| 1647 // -- lr : return address | 1356 // -- lr : return address |
| 1648 // -- r0 : key | 1357 // -- r0 : key |
| 1649 // -- r1 : receiver | 1358 // -- r1 : receiver |
| 1650 // ----------------------------------- | 1359 // ----------------------------------- |
| 1651 Label slow; | 1360 Label slow; |
| 1652 | 1361 |
| 1653 // Check that the receiver isn't a smi. | 1362 // Check that the receiver isn't a smi. |
| 1654 __ BranchOnSmi(r1, &slow); | 1363 __ JumpIfSmi(r1, &slow); |
| 1655 | 1364 |
| 1656 // Check that the key is an array index, that is Uint32. | 1365 // Check that the key is an array index, that is Uint32. |
| 1657 __ tst(r0, Operand(kSmiTagMask | kSmiSignMask)); | 1366 __ tst(r0, Operand(kSmiTagMask | kSmiSignMask)); |
| 1658 __ b(ne, &slow); | 1367 __ b(ne, &slow); |
| 1659 | 1368 |
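The single `tst` above checks two properties at once: with smi tag 0 in bit 0, a valid array-index key must have both the tag bit and the sign bit clear. Host-side equivalent of the predicate:

```cpp
#include <cstdint>

// True when the raw tagged word is a non-negative smi, i.e. usable as an
// array index. Mirrors `tst r0, #(kSmiTagMask | kSmiSignMask)`.
bool IsNonNegativeSmi(uint32_t raw) {
  const uint32_t kSmiTagMask = 1u;            // Smi tag lives in bit 0.
  const uint32_t kSmiSignMask = 0x80000000u;  // Sign bit of the tagged word.
  return (raw & (kSmiTagMask | kSmiSignMask)) == 0;
}
```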
| 1660 // Get the map of the receiver. | 1369 // Get the map of the receiver. |
| 1661 __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); | 1370 __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); |
| 1662 | 1371 |
| 1663 // Check that it has indexed interceptor and access checks | 1372 // Check that it has indexed interceptor and access checks |
| 1664 // are not enabled for this object. | 1373 // are not enabled for this object. |
| (...skipping 98 matching lines...) |
| 1763 __ bind(&slow); | 1472 __ bind(&slow); |
| 1764 // Entry registers are intact. | 1473 // Entry registers are intact. |
| 1765 // r0: value. | 1474 // r0: value. |
| 1766 // r1: key. | 1475 // r1: key. |
| 1767 // r2: receiver. | 1476 // r2: receiver. |
| 1768 GenerateRuntimeSetProperty(masm); | 1477 GenerateRuntimeSetProperty(masm); |
| 1769 | 1478 |
| 1770 // Check whether the elements is a pixel array. | 1479 // Check whether the elements is a pixel array. |
| 1771 // r4: elements map. | 1480 // r4: elements map. |
| 1772 __ bind(&check_pixel_array); | 1481 __ bind(&check_pixel_array); |
| 1773 __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex); | 1482 GenerateFastPixelArrayStore(masm, |
| 1774 __ cmp(r4, ip); | 1483 r2, |
| 1775 __ b(ne, &slow); | 1484 r1, |
| 1776 // Check that the value is a smi. If a conversion is needed call into the | 1485 r0, |
| 1777 // runtime to convert and clamp. | 1486 elements, |
| 1778 __ BranchOnNotSmi(value, &slow); | 1487 r4, |
| 1779 __ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag the key. | 1488 r5, |
| 1780 __ ldr(ip, FieldMemOperand(elements, PixelArray::kLengthOffset)); | 1489 r6, |
| 1781 __ cmp(r4, Operand(ip)); | 1490 false, |
| 1782 __ b(hs, &slow); | 1491 false, |
| 1783 __ mov(r5, Operand(value, ASR, kSmiTagSize)); // Untag the value. | 1492 NULL, |
| 1784 __ Usat(r5, 8, Operand(r5)); // Clamp the value to [0..255]. | 1493 &slow, |
| 1785 | 1494 &slow, |
| 1786 // Get the pointer to the external array. This clobbers elements. | 1495 &slow); |
| 1787 __ ldr(elements, | |
| 1788 FieldMemOperand(elements, PixelArray::kExternalPointerOffset)); | |
| 1789 __ strb(r5, MemOperand(elements, r4)); // Elements is now external array. | |
| 1790 __ Ret(); | |
| 1791 | 1496 |
| 1792 // Extra capacity case: Check if there is extra capacity to | 1497 // Extra capacity case: Check if there is extra capacity to |
| 1793 // perform the store and update the length. Used for adding one | 1498 // perform the store and update the length. Used for adding one |
| 1794 // element to the array by writing to array[array.length]. | 1499 // element to the array by writing to array[array.length]. |
| 1795 __ bind(&extra); | 1500 __ bind(&extra); |
| 1796 // Condition code from comparing key and array length is still available. | 1501 // Condition code from comparing key and array length is still available. |
| 1797 __ b(ne, &slow); // Only support writing to array[array.length]. | 1502 __ b(ne, &slow); // Only support writing to array[array.length]. |
| 1798 // Check for room in the elements backing store. | 1503 // Check for room in the elements backing store. |
| 1799 // Both the key and the length of FixedArray are smis. | 1504 // Both the key and the length of FixedArray are smis. |
| 1800 __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset)); | 1505 __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset)); |
| (...skipping 33 matching lines...) |
| 1834 #ifdef ENABLE_CARDMARKING_WRITE_BARRIER | 1539 #ifdef ENABLE_CARDMARKING_WRITE_BARRIER |
| 1835 // Update write barrier for the elements array address. | 1540 // Update write barrier for the elements array address. |
| 1836 __ sub(r4, r5, Operand(elements)); | 1541 __ sub(r4, r5, Operand(elements)); |
| 1837 __ RecordWrite(elements, Operand(r4), r5, r6); | 1542 __ RecordWrite(elements, Operand(r4), r5, r6); |
| 1838 #endif | 1543 #endif |
| 1839 | 1544 |
| 1840 __ Ret(); | 1545 __ Ret(); |
| 1841 } | 1546 } |
| 1842 | 1547 |
| 1843 | 1548 |
| 1844 // Convert and store int passed in register ival to IEEE 754 single precision | 1549 void StoreIC::GenerateMegamorphic(MacroAssembler* masm, |
| 1845 // floating point value at memory location (dst + 4 * wordoffset) | 1550 Code::ExtraICState extra_ic_state) { |
| 1846 // If VFP3 is available use it for conversion. | |
| 1847 static void StoreIntAsFloat(MacroAssembler* masm, | |
| 1848 Register dst, | |
| 1849 Register wordoffset, | |
| 1850 Register ival, | |
| 1851 Register fval, | |
| 1852 Register scratch1, | |
| 1853 Register scratch2) { | |
| 1854 if (CpuFeatures::IsSupported(VFP3)) { | |
| 1855 CpuFeatures::Scope scope(VFP3); | |
| 1856 __ vmov(s0, ival); | |
| 1857 __ add(scratch1, dst, Operand(wordoffset, LSL, 2)); | |
| 1858 __ vcvt_f32_s32(s0, s0); | |
| 1859 __ vstr(s0, scratch1, 0); | |
| 1860 } else { | |
| 1861 Label not_special, done; | |
| 1862 // Move sign bit from source to destination. This works because the sign | |
| 1863 // bit of a binary32 value has the same position and polarity as the 2's | |
| 1864 // complement sign bit of a 32-bit integer. | |
| 1865 ASSERT(kBinary32SignMask == 0x80000000u); | |
| 1866 | |
| 1867 __ and_(fval, ival, Operand(kBinary32SignMask), SetCC); | |
| 1868 // Negate value if it is negative. | |
| 1869 __ rsb(ival, ival, Operand(0, RelocInfo::NONE), LeaveCC, ne); | |
| 1870 | |
| 1871 // We have -1, 0 or 1, which we treat specially. Register ival contains | |
| 1872 // absolute value: it is either equal to 1 (special case of -1 and 1), | |
| 1873 // greater than 1 (not a special case) or less than 1 (special case of 0). | |
| 1874 __ cmp(ival, Operand(1)); | |
| 1875 __ b(gt, ¬_special); | |
| 1876 | |
| 1877 // For 1 or -1 we need to or in the 0 exponent (biased). | |
| 1878 static const uint32_t exponent_word_for_1 = | |
| 1879 kBinary32ExponentBias << kBinary32ExponentShift; | |
| 1880 | |
| 1881 __ orr(fval, fval, Operand(exponent_word_for_1), LeaveCC, eq); | |
| 1882 __ b(&done); | |
| 1883 | |
| 1884 __ bind(¬_special); | |
| 1885 // Count leading zeros. | |
| 1886 // Gets the wrong answer for 0, but we already checked for that case above. | |
| 1887 Register zeros = scratch2; | |
| 1888 __ CountLeadingZeros(zeros, ival, scratch1); | |
| 1889 | |
| 1890 // Compute exponent and or it into the exponent register. | |
| 1891 __ rsb(scratch1, | |
| 1892 zeros, | |
| 1893 Operand((kBitsPerInt - 1) + kBinary32ExponentBias)); | |
| 1894 | |
| 1895 __ orr(fval, | |
| 1896 fval, | |
| 1897 Operand(scratch1, LSL, kBinary32ExponentShift)); | |
| 1898 | |
| 1899 // Shift up the source chopping the top bit off. | |
| 1900 __ add(zeros, zeros, Operand(1)); | |
| 1901 // This wouldn't work for 1 and -1 as the shift would be 32 which means 0. | |
| 1902 __ mov(ival, Operand(ival, LSL, zeros)); | |
| 1903 // Or in the top of the mantissa (top 23 bits). | |
| 1904 __ orr(fval, | |
| 1905 fval, | |
| 1906 Operand(ival, LSR, kBitsPerInt - kBinary32MantissaBits)); | |
| 1907 | |
| 1908 __ bind(&done); | |
| 1909 __ str(fval, MemOperand(dst, wordoffset, LSL, 2)); | |
| 1910 } | |
| 1911 } | |
| 1912 | |
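A host-side sketch of the non-VFP branch of StoreIntAsFloat: take the sign, normalize the magnitude by counting leading zeros, derive the biased exponent, and shift the magnitude into the 23-bit mantissa field, chopping off the implicit top bit. Like the stub, extra mantissa bits are truncated rather than rounded:

```cpp
#include <cstdint>

uint32_t Int2FloatBits(int32_t ival) {
  uint32_t sign = static_cast<uint32_t>(ival) & 0x80000000u;
  // Compute the magnitude via int64 so that INT32_MIN negates cleanly.
  uint32_t mag = static_cast<uint32_t>(
      sign ? -static_cast<int64_t>(ival) : static_cast<int64_t>(ival));
  if (mag == 0) return sign;  // Zero needs no exponent or mantissa.
  int zeros = 0;
  while (!(mag & 0x80000000u)) {  // Normalize: shift until the top bit is set.
    mag <<= 1;
    zeros++;
  }
  uint32_t exponent = static_cast<uint32_t>((31 - zeros) + 127);  // Biased.
  uint32_t mantissa = (mag << 1) >> 9;  // Drop implicit bit, keep top 23.
  return sign | (exponent << 23) | mantissa;
}
```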
| 1913 | |
| 1914 static bool IsElementTypeSigned(ExternalArrayType array_type) { | |
| 1915 switch (array_type) { | |
| 1916 case kExternalByteArray: | |
| 1917 case kExternalShortArray: | |
| 1918 case kExternalIntArray: | |
| 1919 return true; | |
| 1920 | |
| 1921 case kExternalUnsignedByteArray: | |
| 1922 case kExternalUnsignedShortArray: | |
| 1923 case kExternalUnsignedIntArray: | |
| 1924 return false; | |
| 1925 | |
| 1926 default: | |
| 1927 UNREACHABLE(); | |
| 1928 return false; | |
| 1929 } | |
| 1930 } | |
| 1931 | |
| 1932 | |
| 1933 void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm, | |
| 1934 ExternalArrayType array_type) { | |
| 1935 // ---------- S t a t e -------------- | |
| 1936 // -- r0 : value | |
| 1937 // -- r1 : key | |
| 1938 // -- r2 : receiver | |
| 1939 // -- lr : return address | |
| 1940 // ----------------------------------- | |
| 1941 Label slow, check_heap_number; | |
| 1942 | |
| 1943 // Register usage. | |
| 1944 Register value = r0; | |
| 1945 Register key = r1; | |
| 1946 Register receiver = r2; | |
| 1947 // r3 mostly holds the elements array or the destination external array. | |
| 1948 | |
| 1949 // Check that the object isn't a smi. | |
| 1950 __ BranchOnSmi(receiver, &slow); | |
| 1951 | |
| 1952 // Check that the object is a JS object. Load map into r3. | |
| 1953 __ CompareObjectType(receiver, r3, r4, FIRST_JS_OBJECT_TYPE); | |
| 1954 __ b(le, &slow); | |
| 1955 | |
| 1956 // Check that the receiver does not require access checks. We need | |
| 1957 // to do this because this generic stub does not perform map checks. | |
| 1958 __ ldrb(ip, FieldMemOperand(r3, Map::kBitFieldOffset)); | |
| 1959 __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded)); | |
| 1960 __ b(ne, &slow); | |
| 1961 | |
| 1962 // Check that the key is a smi. | |
| 1963 __ BranchOnNotSmi(key, &slow); | |
| 1964 | |
| 1965 // Check that the elements array is the appropriate type of ExternalArray. | |
| 1966 __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset)); | |
| 1967 __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset)); | |
| 1968 __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type)); | |
| 1969 __ cmp(r4, ip); | |
| 1970 __ b(ne, &slow); | |
| 1971 | |
| 1972 // Check that the index is in range. | |
| 1973 __ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag the index. | |
| 1974 __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset)); | |
| 1975 __ cmp(r4, ip); | |
| 1976 // Unsigned comparison catches both negative and too-large values. | |
| 1977 __ b(hs, &slow); | |
| 1978 | |
| 1979 // Handle both smis and HeapNumbers in the fast path. Go to the | |
| 1980 // runtime for all other kinds of values. | |
| 1981 // r3: external array. | |
| 1982 // r4: key (integer). | |
| 1983 __ BranchOnNotSmi(value, &check_heap_number); | |
| 1984 __ mov(r5, Operand(value, ASR, kSmiTagSize)); // Untag the value. | |
| 1985 __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset)); | |
| 1986 | |
| 1987 // r3: base pointer of external storage. | |
| 1988 // r4: key (integer). | |
| 1989 // r5: value (integer). | |
| 1990 switch (array_type) { | |
| 1991 case kExternalByteArray: | |
| 1992 case kExternalUnsignedByteArray: | |
| 1993 __ strb(r5, MemOperand(r3, r4, LSL, 0)); | |
| 1994 break; | |
| 1995 case kExternalShortArray: | |
| 1996 case kExternalUnsignedShortArray: | |
| 1997 __ strh(r5, MemOperand(r3, r4, LSL, 1)); | |
| 1998 break; | |
| 1999 case kExternalIntArray: | |
| 2000 case kExternalUnsignedIntArray: | |
| 2001 __ str(r5, MemOperand(r3, r4, LSL, 2)); | |
| 2002 break; | |
| 2003 case kExternalFloatArray: | |
| 2004 // Perform int-to-float conversion and store to memory. | |
| 2005 StoreIntAsFloat(masm, r3, r4, r5, r6, r7, r9); | |
| 2006 break; | |
| 2007 default: | |
| 2008 UNREACHABLE(); | |
| 2009 break; | |
| 2010 } | |
| 2011 | |
| 2012 // Entry registers are intact, r0 holds the value which is the return value. | |
| 2013 __ Ret(); | |
| 2014 | |
| 2015 | |
| 2016 // r3: external array. | |
| 2017 // r4: index (integer). | |
| 2018 __ bind(&check_heap_number); | |
| 2019 __ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE); | |
| 2020 __ b(ne, &slow); | |
| 2021 | |
| 2022 __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset)); | |
| 2023 | |
| 2024 // r3: base pointer of external storage. | |
| 2025 // r4: key (integer). | |
| 2026 | |
| 2027 // The WebGL specification leaves the behavior of storing NaN and | |
| 2028 // +/-Infinity into integer arrays basically undefined. For more | |
| 2029 // reproducible behavior, convert these to zero. | |
| 2030 if (CpuFeatures::IsSupported(VFP3)) { | |
| 2031 CpuFeatures::Scope scope(VFP3); | |
| 2032 | |
| 2033 | |
| 2034 if (array_type == kExternalFloatArray) { | |
| 2035 // vldr requires the offset to be a multiple of 4 so we cannot | |
| 2036 // include -kHeapObjectTag in it. | |
| 2037 __ sub(r5, r0, Operand(kHeapObjectTag)); | |
| 2038 __ vldr(d0, r5, HeapNumber::kValueOffset); | |
| 2039 __ add(r5, r3, Operand(r4, LSL, 2)); | |
| 2040 __ vcvt_f32_f64(s0, d0); | |
| 2041 __ vstr(s0, r5, 0); | |
| 2042 } else { | |
| 2043 // Need to perform float-to-int conversion. | |
| 2044 // Test for NaN or infinity (both give zero). | |
| 2045 __ ldr(r6, FieldMemOperand(r5, HeapNumber::kExponentOffset)); | |
| 2046 | |
| 2047 // Hoisted load. vldr requires the offset to be a multiple of 4 so we | |
| 2048 // cannot include -kHeapObjectTag in it. | |
| 2049 __ sub(r5, r0, Operand(kHeapObjectTag)); | |
| 2050 __ vldr(d0, r5, HeapNumber::kValueOffset); | |
| 2051 | |
| 2052 __ Sbfx(r6, r6, HeapNumber::kExponentShift, HeapNumber::kExponentBits); | |
| 2053 // NaNs and Infinities have all-one exponents so they sign extend to -1. | |
| 2054 __ cmp(r6, Operand(-1)); | |
| 2055 __ mov(r5, Operand(Smi::FromInt(0)), LeaveCC, eq); | |
| 2056 | |
| 2057 // Not infinity or NaN simply convert to int. | |
| 2058 if (IsElementTypeSigned(array_type)) { | |
| 2059 __ vcvt_s32_f64(s0, d0, Assembler::RoundToZero, ne); | |
| 2060 } else { | |
| 2061 __ vcvt_u32_f64(s0, d0, Assembler::RoundToZero, ne); | |
| 2062 } | |
| 2063 __ vmov(r5, s0, ne); | |
| 2064 | |
| 2065 switch (array_type) { | |
| 2066 case kExternalByteArray: | |
| 2067 case kExternalUnsignedByteArray: | |
| 2068 __ strb(r5, MemOperand(r3, r4, LSL, 0)); | |
| 2069 break; | |
| 2070 case kExternalShortArray: | |
| 2071 case kExternalUnsignedShortArray: | |
| 2072 __ strh(r5, MemOperand(r3, r4, LSL, 1)); | |
| 2073 break; | |
| 2074 case kExternalIntArray: | |
| 2075 case kExternalUnsignedIntArray: | |
| 2076 __ str(r5, MemOperand(r3, r4, LSL, 2)); | |
| 2077 break; | |
| 2078 default: | |
| 2079 UNREACHABLE(); | |
| 2080 break; | |
| 2081 } | |
| 2082 } | |
| 2083 | |
| 2084 // Entry registers are intact, r0 holds the value which is the return value. | |
| 2085 __ Ret(); | |
| 2086 } else { | |
| 2087 // VFP3 is not available, do manual conversions. | |
| 2088 __ ldr(r5, FieldMemOperand(value, HeapNumber::kExponentOffset)); | |
| 2089 __ ldr(r6, FieldMemOperand(value, HeapNumber::kMantissaOffset)); | |
| 2090 | |
| 2091 if (array_type == kExternalFloatArray) { | |
| 2092 Label done, nan_or_infinity_or_zero; | |
| 2093 static const int kMantissaInHiWordShift = | |
| 2094 kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord; | |
| 2095 | |
| 2096 static const int kMantissaInLoWordShift = | |
| 2097 kBitsPerInt - kMantissaInHiWordShift; | |
| 2098 | |
| 2099 // Test for all special exponent values: zeros, subnormal numbers, NaNs | |
| 2100 // and infinities. All these should be converted to 0. | |
| 2101 __ mov(r7, Operand(HeapNumber::kExponentMask)); | |
| 2102 __ and_(r9, r5, Operand(r7), SetCC); | |
| 2103 __ b(eq, &nan_or_infinity_or_zero); | |
| 2104 | |
| 2105 __ teq(r9, Operand(r7)); | |
| 2106 __ mov(r9, Operand(kBinary32ExponentMask), LeaveCC, eq); | |
| 2107 __ b(eq, &nan_or_infinity_or_zero); | |
| 2108 | |
| 2109 // Rebias exponent. | |
| 2110 __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift)); | |
| 2111 __ add(r9, | |
| 2112 r9, | |
| 2113 Operand(kBinary32ExponentBias - HeapNumber::kExponentBias)); | |
| 2114 | |
| 2115 __ cmp(r9, Operand(kBinary32MaxExponent)); | |
| 2116 __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, gt); | |
| 2117 __ orr(r5, r5, Operand(kBinary32ExponentMask), LeaveCC, gt); | |
| 2118 __ b(gt, &done); | |
| 2119 | |
| 2120 __ cmp(r9, Operand(kBinary32MinExponent)); | |
| 2121 __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, lt); | |
| 2122 __ b(lt, &done); | |
| 2123 | |
| 2124 __ and_(r7, r5, Operand(HeapNumber::kSignMask)); | |
| 2125 __ and_(r5, r5, Operand(HeapNumber::kMantissaMask)); | |
| 2126 __ orr(r7, r7, Operand(r5, LSL, kMantissaInHiWordShift)); | |
| 2127 __ orr(r7, r7, Operand(r6, LSR, kMantissaInLoWordShift)); | |
| 2128 __ orr(r5, r7, Operand(r9, LSL, kBinary32ExponentShift)); | |
| 2129 | |
| 2130 __ bind(&done); | |
| 2131 __ str(r5, MemOperand(r3, r4, LSL, 2)); | |
| 2132 // Entry registers are intact, r0 holds the value which is the return | |
| 2133 // value. | |
| 2134 __ Ret(); | |
| 2135 | |
| 2136 __ bind(&nan_or_infinity_or_zero); | |
| 2137 __ and_(r7, r5, Operand(HeapNumber::kSignMask)); | |
| 2138 __ and_(r5, r5, Operand(HeapNumber::kMantissaMask)); | |
| 2139 __ orr(r9, r9, r7); | |
| 2140 __ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift)); | |
| 2141 __ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift)); | |
| 2142 __ b(&done); | |
| 2143 } else { | |
| 2144 bool is_signed_type = IsElementTypeSigned(array_type); | |
| 2145 int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt; | |
| 2146 int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000; | |
| 2147 | |
| 2148 Label done, sign; | |
| 2149 | |
| 2150 // Test for all special exponent values: zeros, subnormal numbers, NaNs | |
| 2151 // and infinities. All these should be converted to 0. | |
| 2152 __ mov(r7, Operand(HeapNumber::kExponentMask)); | |
| 2153 __ and_(r9, r5, Operand(r7), SetCC); | |
| 2154 __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq); | |
| 2155 __ b(eq, &done); | |
| 2156 | |
| 2157 __ teq(r9, Operand(r7)); | |
| 2158 __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq); | |
| 2159 __ b(eq, &done); | |
| 2160 | |
| 2161 // Unbias exponent. | |
| 2162 __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift)); | |
| 2163 __ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC); | |
| 2164 // If the exponent is negative then the result is 0. | |
| 2165 __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, mi); | |
| 2166 __ b(mi, &done); | |
| 2167 | |
| 2168 // If the exponent is too big then the result is the minimal value. | |
| 2169 __ cmp(r9, Operand(meaningfull_bits - 1)); | |
| 2170 __ mov(r5, Operand(min_value), LeaveCC, ge); | |
| 2171 __ b(ge, &done); | |
| 2172 | |
| 2173 __ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC); | |
| 2174 __ and_(r5, r5, Operand(HeapNumber::kMantissaMask)); | |
| 2175 __ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord)); | |
| 2176 | |
| 2177 __ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC); | |
| 2178 __ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl); | |
| 2179 __ b(pl, &sign); | |
| 2180 | |
| 2181 __ rsb(r9, r9, Operand(0, RelocInfo::NONE)); | |
| 2182 __ mov(r5, Operand(r5, LSL, r9)); | |
| 2183 __ rsb(r9, r9, Operand(meaningfull_bits)); | |
| 2184 __ orr(r5, r5, Operand(r6, LSR, r9)); | |
| 2185 | |
| 2186 __ bind(&sign); | |
| 2187 __ teq(r7, Operand(0, RelocInfo::NONE)); | |
| 2188 __ rsb(r5, r5, Operand(0, RelocInfo::NONE), LeaveCC, ne); | |
| 2189 | |
| 2190 __ bind(&done); | |
| 2191 switch (array_type) { | |
| 2192 case kExternalByteArray: | |
| 2193 case kExternalUnsignedByteArray: | |
| 2194 __ strb(r5, MemOperand(r3, r4, LSL, 0)); | |
| 2195 break; | |
| 2196 case kExternalShortArray: | |
| 2197 case kExternalUnsignedShortArray: | |
| 2198 __ strh(r5, MemOperand(r3, r4, LSL, 1)); | |
| 2199 break; | |
| 2200 case kExternalIntArray: | |
| 2201 case kExternalUnsignedIntArray: | |
| 2202 __ str(r5, MemOperand(r3, r4, LSL, 2)); | |
| 2203 break; | |
| 2204 default: | |
| 2205 UNREACHABLE(); | |
| 2206 break; | |
| 2207 } | |
| 2208 } | |
| 2209 } | |
| 2210 | |
| 2211 // Slow case: call runtime. | |
| 2212 __ bind(&slow); | |
| 2213 | |
| 2214 // Entry registers are intact. | |
| 2215 // r0: value | |
| 2216 // r1: key | |
| 2217 // r2: receiver | |
| 2218 GenerateRuntimeSetProperty(masm); | |
| 2219 } | |
| 2220 | |
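The deleted non-VFP store path above performs a manual double-to-int truncation. A simplified host-side sketch of the same idea, operating on the number's two 32-bit halves: special and negative exponents give 0, overflow pins to the minimal value as in the stub, and rounding is toward zero. The exact register-level shifting differs; this only mirrors the result:

```cpp
#include <cstdint>

int32_t DoubleBitsToInt(uint32_t hi, uint32_t lo, bool is_signed) {
  uint32_t exponent_field = (hi >> 20) & 0x7FF;
  // Zeros, denormals, NaNs and infinities all convert to 0 here.
  if (exponent_field == 0 || exponent_field == 0x7FF) return 0;
  int exponent = static_cast<int>(exponent_field) - 1023;  // Unbias.
  if (exponent < 0) return 0;  // |x| < 1 truncates to 0.
  int meaningful_bits = is_signed ? 31 : 32;
  if (exponent >= meaningful_bits - 1) {
    return is_signed ? INT32_MIN : 0;  // Pin overflow like the stub.
  }
  // Reassemble the 53-bit significand: implicit 1 plus 52 stored bits.
  uint64_t significand =
      (uint64_t{1} << 52) | (uint64_t{hi & 0xFFFFF} << 32) | lo;
  uint32_t magnitude = static_cast<uint32_t>(significand >> (52 - exponent));
  int32_t result = static_cast<int32_t>(magnitude);
  return (hi & 0x80000000u) ? -result : result;
}
```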
| 2221 | |
| 2222 void StoreIC::GenerateMegamorphic(MacroAssembler* masm) { | |
| 2223 // ----------- S t a t e ------------- | 1551 // ----------- S t a t e ------------- |
| 2224 // -- r0 : value | 1552 // -- r0 : value |
| 2225 // -- r1 : receiver | 1553 // -- r1 : receiver |
| 2226 // -- r2 : name | 1554 // -- r2 : name |
| 2227 // -- lr : return address | 1555 // -- lr : return address |
| 2228 // ----------------------------------- | 1556 // ----------------------------------- |
| 2229 | 1557 |
| 2230 // Get the receiver from the stack and probe the stub cache. | 1558 // Get the receiver from the stack and probe the stub cache. |
| 2231 Code::Flags flags = Code::ComputeFlags(Code::STORE_IC, | 1559 Code::Flags flags = Code::ComputeFlags(Code::STORE_IC, |
| 2232 NOT_IN_LOOP, | 1560 NOT_IN_LOOP, |
| 2233 MONOMORPHIC); | 1561 MONOMORPHIC, |
| 1562 extra_ic_state); |
| 2234 StubCache::GenerateProbe(masm, flags, r1, r2, r3, r4, r5); | 1563 StubCache::GenerateProbe(masm, flags, r1, r2, r3, r4, r5); |
| 2235 | 1564 |
| 2236 // Cache miss: Jump to runtime. | 1565 // Cache miss: Jump to runtime. |
| 2237 GenerateMiss(masm); | 1566 GenerateMiss(masm); |
| 2238 } | 1567 } |
| 2239 | 1568 |
| 2240 | 1569 |
| 2241 void StoreIC::GenerateMiss(MacroAssembler* masm) { | 1570 void StoreIC::GenerateMiss(MacroAssembler* masm) { |
| 2242 // ----------- S t a t e ------------- | 1571 // ----------- S t a t e ------------- |
| 2243 // -- r0 : value | 1572 // -- r0 : value |
| (...skipping 24 matching lines...) Expand all Loading... |
| 2268 // to JSArray. | 1597 // to JSArray. |
| 2269 // Value must be a number, but only smis are accepted as the most common case. | 1598 // Value must be a number, but only smis are accepted as the most common case. |
| 2270 | 1599 |
| 2271 Label miss; | 1600 Label miss; |
| 2272 | 1601 |
| 2273 Register receiver = r1; | 1602 Register receiver = r1; |
| 2274 Register value = r0; | 1603 Register value = r0; |
| 2275 Register scratch = r3; | 1604 Register scratch = r3; |
| 2276 | 1605 |
| 2277 // Check that the receiver isn't a smi. | 1606 // Check that the receiver isn't a smi. |
| 2278 __ BranchOnSmi(receiver, &miss); | 1607 __ JumpIfSmi(receiver, &miss); |
| 2279 | 1608 |
| 2280 // Check that the object is a JS array. | 1609 // Check that the object is a JS array. |
| 2281 __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE); | 1610 __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE); |
| 2282 __ b(ne, &miss); | 1611 __ b(ne, &miss); |
| 2283 | 1612 |
| 2284 // Check that elements are FixedArray. | 1613 // Check that elements are FixedArray. |
| 2285 // We rely on StoreIC_ArrayLength below to deal with all types of | 1614 // We rely on StoreIC_ArrayLength below to deal with all types of |
| 2286 // fast elements (including COW). | 1615 // fast elements (including COW). |
| 2287 __ ldr(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset)); | 1616 __ ldr(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset)); |
| 2288 __ CompareObjectType(scratch, scratch, scratch, FIXED_ARRAY_TYPE); | 1617 __ CompareObjectType(scratch, scratch, scratch, FIXED_ARRAY_TYPE); |
| 2289 __ b(ne, &miss); | 1618 __ b(ne, &miss); |
| 2290 | 1619 |
| 2291 // Check that value is a smi. | 1620 // Check that value is a smi. |
| 2292 __ BranchOnNotSmi(value, &miss); | 1621 __ JumpIfNotSmi(value, &miss); |
| 2293 | 1622 |
| 2294 // Prepare tail call to StoreIC_ArrayLength. | 1623 // Prepare tail call to StoreIC_ArrayLength. |
| 2295 __ Push(receiver, value); | 1624 __ Push(receiver, value); |
| 2296 | 1625 |
| 2297 ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_ArrayLength)); | 1626 ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_ArrayLength)); |
| 2298 __ TailCallExternalReference(ref, 2, 1); | 1627 __ TailCallExternalReference(ref, 2, 1); |
| 2299 | 1628 |
| 2300 __ bind(&miss); | 1629 __ bind(&miss); |
| 2301 | 1630 |
| 2302 GenerateMiss(masm); | 1631 GenerateMiss(masm); |
| (...skipping 49 matching lines...) |
| 2352 case Token::GT: | 1681 case Token::GT: |
| 2353 // Reverse left and right operands to obtain ECMA-262 conversion order. | 1682 // Reverse left and right operands to obtain ECMA-262 conversion order. |
| 2354 return lt; | 1683 return lt; |
| 2355 case Token::LTE: | 1684 case Token::LTE: |
| 2356 // Reverse left and right operands to obtain ECMA-262 conversion order. | 1685 // Reverse left and right operands to obtain ECMA-262 conversion order. |
| 2357 return ge; | 1686 return ge; |
| 2358 case Token::GTE: | 1687 case Token::GTE: |
| 2359 return ge; | 1688 return ge; |
| 2360 default: | 1689 default: |
| 2361 UNREACHABLE(); | 1690 UNREACHABLE(); |
| 2362 return no_condition; | 1691 return kNoCondition; |
| 2363 } | 1692 } |
| 2364 } | 1693 } |
| 2365 | 1694 |
| 2366 | 1695 |
| 2367 void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) { | 1696 void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) { |
| 2368 HandleScope scope; | 1697 HandleScope scope; |
| 2369 Handle<Code> rewritten; | 1698 Handle<Code> rewritten; |
| 2370 State previous_state = GetState(); | 1699 State previous_state = GetState(); |
| 2371 State state = TargetState(previous_state, false, x, y); | 1700 State state = TargetState(previous_state, false, x, y); |
| 2372 if (state == GENERIC) { | 1701 if (state == GENERIC) { |
| 2373 CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0); | 1702 CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0); |
| 2374 rewritten = stub.GetCode(); | 1703 rewritten = stub.GetCode(); |
| 2375 } else { | 1704 } else { |
| 2376 ICCompareStub stub(op_, state); | 1705 ICCompareStub stub(op_, state); |
| 2377 rewritten = stub.GetCode(); | 1706 rewritten = stub.GetCode(); |
| 2378 } | 1707 } |
| 2379 set_target(*rewritten); | 1708 set_target(*rewritten); |
| 2380 | 1709 |
| 2381 #ifdef DEBUG | 1710 #ifdef DEBUG |
| 2382 if (FLAG_trace_ic) { | 1711 if (FLAG_trace_ic) { |
| 2383 PrintF("[CompareIC (%s->%s)#%s]\n", | 1712 PrintF("[CompareIC (%s->%s)#%s]\n", |
| 2384 GetStateName(previous_state), | 1713 GetStateName(previous_state), |
| 2385 GetStateName(state), | 1714 GetStateName(state), |
| 2386 Token::Name(op_)); | 1715 Token::Name(op_)); |
| 2387 } | 1716 } |
| 2388 #endif | 1717 #endif |
| 1718 |
| 1719 // Activate inlined smi code. |
| 1720 if (previous_state == UNINITIALIZED) { |
| 1721 PatchInlinedSmiCode(address()); |
| 1722 } |
| 2389 } | 1723 } |
| 2390 | 1724 |
| 2391 | 1725 |
| 2392 void PatchInlinedSmiCode(Address address) { | 1726 void PatchInlinedSmiCode(Address address) { |
| 2393 UNIMPLEMENTED(); | 1727 Address cmp_instruction_address = |
| 1728 address + Assembler::kCallTargetAddressOffset; |
| 1729 |
| 1730 // If the instruction following the call is not a cmp rx, #yyy, nothing |
| 1731 // was inlined. |
| 1732 Instr instr = Assembler::instr_at(cmp_instruction_address); |
| 1733 if (!Assembler::IsCmpImmediate(instr)) { |
| 1734 return; |
| 1735 } |
| 1736 |
| 1737 // The delta to the start of the map check instruction, and the |
| 1738 // condition code to use at the patched jump. |
| 1739 int delta = Assembler::GetCmpImmediateRawImmediate(instr); |
| 1740 delta += |
| 1741 Assembler::GetCmpImmediateRegister(instr).code() * kOff12Mask; |
| 1742 // If the delta is 0 the instruction is cmp r0, #0 which also signals that |
| 1743 // nothing was inlined. |
| 1744 if (delta == 0) { |
| 1745 return; |
| 1746 } |
| 1747 |
| 1748 #ifdef DEBUG |
| 1749 if (FLAG_trace_ic) { |
| 1750 PrintF("[ patching ic at %p, cmp=%p, delta=%d\n", |
| 1751 address, cmp_instruction_address, delta); |
| 1752 } |
| 1753 #endif |
| 1754 |
| 1755 Address patch_address = |
| 1756 cmp_instruction_address - delta * Instruction::kInstrSize; |
| 1757 Instr instr_at_patch = Assembler::instr_at(patch_address); |
| 1758 Instr branch_instr = |
| 1759 Assembler::instr_at(patch_address + Instruction::kInstrSize); |
| 1760 ASSERT(Assembler::IsCmpRegister(instr_at_patch)); |
| 1761 ASSERT_EQ(Assembler::GetRn(instr_at_patch).code(), |
| 1762 Assembler::GetRm(instr_at_patch).code()); |
| 1763 ASSERT(Assembler::IsBranch(branch_instr)); |
| 1764 if (Assembler::GetCondition(branch_instr) == eq) { |
| 1765 // This is patching a "jump if not smi" site to be active. |
| 1766 // Changing |
| 1767 // cmp rx, rx |
| 1768 // b eq, <target> |
| 1769 // to |
| 1770 // tst rx, #kSmiTagMask |
| 1771 // b ne, <target> |
| 1772 CodePatcher patcher(patch_address, 2); |
| 1773 Register reg = Assembler::GetRn(instr_at_patch); |
| 1774 patcher.masm()->tst(reg, Operand(kSmiTagMask)); |
| 1775 patcher.EmitCondition(ne); |
| 1776 } else { |
| 1777 ASSERT(Assembler::GetCondition(branch_instr) == ne); |
| 1778 // This is patching a "jump if smi" site to be active. |
| 1779 // Changing |
| 1780 // cmp rx, rx |
| 1781 // b ne, <target> |
| 1782 // to |
| 1783 // tst rx, #kSmiTagMask |
| 1784 // b eq, <target> |
| 1785 CodePatcher patcher(patch_address, 2); |
| 1786 Register reg = Assembler::GetRn(instr_at_patch); |
| 1787 patcher.masm()->tst(reg, Operand(kSmiTagMask)); |
| 1788 patcher.EmitCondition(eq); |
| 1789 } |
| 2394 } | 1790 } |
| 2395 | 1791 |
| 2396 | 1792 |
| 2397 } } // namespace v8::internal | 1793 } } // namespace v8::internal |
| 2398 | 1794 |
| 2399 #endif // V8_TARGET_ARCH_ARM | 1795 #endif // V8_TARGET_ARCH_ARM |