| OLD | NEW |
| 1 // Copyright 2006-2008 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2008 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| 11 // with the distribution. | 11 // with the distribution. |
| 12 // * Neither the name of Google Inc. nor the names of its | 12 // * Neither the name of Google Inc. nor the names of its |
| 13 // contributors may be used to endorse or promote products derived | 13 // contributors may be used to endorse or promote products derived |
| 14 // from this software without specific prior written permission. | 14 // from this software without specific prior written permission. |
| 15 // | 15 // |
| 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 27 | 27 |
| 28 #include "v8.h" | 28 #include "v8.h" |
| 29 | 29 |
| 30 #if defined(V8_TARGET_ARCH_ARM) |
| 31 |
| 30 #include "assembler-arm.h" | 32 #include "assembler-arm.h" |
| 31 #include "codegen.h" | 33 #include "codegen.h" |
| 32 #include "codegen-inl.h" | 34 #include "codegen-inl.h" |
| 33 #include "disasm.h" | 35 #include "disasm.h" |
| 34 #include "ic-inl.h" | 36 #include "ic-inl.h" |
| 35 #include "runtime.h" | 37 #include "runtime.h" |
| 36 #include "stub-cache.h" | 38 #include "stub-cache.h" |
| 37 | 39 |
| 38 namespace v8 { | 40 namespace v8 { |
| 39 namespace internal { | 41 namespace internal { |
| (...skipping 594 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 634 } | 636 } |
| 635 | 637 |
| 636 | 638 |
| 637 bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) { | 639 bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) { |
| 638 Address inline_end_address; | 640 Address inline_end_address; |
| 639 if (!IsInlinedICSite(address, &inline_end_address)) return false; | 641 if (!IsInlinedICSite(address, &inline_end_address)) return false; |
| 640 | 642 |
| 641 // Patch the map check. | 643 // Patch the map check. |
| 642 Address ldr_map_instr_address = | 644 Address ldr_map_instr_address = |
| 643 inline_end_address - | 645 inline_end_address - |
| 644 CodeGenerator::kInlinedKeyedLoadInstructionsAfterPatchSize * | 646 (CodeGenerator::kInlinedKeyedLoadInstructionsAfterPatch * |
| 645 Assembler::kInstrSize; | 647 Assembler::kInstrSize); |
| 646 Assembler::set_target_address_at(ldr_map_instr_address, | 648 Assembler::set_target_address_at(ldr_map_instr_address, |
| 647 reinterpret_cast<Address>(map)); | 649 reinterpret_cast<Address>(map)); |
| 648 return true; | 650 return true; |
| 649 } | 651 } |
| 650 | 652 |
| 651 | 653 |
| 652 void KeyedStoreIC::ClearInlinedVersion(Address address) { | 654 void KeyedStoreIC::ClearInlinedVersion(Address address) { |
| 653 // Insert null as the elements map to check for. This will make | 655 // Insert null as the elements map to check for. This will make |
| 654 // sure that the elements fast-case map check fails so that control | 656 // sure that the elements fast-case map check fails so that control |
| 655 // flows to the IC instead of the inlined version. | 657 // flows to the IC instead of the inlined version. |
| 656 PatchInlinedStore(address, Heap::null_value()); | 658 PatchInlinedStore(address, Heap::null_value()); |
| 657 } | 659 } |
| 658 | 660 |
| 659 | 661 |
| 660 void KeyedStoreIC::RestoreInlinedVersion(Address address) { | 662 void KeyedStoreIC::RestoreInlinedVersion(Address address) { |
| 661 // Restore the fast-case elements map check so that the inlined | 663 // Restore the fast-case elements map check so that the inlined |
| 662 // version can be used again. | 664 // version can be used again. |
| 663 PatchInlinedStore(address, Heap::fixed_array_map()); | 665 PatchInlinedStore(address, Heap::fixed_array_map()); |
| 664 } | 666 } |
| 665 | 667 |
| 666 | 668 |
| 667 bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) { | 669 bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) { |
| 668 // Find the end of the inlined code for handling the store if this is an | 670 // Find the end of the inlined code for handling the store if this is an |
| 669 // inlined IC call site. | 671 // inlined IC call site. |
| 670 Address inline_end_address; | 672 Address inline_end_address; |
| 671 if (!IsInlinedICSite(address, &inline_end_address)) return false; | 673 if (!IsInlinedICSite(address, &inline_end_address)) return false; |
| 672 | 674 |
| 673 // Patch the map check. | 675 // Patch the map check. |
| 674 Address ldr_map_instr_address = | 676 Address ldr_map_instr_address = |
| 675 inline_end_address - 5 * Assembler::kInstrSize; | 677 inline_end_address - |
| 678 (CodeGenerator::kInlinedKeyedStoreInstructionsAfterPatch * |
| 679 Assembler::kInstrSize); |
| 676 Assembler::set_target_address_at(ldr_map_instr_address, | 680 Assembler::set_target_address_at(ldr_map_instr_address, |
| 677 reinterpret_cast<Address>(map)); | 681 reinterpret_cast<Address>(map)); |
| 678 return true; | 682 return true; |
| 679 } | 683 } |
| 680 | 684 |
| 681 | 685 |
| 682 Object* KeyedLoadIC_Miss(Arguments args); | 686 Object* KeyedLoadIC_Miss(Arguments args); |
| 683 | 687 |
| 684 | 688 |
| 685 void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) { | 689 void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) { |
| (...skipping 514 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1200 IC_Utility(kKeyedLoadPropertyWithInterceptor)), 2, 1); | 1204 IC_Utility(kKeyedLoadPropertyWithInterceptor)), 2, 1); |
| 1201 | 1205 |
| 1202 __ bind(&slow); | 1206 __ bind(&slow); |
| 1203 GenerateMiss(masm); | 1207 GenerateMiss(masm); |
| 1204 } | 1208 } |
| 1205 | 1209 |
| 1206 | 1210 |
| 1207 void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) { | 1211 void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) { |
| 1208 // ---------- S t a t e -------------- | 1212 // ---------- S t a t e -------------- |
| 1209 // -- r0 : value | 1213 // -- r0 : value |
| 1214 // -- r1 : key |
| 1215 // -- r2 : receiver |
| 1210 // -- lr : return address | 1216 // -- lr : return address |
| 1211 // -- sp[0] : key | |
| 1212 // -- sp[1] : receiver | |
| 1213 // ----------------------------------- | 1217 // ----------------------------------- |
| 1214 | 1218 |
| 1215 __ ldm(ia, sp, r2.bit() | r3.bit()); | 1219 // Push receiver, key and value for runtime call. |
| 1216 __ Push(r3, r2, r0); | 1220 __ Push(r2, r1, r0); |
| 1217 | 1221 |
| 1218 ExternalReference ref = ExternalReference(IC_Utility(kKeyedStoreIC_Miss)); | 1222 ExternalReference ref = ExternalReference(IC_Utility(kKeyedStoreIC_Miss)); |
| 1219 __ TailCallExternalReference(ref, 3, 1); | 1223 __ TailCallExternalReference(ref, 3, 1); |
| 1220 } | 1224 } |
| 1221 | 1225 |
| 1222 | 1226 |
| 1223 void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) { | 1227 void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) { |
| 1224 // ---------- S t a t e -------------- | 1228 // ---------- S t a t e -------------- |
| 1225 // -- r0 : value | 1229 // -- r0 : value |
| 1230 // -- r1 : key |
| 1231 // -- r2 : receiver |
| 1226 // -- lr : return address | 1232 // -- lr : return address |
| 1227 // -- sp[0] : key | |
| 1228 // -- sp[1] : receiver | |
| 1229 // ----------------------------------- | 1233 // ----------------------------------- |
| 1230 __ ldm(ia, sp, r1.bit() | r3.bit()); // r0 == value, r1 == key, r3 == object | 1234 |
| 1231 __ Push(r3, r1, r0); | 1235 // Push receiver, key and value for runtime call. |
| 1236 __ Push(r2, r1, r0); |
| 1232 | 1237 |
| 1233 __ TailCallRuntime(Runtime::kSetProperty, 3, 1); | 1238 __ TailCallRuntime(Runtime::kSetProperty, 3, 1); |
| 1234 } | 1239 } |
| 1235 | 1240 |
| 1236 | 1241 |
| 1237 void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) { | 1242 void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) { |
| 1238 // ---------- S t a t e -------------- | 1243 // ---------- S t a t e -------------- |
| 1239 // -- r0 : value | 1244 // -- r0 : value |
| 1245 // -- r1 : key |
| 1246 // -- r2 : receiver |
| 1240 // -- lr : return address | 1247 // -- lr : return address |
| 1241 // -- sp[0] : key | |
| 1242 // -- sp[1] : receiver | |
| 1243 // ----------------------------------- | 1248 // ----------------------------------- |
| 1244 Label slow, fast, array, extra, exit, check_pixel_array; | 1249 Label slow, fast, array, extra, check_pixel_array; |
| 1245 | 1250 |
| 1246 // Get the key and the object from the stack. | 1251 // Register usage. |
| 1247 __ ldm(ia, sp, r1.bit() | r3.bit()); // r1 = key, r3 = receiver | 1252 Register value = r0; |
| 1253 Register key = r1; |
| 1254 Register receiver = r2; |
| 1255 Register elements = r3; // Elements array of the receiver. |
| 1256 // r4 and r5 are used as general scratch registers. |
| 1257 |
| 1248 // Check that the key is a smi. | 1258 // Check that the key is a smi. |
| 1249 __ tst(r1, Operand(kSmiTagMask)); | 1259 __ tst(key, Operand(kSmiTagMask)); |
| 1250 __ b(ne, &slow); | 1260 __ b(ne, &slow); |
| 1251 // Check that the object isn't a smi. | 1261 // Check that the object isn't a smi. |
| 1252 __ tst(r3, Operand(kSmiTagMask)); | 1262 __ tst(receiver, Operand(kSmiTagMask)); |
| 1253 __ b(eq, &slow); | 1263 __ b(eq, &slow); |
| 1254 // Get the map of the object. | 1264 // Get the map of the object. |
| 1255 __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset)); | 1265 __ ldr(r4, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
| 1256 // Check that the receiver does not require access checks. We need | 1266 // Check that the receiver does not require access checks. We need |
| 1257 // to do this because this generic stub does not perform map checks. | 1267 // to do this because this generic stub does not perform map checks. |
| 1258 __ ldrb(ip, FieldMemOperand(r2, Map::kBitFieldOffset)); | 1268 __ ldrb(ip, FieldMemOperand(r4, Map::kBitFieldOffset)); |
| 1259 __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded)); | 1269 __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded)); |
| 1260 __ b(ne, &slow); | 1270 __ b(ne, &slow); |
| 1261 // Check if the object is a JS array or not. | 1271 // Check if the object is a JS array or not. |
| 1262 __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset)); | 1272 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); |
| 1263 __ cmp(r2, Operand(JS_ARRAY_TYPE)); | 1273 __ cmp(r4, Operand(JS_ARRAY_TYPE)); |
| 1264 // r1 == key. | |
| 1265 __ b(eq, &array); | 1274 __ b(eq, &array); |
| 1266 // Check that the object is some kind of JS object. | 1275 // Check that the object is some kind of JS object. |
| 1267 __ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE)); | 1276 __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE)); |
| 1268 __ b(lt, &slow); | 1277 __ b(lt, &slow); |
| 1269 | 1278 |
| 1270 | |
| 1271 // Object case: Check key against length in the elements array. | 1279 // Object case: Check key against length in the elements array. |
| 1272 __ ldr(r3, FieldMemOperand(r3, JSObject::kElementsOffset)); | 1280 __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
| 1273 // Check that the object is in fast mode (not dictionary). | 1281 // Check that the object is in fast mode (not dictionary). |
| 1274 __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset)); | 1282 __ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset)); |
| 1275 __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); | 1283 __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); |
| 1276 __ cmp(r2, ip); | 1284 __ cmp(r4, ip); |
| 1277 __ b(ne, &check_pixel_array); | 1285 __ b(ne, &check_pixel_array); |
| 1278 // Untag the key (for checking against untagged length in the fixed array). | 1286 // Untag the key (for checking against untagged length in the fixed array). |
| 1279 __ mov(r1, Operand(r1, ASR, kSmiTagSize)); | 1287 __ mov(r4, Operand(key, ASR, kSmiTagSize)); |
| 1280 // Compute address to store into and check array bounds. | 1288 // Compute address to store into and check array bounds. |
| 1281 __ add(r2, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 1289 __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset)); |
| 1282 __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2)); | 1290 __ cmp(r4, Operand(ip)); |
| 1283 __ ldr(ip, FieldMemOperand(r3, FixedArray::kLengthOffset)); | |
| 1284 __ cmp(r1, Operand(ip)); | |
| 1285 __ b(lo, &fast); | 1291 __ b(lo, &fast); |
| 1286 | 1292 |
| 1287 | 1293 // Slow case, handle jump to runtime. |
| 1288 // Slow case: | |
| 1289 __ bind(&slow); | 1294 __ bind(&slow); |
| 1295 // Entry registers are intact. |
| 1296 // r0: value. |
| 1297 // r1: key. |
| 1298 // r2: receiver. |
| 1290 GenerateRuntimeSetProperty(masm); | 1299 GenerateRuntimeSetProperty(masm); |
| 1291 | 1300 |
| 1292 // Check whether the elements is a pixel array. | 1301 // Check whether the elements is a pixel array. |
| 1293 // r0: value | 1302 // r4: elements map. |
| 1294 // r1: index (as a smi), zero-extended. | |
| 1295 // r3: elements array | |
| 1296 __ bind(&check_pixel_array); | 1303 __ bind(&check_pixel_array); |
| 1297 __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex); | 1304 __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex); |
| 1298 __ cmp(r2, ip); | 1305 __ cmp(r4, ip); |
| 1299 __ b(ne, &slow); | 1306 __ b(ne, &slow); |
| 1300 // Check that the value is a smi. If a conversion is needed call into the | 1307 // Check that the value is a smi. If a conversion is needed call into the |
| 1301 // runtime to convert and clamp. | 1308 // runtime to convert and clamp. |
| 1302 __ BranchOnNotSmi(r0, &slow); | 1309 __ BranchOnNotSmi(value, &slow); |
| 1303 __ mov(r1, Operand(r1, ASR, kSmiTagSize)); // Untag the key. | 1310 __ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag the key. |
| 1304 __ ldr(ip, FieldMemOperand(r3, PixelArray::kLengthOffset)); | 1311 __ ldr(ip, FieldMemOperand(elements, PixelArray::kLengthOffset)); |
| 1305 __ cmp(r1, Operand(ip)); | 1312 __ cmp(r4, Operand(ip)); |
| 1306 __ b(hs, &slow); | 1313 __ b(hs, &slow); |
| 1307 __ mov(r4, r0); // Save the value. | 1314 __ mov(r5, Operand(value, ASR, kSmiTagSize)); // Untag the value. |
| 1308 __ mov(r0, Operand(r0, ASR, kSmiTagSize)); // Untag the value. | |
| 1309 { // Clamp the value to [0..255]. | 1315 { // Clamp the value to [0..255]. |
| 1310 Label done; | 1316 Label done; |
| 1311 __ tst(r0, Operand(0xFFFFFF00)); | 1317 __ tst(r5, Operand(0xFFFFFF00)); |
| 1312 __ b(eq, &done); | 1318 __ b(eq, &done); |
| 1313 __ mov(r0, Operand(0), LeaveCC, mi); // 0 if negative. | 1319 __ mov(r5, Operand(0), LeaveCC, mi); // 0 if negative. |
| 1314 __ mov(r0, Operand(255), LeaveCC, pl); // 255 if positive. | 1320 __ mov(r5, Operand(255), LeaveCC, pl); // 255 if positive. |
| 1315 __ bind(&done); | 1321 __ bind(&done); |
| 1316 } | 1322 } |
| 1317 __ ldr(r2, FieldMemOperand(r3, PixelArray::kExternalPointerOffset)); | 1323 // Get the pointer to the external array. This clobbers elements. |
| 1318 __ strb(r0, MemOperand(r2, r1)); | 1324 __ ldr(elements, |
| 1319 __ mov(r0, Operand(r4)); // Return the original value. | 1325 FieldMemOperand(elements, PixelArray::kExternalPointerOffset)); |
| 1326 __ strb(r5, MemOperand(elements, r4)); // Elements is now external array. |
| 1320 __ Ret(); | 1327 __ Ret(); |
| 1321 | 1328 |
| 1322 | |
| 1323 // Extra capacity case: Check if there is extra capacity to | 1329 // Extra capacity case: Check if there is extra capacity to |
| 1324 // perform the store and update the length. Used for adding one | 1330 // perform the store and update the length. Used for adding one |
| 1325 // element to the array by writing to array[array.length]. | 1331 // element to the array by writing to array[array.length]. |
| 1326 // r0 == value, r1 == key, r2 == elements, r3 == object | |
| 1327 __ bind(&extra); | 1332 __ bind(&extra); |
| 1328 __ b(ne, &slow); // do not leave holes in the array | 1333 // Condition code from comparing key and array length is still available. |
| 1329 __ mov(r1, Operand(r1, ASR, kSmiTagSize)); // untag | 1334 __ b(ne, &slow); // Only support writing to writing to array[array.length]. |
| 1330 __ ldr(ip, FieldMemOperand(r2, Array::kLengthOffset)); | 1335 // Check for room in the elements backing store. |
| 1331 __ cmp(r1, Operand(ip)); | 1336 __ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag key. |
| 1337 __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset)); |
| 1338 __ cmp(r4, Operand(ip)); |
| 1332 __ b(hs, &slow); | 1339 __ b(hs, &slow); |
| 1333 __ mov(r1, Operand(r1, LSL, kSmiTagSize)); // restore tag | 1340 // Calculate key + 1 as smi. |
| 1334 __ add(r1, r1, Operand(1 << kSmiTagSize)); // and increment | 1341 ASSERT_EQ(0, kSmiTag); |
| 1335 __ str(r1, FieldMemOperand(r3, JSArray::kLengthOffset)); | 1342 __ add(r4, key, Operand(Smi::FromInt(1))); |
| 1336 __ mov(r3, Operand(r2)); | 1343 __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset)); |
| 1337 // NOTE: Computing the address to store into must take the fact | |
| 1338 // that the key has been incremented into account. | |
| 1339 int displacement = FixedArray::kHeaderSize - kHeapObjectTag - | |
| 1340 ((1 << kSmiTagSize) * 2); | |
| 1341 __ add(r2, r2, Operand(displacement)); | |
| 1342 __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize)); | |
| 1343 __ b(&fast); | 1344 __ b(&fast); |
| 1344 | 1345 |
| 1345 | |
| 1346 // Array case: Get the length and the elements array from the JS | 1346 // Array case: Get the length and the elements array from the JS |
| 1347 // array. Check that the array is in fast mode; if it is the | 1347 // array. Check that the array is in fast mode; if it is the |
| 1348 // length is always a smi. | 1348 // length is always a smi. |
| 1349 // r0 == value, r3 == object | |
| 1350 __ bind(&array); | 1349 __ bind(&array); |
| 1351 __ ldr(r2, FieldMemOperand(r3, JSObject::kElementsOffset)); | 1350 __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
| 1352 __ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset)); | 1351 __ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset)); |
| 1353 __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); | 1352 __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); |
| 1354 __ cmp(r1, ip); | 1353 __ cmp(r4, ip); |
| 1355 __ b(ne, &slow); | 1354 __ b(ne, &slow); |
| 1356 | 1355 |
| 1357 // Check the key against the length in the array, compute the | 1356 // Check the key against the length in the array. |
| 1358 // address to store into and fall through to fast case. | 1357 __ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset)); |
| 1359 __ ldr(r1, MemOperand(sp)); // restore key | 1358 __ cmp(key, Operand(ip)); |
| 1360 // r0 == value, r1 == key, r2 == elements, r3 == object. | |
| 1361 __ ldr(ip, FieldMemOperand(r3, JSArray::kLengthOffset)); | |
| 1362 __ cmp(r1, Operand(ip)); | |
| 1363 __ b(hs, &extra); | 1359 __ b(hs, &extra); |
| 1364 __ mov(r3, Operand(r2)); | 1360 // Fall through to fast case. |
| 1365 __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | |
| 1366 __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize)); | |
| 1367 | 1361 |
| 1362 __ bind(&fast); |
| 1363 // Fast case, store the value to the elements backing store. |
| 1364 __ add(r5, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
| 1365 __ add(r5, r5, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize)); |
| 1366 __ str(value, MemOperand(r5)); |
| 1367 // Skip write barrier if the written value is a smi. |
| 1368 __ tst(value, Operand(kSmiTagMask)); |
| 1369 __ Ret(eq); |
| 1370 // Update write barrier for the elements array address. |
| 1371 __ sub(r4, r5, Operand(elements)); |
| 1372 __ RecordWrite(elements, r4, r5); |
| 1368 | 1373 |
| 1369 // Fast case: Do the store. | |
| 1370 // r0 == value, r2 == address to store into, r3 == elements | |
| 1371 __ bind(&fast); | |
| 1372 __ str(r0, MemOperand(r2)); | |
| 1373 // Skip write barrier if the written value is a smi. | |
| 1374 __ tst(r0, Operand(kSmiTagMask)); | |
| 1375 __ b(eq, &exit); | |
| 1376 // Update write barrier for the elements array address. | |
| 1377 __ sub(r1, r2, Operand(r3)); | |
| 1378 __ RecordWrite(r3, r1, r2); | |
| 1379 | |
| 1380 __ bind(&exit); | |
| 1381 __ Ret(); | 1374 __ Ret(); |
| 1382 } | 1375 } |
| 1383 | 1376 |
| 1384 | 1377 |
| 1385 // Convert int passed in register ival to IEE 754 single precision | 1378 // Convert int passed in register ival to IEE 754 single precision |
| 1386 // floating point value and store it into register fval. | 1379 // floating point value and store it into register fval. |
| 1387 // If VFP3 is available use it for conversion. | 1380 // If VFP3 is available use it for conversion. |
| 1388 static void ConvertIntToFloat(MacroAssembler* masm, | 1381 static void ConvertIntToFloat(MacroAssembler* masm, |
| 1389 Register ival, | 1382 Register ival, |
| 1390 Register fval, | 1383 Register fval, |
| (...skipping 73 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1464 UNREACHABLE(); | 1457 UNREACHABLE(); |
| 1465 return false; | 1458 return false; |
| 1466 } | 1459 } |
| 1467 } | 1460 } |
| 1468 | 1461 |
| 1469 | 1462 |
| 1470 void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm, | 1463 void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm, |
| 1471 ExternalArrayType array_type) { | 1464 ExternalArrayType array_type) { |
| 1472 // ---------- S t a t e -------------- | 1465 // ---------- S t a t e -------------- |
| 1473 // -- r0 : value | 1466 // -- r0 : value |
| 1467 // -- r1 : key |
| 1468 // -- r2 : receiver |
| 1474 // -- lr : return address | 1469 // -- lr : return address |
| 1475 // -- sp[0] : key | |
| 1476 // -- sp[1] : receiver | |
| 1477 // ----------------------------------- | 1470 // ----------------------------------- |
| 1478 Label slow, check_heap_number; | 1471 Label slow, check_heap_number; |
| 1479 | 1472 |
| 1480 // Get the key and the object from the stack. | 1473 // Register usage. |
| 1481 __ ldm(ia, sp, r1.bit() | r2.bit()); // r1 = key, r2 = receiver | 1474 Register value = r0; |
| 1475 Register key = r1; |
| 1476 Register receiver = r2; |
| 1477 // r3 mostly holds the elements array or the destination external array. |
| 1482 | 1478 |
| 1483 // Check that the object isn't a smi. | 1479 // Check that the object isn't a smi. |
| 1484 __ BranchOnSmi(r2, &slow); | 1480 __ BranchOnSmi(receiver, &slow); |
| 1485 | 1481 |
| 1486 // Check that the object is a JS object. Load map into r3 | 1482 // Check that the object is a JS object. Load map into r3. |
| 1487 __ CompareObjectType(r2, r3, r4, FIRST_JS_OBJECT_TYPE); | 1483 __ CompareObjectType(receiver, r3, r4, FIRST_JS_OBJECT_TYPE); |
| 1488 __ b(le, &slow); | 1484 __ b(le, &slow); |
| 1489 | 1485 |
| 1490 // Check that the receiver does not require access checks. We need | 1486 // Check that the receiver does not require access checks. We need |
| 1491 // to do this because this generic stub does not perform map checks. | 1487 // to do this because this generic stub does not perform map checks. |
| 1492 __ ldrb(ip, FieldMemOperand(r3, Map::kBitFieldOffset)); | 1488 __ ldrb(ip, FieldMemOperand(r3, Map::kBitFieldOffset)); |
| 1493 __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded)); | 1489 __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded)); |
| 1494 __ b(ne, &slow); | 1490 __ b(ne, &slow); |
| 1495 | 1491 |
| 1496 // Check that the key is a smi. | 1492 // Check that the key is a smi. |
| 1497 __ BranchOnNotSmi(r1, &slow); | 1493 __ BranchOnNotSmi(key, &slow); |
| 1498 | 1494 |
| 1499 // Check that the elements array is the appropriate type of | 1495 // Check that the elements array is the appropriate type of ExternalArray. |
| 1500 // ExternalArray. | 1496 __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
| 1501 // r0: value | 1497 __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset)); |
| 1502 // r1: index (smi) | |
| 1503 // r2: object | |
| 1504 __ ldr(r2, FieldMemOperand(r2, JSObject::kElementsOffset)); | |
| 1505 __ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset)); | |
| 1506 __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type)); | 1498 __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type)); |
| 1507 __ cmp(r3, ip); | 1499 __ cmp(r4, ip); |
| 1508 __ b(ne, &slow); | 1500 __ b(ne, &slow); |
| 1509 | 1501 |
| 1510 // Check that the index is in range. | 1502 // Check that the index is in range. |
| 1511 __ mov(r1, Operand(r1, ASR, kSmiTagSize)); // Untag the index. | 1503 __ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag the index. |
| 1512 __ ldr(ip, FieldMemOperand(r2, ExternalArray::kLengthOffset)); | 1504 __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset)); |
| 1513 __ cmp(r1, ip); | 1505 __ cmp(r4, ip); |
| 1514 // Unsigned comparison catches both negative and too-large values. | 1506 // Unsigned comparison catches both negative and too-large values. |
| 1515 __ b(hs, &slow); | 1507 __ b(hs, &slow); |
| 1516 | 1508 |
| 1517 // Handle both smis and HeapNumbers in the fast path. Go to the | 1509 // Handle both smis and HeapNumbers in the fast path. Go to the |
| 1518 // runtime for all other kinds of values. | 1510 // runtime for all other kinds of values. |
| 1519 // r0: value | 1511 // r3: external array. |
| 1520 // r1: index (integer) | 1512 // r4: key (integer). |
| 1521 // r2: array | 1513 __ BranchOnNotSmi(value, &check_heap_number); |
| 1522 __ BranchOnNotSmi(r0, &check_heap_number); | 1514 __ mov(r5, Operand(value, ASR, kSmiTagSize)); // Untag the value. |
| 1523 __ mov(r3, Operand(r0, ASR, kSmiTagSize)); // Untag the value. | 1515 __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset)); |
| 1524 __ ldr(r2, FieldMemOperand(r2, ExternalArray::kExternalPointerOffset)); | |
| 1525 | 1516 |
| 1526 // r1: index (integer) | 1517 // r3: base pointer of external storage. |
| 1527 // r2: base pointer of external storage | 1518 // r4: key (integer). |
| 1528 // r3: value (integer) | 1519 // r5: value (integer). |
| 1529 switch (array_type) { | 1520 switch (array_type) { |
| 1530 case kExternalByteArray: | 1521 case kExternalByteArray: |
| 1531 case kExternalUnsignedByteArray: | 1522 case kExternalUnsignedByteArray: |
| 1532 __ strb(r3, MemOperand(r2, r1, LSL, 0)); | 1523 __ strb(r5, MemOperand(r3, r4, LSL, 0)); |
| 1533 break; | 1524 break; |
| 1534 case kExternalShortArray: | 1525 case kExternalShortArray: |
| 1535 case kExternalUnsignedShortArray: | 1526 case kExternalUnsignedShortArray: |
| 1536 __ strh(r3, MemOperand(r2, r1, LSL, 1)); | 1527 __ strh(r5, MemOperand(r3, r4, LSL, 1)); |
| 1537 break; | 1528 break; |
| 1538 case kExternalIntArray: | 1529 case kExternalIntArray: |
| 1539 case kExternalUnsignedIntArray: | 1530 case kExternalUnsignedIntArray: |
| 1540 __ str(r3, MemOperand(r2, r1, LSL, 2)); | 1531 __ str(r5, MemOperand(r3, r4, LSL, 2)); |
| 1541 break; | 1532 break; |
| 1542 case kExternalFloatArray: | 1533 case kExternalFloatArray: |
| 1543 // Need to perform int-to-float conversion. | 1534 // Need to perform int-to-float conversion. |
| 1544 ConvertIntToFloat(masm, r3, r4, r5, r6); | 1535 ConvertIntToFloat(masm, r5, r6, r7, r9); |
| 1545 __ str(r4, MemOperand(r2, r1, LSL, 2)); | 1536 __ str(r6, MemOperand(r3, r4, LSL, 2)); |
| 1546 break; | 1537 break; |
| 1547 default: | 1538 default: |
| 1548 UNREACHABLE(); | 1539 UNREACHABLE(); |
| 1549 break; | 1540 break; |
| 1550 } | 1541 } |
| 1551 | 1542 |
| 1552 // r0: value | 1543 // Entry registers are intact, r0 holds the value which is the return value. |
| 1553 __ Ret(); | 1544 __ Ret(); |
| 1554 | 1545 |
| 1555 | 1546 |
| 1556 // r0: value | 1547 // r3: external array. |
| 1557 // r1: index (integer) | 1548 // r4: index (integer). |
| 1558 // r2: external array object | |
| 1559 __ bind(&check_heap_number); | 1549 __ bind(&check_heap_number); |
| 1560 __ CompareObjectType(r0, r3, r4, HEAP_NUMBER_TYPE); | 1550 __ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE); |
| 1561 __ b(ne, &slow); | 1551 __ b(ne, &slow); |
| 1562 | 1552 |
| 1563 __ ldr(r2, FieldMemOperand(r2, ExternalArray::kExternalPointerOffset)); | 1553 __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset)); |
| 1554 |
| 1555 // r3: base pointer of external storage. |
| 1556 // r4: key (integer). |
| 1564 | 1557 |
| 1565 // The WebGL specification leaves the behavior of storing NaN and | 1558 // The WebGL specification leaves the behavior of storing NaN and |
| 1566 // +/-Infinity into integer arrays basically undefined. For more | 1559 // +/-Infinity into integer arrays basically undefined. For more |
| 1567 // reproducible behavior, convert these to zero. | 1560 // reproducible behavior, convert these to zero. |
| 1568 if (CpuFeatures::IsSupported(VFP3)) { | 1561 if (CpuFeatures::IsSupported(VFP3)) { |
| 1569 CpuFeatures::Scope scope(VFP3); | 1562 CpuFeatures::Scope scope(VFP3); |
| 1570 | 1563 |
| 1571 // vldr requires offset to be a multiple of 4 so we can not | 1564 // vldr requires offset to be a multiple of 4 so we can not |
| 1572 // include -kHeapObjectTag into it. | 1565 // include -kHeapObjectTag into it. |
| 1573 __ sub(r3, r0, Operand(kHeapObjectTag)); | 1566 __ sub(r5, r0, Operand(kHeapObjectTag)); |
| 1574 __ vldr(d0, r3, HeapNumber::kValueOffset); | 1567 __ vldr(d0, r5, HeapNumber::kValueOffset); |
| 1575 | 1568 |
| 1576 if (array_type == kExternalFloatArray) { | 1569 if (array_type == kExternalFloatArray) { |
| 1577 __ vcvt_f32_f64(s0, d0); | 1570 __ vcvt_f32_f64(s0, d0); |
| 1578 __ vmov(r3, s0); | 1571 __ vmov(r5, s0); |
| 1579 __ str(r3, MemOperand(r2, r1, LSL, 2)); | 1572 __ str(r5, MemOperand(r3, r4, LSL, 2)); |
| 1580 } else { | 1573 } else { |
| 1581 Label done; | 1574 Label done; |
| 1582 | 1575 |
| 1583 // Need to perform float-to-int conversion. | 1576 // Need to perform float-to-int conversion. |
| 1584 // Test for NaN. | 1577 // Test for NaN. |
| 1585 __ vcmp(d0, d0); | 1578 __ vcmp(d0, d0); |
| 1586 // Move vector status bits to normal status bits. | 1579 // Move vector status bits to normal status bits. |
| 1587 __ vmrs(v8::internal::pc); | 1580 __ vmrs(v8::internal::pc); |
| 1588 __ mov(r3, Operand(0), LeaveCC, vs); // NaN converts to 0 | 1581 __ mov(r5, Operand(0), LeaveCC, vs); // NaN converts to 0. |
| 1589 __ b(vs, &done); | 1582 __ b(vs, &done); |
| 1590 | 1583 |
| 1591 // Test whether exponent equal to 0x7FF (infinity or NaN) | 1584 // Test whether exponent equal to 0x7FF (infinity or NaN). |
| 1592 __ vmov(r4, r3, d0); | 1585 __ vmov(r6, r7, d0); |
| 1593 __ mov(r5, Operand(0x7FF00000)); | 1586 __ mov(r5, Operand(0x7FF00000)); |
| 1594 __ and_(r3, r3, Operand(r5)); | 1587 __ and_(r6, r6, Operand(r5)); |
| 1595 __ teq(r3, Operand(r5)); | 1588 __ teq(r6, Operand(r5)); |
| 1596 __ mov(r3, Operand(0), LeaveCC, eq); | 1589 __ mov(r6, Operand(0), LeaveCC, eq); |
| 1597 | 1590 |
| 1598 // Not infinity or NaN simply convert to int | 1591 // Not infinity or NaN simply convert to int. |
| 1599 if (IsElementTypeSigned(array_type)) { | 1592 if (IsElementTypeSigned(array_type)) { |
| 1600 __ vcvt_s32_f64(s0, d0, ne); | 1593 __ vcvt_s32_f64(s0, d0, ne); |
| 1601 } else { | 1594 } else { |
| 1602 __ vcvt_u32_f64(s0, d0, ne); | 1595 __ vcvt_u32_f64(s0, d0, ne); |
| 1603 } | 1596 } |
| 1604 | 1597 |
| 1605 __ vmov(r3, s0, ne); | 1598 __ vmov(r5, s0, ne); |
| 1606 | 1599 |
| 1607 __ bind(&done); | 1600 __ bind(&done); |
| 1608 switch (array_type) { | 1601 switch (array_type) { |
| 1609 case kExternalByteArray: | 1602 case kExternalByteArray: |
| 1610 case kExternalUnsignedByteArray: | 1603 case kExternalUnsignedByteArray: |
| 1611 __ strb(r3, MemOperand(r2, r1, LSL, 0)); | 1604 __ strb(r5, MemOperand(r3, r4, LSL, 0)); |
| 1612 break; | 1605 break; |
| 1613 case kExternalShortArray: | 1606 case kExternalShortArray: |
| 1614 case kExternalUnsignedShortArray: | 1607 case kExternalUnsignedShortArray: |
| 1615 __ strh(r3, MemOperand(r2, r1, LSL, 1)); | 1608 __ strh(r5, MemOperand(r3, r4, LSL, 1)); |
| 1616 break; | 1609 break; |
| 1617 case kExternalIntArray: | 1610 case kExternalIntArray: |
| 1618 case kExternalUnsignedIntArray: | 1611 case kExternalUnsignedIntArray: |
| 1619 __ str(r3, MemOperand(r2, r1, LSL, 2)); | 1612 __ str(r5, MemOperand(r3, r4, LSL, 2)); |
| 1620 break; | 1613 break; |
| 1621 default: | 1614 default: |
| 1622 UNREACHABLE(); | 1615 UNREACHABLE(); |
| 1623 break; | 1616 break; |
| 1624 } | 1617 } |
| 1625 } | 1618 } |
| 1626 | 1619 |
| 1627 // r0: original value | 1620 // Entry registers are intact, r0 holds the value which is the return value. |
| 1628 __ Ret(); | 1621 __ Ret(); |
| 1629 } else { | 1622 } else { |
| 1630 // VFP3 is not available, do manual conversions | 1623 // VFP3 is not available, do manual conversions. |
| 1631 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kExponentOffset)); | 1624 __ ldr(r5, FieldMemOperand(value, HeapNumber::kExponentOffset)); |
| 1632 __ ldr(r4, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); | 1625 __ ldr(r6, FieldMemOperand(value, HeapNumber::kMantissaOffset)); |
| 1633 | 1626 |
| 1634 if (array_type == kExternalFloatArray) { | 1627 if (array_type == kExternalFloatArray) { |
| 1635 Label done, nan_or_infinity_or_zero; | 1628 Label done, nan_or_infinity_or_zero; |
| 1636 static const int kMantissaInHiWordShift = | 1629 static const int kMantissaInHiWordShift = |
| 1637 kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord; | 1630 kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord; |
| 1638 | 1631 |
| 1639 static const int kMantissaInLoWordShift = | 1632 static const int kMantissaInLoWordShift = |
| 1640 kBitsPerInt - kMantissaInHiWordShift; | 1633 kBitsPerInt - kMantissaInHiWordShift; |
| 1641 | 1634 |
| 1642 // Test for all special exponent values: zeros, subnormal numbers, NaNs | 1635 // Test for all special exponent values: zeros, subnormal numbers, NaNs |
| 1643 // and infinities. All these should be converted to 0. | 1636 // and infinities. All these should be converted to 0. |
| 1644 __ mov(r5, Operand(HeapNumber::kExponentMask)); | 1637 __ mov(r7, Operand(HeapNumber::kExponentMask)); |
| 1645 __ and_(r6, r3, Operand(r5), SetCC); | 1638 __ and_(r9, r5, Operand(r7), SetCC); |
| 1646 __ b(eq, &nan_or_infinity_or_zero); | 1639 __ b(eq, &nan_or_infinity_or_zero); |
| 1647 | 1640 |
| 1648 __ teq(r6, Operand(r5)); | 1641 __ teq(r9, Operand(r7)); |
| 1649 __ mov(r6, Operand(kBinary32ExponentMask), LeaveCC, eq); | 1642 __ mov(r9, Operand(kBinary32ExponentMask), LeaveCC, eq); |
| 1650 __ b(eq, &nan_or_infinity_or_zero); | 1643 __ b(eq, &nan_or_infinity_or_zero); |
| 1651 | 1644 |
| 1652 // Rebias exponent. | 1645 // Rebias exponent. |
| 1653 __ mov(r6, Operand(r6, LSR, HeapNumber::kExponentShift)); | 1646 __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift)); |
| 1654 __ add(r6, | 1647 __ add(r9, |
| 1655 r6, | 1648 r9, |
| 1656 Operand(kBinary32ExponentBias - HeapNumber::kExponentBias)); | 1649 Operand(kBinary32ExponentBias - HeapNumber::kExponentBias)); |
| 1657 | 1650 |
| 1658 __ cmp(r6, Operand(kBinary32MaxExponent)); | 1651 __ cmp(r9, Operand(kBinary32MaxExponent)); |
| 1659 __ and_(r3, r3, Operand(HeapNumber::kSignMask), LeaveCC, gt); | 1652 __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, gt); |
| 1660 __ orr(r3, r3, Operand(kBinary32ExponentMask), LeaveCC, gt); | 1653 __ orr(r5, r5, Operand(kBinary32ExponentMask), LeaveCC, gt); |
| 1661 __ b(gt, &done); | 1654 __ b(gt, &done); |
| 1662 | 1655 |
| 1663 __ cmp(r6, Operand(kBinary32MinExponent)); | 1656 __ cmp(r9, Operand(kBinary32MinExponent)); |
| 1664 __ and_(r3, r3, Operand(HeapNumber::kSignMask), LeaveCC, lt); | 1657 __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, lt); |
| 1665 __ b(lt, &done); | 1658 __ b(lt, &done); |
| 1666 | 1659 |
| 1667 __ and_(r7, r3, Operand(HeapNumber::kSignMask)); | 1660 __ and_(r7, r5, Operand(HeapNumber::kSignMask)); |
| 1668 __ and_(r3, r3, Operand(HeapNumber::kMantissaMask)); | 1661 __ and_(r5, r5, Operand(HeapNumber::kMantissaMask)); |
| 1669 __ orr(r7, r7, Operand(r3, LSL, kMantissaInHiWordShift)); | 1662 __ orr(r7, r7, Operand(r5, LSL, kMantissaInHiWordShift)); |
| 1670 __ orr(r7, r7, Operand(r4, LSR, kMantissaInLoWordShift)); | 1663 __ orr(r7, r7, Operand(r6, LSR, kMantissaInLoWordShift)); |
| 1671 __ orr(r3, r7, Operand(r6, LSL, kBinary32ExponentShift)); | 1664 __ orr(r5, r7, Operand(r9, LSL, kBinary32ExponentShift)); |
| 1672 | 1665 |
| 1673 __ bind(&done); | 1666 __ bind(&done); |
| 1674 __ str(r3, MemOperand(r2, r1, LSL, 2)); | 1667 __ str(r5, MemOperand(r3, r4, LSL, 2)); |
| 1668 // Entry registers are intact, r0 holds the value which is the return |
| 1669 // value. |
| 1675 __ Ret(); | 1670 __ Ret(); |
| 1676 | 1671 |
| 1677 __ bind(&nan_or_infinity_or_zero); | 1672 __ bind(&nan_or_infinity_or_zero); |
| 1678 __ and_(r7, r3, Operand(HeapNumber::kSignMask)); | 1673 __ and_(r7, r5, Operand(HeapNumber::kSignMask)); |
| 1679 __ and_(r3, r3, Operand(HeapNumber::kMantissaMask)); | 1674 __ and_(r5, r5, Operand(HeapNumber::kMantissaMask)); |
| 1680 __ orr(r6, r6, r7); | 1675 __ orr(r9, r9, r7); |
| 1681 __ orr(r6, r6, Operand(r3, LSL, kMantissaInHiWordShift)); | 1676 __ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift)); |
| 1682 __ orr(r3, r6, Operand(r4, LSR, kMantissaInLoWordShift)); | 1677 __ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift)); |
| 1683 __ b(&done); | 1678 __ b(&done); |
| 1684 } else { | 1679 } else { |
| 1685 bool is_signed_type = IsElementTypeSigned(array_type); | 1680 bool is_signed_type = IsElementTypeSigned(array_type); |
| 1686 int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt; | 1681 int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt; |
| 1687 int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000; | 1682 int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000; |
| 1688 | 1683 |
| 1689 Label done, sign; | 1684 Label done, sign; |
| 1690 | 1685 |
| 1691 // Test for all special exponent values: zeros, subnormal numbers, NaNs | 1686 // Test for all special exponent values: zeros, subnormal numbers, NaNs |
| 1692 // and infinities. All these should be converted to 0. | 1687 // and infinities. All these should be converted to 0. |
| 1693 __ mov(r5, Operand(HeapNumber::kExponentMask)); | 1688 __ mov(r7, Operand(HeapNumber::kExponentMask)); |
| 1694 __ and_(r6, r3, Operand(r5), SetCC); | 1689 __ and_(r9, r5, Operand(r7), SetCC); |
| 1695 __ mov(r3, Operand(0), LeaveCC, eq); | 1690 __ mov(r5, Operand(0), LeaveCC, eq); |
| 1696 __ b(eq, &done); | 1691 __ b(eq, &done); |
| 1697 | 1692 |
| 1698 __ teq(r6, Operand(r5)); | 1693 __ teq(r9, Operand(r7)); |
| 1699 __ mov(r3, Operand(0), LeaveCC, eq); | 1694 __ mov(r5, Operand(0), LeaveCC, eq); |
| 1700 __ b(eq, &done); | 1695 __ b(eq, &done); |
| 1701 | 1696 |
| 1702 // Unbias exponent. | 1697 // Unbias exponent. |
| 1703 __ mov(r6, Operand(r6, LSR, HeapNumber::kExponentShift)); | 1698 __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift)); |
| 1704 __ sub(r6, r6, Operand(HeapNumber::kExponentBias), SetCC); | 1699 __ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC); |
| 1705 // If exponent is negative then result is 0. | 1700 // If exponent is negative then result is 0. |
| 1706 __ mov(r3, Operand(0), LeaveCC, mi); | 1701 __ mov(r5, Operand(0), LeaveCC, mi); |
| 1707 __ b(mi, &done); | 1702 __ b(mi, &done); |
| 1708 | 1703 |
| 1709 // If exponent is too big then result is minimal value | 1704 // If exponent is too big then result is minimal value. |
| 1710 __ cmp(r6, Operand(meaningfull_bits - 1)); | 1705 __ cmp(r9, Operand(meaningfull_bits - 1)); |
| 1711 __ mov(r3, Operand(min_value), LeaveCC, ge); | 1706 __ mov(r5, Operand(min_value), LeaveCC, ge); |
| 1712 __ b(ge, &done); | 1707 __ b(ge, &done); |
| 1713 | 1708 |
| 1714 __ and_(r5, r3, Operand(HeapNumber::kSignMask), SetCC); | 1709 __ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC); |
| 1715 __ and_(r3, r3, Operand(HeapNumber::kMantissaMask)); | 1710 __ and_(r5, r5, Operand(HeapNumber::kMantissaMask)); |
| 1716 __ orr(r3, r3, Operand(1u << HeapNumber::kMantissaBitsInTopWord)); | 1711 __ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord)); |
| 1717 | 1712 |
| 1718 __ rsb(r6, r6, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC); | 1713 __ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC); |
| 1719 __ mov(r3, Operand(r3, LSR, r6), LeaveCC, pl); | 1714 __ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl); |
| 1720 __ b(pl, &sign); | 1715 __ b(pl, &sign); |
| 1721 | 1716 |
| 1722 __ rsb(r6, r6, Operand(0)); | 1717 __ rsb(r9, r9, Operand(0)); |
| 1723 __ mov(r3, Operand(r3, LSL, r6)); | 1718 __ mov(r5, Operand(r5, LSL, r9)); |
| 1724 __ rsb(r6, r6, Operand(meaningfull_bits)); | 1719 __ rsb(r9, r9, Operand(meaningfull_bits)); |
| 1725 __ orr(r3, r3, Operand(r4, LSR, r6)); | 1720 __ orr(r5, r5, Operand(r6, LSR, r9)); |
| 1726 | 1721 |
| 1727 __ bind(&sign); | 1722 __ bind(&sign); |
| 1728 __ teq(r5, Operand(0)); | 1723 __ teq(r7, Operand(0)); |
| 1729 __ rsb(r3, r3, Operand(0), LeaveCC, ne); | 1724 __ rsb(r5, r5, Operand(0), LeaveCC, ne); |
| 1730 | 1725 |
| 1731 __ bind(&done); | 1726 __ bind(&done); |
| 1732 switch (array_type) { | 1727 switch (array_type) { |
| 1733 case kExternalByteArray: | 1728 case kExternalByteArray: |
| 1734 case kExternalUnsignedByteArray: | 1729 case kExternalUnsignedByteArray: |
| 1735 __ strb(r3, MemOperand(r2, r1, LSL, 0)); | 1730 __ strb(r5, MemOperand(r3, r4, LSL, 0)); |
| 1736 break; | 1731 break; |
| 1737 case kExternalShortArray: | 1732 case kExternalShortArray: |
| 1738 case kExternalUnsignedShortArray: | 1733 case kExternalUnsignedShortArray: |
| 1739 __ strh(r3, MemOperand(r2, r1, LSL, 1)); | 1734 __ strh(r5, MemOperand(r3, r4, LSL, 1)); |
| 1740 break; | 1735 break; |
| 1741 case kExternalIntArray: | 1736 case kExternalIntArray: |
| 1742 case kExternalUnsignedIntArray: | 1737 case kExternalUnsignedIntArray: |
| 1743 __ str(r3, MemOperand(r2, r1, LSL, 2)); | 1738 __ str(r5, MemOperand(r3, r4, LSL, 2)); |
| 1744 break; | 1739 break; |
| 1745 default: | 1740 default: |
| 1746 UNREACHABLE(); | 1741 UNREACHABLE(); |
| 1747 break; | 1742 break; |
| 1748 } | 1743 } |
| 1749 } | 1744 } |
| 1750 } | 1745 } |
| 1751 | 1746 |
| 1752 // Slow case: call runtime. | 1747 // Slow case: call runtime. |
| 1753 __ bind(&slow); | 1748 __ bind(&slow); |
| 1749 |
| 1750 // Entry registers are intact. |
| 1751 // r0: value |
| 1752 // r1: key |
| 1753 // r2: receiver |
| 1754 GenerateRuntimeSetProperty(masm); | 1754 GenerateRuntimeSetProperty(masm); |
| 1755 } | 1755 } |
| 1756 | 1756 |
| 1757 | 1757 |
| 1758 void StoreIC::GenerateMegamorphic(MacroAssembler* masm) { | 1758 void StoreIC::GenerateMegamorphic(MacroAssembler* masm) { |
| 1759 // ----------- S t a t e ------------- | 1759 // ----------- S t a t e ------------- |
| 1760 // -- r0 : value | 1760 // -- r0 : value |
| 1761 // -- r1 : receiver | 1761 // -- r1 : receiver |
| 1762 // -- r2 : name | 1762 // -- r2 : name |
| 1763 // -- lr : return address | 1763 // -- lr : return address |
| (...skipping 70 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1834 __ bind(&miss); | 1834 __ bind(&miss); |
| 1835 | 1835 |
| 1836 GenerateMiss(masm); | 1836 GenerateMiss(masm); |
| 1837 } | 1837 } |
| 1838 | 1838 |
| 1839 | 1839 |
| 1840 #undef __ | 1840 #undef __ |
| 1841 | 1841 |
| 1842 | 1842 |
| 1843 } } // namespace v8::internal | 1843 } } // namespace v8::internal |
| 1844 |
| 1845 #endif // V8_TARGET_ARCH_ARM |
| OLD | NEW |