Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(223)

Side by Side Diff: src/arm/ic-arm.cc

Issue 2116003: ARM: Pass arguments to keyed store IC in registers... (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 10 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/arm/full-codegen-arm.cc ('k') | src/arm/stub-cache-arm.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2006-2008 the V8 project authors. All rights reserved. 1 // Copyright 2006-2008 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 625 matching lines...) Expand 10 before | Expand all | Expand 10 after
636 } 636 }
637 637
638 638
639 bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) { 639 bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
640 Address inline_end_address; 640 Address inline_end_address;
641 if (!IsInlinedICSite(address, &inline_end_address)) return false; 641 if (!IsInlinedICSite(address, &inline_end_address)) return false;
642 642
643 // Patch the map check. 643 // Patch the map check.
644 Address ldr_map_instr_address = 644 Address ldr_map_instr_address =
645 inline_end_address - 645 inline_end_address -
646 CodeGenerator::kInlinedKeyedLoadInstructionsAfterPatchSize * 646 (CodeGenerator::kInlinedKeyedLoadInstructionsAfterPatch *
647 Assembler::kInstrSize; 647 Assembler::kInstrSize);
648 Assembler::set_target_address_at(ldr_map_instr_address, 648 Assembler::set_target_address_at(ldr_map_instr_address,
649 reinterpret_cast<Address>(map)); 649 reinterpret_cast<Address>(map));
650 return true; 650 return true;
651 } 651 }
652 652
653 653
654 void KeyedStoreIC::ClearInlinedVersion(Address address) { 654 void KeyedStoreIC::ClearInlinedVersion(Address address) {
655 // Insert null as the elements map to check for. This will make 655 // Insert null as the elements map to check for. This will make
656 // sure that the elements fast-case map check fails so that control 656 // sure that the elements fast-case map check fails so that control
657 // flows to the IC instead of the inlined version. 657 // flows to the IC instead of the inlined version.
658 PatchInlinedStore(address, Heap::null_value()); 658 PatchInlinedStore(address, Heap::null_value());
659 } 659 }
660 660
661 661
662 void KeyedStoreIC::RestoreInlinedVersion(Address address) { 662 void KeyedStoreIC::RestoreInlinedVersion(Address address) {
663 // Restore the fast-case elements map check so that the inlined 663 // Restore the fast-case elements map check so that the inlined
664 // version can be used again. 664 // version can be used again.
665 PatchInlinedStore(address, Heap::fixed_array_map()); 665 PatchInlinedStore(address, Heap::fixed_array_map());
666 } 666 }
667 667
668 668
669 bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) { 669 bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
670 // Find the end of the inlined code for handling the store if this is an 670 // Find the end of the inlined code for handling the store if this is an
671 // inlined IC call site. 671 // inlined IC call site.
672 Address inline_end_address; 672 Address inline_end_address;
673 if (!IsInlinedICSite(address, &inline_end_address)) return false; 673 if (!IsInlinedICSite(address, &inline_end_address)) return false;
674 674
675 // Patch the map check. 675 // Patch the map check.
676 Address ldr_map_instr_address = 676 Address ldr_map_instr_address =
677 inline_end_address - 5 * Assembler::kInstrSize; 677 inline_end_address -
678 (CodeGenerator::kInlinedKeyedStoreInstructionsAfterPatch *
679 Assembler::kInstrSize);
678 Assembler::set_target_address_at(ldr_map_instr_address, 680 Assembler::set_target_address_at(ldr_map_instr_address,
679 reinterpret_cast<Address>(map)); 681 reinterpret_cast<Address>(map));
680 return true; 682 return true;
681 } 683 }
682 684
683 685
684 Object* KeyedLoadIC_Miss(Arguments args); 686 Object* KeyedLoadIC_Miss(Arguments args);
685 687
686 688
687 void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) { 689 void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
(...skipping 514 matching lines...) Expand 10 before | Expand all | Expand 10 after
1202 IC_Utility(kKeyedLoadPropertyWithInterceptor)), 2, 1); 1204 IC_Utility(kKeyedLoadPropertyWithInterceptor)), 2, 1);
1203 1205
1204 __ bind(&slow); 1206 __ bind(&slow);
1205 GenerateMiss(masm); 1207 GenerateMiss(masm);
1206 } 1208 }
1207 1209
1208 1210
1209 void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) { 1211 void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
1210 // ---------- S t a t e -------------- 1212 // ---------- S t a t e --------------
1211 // -- r0 : value 1213 // -- r0 : value
1214 // -- r1 : key
1215 // -- r2 : receiver
1212 // -- lr : return address 1216 // -- lr : return address
1213 // -- sp[0] : key
1214 // -- sp[1] : receiver
1215 // ----------------------------------- 1217 // -----------------------------------
1216 1218
1217 __ ldm(ia, sp, r2.bit() | r3.bit()); 1219 // Push receiver, key and value for runtime call.
1218 __ Push(r3, r2, r0); 1220 __ Push(r2, r1, r0);
1219 1221
1220 ExternalReference ref = ExternalReference(IC_Utility(kKeyedStoreIC_Miss)); 1222 ExternalReference ref = ExternalReference(IC_Utility(kKeyedStoreIC_Miss));
1221 __ TailCallExternalReference(ref, 3, 1); 1223 __ TailCallExternalReference(ref, 3, 1);
1222 } 1224 }
1223 1225
1224 1226
1225 void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) { 1227 void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) {
1226 // ---------- S t a t e -------------- 1228 // ---------- S t a t e --------------
1227 // -- r0 : value 1229 // -- r0 : value
1230 // -- r1 : key
1231 // -- r2 : receiver
1228 // -- lr : return address 1232 // -- lr : return address
1229 // -- sp[0] : key
1230 // -- sp[1] : receiver
1231 // ----------------------------------- 1233 // -----------------------------------
1232 __ ldm(ia, sp, r1.bit() | r3.bit()); // r0 == value, r1 == key, r3 == object 1234
1233 __ Push(r3, r1, r0); 1235 // Push receiver, key and value for runtime call.
1236 __ Push(r2, r1, r0);
1234 1237
1235 __ TailCallRuntime(Runtime::kSetProperty, 3, 1); 1238 __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
1236 } 1239 }
1237 1240
1238 1241
1239 void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) { 1242 void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
1240 // ---------- S t a t e -------------- 1243 // ---------- S t a t e --------------
1241 // -- r0 : value 1244 // -- r0 : value
1245 // -- r1 : key
1246 // -- r2 : receiver
1242 // -- lr : return address 1247 // -- lr : return address
1243 // -- sp[0] : key
1244 // -- sp[1] : receiver
1245 // ----------------------------------- 1248 // -----------------------------------
1246 Label slow, fast, array, extra, exit, check_pixel_array; 1249 Label slow, fast, array, extra, check_pixel_array;
1247 1250
1248 // Get the key and the object from the stack. 1251 // Register usage.
1249 __ ldm(ia, sp, r1.bit() | r3.bit()); // r1 = key, r3 = receiver 1252 Register value = r0;
1253 Register key = r1;
1254 Register receiver = r2;
1255 Register elements = r3; // Elements array of the receiver.
1256 // r4 and r5 are used as general scratch registers.
1257
1250 // Check that the key is a smi. 1258 // Check that the key is a smi.
1251 __ tst(r1, Operand(kSmiTagMask)); 1259 __ tst(key, Operand(kSmiTagMask));
1252 __ b(ne, &slow); 1260 __ b(ne, &slow);
1253 // Check that the object isn't a smi. 1261 // Check that the object isn't a smi.
1254 __ tst(r3, Operand(kSmiTagMask)); 1262 __ tst(receiver, Operand(kSmiTagMask));
1255 __ b(eq, &slow); 1263 __ b(eq, &slow);
1256 // Get the map of the object. 1264 // Get the map of the object.
1257 __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset)); 1265 __ ldr(r4, FieldMemOperand(receiver, HeapObject::kMapOffset));
1258 // Check that the receiver does not require access checks. We need 1266 // Check that the receiver does not require access checks. We need
1259 // to do this because this generic stub does not perform map checks. 1267 // to do this because this generic stub does not perform map checks.
1260 __ ldrb(ip, FieldMemOperand(r2, Map::kBitFieldOffset)); 1268 __ ldrb(ip, FieldMemOperand(r4, Map::kBitFieldOffset));
1261 __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded)); 1269 __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
1262 __ b(ne, &slow); 1270 __ b(ne, &slow);
1263 // Check if the object is a JS array or not. 1271 // Check if the object is a JS array or not.
1264 __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset)); 1272 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
1265 __ cmp(r2, Operand(JS_ARRAY_TYPE)); 1273 __ cmp(r4, Operand(JS_ARRAY_TYPE));
1266 // r1 == key.
1267 __ b(eq, &array); 1274 __ b(eq, &array);
1268 // Check that the object is some kind of JS object. 1275 // Check that the object is some kind of JS object.
1269 __ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE)); 1276 __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
1270 __ b(lt, &slow); 1277 __ b(lt, &slow);
1271 1278
1272
1273 // Object case: Check key against length in the elements array. 1279 // Object case: Check key against length in the elements array.
1274 __ ldr(r3, FieldMemOperand(r3, JSObject::kElementsOffset)); 1280 __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
1275 // Check that the object is in fast mode (not dictionary). 1281 // Check that the object is in fast mode (not dictionary).
1276 __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset)); 1282 __ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset));
1277 __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); 1283 __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
1278 __ cmp(r2, ip); 1284 __ cmp(r4, ip);
1279 __ b(ne, &check_pixel_array); 1285 __ b(ne, &check_pixel_array);
1280 // Untag the key (for checking against untagged length in the fixed array). 1286 // Untag the key (for checking against untagged length in the fixed array).
1281 __ mov(r1, Operand(r1, ASR, kSmiTagSize)); 1287 __ mov(r4, Operand(key, ASR, kSmiTagSize));
1282 // Compute address to store into and check array bounds. 1288 // Compute address to store into and check array bounds.
1283 __ add(r2, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); 1289 __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
1284 __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2)); 1290 __ cmp(r4, Operand(ip));
1285 __ ldr(ip, FieldMemOperand(r3, FixedArray::kLengthOffset));
1286 __ cmp(r1, Operand(ip));
1287 __ b(lo, &fast); 1291 __ b(lo, &fast);
1288 1292
1289 1293 // Slow case, handle jump to runtime.
1290 // Slow case:
1291 __ bind(&slow); 1294 __ bind(&slow);
1295 // Entry registers are intact.
1296 // r0: value.
1297 // r1: key.
1298 // r2: receiver.
1292 GenerateRuntimeSetProperty(masm); 1299 GenerateRuntimeSetProperty(masm);
1293 1300
1294 // Check whether the elements is a pixel array. 1301 // Check whether the elements is a pixel array.
1295 // r0: value 1302 // r4: elements map.
1296 // r1: index (as a smi), zero-extended.
1297 // r3: elements array
1298 __ bind(&check_pixel_array); 1303 __ bind(&check_pixel_array);
1299 __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex); 1304 __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
1300 __ cmp(r2, ip); 1305 __ cmp(r4, ip);
1301 __ b(ne, &slow); 1306 __ b(ne, &slow);
1302 // Check that the value is a smi. If a conversion is needed call into the 1307 // Check that the value is a smi. If a conversion is needed call into the
1303 // runtime to convert and clamp. 1308 // runtime to convert and clamp.
1304 __ BranchOnNotSmi(r0, &slow); 1309 __ BranchOnNotSmi(value, &slow);
1305 __ mov(r1, Operand(r1, ASR, kSmiTagSize)); // Untag the key. 1310 __ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag the key.
1306 __ ldr(ip, FieldMemOperand(r3, PixelArray::kLengthOffset)); 1311 __ ldr(ip, FieldMemOperand(elements, PixelArray::kLengthOffset));
1307 __ cmp(r1, Operand(ip)); 1312 __ cmp(r4, Operand(ip));
1308 __ b(hs, &slow); 1313 __ b(hs, &slow);
1309 __ mov(r4, r0); // Save the value. 1314 __ mov(r5, Operand(value, ASR, kSmiTagSize)); // Untag the value.
1310 __ mov(r0, Operand(r0, ASR, kSmiTagSize)); // Untag the value.
1311 { // Clamp the value to [0..255]. 1315 { // Clamp the value to [0..255].
1312 Label done; 1316 Label done;
1313 __ tst(r0, Operand(0xFFFFFF00)); 1317 __ tst(r5, Operand(0xFFFFFF00));
1314 __ b(eq, &done); 1318 __ b(eq, &done);
1315 __ mov(r0, Operand(0), LeaveCC, mi); // 0 if negative. 1319 __ mov(r5, Operand(0), LeaveCC, mi); // 0 if negative.
1316 __ mov(r0, Operand(255), LeaveCC, pl); // 255 if positive. 1320 __ mov(r5, Operand(255), LeaveCC, pl); // 255 if positive.
1317 __ bind(&done); 1321 __ bind(&done);
1318 } 1322 }
1319 __ ldr(r2, FieldMemOperand(r3, PixelArray::kExternalPointerOffset)); 1323 // Get the pointer to the external array. This clobbers elements.
1320 __ strb(r0, MemOperand(r2, r1)); 1324 __ ldr(elements,
1321 __ mov(r0, Operand(r4)); // Return the original value. 1325 FieldMemOperand(elements, PixelArray::kExternalPointerOffset));
1326 __ strb(r5, MemOperand(elements, r4)); // Elements is now external array.
1322 __ Ret(); 1327 __ Ret();
1323 1328
1324
1325 // Extra capacity case: Check if there is extra capacity to 1329 // Extra capacity case: Check if there is extra capacity to
1326 // perform the store and update the length. Used for adding one 1330 // perform the store and update the length. Used for adding one
1327 // element to the array by writing to array[array.length]. 1331 // element to the array by writing to array[array.length].
1328 // r0 == value, r1 == key, r2 == elements, r3 == object
1329 __ bind(&extra); 1332 __ bind(&extra);
1330 __ b(ne, &slow); // do not leave holes in the array 1333 // Condition code from comparing key and array length is still available.
1331 __ mov(r1, Operand(r1, ASR, kSmiTagSize)); // untag 1334 __ b(ne, &slow); // Only support writing to array[array.length].
1332 __ ldr(ip, FieldMemOperand(r2, Array::kLengthOffset)); 1335 // Check for room in the elements backing store.
1333 __ cmp(r1, Operand(ip)); 1336 __ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag key.
1337 __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
1338 __ cmp(r4, Operand(ip));
1334 __ b(hs, &slow); 1339 __ b(hs, &slow);
1335 __ mov(r1, Operand(r1, LSL, kSmiTagSize)); // restore tag 1340 // Calculate key + 1 as smi.
1336 __ add(r1, r1, Operand(1 << kSmiTagSize)); // and increment 1341 ASSERT_EQ(0, kSmiTag);
1337 __ str(r1, FieldMemOperand(r3, JSArray::kLengthOffset)); 1342 __ add(r4, key, Operand(Smi::FromInt(1)));
1338 __ mov(r3, Operand(r2)); 1343 __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
1339 // NOTE: Computing the address to store into must take the fact
1340 // that the key has been incremented into account.
1341 int displacement = FixedArray::kHeaderSize - kHeapObjectTag -
1342 ((1 << kSmiTagSize) * 2);
1343 __ add(r2, r2, Operand(displacement));
1344 __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
1345 __ b(&fast); 1344 __ b(&fast);
1346 1345
1347
1348 // Array case: Get the length and the elements array from the JS 1346 // Array case: Get the length and the elements array from the JS
1349 // array. Check that the array is in fast mode; if it is the 1347 // array. Check that the array is in fast mode; if it is the
1350 // length is always a smi. 1348 // length is always a smi.
1351 // r0 == value, r3 == object
1352 __ bind(&array); 1349 __ bind(&array);
1353 __ ldr(r2, FieldMemOperand(r3, JSObject::kElementsOffset)); 1350 __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
1354 __ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset)); 1351 __ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset));
1355 __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); 1352 __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
1356 __ cmp(r1, ip); 1353 __ cmp(r4, ip);
1357 __ b(ne, &slow); 1354 __ b(ne, &slow);
1358 1355
1359 // Check the key against the length in the array, compute the 1356 // Check the key against the length in the array.
1360 // address to store into and fall through to fast case. 1357 __ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
1361 __ ldr(r1, MemOperand(sp)); // restore key 1358 __ cmp(key, Operand(ip));
1362 // r0 == value, r1 == key, r2 == elements, r3 == object.
1363 __ ldr(ip, FieldMemOperand(r3, JSArray::kLengthOffset));
1364 __ cmp(r1, Operand(ip));
1365 __ b(hs, &extra); 1359 __ b(hs, &extra);
1366 __ mov(r3, Operand(r2)); 1360 // Fall through to fast case.
1367 __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
1368 __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
1369 1361
1362 __ bind(&fast);
1363 // Fast case, store the value to the elements backing store.
1364 __ add(r5, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
1365 __ add(r5, r5, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
1366 __ str(value, MemOperand(r5));
1367 // Skip write barrier if the written value is a smi.
1368 __ tst(value, Operand(kSmiTagMask));
1369 __ Ret(eq);
1370 // Update write barrier for the elements array address.
1371 __ sub(r4, r5, Operand(elements));
1372 __ RecordWrite(elements, r4, r5);
1370 1373
1371 // Fast case: Do the store.
1372 // r0 == value, r2 == address to store into, r3 == elements
1373 __ bind(&fast);
1374 __ str(r0, MemOperand(r2));
1375 // Skip write barrier if the written value is a smi.
1376 __ tst(r0, Operand(kSmiTagMask));
1377 __ b(eq, &exit);
1378 // Update write barrier for the elements array address.
1379 __ sub(r1, r2, Operand(r3));
1380 __ RecordWrite(r3, r1, r2);
1381
1382 __ bind(&exit);
1383 __ Ret(); 1374 __ Ret();
1384 } 1375 }
1385 1376
1386 1377
1387 // Convert int passed in register ival to IEEE 754 single precision 1378 // Convert int passed in register ival to IEEE 754 single precision
1388 // floating point value and store it into register fval. 1379 // floating point value and store it into register fval.
1389 // If VFP3 is available use it for conversion. 1380 // If VFP3 is available use it for conversion.
1390 static void ConvertIntToFloat(MacroAssembler* masm, 1381 static void ConvertIntToFloat(MacroAssembler* masm,
1391 Register ival, 1382 Register ival,
1392 Register fval, 1383 Register fval,
(...skipping 73 matching lines...) Expand 10 before | Expand all | Expand 10 after
1466 UNREACHABLE(); 1457 UNREACHABLE();
1467 return false; 1458 return false;
1468 } 1459 }
1469 } 1460 }
1470 1461
1471 1462
1472 void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm, 1463 void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
1473 ExternalArrayType array_type) { 1464 ExternalArrayType array_type) {
1474 // ---------- S t a t e -------------- 1465 // ---------- S t a t e --------------
1475 // -- r0 : value 1466 // -- r0 : value
1467 // -- r1 : key
1468 // -- r2 : receiver
1476 // -- lr : return address 1469 // -- lr : return address
1477 // -- sp[0] : key
1478 // -- sp[1] : receiver
1479 // ----------------------------------- 1470 // -----------------------------------
1480 Label slow, check_heap_number; 1471 Label slow, check_heap_number;
1481 1472
1482 // Get the key and the object from the stack. 1473 // Register usage.
1483 __ ldm(ia, sp, r1.bit() | r2.bit()); // r1 = key, r2 = receiver 1474 Register value = r0;
1475 Register key = r1;
1476 Register receiver = r2;
1477 // r3 mostly holds the elements array or the destination external array.
1484 1478
1485 // Check that the object isn't a smi. 1479 // Check that the object isn't a smi.
1486 __ BranchOnSmi(r2, &slow); 1480 __ BranchOnSmi(receiver, &slow);
1487 1481
1488 // Check that the object is a JS object. Load map into r3 1482 // Check that the object is a JS object. Load map into r3.
1489 __ CompareObjectType(r2, r3, r4, FIRST_JS_OBJECT_TYPE); 1483 __ CompareObjectType(receiver, r3, r4, FIRST_JS_OBJECT_TYPE);
1490 __ b(le, &slow); 1484 __ b(le, &slow);
1491 1485
1492 // Check that the receiver does not require access checks. We need 1486 // Check that the receiver does not require access checks. We need
1493 // to do this because this generic stub does not perform map checks. 1487 // to do this because this generic stub does not perform map checks.
1494 __ ldrb(ip, FieldMemOperand(r3, Map::kBitFieldOffset)); 1488 __ ldrb(ip, FieldMemOperand(r3, Map::kBitFieldOffset));
1495 __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded)); 1489 __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
1496 __ b(ne, &slow); 1490 __ b(ne, &slow);
1497 1491
1498 // Check that the key is a smi. 1492 // Check that the key is a smi.
1499 __ BranchOnNotSmi(r1, &slow); 1493 __ BranchOnNotSmi(key, &slow);
1500 1494
1501 // Check that the elements array is the appropriate type of 1495 // Check that the elements array is the appropriate type of ExternalArray.
1502 // ExternalArray. 1496 __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
1503 // r0: value 1497 __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
1504 // r1: index (smi)
1505 // r2: object
1506 __ ldr(r2, FieldMemOperand(r2, JSObject::kElementsOffset));
1507 __ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
1508 __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type)); 1498 __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type));
1509 __ cmp(r3, ip); 1499 __ cmp(r4, ip);
1510 __ b(ne, &slow); 1500 __ b(ne, &slow);
1511 1501
1512 // Check that the index is in range. 1502 // Check that the index is in range.
1513 __ mov(r1, Operand(r1, ASR, kSmiTagSize)); // Untag the index. 1503 __ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag the index.
1514 __ ldr(ip, FieldMemOperand(r2, ExternalArray::kLengthOffset)); 1504 __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
1515 __ cmp(r1, ip); 1505 __ cmp(r4, ip);
1516 // Unsigned comparison catches both negative and too-large values. 1506 // Unsigned comparison catches both negative and too-large values.
1517 __ b(hs, &slow); 1507 __ b(hs, &slow);
1518 1508
1519 // Handle both smis and HeapNumbers in the fast path. Go to the 1509 // Handle both smis and HeapNumbers in the fast path. Go to the
1520 // runtime for all other kinds of values. 1510 // runtime for all other kinds of values.
1521 // r0: value 1511 // r3: external array.
1522 // r1: index (integer) 1512 // r4: key (integer).
1523 // r2: array 1513 __ BranchOnNotSmi(value, &check_heap_number);
1524 __ BranchOnNotSmi(r0, &check_heap_number); 1514 __ mov(r5, Operand(value, ASR, kSmiTagSize)); // Untag the value.
1525 __ mov(r3, Operand(r0, ASR, kSmiTagSize)); // Untag the value. 1515 __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
1526 __ ldr(r2, FieldMemOperand(r2, ExternalArray::kExternalPointerOffset));
1527 1516
1528 // r1: index (integer) 1517 // r3: base pointer of external storage.
1529 // r2: base pointer of external storage 1518 // r4: key (integer).
1530 // r3: value (integer) 1519 // r5: value (integer).
1531 switch (array_type) { 1520 switch (array_type) {
1532 case kExternalByteArray: 1521 case kExternalByteArray:
1533 case kExternalUnsignedByteArray: 1522 case kExternalUnsignedByteArray:
1534 __ strb(r3, MemOperand(r2, r1, LSL, 0)); 1523 __ strb(r5, MemOperand(r3, r4, LSL, 0));
1535 break; 1524 break;
1536 case kExternalShortArray: 1525 case kExternalShortArray:
1537 case kExternalUnsignedShortArray: 1526 case kExternalUnsignedShortArray:
1538 __ strh(r3, MemOperand(r2, r1, LSL, 1)); 1527 __ strh(r5, MemOperand(r3, r4, LSL, 1));
1539 break; 1528 break;
1540 case kExternalIntArray: 1529 case kExternalIntArray:
1541 case kExternalUnsignedIntArray: 1530 case kExternalUnsignedIntArray:
1542 __ str(r3, MemOperand(r2, r1, LSL, 2)); 1531 __ str(r5, MemOperand(r3, r4, LSL, 2));
1543 break; 1532 break;
1544 case kExternalFloatArray: 1533 case kExternalFloatArray:
1545 // Need to perform int-to-float conversion. 1534 // Need to perform int-to-float conversion.
1546 ConvertIntToFloat(masm, r3, r4, r5, r6); 1535 ConvertIntToFloat(masm, r5, r6, r7, r9);
1547 __ str(r4, MemOperand(r2, r1, LSL, 2)); 1536 __ str(r6, MemOperand(r3, r4, LSL, 2));
1548 break; 1537 break;
1549 default: 1538 default:
1550 UNREACHABLE(); 1539 UNREACHABLE();
1551 break; 1540 break;
1552 } 1541 }
1553 1542
1554 // r0: value 1543 // Entry registers are intact, r0 holds the value which is the return value.
1555 __ Ret(); 1544 __ Ret();
1556 1545
1557 1546
1558 // r0: value 1547 // r3: external array.
1559 // r1: index (integer) 1548 // r4: index (integer).
1560 // r2: external array object
1561 __ bind(&check_heap_number); 1549 __ bind(&check_heap_number);
1562 __ CompareObjectType(r0, r3, r4, HEAP_NUMBER_TYPE); 1550 __ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE);
1563 __ b(ne, &slow); 1551 __ b(ne, &slow);
1564 1552
1565 __ ldr(r2, FieldMemOperand(r2, ExternalArray::kExternalPointerOffset)); 1553 __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
1554
1555 // r3: base pointer of external storage.
1556 // r4: key (integer).
1566 1557
1567 // The WebGL specification leaves the behavior of storing NaN and 1558 // The WebGL specification leaves the behavior of storing NaN and
1568 // +/-Infinity into integer arrays basically undefined. For more 1559 // +/-Infinity into integer arrays basically undefined. For more
1569 // reproducible behavior, convert these to zero. 1560 // reproducible behavior, convert these to zero.
1570 if (CpuFeatures::IsSupported(VFP3)) { 1561 if (CpuFeatures::IsSupported(VFP3)) {
1571 CpuFeatures::Scope scope(VFP3); 1562 CpuFeatures::Scope scope(VFP3);
1572 1563
1573 // vldr requires offset to be a multiple of 4 so we can not 1564 // vldr requires offset to be a multiple of 4 so we can not
1574 // include -kHeapObjectTag into it. 1565 // include -kHeapObjectTag into it.
1575 __ sub(r3, r0, Operand(kHeapObjectTag)); 1566 __ sub(r5, r0, Operand(kHeapObjectTag));
1576 __ vldr(d0, r3, HeapNumber::kValueOffset); 1567 __ vldr(d0, r5, HeapNumber::kValueOffset);
1577 1568
1578 if (array_type == kExternalFloatArray) { 1569 if (array_type == kExternalFloatArray) {
1579 __ vcvt_f32_f64(s0, d0); 1570 __ vcvt_f32_f64(s0, d0);
1580 __ vmov(r3, s0); 1571 __ vmov(r5, s0);
1581 __ str(r3, MemOperand(r2, r1, LSL, 2)); 1572 __ str(r5, MemOperand(r3, r4, LSL, 2));
1582 } else { 1573 } else {
1583 Label done; 1574 Label done;
1584 1575
1585 // Need to perform float-to-int conversion. 1576 // Need to perform float-to-int conversion.
1586 // Test for NaN. 1577 // Test for NaN.
1587 __ vcmp(d0, d0); 1578 __ vcmp(d0, d0);
1588 // Move vector status bits to normal status bits. 1579 // Move vector status bits to normal status bits.
1589 __ vmrs(v8::internal::pc); 1580 __ vmrs(v8::internal::pc);
1590 __ mov(r3, Operand(0), LeaveCC, vs); // NaN converts to 0 1581 __ mov(r5, Operand(0), LeaveCC, vs); // NaN converts to 0.
1591 __ b(vs, &done); 1582 __ b(vs, &done);
1592 1583
1593 // Test whether exponent equal to 0x7FF (infinity or NaN) 1584 // Test whether exponent equal to 0x7FF (infinity or NaN).
1594 __ vmov(r4, r3, d0); 1585 __ vmov(r6, r7, d0);
1595 __ mov(r5, Operand(0x7FF00000)); 1586 __ mov(r5, Operand(0x7FF00000));
1596 __ and_(r3, r3, Operand(r5)); 1587 __ and_(r6, r6, Operand(r5));
1597 __ teq(r3, Operand(r5)); 1588 __ teq(r6, Operand(r5));
1598 __ mov(r3, Operand(0), LeaveCC, eq); 1589 __ mov(r6, Operand(0), LeaveCC, eq);
1599 1590
1600 // Not infinity or NaN simply convert to int 1591 // Not infinity or NaN simply convert to int.
1601 if (IsElementTypeSigned(array_type)) { 1592 if (IsElementTypeSigned(array_type)) {
1602 __ vcvt_s32_f64(s0, d0, ne); 1593 __ vcvt_s32_f64(s0, d0, ne);
1603 } else { 1594 } else {
1604 __ vcvt_u32_f64(s0, d0, ne); 1595 __ vcvt_u32_f64(s0, d0, ne);
1605 } 1596 }
1606 1597
1607 __ vmov(r3, s0, ne); 1598 __ vmov(r5, s0, ne);
1608 1599
1609 __ bind(&done); 1600 __ bind(&done);
1610 switch (array_type) { 1601 switch (array_type) {
1611 case kExternalByteArray: 1602 case kExternalByteArray:
1612 case kExternalUnsignedByteArray: 1603 case kExternalUnsignedByteArray:
1613 __ strb(r3, MemOperand(r2, r1, LSL, 0)); 1604 __ strb(r5, MemOperand(r3, r4, LSL, 0));
1614 break; 1605 break;
1615 case kExternalShortArray: 1606 case kExternalShortArray:
1616 case kExternalUnsignedShortArray: 1607 case kExternalUnsignedShortArray:
1617 __ strh(r3, MemOperand(r2, r1, LSL, 1)); 1608 __ strh(r5, MemOperand(r3, r4, LSL, 1));
1618 break; 1609 break;
1619 case kExternalIntArray: 1610 case kExternalIntArray:
1620 case kExternalUnsignedIntArray: 1611 case kExternalUnsignedIntArray:
1621 __ str(r3, MemOperand(r2, r1, LSL, 2)); 1612 __ str(r5, MemOperand(r3, r4, LSL, 2));
1622 break; 1613 break;
1623 default: 1614 default:
1624 UNREACHABLE(); 1615 UNREACHABLE();
1625 break; 1616 break;
1626 } 1617 }
1627 } 1618 }
1628 1619
1629 // r0: original value 1620 // Entry registers are intact, r0 holds the value which is the return value.
1630 __ Ret(); 1621 __ Ret();
1631 } else { 1622 } else {
1632 // VFP3 is not available do manual conversions 1623 // VFP3 is not available do manual conversions.
1633 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kExponentOffset)); 1624 __ ldr(r5, FieldMemOperand(value, HeapNumber::kExponentOffset));
1634 __ ldr(r4, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); 1625 __ ldr(r6, FieldMemOperand(value, HeapNumber::kMantissaOffset));
1635 1626
1636 if (array_type == kExternalFloatArray) { 1627 if (array_type == kExternalFloatArray) {
1637 Label done, nan_or_infinity_or_zero; 1628 Label done, nan_or_infinity_or_zero;
1638 static const int kMantissaInHiWordShift = 1629 static const int kMantissaInHiWordShift =
1639 kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord; 1630 kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
1640 1631
1641 static const int kMantissaInLoWordShift = 1632 static const int kMantissaInLoWordShift =
1642 kBitsPerInt - kMantissaInHiWordShift; 1633 kBitsPerInt - kMantissaInHiWordShift;
1643 1634
1644 // Test for all special exponent values: zeros, subnormal numbers, NaNs 1635 // Test for all special exponent values: zeros, subnormal numbers, NaNs
1645 // and infinities. All these should be converted to 0. 1636 // and infinities. All these should be converted to 0.
1646 __ mov(r5, Operand(HeapNumber::kExponentMask)); 1637 __ mov(r7, Operand(HeapNumber::kExponentMask));
1647 __ and_(r6, r3, Operand(r5), SetCC); 1638 __ and_(r9, r5, Operand(r7), SetCC);
1648 __ b(eq, &nan_or_infinity_or_zero); 1639 __ b(eq, &nan_or_infinity_or_zero);
1649 1640
1650 __ teq(r6, Operand(r5)); 1641 __ teq(r9, Operand(r7));
1651 __ mov(r6, Operand(kBinary32ExponentMask), LeaveCC, eq); 1642 __ mov(r9, Operand(kBinary32ExponentMask), LeaveCC, eq);
1652 __ b(eq, &nan_or_infinity_or_zero); 1643 __ b(eq, &nan_or_infinity_or_zero);
1653 1644
1654 // Rebias exponent. 1645 // Rebias exponent.
1655 __ mov(r6, Operand(r6, LSR, HeapNumber::kExponentShift)); 1646 __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
1656 __ add(r6, 1647 __ add(r9,
1657 r6, 1648 r9,
1658 Operand(kBinary32ExponentBias - HeapNumber::kExponentBias)); 1649 Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));
1659 1650
1660 __ cmp(r6, Operand(kBinary32MaxExponent)); 1651 __ cmp(r9, Operand(kBinary32MaxExponent));
1661 __ and_(r3, r3, Operand(HeapNumber::kSignMask), LeaveCC, gt); 1652 __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, gt);
1662 __ orr(r3, r3, Operand(kBinary32ExponentMask), LeaveCC, gt); 1653 __ orr(r5, r5, Operand(kBinary32ExponentMask), LeaveCC, gt);
1663 __ b(gt, &done); 1654 __ b(gt, &done);
1664 1655
1665 __ cmp(r6, Operand(kBinary32MinExponent)); 1656 __ cmp(r9, Operand(kBinary32MinExponent));
1666 __ and_(r3, r3, Operand(HeapNumber::kSignMask), LeaveCC, lt); 1657 __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, lt);
1667 __ b(lt, &done); 1658 __ b(lt, &done);
1668 1659
1669 __ and_(r7, r3, Operand(HeapNumber::kSignMask)); 1660 __ and_(r7, r5, Operand(HeapNumber::kSignMask));
1670 __ and_(r3, r3, Operand(HeapNumber::kMantissaMask)); 1661 __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
1671 __ orr(r7, r7, Operand(r3, LSL, kMantissaInHiWordShift)); 1662 __ orr(r7, r7, Operand(r5, LSL, kMantissaInHiWordShift));
1672 __ orr(r7, r7, Operand(r4, LSR, kMantissaInLoWordShift)); 1663 __ orr(r7, r7, Operand(r6, LSR, kMantissaInLoWordShift));
1673 __ orr(r3, r7, Operand(r6, LSL, kBinary32ExponentShift)); 1664 __ orr(r5, r7, Operand(r9, LSL, kBinary32ExponentShift));
1674 1665
1675 __ bind(&done); 1666 __ bind(&done);
1676 __ str(r3, MemOperand(r2, r1, LSL, 2)); 1667 __ str(r5, MemOperand(r3, r4, LSL, 2));
1668 // Entry registers are intact, r0 holds the value which is the return
1669 // value.
1677 __ Ret(); 1670 __ Ret();
1678 1671
1679 __ bind(&nan_or_infinity_or_zero); 1672 __ bind(&nan_or_infinity_or_zero);
1680 __ and_(r7, r3, Operand(HeapNumber::kSignMask)); 1673 __ and_(r7, r5, Operand(HeapNumber::kSignMask));
1681 __ and_(r3, r3, Operand(HeapNumber::kMantissaMask)); 1674 __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
1682 __ orr(r6, r6, r7); 1675 __ orr(r9, r9, r7);
1683 __ orr(r6, r6, Operand(r3, LSL, kMantissaInHiWordShift)); 1676 __ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift));
1684 __ orr(r3, r6, Operand(r4, LSR, kMantissaInLoWordShift)); 1677 __ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift));
1685 __ b(&done); 1678 __ b(&done);
1686 } else { 1679 } else {
1687 bool is_signed_type = IsElementTypeSigned(array_type); 1680 bool is_signed_type = IsElementTypeSigned(array_type);
1688 int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt; 1681 int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
1689 int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000; 1682 int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;
1690 1683
1691 Label done, sign; 1684 Label done, sign;
1692 1685
1693 // Test for all special exponent values: zeros, subnormal numbers, NaNs 1686 // Test for all special exponent values: zeros, subnormal numbers, NaNs
1694 // and infinities. All these should be converted to 0. 1687 // and infinities. All these should be converted to 0.
1695 __ mov(r5, Operand(HeapNumber::kExponentMask)); 1688 __ mov(r7, Operand(HeapNumber::kExponentMask));
1696 __ and_(r6, r3, Operand(r5), SetCC); 1689 __ and_(r9, r5, Operand(r7), SetCC);
1697 __ mov(r3, Operand(0), LeaveCC, eq); 1690 __ mov(r5, Operand(0), LeaveCC, eq);
1698 __ b(eq, &done); 1691 __ b(eq, &done);
1699 1692
1700 __ teq(r6, Operand(r5)); 1693 __ teq(r9, Operand(r7));
1701 __ mov(r3, Operand(0), LeaveCC, eq); 1694 __ mov(r5, Operand(0), LeaveCC, eq);
1702 __ b(eq, &done); 1695 __ b(eq, &done);
1703 1696
1704 // Unbias exponent. 1697 // Unbias exponent.
1705 __ mov(r6, Operand(r6, LSR, HeapNumber::kExponentShift)); 1698 __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
1706 __ sub(r6, r6, Operand(HeapNumber::kExponentBias), SetCC); 1699 __ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC);
1707 // If exponent is negative than result is 0. 1700 // If exponent is negative than result is 0.
1708 __ mov(r3, Operand(0), LeaveCC, mi); 1701 __ mov(r5, Operand(0), LeaveCC, mi);
1709 __ b(mi, &done); 1702 __ b(mi, &done);
1710 1703
1711 // If exponent is too big than result is minimal value 1704 // If exponent is too big than result is minimal value.
1712 __ cmp(r6, Operand(meaningfull_bits - 1)); 1705 __ cmp(r9, Operand(meaningfull_bits - 1));
1713 __ mov(r3, Operand(min_value), LeaveCC, ge); 1706 __ mov(r5, Operand(min_value), LeaveCC, ge);
1714 __ b(ge, &done); 1707 __ b(ge, &done);
1715 1708
1716 __ and_(r5, r3, Operand(HeapNumber::kSignMask), SetCC); 1709 __ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC);
1717 __ and_(r3, r3, Operand(HeapNumber::kMantissaMask)); 1710 __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
1718 __ orr(r3, r3, Operand(1u << HeapNumber::kMantissaBitsInTopWord)); 1711 __ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
1719 1712
1720 __ rsb(r6, r6, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC); 1713 __ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
1721 __ mov(r3, Operand(r3, LSR, r6), LeaveCC, pl); 1714 __ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl);
1722 __ b(pl, &sign); 1715 __ b(pl, &sign);
1723 1716
1724 __ rsb(r6, r6, Operand(0)); 1717 __ rsb(r9, r9, Operand(0));
1725 __ mov(r3, Operand(r3, LSL, r6)); 1718 __ mov(r5, Operand(r5, LSL, r9));
1726 __ rsb(r6, r6, Operand(meaningfull_bits)); 1719 __ rsb(r9, r9, Operand(meaningfull_bits));
1727 __ orr(r3, r3, Operand(r4, LSR, r6)); 1720 __ orr(r5, r5, Operand(r6, LSR, r9));
1728 1721
1729 __ bind(&sign); 1722 __ bind(&sign);
1730 __ teq(r5, Operand(0)); 1723 __ teq(r7, Operand(0));
1731 __ rsb(r3, r3, Operand(0), LeaveCC, ne); 1724 __ rsb(r5, r5, Operand(0), LeaveCC, ne);
1732 1725
1733 __ bind(&done); 1726 __ bind(&done);
1734 switch (array_type) { 1727 switch (array_type) {
1735 case kExternalByteArray: 1728 case kExternalByteArray:
1736 case kExternalUnsignedByteArray: 1729 case kExternalUnsignedByteArray:
1737 __ strb(r3, MemOperand(r2, r1, LSL, 0)); 1730 __ strb(r5, MemOperand(r3, r4, LSL, 0));
1738 break; 1731 break;
1739 case kExternalShortArray: 1732 case kExternalShortArray:
1740 case kExternalUnsignedShortArray: 1733 case kExternalUnsignedShortArray:
1741 __ strh(r3, MemOperand(r2, r1, LSL, 1)); 1734 __ strh(r5, MemOperand(r3, r4, LSL, 1));
1742 break; 1735 break;
1743 case kExternalIntArray: 1736 case kExternalIntArray:
1744 case kExternalUnsignedIntArray: 1737 case kExternalUnsignedIntArray:
1745 __ str(r3, MemOperand(r2, r1, LSL, 2)); 1738 __ str(r5, MemOperand(r3, r4, LSL, 2));
1746 break; 1739 break;
1747 default: 1740 default:
1748 UNREACHABLE(); 1741 UNREACHABLE();
1749 break; 1742 break;
1750 } 1743 }
1751 } 1744 }
1752 } 1745 }
1753 1746
1754 // Slow case: call runtime. 1747 // Slow case: call runtime.
1755 __ bind(&slow); 1748 __ bind(&slow);
1749
1750 // Entry registers are intact.
1751 // r0: value
1752 // r1: key
1753 // r2: receiver
1756 GenerateRuntimeSetProperty(masm); 1754 GenerateRuntimeSetProperty(masm);
1757 } 1755 }
1758 1756
1759 1757
1760 void StoreIC::GenerateMegamorphic(MacroAssembler* masm) { 1758 void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
1761 // ----------- S t a t e ------------- 1759 // ----------- S t a t e -------------
1762 // -- r0 : value 1760 // -- r0 : value
1763 // -- r1 : receiver 1761 // -- r1 : receiver
1764 // -- r2 : name 1762 // -- r2 : name
1765 // -- lr : return address 1763 // -- lr : return address
(...skipping 72 matching lines...) Expand 10 before | Expand all | Expand 10 after
1838 GenerateMiss(masm); 1836 GenerateMiss(masm);
1839 } 1837 }
1840 1838
1841 1839
1842 #undef __ 1840 #undef __
1843 1841
1844 1842
1845 } } // namespace v8::internal 1843 } } // namespace v8::internal
1846 1844
1847 #endif // V8_TARGET_ARCH_ARM 1845 #endif // V8_TARGET_ARCH_ARM
OLDNEW
« no previous file with comments | « src/arm/full-codegen-arm.cc ('k') | src/arm/stub-cache-arm.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698