Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(228)

Side by Side Diff: src/arm/ic-arm.cc

Issue 6597029: [Isolates] Merge r 6300:6500 from bleeding_edge to isolates. (Closed) Base URL: http://v8.googlecode.com/svn/branches/experimental/isolates/
Patch Set: Created 9 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/arm/full-codegen-arm.cc ('k') | src/arm/jump-target-arm.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2006-2008 the V8 project authors. All rights reserved. 1 // Copyright 2006-2008 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 77 matching lines...) Expand 10 before | Expand all | Expand 10 after
88 88
89 // If this assert fails, we have to check upper bound too. 89 // If this assert fails, we have to check upper bound too.
90 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE); 90 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
91 91
92 GenerateGlobalInstanceTypeCheck(masm, t1, miss); 92 GenerateGlobalInstanceTypeCheck(masm, t1, miss);
93 93
94 // Check that the global object does not require access checks. 94 // Check that the global object does not require access checks.
95 __ ldrb(t1, FieldMemOperand(t0, Map::kBitFieldOffset)); 95 __ ldrb(t1, FieldMemOperand(t0, Map::kBitFieldOffset));
96 __ tst(t1, Operand((1 << Map::kIsAccessCheckNeeded) | 96 __ tst(t1, Operand((1 << Map::kIsAccessCheckNeeded) |
97 (1 << Map::kHasNamedInterceptor))); 97 (1 << Map::kHasNamedInterceptor)));
98 __ b(nz, miss); 98 __ b(ne, miss);
99 99
100 __ ldr(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); 100 __ ldr(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
101 __ ldr(t1, FieldMemOperand(elements, HeapObject::kMapOffset)); 101 __ ldr(t1, FieldMemOperand(elements, HeapObject::kMapOffset));
102 __ LoadRoot(ip, Heap::kHashTableMapRootIndex); 102 __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
103 __ cmp(t1, ip); 103 __ cmp(t1, ip);
104 __ b(nz, miss); 104 __ b(ne, miss);
105 } 105 }
106 106
107 107
108 // Probe the string dictionary in the |elements| register. Jump to the 108 // Probe the string dictionary in the |elements| register. Jump to the
109 // |done| label if a property with the given name is found. Jump to 109 // |done| label if a property with the given name is found. Jump to
110 // the |miss| label otherwise. 110 // the |miss| label otherwise.
111 static void GenerateStringDictionaryProbes(MacroAssembler* masm, 111 static void GenerateStringDictionaryProbes(MacroAssembler* masm,
112 Label* miss, 112 Label* miss,
113 Label* done, 113 Label* done,
114 Register elements, 114 Register elements,
(...skipping 257 matching lines...) Expand 10 before | Expand all | Expand 10 after
372 // -- sp[0] : receiver 372 // -- sp[0] : receiver
373 // ----------------------------------- 373 // -----------------------------------
374 Label miss; 374 Label miss;
375 375
376 StubCompiler::GenerateLoadArrayLength(masm, r0, r3, &miss); 376 StubCompiler::GenerateLoadArrayLength(masm, r0, r3, &miss);
377 __ bind(&miss); 377 __ bind(&miss);
378 StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC); 378 StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
379 } 379 }
380 380
381 381
382 void LoadIC::GenerateStringLength(MacroAssembler* masm) { 382 void LoadIC::GenerateStringLength(MacroAssembler* masm, bool support_wrappers) {
383 // ----------- S t a t e ------------- 383 // ----------- S t a t e -------------
384 // -- r2 : name 384 // -- r2 : name
385 // -- lr : return address 385 // -- lr : return address
386 // -- r0 : receiver 386 // -- r0 : receiver
387 // -- sp[0] : receiver 387 // -- sp[0] : receiver
388 // ----------------------------------- 388 // -----------------------------------
389 Label miss; 389 Label miss;
390 390
391 StubCompiler::GenerateLoadStringLength(masm, r0, r1, r3, &miss); 391 StubCompiler::GenerateLoadStringLength(masm, r0, r1, r3, &miss,
392 support_wrappers);
392 // Cache miss: Jump to runtime. 393 // Cache miss: Jump to runtime.
393 __ bind(&miss); 394 __ bind(&miss);
394 StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC); 395 StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
395 } 396 }
396 397
397 398
398 void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) { 399 void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
399 // ----------- S t a t e ------------- 400 // ----------- S t a t e -------------
400 // -- r2 : name 401 // -- r2 : name
401 // -- lr : return address 402 // -- lr : return address
(...skipping 10 matching lines...) Expand all
412 413
413 // Checks the receiver for special cases (value type, slow case bits). 414 // Checks the receiver for special cases (value type, slow case bits).
414 // Falls through for regular JS object. 415 // Falls through for regular JS object.
415 static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm, 416 static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
416 Register receiver, 417 Register receiver,
417 Register map, 418 Register map,
418 Register scratch, 419 Register scratch,
419 int interceptor_bit, 420 int interceptor_bit,
420 Label* slow) { 421 Label* slow) {
421 // Check that the object isn't a smi. 422 // Check that the object isn't a smi.
422 __ BranchOnSmi(receiver, slow); 423 __ JumpIfSmi(receiver, slow);
423 // Get the map of the receiver. 424 // Get the map of the receiver.
424 __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset)); 425 __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
425 // Check bit field. 426 // Check bit field.
426 __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset)); 427 __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
427 __ tst(scratch, 428 __ tst(scratch,
428 Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit))); 429 Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
429 __ b(nz, slow); 430 __ b(ne, slow);
430 // Check that the object is some kind of JS object EXCEPT JS Value type. 431 // Check that the object is some kind of JS object EXCEPT JS Value type.
431 // In the case that the object is a value-wrapper object, 432 // In the case that the object is a value-wrapper object,
432 // we enter the runtime system to make sure that indexing into string 433 // we enter the runtime system to make sure that indexing into string
433 // objects work as intended. 434 // objects work as intended.
434 ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE); 435 ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
435 __ ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset)); 436 __ ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
436 __ cmp(scratch, Operand(JS_OBJECT_TYPE)); 437 __ cmp(scratch, Operand(JS_OBJECT_TYPE));
437 __ b(lt, slow); 438 __ b(lt, slow);
438 } 439 }
439 440
(...skipping 95 matching lines...) Expand 10 before | Expand all | Expand 10 after
535 static void GenerateMonomorphicCacheProbe(MacroAssembler* masm, 536 static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
536 int argc, 537 int argc,
537 Code::Kind kind) { 538 Code::Kind kind) {
538 // ----------- S t a t e ------------- 539 // ----------- S t a t e -------------
539 // -- r1 : receiver 540 // -- r1 : receiver
540 // -- r2 : name 541 // -- r2 : name
541 // ----------------------------------- 542 // -----------------------------------
542 Label number, non_number, non_string, boolean, probe, miss; 543 Label number, non_number, non_string, boolean, probe, miss;
543 544
544 // Probe the stub cache. 545 // Probe the stub cache.
545 Code::Flags flags = 546 Code::Flags flags = Code::ComputeFlags(kind,
546 Code::ComputeFlags(kind, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc); 547 NOT_IN_LOOP,
548 MONOMORPHIC,
549 Code::kNoExtraICState,
550 NORMAL,
551 argc);
547 Isolate::Current()->stub_cache()->GenerateProbe( 552 Isolate::Current()->stub_cache()->GenerateProbe(
548 masm, flags, r1, r2, r3, r4, r5); 553 masm, flags, r1, r2, r3, r4, r5);
549 554
550 // If the stub cache probing failed, the receiver might be a value. 555 // If the stub cache probing failed, the receiver might be a value.
551 // For value objects, we use the map of the prototype objects for 556 // For value objects, we use the map of the prototype objects for
552 // the corresponding JSValue for the cache and that is what we need 557 // the corresponding JSValue for the cache and that is what we need
553 // to probe. 558 // to probe.
554 // 559 //
555 // Check for number. 560 // Check for number.
556 __ tst(r1, Operand(kSmiTagMask)); 561 __ tst(r1, Operand(kSmiTagMask));
(...skipping 183 matching lines...) Expand 10 before | Expand all | Expand 10 after
740 // ----------------------------------- 745 // -----------------------------------
741 746
742 // Get the receiver of the function from the stack into r1. 747 // Get the receiver of the function from the stack into r1.
743 __ ldr(r1, MemOperand(sp, argc * kPointerSize)); 748 __ ldr(r1, MemOperand(sp, argc * kPointerSize));
744 749
745 Label do_call, slow_call, slow_load, slow_reload_receiver; 750 Label do_call, slow_call, slow_load, slow_reload_receiver;
746 Label check_number_dictionary, check_string, lookup_monomorphic_cache; 751 Label check_number_dictionary, check_string, lookup_monomorphic_cache;
747 Label index_smi, index_string; 752 Label index_smi, index_string;
748 753
749 // Check that the key is a smi. 754 // Check that the key is a smi.
750 __ BranchOnNotSmi(r2, &check_string); 755 __ JumpIfNotSmi(r2, &check_string);
751 __ bind(&index_smi); 756 __ bind(&index_smi);
752 // Now the key is known to be a smi. This place is also jumped to from below 757 // Now the key is known to be a smi. This place is also jumped to from below
753 // where a numeric string is converted to a smi. 758 // where a numeric string is converted to a smi.
754 759
755 GenerateKeyedLoadReceiverCheck( 760 GenerateKeyedLoadReceiverCheck(
756 masm, r1, r0, r3, Map::kHasIndexedInterceptor, &slow_call); 761 masm, r1, r0, r3, Map::kHasIndexedInterceptor, &slow_call);
757 762
758 GenerateFastArrayLoad( 763 GenerateFastArrayLoad(
759 masm, r1, r2, r4, r3, r0, r1, &check_number_dictionary, &slow_load); 764 masm, r1, r2, r4, r3, r0, r1, &check_number_dictionary, &slow_load);
760 __ IncrementCounter(COUNTERS->keyed_call_generic_smi_fast(), 1, r0, r3); 765 __ IncrementCounter(COUNTERS->keyed_call_generic_smi_fast(), 1, r0, r3);
(...skipping 396 matching lines...) Expand 10 before | Expand all | Expand 10 after
1157 // -- r0 : key 1162 // -- r0 : key
1158 // -- r1 : receiver 1163 // -- r1 : receiver
1159 // ----------------------------------- 1164 // -----------------------------------
1160 Label slow, check_string, index_smi, index_string, property_array_property; 1165 Label slow, check_string, index_smi, index_string, property_array_property;
1161 Label check_pixel_array, probe_dictionary, check_number_dictionary; 1166 Label check_pixel_array, probe_dictionary, check_number_dictionary;
1162 1167
1163 Register key = r0; 1168 Register key = r0;
1164 Register receiver = r1; 1169 Register receiver = r1;
1165 1170
1166 // Check that the key is a smi. 1171 // Check that the key is a smi.
1167 __ BranchOnNotSmi(key, &check_string); 1172 __ JumpIfNotSmi(key, &check_string);
1168 __ bind(&index_smi); 1173 __ bind(&index_smi);
1169 // Now the key is known to be a smi. This place is also jumped to from below 1174 // Now the key is known to be a smi. This place is also jumped to from below
1170 // where a numeric string is converted to a smi. 1175 // where a numeric string is converted to a smi.
1171 1176
1172 GenerateKeyedLoadReceiverCheck( 1177 GenerateKeyedLoadReceiverCheck(
1173 masm, receiver, r2, r3, Map::kHasIndexedInterceptor, &slow); 1178 masm, receiver, r2, r3, Map::kHasIndexedInterceptor, &slow);
1174 1179
1175 // Check the "has fast elements" bit in the receiver's map which is 1180 // Check the "has fast elements" bit in the receiver's map which is
1176 // now in r2. 1181 // now in r2.
1177 __ ldrb(r3, FieldMemOperand(r2, Map::kBitField2Offset)); 1182 __ ldrb(r3, FieldMemOperand(r2, Map::kBitField2Offset));
(...skipping 151 matching lines...) Expand 10 before | Expand all | Expand 10 after
1329 __ Ret(); 1334 __ Ret();
1330 1335
1331 StubRuntimeCallHelper call_helper; 1336 StubRuntimeCallHelper call_helper;
1332 char_at_generator.GenerateSlow(masm, call_helper); 1337 char_at_generator.GenerateSlow(masm, call_helper);
1333 1338
1334 __ bind(&miss); 1339 __ bind(&miss);
1335 GenerateMiss(masm); 1340 GenerateMiss(masm);
1336 } 1341 }
1337 1342
1338 1343
// Convert an unsigned integer with the specified number of leading zeroes in
// its binary representation to an IEEE 754 double.
// The integer to convert is passed in register hiword.
// The resulting double is returned in registers hiword:loword (hiword holds
// the sign/exponent/top-mantissa word, loword the low mantissa word).
// This function does not work correctly for 0 (CountLeadingZeros-style
// leading_zeroes is undefined/wrong for 0, and the implicit-1 packing below
// assumes a nonzero value).
static void GenerateUInt2Double(MacroAssembler* masm,
                                Register hiword,
                                Register loword,
                                Register scratch,
                                int leading_zeroes) {
  // Number of significant bits below the (implicit) leading 1 bit.
  const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
  // IEEE double exponent for a value in [2^meaningful_bits, 2^(meaningful_bits+1)).
  const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;

  // How far the mantissa must shift right to land in the top word...
  const int mantissa_shift_for_hi_word =
      meaningful_bits - HeapNumber::kMantissaBitsInTopWord;

  // ...and the complementary left shift for the bits that spill into loword.
  const int mantissa_shift_for_lo_word =
      kBitsPerInt - mantissa_shift_for_hi_word;

  __ mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
  if (mantissa_shift_for_hi_word > 0) {
    // Mantissa wider than the top word: low bits go to loword, high bits are
    // OR-ed under the exponent in hiword.
    __ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
    __ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word));
  } else {
    // Mantissa fits entirely in the top word; loword is zero.
    // NOTE(review): LSL by a negative mantissa_shift_for_hi_word would be
    // wrong; callers only pass leading_zeroes of 0 or 1, for which the shift
    // is >= 0 in this branch — confirm if new call sites are added.
    __ mov(loword, Operand(0, RelocInfo::NONE));
    __ orr(hiword, scratch, Operand(hiword, LSL, mantissa_shift_for_hi_word));
  }

  // The implicit leading 1 of the mantissa overlaps the lowest exponent bit.
  // If the least significant bit of the biased exponent was not 1, it was
  // corrupted by the most significant bit of the mantissa, so fix it up.
  if (!(biased_exponent & 1)) {
    __ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
  }
}
1373
1374
// Generic keyed-load stub for external (typed) arrays: loads the element at
// a smi key from the receiver's external elements array, boxing the result
// as a smi or a freshly allocated HeapNumber as required by the element type.
// Falls back to the runtime (GenerateRuntimeGetProperty) on any check failure.
void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
                                        ExternalArrayType array_type) {
  // ---------- S t a t e --------------
  //  -- lr     : return address
  //  -- r0     : key
  //  -- r1     : receiver
  // -----------------------------------
  // NOTE(review): failed_allocation is declared but never bound or jumped to
  // in this function — allocation failures all go to &slow.
  Label slow, failed_allocation;

  Register key = r0;
  Register receiver = r1;

  // Check that the object isn't a smi.
  __ BranchOnSmi(receiver, &slow);

  // Check that the key is a smi.
  __ BranchOnNotSmi(key, &slow);

  // Check that the object is a JS object. Load map into r2.
  __ CompareObjectType(receiver, r2, r3, FIRST_JS_OBJECT_TYPE);
  __ b(lt, &slow);

  // Check that the receiver does not require access checks.  We need
  // to check this explicitly since this generic stub does not perform
  // map checks.
  __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
  __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
  __ b(ne, &slow);

  // Check that the elements array is the appropriate type of
  // ExternalArray (one root map per external array type).
  __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
  __ LoadRoot(ip, HEAP->RootIndexForExternalArrayType(array_type));
  __ cmp(r2, ip);
  __ b(ne, &slow);

  // Check that the index is in range.
  __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
  __ cmp(ip, Operand(key, ASR, kSmiTagSize));
  // Unsigned comparison catches both negative and too-large values.
  __ b(lo, &slow);

  // r3: elements array
  __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
  // r3: base pointer of external storage

  // We are not untagging the smi key and instead work with it
  // as if it was premultiplied by 2 (smi tag is one zero bit), so the
  // byte offset is derived by shifting the tagged key.
  ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));

  Register value = r2;
  switch (array_type) {
    case kExternalByteArray:
      // 1-byte elements: tagged key >> 1 is the byte offset; sign-extend.
      __ ldrsb(value, MemOperand(r3, key, LSR, 1));
      break;
    case kExternalUnsignedByteArray:
      __ ldrb(value, MemOperand(r3, key, LSR, 1));
      break;
    case kExternalShortArray:
      // 2-byte elements: tagged key already equals index * 2; sign-extend.
      __ ldrsh(value, MemOperand(r3, key, LSL, 0));
      break;
    case kExternalUnsignedShortArray:
      __ ldrh(value, MemOperand(r3, key, LSL, 0));
      break;
    case kExternalIntArray:
    case kExternalUnsignedIntArray:
      // 4-byte elements: tagged key << 1 == index * 4.
      __ ldr(value, MemOperand(r3, key, LSL, 1));
      break;
    case kExternalFloatArray:
      if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
        CpuFeatures::Scope scope(VFP3);
        // vldr has no register-offset form, so compute the address first.
        __ add(r2, r3, Operand(key, LSL, 1));
        __ vldr(s0, r2, 0);
      } else {
        // Without VFP3 keep the raw binary32 bits in a core register.
        __ ldr(value, MemOperand(r3, key, LSL, 1));
      }
      break;
    default:
      UNREACHABLE();
      break;
  }

  // For integer array types:
  // r2: value
  // For floating-point array type
  // s0: value (if VFP3 is supported)
  // r2: value (if VFP3 is not supported)

  if (array_type == kExternalIntArray) {
    // For the Int and UnsignedInt array types, we need to see whether
    // the value can be represented in a Smi. If not, we need to convert
    // it to a HeapNumber.
    Label box_int;
    // Values outside [-2^30, 2^30) don't fit a smi; cmp against 0xC0000000
    // sets N iff value is in the non-smi range.
    __ cmp(value, Operand(0xC0000000));
    __ b(mi, &box_int);
    // Tag integer as smi and return it.
    __ mov(r0, Operand(value, LSL, kSmiTagSize));
    __ Ret();

    __ bind(&box_int);
    // Allocate a HeapNumber for the result and perform int-to-double
    // conversion.  Don't touch r0 or r1 as they are needed if allocation
    // fails.
    __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(r5, r3, r4, r6, &slow);
    // Now we can use r0 for the result as key is not needed any more.
    __ mov(r0, r5);

    if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
      CpuFeatures::Scope scope(VFP3);
      __ vmov(s0, value);
      __ vcvt_f64_s32(d0, s0);
      // Untag the heap pointer so vstr can use a plain offset.
      __ sub(r3, r0, Operand(kHeapObjectTag));
      __ vstr(d0, r3, HeapNumber::kValueOffset);
      __ Ret();
    } else {
      // No VFP3: delegate the int32->double store to the shared stub,
      // which returns directly to the caller (tail call).
      WriteInt32ToHeapNumberStub stub(value, r0, r3);
      __ TailCallStub(&stub);
    }
  } else if (array_type == kExternalUnsignedIntArray) {
    // The test is different for unsigned int values. Since we need
    // the value to be in the range of a positive smi, we can't
    // handle either of the top two bits being set in the value.
    if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
      CpuFeatures::Scope scope(VFP3);
      // NOTE(review): the 'done' label here is declared but never used.
      Label box_int, done;
      __ tst(value, Operand(0xC0000000));
      __ b(ne, &box_int);
      // Tag integer as smi and return it.
      __ mov(r0, Operand(value, LSL, kSmiTagSize));
      __ Ret();

      __ bind(&box_int);
      // Stash the value in s0 before allocation clobbers core registers.
      __ vmov(s0, value);
      // Allocate a HeapNumber for the result and perform int-to-double
      // conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all
      // registers - also when jumping due to exhausted young space.
      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
      __ AllocateHeapNumber(r2, r3, r4, r6, &slow);

      // Unsigned conversion, unlike the signed vcvt_f64_s32 path above.
      __ vcvt_f64_u32(d0, s0);
      __ sub(r1, r2, Operand(kHeapObjectTag));
      __ vstr(d0, r1, HeapNumber::kValueOffset);

      __ mov(r0, r2);
      __ Ret();
    } else {
      // Check whether unsigned integer fits into smi.
      Label box_int_0, box_int_1, done;
      // Bit 31 set => 0 leading zeros; bit 30 set => exactly 1 leading zero.
      __ tst(value, Operand(0x80000000));
      __ b(ne, &box_int_0);
      __ tst(value, Operand(0x40000000));
      __ b(ne, &box_int_1);
      // Tag integer as smi and return it.
      __ mov(r0, Operand(value, LSL, kSmiTagSize));
      __ Ret();

      Register hiword = value;  // r2.
      Register loword = r3;

      __ bind(&box_int_0);
      // Integer does not have leading zeros.
      GenerateUInt2Double(masm, hiword, loword, r4, 0);
      __ b(&done);

      __ bind(&box_int_1);
      // Integer has one leading zero.
      GenerateUInt2Double(masm, hiword, loword, r4, 1);


      __ bind(&done);
      // Integer was converted to double in registers hiword:loword.
      // Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber
      // clobbers all registers - also when jumping due to exhausted young
      // space.
      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
      __ AllocateHeapNumber(r4, r5, r7, r6, &slow);

      // Store the two words of the double directly into the HeapNumber.
      __ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset));
      __ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset));

      __ mov(r0, r4);
      __ Ret();
    }
  } else if (array_type == kExternalFloatArray) {
    // For the floating-point array type, we need to always allocate a
    // HeapNumber.
    if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
      CpuFeatures::Scope scope(VFP3);
      // Allocate a HeapNumber for the result. Don't use r0 and r1 as
      // AllocateHeapNumber clobbers all registers - also when jumping due to
      // exhausted young space.
      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
      __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
      // Widen the binary32 loaded into s0 earlier to binary64.
      __ vcvt_f64_f32(d0, s0);
      __ sub(r1, r2, Operand(kHeapObjectTag));
      __ vstr(d0, r1, HeapNumber::kValueOffset);

      __ mov(r0, r2);
      __ Ret();
    } else {
      // Allocate a HeapNumber for the result. Don't use r0 and r1 as
      // AllocateHeapNumber clobbers all registers - also when jumping due to
      // exhausted young space.
      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
      __ AllocateHeapNumber(r3, r4, r5, r6, &slow);
      // VFP is not available, do manual single to double conversion.

      // r2: floating point value (binary32)
      // r3: heap number for result

      // Extract mantissa to r0. OK to clobber r0 now as there are no jumps to
      // the slow case from here.
      __ and_(r0, value, Operand(kBinary32MantissaMask));

      // Extract exponent to r1. OK to clobber r1 now as there are no jumps to
      // the slow case from here.
      __ mov(r1, Operand(value, LSR, kBinary32MantissaBits));
      __ and_(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));

      Label exponent_rebiased;
      // Exponent 0 (zero/denormal) maps to double exponent 0 unchanged.
      __ teq(r1, Operand(0x00, RelocInfo::NONE));
      __ b(eq, &exponent_rebiased);

      // Exponent 0xff (inf/NaN) maps to the all-ones double exponent 0x7ff.
      __ teq(r1, Operand(0xff));
      __ mov(r1, Operand(0x7ff), LeaveCC, eq);
      __ b(eq, &exponent_rebiased);

      // Rebias exponent (single bias 127 -> double bias 1023).
      __ add(r1,
             r1,
             Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));

      __ bind(&exponent_rebiased);
      // Assemble sign | exponent into r2; 'value' (r2) is dead from here on.
      __ and_(r2, value, Operand(kBinary32SignMask));
      value = no_reg;
      __ orr(r2, r2, Operand(r1, LSL, HeapNumber::kMantissaBitsInTopWord));

      // Shift mantissa.
      static const int kMantissaShiftForHiWord =
          kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;

      static const int kMantissaShiftForLoWord =
          kBitsPerInt - kMantissaShiftForHiWord;

      // High mantissa bits join the exponent word; the rest fill the low word.
      __ orr(r2, r2, Operand(r0, LSR, kMantissaShiftForHiWord));
      __ mov(r0, Operand(r0, LSL, kMantissaShiftForLoWord));

      __ str(r2, FieldMemOperand(r3, HeapNumber::kExponentOffset));
      __ str(r0, FieldMemOperand(r3, HeapNumber::kMantissaOffset));

      __ mov(r0, r3);
      __ Ret();
    }

  } else {
    // Byte/short element types always fit a smi: tag and return.
    __ mov(r0, Operand(value, LSL, kSmiTagSize));
    __ Ret();
  }

  // Slow case, key and receiver still in r0 and r1.
  __ bind(&slow);
  __ IncrementCounter(COUNTERS->keyed_load_external_array_slow(), 1, r2, r3);
  GenerateRuntimeGetProperty(masm);
}
1642
1643
1644 void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) { 1344 void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
1645 // ---------- S t a t e -------------- 1345 // ---------- S t a t e --------------
1646 // -- lr : return address 1346 // -- lr : return address
1647 // -- r0 : key 1347 // -- r0 : key
1648 // -- r1 : receiver 1348 // -- r1 : receiver
1649 // ----------------------------------- 1349 // -----------------------------------
1650 Label slow; 1350 Label slow;
1651 1351
1652 // Check that the receiver isn't a smi. 1352 // Check that the receiver isn't a smi.
1653 __ BranchOnSmi(r1, &slow); 1353 __ JumpIfSmi(r1, &slow);
1654 1354
1655 // Check that the key is an array index, that is Uint32. 1355 // Check that the key is an array index, that is Uint32.
1656 __ tst(r0, Operand(kSmiTagMask | kSmiSignMask)); 1356 __ tst(r0, Operand(kSmiTagMask | kSmiSignMask));
1657 __ b(ne, &slow); 1357 __ b(ne, &slow);
1658 1358
1659 // Get the map of the receiver. 1359 // Get the map of the receiver.
1660 __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); 1360 __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
1661 1361
1662 // Check that it has indexed interceptor and access checks 1362 // Check that it has indexed interceptor and access checks
1663 // are not enabled for this object. 1363 // are not enabled for this object.
(...skipping 103 matching lines...) Expand 10 before | Expand all | Expand 10 after
1767 GenerateRuntimeSetProperty(masm); 1467 GenerateRuntimeSetProperty(masm);
1768 1468
1769 // Check whether the elements is a pixel array. 1469 // Check whether the elements is a pixel array.
1770 // r4: elements map. 1470 // r4: elements map.
1771 __ bind(&check_pixel_array); 1471 __ bind(&check_pixel_array);
1772 __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex); 1472 __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
1773 __ cmp(r4, ip); 1473 __ cmp(r4, ip);
1774 __ b(ne, &slow); 1474 __ b(ne, &slow);
1775 // Check that the value is a smi. If a conversion is needed call into the 1475 // Check that the value is a smi. If a conversion is needed call into the
1776 // runtime to convert and clamp. 1476 // runtime to convert and clamp.
1777 __ BranchOnNotSmi(value, &slow); 1477 __ JumpIfNotSmi(value, &slow);
1778 __ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag the key. 1478 __ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag the key.
1779 __ ldr(ip, FieldMemOperand(elements, PixelArray::kLengthOffset)); 1479 __ ldr(ip, FieldMemOperand(elements, PixelArray::kLengthOffset));
1780 __ cmp(r4, Operand(ip)); 1480 __ cmp(r4, Operand(ip));
1781 __ b(hs, &slow); 1481 __ b(hs, &slow);
1782 __ mov(r5, Operand(value, ASR, kSmiTagSize)); // Untag the value. 1482 __ mov(r5, Operand(value, ASR, kSmiTagSize)); // Untag the value.
1783 __ Usat(r5, 8, Operand(r5)); // Clamp the value to [0..255]. 1483 __ Usat(r5, 8, Operand(r5)); // Clamp the value to [0..255].
1784 1484
1785 // Get the pointer to the external array. This clobbers elements. 1485 // Get the pointer to the external array. This clobbers elements.
1786 __ ldr(elements, 1486 __ ldr(elements,
1787 FieldMemOperand(elements, PixelArray::kExternalPointerOffset)); 1487 FieldMemOperand(elements, PixelArray::kExternalPointerOffset));
(...skipping 42 matching lines...) Expand 10 before | Expand all | Expand 10 after
1830 __ tst(value, Operand(kSmiTagMask)); 1530 __ tst(value, Operand(kSmiTagMask));
1831 __ Ret(eq); 1531 __ Ret(eq);
1832 // Update write barrier for the elements array address. 1532 // Update write barrier for the elements array address.
1833 __ sub(r4, r5, Operand(elements)); 1533 __ sub(r4, r5, Operand(elements));
1834 __ RecordWrite(elements, Operand(r4), r5, r6); 1534 __ RecordWrite(elements, Operand(r4), r5, r6);
1835 1535
1836 __ Ret(); 1536 __ Ret();
1837 } 1537 }
1838 1538
1839 1539
// Convert the int passed in register ival to an IEEE 754 single precision
// floating point value and store it at memory location (dst + 4 * wordoffset).
// If VFP3 is available, use it for the conversion; otherwise assemble the
// binary32 bit pattern manually in fval (sign | exponent | mantissa).
// Clobbers ival, fval, scratch1 and scratch2 on the non-VFP path.
static void StoreIntAsFloat(MacroAssembler* masm,
                            Register dst,
                            Register wordoffset,
                            Register ival,
                            Register fval,
                            Register scratch1,
                            Register scratch2) {
  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);
    __ vmov(s0, ival);
    // vstr has no register-offset form, so compute the address first.
    __ add(scratch1, dst, Operand(wordoffset, LSL, 2));
    __ vcvt_f32_s32(s0, s0);
    __ vstr(s0, scratch1, 0);
  } else {
    Label not_special, done;
    // Move sign bit from source to destination.  This works because the sign
    // bit in the exponent word of the double has the same position and polarity
    // as the 2's complement sign bit in a Smi.
    ASSERT(kBinary32SignMask == 0x80000000u);

    __ and_(fval, ival, Operand(kBinary32SignMask), SetCC);
    // Negate value if it is negative (ne: sign bit was set above).
    __ rsb(ival, ival, Operand(0, RelocInfo::NONE), LeaveCC, ne);

    // We have -1, 0 or 1, which we treat specially. Register ival contains
    // absolute value: it is either equal to 1 (special case of -1 and 1),
    // greater than 1 (not a special case) or less than 1 (special case of 0).
    __ cmp(ival, Operand(1));
    __ b(gt, &not_special);

    // For 1 or -1 we need to or in the 0 exponent (biased).  For 0 (eq check
    // below fails both ways via LeaveCC semantics: fval already holds 0).
    static const uint32_t exponent_word_for_1 =
        kBinary32ExponentBias << kBinary32ExponentShift;

    __ orr(fval, fval, Operand(exponent_word_for_1), LeaveCC, eq);
    __ b(&done);

    __ bind(&not_special);
    // Count leading zeros.
    // Gets the wrong answer for 0, but we already checked for that case above.
    Register zeros = scratch2;
    __ CountLeadingZeros(zeros, ival, scratch1);

    // Compute exponent and or it into the exponent register:
    // exponent = (31 - zeros) + bias, i.e. the position of the leading 1.
    __ rsb(scratch1,
           zeros,
           Operand((kBitsPerInt - 1) + kBinary32ExponentBias));

    __ orr(fval,
           fval,
           Operand(scratch1, LSL, kBinary32ExponentShift));

    // Shift up the source chopping the top (implicit leading 1) bit off.
    __ add(zeros, zeros, Operand(1));
    // This wouldn't work for 1 and -1 as the shift would be 32 which means 0.
    __ mov(ival, Operand(ival, LSL, zeros));
    // Or in the top kBinary32MantissaBits bits of the mantissa.
    __ orr(fval,
           fval,
           Operand(ival, LSR, kBitsPerInt - kBinary32MantissaBits));

    __ bind(&done);
    __ str(fval, MemOperand(dst, wordoffset, LSL, 2));
  }
}
1908
1909
1910 static bool IsElementTypeSigned(ExternalArrayType array_type) {
1911 switch (array_type) {
1912 case kExternalByteArray:
1913 case kExternalShortArray:
1914 case kExternalIntArray:
1915 return true;
1916
1917 case kExternalUnsignedByteArray:
1918 case kExternalUnsignedShortArray:
1919 case kExternalUnsignedIntArray:
1920 return false;
1921
1922 default:
1923 UNREACHABLE();
1924 return false;
1925 }
1926 }
1927
1928
// Generic keyed-store stub for JSObjects whose elements are an external
// (typed) array of kind |array_type|.
//
// Fast paths emitted here:
//   * smi values: untagged and stored with the element-width store
//     (strb/strh/str), or converted to float32 for float arrays;
//   * HeapNumber values: converted to the element type, using VFP3 when
//     the CPU supports it and manual bit manipulation otherwise.  Per the
//     WebGL spec note below, NaN and +/-Infinity stored into integer
//     arrays are converted to zero.
// Any failed check, and any other kind of value, falls through to the
// runtime via GenerateRuntimeSetProperty.
void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
                                         ExternalArrayType array_type) {
  // ---------- S t a t e --------------
  //  -- r0     : value
  //  -- r1     : key
  //  -- r2     : receiver
  //  -- lr     : return address
  // -----------------------------------
  Label slow, check_heap_number;

  // Register usage.
  Register value = r0;
  Register key = r1;
  Register receiver = r2;
  // r3 mostly holds the elements array or the destination external array.

  // Check that the object isn't a smi.
  __ BranchOnSmi(receiver, &slow);

  // Check that the object is a JS object. Load map into r3.
  __ CompareObjectType(receiver, r3, r4, FIRST_JS_OBJECT_TYPE);
  __ b(le, &slow);

  // Check that the receiver does not require access checks.  We need
  // to do this because this generic stub does not perform map checks.
  __ ldrb(ip, FieldMemOperand(r3, Map::kBitFieldOffset));
  __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
  __ b(ne, &slow);

  // Check that the key is a smi.
  __ BranchOnNotSmi(key, &slow);

  // Check that the elements array is the appropriate type of ExternalArray,
  // i.e. its map matches the canonical map for |array_type|.
  __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
  __ LoadRoot(ip, HEAP->RootIndexForExternalArrayType(array_type));
  __ cmp(r4, ip);
  __ b(ne, &slow);

  // Check that the index is in range.
  __ mov(r4, Operand(key, ASR, kSmiTagSize));  // Untag the index.
  __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
  __ cmp(r4, ip);
  // Unsigned comparison catches both negative and too-large values.
  __ b(hs, &slow);

  // Handle both smis and HeapNumbers in the fast path. Go to the
  // runtime for all other kinds of values.
  // r3: external array.
  // r4: key (integer).
  __ BranchOnNotSmi(value, &check_heap_number);
  __ mov(r5, Operand(value, ASR, kSmiTagSize));  // Untag the value.
  __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));

  // r3: base pointer of external storage.
  // r4: key (integer).
  // r5: value (integer).
  switch (array_type) {
    case kExternalByteArray:
    case kExternalUnsignedByteArray:
      __ strb(r5, MemOperand(r3, r4, LSL, 0));
      break;
    case kExternalShortArray:
    case kExternalUnsignedShortArray:
      __ strh(r5, MemOperand(r3, r4, LSL, 1));
      break;
    case kExternalIntArray:
    case kExternalUnsignedIntArray:
      __ str(r5, MemOperand(r3, r4, LSL, 2));
      break;
    case kExternalFloatArray:
      // Perform int-to-float conversion and store to memory.
      StoreIntAsFloat(masm, r3, r4, r5, r6, r7, r9);
      break;
    default:
      UNREACHABLE();
      break;
  }

  // Entry registers are intact, r0 holds the value which is the return value.
  __ Ret();


  // r3: external array.
  // r4: index (integer).
  __ bind(&check_heap_number);
  // After this, r5 holds the map of |value| and r6 its instance type.
  __ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE);
  __ b(ne, &slow);

  __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));

  // r3: base pointer of external storage.
  // r4: key (integer).

  // The WebGL specification leaves the behavior of storing NaN and
  // +/-Infinity into integer arrays basically undefined. For more
  // reproducible behavior, convert these to zero.
  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);


    if (array_type == kExternalFloatArray) {
      // vldr requires offset to be a multiple of 4 so we can not
      // include -kHeapObjectTag into it.
      __ sub(r5, r0, Operand(kHeapObjectTag));
      __ vldr(d0, r5, HeapNumber::kValueOffset);
      // Narrow the double to float32 and store it at r3 + key * 4.
      __ add(r5, r3, Operand(r4, LSL, 2));
      __ vcvt_f32_f64(s0, d0);
      __ vstr(s0, r5, 0);
    } else {
      // Need to perform float-to-int conversion.
      // Test for NaN or infinity (both give zero).
      // NOTE(review): at this point r5 still holds the *map* of |value|
      // (written by CompareObjectType above), not |value| itself, so this
      // load reads from the map object.  Confirm it should instead be
      // FieldMemOperand(value, HeapNumber::kExponentOffset).
      __ ldr(r6, FieldMemOperand(r5, HeapNumber::kExponentOffset));

      // Hoisted load. vldr requires offset to be a multiple of 4 so we can not
      // include -kHeapObjectTag into it.
      __ sub(r5, r0, Operand(kHeapObjectTag));
      __ vldr(d0, r5, HeapNumber::kValueOffset);

      __ Sbfx(r6, r6, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
      // NaNs and Infinities have all-one exponents so they sign extend to -1.
      __ cmp(r6, Operand(-1));
      // NaN/Infinity: store 0 (Smi::FromInt(0) is the untagged value 0).
      __ mov(r5, Operand(Smi::FromInt(0)), LeaveCC, eq);

      // Not infinity or NaN simply convert to int (truncating toward zero);
      // the conversions below are conditional on ne, i.e. the normal case.
      if (IsElementTypeSigned(array_type)) {
        __ vcvt_s32_f64(s0, d0, Assembler::RoundToZero, ne);
      } else {
        __ vcvt_u32_f64(s0, d0, Assembler::RoundToZero, ne);
      }
      __ vmov(r5, s0, ne);

      switch (array_type) {
        case kExternalByteArray:
        case kExternalUnsignedByteArray:
          __ strb(r5, MemOperand(r3, r4, LSL, 0));
          break;
        case kExternalShortArray:
        case kExternalUnsignedShortArray:
          __ strh(r5, MemOperand(r3, r4, LSL, 1));
          break;
        case kExternalIntArray:
        case kExternalUnsignedIntArray:
          __ str(r5, MemOperand(r3, r4, LSL, 2));
          break;
        default:
          UNREACHABLE();
          break;
      }
    }

    // Entry registers are intact, r0 holds the value which is the return value.
    __ Ret();
  } else {
    // VFP3 is not available do manual conversions.
    // r5: high word of the double (sign, exponent, top mantissa bits).
    // r6: low word of the double (remaining mantissa bits).
    __ ldr(r5, FieldMemOperand(value, HeapNumber::kExponentOffset));
    __ ldr(r6, FieldMemOperand(value, HeapNumber::kMantissaOffset));

    if (array_type == kExternalFloatArray) {
      // Manual IEEE 754 double -> single conversion.
      Label done, nan_or_infinity_or_zero;
      static const int kMantissaInHiWordShift =
          kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;

      static const int kMantissaInLoWordShift =
          kBitsPerInt - kMantissaInHiWordShift;

      // Test for all special exponent values: zeros, subnormal numbers, NaNs
      // and infinities. All these should be converted to 0.
      __ mov(r7, Operand(HeapNumber::kExponentMask));
      __ and_(r9, r5, Operand(r7), SetCC);
      __ b(eq, &nan_or_infinity_or_zero);

      // All-ones exponent: NaN/Infinity — emit with the binary32 all-ones
      // exponent, preserving sign and (shifted) mantissa.
      __ teq(r9, Operand(r7));
      __ mov(r9, Operand(kBinary32ExponentMask), LeaveCC, eq);
      __ b(eq, &nan_or_infinity_or_zero);

      // Rebias exponent from double bias (1023) to single bias (127).
      __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
      __ add(r9,
             r9,
             Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));

      // Overflow of the single-precision exponent range: produce +/-Infinity.
      __ cmp(r9, Operand(kBinary32MaxExponent));
      __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, gt);
      __ orr(r5, r5, Operand(kBinary32ExponentMask), LeaveCC, gt);
      __ b(gt, &done);

      // Underflow: flush to signed zero.
      __ cmp(r9, Operand(kBinary32MinExponent));
      __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, lt);
      __ b(lt, &done);

      // Normal case: assemble sign | mantissa (from both words) | exponent.
      __ and_(r7, r5, Operand(HeapNumber::kSignMask));
      __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
      __ orr(r7, r7, Operand(r5, LSL, kMantissaInHiWordShift));
      __ orr(r7, r7, Operand(r6, LSR, kMantissaInLoWordShift));
      __ orr(r5, r7, Operand(r9, LSL, kBinary32ExponentShift));

      __ bind(&done);
      __ str(r5, MemOperand(r3, r4, LSL, 2));
      // Entry registers are intact, r0 holds the value which is the return
      // value.
      __ Ret();

      __ bind(&nan_or_infinity_or_zero);
      // r9 holds the result exponent field (0 or all ones); merge in sign
      // and mantissa bits, then rejoin the store above.
      __ and_(r7, r5, Operand(HeapNumber::kSignMask));
      __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
      __ orr(r9, r9, r7);
      __ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift));
      __ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift));
      __ b(&done);
    } else {
      // Manual IEEE 754 double -> integer truncation.
      bool is_signed_type = IsElementTypeSigned(array_type);
      int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
      int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;

      Label done, sign;

      // Test for all special exponent values: zeros, subnormal numbers, NaNs
      // and infinities. All these should be converted to 0.
      __ mov(r7, Operand(HeapNumber::kExponentMask));
      __ and_(r9, r5, Operand(r7), SetCC);
      __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
      __ b(eq, &done);

      __ teq(r9, Operand(r7));
      __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
      __ b(eq, &done);

      // Unbias exponent.
      __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
      __ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC);
      // If exponent is negative than result is 0 (|value| < 1).
      __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, mi);
      __ b(mi, &done);

      // If exponent is too big than result is minimal value: the value does
      // not fit in meaningfull_bits, mirroring ARM's out-of-range behavior.
      __ cmp(r9, Operand(meaningfull_bits - 1));
      __ mov(r5, Operand(min_value), LeaveCC, ge);
      __ b(ge, &done);

      // Extract sign into r7 and reconstruct the full mantissa in r5
      // including the implicit leading 1 bit.
      __ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC);
      __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
      __ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord));

      // If the binary point lands inside the top word, a single right shift
      // produces the integer; otherwise (mi) bits from the low word are
      // needed too.
      __ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
      __ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl);
      __ b(pl, &sign);

      // Shift the top-word mantissa left and fold in the needed high bits
      // of the low mantissa word.
      __ rsb(r9, r9, Operand(0, RelocInfo::NONE));
      __ mov(r5, Operand(r5, LSL, r9));
      __ rsb(r9, r9, Operand(meaningfull_bits));
      __ orr(r5, r5, Operand(r6, LSR, r9));

      __ bind(&sign);
      // Negate the magnitude when the sign bit was set.
      __ teq(r7, Operand(0, RelocInfo::NONE));
      __ rsb(r5, r5, Operand(0, RelocInfo::NONE), LeaveCC, ne);

      __ bind(&done);
      switch (array_type) {
        case kExternalByteArray:
        case kExternalUnsignedByteArray:
          __ strb(r5, MemOperand(r3, r4, LSL, 0));
          break;
        case kExternalShortArray:
        case kExternalUnsignedShortArray:
          __ strh(r5, MemOperand(r3, r4, LSL, 1));
          break;
        case kExternalIntArray:
        case kExternalUnsignedIntArray:
          __ str(r5, MemOperand(r3, r4, LSL, 2));
          break;
        default:
          UNREACHABLE();
          break;
      }
    }
  }

  // Slow case: call runtime.
  __ bind(&slow);

  // Entry registers are intact.
  // r0: value
  // r1: key
  // r2: receiver
  GenerateRuntimeSetProperty(masm);
}
2216
2217
2218 void StoreIC::GenerateMegamorphic(MacroAssembler* masm) { 1540 void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
2219 // ----------- S t a t e ------------- 1541 // ----------- S t a t e -------------
2220 // -- r0 : value 1542 // -- r0 : value
2221 // -- r1 : receiver 1543 // -- r1 : receiver
2222 // -- r2 : name 1544 // -- r2 : name
2223 // -- lr : return address 1545 // -- lr : return address
2224 // ----------------------------------- 1546 // -----------------------------------
2225 1547
2226 // Get the receiver from the stack and probe the stub cache. 1548 // Get the receiver from the stack and probe the stub cache.
2227 Code::Flags flags = Code::ComputeFlags(Code::STORE_IC, 1549 Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
(...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after
2266 // to JSArray. 1588 // to JSArray.
2267 // Value must be a number, but only smis are accepted as the most common case. 1589 // Value must be a number, but only smis are accepted as the most common case.
2268 1590
2269 Label miss; 1591 Label miss;
2270 1592
2271 Register receiver = r1; 1593 Register receiver = r1;
2272 Register value = r0; 1594 Register value = r0;
2273 Register scratch = r3; 1595 Register scratch = r3;
2274 1596
2275 // Check that the receiver isn't a smi. 1597 // Check that the receiver isn't a smi.
2276 __ BranchOnSmi(receiver, &miss); 1598 __ JumpIfSmi(receiver, &miss);
2277 1599
2278 // Check that the object is a JS array. 1600 // Check that the object is a JS array.
2279 __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE); 1601 __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE);
2280 __ b(ne, &miss); 1602 __ b(ne, &miss);
2281 1603
2282 // Check that elements are FixedArray. 1604 // Check that elements are FixedArray.
2283 // We rely on StoreIC_ArrayLength below to deal with all types of 1605 // We rely on StoreIC_ArrayLength below to deal with all types of
2284 // fast elements (including COW). 1606 // fast elements (including COW).
2285 __ ldr(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset)); 1607 __ ldr(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
2286 __ CompareObjectType(scratch, scratch, scratch, FIXED_ARRAY_TYPE); 1608 __ CompareObjectType(scratch, scratch, scratch, FIXED_ARRAY_TYPE);
2287 __ b(ne, &miss); 1609 __ b(ne, &miss);
2288 1610
2289 // Check that value is a smi. 1611 // Check that value is a smi.
2290 __ BranchOnNotSmi(value, &miss); 1612 __ JumpIfNotSmi(value, &miss);
2291 1613
2292 // Prepare tail call to StoreIC_ArrayLength. 1614 // Prepare tail call to StoreIC_ArrayLength.
2293 __ Push(receiver, value); 1615 __ Push(receiver, value);
2294 1616
2295 ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_ArrayLength)); 1617 ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_ArrayLength));
2296 __ TailCallExternalReference(ref, 2, 1); 1618 __ TailCallExternalReference(ref, 2, 1);
2297 1619
2298 __ bind(&miss); 1620 __ bind(&miss);
2299 1621
2300 GenerateMiss(masm); 1622 GenerateMiss(masm);
(...skipping 49 matching lines...) Expand 10 before | Expand all | Expand 10 after
2350 case Token::GT: 1672 case Token::GT:
2351 // Reverse left and right operands to obtain ECMA-262 conversion order. 1673 // Reverse left and right operands to obtain ECMA-262 conversion order.
2352 return lt; 1674 return lt;
2353 case Token::LTE: 1675 case Token::LTE:
2354 // Reverse left and right operands to obtain ECMA-262 conversion order. 1676 // Reverse left and right operands to obtain ECMA-262 conversion order.
2355 return ge; 1677 return ge;
2356 case Token::GTE: 1678 case Token::GTE:
2357 return ge; 1679 return ge;
2358 default: 1680 default:
2359 UNREACHABLE(); 1681 UNREACHABLE();
2360 return no_condition; 1682 return kNoCondition;
2361 } 1683 }
2362 } 1684 }
2363 1685
2364 1686
2365 void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) { 1687 void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
2366 HandleScope scope; 1688 HandleScope scope;
2367 Handle<Code> rewritten; 1689 Handle<Code> rewritten;
2368 State previous_state = GetState(); 1690 State previous_state = GetState();
2369 State state = TargetState(previous_state, false, x, y); 1691 State state = TargetState(previous_state, false, x, y);
2370 if (state == GENERIC) { 1692 if (state == GENERIC) {
(...skipping 10 matching lines...) Expand all
2381 PrintF("[CompareIC (%s->%s)#%s]\n", 1703 PrintF("[CompareIC (%s->%s)#%s]\n",
2382 GetStateName(previous_state), 1704 GetStateName(previous_state),
2383 GetStateName(state), 1705 GetStateName(state),
2384 Token::Name(op_)); 1706 Token::Name(op_));
2385 } 1707 }
2386 #endif 1708 #endif
2387 } 1709 }
2388 1710
2389 1711
2390 void PatchInlinedSmiCode(Address address) { 1712 void PatchInlinedSmiCode(Address address) {
2391 UNIMPLEMENTED(); 1713 // Currently there is no smi inlining in the ARM full code generator.
2392 } 1714 }
2393 1715
2394 1716
2395 } } // namespace v8::internal 1717 } } // namespace v8::internal
2396 1718
2397 #endif // V8_TARGET_ARCH_ARM 1719 #endif // V8_TARGET_ARCH_ARM
OLDNEW
« no previous file with comments | « src/arm/full-codegen-arm.cc ('k') | src/arm/jump-target-arm.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698