Chromium Code Reviews

Unified Diff: src/code-stub-assembler.cc

Issue 2206333003: [stubs] Convert GrowElementsStub to TurboFan (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Rebase (created 4 years, 4 months ago)
 // Copyright 2016 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "src/code-stub-assembler.h"
 #include "src/code-factory.h"
 #include "src/frames-inl.h"
 #include "src/frames.h"
 #include "src/ic/stub-cache.h"
 
(...skipping 63 matching lines...)
 }
 
 Node* CodeStubAssembler::HashSeed() {
   return LoadAndUntagToWord32Root(Heap::kHashSeedRootIndex);
 }
 
 Node* CodeStubAssembler::StaleRegisterConstant() {
   return LoadRoot(Heap::kStaleRegisterRootIndex);
 }
 
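+// Returns |value| as a Smi constant when mode is SMI_PARAMETERS, and as a raw
+// intptr constant when mode is INTEGER_PARAMETERS.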
+Node* CodeStubAssembler::IntPtrOrSmiConstant(int value, ParameterMode mode) {
+  if (mode == SMI_PARAMETERS) {
+    return SmiConstant(Smi::FromInt(value));
+  } else {
+    DCHECK_EQ(INTEGER_PARAMETERS, mode);
+    return IntPtrConstant(value);
+  }
+}
+
 Node* CodeStubAssembler::Float64Round(Node* x) {
   Node* one = Float64Constant(1.0);
   Node* one_half = Float64Constant(0.5);
 
   Variable var_x(this, MachineRepresentation::kFloat64);
   Label return_x(this);
 
   // Round up {x} towards Infinity.
   var_x.Bind(Float64Ceil(x));
 
(...skipping 893 matching lines...)
 }
 
 Node* CodeStubAssembler::LoadProperties(Node* object) {
   return LoadObjectField(object, JSObject::kPropertiesOffset);
 }
 
 Node* CodeStubAssembler::LoadElements(Node* object) {
   return LoadObjectField(object, JSObject::kElementsOffset);
 }
 
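+// Loads the tagged (Smi) length field of a FixedArray or FixedDoubleArray.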
+Node* CodeStubAssembler::LoadFixedArrayBaseLength(compiler::Node* array) {
+  return LoadObjectField(array, FixedArrayBase::kLengthOffset);
+}
+
 Node* CodeStubAssembler::LoadAndUntagFixedArrayBaseLength(Node* array) {
   return LoadAndUntagObjectField(array, FixedArrayBase::kLengthOffset);
 }
 
 Node* CodeStubAssembler::LoadMapBitField(Node* map) {
   return LoadObjectField(map, Map::kBitFieldOffset, MachineType::Uint8());
 }
 
 Node* CodeStubAssembler::LoadMapBitField2(Node* map) {
   return LoadObjectField(map, Map::kBitField2Offset, MachineType::Uint8());
(...skipping 328 matching lines...)
   int base_size = JSArray::kSize + FixedArray::kHeaderSize;
   int elements_offset = JSArray::kSize;
 
   Comment("begin allocation of JSArray");
 
   if (allocation_site != nullptr) {
     base_size += AllocationMemento::kSize;
     elements_offset += AllocationMemento::kSize;
   }
 
-  int32_t capacity;
-  bool constant_capacity = ToInt32Constant(capacity_node, capacity);
   Node* total_size =
       ElementOffsetFromIndex(capacity_node, kind, mode, base_size);
 
   // Allocate both array and elements object, and initialize the JSArray.
   Heap* heap = isolate()->heap();
   Node* array = Allocate(total_size);
   StoreMapNoWriteBarrier(array, array_map);
   Node* empty_properties = LoadRoot(Heap::kEmptyFixedArrayRootIndex);
   StoreObjectFieldNoWriteBarrier(array, JSArray::kPropertiesOffset,
                                  empty_properties);
   StoreObjectFieldNoWriteBarrier(
       array, JSArray::kLengthOffset,
       mode == SMI_PARAMETERS ? length_node : SmiTag(length_node));
 
   if (allocation_site != nullptr) {
     InitializeAllocationMemento(array, JSArray::kSize, allocation_site);
   }
 
   // Setup elements object.
   Node* elements = InnerAllocate(array, elements_offset);
   StoreObjectFieldNoWriteBarrier(array, JSArray::kElementsOffset, elements);
   Handle<Map> elements_map(is_double ? heap->fixed_double_array_map()
                                      : heap->fixed_array_map());
   StoreMapNoWriteBarrier(elements, HeapConstant(elements_map));
   StoreObjectFieldNoWriteBarrier(
       elements, FixedArray::kLengthOffset,
       mode == SMI_PARAMETERS ? capacity_node : SmiTag(capacity_node));
 
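+  // Fill the freshly allocated elements store with holes, so the array is in
+  // a consistent state before any elements are written.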
+  FillFixedArrayWithHole(kind, elements, IntPtrConstant(0), capacity_node,
+                         mode);
+
+  return array;
+}
+
+Node* CodeStubAssembler::AllocateFixedArray(ElementsKind kind,
+                                            Node* capacity_node,
+                                            ParameterMode mode) {
+  Node* total_size = ElementOffsetFromIndex(capacity_node, kind, mode,
+                                            FixedArray::kHeaderSize);
+
+  // Allocate the FixedArray and initialize its map and length field.
+  Node* array = Allocate(total_size);
+  Heap* heap = isolate()->heap();
+  Handle<Map> map(IsFastDoubleElementsKind(kind)
+                      ? heap->fixed_double_array_map()
+                      : heap->fixed_array_map());
+  StoreMapNoWriteBarrier(array, HeapConstant(map));
+  StoreObjectFieldNoWriteBarrier(
+      array, FixedArray::kLengthOffset,
+      mode == INTEGER_PARAMETERS ? SmiTag(capacity_node) : capacity_node);
+  return array;
+}
+
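+// Stores the hole value into every element of |array| in the half-open index
+// range [from_node, to_node). For double kinds the hole is written as the raw
+// hole NaN bit pattern rather than via a Float64 store; see the comments in
+// the body below.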
+void CodeStubAssembler::FillFixedArrayWithHole(ElementsKind kind,
+                                               compiler::Node* array,
+                                               compiler::Node* from_node,
+                                               compiler::Node* to_node,
+                                               ParameterMode mode) {
   int const first_element_offset = FixedArray::kHeaderSize - kHeapObjectTag;
+  Heap* heap = isolate()->heap();
   Node* hole = HeapConstant(Handle<HeapObject>(heap->the_hole_value()));
   Node* double_hole =
       Is64() ? Int64Constant(kHoleNanInt64) : Int32Constant(kHoleNanLower32);
   DCHECK_EQ(kHoleNanLower32, kHoleNanUpper32);
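+  // When both bounds are compile-time constants and the range is small, emit
+  // an unrolled sequence of stores instead of a loop.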
-  if (constant_capacity && capacity <= kElementLoopUnrollThreshold) {
-    for (int i = 0; i < capacity; ++i) {
+  bool is_double = IsFastDoubleElementsKind(kind);
+  int32_t to;
+  bool constant_to = ToInt32Constant(to_node, to);
+  int32_t from;
+  bool constant_from = ToInt32Constant(from_node, from);
+  if (constant_to && constant_from &&
+      (to - from) <= kElementLoopUnrollThreshold) {
+    for (int i = from; i < to; ++i) {
       if (is_double) {
         Node* offset = ElementOffsetFromIndex(Int32Constant(i), kind, mode,
                                               first_element_offset);
         // Don't use doubles to store the hole double, since manipulating the
         // signaling NaN used for the hole in C++, e.g. with bit_cast, will
         // change its value on ia32 (the x87 stack is used to return values
         // and stores to the stack silently clear the signalling bit).
         //
         // TODO(danno): When we have a Float32/Float64 wrapper class that
         // preserves double bits during manipulation, remove this code/change
         // this to an indexed Float64 store.
         if (Is64()) {
-          StoreNoWriteBarrier(MachineRepresentation::kWord64, elements, offset,
+          StoreNoWriteBarrier(MachineRepresentation::kWord64, array, offset,
                               double_hole);
         } else {
-          StoreNoWriteBarrier(MachineRepresentation::kWord32, elements, offset,
+          StoreNoWriteBarrier(MachineRepresentation::kWord32, array, offset,
                               double_hole);
           offset = ElementOffsetFromIndex(Int32Constant(i), kind, mode,
                                           first_element_offset + kPointerSize);
-          StoreNoWriteBarrier(MachineRepresentation::kWord32, elements, offset,
+          StoreNoWriteBarrier(MachineRepresentation::kWord32, array, offset,
                               double_hole);
         }
       } else {
-        StoreFixedArrayElement(elements, Int32Constant(i), hole,
+        StoreFixedArrayElement(array, Int32Constant(i), hole,
                                SKIP_WRITE_BARRIER);
       }
     }
   } else {
     Variable current(this, MachineRepresentation::kTagged);
     Label test(this);
     Label decrement(this, &current);
     Label done(this);
-    Node* limit = IntPtrAdd(elements, IntPtrConstant(first_element_offset));
-    current.Bind(
-        IntPtrAdd(limit, ElementOffsetFromIndex(capacity_node, kind, mode, 0)));
+    Node* limit =
+        IntPtrAdd(array, ElementOffsetFromIndex(from_node, kind, mode));
+    current.Bind(IntPtrAdd(array, ElementOffsetFromIndex(to_node, kind, mode)));
 
     Branch(WordEqual(current.value(), limit), &done, &decrement);
 
     Bind(&decrement);
     current.Bind(IntPtrSub(
         current.value(),
         Int32Constant(IsFastDoubleElementsKind(kind) ? kDoubleSize
                                                      : kPointerSize)));
     if (is_double) {
       // Don't use doubles to store the hole double, since manipulating the
       // signaling NaN used for the hole in C++, e.g. with bit_cast, will
       // change its value on ia32 (the x87 stack is used to return values
       // and stores to the stack silently clear the signalling bit).
       //
       // TODO(danno): When we have a Float32/Float64 wrapper class that
       // preserves double bits during manipulation, remove this code/change
       // this to an indexed Float64 store.
       if (Is64()) {
         StoreNoWriteBarrier(MachineRepresentation::kWord64, current.value(),
-                            double_hole);
+                            Int64Constant(first_element_offset), double_hole);
       } else {
         StoreNoWriteBarrier(MachineRepresentation::kWord32, current.value(),
-                            double_hole);
+                            Int32Constant(first_element_offset), double_hole);
         StoreNoWriteBarrier(
             MachineRepresentation::kWord32,
-            IntPtrAdd(current.value(), Int32Constant(kPointerSize)),
+            IntPtrAdd(current.value(),
+                      Int32Constant(kPointerSize + first_element_offset)),
             double_hole);
       }
     } else {
       StoreNoWriteBarrier(MachineRepresentation::kTagged, current.value(),
-                          hole);
+                          IntPtrConstant(first_element_offset), hole);
     }
     Node* compare = WordNotEqual(current.value(), limit);
     Branch(compare, &decrement, &done);
 
     Bind(&done);
   }
+}
 
-  return array;
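+// Copies |element_count| elements from |from_array| into |to_array|, walking
+// backwards from the last element to the first.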
+void CodeStubAssembler::CopyFixedArrayElements(ElementsKind kind,
+                                               compiler::Node* from_array,
+                                               compiler::Node* to_array,
+                                               compiler::Node* element_count,
+                                               WriteBarrierMode barrier_mode,
+                                               ParameterMode mode) {
+  Label test(this);
+  Label done(this);
+  bool double_elements = IsFastDoubleElementsKind(kind);
+  bool needs_write_barrier =
+      barrier_mode == UPDATE_WRITE_BARRIER && !IsFastObjectElementsKind(kind);
+  Node* limit_offset = ElementOffsetFromIndex(
+      IntPtrConstant(0), kind, mode, FixedArray::kHeaderSize - kHeapObjectTag);
+  Variable current_offset(this, MachineType::PointerRepresentation());
+  current_offset.Bind(ElementOffsetFromIndex(
+      element_count, kind, mode, FixedArray::kHeaderSize - kHeapObjectTag));
+  Label decrement(this, &current_offset);
+
+  Branch(WordEqual(current_offset.value(), limit_offset), &done, &decrement);
+
+  Bind(&decrement);
+  {
+    current_offset.Bind(IntPtrSub(
+        current_offset.value(),
+        IntPtrConstant(double_elements ? kDoubleSize : kPointerSize)));
+
+    Node* value =
+        Load(double_elements ? MachineType::Float64() : MachineType::Pointer(),
+             from_array, current_offset.value());
+    if (needs_write_barrier) {
+      Store(MachineType::PointerRepresentation(), to_array,
+            current_offset.value(), value);
+    } else if (double_elements) {
+      StoreNoWriteBarrier(MachineRepresentation::kFloat64, to_array,
+                          current_offset.value(), value);
+    } else {
+      StoreNoWriteBarrier(MachineType::PointerRepresentation(), to_array,
+                          current_offset.value(), value);
+    }
+    Node* compare = WordNotEqual(current_offset.value(), limit_offset);
+    Branch(compare, &decrement, &done);
+  }
+
+  Bind(&done);
+}
+
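+// Computes the new backing-store capacity as roughly 1.5x the old capacity
+// plus 16 slots: e.g. an old capacity of 100 yields 100/2 + 100 + 16 = 166.
+// In SMI_PARAMETERS mode the final WordAnd clears the low tag bits so that
+// the result rounds down to a valid Smi.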
+Node* CodeStubAssembler::CalculateNewElementsCapacity(Node* old_capacity,
+                                                      ParameterMode mode) {
+  Node* half_old_capacity = WordShr(old_capacity, IntPtrConstant(1));
+  Node* new_capacity = IntPtrAdd(half_old_capacity, old_capacity);
+  Node* unconditioned_result =
+      IntPtrAdd(new_capacity, IntPtrOrSmiConstant(16, mode));
+  if (mode == INTEGER_PARAMETERS) {
+    return unconditioned_result;
+  } else {
+    int const kSmiShiftBits = kSmiShiftSize + kSmiTagSize;
+    return WordAnd(unconditioned_result,
+                   IntPtrConstant(static_cast<size_t>(-1) << kSmiShiftBits));
+  }
+}
+
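+// Grows |elements| so that |key| becomes a valid index, jumping to |fail|
+// when the growth has to be handled by the runtime instead (the requested
+// gap is too large, or the new store would not fit in a regular heap page).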
+Node* CodeStubAssembler::CheckAndGrowElementsCapacity(Node* context,
+                                                      Node* elements,
+                                                      ElementsKind kind,
+                                                      Node* key, Label* fail) {
+  Node* capacity = LoadFixedArrayBaseLength(elements);
+
+  // On 32-bit platforms, there is a slight performance advantage to doing all
+  // of the arithmetic for the new backing store with SMIs, since it's
+  // possible to save a few tag/untag operations without paying an extra
+  // expense when calculating array offsets (the Smi math can be folded away),
+  // and there are fewer live ranges. Thus only convert |capacity| and |key|
+  // to untagged values on 64-bit platforms.
+  ParameterMode mode = Is64() ? INTEGER_PARAMETERS : SMI_PARAMETERS;
+  if (mode == INTEGER_PARAMETERS) {
+    capacity = SmiUntag(capacity);
+    key = SmiUntag(key);
+  }
+
+  // If the gap growth is too big, fall back to the runtime.
+  Node* max_gap = IntPtrOrSmiConstant(JSObject::kMaxGap, mode);
+  Node* max_capacity = IntPtrAdd(capacity, max_gap);
+  GotoIf(UintPtrGreaterThanOrEqual(key, max_capacity), fail);
+
+  // Calculate the capacity of the new backing store.
+  Node* new_capacity = CalculateNewElementsCapacity(
+      IntPtrAdd(key, IntPtrOrSmiConstant(1, mode)), mode);
+
+  // If the size of the allocation for the new capacity doesn't fit in a page
+  // that we can bump-pointer allocate from, fall back to the runtime.
+  int max_size = ((Page::kMaxRegularHeapObjectSize - FixedArray::kHeaderSize) >>
+                  ElementsKindToShiftSize(kind));
+  GotoIf(UintPtrGreaterThanOrEqual(new_capacity,
+                                   IntPtrOrSmiConstant(max_size, mode)),
+         fail);
+
+  // Allocate the new backing store.
+  Node* new_elements = AllocateFixedArray(kind, new_capacity, mode);
+
+  // Fill in the added capacity in the new store with holes.
+  FillFixedArrayWithHole(kind, new_elements, capacity, new_capacity, mode);
+
+  // Copy the elements from the old elements store to the new.
+  CopyFixedArrayElements(kind, elements, new_elements, capacity,
+                         SKIP_WRITE_BARRIER, mode);
+
+  return new_elements;
 }
 
 void CodeStubAssembler::InitializeAllocationMemento(
     compiler::Node* base_allocation, int base_allocation_size,
     compiler::Node* allocation_site) {
   StoreObjectFieldNoWriteBarrier(
       base_allocation, AllocationMemento::kMapOffset + base_allocation_size,
       HeapConstant(Handle<Map>(isolate()->heap()->allocation_memento_map())));
   StoreObjectFieldNoWriteBarrier(
       base_allocation,
(...skipping 2093 matching lines...)
                             Heap::kTheHoleValueRootIndex);
 
   // Store the WeakCell in the feedback vector.
   StoreFixedArrayElement(feedback_vector, slot, cell, UPDATE_WRITE_BARRIER,
                          CodeStubAssembler::SMI_PARAMETERS);
   return cell;
 }
 
 }  // namespace internal
 }  // namespace v8
