Chromium Code Reviews

Unified Diff: src/code-stub-assembler.cc

Issue 2206333003: [stubs] Convert GrowElementsStub to TurboFan (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Remove dead code (created 4 years, 4 months ago)
 // Copyright 2016 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "src/code-stub-assembler.h"
 #include "src/code-factory.h"
 #include "src/frames-inl.h"
 #include "src/frames.h"
 #include "src/ic/stub-cache.h"
 
(...skipping 63 matching lines...)
 }
 
 Node* CodeStubAssembler::HashSeed() {
   return SmiToWord32(LoadRoot(Heap::kHashSeedRootIndex));
 }
 
 Node* CodeStubAssembler::StaleRegisterConstant() {
   return LoadRoot(Heap::kStaleRegisterRootIndex);
 }
 
+Node* CodeStubAssembler::IntPtrOrSmiConstant(int value, ParameterMode mode) {
+  if (mode == SMI_PARAMETERS) {
+    return SmiConstant(Smi::FromInt(value));
+  } else {
+    DCHECK_EQ(INTEGER_PARAMETERS, mode);
+    return IntPtrConstant(value);
+  }
+}
+
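A note on ParameterMode, since the rest of the patch leans on it: SMI_PARAMETERS means indices and capacities flow through the graph Smi-tagged, while INTEGER_PARAMETERS means raw machine words. A minimal sketch of the tagging this helper papers over, assuming the 32-bit layout where a Smi is the value shifted left one bit (the 64-bit layout shifts by 32; the names here are illustrative, not V8's):

    #include <cstdint>

    // Illustrative only: 32-bit Smi tagging. A Smi stores value v as v << 1,
    // so the low (tag) bit of every Smi is zero.
    int32_t SmiTag32(int32_t value) { return value << 1; }
    int32_t SmiUntag32(int32_t word) { return word >> 1; }

    // IntPtrOrSmiConstant(16, SMI_PARAMETERS) embeds SmiTag32(16) == 32;
    // IntPtrOrSmiConstant(16, INTEGER_PARAMETERS) embeds the raw 16.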
 Node* CodeStubAssembler::Float64Round(Node* x) {
   Node* one = Float64Constant(1.0);
   Node* one_half = Float64Constant(0.5);
 
   Variable var_x(this, MachineRepresentation::kFloat64);
   Label return_x(this);
 
   // Round up {x} towards Infinity.
   var_x.Bind(Float64Ceil(x));
 
(...skipping 1168 matching lines...)
   int base_size = JSArray::kSize + FixedArray::kHeaderSize;
   int elements_offset = JSArray::kSize;
 
   Comment("begin allocation of JSArray");
 
   if (allocation_site != nullptr) {
     base_size += AllocationMemento::kSize;
     elements_offset += AllocationMemento::kSize;
   }
 
-  int32_t capacity;
-  bool constant_capacity = ToInt32Constant(capacity_node, capacity);
   Node* total_size =
       ElementOffsetFromIndex(capacity_node, kind, mode, base_size);
 
   // Allocate both array and elements object, and initialize the JSArray.
   Heap* heap = isolate()->heap();
   Node* array = Allocate(total_size);
   StoreMapNoWriteBarrier(array, array_map);
   Node* empty_properties = LoadRoot(Heap::kEmptyFixedArrayRootIndex);
   StoreObjectFieldNoWriteBarrier(array, JSArray::kPropertiesOffset,
                                  empty_properties);
   StoreObjectFieldNoWriteBarrier(
       array, JSArray::kLengthOffset,
       mode == SMI_PARAMETERS ? length_node : SmiTag(length_node));
 
   if (allocation_site != nullptr) {
     InitializeAllocationMemento(array, JSArray::kSize, allocation_site);
   }
 
   // Setup elements object.
   Node* elements = InnerAllocate(array, elements_offset);
   StoreObjectFieldNoWriteBarrier(array, JSArray::kElementsOffset, elements);
   Handle<Map> elements_map(is_double ? heap->fixed_double_array_map()
                                      : heap->fixed_array_map());
   StoreMapNoWriteBarrier(elements, HeapConstant(elements_map));
   StoreObjectFieldNoWriteBarrier(
       elements, FixedArray::kLengthOffset,
       mode == SMI_PARAMETERS ? capacity_node : SmiTag(capacity_node));
 
+  FillFixedArrayWithHole(kind, elements, IntPtrConstant(0), capacity_node,
+                         mode);
+
+  return array;
+}
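For orientation: everything above builds one contiguous allocation holding the JSArray header, the optional AllocationMemento, and the elements FixedArray, with InnerAllocate returning a pointer into the middle of it. Worked through with illustrative 64-bit constants (JSArray::kSize = 32, FixedArray::kHeaderSize = 16, kPointerSize = 8; the actual values depend on the build configuration):

    capacity = 4, no allocation site:
      base_size  = 32 + 16    = 48 bytes of headers
      total_size = 48 + 4 * 8 = 80 bytes, one Allocate() call
      elements   = array + 32   (InnerAllocate at elements_offset)

The trailing FillFixedArrayWithHole call keeps the split into helpers safe: the elements store is hole-filled before a GC can observe it.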
+
+Node* CodeStubAssembler::AllocateFixedArray(ElementsKind kind,
+                                            Node* capacity_node,
+                                            ParameterMode mode) {
+  Node* total_size = ElementOffsetFromIndex(capacity_node, kind, mode,
+                                            FixedArray::kHeaderSize);
+
+  // Allocate the FixedArray and initialize its map and length.
+  Node* array = Allocate(total_size);
+  Heap* heap = isolate()->heap();
+  Handle<Map> map(IsFastDoubleElementsKind(kind)
+                      ? heap->fixed_double_array_map()
+                      : heap->fixed_array_map());
+  StoreMapNoWriteBarrier(array, HeapConstant(map));
+  StoreObjectFieldNoWriteBarrier(
+      array, FixedArray::kLengthOffset,
+      mode == INTEGER_PARAMETERS ? SmiTag(capacity_node) : capacity_node);
+  return array;
+}
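Note that, unlike the JSArray path above, AllocateFixedArray hands back storage whose element slots are uninitialized, so every caller is responsible for filling them. The grow path later in this patch does exactly that, hole-filling only the added range and copying the old elements over the rest:

    Node* new_elements = AllocateFixedArray(kind, new_capacity, mode);
    FillFixedArrayWithHole(kind, new_elements, capacity, new_capacity, mode);
    CopyFixedArrayElements(kind, elements, new_elements, capacity,
                           SKIP_WRITE_BARRIER, mode);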
+
+void CodeStubAssembler::FillFixedArrayWithHole(ElementsKind kind,
+                                               compiler::Node* array,
+                                               compiler::Node* from_node,
+                                               compiler::Node* to_node,
+                                               ParameterMode mode) {
   int const first_element_offset = FixedArray::kHeaderSize - kHeapObjectTag;
+  Heap* heap = isolate()->heap();
   Node* hole = HeapConstant(Handle<HeapObject>(heap->the_hole_value()));
   Node* double_hole =
       Is64() ? Int64Constant(kHoleNanInt64) : Int32Constant(kHoleNanLower32);
   DCHECK_EQ(kHoleNanLower32, kHoleNanUpper32);
-  if (constant_capacity && capacity <= kElementLoopUnrollThreshold) {
-    for (int i = 0; i < capacity; ++i) {
+  bool is_double = IsFastDoubleElementsKind(kind);
+  int32_t to;
+  bool constant_to = ToInt32Constant(to_node, to);
+  int32_t from;
+  bool constant_from = ToInt32Constant(from_node, from);
+  if (constant_to && constant_from &&
+      (to - from) <= kElementLoopUnrollThreshold) {
+    for (int i = from; i < to; ++i) {
       if (is_double) {
         Node* offset = ElementOffsetFromIndex(Int32Constant(i), kind, mode,
                                               first_element_offset);
         // Don't use doubles to store the hole double, since manipulating the
         // signaling NaN used for the hole in C++, e.g. with bit_cast, will
         // change its value on ia32 (the x87 stack is used to return values
         // and stores to the stack silently clear the signalling bit).
         //
         // TODO(danno): When we have a Float32/Float64 wrapper class that
         // preserves double bits during manipulation, remove this code/change
         // this to an indexed Float64 store.
         if (Is64()) {
-          StoreNoWriteBarrier(MachineRepresentation::kWord64, elements, offset,
+          StoreNoWriteBarrier(MachineRepresentation::kWord64, array, offset,
                               double_hole);
         } else {
-          StoreNoWriteBarrier(MachineRepresentation::kWord32, elements, offset,
+          StoreNoWriteBarrier(MachineRepresentation::kWord32, array, offset,
                               double_hole);
           offset = ElementOffsetFromIndex(Int32Constant(i), kind, mode,
                                           first_element_offset + kPointerSize);
-          StoreNoWriteBarrier(MachineRepresentation::kWord32, elements, offset,
+          StoreNoWriteBarrier(MachineRepresentation::kWord32, array, offset,
                               double_hole);
         }
       } else {
-        StoreFixedArrayElement(elements, Int32Constant(i), hole,
+        StoreFixedArrayElement(array, Int32Constant(i), hole,
                                SKIP_WRITE_BARRIER);
       }
     }
   } else {
     Variable current(this, MachineRepresentation::kTagged);
     Label test(this);
     Label decrement(this, &current);
     Label done(this);
-    Node* limit = IntPtrAdd(elements, IntPtrConstant(first_element_offset));
-    current.Bind(
-        IntPtrAdd(limit, ElementOffsetFromIndex(capacity_node, kind, mode, 0)));
+    Node* limit =
+        IntPtrAdd(array, ElementOffsetFromIndex(from_node, kind, mode));
+    current.Bind(IntPtrAdd(array, ElementOffsetFromIndex(to_node, kind, mode)));
 
     Branch(WordEqual(current.value(), limit), &done, &decrement);
 
     Bind(&decrement);
     current.Bind(IntPtrSub(
         current.value(),
         Int32Constant(IsFastDoubleElementsKind(kind) ? kDoubleSize
                                                      : kPointerSize)));
     if (is_double) {
       // Don't use doubles to store the hole double, since manipulating the
       // signaling NaN used for the hole in C++, e.g. with bit_cast, will
       // change its value on ia32 (the x87 stack is used to return values
       // and stores to the stack silently clear the signalling bit).
       //
       // TODO(danno): When we have a Float32/Float64 wrapper class that
       // preserves double bits during manipulation, remove this code/change
       // this to an indexed Float64 store.
       if (Is64()) {
         StoreNoWriteBarrier(MachineRepresentation::kWord64, current.value(),
-                            double_hole);
+                            Int64Constant(first_element_offset), double_hole);
       } else {
         StoreNoWriteBarrier(MachineRepresentation::kWord32, current.value(),
-                            double_hole);
+                            Int32Constant(first_element_offset), double_hole);
         StoreNoWriteBarrier(
             MachineRepresentation::kWord32,
-            IntPtrAdd(current.value(), Int32Constant(kPointerSize)),
+            IntPtrAdd(current.value(),
+                      Int32Constant(kPointerSize + first_element_offset)),
             double_hole);
       }
     } else {
       StoreNoWriteBarrier(MachineRepresentation::kTagged, current.value(),
-                          hole);
+                          IntPtrConstant(first_element_offset), hole);
     }
     Node* compare = WordNotEqual(current.value(), limit);
     Branch(compare, &decrement, &done);
 
     Bind(&done);
   }
+}
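An aside on the word-sized hole stores above: the hole in a double array is a signaling-NaN bit pattern, and any path that round-trips it through a C++ double (or, at runtime, through the ia32 x87 register stack) risks quietening the NaN and silently corrupting the sentinel. A sketch of the distinction, with an assumed bit pattern standing in for kHoleNanInt64 (the real constant lives in V8's headers):

    #include <cstdint>
    #include <cstring>

    // Assumed hole pattern, for illustration only: exponent all ones,
    // quiet bit clear => signaling NaN.
    const uint64_t kHoleNanAssumed = 0x7FF7FFFF7FF7FFFFull;

    void StoreHoleSafely(uint64_t* slot) {
      *slot = kHoleNanAssumed;  // integer store: the FPU never touches it
    }

    void StoreHoleDangerously(double* slot) {
      double d;
      std::memcpy(&d, &kHoleNanAssumed, sizeof d);
      *slot = d;  // on ia32/x87 this round-trip can set the quiet bit
    }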
 
-  return array;
+void CodeStubAssembler::CopyFixedArrayElements(ElementsKind kind,
+                                               compiler::Node* from_array,
+                                               compiler::Node* to_array,
+                                               compiler::Node* element_count,
+                                               WriteBarrierMode barrier_mode,
+                                               ParameterMode mode) {
+  Label test(this);
+  Label done(this);
+  bool needs_write_barrier =
+      barrier_mode == UPDATE_WRITE_BARRIER && IsFastObjectElementsKind(kind);
+  Node* limit_offset = ElementOffsetFromIndex(
+      IntPtrConstant(0), kind, mode, FixedArray::kHeaderSize - kHeapObjectTag);
+  Variable current_offset(this, MachineType::PointerRepresentation());
+  current_offset.Bind(ElementOffsetFromIndex(
+      element_count, kind, mode, FixedArray::kHeaderSize - kHeapObjectTag));
+  Label decrement(this, &current_offset);
+
+  Branch(WordEqual(current_offset.value(), limit_offset), &done, &decrement);
+
+  Bind(&decrement);
+  {
+    current_offset.Bind(
+        IntPtrSub(current_offset.value(), IntPtrConstant(kPointerSize)));
+
+    Node* value =
+        Load(MachineType::Pointer(), from_array, current_offset.value());
+    if (needs_write_barrier) {
+      Store(MachineType::PointerRepresentation(), to_array,
+            current_offset.value(), value);
+    } else {
+      StoreNoWriteBarrier(MachineType::PointerRepresentation(), to_array,
+                          current_offset.value(), value);
+    }
+    Node* compare = WordNotEqual(current_offset.value(), limit_offset);
+    Branch(compare, &decrement, &done);
+  }
+
+  Bind(&done);
+}
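The loop walks raw header-relative byte offsets backwards from element_count down to element 0, so a single comparison against limit_offset serves as both loop test and exit. What the graph computes, as straight C++ (a sketch only; the real code emits TurboFan nodes, and the pointer-width stores make tagged-pointer arrays the representative case):

    #include <cstddef>
    #include <cstdint>

    // Backwards word-by-word copy, mirroring the decrement loop above.
    void CopyElementsSketch(const intptr_t* from_array, intptr_t* to_array,
                            size_t element_count) {
      for (size_t i = element_count; i-- > 0;) {
        to_array[i] = from_array[i];  // the StoreNoWriteBarrier case
      }
    }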
+
+Node* CodeStubAssembler::CalculateNewElementsCapacity(Node* old_capacity,
+                                                      ParameterMode mode) {
+  Node* half_old_capacity = WordShr(old_capacity, IntPtrConstant(1));
+  Node* new_capacity = IntPtrAdd(half_old_capacity, old_capacity);
+  Node* unconditioned_result =
+      IntPtrAdd(new_capacity, IntPtrOrSmiConstant(16, mode));
+  if (mode == INTEGER_PARAMETERS) {
+    return unconditioned_result;
+  } else {
+    int const kSmiShiftBits = kSmiShiftSize + kSmiTagSize;
+    return WordAnd(unconditioned_result,
+                   IntPtrConstant(static_cast<size_t>(-1) << kSmiShiftBits));
+  }
+}
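The growth policy is new_capacity = old + old/2 + 16. The WordAnd in the Smi branch exists because right-shifting an odd tagged capacity smears a one into the tag bit; masking off the low kSmiShiftBits restores a valid Smi. Worked through for old_capacity = 9 under the assumed 32-bit layout (value v tagged as v << 1, kSmiShiftBits = 1):

    tagged(9)  = 18
    half       = 18 >> 1 = 9            (tag bit now dirty)
    sum        = 9 + 18 + tagged(16)
               = 9 + 18 + 32 = 59
    59 & ~1    = 58 = tagged(29)
    check: 9 + 9/2 + 16 = 29            (matches)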
+
+Node* CodeStubAssembler::CheckAndGrowElementsCapacity(
+    Node* context, Node* object, Node* elements, ElementsKind kind,
+    Node* capacity, Node* key, Label* fail) {
+  // On 32-bit platforms, there is a slight performance advantage to doing all
+  // of the arithmetic for the new backing store with SMIs, since it's possible
+  // to save a few tag/untag operations without paying an extra expense when
+  // calculating array offsets (the smi math can be folded away) and there are
+  // fewer live ranges. Thus only convert |capacity| and |key| to untagged
+  // values on 64-bit platforms.
+  ParameterMode mode = Is64() ? INTEGER_PARAMETERS : SMI_PARAMETERS;
+  if (mode == INTEGER_PARAMETERS) {
+    capacity = SmiUntag(capacity);
+    key = SmiUntag(key);
+  }
+
+  // If the gap growth is too big, fall back to the runtime.
+  Node* max_gap = IntPtrOrSmiConstant(JSObject::kMaxGap, mode);
+  Node* max_capacity = IntPtrAdd(capacity, max_gap);
+  GotoIf(UintPtrGreaterThanOrEqual(key, max_capacity), fail);
+
+  // Calculate the capacity of the new backing store.
+  Node* new_capacity = CalculateNewElementsCapacity(
+      IntPtrAdd(key, IntPtrOrSmiConstant(1, mode)), mode);
+
+  // If the size of the allocation for the new capacity doesn't fit in a page
+  // that we can bump-pointer allocate from, fall back to the runtime.
+  int max_size = ((Page::kMaxRegularHeapObjectSize - FixedArray::kHeaderSize) >>
+                  ElementsKindToShiftSize(kind));
+  GotoIf(UintPtrGreaterThanOrEqual(new_capacity,
+                                   IntPtrOrSmiConstant(max_size, mode)),
+         fail);
+
+  // Allocate the new backing store.
+  Node* new_elements = AllocateFixedArray(kind, new_capacity, mode);
+
+  // Fill in the added capacity in the new store with holes.
+  FillFixedArrayWithHole(kind, new_elements, capacity, new_capacity, mode);
+
+  // Copy the elements from the old elements store to the new one.
+  CopyFixedArrayElements(kind, elements, new_elements, capacity,
+                         SKIP_WRITE_BARRIER, mode);
+
+  return new_elements;
 }
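Pulling the patch together, the fast path reduces to two bounds checks around the capacity formula, with everything else bailing out to the runtime via |fail|. A self-contained sketch of just the decision logic (the constants are stand-ins for JSObject::kMaxGap and the regular-page allocation limit, assumptions rather than V8's exact values):

    #include <cstdint>
    #include <optional>

    constexpr uint32_t kMaxGapAssumed = 1024;        // stand-in: JSObject::kMaxGap
    constexpr uint32_t kMaxCapacityAssumed = 60000;  // stand-in: page-size limit

    // Returns the grown capacity for a keyed store at |key|, or nullopt when
    // the stub must fall back to the runtime.
    std::optional<uint32_t> NewCapacityForKey(uint32_t key, uint32_t capacity) {
      if (key >= capacity + kMaxGapAssumed) return std::nullopt;  // gap too big
      uint32_t requested = key + 1;
      uint32_t new_capacity = requested + requested / 2 + 16;  // growth policy
      if (new_capacity >= kMaxCapacityAssumed) return std::nullopt;
      return new_capacity;
    }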
 
 void CodeStubAssembler::InitializeAllocationMemento(
     compiler::Node* base_allocation, int base_allocation_size,
     compiler::Node* allocation_site) {
   StoreObjectFieldNoWriteBarrier(
       base_allocation, AllocationMemento::kMapOffset + base_allocation_size,
       HeapConstant(Handle<Map>(isolate()->heap()->allocation_memento_map())));
   StoreObjectFieldNoWriteBarrier(
       base_allocation,
(...skipping 2093 matching lines...)
                        Heap::kTheHoleValueRootIndex);
 
   // Store the WeakCell in the feedback vector.
   StoreFixedArrayElement(feedback_vector, slot, cell, UPDATE_WRITE_BARRIER,
                          CodeStubAssembler::SMI_PARAMETERS);
   return cell;
 }
 
 }  // namespace internal
 }  // namespace v8