OLD | NEW |
1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/compiler/change-lowering.h" | 5 #include "src/compiler/change-lowering.h" |
6 | 6 |
| 7 #include "src/address-map.h" |
7 #include "src/code-factory.h" | 8 #include "src/code-factory.h" |
8 #include "src/compiler/js-graph.h" | 9 #include "src/compiler/js-graph.h" |
9 #include "src/compiler/linkage.h" | 10 #include "src/compiler/linkage.h" |
10 #include "src/compiler/machine-operator.h" | 11 #include "src/compiler/machine-operator.h" |
11 #include "src/compiler/node-properties.h" | 12 #include "src/compiler/node-properties.h" |
12 #include "src/compiler/operator-properties.h" | 13 #include "src/compiler/operator-properties.h" |
| 14 #include "src/compiler/simplified-operator.h" |
13 | 15 |
14 namespace v8 { | 16 namespace v8 { |
15 namespace internal { | 17 namespace internal { |
16 namespace compiler { | 18 namespace compiler { |
17 | 19 |
18 ChangeLowering::~ChangeLowering() {} | 20 ChangeLowering::~ChangeLowering() {} |
19 | 21 |
20 | 22 |
21 Reduction ChangeLowering::Reduce(Node* node) { | 23 Reduction ChangeLowering::Reduce(Node* node) { |
22 Node* control = graph()->start(); | 24 Node* control = graph()->start(); |
23 switch (node->opcode()) { | 25 switch (node->opcode()) { |
24 case IrOpcode::kChangeBitToBool: | 26 case IrOpcode::kChangeBitToBool: |
25 return ChangeBitToBool(node->InputAt(0), control); | 27 return ChangeBitToBool(node->InputAt(0), control); |
26 case IrOpcode::kChangeBoolToBit: | 28 case IrOpcode::kChangeBoolToBit: |
27 return ChangeBoolToBit(node->InputAt(0)); | 29 return ChangeBoolToBit(node->InputAt(0)); |
28 case IrOpcode::kChangeFloat64ToTagged: | 30 case IrOpcode::kChangeFloat64ToTagged: |
29 return ChangeFloat64ToTagged(node->InputAt(0), control); | 31 return ChangeFloat64ToTagged(node->InputAt(0), control); |
30 case IrOpcode::kChangeInt32ToTagged: | 32 case IrOpcode::kChangeInt32ToTagged: |
31 return ChangeInt32ToTagged(node->InputAt(0), control); | 33 return ChangeInt32ToTagged(node->InputAt(0), control); |
32 case IrOpcode::kChangeTaggedToFloat64: | 34 case IrOpcode::kChangeTaggedToFloat64: |
33 return ChangeTaggedToFloat64(node->InputAt(0), control); | 35 return ChangeTaggedToFloat64(node->InputAt(0), control); |
34 case IrOpcode::kChangeTaggedToInt32: | 36 case IrOpcode::kChangeTaggedToInt32: |
35 return ChangeTaggedToUI32(node->InputAt(0), control, kSigned); | 37 return ChangeTaggedToUI32(node->InputAt(0), control, kSigned); |
36 case IrOpcode::kChangeTaggedToUint32: | 38 case IrOpcode::kChangeTaggedToUint32: |
37 return ChangeTaggedToUI32(node->InputAt(0), control, kUnsigned); | 39 return ChangeTaggedToUI32(node->InputAt(0), control, kUnsigned); |
38 case IrOpcode::kChangeUint32ToTagged: | 40 case IrOpcode::kChangeUint32ToTagged: |
39 return ChangeUint32ToTagged(node->InputAt(0), control); | 41 return ChangeUint32ToTagged(node->InputAt(0), control); |
| 42 case IrOpcode::kLoadField: |
| 43 return LoadField(node); |
| 44 case IrOpcode::kStoreField: |
| 45 return StoreField(node); |
| 46 case IrOpcode::kLoadElement: |
| 47 return LoadElement(node); |
| 48 case IrOpcode::kStoreElement: |
| 49 return StoreElement(node); |
| 50 case IrOpcode::kAllocate: |
| 51 return Allocate(node); |
40 default: | 52 default: |
41 return NoChange(); | 53 return NoChange(); |
42 } | 54 } |
43 UNREACHABLE(); | 55 UNREACHABLE(); |
44 return NoChange(); | 56 return NoChange(); |
45 } | 57 } |
46 | 58 |
47 | 59 |
48 Node* ChangeLowering::HeapNumberValueIndexConstant() { | 60 Node* ChangeLowering::HeapNumberValueIndexConstant() { |
49 return jsgraph()->IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag); | 61 return jsgraph()->IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag); |
(...skipping 350 matching lines...) |
400 AllocateHeapNumberWithValue(ChangeUint32ToFloat64(value), if_false); | 412 AllocateHeapNumberWithValue(ChangeUint32ToFloat64(value), if_false); |
401 | 413 |
402 Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false); | 414 Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false); |
403 Node* phi = | 415 Node* phi = |
404 graph()->NewNode(common()->Phi(kMachAnyTagged, 2), vtrue, vfalse, merge); | 416 graph()->NewNode(common()->Phi(kMachAnyTagged, 2), vtrue, vfalse, merge); |
405 | 417 |
406 return Replace(phi); | 418 return Replace(phi); |
407 } | 419 } |
408 | 420 |
409 | 421 |
| 422 namespace { |
| 423 |
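| // Picks the cheapest write barrier that is still sufficient for storing |
| // the given value into the given field, based on their static types. |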
| 424 WriteBarrierKind ComputeWriteBarrierKind(BaseTaggedness base_is_tagged, |
| 425 MachineType representation, |
| 426 Type* field_type, Type* input_type) { |
| 427 if (field_type->Is(Type::TaggedSigned()) || |
| 428 input_type->Is(Type::TaggedSigned())) { |
| 429 // Write barriers are only for writes of heap objects. |
| 430 return kNoWriteBarrier; |
| 431 } |
| 432 if (input_type->Is(Type::BooleanOrNullOrUndefined())) { |
| 433 // Write barriers are not necessary when storing true, false, null or |
| 434 // undefined, because these special oddballs are always in the root set. |
| 435 return kNoWriteBarrier; |
| 436 } |
| 437 if (base_is_tagged == kTaggedBase && |
| 438 RepresentationOf(representation) == kRepTagged) { |
| 439 if (input_type->IsConstant() && |
| 440 input_type->AsConstant()->Value()->IsHeapObject()) { |
| 441 Handle<HeapObject> input = |
| 442 Handle<HeapObject>::cast(input_type->AsConstant()->Value()); |
| 443 if (input->IsMap()) { |
| 444 // Write barriers for storing maps are cheaper. |
| 445 return kMapWriteBarrier; |
| 446 } |
| 447 Isolate* const isolate = input->GetIsolate(); |
| 448 RootIndexMap root_index_map(isolate); |
| 449 int root_index = root_index_map.Lookup(*input); |
| 450 if (root_index != RootIndexMap::kInvalidRootIndex && |
| 451 isolate->heap()->RootIsImmortalImmovable(root_index)) { |
| 452 // Write barriers are unnecessary for immortal immovable roots. |
| 453 return kNoWriteBarrier; |
| 454 } |
| 455 } |
| 456 if (field_type->Is(Type::TaggedPointer()) || |
| 457 input_type->Is(Type::TaggedPointer())) { |
| 458 // Write barriers for heap objects don't need a Smi check. |
| 459 return kPointerWriteBarrier; |
| 460 } |
| 461 // Write barriers are only for writes into heap objects (i.e. tagged base). |
| 462 return kFullWriteBarrier; |
| 463 } |
| 464 return kNoWriteBarrier; |
| 465 } |
| 466 |
| 467 } // namespace |
| 468 |
| 469 |
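| // Lower LoadField to a raw machine Load at the field's untagged offset. |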
| 470 Reduction ChangeLowering::LoadField(Node* node) { |
| 471 const FieldAccess& access = FieldAccessOf(node->op()); |
| 472 Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag()); |
| 473 node->InsertInput(graph()->zone(), 1, offset); |
| 474 NodeProperties::ChangeOp(node, machine()->Load(access.machine_type)); |
| 475 return Changed(node); |
| 476 } |
| 477 |
| 478 |
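| // Lower StoreField to a machine Store, attaching the write barrier |
| // required by the statically known types. |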
| 479 Reduction ChangeLowering::StoreField(Node* node) { |
| 480 const FieldAccess& access = FieldAccessOf(node->op()); |
| 481 Type* type = NodeProperties::GetType(node->InputAt(1)); |
| 482 WriteBarrierKind kind = ComputeWriteBarrierKind( |
| 483 access.base_is_tagged, access.machine_type, access.type, type); |
| 484 Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag()); |
| 485 node->InsertInput(graph()->zone(), 1, offset); |
| 486 NodeProperties::ChangeOp( |
| 487 node, machine()->Store(StoreRepresentation(access.machine_type, kind))); |
| 488 return Changed(node); |
| 489 } |
| 490 |
| 491 |
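| // Compute the byte offset of an element access: |
| //   (key << ElementSizeLog2Of(type)) + header_size - tag. |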
| 492 Node* ChangeLowering::ComputeIndex(const ElementAccess& access, |
| 493 Node* const key) { |
| 494 Node* index = key; |
| 495 const int element_size_shift = ElementSizeLog2Of(access.machine_type); |
| 496 if (element_size_shift) { |
| 497 index = graph()->NewNode(machine()->Word32Shl(), index, |
| 498 jsgraph()->Int32Constant(element_size_shift)); |
| 499 } |
| 500 const int fixed_offset = access.header_size - access.tag(); |
| 501 if (fixed_offset) { |
| 502 index = graph()->NewNode(machine()->Int32Add(), index, |
| 503 jsgraph()->Int32Constant(fixed_offset)); |
| 504 } |
| 505 if (machine()->Is64()) { |
| 506 // TODO(turbofan): This is probably only correct for typed arrays, and only |
| 507 // if the typed arrays are at most 2GiB in size, which happens to match |
| 508 // exactly our current situation. |
| 509 index = graph()->NewNode(machine()->ChangeUint32ToUint64(), index); |
| 510 } |
| 511 return index; |
| 512 } |
| 513 |
| 514 |
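| // Lower LoadElement to a machine Load at the computed element offset. |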
| 515 Reduction ChangeLowering::LoadElement(Node* node) { |
| 516 const ElementAccess& access = ElementAccessOf(node->op()); |
| 517 node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1))); |
| 518 NodeProperties::ChangeOp(node, machine()->Load(access.machine_type)); |
| 519 return Changed(node); |
| 520 } |
| 521 |
| 522 |
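| // Lower StoreElement to a machine Store at the computed element offset, |
| // again attaching the appropriate write barrier. |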
| 523 Reduction ChangeLowering::StoreElement(Node* node) { |
| 524 const ElementAccess& access = ElementAccessOf(node->op()); |
| 525 Type* type = NodeProperties::GetType(node->InputAt(2)); |
| 526 node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1))); |
| 527 NodeProperties::ChangeOp( |
| 528 node, |
| 529 machine()->Store(StoreRepresentation( |
| 530 access.machine_type, |
| 531 ComputeWriteBarrierKind(access.base_is_tagged, access.machine_type, |
| 532 access.type, type)))); |
| 533 return Changed(node); |
| 534 } |
| 535 |
| 536 |
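| // Lower Allocate to a call to the AllocateInNewSpace stub for new-space |
| // allocations, or to the AllocateInTargetSpace runtime function for |
| // tenured allocations. |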
| 537 Reduction ChangeLowering::Allocate(Node* node) { |
| 538 PretenureFlag pretenure = OpParameter<PretenureFlag>(node->op()); |
| 539 if (pretenure == NOT_TENURED) { |
| 540 Callable callable = CodeFactory::AllocateInNewSpace(isolate()); |
| 541 Node* target = jsgraph()->HeapConstant(callable.code()); |
| 542 CallDescriptor* descriptor = Linkage::GetStubCallDescriptor( |
| 543 isolate(), jsgraph()->zone(), callable.descriptor(), 0, |
| 544 CallDescriptor::kNoFlags, Operator::kNoThrow); |
| 545 const Operator* op = common()->Call(descriptor); |
| 546 node->InsertInput(graph()->zone(), 0, target); |
| 547 node->InsertInput(graph()->zone(), 2, jsgraph()->NoContextConstant()); |
| 548 NodeProperties::ChangeOp(node, op); |
| 549 } else { |
| 550 DCHECK_EQ(TENURED, pretenure); |
| 551 AllocationSpace space = OLD_SPACE; |
| 552 Runtime::FunctionId f = Runtime::kAllocateInTargetSpace; |
| 553 Operator::Properties props = node->op()->properties(); |
| 554 CallDescriptor* desc = |
| 555 Linkage::GetRuntimeCallDescriptor(jsgraph()->zone(), f, 2, props); |
| 556 ExternalReference ref(f, jsgraph()->isolate()); |
| 557 int32_t flags = AllocateTargetSpace::encode(space); |
| 558 node->InsertInput(graph()->zone(), 0, jsgraph()->CEntryStubConstant(1)); |
| 559 node->InsertInput(graph()->zone(), 2, jsgraph()->SmiConstant(flags)); |
| 560 node->InsertInput(graph()->zone(), 3, jsgraph()->ExternalConstant(ref)); |
| 561 node->InsertInput(graph()->zone(), 4, jsgraph()->Int32Constant(2)); |
| 562 node->InsertInput(graph()->zone(), 5, jsgraph()->NoContextConstant()); |
| 563 NodeProperties::ChangeOp(node, common()->Call(desc)); |
| 564 } |
| 565 return Changed(node); |
| 566 } |
| 567 |
| 568 |
410 Isolate* ChangeLowering::isolate() const { return jsgraph()->isolate(); } | 569 Isolate* ChangeLowering::isolate() const { return jsgraph()->isolate(); } |
411 | 570 |
412 | 571 |
413 Graph* ChangeLowering::graph() const { return jsgraph()->graph(); } | 572 Graph* ChangeLowering::graph() const { return jsgraph()->graph(); } |
414 | 573 |
415 | 574 |
416 CommonOperatorBuilder* ChangeLowering::common() const { | 575 CommonOperatorBuilder* ChangeLowering::common() const { |
417 return jsgraph()->common(); | 576 return jsgraph()->common(); |
418 } | 577 } |
419 | 578 |
420 | 579 |
421 MachineOperatorBuilder* ChangeLowering::machine() const { | 580 MachineOperatorBuilder* ChangeLowering::machine() const { |
422 return jsgraph()->machine(); | 581 return jsgraph()->machine(); |
423 } | 582 } |
424 | 583 |
425 } // namespace compiler | 584 } // namespace compiler |
426 } // namespace internal | 585 } // namespace internal |
427 } // namespace v8 | 586 } // namespace v8 |