Chromium Code Reviews

Unified Diff: src/arm/lithium-codegen-arm.cc

Issue 6311010: ARM: Port new version of ParallelMove's GapResolver to ARM. Add MemOperand s... (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 9 years, 10 months ago
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
 //       with the distribution.
 //     * Neither the name of Google Inc. nor the names of its
 //       contributors may be used to endorse or promote products derived
 //       from this software without specific prior written permission.
 //
 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

 #include "arm/lithium-codegen-arm.h"
+#include "arm/lithium-gap-resolver-arm.h"
 #include "code-stubs.h"
 #include "stub-cache.h"

 namespace v8 {
 namespace internal {

 class SafepointGenerator : public PostCallGenerator {
  public:
   SafepointGenerator(LCodeGen* codegen,
                      LPointerMap* pointers,
                      int deoptimization_index)
       : codegen_(codegen),
         pointers_(pointers),
         deoptimization_index_(deoptimization_index) { }
   virtual ~SafepointGenerator() { }

   virtual void Generate() {
     codegen_->RecordSafepoint(pointers_, deoptimization_index_);
   }

  private:
   LCodeGen* codegen_;
   LPointerMap* pointers_;
   int deoptimization_index_;
 };

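Note: SafepointGenerator is a one-shot PostCallGenerator callback. The codegen hands it to a macro-assembler call helper, which invokes Generate() immediately after emitting the call instruction, so the safepoint and its pointer map are recorded at the call's return address. A minimal sketch of a call site; the surrounding names (instr, arity, deopt_index) and the exact InvokeFunction parameter list are assumptions for illustration, not verified against this patch:

// Hypothetical call site (illustrative only):
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
SafepointGenerator generator(this, pointers, deopt_index);
ParameterCount count(arity);
// InvokeFunction emits the call, then runs the generator at the return PC.
__ InvokeFunction(r1, count, CALL_FUNCTION, &generator);
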
-class LGapNode: public ZoneObject {
- public:
-  explicit LGapNode(LOperand* operand)
-      : operand_(operand), resolved_(false), visited_id_(-1) { }
-
-  LOperand* operand() const { return operand_; }
-  bool IsResolved() const { return !IsAssigned() || resolved_; }
-  void MarkResolved() {
-    ASSERT(!IsResolved());
-    resolved_ = true;
-  }
-  int visited_id() const { return visited_id_; }
-  void set_visited_id(int id) {
-    ASSERT(id > visited_id_);
-    visited_id_ = id;
-  }
-
-  bool IsAssigned() const { return assigned_from_.is_set(); }
-  LGapNode* assigned_from() const { return assigned_from_.get(); }
-  void set_assigned_from(LGapNode* n) { assigned_from_.set(n); }
-
- private:
-  LOperand* operand_;
-  SetOncePointer<LGapNode> assigned_from_;
-  bool resolved_;
-  int visited_id_;
-};
-
-
-LGapResolver::LGapResolver()
-    : nodes_(32),
-      identified_cycles_(4),
-      result_(16),
-      next_visited_id_(0) {
-}
-
-
-const ZoneList<LMoveOperands>* LGapResolver::Resolve(
-    const ZoneList<LMoveOperands>* moves,
-    LOperand* marker_operand) {
-  nodes_.Rewind(0);
-  identified_cycles_.Rewind(0);
-  result_.Rewind(0);
-  next_visited_id_ = 0;
-
-  for (int i = 0; i < moves->length(); ++i) {
-    LMoveOperands move = moves->at(i);
-    if (!move.IsRedundant()) RegisterMove(move);
-  }
-
-  for (int i = 0; i < identified_cycles_.length(); ++i) {
-    ResolveCycle(identified_cycles_[i], marker_operand);
-  }
-
-  int unresolved_nodes;
-  do {
-    unresolved_nodes = 0;
-    for (int j = 0; j < nodes_.length(); j++) {
-      LGapNode* node = nodes_[j];
-      if (!node->IsResolved() && node->assigned_from()->IsResolved()) {
-        AddResultMove(node->assigned_from(), node);
-        node->MarkResolved();
-      }
-      if (!node->IsResolved()) ++unresolved_nodes;
-    }
-  } while (unresolved_nodes > 0);
-  return &result_;
-}
-
-
-void LGapResolver::AddResultMove(LGapNode* from, LGapNode* to) {
-  AddResultMove(from->operand(), to->operand());
-}
-
-
-void LGapResolver::AddResultMove(LOperand* from, LOperand* to) {
-  result_.Add(LMoveOperands(from, to));
-}
-
-
-void LGapResolver::ResolveCycle(LGapNode* start, LOperand* marker_operand) {
-  ZoneList<LOperand*> cycle_operands(8);
-  cycle_operands.Add(marker_operand);
-  LGapNode* cur = start;
-  do {
-    cur->MarkResolved();
-    cycle_operands.Add(cur->operand());
-    cur = cur->assigned_from();
-  } while (cur != start);
-  cycle_operands.Add(marker_operand);
-
-  for (int i = cycle_operands.length() - 1; i > 0; --i) {
-    LOperand* from = cycle_operands[i];
-    LOperand* to = cycle_operands[i - 1];
-    AddResultMove(from, to);
-  }
-}
-
-
-bool LGapResolver::CanReach(LGapNode* a, LGapNode* b, int visited_id) {
-  ASSERT(a != b);
-  LGapNode* cur = a;
-  while (cur != b && cur->visited_id() != visited_id && cur->IsAssigned()) {
-    cur->set_visited_id(visited_id);
-    cur = cur->assigned_from();
-  }
-
-  return cur == b;
-}
-
-
-bool LGapResolver::CanReach(LGapNode* a, LGapNode* b) {
-  ASSERT(a != b);
-  return CanReach(a, b, next_visited_id_++);
-}
-
-
-void LGapResolver::RegisterMove(LMoveOperands move) {
-  if (move.source()->IsConstantOperand()) {
-    // Constant moves should be last in the machine code. Therefore add them
-    // first to the result set.
-    AddResultMove(move.source(), move.destination());
-  } else {
-    LGapNode* from = LookupNode(move.source());
-    LGapNode* to = LookupNode(move.destination());
-    if (to->IsAssigned() && to->assigned_from() == from) {
-      move.Eliminate();
-      return;
-    }
-    ASSERT(!to->IsAssigned());
-    if (CanReach(from, to)) {
-      // This introduces a cycle. Save.
-      identified_cycles_.Add(from);
-    }
-    to->set_assigned_from(from);
-  }
-}
-
-
-LGapNode* LGapResolver::LookupNode(LOperand* operand) {
-  for (int i = 0; i < nodes_.length(); ++i) {
-    if (nodes_[i]->operand()->Equals(operand)) return nodes_[i];
-  }
-
-  // No node found => create a new one.
-  LGapNode* result = new LGapNode(operand);
-  nodes_.Add(result);
-  return result;
-}
-
-
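Note: the resolver deleted above serialized each parallel move by building an assignment graph (LookupNode/RegisterMove), detecting cycles with CanReach, and rotating every cycle through a marker operand that DoParallelMove later materialized as core_scratch or dbl_scratch. A self-contained toy example of that cycle-breaking idea (plain C++, not the V8 API):

#include <cstdio>

int main() {
  // A parallel move {r1 <- r0, r0 <- r1} is a swap: emitting either move
  // first would clobber the other's source, so the cycle is rotated
  // through a scratch location, much as ResolveCycle does with the
  // marker operand.
  int r0 = 10, r1 = 20;
  int scratch = r1;   // marker <- r1
  r1 = r0;            // r1 <- r0
  r0 = scratch;       // r0 <- marker
  std::printf("r0=%d r1=%d\n", r0, r1);  // prints r0=20 r1=10
  return 0;
}
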
 #define __ masm()->

 bool LCodeGen::GenerateCode() {
   HPhase phase("Code generation", chunk());
   ASSERT(is_unused());
   status_ = GENERATING;
   CpuFeatures::Scope scope1(VFP3);
   CpuFeatures::Scope scope2(ARMv7);
   return GeneratePrologue() &&
          GenerateBody() &&
(...skipping 239 matching lines...)
     Abort("ToOperand IsDoubleRegister unimplemented");
     return Operand(0);
   }
   // Stack slots not implemented, use ToMemOperand instead.
   UNREACHABLE();
   return Operand(0);
 }


 MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
-  // TODO(regis): Revisit.
   ASSERT(!op->IsRegister());
   ASSERT(!op->IsDoubleRegister());
   ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
   int index = op->index();
   if (index >= 0) {
     // Local or spill slot. Skip the frame pointer, function, and
     // context in the fixed part of the frame.
     return MemOperand(fp, -(index + 3) * kPointerSize);
   } else {
     // Incoming parameter. Skip the return address.
     return MemOperand(fp, -(index - 1) * kPointerSize);
   }
 }

+MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
+  ASSERT(op->IsDoubleStackSlot());
+  int index = op->index();
+  if (index >= 0) {
+    // Local or spill slot. Skip the frame pointer, function, context,
+    // and the first word of the double in the fixed part of the frame.
+    return MemOperand(fp, -(index + 3) * kPointerSize + kPointerSize);
+  } else {
+    // Incoming parameter. Skip the return address and the first word of
+    // the double.
+    return MemOperand(fp, -(index - 1) * kPointerSize + kPointerSize);
+  }
+}
+
+
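Note: the fp-relative offsets above are easy to mis-read, because index is negative for incoming parameters. A standalone sketch of the same arithmetic, assuming a 4-byte kPointerSize; the helper names here are invented:

#include <cstdio>

const int kPointerSize = 4;

int SlotOffset(int index) {
  return (index >= 0)
      ? -(index + 3) * kPointerSize   // skip fp, function, and context
      : -(index - 1) * kPointerSize;  // skip the saved return address
}

int HighWordOffset(int index) {
  // The high word of a double stack slot sits one pointer above the low word.
  return SlotOffset(index) + kPointerSize;
}

int main() {
  std::printf("spill slot 0:  fp%+d\n", SlotOffset(0));      // fp-12
  std::printf("parameter -1:  fp%+d\n", SlotOffset(-1));     // fp+8
  std::printf("double 0 high: fp%+d\n", HighWordOffset(0));  // fp-8
  return 0;
}
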
 void LCodeGen::WriteTranslation(LEnvironment* environment,
                                 Translation* translation) {
   if (environment == NULL) return;

   // The translation includes one command per value in the environment.
   int translation_size = environment->values()->length();
   // The output frame height does not include the parameters.
   int height = translation_size - environment->parameter_count();

   WriteTranslation(environment->outer(), translation);
(...skipping 287 matching lines...)
   } else {
     Comment(";;; B%d", label->block_id());
   }
   __ bind(label->label());
   current_block_ = label->block_id();
   LCodeGen::DoGap(label);
 }

 void LCodeGen::DoParallelMove(LParallelMove* move) {
-  // d0 must always be a scratch register.
-  DoubleRegister dbl_scratch = d0;
-  LUnallocated marker_operand(LUnallocated::NONE);
-
-  Register core_scratch = scratch0();
-  bool destroys_core_scratch = false;
-
-  const ZoneList<LMoveOperands>* moves =
-      resolver_.Resolve(move->move_operands(), &marker_operand);
-  for (int i = moves->length() - 1; i >= 0; --i) {
-    LMoveOperands move = moves->at(i);
-    LOperand* from = move.source();
-    LOperand* to = move.destination();
-    ASSERT(!from->IsDoubleRegister() ||
-           !ToDoubleRegister(from).is(dbl_scratch));
-    ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(dbl_scratch));
-    ASSERT(!from->IsRegister() || !ToRegister(from).is(core_scratch));
-    ASSERT(!to->IsRegister() || !ToRegister(to).is(core_scratch));
-    if (from == &marker_operand) {
-      if (to->IsRegister()) {
-        __ mov(ToRegister(to), core_scratch);
-        ASSERT(destroys_core_scratch);
-      } else if (to->IsStackSlot()) {
-        __ str(core_scratch, ToMemOperand(to));
-        ASSERT(destroys_core_scratch);
-      } else if (to->IsDoubleRegister()) {
-        __ vmov(ToDoubleRegister(to), dbl_scratch);
-      } else {
-        ASSERT(to->IsDoubleStackSlot());
-        // TODO(regis): Why is vstr not taking a MemOperand?
-        // __ vstr(dbl_scratch, ToMemOperand(to));
-        MemOperand to_operand = ToMemOperand(to);
-        __ vstr(dbl_scratch, to_operand.rn(), to_operand.offset());
-      }
-    } else if (to == &marker_operand) {
-      if (from->IsRegister() || from->IsConstantOperand()) {
-        __ mov(core_scratch, ToOperand(from));
-        destroys_core_scratch = true;
-      } else if (from->IsStackSlot()) {
-        __ ldr(core_scratch, ToMemOperand(from));
-        destroys_core_scratch = true;
-      } else if (from->IsDoubleRegister()) {
-        __ vmov(dbl_scratch, ToDoubleRegister(from));
-      } else {
-        ASSERT(from->IsDoubleStackSlot());
-        // TODO(regis): Why is vldr not taking a MemOperand?
-        // __ vldr(dbl_scratch, ToMemOperand(from));
-        MemOperand from_operand = ToMemOperand(from);
-        __ vldr(dbl_scratch, from_operand.rn(), from_operand.offset());
-      }
-    } else if (from->IsConstantOperand()) {
-      if (to->IsRegister()) {
-        __ mov(ToRegister(to), ToOperand(from));
-      } else {
-        ASSERT(to->IsStackSlot());
-        __ mov(ip, ToOperand(from));
-        __ str(ip, ToMemOperand(to));
-      }
-    } else if (from->IsRegister()) {
-      if (to->IsRegister()) {
-        __ mov(ToRegister(to), ToOperand(from));
-      } else {
-        ASSERT(to->IsStackSlot());
-        __ str(ToRegister(from), ToMemOperand(to));
-      }
-    } else if (to->IsRegister()) {
-      ASSERT(from->IsStackSlot());
-      __ ldr(ToRegister(to), ToMemOperand(from));
-    } else if (from->IsStackSlot()) {
-      ASSERT(to->IsStackSlot());
-      __ ldr(ip, ToMemOperand(from));
-      __ str(ip, ToMemOperand(to));
-    } else if (from->IsDoubleRegister()) {
-      if (to->IsDoubleRegister()) {
-        __ vmov(ToDoubleRegister(to), ToDoubleRegister(from));
-      } else {
-        ASSERT(to->IsDoubleStackSlot());
-        // TODO(regis): Why is vstr not taking a MemOperand?
-        // __ vstr(dbl_scratch, ToMemOperand(to));
-        MemOperand to_operand = ToMemOperand(to);
-        __ vstr(ToDoubleRegister(from), to_operand.rn(), to_operand.offset());
-      }
-    } else if (to->IsDoubleRegister()) {
-      ASSERT(from->IsDoubleStackSlot());
-      // TODO(regis): Why is vldr not taking a MemOperand?
-      // __ vldr(ToDoubleRegister(to), ToMemOperand(from));
-      MemOperand from_operand = ToMemOperand(from);
-      __ vldr(ToDoubleRegister(to), from_operand.rn(), from_operand.offset());
-    } else {
-      ASSERT(to->IsDoubleStackSlot() && from->IsDoubleStackSlot());
-      // TODO(regis): Why is vldr not taking a MemOperand?
-      // __ vldr(dbl_scratch, ToMemOperand(from));
-      MemOperand from_operand = ToMemOperand(from);
-      __ vldr(dbl_scratch, from_operand.rn(), from_operand.offset());
-      // TODO(regis): Why is vstr not taking a MemOperand?
-      // __ vstr(dbl_scratch, ToMemOperand(to));
-      MemOperand to_operand = ToMemOperand(to);
-      __ vstr(dbl_scratch, to_operand.rn(), to_operand.offset());
-    }
-  }
-
-  if (destroys_core_scratch) {
-    __ ldr(core_scratch, MemOperand(fp, -kPointerSize));
-  }
-
-  LInstruction* next = GetNextInstruction();
-  if (next != NULL && next->IsLazyBailout()) {
-    int pc = masm()->pc_offset();
-    safepoints_.SetPcAfterGap(pc);
-  }
+  resolver_.Resolve(move);
 }
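Note: DoParallelMove now delegates to the resolver_ member declared via lithium-gap-resolver-arm.h; instead of returning a reordered move list for this function to interpret, the new resolver handles the move graph itself. Its actual code lives in lithium-gap-resolver-arm.cc, outside this file. As a rough illustration of the depth-first scheme such resolvers typically use (toy types and names, not the new class's API):

#include <cstdio>
#include <string>
#include <vector>

struct Move { std::string src, dst; bool done = false, pending = false; };
std::vector<Move> moves;

void Perform(size_t i) {
  moves[i].pending = true;
  for (size_t j = 0; j < moves.size(); ++j) {
    // Another unfinished move still reads the location we are about to write.
    if (j != i && !moves[j].done && moves[j].src == moves[i].dst) {
      if (moves[j].pending) {
        // We looped back into a pending move: a cycle. Save our destination
        // in a temp and redirect the blocked move to read the temp.
        std::printf("tmp <- %s\n", moves[i].dst.c_str());
        moves[j].src = "tmp";
      } else {
        Perform(j);  // Emit the blocked move first.
      }
    }
  }
  std::printf("%s <- %s\n", moves[i].dst.c_str(), moves[i].src.c_str());
  moves[i].done = true;
  moves[i].pending = false;
}

int main() {
  moves = {{"r0", "r1"}, {"r1", "r0"}};  // parallel swap of r0 and r1
  for (size_t i = 0; i < moves.size(); ++i) {
    if (!moves[i].done) Perform(i);
  }
  return 0;
}

This prints "tmp <- r0", "r0 <- r1", "r1 <- tmp": the swap is serialized with one temporary, the same job the marker operand performed in the deleted code.
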

 void LCodeGen::DoGap(LGap* gap) {
   for (int i = LGap::FIRST_INNER_POSITION;
        i <= LGap::LAST_INNER_POSITION;
        i++) {
     LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
     LParallelMove* move = gap->GetParallelMove(inner_pos);
     if (move != NULL) DoParallelMove(move);
(...skipping 3057 matching lines...)
   ASSERT(!environment->HasBeenRegistered());
   RegisterEnvironmentForDeoptimization(environment);
   ASSERT(osr_pc_offset_ == -1);
   osr_pc_offset_ = masm()->pc_offset();
 }


 #undef __

 } }  // namespace v8::internal