Chromium Code Reviews

Unified Diff: src/arm/lithium-codegen-arm.cc

Issue 6311010: ARM: Port new version of ParallelMove's GapResolver to ARM. Add MemOperand s... (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 9 years, 10 months ago
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
 //       with the distribution.
 //     * Neither the name of Google Inc. nor the names of its
 //       contributors may be used to endorse or promote products derived
 //       from this software without specific prior written permission.
 //
 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include "arm/lithium-codegen-arm.h"
+#include "arm/lithium-gap-resolver-arm.h"
 #include "code-stubs.h"
 #include "stub-cache.h"
 
 namespace v8 {
 namespace internal {
 
 
 class SafepointGenerator : public PostCallGenerator {
  public:
   SafepointGenerator(LCodeGen* codegen,
                      LPointerMap* pointers,
                      int deoptimization_index)
       : codegen_(codegen),
         pointers_(pointers),
         deoptimization_index_(deoptimization_index) { }
   virtual ~SafepointGenerator() { }
 
   virtual void Generate() {
     codegen_->RecordSafepoint(pointers_, deoptimization_index_);
   }
 
  private:
   LCodeGen* codegen_;
   LPointerMap* pointers_;
   int deoptimization_index_;
 };
 
 
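SafepointGenerator defers RecordSafepoint until right after a call instruction has been emitted, so the safepoint's program counter matches the call's return address. A minimal sketch of how such a generator would be threaded through a call site, assuming the era's MacroAssembler call helpers accept a PostCallGenerator* (the instruction and register choices below are illustrative, not taken from this diff):

    // Record a safepoint immediately after the generated call returns.
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, deoptimization_index);
    __ InvokeFunction(r1, actual_count, CALL_FUNCTION, &generator);
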
-class LGapNode: public ZoneObject {
- public:
-  explicit LGapNode(LOperand* operand)
-      : operand_(operand), resolved_(false), visited_id_(-1) { }
-
-  LOperand* operand() const { return operand_; }
-  bool IsResolved() const { return !IsAssigned() || resolved_; }
-  void MarkResolved() {
-    ASSERT(!IsResolved());
-    resolved_ = true;
-  }
-  int visited_id() const { return visited_id_; }
-  void set_visited_id(int id) {
-    ASSERT(id > visited_id_);
-    visited_id_ = id;
-  }
-
-  bool IsAssigned() const { return assigned_from_.is_set(); }
-  LGapNode* assigned_from() const { return assigned_from_.get(); }
-  void set_assigned_from(LGapNode* n) { assigned_from_.set(n); }
-
- private:
-  LOperand* operand_;
-  SetOncePointer<LGapNode> assigned_from_;
-  bool resolved_;
-  int visited_id_;
-};
-
-
-LGapResolver::LGapResolver()
-    : nodes_(32),
-      identified_cycles_(4),
-      result_(16),
-      next_visited_id_(0) {
-}
-
-
-const ZoneList<LMoveOperands>* LGapResolver::Resolve(
-    const ZoneList<LMoveOperands>* moves,
-    LOperand* marker_operand) {
-  nodes_.Rewind(0);
-  identified_cycles_.Rewind(0);
-  result_.Rewind(0);
-  next_visited_id_ = 0;
-
-  for (int i = 0; i < moves->length(); ++i) {
-    LMoveOperands move = moves->at(i);
-    if (!move.IsRedundant()) RegisterMove(move);
-  }
-
-  for (int i = 0; i < identified_cycles_.length(); ++i) {
-    ResolveCycle(identified_cycles_[i], marker_operand);
-  }
-
-  int unresolved_nodes;
-  do {
-    unresolved_nodes = 0;
-    for (int j = 0; j < nodes_.length(); j++) {
-      LGapNode* node = nodes_[j];
-      if (!node->IsResolved() && node->assigned_from()->IsResolved()) {
-        AddResultMove(node->assigned_from(), node);
-        node->MarkResolved();
-      }
-      if (!node->IsResolved()) ++unresolved_nodes;
-    }
-  } while (unresolved_nodes > 0);
-  return &result_;
-}
-
-
-void LGapResolver::AddResultMove(LGapNode* from, LGapNode* to) {
-  AddResultMove(from->operand(), to->operand());
-}
-
-
-void LGapResolver::AddResultMove(LOperand* from, LOperand* to) {
-  result_.Add(LMoveOperands(from, to));
-}
-
-
-void LGapResolver::ResolveCycle(LGapNode* start, LOperand* marker_operand) {
-  ZoneList<LOperand*> cycle_operands(8);
-  cycle_operands.Add(marker_operand);
-  LGapNode* cur = start;
-  do {
-    cur->MarkResolved();
-    cycle_operands.Add(cur->operand());
-    cur = cur->assigned_from();
-  } while (cur != start);
-  cycle_operands.Add(marker_operand);
-
-  for (int i = cycle_operands.length() - 1; i > 0; --i) {
-    LOperand* from = cycle_operands[i];
-    LOperand* to = cycle_operands[i - 1];
-    AddResultMove(from, to);
-  }
-}
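ResolveCycle breaks a cyclic permutation by threading the marker operand through it: the cycle's operands are listed with the marker at both ends, and moves are added pairwise from the back of that list. Because DoParallelMove (further down in this diff) consumes the result list in reverse, the marker ends up acting as a temporary. Tracing the code above for the two-element cycle r0 <-> r1, the emitted order is:

    marker <- r1      // park one end of the cycle in the scratch location
    r1     <- r0      // shift the remaining value along the cycle
    r0     <- marker  // unload the scratch into the freed slot
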
-
-
-bool LGapResolver::CanReach(LGapNode* a, LGapNode* b, int visited_id) {
-  ASSERT(a != b);
-  LGapNode* cur = a;
-  while (cur != b && cur->visited_id() != visited_id && cur->IsAssigned()) {
-    cur->set_visited_id(visited_id);
-    cur = cur->assigned_from();
-  }
-
-  return cur == b;
-}
-
-
-bool LGapResolver::CanReach(LGapNode* a, LGapNode* b) {
-  ASSERT(a != b);
-  return CanReach(a, b, next_visited_id_++);
-}
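CanReach follows the assigned_from chain from a toward b, and marks nodes with the query's visited_id rather than a boolean, so no clearing pass is needed between queries: marks left by earlier traversals carry a smaller id and are simply ignored. The same epoch-marking idiom in isolation (a generic sketch, not code from this file):

    struct Node {
      Node* next;          // analogous to assigned_from()
      int visited_epoch;   // initialized to -1
    };

    // Each query passes a fresh epoch; stale marks never match it.
    bool Reaches(Node* a, Node* b, int epoch) {
      Node* cur = a;
      while (cur != b && cur->visited_epoch != epoch && cur->next != NULL) {
        cur->visited_epoch = epoch;
        cur = cur->next;
      }
      return cur == b;
    }
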
-
-
-void LGapResolver::RegisterMove(LMoveOperands move) {
-  if (move.source()->IsConstantOperand()) {
-    // Constant moves should be last in the machine code. Therefore add them
-    // first to the result set.
-    AddResultMove(move.source(), move.destination());
-  } else {
-    LGapNode* from = LookupNode(move.source());
-    LGapNode* to = LookupNode(move.destination());
-    if (to->IsAssigned() && to->assigned_from() == from) {
-      move.Eliminate();
-      return;
-    }
-    ASSERT(!to->IsAssigned());
-    if (CanReach(from, to)) {
-      // This introduces a cycle. Save.
-      identified_cycles_.Add(from);
-    }
-    to->set_assigned_from(from);
-  }
-}
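Adding constant moves to the result first makes them come out last, since the emitter walks the result list in reverse. That ordering is what makes it safe for a constant's destination to also be another move's source: for the parallel move {r1 <- r0, r0 <- #5}, the resolver's output is emitted as

    r1 <- r0   // read r0 before it is clobbered
    r0 <- #5   // the constant load runs last
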
-
-
-LGapNode* LGapResolver::LookupNode(LOperand* operand) {
-  for (int i = 0; i < nodes_.length(); ++i) {
-    if (nodes_[i]->operand()->Equals(operand)) return nodes_[i];
-  }
-
-  // No node found => create a new one.
-  LGapNode* result = new LGapNode(operand);
-  nodes_.Add(result);
-  return result;
-}
-
-
 #define __ masm()->
 
 bool LCodeGen::GenerateCode() {
   HPhase phase("Code generation", chunk());
   ASSERT(is_unused());
   status_ = GENERATING;
   CpuFeatures::Scope scope1(VFP3);
   CpuFeatures::Scope scope2(ARMv7);
   return GeneratePrologue() &&
          GenerateBody() &&

(...skipping 239 matching lines...)

     Abort("ToOperand IsDoubleRegister unimplemented");
     return Operand(0);
   }
   // Stack slots not implemented, use ToMemOperand instead.
   UNREACHABLE();
   return Operand(0);
 }
 
 
 MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
-  // TODO(regis): Revisit.
   ASSERT(!op->IsRegister());
   ASSERT(!op->IsDoubleRegister());
   ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
   int index = op->index();
   if (index >= 0) {
     // Local or spill slot. Skip the frame pointer, function, and
     // context in the fixed part of the frame.
     return MemOperand(fp, -(index + 3) * kPointerSize);
   } else {
     // Incoming parameter. Skip the return address.
     return MemOperand(fp, -(index - 1) * kPointerSize);
   }
 }
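The offsets fall out of the fixed frame layout, worked through with kPointerSize == 4 on ARM:

    index  0 (spill slot) -> MemOperand(fp, -12)  // -(0 + 3) * 4; skips the saved fp, function, and context words
    index  1 (spill slot) -> MemOperand(fp, -16)  // -(1 + 3) * 4
    index -1 (parameter)  -> MemOperand(fp, +8)   // -(-1 - 1) * 4; above the saved state, per the comment
    index -2 (parameter)  -> MemOperand(fp, +12)  // -(-2 - 1) * 4
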
 
 
+MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
+  ASSERT(op->IsDoubleStackSlot());
+  int index = op->index();
+  if (index >= 0) {
+    // Local or spill slot. Skip the frame pointer, function, context,
+    // and the first word of the double in the fixed part of the frame.

    [Review comment] Søren Thygesen Gjesse, 2011/01/26 08:10:26: I know that you copied from above, but I don't lik…

+    return MemOperand(fp, -(index + 3) * kPointerSize + kPointerSize);
+  } else {
+    // Incoming parameter. Skip the return address and the first word of
+    // the double.
+    return MemOperand(fp, -(index - 1) * kPointerSize + kPointerSize);
+  }
+}
+
+
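ToHighMemOperand addresses the second word of a double stack slot: the same base computation as ToMemOperand, plus one extra kPointerSize. Worked through with kPointerSize == 4:

    double slot index 0: first word at MemOperand(fp, -12), second word at MemOperand(fp, -8)
    double slot index 2: first word at MemOperand(fp, -20), second word at MemOperand(fp, -16)
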
 void LCodeGen::WriteTranslation(LEnvironment* environment,
                                 Translation* translation) {
   if (environment == NULL) return;
 
   // The translation includes one command per value in the environment.
   int translation_size = environment->values()->length();
   // The output frame height does not include the parameters.
   int height = translation_size - environment->parameter_count();
 
   WriteTranslation(environment->outer(), translation);

(...skipping 314 matching lines...)

   } else {
     Comment(";;; B%d", label->block_id());
   }
   __ bind(label->label());
   current_block_ = label->block_id();
   LCodeGen::DoGap(label);
 }
 
 
 void LCodeGen::DoParallelMove(LParallelMove* move) {
-  // d0 must always be a scratch register.
-  DoubleRegister dbl_scratch = d0;
-  LUnallocated marker_operand(LUnallocated::NONE);
-
-  Register core_scratch = scratch0();
-  bool destroys_core_scratch = false;
-
-  const ZoneList<LMoveOperands>* moves =
-      resolver_.Resolve(move->move_operands(), &marker_operand);
-  for (int i = moves->length() - 1; i >= 0; --i) {
-    LMoveOperands move = moves->at(i);
-    LOperand* from = move.source();
-    LOperand* to = move.destination();
-    ASSERT(!from->IsDoubleRegister() ||
-           !ToDoubleRegister(from).is(dbl_scratch));
-    ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(dbl_scratch));
-    ASSERT(!from->IsRegister() || !ToRegister(from).is(core_scratch));
-    ASSERT(!to->IsRegister() || !ToRegister(to).is(core_scratch));
-    if (from == &marker_operand) {
-      if (to->IsRegister()) {
-        __ mov(ToRegister(to), core_scratch);
-        ASSERT(destroys_core_scratch);
-      } else if (to->IsStackSlot()) {
-        __ str(core_scratch, ToMemOperand(to));
-        ASSERT(destroys_core_scratch);
-      } else if (to->IsDoubleRegister()) {
-        __ vmov(ToDoubleRegister(to), dbl_scratch);
-      } else {
-        ASSERT(to->IsDoubleStackSlot());
-        // TODO(regis): Why is vstr not taking a MemOperand?
-        // __ vstr(dbl_scratch, ToMemOperand(to));
-        MemOperand to_operand = ToMemOperand(to);
-        __ vstr(dbl_scratch, to_operand.rn(), to_operand.offset());
-      }
-    } else if (to == &marker_operand) {
-      if (from->IsRegister() || from->IsConstantOperand()) {
-        __ mov(core_scratch, ToOperand(from));
-        destroys_core_scratch = true;
-      } else if (from->IsStackSlot()) {
-        __ ldr(core_scratch, ToMemOperand(from));
-        destroys_core_scratch = true;
-      } else if (from->IsDoubleRegister()) {
-        __ vmov(dbl_scratch, ToDoubleRegister(from));
-      } else {
-        ASSERT(from->IsDoubleStackSlot());
-        // TODO(regis): Why is vldr not taking a MemOperand?
-        // __ vldr(dbl_scratch, ToMemOperand(from));
-        MemOperand from_operand = ToMemOperand(from);
-        __ vldr(dbl_scratch, from_operand.rn(), from_operand.offset());
-      }
-    } else if (from->IsConstantOperand()) {
-      if (to->IsRegister()) {
-        __ mov(ToRegister(to), ToOperand(from));
-      } else {
-        ASSERT(to->IsStackSlot());
-        __ mov(ip, ToOperand(from));
-        __ str(ip, ToMemOperand(to));
-      }
-    } else if (from->IsRegister()) {
-      if (to->IsRegister()) {
-        __ mov(ToRegister(to), ToOperand(from));
-      } else {
-        ASSERT(to->IsStackSlot());
-        __ str(ToRegister(from), ToMemOperand(to));
-      }
-    } else if (to->IsRegister()) {
-      ASSERT(from->IsStackSlot());
-      __ ldr(ToRegister(to), ToMemOperand(from));
-    } else if (from->IsStackSlot()) {
-      ASSERT(to->IsStackSlot());
-      __ ldr(ip, ToMemOperand(from));
-      __ str(ip, ToMemOperand(to));
-    } else if (from->IsDoubleRegister()) {
-      if (to->IsDoubleRegister()) {
-        __ vmov(ToDoubleRegister(to), ToDoubleRegister(from));
-      } else {
-        ASSERT(to->IsDoubleStackSlot());
-        // TODO(regis): Why is vstr not taking a MemOperand?
-        // __ vstr(dbl_scratch, ToMemOperand(to));
-        MemOperand to_operand = ToMemOperand(to);
-        __ vstr(ToDoubleRegister(from), to_operand.rn(), to_operand.offset());
-      }
-    } else if (to->IsDoubleRegister()) {
-      ASSERT(from->IsDoubleStackSlot());
-      // TODO(regis): Why is vldr not taking a MemOperand?
-      // __ vldr(ToDoubleRegister(to), ToMemOperand(from));
-      MemOperand from_operand = ToMemOperand(from);
-      __ vldr(ToDoubleRegister(to), from_operand.rn(), from_operand.offset());
-    } else {
-      ASSERT(to->IsDoubleStackSlot() && from->IsDoubleStackSlot());
-      // TODO(regis): Why is vldr not taking a MemOperand?
-      // __ vldr(dbl_scratch, ToMemOperand(from));
-      MemOperand from_operand = ToMemOperand(from);
-      __ vldr(dbl_scratch, from_operand.rn(), from_operand.offset());
-      // TODO(regis): Why is vstr not taking a MemOperand?
-      // __ vstr(dbl_scratch, ToMemOperand(to));
-      MemOperand to_operand = ToMemOperand(to);
-      __ vstr(dbl_scratch, to_operand.rn(), to_operand.offset());
-    }
-  }
-
-  if (destroys_core_scratch) {
-    __ ldr(core_scratch, MemOperand(fp, -kPointerSize));
-  }
-
-  LInstruction* next = GetNextInstruction();
-  if (next != NULL && next->IsLazyBailout()) {
-    int pc = masm()->pc_offset();
-    safepoints_.SetPcAfterGap(pc);
-  }
+  resolver_.Resolve(move);
 }
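The new code delegates the whole sequence above to the LGapResolver added in lithium-gap-resolver-arm.cc, which emits machine code for each move directly instead of returning a rewritten move list built around a marker operand. The recurring TODOs about vstr/vldr are worked around by unpacking the MemOperand by hand; the pattern in isolation (VldrMem is a hypothetical helper name, and the truncated issue title suggests this CL adds MemOperand overloads that would make it unnecessary):

    // Unpack a MemOperand into the (base register, offset) form that
    // this revision's vldr accepts.
    void VldrMem(MacroAssembler* masm, DoubleRegister dst, const MemOperand& mem) {
      masm->vldr(dst, mem.rn(), mem.offset());
    }
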
 
 
 void LCodeGen::DoGap(LGap* gap) {
   for (int i = LGap::FIRST_INNER_POSITION;
        i <= LGap::LAST_INNER_POSITION;
        i++) {
     LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
     LParallelMove* move = gap->GetParallelMove(inner_pos);
     if (move != NULL) DoParallelMove(move);

(...skipping 2737 matching lines...)

 
 
 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
   Abort("DoOsrEntry unimplemented.");
 }
 
 
 #undef __
 
 } }  // namespace v8::internal