Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(201)

Side by Side Diff: src/arm/lithium-codegen-arm.cc

Issue 6614010: [Isolates] Merge 6700:7030 from bleeding_edge to isolates. (Closed) Base URL: http://v8.googlecode.com/svn/branches/experimental/isolates/
Patch Set: '' Created 9 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/arm/lithium-codegen-arm.h ('k') | src/arm/lithium-gap-resolver-arm.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show/Hide Comments ('s')
OLDNEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution. 11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its 12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived 13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission. 14 // from this software without specific prior written permission.
15 // 15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 27
28 #include "arm/lithium-codegen-arm.h" 28 #include "arm/lithium-codegen-arm.h"
29 #include "arm/lithium-gap-resolver-arm.h"
29 #include "code-stubs.h" 30 #include "code-stubs.h"
30 #include "stub-cache.h" 31 #include "stub-cache.h"
31 32
32 namespace v8 { 33 namespace v8 {
33 namespace internal { 34 namespace internal {
34 35
35 36
// Helper passed to call-emitting code: records a safepoint for the given
// pointer map immediately after the call instruction is generated.
class SafepointGenerator : public PostCallGenerator {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     int deoptimization_index)
      : codegen_(codegen),
        pointers_(pointers),
        deoptimization_index_(deoptimization_index) { }
  virtual ~SafepointGenerator() { }

  // Invoked right after the call; delegates to LCodeGen::RecordSafepoint
  // at the current pc.
  virtual void Generate() {
    codegen_->RecordSafepoint(pointers_, deoptimization_index_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  int deoptimization_index_;
};
55 56
56 57
// A node in the move graph built by LGapResolver.  Each node wraps one
// LOperand and remembers at most one incoming assignment edge
// (assigned_from_), set at most once via SetOncePointer.
class LGapNode: public ZoneObject {
 public:
  explicit LGapNode(LOperand* operand)
      : operand_(operand), resolved_(false), visited_id_(-1) { }

  LOperand* operand() const { return operand_; }
  // A node with no incoming assignment is trivially resolved.
  bool IsResolved() const { return !IsAssigned() || resolved_; }
  void MarkResolved() {
    ASSERT(!IsResolved());
    resolved_ = true;
  }
  // visited_id_ tags the node during one reachability traversal; ids are
  // strictly increasing, so a stale tag never aliases a new traversal.
  int visited_id() const { return visited_id_; }
  void set_visited_id(int id) {
    ASSERT(id > visited_id_);
    visited_id_ = id;
  }

  bool IsAssigned() const { return assigned_from_.is_set(); }
  LGapNode* assigned_from() const { return assigned_from_.get(); }
  void set_assigned_from(LGapNode* n) { assigned_from_.set(n); }

 private:
  LOperand* operand_;
  SetOncePointer<LGapNode> assigned_from_;
  bool resolved_;
  int visited_id_;
};
84
85
// The numeric arguments are presumably initial capacities for the zone
// lists; they grow on demand — TODO confirm against ZoneList.
LGapResolver::LGapResolver()
    : nodes_(32),
      identified_cycles_(4),
      result_(16),
      next_visited_id_(0) {
}
92
93
94 const ZoneList<LMoveOperands>* LGapResolver::Resolve(
95 const ZoneList<LMoveOperands>* moves,
96 LOperand* marker_operand) {
97 nodes_.Rewind(0);
98 identified_cycles_.Rewind(0);
99 result_.Rewind(0);
100 next_visited_id_ = 0;
101
102 for (int i = 0; i < moves->length(); ++i) {
103 LMoveOperands move = moves->at(i);
104 if (!move.IsRedundant()) RegisterMove(move);
105 }
106
107 for (int i = 0; i < identified_cycles_.length(); ++i) {
108 ResolveCycle(identified_cycles_[i], marker_operand);
109 }
110
111 int unresolved_nodes;
112 do {
113 unresolved_nodes = 0;
114 for (int j = 0; j < nodes_.length(); j++) {
115 LGapNode* node = nodes_[j];
116 if (!node->IsResolved() && node->assigned_from()->IsResolved()) {
117 AddResultMove(node->assigned_from(), node);
118 node->MarkResolved();
119 }
120 if (!node->IsResolved()) ++unresolved_nodes;
121 }
122 } while (unresolved_nodes > 0);
123 return &result_;
124 }
125
126
// Convenience overload: emit a move between the operands of two nodes.
void LGapResolver::AddResultMove(LGapNode* from, LGapNode* to) {
  AddResultMove(from->operand(), to->operand());
}
130
131
// Appends one sequential move to the result list.
void LGapResolver::AddResultMove(LOperand* from, LOperand* to) {
  result_.Add(LMoveOperands(from, to));
}
135
136
137 void LGapResolver::ResolveCycle(LGapNode* start, LOperand* marker_operand) {
138 ZoneList<LOperand*> cycle_operands(8);
139 cycle_operands.Add(marker_operand);
140 LGapNode* cur = start;
141 do {
142 cur->MarkResolved();
143 cycle_operands.Add(cur->operand());
144 cur = cur->assigned_from();
145 } while (cur != start);
146 cycle_operands.Add(marker_operand);
147
148 for (int i = cycle_operands.length() - 1; i > 0; --i) {
149 LOperand* from = cycle_operands[i];
150 LOperand* to = cycle_operands[i - 1];
151 AddResultMove(from, to);
152 }
153 }
154
155
156 bool LGapResolver::CanReach(LGapNode* a, LGapNode* b, int visited_id) {
157 ASSERT(a != b);
158 LGapNode* cur = a;
159 while (cur != b && cur->visited_id() != visited_id && cur->IsAssigned()) {
160 cur->set_visited_id(visited_id);
161 cur = cur->assigned_from();
162 }
163
164 return cur == b;
165 }
166
167
// Reachability query using a fresh visitation id for this traversal.
bool LGapResolver::CanReach(LGapNode* a, LGapNode* b) {
  ASSERT(a != b);
  return CanReach(a, b, next_visited_id_++);
}
172
173
174 void LGapResolver::RegisterMove(LMoveOperands move) {
175 if (move.source()->IsConstantOperand()) {
176 // Constant moves should be last in the machine code. Therefore add them
177 // first to the result set.
178 AddResultMove(move.source(), move.destination());
179 } else {
180 LGapNode* from = LookupNode(move.source());
181 LGapNode* to = LookupNode(move.destination());
182 if (to->IsAssigned() && to->assigned_from() == from) {
183 move.Eliminate();
184 return;
185 }
186 ASSERT(!to->IsAssigned());
187 if (CanReach(from, to)) {
188 // This introduces a cycle. Save.
189 identified_cycles_.Add(from);
190 }
191 to->set_assigned_from(from);
192 }
193 }
194
195
196 LGapNode* LGapResolver::LookupNode(LOperand* operand) {
197 for (int i = 0; i < nodes_.length(); ++i) {
198 if (nodes_[i]->operand()->Equals(operand)) return nodes_[i];
199 }
200
201 // No node found => create a new one.
202 LGapNode* result = new LGapNode(operand);
203 nodes_.Add(result);
204 return result;
205 }
206
207
208 #define __ masm()-> 58 #define __ masm()->
209 59
210 bool LCodeGen::GenerateCode() { 60 bool LCodeGen::GenerateCode() {
211 HPhase phase("Code generation", chunk()); 61 HPhase phase("Code generation", chunk());
212 ASSERT(is_unused()); 62 ASSERT(is_unused());
213 status_ = GENERATING; 63 status_ = GENERATING;
214 CpuFeatures::Scope scope1(VFP3); 64 CpuFeatures::Scope scope1(VFP3);
215 CpuFeatures::Scope scope2(ARMv7); 65 CpuFeatures::Scope scope2(ARMv7);
216 return GeneratePrologue() && 66 return GeneratePrologue() &&
217 GenerateBody() && 67 GenerateBody() &&
(...skipping 69 matching lines...) Expand 10 before | Expand all | Expand 10 after
287 Label loop; 137 Label loop;
288 __ bind(&loop); 138 __ bind(&loop);
289 __ push(r2); 139 __ push(r2);
290 __ sub(r0, r0, Operand(1), SetCC); 140 __ sub(r0, r0, Operand(1), SetCC);
291 __ b(ne, &loop); 141 __ b(ne, &loop);
292 } else { 142 } else {
293 __ sub(sp, sp, Operand(slots * kPointerSize)); 143 __ sub(sp, sp, Operand(slots * kPointerSize));
294 } 144 }
295 } 145 }
296 146
147 // Possibly allocate a local context.
148 int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
149 if (heap_slots > 0) {
150 Comment(";;; Allocate local context");
151 // Argument to NewContext is the function, which is in r1.
152 __ push(r1);
153 if (heap_slots <= FastNewContextStub::kMaximumSlots) {
154 FastNewContextStub stub(heap_slots);
155 __ CallStub(&stub);
156 } else {
157 __ CallRuntime(Runtime::kNewContext, 1);
158 }
159 RecordSafepoint(Safepoint::kNoDeoptimizationIndex);
160 // Context is returned in both r0 and cp. It replaces the context
161 // passed to us. It's saved in the stack and kept live in cp.
162 __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
163 // Copy any necessary parameters into the context.
164 int num_parameters = scope()->num_parameters();
165 for (int i = 0; i < num_parameters; i++) {
166 Slot* slot = scope()->parameter(i)->AsSlot();
167 if (slot != NULL && slot->type() == Slot::CONTEXT) {
168 int parameter_offset = StandardFrameConstants::kCallerSPOffset +
169 (num_parameters - 1 - i) * kPointerSize;
170 // Load parameter from stack.
171 __ ldr(r0, MemOperand(fp, parameter_offset));
172 // Store it in the context.
173 __ mov(r1, Operand(Context::SlotOffset(slot->index())));
174 __ str(r0, MemOperand(cp, r1));
175 // Update the write barrier. This clobbers all involved
176 // registers, so we have to use two more registers to avoid
177 // clobbering cp.
178 __ mov(r2, Operand(cp));
179 __ RecordWrite(r2, Operand(r1), r3, r0);
180 }
181 }
182 Comment(";;; End allocate local context");
183 }
184
297 // Trace the call. 185 // Trace the call.
298 if (FLAG_trace) { 186 if (FLAG_trace) {
299 __ CallRuntime(Runtime::kTraceEnter, 0); 187 __ CallRuntime(Runtime::kTraceEnter, 0);
300 } 188 }
301 return !is_aborted(); 189 return !is_aborted();
302 } 190 }
303 191
304 192
305 bool LCodeGen::GenerateBody() { 193 bool LCodeGen::GenerateBody() {
306 ASSERT(is_generating()); 194 ASSERT(is_generating());
(...skipping 150 matching lines...) Expand 10 before | Expand all | Expand 10 after
457 Abort("ToOperand IsDoubleRegister unimplemented"); 345 Abort("ToOperand IsDoubleRegister unimplemented");
458 return Operand(0); 346 return Operand(0);
459 } 347 }
460 // Stack slots not implemented, use ToMemOperand instead. 348 // Stack slots not implemented, use ToMemOperand instead.
461 UNREACHABLE(); 349 UNREACHABLE();
462 return Operand(0); 350 return Operand(0);
463 } 351 }
464 352
465 353
// Maps a stack-slot (or double-stack-slot) operand to a frame-pointer
// relative MemOperand.
MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
  // TODO(regis): Revisit.
  ASSERT(!op->IsRegister());
  ASSERT(!op->IsDoubleRegister());
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
  int index = op->index();
  if (index >= 0) {
    // Local or spill slot. Skip the frame pointer, function, and
    // context in the fixed part of the frame.
    return MemOperand(fp, -(index + 3) * kPointerSize);
  } else {
    // Incoming parameter. Skip the return address.
    return MemOperand(fp, -(index - 1) * kPointerSize);
  }
}
481 368
482 369
// Like ToMemOperand, but addresses the second (high) word of a double
// stack slot.
MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
  ASSERT(op->IsDoubleStackSlot());
  int index = op->index();
  if (index >= 0) {
    // Local or spill slot. Skip the frame pointer, function, context,
    // and the first word of the double in the fixed part of the frame.
    return MemOperand(fp, -(index + 3) * kPointerSize + kPointerSize);
  } else {
    // Incoming parameter. Skip the return address and the first word of
    // the double.
    return MemOperand(fp, -(index - 1) * kPointerSize + kPointerSize);
  }
}
383
384
483 void LCodeGen::WriteTranslation(LEnvironment* environment, 385 void LCodeGen::WriteTranslation(LEnvironment* environment,
484 Translation* translation) { 386 Translation* translation) {
485 if (environment == NULL) return; 387 if (environment == NULL) return;
486 388
487 // The translation includes one command per value in the environment. 389 // The translation includes one command per value in the environment.
488 int translation_size = environment->values()->length(); 390 int translation_size = environment->values()->length();
489 // The output frame height does not include the parameters. 391 // The output frame height does not include the parameters.
490 int height = translation_size - environment->parameter_count(); 392 int height = translation_size - environment->parameter_count();
491 393
492 WriteTranslation(environment->outer(), translation); 394 WriteTranslation(environment->outer(), translation);
(...skipping 171 matching lines...) Expand 10 before | Expand all | Expand 10 after
664 } 566 }
665 567
666 568
667 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { 569 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
668 int length = deoptimizations_.length(); 570 int length = deoptimizations_.length();
669 if (length == 0) return; 571 if (length == 0) return;
670 ASSERT(FLAG_deopt); 572 ASSERT(FLAG_deopt);
671 Handle<DeoptimizationInputData> data = 573 Handle<DeoptimizationInputData> data =
672 FACTORY->NewDeoptimizationInputData(length, TENURED); 574 FACTORY->NewDeoptimizationInputData(length, TENURED);
673 575
674 data->SetTranslationByteArray(*translations_.CreateByteArray()); 576 Handle<ByteArray> translations = translations_.CreateByteArray();
577 data->SetTranslationByteArray(*translations);
675 data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_)); 578 data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
676 579
677 Handle<FixedArray> literals = 580 Handle<FixedArray> literals =
678 FACTORY->NewFixedArray(deoptimization_literals_.length(), TENURED); 581 FACTORY->NewFixedArray(deoptimization_literals_.length(), TENURED);
679 for (int i = 0; i < deoptimization_literals_.length(); i++) { 582 for (int i = 0; i < deoptimization_literals_.length(); i++) {
680 literals->set(i, *deoptimization_literals_[i]); 583 literals->set(i, *deoptimization_literals_[i]);
681 } 584 }
682 data->SetLiteralArray(*literals); 585 data->SetLiteralArray(*literals);
683 586
684 data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id())); 587 data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
(...skipping 59 matching lines...) Expand 10 before | Expand all | Expand 10 after
744 } 647 }
745 } 648 }
746 649
747 650
// Records a simple safepoint (no saved registers, no pushed arguments).
void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               int deoptimization_index) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deoptimization_index);
}
752 655
753 656
// Records a safepoint with an empty pointer map (no live spill slots).
void LCodeGen::RecordSafepoint(int deoptimization_index) {
  LPointerMap empty_pointers(RelocInfo::kNoPosition);
  RecordSafepoint(&empty_pointers, deoptimization_index);
}
661
662
// Records a safepoint taken with registers saved and |arguments|
// arguments on the stack.
void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            int deoptimization_index) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments,
                  deoptimization_index);
}
760 669
761 670
762 void LCodeGen::RecordSafepointWithRegistersAndDoubles( 671 void LCodeGen::RecordSafepointWithRegistersAndDoubles(
763 LPointerMap* pointers, 672 LPointerMap* pointers,
(...skipping 16 matching lines...) Expand all
780 } else { 689 } else {
781 Comment(";;; B%d", label->block_id()); 690 Comment(";;; B%d", label->block_id());
782 } 691 }
783 __ bind(label->label()); 692 __ bind(label->label());
784 current_block_ = label->block_id(); 693 current_block_ = label->block_id();
785 LCodeGen::DoGap(label); 694 LCodeGen::DoGap(label);
786 } 695 }
787 696
788 697
// Lowers a parallel move to a sequence of actual ARM moves.  The gap
// resolver returns a sequential move list in which cycle-breaking
// temporaries appear as |marker_operand|; those are materialized here
// using core_scratch (for tagged/word values) and d0 (for doubles).
void LCodeGen::DoParallelMove(LParallelMove* move) {
  // d0 must always be a scratch register.
  DoubleRegister dbl_scratch = d0;
  LUnallocated marker_operand(LUnallocated::NONE);

  Register core_scratch = scratch0();
  bool destroys_core_scratch = false;

  const ZoneList<LMoveOperands>* moves =
      resolver_.Resolve(move->move_operands(), &marker_operand);
  // The resolved list is emitted back to front.
  for (int i = moves->length() - 1; i >= 0; --i) {
    LMoveOperands move = moves->at(i);
    LOperand* from = move.source();
    LOperand* to = move.destination();
    // The scratch registers must never appear as ordinary operands.
    ASSERT(!from->IsDoubleRegister() ||
           !ToDoubleRegister(from).is(dbl_scratch));
    ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(dbl_scratch));
    ASSERT(!from->IsRegister() || !ToRegister(from).is(core_scratch));
    ASSERT(!to->IsRegister() || !ToRegister(to).is(core_scratch));
    if (from == &marker_operand) {
      // Restore a previously saved value from the scratch register(s).
      if (to->IsRegister()) {
        __ mov(ToRegister(to), core_scratch);
        ASSERT(destroys_core_scratch);
      } else if (to->IsStackSlot()) {
        __ str(core_scratch, ToMemOperand(to));
        ASSERT(destroys_core_scratch);
      } else if (to->IsDoubleRegister()) {
        __ vmov(ToDoubleRegister(to), dbl_scratch);
      } else {
        ASSERT(to->IsDoubleStackSlot());
        // TODO(regis): Why is vstr not taking a MemOperand?
        // __ vstr(dbl_scratch, ToMemOperand(to));
        MemOperand to_operand = ToMemOperand(to);
        __ vstr(dbl_scratch, to_operand.rn(), to_operand.offset());
      }
    } else if (to == &marker_operand) {
      // Save a value into the scratch register(s).
      if (from->IsRegister() || from->IsConstantOperand()) {
        __ mov(core_scratch, ToOperand(from));
        destroys_core_scratch = true;
      } else if (from->IsStackSlot()) {
        __ ldr(core_scratch, ToMemOperand(from));
        destroys_core_scratch = true;
      } else if (from->IsDoubleRegister()) {
        __ vmov(dbl_scratch, ToDoubleRegister(from));
      } else {
        ASSERT(from->IsDoubleStackSlot());
        // TODO(regis): Why is vldr not taking a MemOperand?
        // __ vldr(dbl_scratch, ToMemOperand(from));
        MemOperand from_operand = ToMemOperand(from);
        __ vldr(dbl_scratch, from_operand.rn(), from_operand.offset());
      }
    } else if (from->IsConstantOperand()) {
      if (to->IsRegister()) {
        __ mov(ToRegister(to), ToOperand(from));
      } else {
        ASSERT(to->IsStackSlot());
        __ mov(ip, ToOperand(from));
        __ str(ip, ToMemOperand(to));
      }
    } else if (from->IsRegister()) {
      if (to->IsRegister()) {
        __ mov(ToRegister(to), ToOperand(from));
      } else {
        ASSERT(to->IsStackSlot());
        __ str(ToRegister(from), ToMemOperand(to));
      }
    } else if (to->IsRegister()) {
      ASSERT(from->IsStackSlot());
      __ ldr(ToRegister(to), ToMemOperand(from));
    } else if (from->IsStackSlot()) {
      // Memory-to-memory word move goes through ip.
      ASSERT(to->IsStackSlot());
      __ ldr(ip, ToMemOperand(from));
      __ str(ip, ToMemOperand(to));
    } else if (from->IsDoubleRegister()) {
      if (to->IsDoubleRegister()) {
        __ vmov(ToDoubleRegister(to), ToDoubleRegister(from));
      } else {
        ASSERT(to->IsDoubleStackSlot());
        // TODO(regis): Why is vstr not taking a MemOperand?
        // __ vstr(dbl_scratch, ToMemOperand(to));
        MemOperand to_operand = ToMemOperand(to);
        __ vstr(ToDoubleRegister(from), to_operand.rn(), to_operand.offset());
      }
    } else if (to->IsDoubleRegister()) {
      ASSERT(from->IsDoubleStackSlot());
      // TODO(regis): Why is vldr not taking a MemOperand?
      // __ vldr(ToDoubleRegister(to), ToMemOperand(from));
      MemOperand from_operand = ToMemOperand(from);
      __ vldr(ToDoubleRegister(to), from_operand.rn(), from_operand.offset());
    } else {
      // Memory-to-memory double move goes through dbl_scratch.
      ASSERT(to->IsDoubleStackSlot() && from->IsDoubleStackSlot());
      // TODO(regis): Why is vldr not taking a MemOperand?
      // __ vldr(dbl_scratch, ToMemOperand(from));
      MemOperand from_operand = ToMemOperand(from);
      __ vldr(dbl_scratch, from_operand.rn(), from_operand.offset());
      // TODO(regis): Why is vstr not taking a MemOperand?
      // __ vstr(dbl_scratch, ToMemOperand(to));
      MemOperand to_operand = ToMemOperand(to);
      __ vstr(dbl_scratch, to_operand.rn(), to_operand.offset());
    }
  }

  // NOTE(review): assumes the original value of core_scratch is spilled
  // at fp - kPointerSize by earlier code — confirm against the prologue.
  if (destroys_core_scratch) {
    __ ldr(core_scratch, MemOperand(fp, -kPointerSize));
  }

  // If a lazy bailout follows this gap, its safepoint pc must point
  // after the emitted moves.
  LInstruction* next = GetNextInstruction();
  if (next != NULL && next->IsLazyBailout()) {
    int pc = masm()->pc_offset();
    safepoints_.SetPcAfterGap(pc);
  }
}
901 701
902 702
903 void LCodeGen::DoGap(LGap* gap) { 703 void LCodeGen::DoGap(LGap* gap) {
904 for (int i = LGap::FIRST_INNER_POSITION; 704 for (int i = LGap::FIRST_INNER_POSITION;
905 i <= LGap::LAST_INNER_POSITION; 705 i <= LGap::LAST_INNER_POSITION;
906 i++) { 706 i++) {
907 LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i); 707 LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
908 LParallelMove* move = gap->GetParallelMove(inner_pos); 708 LParallelMove* move = gap->GetParallelMove(inner_pos);
909 if (move != NULL) DoParallelMove(move); 709 if (move != NULL) DoParallelMove(move);
(...skipping 49 matching lines...) Expand 10 before | Expand all | Expand 10 after
959 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 759 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
960 break; 760 break;
961 } 761 }
962 case CodeStub::StringCompare: { 762 case CodeStub::StringCompare: {
963 StringCompareStub stub; 763 StringCompareStub stub;
964 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 764 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
965 break; 765 break;
966 } 766 }
967 case CodeStub::TranscendentalCache: { 767 case CodeStub::TranscendentalCache: {
968 __ ldr(r0, MemOperand(sp, 0)); 768 __ ldr(r0, MemOperand(sp, 0));
969 TranscendentalCacheStub stub(instr->transcendental_type()); 769 TranscendentalCacheStub stub(instr->transcendental_type(),
770 TranscendentalCacheStub::TAGGED);
970 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 771 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
971 break; 772 break;
972 } 773 }
973 default: 774 default:
974 UNREACHABLE(); 775 UNREACHABLE();
975 } 776 }
976 } 777 }
977 778
978 779
// No code needs to be emitted for this instruction.
void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  // Nothing to do.
}
982 783
983 784
// Emits code for integer modulus.  Fast paths: repeated subtraction for
// small quotients and a mask for power-of-two divisors; otherwise falls
// back to the generic binary-op stub via deferred code, deoptimizing if
// the operands or the stub result cannot be represented as Smis.
void LCodeGen::DoModI(LModI* instr) {
  class DeferredModI: public LDeferredCode {
   public:
    DeferredModI(LCodeGen* codegen, LModI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredGenericBinaryStub(instr_, Token::MOD);
    }
   private:
    LModI* instr_;
  };
  // These registers hold untagged 32 bit values.
  Register left = ToRegister(instr->InputAt(0));
  Register right = ToRegister(instr->InputAt(1));
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  Label deoptimize, done;
  // Check for x % 0.
  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
    __ tst(right, Operand(right));
    __ b(eq, &deoptimize);
  }

  // Check for (0 % -x) that will produce negative zero.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label ok;
    __ tst(left, Operand(left));
    __ b(ne, &ok);
    __ tst(right, Operand(right));
    __ b(pl, &ok);
    __ b(al, &deoptimize);
    __ bind(&ok);
  }

  // Try a few common cases before using the generic stub.
  Label call_stub;
  const int kUnfolds = 3;
  // Skip if either side is negative.
  __ cmp(left, Operand(0));
  __ cmp(right, Operand(0), NegateCondition(mi));
  __ b(mi, &call_stub);
  // If the right hand side is smaller than the (nonnegative)
  // left hand side, it is the result. Else try a few subtractions
  // of the left hand side.
  __ mov(scratch, left);
  for (int i = 0; i < kUnfolds; i++) {
    // Check if the left hand side is less or equal than the
    // the right hand side.
    __ cmp(scratch, right);
    __ mov(result, scratch, LeaveCC, lt);
    __ b(lt, &done);
    // If not, reduce the left hand side by the right hand
    // side and check again.
    if (i < kUnfolds - 1) __ sub(scratch, scratch, right);
  }

  // Check for power of two on the right hand side.
  __ JumpIfNotPowerOfTwoOrZero(right, scratch, &call_stub);
  // Perform modulo operation (scratch contains right - 1).
  __ and_(result, scratch, Operand(left));

  __ bind(&call_stub);
  // Call the generic stub. The numbers in r0 and r1 have
  // to be tagged to Smis. If that is not possible, deoptimize.
  DeferredModI* deferred = new DeferredModI(this, instr);
  __ TrySmiTag(left, &deoptimize, scratch);
  __ TrySmiTag(right, &deoptimize, scratch);

  __ b(al, deferred->entry());
  __ bind(deferred->exit());

  // If the result in r0 is a Smi, untag it, else deoptimize.
  __ JumpIfNotSmi(result, &deoptimize);
  __ SmiUntag(result);

  __ b(al, &done);
  __ bind(&deoptimize);
  DeoptimizeIf(al, instr->environment());
  __ bind(&done);
}
1065 866
1066 867
1067 void LCodeGen::DoDivI(LDivI* instr) { 868 void LCodeGen::DoDivI(LDivI* instr) {
1068 class DeferredDivI: public LDeferredCode { 869 class DeferredDivI: public LDeferredCode {
1069 public: 870 public:
1070 DeferredDivI(LCodeGen* codegen, LDivI* instr) 871 DeferredDivI(LCodeGen* codegen, LDivI* instr)
1071 : LDeferredCode(codegen), instr_(instr) { } 872 : LDeferredCode(codegen), instr_(instr) { }
1072 virtual void Generate() { 873 virtual void Generate() {
1073 codegen()->DoDeferredGenericBinaryStub(instr_, Token::DIV); 874 codegen()->DoDeferredBinaryOpStub(instr_, Token::DIV);
1074 } 875 }
1075 private: 876 private:
1076 LDivI* instr_; 877 LDivI* instr_;
1077 }; 878 };
1078 879
1079 const Register left = ToRegister(instr->InputAt(0)); 880 const Register left = ToRegister(instr->InputAt(0));
1080 const Register right = ToRegister(instr->InputAt(1)); 881 const Register right = ToRegister(instr->InputAt(1));
1081 const Register scratch = scratch0(); 882 const Register scratch = scratch0();
1082 const Register result = ToRegister(instr->result()); 883 const Register result = ToRegister(instr->result());
1083 884
(...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after
1116 __ cmp(right, Operand(2)); 917 __ cmp(right, Operand(2));
1117 __ tst(left, Operand(1), eq); 918 __ tst(left, Operand(1), eq);
1118 __ mov(result, Operand(left, ASR, 1), LeaveCC, eq); 919 __ mov(result, Operand(left, ASR, 1), LeaveCC, eq);
1119 __ b(eq, &done); 920 __ b(eq, &done);
1120 921
1121 __ cmp(right, Operand(4)); 922 __ cmp(right, Operand(4));
1122 __ tst(left, Operand(3), eq); 923 __ tst(left, Operand(3), eq);
1123 __ mov(result, Operand(left, ASR, 2), LeaveCC, eq); 924 __ mov(result, Operand(left, ASR, 2), LeaveCC, eq);
1124 __ b(eq, &done); 925 __ b(eq, &done);
1125 926
1126 // Call the generic stub. The numbers in r0 and r1 have 927 // Call the stub. The numbers in r0 and r1 have
1127 // to be tagged to Smis. If that is not possible, deoptimize. 928 // to be tagged to Smis. If that is not possible, deoptimize.
1128 DeferredDivI* deferred = new DeferredDivI(this, instr); 929 DeferredDivI* deferred = new DeferredDivI(this, instr);
1129 930
1130 __ TrySmiTag(left, &deoptimize, scratch); 931 __ TrySmiTag(left, &deoptimize, scratch);
1131 __ TrySmiTag(right, &deoptimize, scratch); 932 __ TrySmiTag(right, &deoptimize, scratch);
1132 933
1133 __ b(al, deferred->entry()); 934 __ b(al, deferred->entry());
1134 __ bind(deferred->exit()); 935 __ bind(deferred->exit());
1135 936
1136 // If the result in r0 is a Smi, untag it, else deoptimize. 937 // If the result in r0 is a Smi, untag it, else deoptimize.
1137 __ JumpIfNotSmi(result, &deoptimize); 938 __ JumpIfNotSmi(result, &deoptimize);
1138 __ SmiUntag(result); 939 __ SmiUntag(result);
1139 __ b(&done); 940 __ b(&done);
1140 941
1141 __ bind(&deoptimize); 942 __ bind(&deoptimize);
1142 DeoptimizeIf(al, instr->environment()); 943 DeoptimizeIf(al, instr->environment());
1143 __ bind(&done); 944 __ bind(&done);
1144 } 945 }
1145 946
1146 947
1147 template<int T> 948 template<int T>
1148 void LCodeGen::DoDeferredGenericBinaryStub(LTemplateInstruction<1, 2, T>* instr, 949 void LCodeGen::DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr,
1149 Token::Value op) { 950 Token::Value op) {
1150 Register left = ToRegister(instr->InputAt(0)); 951 Register left = ToRegister(instr->InputAt(0));
1151 Register right = ToRegister(instr->InputAt(1)); 952 Register right = ToRegister(instr->InputAt(1));
1152 953
1153 __ PushSafepointRegistersAndDoubles(); 954 __ PushSafepointRegistersAndDoubles();
1154 GenericBinaryOpStub stub(op, OVERWRITE_LEFT, left, right); 955 // Move left to r1 and right to r0 for the stub call.
956 if (left.is(r1)) {
957 __ Move(r0, right);
958 } else if (left.is(r0) && right.is(r1)) {
959 __ Swap(r0, r1, r2);
960 } else if (left.is(r0)) {
961 ASSERT(!right.is(r1));
962 __ mov(r1, r0);
963 __ mov(r0, right);
964 } else {
965 ASSERT(!left.is(r0) && !right.is(r0));
966 __ mov(r0, right);
967 __ mov(r1, left);
968 }
969 TypeRecordingBinaryOpStub stub(op, OVERWRITE_LEFT);
1155 __ CallStub(&stub); 970 __ CallStub(&stub);
1156 RecordSafepointWithRegistersAndDoubles(instr->pointer_map(), 971 RecordSafepointWithRegistersAndDoubles(instr->pointer_map(),
1157 0, 972 0,
1158 Safepoint::kNoDeoptimizationIndex); 973 Safepoint::kNoDeoptimizationIndex);
1159 // Overwrite the stored value of r0 with the result of the stub. 974 // Overwrite the stored value of r0 with the result of the stub.
1160 __ StoreToSafepointRegistersAndDoublesSlot(r0); 975 __ StoreToSafepointRegistersAndDoublesSlot(r0, r0);
1161 __ PopSafepointRegistersAndDoubles(); 976 __ PopSafepointRegistersAndDoubles();
1162 } 977 }
1163 978
1164 979
1165 void LCodeGen::DoMulI(LMulI* instr) { 980 void LCodeGen::DoMulI(LMulI* instr) {
1166 Register scratch = scratch0(); 981 Register scratch = scratch0();
1167 Register left = ToRegister(instr->InputAt(0)); 982 Register left = ToRegister(instr->InputAt(0));
1168 Register right = EmitLoadRegister(instr->InputAt(1), scratch); 983 Register right = EmitLoadRegister(instr->InputAt(1), scratch);
1169 984
1170 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero) && 985 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero) &&
(...skipping 144 matching lines...) Expand 10 before | Expand all | Expand 10 after
1315 } 1130 }
1316 1131
1317 1132
1318 void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) { 1133 void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
1319 Register result = ToRegister(instr->result()); 1134 Register result = ToRegister(instr->result());
1320 Register array = ToRegister(instr->InputAt(0)); 1135 Register array = ToRegister(instr->InputAt(0));
1321 __ ldr(result, FieldMemOperand(array, JSArray::kLengthOffset)); 1136 __ ldr(result, FieldMemOperand(array, JSArray::kLengthOffset));
1322 } 1137 }
1323 1138
1324 1139
1140 void LCodeGen::DoPixelArrayLength(LPixelArrayLength* instr) {
1141 Register result = ToRegister(instr->result());
1142 Register array = ToRegister(instr->InputAt(0));
1143 __ ldr(result, FieldMemOperand(array, PixelArray::kLengthOffset));
1144 }
1145
1146
1325 void LCodeGen::DoFixedArrayLength(LFixedArrayLength* instr) { 1147 void LCodeGen::DoFixedArrayLength(LFixedArrayLength* instr) {
1326 Register result = ToRegister(instr->result()); 1148 Register result = ToRegister(instr->result());
1327 Register array = ToRegister(instr->InputAt(0)); 1149 Register array = ToRegister(instr->InputAt(0));
1328 __ ldr(result, FieldMemOperand(array, FixedArray::kLengthOffset)); 1150 __ ldr(result, FieldMemOperand(array, FixedArray::kLengthOffset));
1329 } 1151 }
1330 1152
1331 1153
1332 void LCodeGen::DoValueOf(LValueOf* instr) { 1154 void LCodeGen::DoValueOf(LValueOf* instr) {
1333 Register input = ToRegister(instr->InputAt(0)); 1155 Register input = ToRegister(instr->InputAt(0));
1334 Register result = ToRegister(instr->result()); 1156 Register result = ToRegister(instr->result());
(...skipping 64 matching lines...) Expand 10 before | Expand all | Expand 10 after
1399 break; 1221 break;
1400 case Token::MOD: { 1222 case Token::MOD: {
1401 // Save r0-r3 on the stack. 1223 // Save r0-r3 on the stack.
1402 __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit()); 1224 __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
1403 1225
1404 __ PrepareCallCFunction(4, scratch0()); 1226 __ PrepareCallCFunction(4, scratch0());
1405 __ vmov(r0, r1, left); 1227 __ vmov(r0, r1, left);
1406 __ vmov(r2, r3, right); 1228 __ vmov(r2, r3, right);
1407 __ CallCFunction(ExternalReference::double_fp_operation(Token::MOD), 4); 1229 __ CallCFunction(ExternalReference::double_fp_operation(Token::MOD), 4);
1408 // Move the result in the double result register. 1230 // Move the result in the double result register.
1409 __ vmov(ToDoubleRegister(instr->result()), r0, r1); 1231 __ GetCFunctionDoubleResult(ToDoubleRegister(instr->result()));
1410 1232
1411 // Restore r0-r3. 1233 // Restore r0-r3.
1412 __ ldm(ia_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit()); 1234 __ ldm(ia_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
1413 break; 1235 break;
1414 } 1236 }
1415 default: 1237 default:
1416 UNREACHABLE(); 1238 UNREACHABLE();
1417 break; 1239 break;
1418 } 1240 }
1419 } 1241 }
1420 1242
1421 1243
1422 void LCodeGen::DoArithmeticT(LArithmeticT* instr) { 1244 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
1423 ASSERT(ToRegister(instr->InputAt(0)).is(r1)); 1245 ASSERT(ToRegister(instr->InputAt(0)).is(r1));
1424 ASSERT(ToRegister(instr->InputAt(1)).is(r0)); 1246 ASSERT(ToRegister(instr->InputAt(1)).is(r0));
1425 ASSERT(ToRegister(instr->result()).is(r0)); 1247 ASSERT(ToRegister(instr->result()).is(r0));
1426 1248
1427 // TODO(regis): Implement TypeRecordingBinaryOpStub and replace current 1249 TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE);
1428 // GenericBinaryOpStub:
1429 // TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE);
1430 GenericBinaryOpStub stub(instr->op(), NO_OVERWRITE, r1, r0);
1431 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 1250 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1432 } 1251 }
1433 1252
1434 1253
1435 int LCodeGen::GetNextEmittedBlock(int block) { 1254 int LCodeGen::GetNextEmittedBlock(int block) {
1436 for (int i = block + 1; i < graph()->blocks()->length(); ++i) { 1255 for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
1437 LLabel* label = chunk_->GetLabel(i); 1256 LLabel* label = chunk_->GetLabel(i);
1438 if (!label->HasReplacement()) return i; 1257 if (!label->HasReplacement()) return i;
1439 } 1258 }
1440 return -1; 1259 return -1;
(...skipping 157 matching lines...) Expand 10 before | Expand all | Expand 10 after
1598 case Token::IN: 1417 case Token::IN:
1599 case Token::INSTANCEOF: 1418 case Token::INSTANCEOF:
1600 default: 1419 default:
1601 UNREACHABLE(); 1420 UNREACHABLE();
1602 } 1421 }
1603 return cond; 1422 return cond;
1604 } 1423 }
1605 1424
1606 1425
1607 void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) { 1426 void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
1608 __ cmp(ToRegister(left), ToOperand(right)); 1427 __ cmp(ToRegister(left), ToRegister(right));
1609 } 1428 }
1610 1429
1611 1430
1612 void LCodeGen::DoCmpID(LCmpID* instr) { 1431 void LCodeGen::DoCmpID(LCmpID* instr) {
1613 LOperand* left = instr->InputAt(0); 1432 LOperand* left = instr->InputAt(0);
1614 LOperand* right = instr->InputAt(1); 1433 LOperand* right = instr->InputAt(1);
1615 LOperand* result = instr->result(); 1434 LOperand* result = instr->result();
1616 Register scratch = scratch0(); 1435 Register scratch = scratch0();
1617 1436
1618 Label unordered, done; 1437 Label unordered, done;
1619 if (instr->is_double()) { 1438 if (instr->is_double()) {
1620 // Compare left and right as doubles and load the 1439 // Compare left and right as doubles and load the
1621 // resulting flags into the normal status register. 1440 // resulting flags into the normal status register.
1622 __ vcmp(ToDoubleRegister(left), ToDoubleRegister(right)); 1441 __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
1623 __ vmrs(pc);
1624 // If a NaN is involved, i.e. the result is unordered (V set), 1442 // If a NaN is involved, i.e. the result is unordered (V set),
1625 // jump to unordered to return false. 1443 // jump to unordered to return false.
1626 __ b(vs, &unordered); 1444 __ b(vs, &unordered);
1627 } else { 1445 } else {
1628 EmitCmpI(left, right); 1446 EmitCmpI(left, right);
1629 } 1447 }
1630 1448
1631 Condition cc = TokenToCondition(instr->op(), instr->is_double()); 1449 Condition cc = TokenToCondition(instr->op(), instr->is_double());
1632 __ LoadRoot(ToRegister(result), Heap::kTrueValueRootIndex); 1450 __ LoadRoot(ToRegister(result), Heap::kTrueValueRootIndex);
1633 __ b(cc, &done); 1451 __ b(cc, &done);
1634 1452
1635 __ bind(&unordered); 1453 __ bind(&unordered);
1636 __ LoadRoot(ToRegister(result), Heap::kFalseValueRootIndex); 1454 __ LoadRoot(ToRegister(result), Heap::kFalseValueRootIndex);
1637 __ bind(&done); 1455 __ bind(&done);
1638 } 1456 }
1639 1457
1640 1458
1641 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) { 1459 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
1642 LOperand* left = instr->InputAt(0); 1460 LOperand* left = instr->InputAt(0);
1643 LOperand* right = instr->InputAt(1); 1461 LOperand* right = instr->InputAt(1);
1644 int false_block = chunk_->LookupDestination(instr->false_block_id()); 1462 int false_block = chunk_->LookupDestination(instr->false_block_id());
1645 int true_block = chunk_->LookupDestination(instr->true_block_id()); 1463 int true_block = chunk_->LookupDestination(instr->true_block_id());
1646 1464
1647 if (instr->is_double()) { 1465 if (instr->is_double()) {
1648 // Compare left and right as doubles and load the 1466 // Compare left and right as doubles and load the
1649 // resulting flags into the normal status register. 1467 // resulting flags into the normal status register.
1650 __ vcmp(ToDoubleRegister(left), ToDoubleRegister(right)); 1468 __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
1651 __ vmrs(pc);
1652 // If a NaN is involved, i.e. the result is unordered (V set), 1469 // If a NaN is involved, i.e. the result is unordered (V set),
1653 // jump to false block label. 1470 // jump to false block label.
1654 __ b(vs, chunk_->GetAssemblyLabel(false_block)); 1471 __ b(vs, chunk_->GetAssemblyLabel(false_block));
1655 } else { 1472 } else {
1656 EmitCmpI(left, right); 1473 EmitCmpI(left, right);
1657 } 1474 }
1658 1475
1659 Condition cc = TokenToCondition(instr->op(), instr->is_double()); 1476 Condition cc = TokenToCondition(instr->op(), instr->is_double());
1660 EmitBranch(true_block, false_block, cc); 1477 EmitBranch(true_block, false_block, cc);
1661 } 1478 }
(...skipping 222 matching lines...) Expand 10 before | Expand all | Expand 10 after
1884 Label* false_label = chunk_->GetAssemblyLabel(false_block); 1701 Label* false_label = chunk_->GetAssemblyLabel(false_block);
1885 1702
1886 __ tst(input, Operand(kSmiTagMask)); 1703 __ tst(input, Operand(kSmiTagMask));
1887 __ b(eq, false_label); 1704 __ b(eq, false_label);
1888 1705
1889 __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen())); 1706 __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
1890 EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen())); 1707 EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
1891 } 1708 }
1892 1709
1893 1710
1711 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
1712 Register input = ToRegister(instr->InputAt(0));
1713 Register result = ToRegister(instr->result());
1714 Register scratch = scratch0();
1715
1716 __ ldr(scratch, FieldMemOperand(input, String::kHashFieldOffset));
1717 __ IndexFromHash(scratch, result);
1718 }
1719
1720
1894 void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) { 1721 void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
1895 Abort("DoHasCachedArrayIndex unimplemented."); 1722 Register input = ToRegister(instr->InputAt(0));
1723 Register result = ToRegister(instr->result());
1724 Register scratch = scratch0();
1725
1726 ASSERT(instr->hydrogen()->value()->representation().IsTagged());
1727 __ ldr(scratch,
1728 FieldMemOperand(input, String::kHashFieldOffset));
1729 __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
1730 __ LoadRoot(result, Heap::kTrueValueRootIndex, eq);
1731 __ LoadRoot(result, Heap::kFalseValueRootIndex, ne);
1896 } 1732 }
1897 1733
1898 1734
1899 void LCodeGen::DoHasCachedArrayIndexAndBranch( 1735 void LCodeGen::DoHasCachedArrayIndexAndBranch(
1900 LHasCachedArrayIndexAndBranch* instr) { 1736 LHasCachedArrayIndexAndBranch* instr) {
1901 Abort("DoHasCachedArrayIndexAndBranch unimplemented."); 1737 Register input = ToRegister(instr->InputAt(0));
1738 Register scratch = scratch0();
1739
1740 int true_block = chunk_->LookupDestination(instr->true_block_id());
1741 int false_block = chunk_->LookupDestination(instr->false_block_id());
1742
1743 __ ldr(scratch,
1744 FieldMemOperand(input, String::kHashFieldOffset));
1745 __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
1746 EmitBranch(true_block, false_block, eq);
1902 } 1747 }
1903 1748
1904 1749
1905 // Branches to a label or falls through with the answer in flags. Trashes 1750 // Branches to a label or falls through with the answer in flags. Trashes
1906 // the temp registers, but not the input. Only input and temp2 may alias. 1751 // the temp registers, but not the input. Only input and temp2 may alias.
1907 void LCodeGen::EmitClassOfTest(Label* is_true, 1752 void LCodeGen::EmitClassOfTest(Label* is_true,
1908 Label* is_false, 1753 Label* is_false,
1909 Handle<String>class_name, 1754 Handle<String>class_name,
1910 Register input, 1755 Register input,
1911 Register temp, 1756 Register temp,
(...skipping 222 matching lines...) Expand 10 before | Expand all | Expand 10 after
2134 // offset to the location of the map check. 1979 // offset to the location of the map check.
2135 Register temp = ToRegister(instr->TempAt(0)); 1980 Register temp = ToRegister(instr->TempAt(0));
2136 ASSERT(temp.is(r4)); 1981 ASSERT(temp.is(r4));
2137 __ mov(InstanceofStub::right(), Operand(instr->function())); 1982 __ mov(InstanceofStub::right(), Operand(instr->function()));
2138 static const int kAdditionalDelta = 4; 1983 static const int kAdditionalDelta = 4;
2139 int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta; 1984 int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
2140 Label before_push_delta; 1985 Label before_push_delta;
2141 __ bind(&before_push_delta); 1986 __ bind(&before_push_delta);
2142 __ BlockConstPoolFor(kAdditionalDelta); 1987 __ BlockConstPoolFor(kAdditionalDelta);
2143 __ mov(temp, Operand(delta * kPointerSize)); 1988 __ mov(temp, Operand(delta * kPointerSize));
2144 __ StoreToSafepointRegisterSlot(temp); 1989 __ StoreToSafepointRegisterSlot(temp, temp);
2145 __ Call(stub.GetCode(), RelocInfo::CODE_TARGET); 1990 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2146 ASSERT_EQ(kAdditionalDelta,
2147 masm_->InstructionsGeneratedSince(&before_push_delta));
2148 RecordSafepointWithRegisters(
2149 instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
2150 // Put the result value into the result register slot and 1991 // Put the result value into the result register slot and
2151 // restore all registers. 1992 // restore all registers.
2152 __ StoreToSafepointRegisterSlot(result); 1993 __ StoreToSafepointRegisterSlot(result, result);
2153 1994
2154 __ PopSafepointRegisters(); 1995 __ PopSafepointRegisters();
2155 } 1996 }
2156 1997
2157 1998
2158 static Condition ComputeCompareCondition(Token::Value op) { 1999 static Condition ComputeCompareCondition(Token::Value op) {
2159 switch (op) { 2000 switch (op) {
2160 case Token::EQ_STRICT: 2001 case Token::EQ_STRICT:
2161 case Token::EQ: 2002 case Token::EQ:
2162 return eq; 2003 return eq;
(...skipping 10 matching lines...) Expand all
2173 return kNoCondition; 2014 return kNoCondition;
2174 } 2015 }
2175 } 2016 }
2176 2017
2177 2018
2178 void LCodeGen::DoCmpT(LCmpT* instr) { 2019 void LCodeGen::DoCmpT(LCmpT* instr) {
2179 Token::Value op = instr->op(); 2020 Token::Value op = instr->op();
2180 2021
2181 Handle<Code> ic = CompareIC::GetUninitialized(op); 2022 Handle<Code> ic = CompareIC::GetUninitialized(op);
2182 CallCode(ic, RelocInfo::CODE_TARGET, instr); 2023 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2024 __ cmp(r0, Operand(0)); // This instruction also signals no smi code inlined.
2183 2025
2184 Condition condition = ComputeCompareCondition(op); 2026 Condition condition = ComputeCompareCondition(op);
2185 if (op == Token::GT || op == Token::LTE) { 2027 if (op == Token::GT || op == Token::LTE) {
2186 condition = ReverseCondition(condition); 2028 condition = ReverseCondition(condition);
2187 } 2029 }
2188 __ cmp(r0, Operand(0));
2189 __ LoadRoot(ToRegister(instr->result()), 2030 __ LoadRoot(ToRegister(instr->result()),
2190 Heap::kTrueValueRootIndex, 2031 Heap::kTrueValueRootIndex,
2191 condition); 2032 condition);
2192 __ LoadRoot(ToRegister(instr->result()), 2033 __ LoadRoot(ToRegister(instr->result()),
2193 Heap::kFalseValueRootIndex, 2034 Heap::kFalseValueRootIndex,
2194 NegateCondition(condition)); 2035 NegateCondition(condition));
2195 } 2036 }
2196 2037
2197 2038
2198 void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) { 2039 void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) {
2199 Abort("DoCmpTAndBranch unimplemented."); 2040 Token::Value op = instr->op();
2041 int true_block = chunk_->LookupDestination(instr->true_block_id());
2042 int false_block = chunk_->LookupDestination(instr->false_block_id());
2043
2044 Handle<Code> ic = CompareIC::GetUninitialized(op);
2045 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2046
2047 // The compare stub expects compare condition and the input operands
2048 // reversed for GT and LTE.
2049 Condition condition = ComputeCompareCondition(op);
2050 if (op == Token::GT || op == Token::LTE) {
2051 condition = ReverseCondition(condition);
2052 }
2053 __ cmp(r0, Operand(0));
2054 EmitBranch(true_block, false_block, condition);
2200 } 2055 }
2201 2056
2202 2057
2203 void LCodeGen::DoReturn(LReturn* instr) { 2058 void LCodeGen::DoReturn(LReturn* instr) {
2204 if (FLAG_trace) { 2059 if (FLAG_trace) {
2205 // Push the return value on the stack as the parameter. 2060 // Push the return value on the stack as the parameter.
2206 // Runtime::TraceExit returns its parameter in r0. 2061 // Runtime::TraceExit returns its parameter in r0.
2207 __ push(r0); 2062 __ push(r0);
2208 __ CallRuntime(Runtime::kTraceExit, 1); 2063 __ CallRuntime(Runtime::kTraceExit, 1);
2209 } 2064 }
(...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after
2248 } 2103 }
2249 2104
2250 // Store the value. 2105 // Store the value.
2251 __ str(value, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset)); 2106 __ str(value, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
2252 } 2107 }
2253 2108
2254 2109
2255 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { 2110 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2256 Register context = ToRegister(instr->context()); 2111 Register context = ToRegister(instr->context());
2257 Register result = ToRegister(instr->result()); 2112 Register result = ToRegister(instr->result());
2258 __ ldr(result, 2113 __ ldr(result, ContextOperand(context, instr->slot_index()));
2259 MemOperand(context, Context::SlotOffset(Context::FCONTEXT_INDEX)));
2260 __ ldr(result, ContextOperand(result, instr->slot_index()));
2261 } 2114 }
2262 2115
2263 2116
2264 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { 2117 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2265 Register context = ToRegister(instr->context()); 2118 Register context = ToRegister(instr->context());
2266 Register value = ToRegister(instr->value()); 2119 Register value = ToRegister(instr->value());
2267 __ ldr(context,
2268 MemOperand(context, Context::SlotOffset(Context::FCONTEXT_INDEX)));
2269 __ str(value, ContextOperand(context, instr->slot_index())); 2120 __ str(value, ContextOperand(context, instr->slot_index()));
2270 if (instr->needs_write_barrier()) { 2121 if (instr->needs_write_barrier()) {
2271 int offset = Context::SlotOffset(instr->slot_index()); 2122 int offset = Context::SlotOffset(instr->slot_index());
2272 __ RecordWrite(context, Operand(offset), value, scratch0()); 2123 __ RecordWrite(context, Operand(offset), value, scratch0());
2273 } 2124 }
2274 } 2125 }
2275 2126
2276 2127
2277 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { 2128 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2278 Register object = ToRegister(instr->InputAt(0)); 2129 Register object = ToRegister(instr->InputAt(0));
(...skipping 57 matching lines...) Expand 10 before | Expand all | Expand 10 after
2336 // in initial map. 2187 // in initial map.
2337 __ bind(&non_instance); 2188 __ bind(&non_instance);
2338 __ ldr(result, FieldMemOperand(result, Map::kConstructorOffset)); 2189 __ ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
2339 2190
2340 // All done. 2191 // All done.
2341 __ bind(&done); 2192 __ bind(&done);
2342 } 2193 }
2343 2194
2344 2195
2345 void LCodeGen::DoLoadElements(LLoadElements* instr) { 2196 void LCodeGen::DoLoadElements(LLoadElements* instr) {
2346 ASSERT(instr->result()->Equals(instr->InputAt(0))); 2197 Register result = ToRegister(instr->result());
2347 Register reg = ToRegister(instr->InputAt(0)); 2198 Register input = ToRegister(instr->InputAt(0));
2348 Register scratch = scratch0(); 2199 Register scratch = scratch0();
2349 2200
2350 __ ldr(reg, FieldMemOperand(reg, JSObject::kElementsOffset)); 2201 __ ldr(result, FieldMemOperand(input, JSObject::kElementsOffset));
2351 if (FLAG_debug_code) { 2202 if (FLAG_debug_code) {
2352 Label done; 2203 Label done;
2353 __ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset)); 2204 __ ldr(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
2354 __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); 2205 __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
2355 __ cmp(scratch, ip); 2206 __ cmp(scratch, ip);
2356 __ b(eq, &done); 2207 __ b(eq, &done);
2208 __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
2209 __ cmp(scratch, ip);
2210 __ b(eq, &done);
2357 __ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex); 2211 __ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
2358 __ cmp(scratch, ip); 2212 __ cmp(scratch, ip);
2359 __ Check(eq, "Check for fast elements failed."); 2213 __ Check(eq, "Check for fast elements failed.");
2360 __ bind(&done); 2214 __ bind(&done);
2361 } 2215 }
2362 } 2216 }
2363 2217
2364 2218
2219 void LCodeGen::DoLoadPixelArrayExternalPointer(
2220 LLoadPixelArrayExternalPointer* instr) {
2221 Register to_reg = ToRegister(instr->result());
2222 Register from_reg = ToRegister(instr->InputAt(0));
2223 __ ldr(to_reg, FieldMemOperand(from_reg, PixelArray::kExternalPointerOffset));
2224 }
2225
2226
2365 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) { 2227 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
2366 Register arguments = ToRegister(instr->arguments()); 2228 Register arguments = ToRegister(instr->arguments());
2367 Register length = ToRegister(instr->length()); 2229 Register length = ToRegister(instr->length());
2368 Register index = ToRegister(instr->index()); 2230 Register index = ToRegister(instr->index());
2369 Register result = ToRegister(instr->result()); 2231 Register result = ToRegister(instr->result());
2370 2232
2371 // Bailout index is not a valid argument index. Use unsigned check to get 2233 // Bailout index is not a valid argument index. Use unsigned check to get
2372 // negative check for free. 2234 // negative check for free.
2373 __ sub(length, length, index, SetCC); 2235 __ sub(length, length, index, SetCC);
2374 DeoptimizeIf(ls, instr->environment()); 2236 DeoptimizeIf(ls, instr->environment());
(...skipping 16 matching lines...) Expand all
2391 __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2)); 2253 __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
2392 __ ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize)); 2254 __ ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize));
2393 2255
2394 // Check for the hole value. 2256 // Check for the hole value.
2395 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); 2257 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
2396 __ cmp(result, scratch); 2258 __ cmp(result, scratch);
2397 DeoptimizeIf(eq, instr->environment()); 2259 DeoptimizeIf(eq, instr->environment());
2398 } 2260 }
2399 2261
2400 2262
2263 void LCodeGen::DoLoadPixelArrayElement(LLoadPixelArrayElement* instr) {
2264 Register external_elements = ToRegister(instr->external_pointer());
2265 Register key = ToRegister(instr->key());
2266 Register result = ToRegister(instr->result());
2267
2268 // Load the result.
2269 __ ldrb(result, MemOperand(external_elements, key));
2270 }
2271
2272
2401 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { 2273 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
2402 ASSERT(ToRegister(instr->object()).is(r1)); 2274 ASSERT(ToRegister(instr->object()).is(r1));
2403 ASSERT(ToRegister(instr->key()).is(r0)); 2275 ASSERT(ToRegister(instr->key()).is(r0));
2404 2276
2405 Handle<Code> ic(Isolate::Current()->builtins()-> 2277 Handle<Code> ic(Isolate::Current()->builtins()->
2406 builtin(Builtins::KeyedLoadIC_Initialize)); 2278 builtin(Builtins::KeyedLoadIC_Initialize));
2407 CallCode(ic, RelocInfo::CODE_TARGET, instr); 2279 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2408 } 2280 }
2409 2281
2410 2282
(...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after
2443 __ SmiUntag(result); 2315 __ SmiUntag(result);
2444 2316
2445 // Argument length is in result register. 2317 // Argument length is in result register.
2446 __ bind(&done); 2318 __ bind(&done);
2447 } 2319 }
2448 2320
2449 2321
2450 void LCodeGen::DoApplyArguments(LApplyArguments* instr) { 2322 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
2451 Register receiver = ToRegister(instr->receiver()); 2323 Register receiver = ToRegister(instr->receiver());
2452 Register function = ToRegister(instr->function()); 2324 Register function = ToRegister(instr->function());
2325 Register length = ToRegister(instr->length());
2326 Register elements = ToRegister(instr->elements());
2453 Register scratch = scratch0(); 2327 Register scratch = scratch0();
2454 2328 ASSERT(receiver.is(r0)); // Used for parameter count.
2455 ASSERT(receiver.is(r0)); 2329 ASSERT(function.is(r1)); // Required by InvokeFunction.
2456 ASSERT(function.is(r1));
2457 ASSERT(ToRegister(instr->result()).is(r0)); 2330 ASSERT(ToRegister(instr->result()).is(r0));
2458 2331
2459 // If the receiver is null or undefined, we have to pass the 2332 // If the receiver is null or undefined, we have to pass the global object
2460 // global object as a receiver. 2333 // as a receiver.
2461 Label global_receiver, receiver_ok; 2334 Label global_object, receiver_ok;
2462 __ LoadRoot(scratch, Heap::kNullValueRootIndex); 2335 __ LoadRoot(scratch, Heap::kNullValueRootIndex);
2463 __ cmp(receiver, scratch); 2336 __ cmp(receiver, scratch);
2464 __ b(eq, &global_receiver); 2337 __ b(eq, &global_object);
2465 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); 2338 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
2466 __ cmp(receiver, scratch); 2339 __ cmp(receiver, scratch);
2467 __ b(ne, &receiver_ok); 2340 __ b(eq, &global_object);
2468 __ bind(&global_receiver); 2341
2342 // Deoptimize if the receiver is not a JS object.
2343 __ tst(receiver, Operand(kSmiTagMask));
2344 DeoptimizeIf(eq, instr->environment());
2345 __ CompareObjectType(receiver, scratch, scratch, FIRST_JS_OBJECT_TYPE);
2346 DeoptimizeIf(lo, instr->environment());
2347 __ jmp(&receiver_ok);
2348
2349 __ bind(&global_object);
2469 __ ldr(receiver, GlobalObjectOperand()); 2350 __ ldr(receiver, GlobalObjectOperand());
2470 __ bind(&receiver_ok); 2351 __ bind(&receiver_ok);
2471 2352
2472 Register length = ToRegister(instr->length());
2473 Register elements = ToRegister(instr->elements());
2474
2475 Label invoke;
2476
2477 // Copy the arguments to this function possibly from the 2353 // Copy the arguments to this function possibly from the
2478 // adaptor frame below it. 2354 // adaptor frame below it.
2479 const uint32_t kArgumentsLimit = 1 * KB; 2355 const uint32_t kArgumentsLimit = 1 * KB;
2480 __ cmp(length, Operand(kArgumentsLimit)); 2356 __ cmp(length, Operand(kArgumentsLimit));
2481 DeoptimizeIf(hi, instr->environment()); 2357 DeoptimizeIf(hi, instr->environment());
2482 2358
2483 // Push the receiver and use the register to keep the original 2359 // Push the receiver and use the register to keep the original
2484 // number of arguments. 2360 // number of arguments.
2485 __ push(receiver); 2361 __ push(receiver);
2486 __ mov(receiver, length); 2362 __ mov(receiver, length);
2487 // The arguments are at a one pointer size offset from elements. 2363 // The arguments are at a one pointer size offset from elements.
2488 __ add(elements, elements, Operand(1 * kPointerSize)); 2364 __ add(elements, elements, Operand(1 * kPointerSize));
2489 2365
2490 // Loop through the arguments pushing them onto the execution 2366 // Loop through the arguments pushing them onto the execution
2491 // stack. 2367 // stack.
2492 Label loop; 2368 Label invoke, loop;
2493 // length is a small non-negative integer, due to the test above. 2369 // length is a small non-negative integer, due to the test above.
2494 __ tst(length, Operand(length)); 2370 __ tst(length, Operand(length));
2495 __ b(eq, &invoke); 2371 __ b(eq, &invoke);
2496 __ bind(&loop); 2372 __ bind(&loop);
2497 __ ldr(scratch, MemOperand(elements, length, LSL, 2)); 2373 __ ldr(scratch, MemOperand(elements, length, LSL, 2));
2498 __ push(scratch); 2374 __ push(scratch);
2499 __ sub(length, length, Operand(1), SetCC); 2375 __ sub(length, length, Operand(1), SetCC);
2500 __ b(ne, &loop); 2376 __ b(ne, &loop);
2501 2377
2502 __ bind(&invoke); 2378 __ bind(&invoke);
2503 ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment()); 2379 ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
2504 LPointerMap* pointers = instr->pointer_map(); 2380 LPointerMap* pointers = instr->pointer_map();
2505 LEnvironment* env = instr->deoptimization_environment(); 2381 LEnvironment* env = instr->deoptimization_environment();
2506 RecordPosition(pointers->position()); 2382 RecordPosition(pointers->position());
2507 RegisterEnvironmentForDeoptimization(env); 2383 RegisterEnvironmentForDeoptimization(env);
2508 SafepointGenerator safepoint_generator(this, 2384 SafepointGenerator safepoint_generator(this,
2509 pointers, 2385 pointers,
2510 env->deoptimization_index()); 2386 env->deoptimization_index());
2511 // The number of arguments is stored in receiver which is r0, as expected 2387 // The number of arguments is stored in receiver which is r0, as expected
2512 // by InvokeFunction. 2388 // by InvokeFunction.
2513 v8::internal::ParameterCount actual(receiver); 2389 v8::internal::ParameterCount actual(receiver);
2514 __ InvokeFunction(function, actual, CALL_FUNCTION, &safepoint_generator); 2390 __ InvokeFunction(function, actual, CALL_FUNCTION, &safepoint_generator);
2391 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2515 } 2392 }
2516 2393
2517 2394
2518 void LCodeGen::DoPushArgument(LPushArgument* instr) { 2395 void LCodeGen::DoPushArgument(LPushArgument* instr) {
2519 LOperand* argument = instr->InputAt(0); 2396 LOperand* argument = instr->InputAt(0);
2520 if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) { 2397 if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
2521 Abort("DoPushArgument not implemented for double type."); 2398 Abort("DoPushArgument not implemented for double type.");
2522 } else { 2399 } else {
2523 Register argument_reg = EmitLoadRegister(argument, ip); 2400 Register argument_reg = EmitLoadRegister(argument, ip);
2524 __ push(argument_reg); 2401 __ push(argument_reg);
(...skipping 112 matching lines...) Expand 10 before | Expand all | Expand 10 after
2637 2514
2638 // Slow case: Call the runtime system to do the number allocation. 2515 // Slow case: Call the runtime system to do the number allocation.
2639 __ bind(&slow); 2516 __ bind(&slow);
2640 2517
2641 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); 2518 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
2642 RecordSafepointWithRegisters( 2519 RecordSafepointWithRegisters(
2643 instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex); 2520 instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
2644 // Set the pointer to the new heap number in tmp. 2521 // Set the pointer to the new heap number in tmp.
2645 if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0)); 2522 if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
2646 // Restore input_reg after call to runtime. 2523 // Restore input_reg after call to runtime.
2647 __ LoadFromSafepointRegisterSlot(input); 2524 __ LoadFromSafepointRegisterSlot(input, input);
2648 __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); 2525 __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
2649 2526
2650 __ bind(&allocated); 2527 __ bind(&allocated);
2651 // exponent: floating point exponent value. 2528 // exponent: floating point exponent value.
2652 // tmp1: allocated heap number. 2529 // tmp1: allocated heap number.
2653 __ bic(exponent, exponent, Operand(HeapNumber::kSignMask)); 2530 __ bic(exponent, exponent, Operand(HeapNumber::kSignMask));
2654 __ str(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset)); 2531 __ str(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
2655 __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset)); 2532 __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
2656 __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset)); 2533 __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
2657 2534
2658 __ str(tmp1, masm()->SafepointRegisterSlot(input)); 2535 __ StoreToSafepointRegisterSlot(tmp1, input);
2659 __ PopSafepointRegisters(); 2536 __ PopSafepointRegisters();
2660 2537
2661 __ bind(&done); 2538 __ bind(&done);
2662 } 2539 }
2663 2540
2664 2541
2665 void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) { 2542 void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
2666 Register input = ToRegister(instr->InputAt(0)); 2543 Register input = ToRegister(instr->InputAt(0));
2667 __ cmp(input, Operand(0)); 2544 __ cmp(input, Operand(0));
2668 // We can make rsb conditional because the previous cmp instruction 2545 // We can make rsb conditional because the previous cmp instruction
(...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after
2702 Register input = ToRegister(instr->InputAt(0)); 2579 Register input = ToRegister(instr->InputAt(0));
2703 // Smi check. 2580 // Smi check.
2704 __ JumpIfNotSmi(input, deferred->entry()); 2581 __ JumpIfNotSmi(input, deferred->entry());
2705 // If smi, handle it directly. 2582 // If smi, handle it directly.
2706 EmitIntegerMathAbs(instr); 2583 EmitIntegerMathAbs(instr);
2707 __ bind(deferred->exit()); 2584 __ bind(deferred->exit());
2708 } 2585 }
2709 } 2586 }
2710 2587
2711 2588
2712 // Truncates a double using a specific rounding mode.
2713 // Clears the z flag (ne condition) if an overflow occurs.
2714 void LCodeGen::EmitVFPTruncate(VFPRoundingMode rounding_mode,
2715 SwVfpRegister result,
2716 DwVfpRegister double_input,
2717 Register scratch1,
2718 Register scratch2) {
2719 Register prev_fpscr = scratch1;
2720 Register scratch = scratch2;
2721
2722 // Set custom FPCSR:
2723 // - Set rounding mode.
2724 // - Clear vfp cumulative exception flags.
2725 // - Make sure Flush-to-zero mode control bit is unset.
2726 __ vmrs(prev_fpscr);
2727 __ bic(scratch, prev_fpscr, Operand(kVFPExceptionMask |
2728 kVFPRoundingModeMask |
2729 kVFPFlushToZeroMask));
2730 __ orr(scratch, scratch, Operand(rounding_mode));
2731 __ vmsr(scratch);
2732
2733 // Convert the argument to an integer.
2734 __ vcvt_s32_f64(result,
2735 double_input,
2736 kFPSCRRounding);
2737
2738 // Retrieve FPSCR.
2739 __ vmrs(scratch);
2740 // Restore FPSCR.
2741 __ vmsr(prev_fpscr);
2742 // Check for vfp exceptions.
2743 __ tst(scratch, Operand(kVFPExceptionMask));
2744 }
2745
2746
2747 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { 2589 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
2748 DoubleRegister input = ToDoubleRegister(instr->InputAt(0)); 2590 DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
2749 Register result = ToRegister(instr->result()); 2591 Register result = ToRegister(instr->result());
2750 SwVfpRegister single_scratch = double_scratch0().low(); 2592 SwVfpRegister single_scratch = double_scratch0().low();
2751 Register scratch1 = scratch0(); 2593 Register scratch1 = scratch0();
2752 Register scratch2 = ToRegister(instr->TempAt(0)); 2594 Register scratch2 = ToRegister(instr->TempAt(0));
2753 2595
2754 EmitVFPTruncate(kRoundToMinusInf, 2596 __ EmitVFPTruncate(kRoundToMinusInf,
2755 single_scratch, 2597 single_scratch,
2756 input, 2598 input,
2757 scratch1, 2599 scratch1,
2758 scratch2); 2600 scratch2);
2759 DeoptimizeIf(ne, instr->environment()); 2601 DeoptimizeIf(ne, instr->environment());
2760 2602
2761 // Move the result back to general purpose register r0. 2603 // Move the result back to general purpose register r0.
2762 __ vmov(result, single_scratch); 2604 __ vmov(result, single_scratch);
2763 2605
2764 // Test for -0. 2606 // Test for -0.
2765 Label done; 2607 Label done;
2766 __ cmp(result, Operand(0)); 2608 __ cmp(result, Operand(0));
2767 __ b(ne, &done); 2609 __ b(ne, &done);
2768 __ vmov(scratch1, input.high()); 2610 __ vmov(scratch1, input.high());
2769 __ tst(scratch1, Operand(HeapNumber::kSignMask)); 2611 __ tst(scratch1, Operand(HeapNumber::kSignMask));
2770 DeoptimizeIf(ne, instr->environment()); 2612 DeoptimizeIf(ne, instr->environment());
2771 __ bind(&done); 2613 __ bind(&done);
2772 } 2614 }
2773 2615
2774 2616
2617 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
2618 DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
2619 Register result = ToRegister(instr->result());
2620 Register scratch1 = scratch0();
2621 Register scratch2 = result;
2622 __ EmitVFPTruncate(kRoundToNearest,
2623 double_scratch0().low(),
2624 input,
2625 scratch1,
2626 scratch2);
2627 DeoptimizeIf(ne, instr->environment());
2628 __ vmov(result, double_scratch0().low());
2629
2630 // Test for -0.
2631 Label done;
2632 __ cmp(result, Operand(0));
2633 __ b(ne, &done);
2634 __ vmov(scratch1, input.high());
2635 __ tst(scratch1, Operand(HeapNumber::kSignMask));
2636 DeoptimizeIf(ne, instr->environment());
2637 __ bind(&done);
2638 }
2639
2640
2775 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { 2641 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
2776 DoubleRegister input = ToDoubleRegister(instr->InputAt(0)); 2642 DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
2777 ASSERT(ToDoubleRegister(instr->result()).is(input)); 2643 ASSERT(ToDoubleRegister(instr->result()).is(input));
2778 __ vsqrt(input, input); 2644 __ vsqrt(input, input);
2779 } 2645 }
2780 2646
2781 2647
2648 void LCodeGen::DoPower(LPower* instr) {
2649 LOperand* left = instr->InputAt(0);
2650 LOperand* right = instr->InputAt(1);
2651 Register scratch = scratch0();
2652 DoubleRegister result_reg = ToDoubleRegister(instr->result());
2653 Representation exponent_type = instr->hydrogen()->right()->representation();
2654 if (exponent_type.IsDouble()) {
2655 // Prepare arguments and call C function.
2656 __ PrepareCallCFunction(4, scratch);
2657 __ vmov(r0, r1, ToDoubleRegister(left));
2658 __ vmov(r2, r3, ToDoubleRegister(right));
2659 __ CallCFunction(ExternalReference::power_double_double_function(), 4);
2660 } else if (exponent_type.IsInteger32()) {
2661 ASSERT(ToRegister(right).is(r0));
2662 // Prepare arguments and call C function.
2663 __ PrepareCallCFunction(4, scratch);
2664 __ mov(r2, ToRegister(right));
2665 __ vmov(r0, r1, ToDoubleRegister(left));
2666 __ CallCFunction(ExternalReference::power_double_int_function(), 4);
2667 } else {
2668 ASSERT(exponent_type.IsTagged());
2669 ASSERT(instr->hydrogen()->left()->representation().IsDouble());
2670
2671 Register right_reg = ToRegister(right);
2672
2673 // Check for smi on the right hand side.
2674 Label non_smi, call;
2675 __ JumpIfNotSmi(right_reg, &non_smi);
2676
2677 // Untag smi and convert it to a double.
2678 __ SmiUntag(right_reg);
2679 SwVfpRegister single_scratch = double_scratch0().low();
2680 __ vmov(single_scratch, right_reg);
2681 __ vcvt_f64_s32(result_reg, single_scratch);
2682 __ jmp(&call);
2683
2684 // Heap number map check.
2685 __ bind(&non_smi);
2686 __ ldr(scratch, FieldMemOperand(right_reg, HeapObject::kMapOffset));
2687 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
2688 __ cmp(scratch, Operand(ip));
2689 DeoptimizeIf(ne, instr->environment());
2690 int32_t value_offset = HeapNumber::kValueOffset - kHeapObjectTag;
2691 __ add(scratch, right_reg, Operand(value_offset));
2692 __ vldr(result_reg, scratch, 0);
2693
2694 // Prepare arguments and call C function.
2695 __ bind(&call);
2696 __ PrepareCallCFunction(4, scratch);
2697 __ vmov(r0, r1, ToDoubleRegister(left));
2698 __ vmov(r2, r3, result_reg);
2699 __ CallCFunction(ExternalReference::power_double_double_function(), 4);
2700 }
2701 // Store the result in the result register.
2702 __ GetCFunctionDoubleResult(result_reg);
2703 }
2704
2705
2706 void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
2707 ASSERT(ToDoubleRegister(instr->result()).is(d2));
2708 TranscendentalCacheStub stub(TranscendentalCache::LOG,
2709 TranscendentalCacheStub::UNTAGGED);
2710 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2711 }
2712
2713
2714 void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
2715 ASSERT(ToDoubleRegister(instr->result()).is(d2));
2716 TranscendentalCacheStub stub(TranscendentalCache::COS,
2717 TranscendentalCacheStub::UNTAGGED);
2718 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2719 }
2720
2721
2722 void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
2723 ASSERT(ToDoubleRegister(instr->result()).is(d2));
2724 TranscendentalCacheStub stub(TranscendentalCache::SIN,
2725 TranscendentalCacheStub::UNTAGGED);
2726 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2727 }
2728
2729
2782 void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) { 2730 void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
2783 switch (instr->op()) { 2731 switch (instr->op()) {
2784 case kMathAbs: 2732 case kMathAbs:
2785 DoMathAbs(instr); 2733 DoMathAbs(instr);
2786 break; 2734 break;
2787 case kMathFloor: 2735 case kMathFloor:
2788 DoMathFloor(instr); 2736 DoMathFloor(instr);
2789 break; 2737 break;
2738 case kMathRound:
2739 DoMathRound(instr);
2740 break;
2790 case kMathSqrt: 2741 case kMathSqrt:
2791 DoMathSqrt(instr); 2742 DoMathSqrt(instr);
2792 break; 2743 break;
2744 case kMathCos:
2745 DoMathCos(instr);
2746 break;
2747 case kMathSin:
2748 DoMathSin(instr);
2749 break;
2750 case kMathLog:
2751 DoMathLog(instr);
2752 break;
2793 default: 2753 default:
2794 Abort("Unimplemented type of LUnaryMathOperation."); 2754 Abort("Unimplemented type of LUnaryMathOperation.");
2795 UNREACHABLE(); 2755 UNREACHABLE();
2796 } 2756 }
2797 } 2757 }
2798 2758
2799 2759
2800 void LCodeGen::DoCallKeyed(LCallKeyed* instr) { 2760 void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
2801 ASSERT(ToRegister(instr->result()).is(r0)); 2761 ASSERT(ToRegister(instr->result()).is(r0));
2802 2762
(...skipping 97 matching lines...) Expand 10 before | Expand all | Expand 10 after
2900 } 2860 }
2901 } 2861 }
2902 2862
2903 2863
2904 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { 2864 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
2905 ASSERT(ToRegister(instr->object()).is(r1)); 2865 ASSERT(ToRegister(instr->object()).is(r1));
2906 ASSERT(ToRegister(instr->value()).is(r0)); 2866 ASSERT(ToRegister(instr->value()).is(r0));
2907 2867
2908 // Name is always in r2. 2868 // Name is always in r2.
2909 __ mov(r2, Operand(instr->name())); 2869 __ mov(r2, Operand(instr->name()));
2910 Handle<Code> ic(Isolate::Current()->builtins()-> 2870 Handle<Code> ic(Isolate::Current()->builtins()->builtin(
2911 builtin(Builtins::StoreIC_Initialize)); 2871 info_->is_strict() ? Builtins::StoreIC_Initialize_Strict
2872 : Builtins::StoreIC_Initialize));
2912 CallCode(ic, RelocInfo::CODE_TARGET, instr); 2873 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2913 } 2874 }
2914 2875
2915 2876
2916 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { 2877 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
2917 __ cmp(ToRegister(instr->index()), ToRegister(instr->length())); 2878 __ cmp(ToRegister(instr->index()), ToRegister(instr->length()));
2918 DeoptimizeIf(hs, instr->environment()); 2879 DeoptimizeIf(hs, instr->environment());
2919 } 2880 }
2920 2881
2921 2882
(...skipping 21 matching lines...) Expand all
2943 __ RecordWrite(elements, key, value); 2904 __ RecordWrite(elements, key, value);
2944 } 2905 }
2945 } 2906 }
2946 2907
2947 2908
2948 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { 2909 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
2949 ASSERT(ToRegister(instr->object()).is(r2)); 2910 ASSERT(ToRegister(instr->object()).is(r2));
2950 ASSERT(ToRegister(instr->key()).is(r1)); 2911 ASSERT(ToRegister(instr->key()).is(r1));
2951 ASSERT(ToRegister(instr->value()).is(r0)); 2912 ASSERT(ToRegister(instr->value()).is(r0));
2952 2913
2953 Handle<Code> ic(Isolate::Current()->builtins()-> 2914 Handle<Code> ic(Isolate::Current()->builtins()->builtin(
2954 builtin(Builtins::KeyedStoreIC_Initialize)); 2915 info_->is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
2916 : Builtins::KeyedStoreIC_Initialize));
2955 CallCode(ic, RelocInfo::CODE_TARGET, instr); 2917 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2956 } 2918 }
2957 2919
2958 2920
2959 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { 2921 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
2960 class DeferredStringCharCodeAt: public LDeferredCode { 2922 class DeferredStringCharCodeAt: public LDeferredCode {
2961 public: 2923 public:
2962 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr) 2924 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
2963 : LDeferredCode(codegen), instr_(instr) { } 2925 : LDeferredCode(codegen), instr_(instr) { }
2964 virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); } 2926 virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
(...skipping 120 matching lines...) Expand 10 before | Expand all | Expand 10 after
3085 __ SmiTag(index); 3047 __ SmiTag(index);
3086 __ push(index); 3048 __ push(index);
3087 } 3049 }
3088 __ CallRuntimeSaveDoubles(Runtime::kStringCharCodeAt); 3050 __ CallRuntimeSaveDoubles(Runtime::kStringCharCodeAt);
3089 RecordSafepointWithRegisters( 3051 RecordSafepointWithRegisters(
3090 instr->pointer_map(), 2, Safepoint::kNoDeoptimizationIndex); 3052 instr->pointer_map(), 2, Safepoint::kNoDeoptimizationIndex);
3091 if (FLAG_debug_code) { 3053 if (FLAG_debug_code) {
3092 __ AbortIfNotSmi(r0); 3054 __ AbortIfNotSmi(r0);
3093 } 3055 }
3094 __ SmiUntag(r0); 3056 __ SmiUntag(r0);
3095 MemOperand result_stack_slot = masm()->SafepointRegisterSlot(result); 3057 __ StoreToSafepointRegisterSlot(r0, result);
3096 __ str(r0, result_stack_slot);
3097 __ PopSafepointRegisters(); 3058 __ PopSafepointRegisters();
3098 } 3059 }
3099 3060
3100 3061
3101 void LCodeGen::DoStringLength(LStringLength* instr) { 3062 void LCodeGen::DoStringLength(LStringLength* instr) {
3102 Register string = ToRegister(instr->InputAt(0)); 3063 Register string = ToRegister(instr->InputAt(0));
3103 Register result = ToRegister(instr->result()); 3064 Register result = ToRegister(instr->result());
3104 __ ldr(result, FieldMemOperand(string, String::kLengthOffset)); 3065 __ ldr(result, FieldMemOperand(string, String::kLengthOffset));
3105 } 3066 }
3106 3067
(...skipping 60 matching lines...) Expand 10 before | Expand all | Expand 10 after
3167 __ b(&done); 3128 __ b(&done);
3168 } 3129 }
3169 3130
3170 // Slow case: Call the runtime system to do the number allocation. 3131 // Slow case: Call the runtime system to do the number allocation.
3171 __ bind(&slow); 3132 __ bind(&slow);
3172 3133
3173 // TODO(3095996): Put a valid pointer value in the stack slot where the result 3134 // TODO(3095996): Put a valid pointer value in the stack slot where the result
3174 // register is stored, as this register is in the pointer map, but contains an 3135 // register is stored, as this register is in the pointer map, but contains an
3175 // integer value. 3136 // integer value.
3176 __ mov(ip, Operand(0)); 3137 __ mov(ip, Operand(0));
3177 int reg_stack_index = __ SafepointRegisterStackIndex(reg.code()); 3138 __ StoreToSafepointRegisterSlot(ip, reg);
3178 __ str(ip, MemOperand(sp, reg_stack_index * kPointerSize));
3179
3180 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); 3139 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
3181 RecordSafepointWithRegisters( 3140 RecordSafepointWithRegisters(
3182 instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex); 3141 instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
3183 if (!reg.is(r0)) __ mov(reg, r0); 3142 if (!reg.is(r0)) __ mov(reg, r0);
3184 3143
3185 // Done. Put the value in dbl_scratch into the value of the allocated heap 3144 // Done. Put the value in dbl_scratch into the value of the allocated heap
3186 // number. 3145 // number.
3187 __ bind(&done); 3146 __ bind(&done);
3188 __ sub(ip, reg, Operand(kHeapObjectTag)); 3147 __ sub(ip, reg, Operand(kHeapObjectTag));
3189 __ vstr(dbl_scratch, ip, HeapNumber::kValueOffset); 3148 __ vstr(dbl_scratch, ip, HeapNumber::kValueOffset);
3190 __ str(reg, MemOperand(sp, reg_stack_index * kPointerSize)); 3149 __ StoreToSafepointRegisterSlot(reg, reg);
3191 __ PopSafepointRegisters(); 3150 __ PopSafepointRegisters();
3192 } 3151 }
3193 3152
3194 3153
3195 void LCodeGen::DoNumberTagD(LNumberTagD* instr) { 3154 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
3196 class DeferredNumberTagD: public LDeferredCode { 3155 class DeferredNumberTagD: public LDeferredCode {
3197 public: 3156 public:
3198 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) 3157 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
3199 : LDeferredCode(codegen), instr_(instr) { } 3158 : LDeferredCode(codegen), instr_(instr) { }
3200 virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); } 3159 virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
(...skipping 24 matching lines...) Expand all
3225 // TODO(3095996): Get rid of this. For now, we need to make the 3184 // TODO(3095996): Get rid of this. For now, we need to make the
3226 // result register contain a valid pointer because it is already 3185 // result register contain a valid pointer because it is already
3227 // contained in the register pointer map. 3186 // contained in the register pointer map.
3228 Register reg = ToRegister(instr->result()); 3187 Register reg = ToRegister(instr->result());
3229 __ mov(reg, Operand(0)); 3188 __ mov(reg, Operand(0));
3230 3189
3231 __ PushSafepointRegisters(); 3190 __ PushSafepointRegisters();
3232 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); 3191 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
3233 RecordSafepointWithRegisters( 3192 RecordSafepointWithRegisters(
3234 instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex); 3193 instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
3235 int reg_stack_index = __ SafepointRegisterStackIndex(reg.code()); 3194 __ StoreToSafepointRegisterSlot(r0, reg);
3236 __ str(r0, MemOperand(sp, reg_stack_index * kPointerSize));
3237 __ PopSafepointRegisters(); 3195 __ PopSafepointRegisters();
3238 } 3196 }
3239 3197
3240 3198
3241 void LCodeGen::DoSmiTag(LSmiTag* instr) { 3199 void LCodeGen::DoSmiTag(LSmiTag* instr) {
3242 LOperand* input = instr->InputAt(0); 3200 LOperand* input = instr->InputAt(0);
3243 ASSERT(input->IsRegister() && input->Equals(instr->result())); 3201 ASSERT(input->IsRegister() && input->Equals(instr->result()));
3244 ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)); 3202 ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
3245 __ SmiTag(ToRegister(input)); 3203 __ SmiTag(ToRegister(input));
3246 } 3204 }
(...skipping 165 matching lines...) Expand 10 before | Expand all | Expand 10 after
3412 ASSERT(input->IsDoubleRegister()); 3370 ASSERT(input->IsDoubleRegister());
3413 LOperand* result = instr->result(); 3371 LOperand* result = instr->result();
3414 ASSERT(result->IsRegister()); 3372 ASSERT(result->IsRegister());
3415 3373
3416 DoubleRegister double_input = ToDoubleRegister(input); 3374 DoubleRegister double_input = ToDoubleRegister(input);
3417 Register result_reg = ToRegister(result); 3375 Register result_reg = ToRegister(result);
3418 SwVfpRegister single_scratch = double_scratch0().low(); 3376 SwVfpRegister single_scratch = double_scratch0().low();
3419 Register scratch1 = scratch0(); 3377 Register scratch1 = scratch0();
3420 Register scratch2 = ToRegister(instr->TempAt(0)); 3378 Register scratch2 = ToRegister(instr->TempAt(0));
3421 3379
3422 VFPRoundingMode rounding_mode = instr->truncating() ? kRoundToMinusInf 3380 __ EmitVFPTruncate(kRoundToZero,
3423 : kRoundToNearest; 3381 single_scratch,
3382 double_input,
3383 scratch1,
3384 scratch2);
3424 3385
3425 EmitVFPTruncate(rounding_mode,
3426 single_scratch,
3427 double_input,
3428 scratch1,
3429 scratch2);
3430 // Deoptimize if we had a vfp invalid exception. 3386 // Deoptimize if we had a vfp invalid exception.
3431 DeoptimizeIf(ne, instr->environment()); 3387 DeoptimizeIf(ne, instr->environment());
3388
3432 // Retrieve the result. 3389 // Retrieve the result.
3433 __ vmov(result_reg, single_scratch); 3390 __ vmov(result_reg, single_scratch);
3434 3391
3435 if (instr->truncating() && 3392 if (!instr->truncating()) {
3436 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 3393 // Convert result back to double and compare with input
3437 Label done; 3394 // to check if the conversion was exact.
3438 __ cmp(result_reg, Operand(0)); 3395 __ vmov(single_scratch, result_reg);
3439 __ b(ne, &done); 3396 __ vcvt_f64_s32(double_scratch0(), single_scratch);
3440 // Check for -0. 3397 __ VFPCompareAndSetFlags(double_scratch0(), double_input);
3441 __ vmov(scratch1, double_input.high());
3442 __ tst(scratch1, Operand(HeapNumber::kSignMask));
3443 DeoptimizeIf(ne, instr->environment()); 3398 DeoptimizeIf(ne, instr->environment());
3399 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3400 Label done;
3401 __ cmp(result_reg, Operand(0));
3402 __ b(ne, &done);
3403 // Check for -0.
3404 __ vmov(scratch1, double_input.high());
3405 __ tst(scratch1, Operand(HeapNumber::kSignMask));
3406 DeoptimizeIf(ne, instr->environment());
3444 3407
3445 __ bind(&done); 3408 __ bind(&done);
3409 }
3446 } 3410 }
3447 } 3411 }
3448 3412
3449 3413
3450 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { 3414 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
3451 LOperand* input = instr->InputAt(0); 3415 LOperand* input = instr->InputAt(0);
3452 ASSERT(input->IsRegister()); 3416 ASSERT(input->IsRegister());
3453 __ tst(ToRegister(input), Operand(kSmiTagMask)); 3417 __ tst(ToRegister(input), Operand(kSmiTagMask));
3454 DeoptimizeIf(instr->condition(), instr->environment()); 3418 DeoptimizeIf(instr->condition(), instr->environment());
3455 } 3419 }
(...skipping 388 matching lines...) Expand 10 before | Expand all | Expand 10 after
3844 3808
3845 3809
3846 void LCodeGen::DoDeoptimize(LDeoptimize* instr) { 3810 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
3847 DeoptimizeIf(al, instr->environment()); 3811 DeoptimizeIf(al, instr->environment());
3848 } 3812 }
3849 3813
3850 3814
3851 void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) { 3815 void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
3852 Register object = ToRegister(instr->object()); 3816 Register object = ToRegister(instr->object());
3853 Register key = ToRegister(instr->key()); 3817 Register key = ToRegister(instr->key());
3854 __ Push(object, key); 3818 Register strict = scratch0();
3819 __ mov(strict, Operand(Smi::FromInt(strict_mode_flag())));
3820 __ Push(object, key, strict);
3855 ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment()); 3821 ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
3856 LPointerMap* pointers = instr->pointer_map(); 3822 LPointerMap* pointers = instr->pointer_map();
3857 LEnvironment* env = instr->deoptimization_environment(); 3823 LEnvironment* env = instr->deoptimization_environment();
3858 RecordPosition(pointers->position()); 3824 RecordPosition(pointers->position());
3859 RegisterEnvironmentForDeoptimization(env); 3825 RegisterEnvironmentForDeoptimization(env);
3860 SafepointGenerator safepoint_generator(this, 3826 SafepointGenerator safepoint_generator(this,
3861 pointers, 3827 pointers,
3862 env->deoptimization_index()); 3828 env->deoptimization_index());
3863 __ InvokeBuiltin(Builtins::DELETE, CALL_JS, &safepoint_generator); 3829 __ InvokeBuiltin(Builtins::DELETE, CALL_JS, &safepoint_generator);
3864 } 3830 }
3865 3831
3866 3832
3867 void LCodeGen::DoStackCheck(LStackCheck* instr) { 3833 void LCodeGen::DoStackCheck(LStackCheck* instr) {
3868 // Perform stack overflow check. 3834 // Perform stack overflow check.
3869 Label ok; 3835 Label ok;
3870 __ LoadRoot(ip, Heap::kStackLimitRootIndex); 3836 __ LoadRoot(ip, Heap::kStackLimitRootIndex);
3871 __ cmp(sp, Operand(ip)); 3837 __ cmp(sp, Operand(ip));
3872 __ b(hs, &ok); 3838 __ b(hs, &ok);
3873 StackCheckStub stub; 3839 StackCheckStub stub;
3874 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 3840 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3875 __ bind(&ok); 3841 __ bind(&ok);
3876 } 3842 }
3877 3843
3878 3844
3879 void LCodeGen::DoOsrEntry(LOsrEntry* instr) { 3845 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
3880 Abort("DoOsrEntry unimplemented."); 3846 // This is a pseudo-instruction that ensures that the environment here is
3847 // properly registered for deoptimization and records the assembler's PC
3848 // offset.
3849 LEnvironment* environment = instr->environment();
3850 environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
3851 instr->SpilledDoubleRegisterArray());
3852
3853 // If the environment were already registered, we would have no way of
3854 // backpatching it with the spill slot operands.
3855 ASSERT(!environment->HasBeenRegistered());
3856 RegisterEnvironmentForDeoptimization(environment);
3857 ASSERT(osr_pc_offset_ == -1);
3858 osr_pc_offset_ = masm()->pc_offset();
3881 } 3859 }
3882 3860
3883 3861
3884 #undef __ 3862 #undef __
3885 3863
3886 } } // namespace v8::internal 3864 } } // namespace v8::internal
OLDNEW
« no previous file with comments | « src/arm/lithium-codegen-arm.h ('k') | src/arm/lithium-gap-resolver-arm.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698