Chromium Code Reviews

Unified Diff: src/crankshaft/s390/lithium-codegen-s390.cc

Issue 1763233003: S390: Initial Impl of Crankshaft features (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 4 years, 9 months ago
 // Copyright 2014 the V8 project authors. All rights reserved.
+//
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

-#include "src/crankshaft/ppc/lithium-codegen-ppc.h"
+#include "src/crankshaft/s390/lithium-codegen-s390.h"

 #include "src/base/bits.h"
 #include "src/code-factory.h"
 #include "src/code-stubs.h"
 #include "src/crankshaft/hydrogen-osr.h"
-#include "src/crankshaft/ppc/lithium-gap-resolver-ppc.h"
+#include "src/crankshaft/s390/lithium-gap-resolver-s390.h"
 #include "src/ic/ic.h"
 #include "src/ic/stub-cache.h"
 #include "src/profiler/cpu-profiler.h"

 namespace v8 {
 namespace internal {

-
 class SafepointGenerator final : public CallWrapper {
  public:
   SafepointGenerator(LCodeGen* codegen, LPointerMap* pointers,
                      Safepoint::DeoptMode mode)
       : codegen_(codegen), pointers_(pointers), deopt_mode_(mode) {}
   virtual ~SafepointGenerator() {}

   void BeforeCall(int call_size) const override {}

   void AfterCall() const override {
     codegen_->RecordSafepoint(pointers_, deopt_mode_);
   }

  private:
   LCodeGen* codegen_;
   LPointerMap* pointers_;
   Safepoint::DeoptMode deopt_mode_;
 };

-
 #define __ masm()->

 bool LCodeGen::GenerateCode() {
   LPhase phase("Z_Code generation", chunk());
   DCHECK(is_unused());
   status_ = GENERATING;

   // Open a frame scope to indicate that there is a frame on the stack. The
   // NONE indicates that the scope shouldn't actually generate code to set up
   // the frame (that is done in GeneratePrologue).
   FrameScope frame_scope(masm_, StackFrame::NONE);

-  bool rc = GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
-            GenerateJumpTable() && GenerateSafepointTable();
-  if (FLAG_enable_embedded_constant_pool && !rc) {
-    masm()->AbortConstantPoolBuilding();
-  }
-  return rc;
+  return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
+         GenerateJumpTable() && GenerateSafepointTable();
 }

-
 void LCodeGen::FinishCode(Handle<Code> code) {
   DCHECK(is_done());
   code->set_stack_slots(GetTotalFrameSlotCount());
   code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
   PopulateDeoptimizationData(code);
 }

-
 void LCodeGen::SaveCallerDoubles() {
   DCHECK(info()->saves_caller_doubles());
   DCHECK(NeedsEagerFrame());
   Comment(";;; Save clobbered callee double registers");
   int count = 0;
   BitVector* doubles = chunk()->allocated_double_registers();
   BitVector::Iterator save_iterator(doubles);
   while (!save_iterator.Done()) {
-    __ stfd(DoubleRegister::from_code(save_iterator.Current()),
-            MemOperand(sp, count * kDoubleSize));
+    __ std(DoubleRegister::from_code(save_iterator.Current()),
+           MemOperand(sp, count * kDoubleSize));
     save_iterator.Advance();
     count++;
   }
 }

-
 void LCodeGen::RestoreCallerDoubles() {
   DCHECK(info()->saves_caller_doubles());
   DCHECK(NeedsEagerFrame());
   Comment(";;; Restore clobbered callee double registers");
   BitVector* doubles = chunk()->allocated_double_registers();
   BitVector::Iterator save_iterator(doubles);
   int count = 0;
   while (!save_iterator.Done()) {
-    __ lfd(DoubleRegister::from_code(save_iterator.Current()),
-           MemOperand(sp, count * kDoubleSize));
+    __ ld(DoubleRegister::from_code(save_iterator.Current()),
+          MemOperand(sp, count * kDoubleSize));
     save_iterator.Advance();
     count++;
   }
 }
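
Both helpers above walk the chunk's allocated-double-register bit vector with a running count, so the n-th set bit is saved to and restored from sp + n * kDoubleSize no matter which register codes are live. A minimal stand-alone sketch of that layout computation (plain C++, not V8 code; the mask and kDoubleSize value are assumptions for illustration):

#include <cstdint>
#include <cstdio>

int main() {
  const int kDoubleSize = 8;
  // Hypothetical allocation: double registers d1, d4 and d5 are live.
  uint32_t allocated = (1u << 1) | (1u << 4) | (1u << 5);
  int count = 0;
  for (int code = 0; code < 32; code++) {
    if ((allocated >> code) & 1u) {
      // The n-th set bit lands at sp + n * kDoubleSize, giving a dense save
      // area even when the live register codes are not contiguous.
      std::printf("d%d -> [sp + %d]\n", code, count * kDoubleSize);
      count++;
    }
  }
  return 0;
}

This is why SaveCallerDoubles and RestoreCallerDoubles must iterate in the same order: the offsets are defined by iteration position, not by register code.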

-
 bool LCodeGen::GeneratePrologue() {
   DCHECK(is_generating());

   if (info()->IsOptimizing()) {
     ProfileEntryHookStub::MaybeCallEntryHook(masm_);

-    // r4: Callee's JS function.
+    // r3: Callee's JS function.
     // cp: Callee's context.
-    // pp: Callee's constant pool pointer (if enabled)
     // fp: Caller's frame pointer.
     // lr: Caller's pc.
     // ip: Our own function entry (required by the prologue)
   }

   int prologue_offset = masm_->pc_offset();

   if (prologue_offset) {
-    // Prologue logic requires it's starting address in ip and the
-    // corresponding offset from the function entry.
-    prologue_offset += Instruction::kInstrSize;
-    __ addi(ip, ip, Operand(prologue_offset));
+    // Prologue logic requires its starting address in ip and the
+    // corresponding offset from the function entry. Need to add
+    // 4 bytes for the size of AHI/AGHI that AddP expands into.
+    __ AddP(ip, ip, Operand(prologue_offset + sizeof(FourByteInstr)));
   }
   info()->set_prologue_offset(prologue_offset);
   if (NeedsEagerFrame()) {
     if (info()->IsStub()) {
       __ StubPrologue(ip, prologue_offset);
     } else {
       __ Prologue(info()->GeneratePreagedPrologue(), ip, prologue_offset);
     }
     frame_is_built_ = true;
   }

   // Reserve space for the stack slots needed by the code.
   int slots = GetStackSlotCount();
   if (slots > 0) {
-    __ subi(sp, sp, Operand(slots * kPointerSize));
+    __ lay(sp, MemOperand(sp, -(slots * kPointerSize)));
     if (FLAG_debug_code) {
-      __ Push(r3, r4);
-      __ li(r0, Operand(slots));
-      __ mtctr(r0);
-      __ addi(r3, sp, Operand((slots + 2) * kPointerSize));
-      __ mov(r4, Operand(kSlotsZapValue));
+      __ Push(r2, r3);
+      __ mov(r2, Operand(slots * kPointerSize));
+      __ mov(r3, Operand(kSlotsZapValue));
       Label loop;
       __ bind(&loop);
-      __ StorePU(r4, MemOperand(r3, -kPointerSize));
-      __ bdnz(&loop);
-      __ Pop(r3, r4);
+      __ StoreP(r3, MemOperand(sp, r2, -kPointerSize));
+      __ lay(r2, MemOperand(r2, -kPointerSize));
+      __ CmpP(r2, Operand::Zero());
+      __ bne(&loop);
+      __ Pop(r2, r3);
     }
   }

   if (info()->saves_caller_doubles()) {
     SaveCallerDoubles();
   }
   return !is_aborted();
 }
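
Under FLAG_debug_code the prologue fills every freshly reserved slot with kSlotsZapValue, counting a byte index down from slots * kPointerSize so that the store at sp + index - kPointerSize touches slots [slots-1 .. 0] exactly once. A rough C++ stand-in for the loop's semantics, in word units (the zap constant here is a made-up pattern, not V8's):

#include <cassert>
#include <cstdint>
#include <vector>

// Mirrors the generated zap loop: the index runs from 'slots' down to 1,
// and each store hits slot index-1, so every reserved slot is written once.
void ZapSlots(std::vector<intptr_t>& stack, int slots) {
  const intptr_t kSlotsZapValue = 0xbeefdad;  // stand-in zap pattern
  for (int index = slots; index != 0; index--) {
    stack[index - 1] = kSlotsZapValue;
  }
}

int main() {
  std::vector<intptr_t> stack(4, 0);
  ZapSlots(stack, 4);
  for (intptr_t v : stack) assert(v == 0xbeefdad);
  return 0;
}

Zapping makes a read from an uninitialized spill slot stand out immediately in debug builds instead of yielding a stale but plausible-looking value.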

-
 void LCodeGen::DoPrologue(LPrologue* instr) {
   Comment(";;; Prologue begin");

   // Possibly allocate a local context.
   if (info()->scope()->num_heap_slots() > 0) {
     Comment(";;; Allocate local context");
     bool need_write_barrier = true;
-    // Argument to NewContext is the function, which is in r4.
+    // Argument to NewContext is the function, which is in r3.
     int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
     Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
     if (info()->scope()->is_script_scope()) {
-      __ push(r4);
+      __ push(r3);
       __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
       __ CallRuntime(Runtime::kNewScriptContext);
       deopt_mode = Safepoint::kLazyDeopt;
     } else if (slots <= FastNewContextStub::kMaximumSlots) {
       FastNewContextStub stub(isolate(), slots);
       __ CallStub(&stub);
       // Result of FastNewContextStub is always in new space.
       need_write_barrier = false;
     } else {
-      __ push(r4);
+      __ push(r3);
       __ CallRuntime(Runtime::kNewFunctionContext);
     }
     RecordSafepoint(deopt_mode);

-    // Context is returned in both r3 and cp. It replaces the context
+    // Context is returned in both r2 and cp. It replaces the context
     // passed to us. It's saved in the stack and kept live in cp.
-    __ mr(cp, r3);
-    __ StoreP(r3, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    __ LoadRR(cp, r2);
+    __ StoreP(r2, MemOperand(fp, StandardFrameConstants::kContextOffset));
     // Copy any necessary parameters into the context.
     int num_parameters = scope()->num_parameters();
     int first_parameter = scope()->has_this_declaration() ? -1 : 0;
     for (int i = first_parameter; i < num_parameters; i++) {
       Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
       if (var->IsContextSlot()) {
         int parameter_offset = StandardFrameConstants::kCallerSPOffset +
                                (num_parameters - 1 - i) * kPointerSize;
         // Load parameter from stack.
-        __ LoadP(r3, MemOperand(fp, parameter_offset));
+        __ LoadP(r2, MemOperand(fp, parameter_offset));
         // Store it in the context.
         MemOperand target = ContextMemOperand(cp, var->index());
-        __ StoreP(r3, target, r0);
-        // Update the write barrier. This clobbers r6 and r3.
+        __ StoreP(r2, target);
+        // Update the write barrier. This clobbers r5 and r2.
         if (need_write_barrier) {
-          __ RecordWriteContextSlot(cp, target.offset(), r3, r6,
+          __ RecordWriteContextSlot(cp, target.offset(), r2, r5,
                                     GetLinkRegisterState(), kSaveFPRegs);
         } else if (FLAG_debug_code) {
           Label done;
-          __ JumpIfInNewSpace(cp, r3, &done);
+          __ JumpIfInNewSpace(cp, r2, &done);
           __ Abort(kExpectedNewSpaceObject);
           __ bind(&done);
         }
       }
     }
     Comment(";;; End allocate local context");
   }

   Comment(";;; Prologue end");
 }

-
 void LCodeGen::GenerateOsrPrologue() {
   // Generate the OSR entry prologue at the first unknown OSR value, or if there
   // are none, at the OSR entrypoint instruction.
   if (osr_pc_offset_ >= 0) return;

   osr_pc_offset_ = masm()->pc_offset();

   // Adjust the frame size, subsuming the unoptimized frame into the
   // optimized frame.
   int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
   DCHECK(slots >= 0);
-  __ subi(sp, sp, Operand(slots * kPointerSize));
+  __ lay(sp, MemOperand(sp, -slots * kPointerSize));
 }

-
 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
   if (instr->IsCall()) {
     EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
   }
   if (!instr->IsLazyBailout() && !instr->IsGap()) {
     safepoints_.BumpLastLazySafepointIndex();
   }
 }

-
 bool LCodeGen::GenerateDeferredCode() {
   DCHECK(is_generating());
   if (deferred_.length() > 0) {
     for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
       LDeferredCode* code = deferred_[i];

       HValue* value =
           instructions_->at(code->instruction_index())->hydrogen_value();
       RecordAndWritePosition(
           chunk()->graph()->SourcePositionToScriptPosition(value->position()));

       Comment(
           ";;; <@%d,#%d> "
           "-------------------- Deferred %s --------------------",
           code->instruction_index(), code->instr()->hydrogen_value()->id(),
           code->instr()->Mnemonic());
       __ bind(code->entry());
       if (NeedsDeferredFrame()) {
         Comment(";;; Build frame");
         DCHECK(!frame_is_built_);
         DCHECK(info()->IsStub());
         frame_is_built_ = true;
         __ LoadSmiLiteral(scratch0(), Smi::FromInt(StackFrame::STUB));
         __ PushFixedFrame(scratch0());
-        __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+        __ la(fp,
+              MemOperand(sp, StandardFrameConstants::kFixedFrameSizeFromFp));
         Comment(";;; Deferred code");
       }
       code->Generate();
       if (NeedsDeferredFrame()) {
         Comment(";;; Destroy frame");
         DCHECK(frame_is_built_);
         __ PopFixedFrame(ip);
         frame_is_built_ = false;
       }
       __ b(code->exit());
     }
   }

   return !is_aborted();
 }

-
 bool LCodeGen::GenerateJumpTable() {
   // Check that the jump table is accessible from everywhere in the function
-  // code, i.e. that offsets to the table can be encoded in the 24bit signed
-  // immediate of a branch instruction.
+  // code, i.e. that offsets in halfwords to the table can be encoded in the
+  // 32-bit signed immediate of a branch instruction.
   // To simplify we consider the code size from the first instruction to the
   // end of the jump table. We also don't consider the pc load delta.
   // Each entry in the jump table generates one instruction and inlines one
   // 32bit data after it.
-  if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
-                jump_table_.length() * 7)) {
+  // TODO(joransiu): The Int24 condition can likely be relaxed for S390
+  if (!is_int24(masm()->pc_offset() + jump_table_.length() * 7)) {
     Abort(kGeneratedCodeIsTooLarge);
   }

   if (jump_table_.length() > 0) {
     Label needs_frame, call_deopt_entry;

     Comment(";;; -------------------- Jump table --------------------");
     Address base = jump_table_[0].address;

     Register entry_offset = scratch0();

     int length = jump_table_.length();
     for (int i = 0; i < length; i++) {
       Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
       __ bind(&table_entry->label);

       DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type);
       Address entry = table_entry->address;
       DeoptComment(table_entry->deopt_info);

       // Second-level deopt table entries are contiguous and small, so instead
       // of loading the full, absolute address of each one, load an immediate
       // offset which will be added to the base address later.
       __ mov(entry_offset, Operand(entry - base));

       if (table_entry->needs_frame) {
         DCHECK(!info()->saves_caller_doubles());
         Comment(";;; call deopt with frame");
         __ PushFixedFrame();
-        __ b(&needs_frame, SetLK);
+        __ b(r14, &needs_frame);
       } else {
-        __ b(&call_deopt_entry, SetLK);
+        __ b(r14, &call_deopt_entry);
       }
       info()->LogDeoptCallPosition(masm()->pc_offset(),
                                    table_entry->deopt_info.inlining_id);
     }

     if (needs_frame.is_linked()) {
       __ bind(&needs_frame);
       // This variant of deopt can only be used with stubs. Since we don't
       // have a function pointer to install in the stack frame that we're
       // building, install a special marker there instead.
       DCHECK(info()->IsStub());
       __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::STUB));
       __ push(ip);
-      __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+      __ lay(fp, MemOperand(sp, StandardFrameConstants::kFixedFrameSizeFromFp));
     }

     Comment(";;; call deopt");
     __ bind(&call_deopt_entry);

     if (info()->saves_caller_doubles()) {
       DCHECK(info()->IsStub());
       RestoreCallerDoubles();
     }

     // Add the base address to the offset previously loaded in entry_offset.
     __ mov(ip, Operand(ExternalReference::ForDeoptEntry(base)));
-    __ add(ip, entry_offset, ip);
+    __ AddP(ip, entry_offset, ip);
     __ Jump(ip);
   }

   // The deoptimization jump table is the last part of the instruction
   // sequence. Mark the generated code as done unless we bailed out.
   if (!is_aborted()) status_ = DONE;
   return !is_aborted();
 }
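
Each jump-table entry materializes only the small immediate entry - base; the shared tail then adds the absolute base address once and jumps. A simplified stand-in showing the encoding (the addresses are made up; second-level deopt entries are contiguous, so the offsets stay small):

#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  // Hypothetical absolute addresses of three second-level deopt entries.
  const std::vector<uintptr_t> entries = {0x10000, 0x10010, 0x10020};
  const uintptr_t base = entries[0];
  for (uintptr_t entry : entries) {
    // Per-entry code loads only this small immediate...
    uint32_t entry_offset = static_cast<uint32_t>(entry - base);
    // ...and the shared tail adds the base once before jumping.
    uintptr_t target = base + entry_offset;
    std::printf("offset %u -> target 0x%lx\n", entry_offset,
                static_cast<unsigned long>(target));
  }
  return 0;
}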

-
 bool LCodeGen::GenerateSafepointTable() {
   DCHECK(is_done());
   safepoints_.Emit(masm(), GetTotalFrameSlotCount());
   return !is_aborted();
 }

-
 Register LCodeGen::ToRegister(int code) const {
   return Register::from_code(code);
 }

-
 DoubleRegister LCodeGen::ToDoubleRegister(int code) const {
   return DoubleRegister::from_code(code);
 }

-
 Register LCodeGen::ToRegister(LOperand* op) const {
   DCHECK(op->IsRegister());
   return ToRegister(op->index());
 }

-
 Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
   if (op->IsRegister()) {
     return ToRegister(op->index());
   } else if (op->IsConstantOperand()) {
     LConstantOperand* const_op = LConstantOperand::cast(op);
     HConstant* constant = chunk_->LookupConstant(const_op);
     Handle<Object> literal = constant->handle(isolate());
     Representation r = chunk_->LookupLiteralRepresentation(const_op);
     if (r.IsInteger32()) {
       AllowDeferredHandleDereference get_number;
       DCHECK(literal->IsNumber());
       __ LoadIntLiteral(scratch, static_cast<int32_t>(literal->Number()));
     } else if (r.IsDouble()) {
       Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
     } else {
       DCHECK(r.IsSmiOrTagged());
       __ Move(scratch, literal);
     }
     return scratch;
   } else if (op->IsStackSlot()) {
     __ LoadP(scratch, ToMemOperand(op));
     return scratch;
   }
   UNREACHABLE();
   return scratch;
 }

-
 void LCodeGen::EmitLoadIntegerConstant(LConstantOperand* const_op,
                                        Register dst) {
   DCHECK(IsInteger32(const_op));
   HConstant* constant = chunk_->LookupConstant(const_op);
   int32_t value = constant->Integer32Value();
   if (IsSmi(const_op)) {
     __ LoadSmiLiteral(dst, Smi::FromInt(value));
   } else {
     __ LoadIntLiteral(dst, value);
   }
 }

-
 DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
   DCHECK(op->IsDoubleRegister());
   return ToDoubleRegister(op->index());
 }

-
 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
   HConstant* constant = chunk_->LookupConstant(op);
   DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
   return constant->handle(isolate());
 }

-
 bool LCodeGen::IsInteger32(LConstantOperand* op) const {
   return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
 }

-
 bool LCodeGen::IsSmi(LConstantOperand* op) const {
   return chunk_->LookupLiteralRepresentation(op).IsSmi();
 }

-
 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
   return ToRepresentation(op, Representation::Integer32());
 }

-
 intptr_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                     const Representation& r) const {
   HConstant* constant = chunk_->LookupConstant(op);
   int32_t value = constant->Integer32Value();
   if (r.IsInteger32()) return value;
   DCHECK(r.IsSmiOrTagged());
   return reinterpret_cast<intptr_t>(Smi::FromInt(value));
 }

-
 Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
   HConstant* constant = chunk_->LookupConstant(op);
   return Smi::FromInt(constant->Integer32Value());
 }

-
 double LCodeGen::ToDouble(LConstantOperand* op) const {
   HConstant* constant = chunk_->LookupConstant(op);
   DCHECK(constant->HasDoubleValue());
   return constant->DoubleValue();
 }

-
 Operand LCodeGen::ToOperand(LOperand* op) {
   if (op->IsConstantOperand()) {
     LConstantOperand* const_op = LConstantOperand::cast(op);
     HConstant* constant = chunk()->LookupConstant(const_op);
     Representation r = chunk_->LookupLiteralRepresentation(const_op);
     if (r.IsSmi()) {
       DCHECK(constant->HasSmiValue());
       return Operand(Smi::FromInt(constant->Integer32Value()));
     } else if (r.IsInteger32()) {
       DCHECK(constant->HasInteger32Value());
       return Operand(constant->Integer32Value());
     } else if (r.IsDouble()) {
       Abort(kToOperandUnsupportedDoubleImmediate);
     }
     DCHECK(r.IsTagged());
     return Operand(constant->handle(isolate()));
   } else if (op->IsRegister()) {
     return Operand(ToRegister(op));
   } else if (op->IsDoubleRegister()) {
     Abort(kToOperandIsDoubleRegisterUnimplemented);
     return Operand::Zero();
   }
   // Stack slots not implemented, use ToMemOperand instead.
   UNREACHABLE();
   return Operand::Zero();
 }

-
 static int ArgumentsOffsetWithoutFrame(int index) {
   DCHECK(index < 0);
   return -(index + 1) * kPointerSize;
 }

-
 MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
   DCHECK(!op->IsRegister());
   DCHECK(!op->IsDoubleRegister());
   DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
   if (NeedsEagerFrame()) {
     return MemOperand(fp, FrameSlotToFPOffset(op->index()));
   } else {
     // Retrieve parameter without eager stack-frame relative to the
     // stack-pointer.
     return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
   }
 }
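
Without an eager frame, spill slots carry negative indices that map onto non-negative sp-relative offsets through -(index + 1) * kPointerSize: index -1 lands at [sp + 0], -2 at [sp + 8], and so on. A tiny self-checking example of that arithmetic (8-byte pointers assumed):

#include <cassert>

static int ArgumentsOffsetWithoutFrame(int index) {
  const int kPointerSize = 8;  // assumption: 64-bit target
  assert(index < 0);
  return -(index + 1) * kPointerSize;
}

int main() {
  assert(ArgumentsOffsetWithoutFrame(-1) == 0);   // [sp + 0]
  assert(ArgumentsOffsetWithoutFrame(-2) == 8);   // [sp + 8]
  assert(ArgumentsOffsetWithoutFrame(-3) == 16);  // [sp + 16]
  return 0;
}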

-
 MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
   DCHECK(op->IsDoubleStackSlot());
   if (NeedsEagerFrame()) {
     return MemOperand(fp, FrameSlotToFPOffset(op->index()) + kPointerSize);
   } else {
     // Retrieve parameter without eager stack-frame relative to the
     // stack-pointer.
     return MemOperand(sp,
                       ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
   }
 }

-
 void LCodeGen::WriteTranslation(LEnvironment* environment,
                                 Translation* translation) {
   if (environment == NULL) return;

   // The translation includes one command per value in the environment.
   int translation_size = environment->translation_size();

   WriteTranslation(environment->outer(), translation);
   WriteTranslationFrame(environment, translation);

   int object_index = 0;
   int dematerialized_index = 0;
   for (int i = 0; i < translation_size; ++i) {
     LOperand* value = environment->values()->at(i);
     AddToTranslation(
         environment, translation, value, environment->HasTaggedValueAt(i),
         environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
   }
 }

-
 void LCodeGen::AddToTranslation(LEnvironment* environment,
                                 Translation* translation, LOperand* op,
                                 bool is_tagged, bool is_uint32,
                                 int* object_index_pointer,
                                 int* dematerialized_index_pointer) {
   if (op == LEnvironment::materialization_marker()) {
     int object_index = (*object_index_pointer)++;
     if (environment->ObjectIsDuplicateAt(object_index)) {
       int dupe_of = environment->ObjectDuplicateOfAt(object_index);
       translation->DuplicateObject(dupe_of);
(...skipping 44 matching lines...)
     translation->StoreDoubleRegister(reg);
   } else if (op->IsConstantOperand()) {
     HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
     int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
     translation->StoreLiteral(src_index);
   } else {
     UNREACHABLE();
   }
 }

-
 void LCodeGen::CallCode(Handle<Code> code, RelocInfo::Mode mode,
                         LInstruction* instr) {
   CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
 }

-
 void LCodeGen::CallCodeGeneric(Handle<Code> code, RelocInfo::Mode mode,
                                LInstruction* instr,
                                SafepointMode safepoint_mode) {
   DCHECK(instr != NULL);
   __ Call(code, mode);
   RecordSafepointWithLazyDeopt(instr, safepoint_mode);

   // Signal that we don't inline smi code before these stubs in the
   // optimizing code generator.
   if (code->kind() == Code::BINARY_OP_IC || code->kind() == Code::COMPARE_IC) {
     __ nop();
   }
 }

-
 void LCodeGen::CallRuntime(const Runtime::Function* function, int num_arguments,
                            LInstruction* instr, SaveFPRegsMode save_doubles) {
   DCHECK(instr != NULL);

   __ CallRuntime(function, num_arguments, save_doubles);

   RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
 }

-
 void LCodeGen::LoadContextFromDeferred(LOperand* context) {
   if (context->IsRegister()) {
     __ Move(cp, ToRegister(context));
   } else if (context->IsStackSlot()) {
     __ LoadP(cp, ToMemOperand(context));
   } else if (context->IsConstantOperand()) {
     HConstant* constant =
         chunk_->LookupConstant(LConstantOperand::cast(context));
     __ Move(cp, Handle<Object>::cast(constant->handle(isolate())));
   } else {
     UNREACHABLE();
   }
 }

-
 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, int argc,
                                        LInstruction* instr, LOperand* context) {
   LoadContextFromDeferred(context);
   __ CallRuntimeSaveDoubles(id);
   RecordSafepointWithRegisters(instr->pointer_map(), argc,
                                Safepoint::kNoLazyDeopt);
 }

-
 void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                     Safepoint::DeoptMode mode) {
   environment->set_has_been_used();
   if (!environment->HasBeenRegistered()) {
     // Physical stack frame layout:
     // -x ............. -4 0 ..................................... y
     // [incoming arguments] [spill slots] [pushed outgoing arguments]

     // Layout of the environment:
     // 0 ..................................................... size-1
(...skipping 15 matching lines...)
     Translation translation(&translations_, frame_count, jsframe_count, zone());
     WriteTranslation(environment, &translation);
     int deoptimization_index = deoptimizations_.length();
     int pc_offset = masm()->pc_offset();
     environment->Register(deoptimization_index, translation.index(),
                           (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
     deoptimizations_.Add(environment, zone());
   }
 }

-
 void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
                             Deoptimizer::DeoptReason deopt_reason,
                             Deoptimizer::BailoutType bailout_type,
                             CRegister cr) {
   LEnvironment* environment = instr->environment();
   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
   DCHECK(environment->HasBeenRegistered());
   int id = environment->deoptimization_index();
   Address entry =
       Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
   if (entry == NULL) {
     Abort(kBailoutWasNotPrepared);
     return;
   }

   if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
-    CRegister alt_cr = cr6;
     Register scratch = scratch0();
     ExternalReference count = ExternalReference::stress_deopt_count(isolate());
     Label no_deopt;
-    DCHECK(!alt_cr.is(cr));
-    __ Push(r4, scratch);
+
+    // Store the condition on the stack if necessary
+    if (cond != al) {
+      Label done;
+      __ LoadImmP(scratch, Operand::Zero());
+      __ b(NegateCondition(cond), &done, Label::kNear);
+      __ LoadImmP(scratch, Operand(1));
+      __ bind(&done);
+      __ push(scratch);
+    }
+
+    Label done;
+    __ Push(r3);
     __ mov(scratch, Operand(count));
-    __ lwz(r4, MemOperand(scratch));
-    __ subi(r4, r4, Operand(1));
-    __ cmpi(r4, Operand::Zero(), alt_cr);
-    __ bne(&no_deopt, alt_cr);
-    __ li(r4, Operand(FLAG_deopt_every_n_times));
-    __ stw(r4, MemOperand(scratch));
-    __ Pop(r4, scratch);
+    __ LoadW(r3, MemOperand(scratch));
+    __ Sub32(r3, r3, Operand(1));
+    __ Cmp32(r3, Operand::Zero());
+    __ bne(&no_deopt, Label::kNear);
+
+    __ LoadImmP(r3, Operand(FLAG_deopt_every_n_times));
+    __ StoreW(r3, MemOperand(scratch));
+    __ Pop(r3);
+
+    if (cond != al) {
+      // Clean up the stack before the deoptimizer call
+      __ pop(scratch);
+    }

     __ Call(entry, RelocInfo::RUNTIME_ENTRY);
+
+    __ b(&done);
+
     __ bind(&no_deopt);
-    __ stw(r4, MemOperand(scratch));
-    __ Pop(r4, scratch);
+    __ StoreW(r3, MemOperand(scratch));
+    __ Pop(r3);
+
+    if (cond != al) {
+      // Clean up the stack before the deoptimizer call
+      __ pop(scratch);
+    }
+
+    __ bind(&done);
+
+    if (cond != al) {
+      cond = ne;
+      __ CmpP(scratch, Operand::Zero());
+    }
   }

   if (info()->ShouldTrapOnDeopt()) {
     __ stop("trap_on_deopt", cond, kDefaultStopCode, cr);
   }

   Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);

   DCHECK(info()->IsStub() || frame_is_built_);
   // Go through jump table if we need to handle condition, build frame, or
   // restore caller doubles.
   if (cond == al && frame_is_built_ && !info()->saves_caller_doubles()) {
-    DeoptComment(deopt_info);
     __ Call(entry, RelocInfo::RUNTIME_ENTRY);
     info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
   } else {
     Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
                                             !frame_is_built_);
     // We often have several deopts to the same entry, reuse the last
     // jump entry if this is the case.
     if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
         jump_table_.is_empty() ||
         !table_entry.IsEquivalentTo(jump_table_.last())) {
       jump_table_.Add(table_entry, zone());
     }
-    __ b(cond, &jump_table_.last().label, cr);
+    __ b(cond, &jump_table_.last().label /*, cr*/);
   }
 }

-
-void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
+void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
                             Deoptimizer::DeoptReason deopt_reason,
                             CRegister cr) {
   Deoptimizer::BailoutType bailout_type =
       info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
-  DeoptimizeIf(condition, instr, deopt_reason, bailout_type, cr);
+  DeoptimizeIf(cond, instr, deopt_reason, bailout_type, cr);
 }
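
With --deopt-every-n-times, the generated code decrements a per-isolate counter and forces a deoptimization whenever it hits zero, re-arming it with the flag's value; the S390 version also spills the incoming condition to the stack first, because the counter arithmetic clobbers the condition code. A host-side C++ model of just the counter logic (the struct and names are illustrative, not V8 API):

// 'count' stands in for the cell behind ExternalReference::stress_deopt_count.
struct StressDeoptCounter {
  int count;          // current countdown value
  const int every_n;  // FLAG_deopt_every_n_times

  // Returns true when this deopt point must fire unconditionally.
  bool ShouldForceDeopt() {
    if (--count == 0) {
      count = every_n;  // re-arm for the next window
      return true;
    }
    return false;  // the decremented value is stored back; no forced deopt
  }
};

int main() {
  StressDeoptCounter c{3, 3};
  bool forced = false;
  // Every third check fires: false, false, true.
  for (int i = 0; i < 3; i++) forced = c.ShouldForceDeopt();
  return forced ? 0 : 1;
}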
791 779
792
793 void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr, 780 void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
794 SafepointMode safepoint_mode) { 781 SafepointMode safepoint_mode) {
795 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) { 782 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
796 RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt); 783 RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
797 } else { 784 } else {
798 DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); 785 DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
799 RecordSafepointWithRegisters(instr->pointer_map(), 0, 786 RecordSafepointWithRegisters(instr->pointer_map(), 0,
800 Safepoint::kLazyDeopt); 787 Safepoint::kLazyDeopt);
801 } 788 }
802 } 789 }
803 790
804
805 void LCodeGen::RecordSafepoint(LPointerMap* pointers, Safepoint::Kind kind, 791 void LCodeGen::RecordSafepoint(LPointerMap* pointers, Safepoint::Kind kind,
806 int arguments, Safepoint::DeoptMode deopt_mode) { 792 int arguments, Safepoint::DeoptMode deopt_mode) {
807 DCHECK(expected_safepoint_kind_ == kind); 793 DCHECK(expected_safepoint_kind_ == kind);
808 794
809 const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands(); 795 const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
810 Safepoint safepoint = 796 Safepoint safepoint =
811 safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode); 797 safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
812 for (int i = 0; i < operands->length(); i++) { 798 for (int i = 0; i < operands->length(); i++) {
813 LOperand* pointer = operands->at(i); 799 LOperand* pointer = operands->at(i);
814 if (pointer->IsStackSlot()) { 800 if (pointer->IsStackSlot()) {
815 safepoint.DefinePointerSlot(pointer->index(), zone()); 801 safepoint.DefinePointerSlot(pointer->index(), zone());
816 } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) { 802 } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
817 safepoint.DefinePointerRegister(ToRegister(pointer), zone()); 803 safepoint.DefinePointerRegister(ToRegister(pointer), zone());
818 } 804 }
819 } 805 }
820 } 806 }
821 807
822
823 void LCodeGen::RecordSafepoint(LPointerMap* pointers, 808 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
824 Safepoint::DeoptMode deopt_mode) { 809 Safepoint::DeoptMode deopt_mode) {
825 RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode); 810 RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
826 } 811 }
827 812
828
829 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) { 813 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
830 LPointerMap empty_pointers(zone()); 814 LPointerMap empty_pointers(zone());
831 RecordSafepoint(&empty_pointers, deopt_mode); 815 RecordSafepoint(&empty_pointers, deopt_mode);
832 } 816 }
833 817
834
835 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers, 818 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
836 int arguments, 819 int arguments,
837 Safepoint::DeoptMode deopt_mode) { 820 Safepoint::DeoptMode deopt_mode) {
838 RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode); 821 RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
839 } 822 }
840 823
841
842 void LCodeGen::RecordAndWritePosition(int position) { 824 void LCodeGen::RecordAndWritePosition(int position) {
843 if (position == RelocInfo::kNoPosition) return; 825 if (position == RelocInfo::kNoPosition) return;
844 masm()->positions_recorder()->RecordPosition(position); 826 masm()->positions_recorder()->RecordPosition(position);
845 masm()->positions_recorder()->WriteRecordedPositions(); 827 masm()->positions_recorder()->WriteRecordedPositions();
846 } 828 }
847 829
848
849 static const char* LabelType(LLabel* label) { 830 static const char* LabelType(LLabel* label) {
850 if (label->is_loop_header()) return " (loop header)"; 831 if (label->is_loop_header()) return " (loop header)";
851 if (label->is_osr_entry()) return " (OSR entry)"; 832 if (label->is_osr_entry()) return " (OSR entry)";
852 return ""; 833 return "";
853 } 834 }
854 835
855
856 void LCodeGen::DoLabel(LLabel* label) { 836 void LCodeGen::DoLabel(LLabel* label) {
857 Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------", 837 Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
858 current_instruction_, label->hydrogen_value()->id(), 838 current_instruction_, label->hydrogen_value()->id(),
859 label->block_id(), LabelType(label)); 839 label->block_id(), LabelType(label));
860 __ bind(label->label()); 840 __ bind(label->label());
861 current_block_ = label->block_id(); 841 current_block_ = label->block_id();
862 DoGap(label); 842 DoGap(label);
863 } 843 }
864 844
865
866 void LCodeGen::DoParallelMove(LParallelMove* move) { resolver_.Resolve(move); } 845 void LCodeGen::DoParallelMove(LParallelMove* move) { resolver_.Resolve(move); }
867 846
868
869 void LCodeGen::DoGap(LGap* gap) { 847 void LCodeGen::DoGap(LGap* gap) {
870 for (int i = LGap::FIRST_INNER_POSITION; i <= LGap::LAST_INNER_POSITION; 848 for (int i = LGap::FIRST_INNER_POSITION; i <= LGap::LAST_INNER_POSITION;
871 i++) { 849 i++) {
872 LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i); 850 LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
873 LParallelMove* move = gap->GetParallelMove(inner_pos); 851 LParallelMove* move = gap->GetParallelMove(inner_pos);
874 if (move != NULL) DoParallelMove(move); 852 if (move != NULL) DoParallelMove(move);
875 } 853 }
876 } 854 }
877 855
878
879 void LCodeGen::DoInstructionGap(LInstructionGap* instr) { DoGap(instr); } 856 void LCodeGen::DoInstructionGap(LInstructionGap* instr) { DoGap(instr); }
880 857
881
882 void LCodeGen::DoParameter(LParameter* instr) { 858 void LCodeGen::DoParameter(LParameter* instr) {
883 // Nothing to do. 859 // Nothing to do.
884 } 860 }
885 861
886
887 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) { 862 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
888 GenerateOsrPrologue(); 863 GenerateOsrPrologue();
889 } 864 }
890 865
891
892 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) { 866 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
893 Register dividend = ToRegister(instr->dividend()); 867 Register dividend = ToRegister(instr->dividend());
894 int32_t divisor = instr->divisor(); 868 int32_t divisor = instr->divisor();
895 DCHECK(dividend.is(ToRegister(instr->result()))); 869 DCHECK(dividend.is(ToRegister(instr->result())));
896 870
897 // Theoretically, a variation of the branch-free code for integer division by 871 // Theoretically, a variation of the branch-free code for integer division by
898 // a power of 2 (calculating the remainder via an additional multiplication 872 // a power of 2 (calculating the remainder via an additional multiplication
899 // (which gets simplified to an 'and') and subtraction) should be faster, and 873 // (which gets simplified to an 'and') and subtraction) should be faster, and
900 // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to 874 // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
901 // indicate that positive dividends are heavily favored, so the branching 875 // indicate that positive dividends are heavily favored, so the branching
902 // version performs better. 876 // version performs better.
903 HMod* hmod = instr->hydrogen(); 877 HMod* hmod = instr->hydrogen();
904 int32_t shift = WhichPowerOf2Abs(divisor); 878 int32_t shift = WhichPowerOf2Abs(divisor);
905 Label dividend_is_not_negative, done; 879 Label dividend_is_not_negative, done;
906 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { 880 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
907 __ cmpwi(dividend, Operand::Zero()); 881 __ CmpP(dividend, Operand::Zero());
908 __ bge(&dividend_is_not_negative); 882 __ bge(&dividend_is_not_negative, Label::kNear);
909 if (shift) { 883 if (shift) {
910 // Note that this is correct even for kMinInt operands. 884 // Note that this is correct even for kMinInt operands.
911 __ neg(dividend, dividend); 885 __ LoadComplementRR(dividend, dividend);
912 __ ExtractBitRange(dividend, dividend, shift - 1, 0); 886 __ ExtractBitRange(dividend, dividend, shift - 1, 0);
913 __ neg(dividend, dividend, LeaveOE, SetRC); 887 __ LoadComplementRR(dividend, dividend);
914 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { 888 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
915 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, cr0); 889 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
916 } 890 }
917 } else if (!hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { 891 } else if (!hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
918 __ li(dividend, Operand::Zero()); 892 __ mov(dividend, Operand::Zero());
919 } else { 893 } else {
920 DeoptimizeIf(al, instr, Deoptimizer::kMinusZero); 894 DeoptimizeIf(al, instr, Deoptimizer::kMinusZero);
921 } 895 }
922 __ b(&done); 896 __ b(&done, Label::kNear);
923 } 897 }
924 898
925 __ bind(&dividend_is_not_negative); 899 __ bind(&dividend_is_not_negative);
926 if (shift) { 900 if (shift) {
927 __ ExtractBitRange(dividend, dividend, shift - 1, 0); 901 __ ExtractBitRange(dividend, dividend, shift - 1, 0);
928 } else { 902 } else {
929 __ li(dividend, Operand::Zero()); 903 __ mov(dividend, Operand::Zero());
930 } 904 }
931 __ bind(&done); 905 __ bind(&done);
932 } 906 }
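For reference, the branching power-of-2 modulus above reduces to the following scalar logic; this is a plain C++ sketch with an illustrative name, using unsigned negation so the kMinInt case stays well defined:

  int32_t ModByPowerOf2(int32_t dividend, int shift) {
    uint32_t mask = (1u << shift) - 1;  // shift == WhichPowerOf2Abs(divisor)
    uint32_t n = static_cast<uint32_t>(dividend);
    if (dividend < 0) {
      // negate, mask, negate back: the LoadComplementRR / ExtractBitRange path
      return -static_cast<int32_t>((0u - n) & mask);
    }
    return static_cast<int32_t>(n & mask);  // the non-negative fast path
  }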
933 907
934
935 void LCodeGen::DoModByConstI(LModByConstI* instr) { 908 void LCodeGen::DoModByConstI(LModByConstI* instr) {
936 Register dividend = ToRegister(instr->dividend()); 909 Register dividend = ToRegister(instr->dividend());
937 int32_t divisor = instr->divisor(); 910 int32_t divisor = instr->divisor();
938 Register result = ToRegister(instr->result()); 911 Register result = ToRegister(instr->result());
939 DCHECK(!dividend.is(result)); 912 DCHECK(!dividend.is(result));
940 913
941 if (divisor == 0) { 914 if (divisor == 0) {
942 DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero); 915 DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
943 return; 916 return;
944 } 917 }
945 918
946 __ TruncatingDiv(result, dividend, Abs(divisor)); 919 __ TruncatingDiv(result, dividend, Abs(divisor));
947 __ mov(ip, Operand(Abs(divisor))); 920 __ mov(ip, Operand(Abs(divisor)));
948 __ mullw(result, result, ip); 921 __ Mul(result, result, ip);
949 __ sub(result, dividend, result, LeaveOE, SetRC); 922 __ SubP(result, dividend, result /*, LeaveOE, SetRC*/);
950 923
951 // Check for negative zero. 924 // Check for negative zero.
952 HMod* hmod = instr->hydrogen(); 925 HMod* hmod = instr->hydrogen();
953 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { 926 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
954 Label remainder_not_zero; 927 Label remainder_not_zero;
955 __ bne(&remainder_not_zero, cr0); 928 __ bne(&remainder_not_zero, Label::kNear /*, cr0*/);
956 __ cmpwi(dividend, Operand::Zero()); 929 __ Cmp32(dividend, Operand::Zero());
957 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero); 930 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
958 __ bind(&remainder_not_zero); 931 __ bind(&remainder_not_zero);
959 } 932 }
960 } 933 }
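The constant-modulus sequence above relies on the identity n mod d == n - trunc(n / |d|) * |d| for truncated (sign-of-dividend) remainders. A minimal C++ sketch, with an illustrative name and the magic-number multiply inside TruncatingDiv elided:

  int32_t ModByConst(int32_t n, int32_t d) {  // d != 0 is checked above
    int32_t ad = d < 0 ? -d : d;  // Abs(divisor); assumes d != kMinInt
    int32_t q = n / ad;           // stands in for TruncatingDiv
    return n - q * ad;            // the Mul + SubP pair above
  }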
961 934
962
963 void LCodeGen::DoModI(LModI* instr) { 935 void LCodeGen::DoModI(LModI* instr) {
964 HMod* hmod = instr->hydrogen(); 936 HMod* hmod = instr->hydrogen();
965 Register left_reg = ToRegister(instr->left()); 937 Register left_reg = ToRegister(instr->left());
966 Register right_reg = ToRegister(instr->right()); 938 Register right_reg = ToRegister(instr->right());
967 Register result_reg = ToRegister(instr->result()); 939 Register result_reg = ToRegister(instr->result());
968 Register scratch = scratch0();
969 bool can_overflow = hmod->CheckFlag(HValue::kCanOverflow);
970 Label done; 940 Label done;
971 941
972 if (can_overflow) {
973 __ li(r0, Operand::Zero()); // clear xer
974 __ mtxer(r0);
975 }
976
977 __ divw(scratch, left_reg, right_reg, SetOE, SetRC);
978
979 // Check for x % 0. 942 // Check for x % 0.
980 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { 943 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
981 __ cmpwi(right_reg, Operand::Zero()); 944 __ Cmp32(right_reg, Operand::Zero());
982 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero); 945 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
983 } 946 }
984 947
985 // Check for kMinInt % -1, divw will return undefined, which is not what we 948 // Check for kMinInt % -1; dr will return undefined, which is not what we
986 // want. We have to deopt if we care about -0, because we can't return that. 949 // want. We have to deopt if we care about -0, because we can't return that.
987 if (can_overflow) { 950 if (hmod->CheckFlag(HValue::kCanOverflow)) {
951 Label no_overflow_possible;
952 __ Cmp32(left_reg, Operand(kMinInt));
953 __ bne(&no_overflow_possible, Label::kNear);
954 __ Cmp32(right_reg, Operand(-1));
988 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { 955 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
989 DeoptimizeIf(overflow, instr, Deoptimizer::kMinusZero, cr0); 956 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
990 } else { 957 } else {
991 if (CpuFeatures::IsSupported(ISELECT)) { 958 __ b(ne, &no_overflow_possible, Label::kNear);
992 __ isel(overflow, result_reg, r0, result_reg, cr0); 959 __ mov(result_reg, Operand::Zero());
993 __ boverflow(&done, cr0); 960 __ b(&done, Label::kNear);
994 } else {
995 Label no_overflow_possible;
996 __ bnooverflow(&no_overflow_possible, cr0);
997 __ li(result_reg, Operand::Zero());
998 __ b(&done);
999 __ bind(&no_overflow_possible);
1000 }
1001 } 961 }
962 __ bind(&no_overflow_possible);
1002 } 963 }
1003 964
1004 __ mullw(scratch, right_reg, scratch); 965 // Divide instruction dr will implicitly use register pair
1005 __ sub(result_reg, left_reg, scratch, LeaveOE, SetRC); 966 // r0 & r1 below.
967 DCHECK(!left_reg.is(r1));
968 DCHECK(!right_reg.is(r1));
969 DCHECK(!result_reg.is(r1));
970 __ LoadRR(r0, left_reg);
971 __ srda(r0, Operand(32));
972 __ dr(r0, right_reg); // Divide R0:R1 by divisor; R0 = remainder
973
974 __ LoadAndTestP_ExtendSrc(result_reg, r0); // Copy remainder to result reg
1006 975
1007 // If we care about -0, test if the dividend is <0 and the result is 0. 976 // If we care about -0, test if the dividend is <0 and the result is 0.
1008 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { 977 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1009 __ bne(&done, cr0); 978 __ bne(&done, Label::kNear);
1010 __ cmpwi(left_reg, Operand::Zero()); 979 __ Cmp32(left_reg, Operand::Zero());
1011 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero); 980 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
1012 } 981 }
1013 982
1014 __ bind(&done); 983 __ bind(&done);
1015 } 984 }
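The LoadRR/srda/dr triple above is the classic s390 32-bit divide idiom: srda shifts the dividend arithmetically right 32 bits through the even/odd pair r0:r1 (sign-extending it to 64 bits), and dr divides the pair, leaving the remainder in r0 and the quotient in r1. In C++ terms (a sketch with illustrative names; the guards above exclude divisor == 0 and kMinInt / -1):

  void DivRem32(int32_t dividend, int32_t divisor,
                int32_t* quotient, int32_t* remainder) {
    int64_t pair = static_cast<int64_t>(dividend);      // LoadRR + srda(32)
    *quotient = static_cast<int32_t>(pair / divisor);   // dr: quotient in r1
    *remainder = static_cast<int32_t>(pair % divisor);  // dr: remainder in r0
  }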
1016 985
1017
1018 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { 986 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
1019 Register dividend = ToRegister(instr->dividend()); 987 Register dividend = ToRegister(instr->dividend());
1020 int32_t divisor = instr->divisor(); 988 int32_t divisor = instr->divisor();
1021 Register result = ToRegister(instr->result()); 989 Register result = ToRegister(instr->result());
1022 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); 990 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
1023 DCHECK(!result.is(dividend)); 991 DCHECK(!result.is(dividend));
1024 992
1025 // Check for (0 / -x) that will produce negative zero. 993 // Check for (0 / -x) that will produce negative zero.
1026 HDiv* hdiv = instr->hydrogen(); 994 HDiv* hdiv = instr->hydrogen();
1027 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { 995 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1028 __ cmpwi(dividend, Operand::Zero()); 996 __ Cmp32(dividend, Operand::Zero());
1029 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); 997 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
1030 } 998 }
1031 // Check for (kMinInt / -1). 999 // Check for (kMinInt / -1).
1032 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { 1000 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
1033 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000))); 1001 __ Cmp32(dividend, Operand(0x80000000));
1034 __ cmpw(dividend, r0);
1035 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow); 1002 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
1036 } 1003 }
1037 1004
1038 int32_t shift = WhichPowerOf2Abs(divisor); 1005 int32_t shift = WhichPowerOf2Abs(divisor);
1039 1006
1040 // Deoptimize if remainder will not be 0. 1007 // Deoptimize if remainder will not be 0.
1041 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && shift) { 1008 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && shift) {
1042 __ TestBitRange(dividend, shift - 1, 0, r0); 1009 __ TestBitRange(dividend, shift - 1, 0, r0);
1043 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, cr0); 1010 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, cr0);
1044 } 1011 }
1045 1012
1046 if (divisor == -1) { // Nice shortcut, not needed for correctness. 1013 if (divisor == -1) { // Nice shortcut, not needed for correctness.
1047 __ neg(result, dividend); 1014 __ LoadComplementRR(result, dividend);
1048 return; 1015 return;
1049 } 1016 }
1050 if (shift == 0) { 1017 if (shift == 0) {
1051 __ mr(result, dividend); 1018 __ LoadRR(result, dividend);
1052 } else { 1019 } else {
1053 if (shift == 1) { 1020 if (shift == 1) {
1054 __ srwi(result, dividend, Operand(31)); 1021 __ ShiftRight(result, dividend, Operand(31));
1055 } else { 1022 } else {
1056 __ srawi(result, dividend, 31); 1023 __ ShiftRightArith(result, dividend, Operand(31));
1057 __ srwi(result, result, Operand(32 - shift)); 1024 __ ShiftRight(result, result, Operand(32 - shift));
1058 } 1025 }
1059 __ add(result, dividend, result); 1026 __ AddP(result, dividend, result);
1060 __ srawi(result, result, shift); 1027 __ ShiftRightArith(result, result, Operand(shift));
1028 #if V8_TARGET_ARCH_S390X
1029 __ lgfr(result, result);
1030 #endif
1061 } 1031 }
1062 if (divisor < 0) __ neg(result, result); 1032 if (divisor < 0) __ LoadComplementRR(result, result);
1063 } 1033 }
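The shift sequence above is the usual round-toward-zero bias: an arithmetic right shift floors, so a negative dividend first gets 2^shift - 1 added, built branch-free from its sign bit. A C++ sketch of the general shift >= 1 path (illustrative name; assumes >> on a negative int is an arithmetic shift, as on the targets here), with the divisor's sign fixed afterwards exactly like the final LoadComplementRR:

  int32_t DivByPowerOf2(int32_t n, int shift) {  // |divisor| == 2^shift
    int32_t sign = n >> 31;                      // ShiftRightArith(31)
    int32_t bias = static_cast<int32_t>(
        static_cast<uint32_t>(sign) >> (32 - shift));  // ShiftRight(32 - shift)
    return (n + bias) >> shift;                  // AddP + ShiftRightArith
  }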
1064 1034
1065
1066 void LCodeGen::DoDivByConstI(LDivByConstI* instr) { 1035 void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
1067 Register dividend = ToRegister(instr->dividend()); 1036 Register dividend = ToRegister(instr->dividend());
1068 int32_t divisor = instr->divisor(); 1037 int32_t divisor = instr->divisor();
1069 Register result = ToRegister(instr->result()); 1038 Register result = ToRegister(instr->result());
1070 DCHECK(!dividend.is(result)); 1039 DCHECK(!dividend.is(result));
1071 1040
1072 if (divisor == 0) { 1041 if (divisor == 0) {
1073 DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero); 1042 DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
1074 return; 1043 return;
1075 } 1044 }
1076 1045
1077 // Check for (0 / -x) that will produce negative zero. 1046 // Check for (0 / -x) that will produce negative zero.
1078 HDiv* hdiv = instr->hydrogen(); 1047 HDiv* hdiv = instr->hydrogen();
1079 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { 1048 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1080 __ cmpwi(dividend, Operand::Zero()); 1049 __ Cmp32(dividend, Operand::Zero());
1081 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); 1050 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
1082 } 1051 }
1083 1052
1084 __ TruncatingDiv(result, dividend, Abs(divisor)); 1053 __ TruncatingDiv(result, dividend, Abs(divisor));
1085 if (divisor < 0) __ neg(result, result); 1054 if (divisor < 0) __ LoadComplementRR(result, result);
1086 1055
1087 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { 1056 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
1088 Register scratch = scratch0(); 1057 Register scratch = scratch0();
1089 __ mov(ip, Operand(divisor)); 1058 __ mov(ip, Operand(divisor));
1090 __ mullw(scratch, result, ip); 1059 __ Mul(scratch, result, ip);
1091 __ cmpw(scratch, dividend); 1060 __ Cmp32(scratch, dividend);
1092 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision); 1061 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
1093 } 1062 }
1094 } 1063 }
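The kLostPrecision check above just multiplies the truncated quotient back and compares. As a predicate (a sketch; the product is widened to avoid C++ overflow UB, while the hardware compare uses the low 32 bits):

  bool DivByConstLosesPrecision(int32_t dividend, int32_t divisor,
                                int32_t quotient) {
    return static_cast<int64_t>(quotient) * divisor != dividend;  // Mul + Cmp32
  }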
1095 1064
1096
1097 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. 1065 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
1098 void LCodeGen::DoDivI(LDivI* instr) { 1066 void LCodeGen::DoDivI(LDivI* instr) {
1099 HBinaryOperation* hdiv = instr->hydrogen(); 1067 HBinaryOperation* hdiv = instr->hydrogen();
1100 const Register dividend = ToRegister(instr->dividend()); 1068 const Register dividend = ToRegister(instr->dividend());
1101 const Register divisor = ToRegister(instr->divisor()); 1069 const Register divisor = ToRegister(instr->divisor());
1102 Register result = ToRegister(instr->result()); 1070 Register result = ToRegister(instr->result());
1103 bool can_overflow = hdiv->CheckFlag(HValue::kCanOverflow);
1104 1071
1105 DCHECK(!dividend.is(result)); 1072 DCHECK(!dividend.is(result));
1106 DCHECK(!divisor.is(result)); 1073 DCHECK(!divisor.is(result));
1107 1074
1108 if (can_overflow) {
1109 __ li(r0, Operand::Zero()); // clear xer
1110 __ mtxer(r0);
1111 }
1112
1113 __ divw(result, dividend, divisor, SetOE, SetRC);
1114
1115 // Check for x / 0. 1075 // Check for x / 0.
1116 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { 1076 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1117 __ cmpwi(divisor, Operand::Zero()); 1077 __ Cmp32(divisor, Operand::Zero());
1118 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero); 1078 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
1119 } 1079 }
1120 1080
1121 // Check for (0 / -x) that will produce negative zero. 1081 // Check for (0 / -x) that will produce negative zero.
1122 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { 1082 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1123 Label dividend_not_zero; 1083 Label dividend_not_zero;
1124 __ cmpwi(dividend, Operand::Zero()); 1084 __ Cmp32(dividend, Operand::Zero());
1125 __ bne(&dividend_not_zero); 1085 __ bne(&dividend_not_zero, Label::kNear);
1126 __ cmpwi(divisor, Operand::Zero()); 1086 __ Cmp32(divisor, Operand::Zero());
1127 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero); 1087 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
1128 __ bind(&dividend_not_zero); 1088 __ bind(&dividend_not_zero);
1129 } 1089 }
1130 1090
1131 // Check for (kMinInt / -1). 1091 // Check for (kMinInt / -1).
1132 if (can_overflow) { 1092 if (hdiv->CheckFlag(HValue::kCanOverflow)) {
1133 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { 1093 Label dividend_not_min_int;
1134 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0); 1094 __ Cmp32(dividend, Operand(kMinInt));
1135 } else { 1095 __ bne(&dividend_not_min_int, Label::kNear);
1136 // When truncating, we want kMinInt / -1 = kMinInt. 1096 __ Cmp32(divisor, Operand(-1));
1137 if (CpuFeatures::IsSupported(ISELECT)) { 1097 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
1138 __ isel(overflow, result, dividend, result, cr0); 1098 __ bind(&dividend_not_min_int);
1139 } else {
1140 Label no_overflow_possible;
1141 __ bnooverflow(&no_overflow_possible, cr0);
1142 __ mr(result, dividend);
1143 __ bind(&no_overflow_possible);
1144 }
1145 }
1146 } 1099 }
1147 1100
1101 __ LoadRR(r0, dividend);
1102 __ srda(r0, Operand(32));
1103 __ dr(r0, divisor); // Divide R0:R1 by divisor; R0 = remainder, R1 = quotient
1104
1105 __ LoadAndTestP_ExtendSrc(result, r1); // Move quotient to result register
1106
1148 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { 1107 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
1149 // Deoptimize if remainder is not 0. 1108 // Deoptimize if remainder is not 0.
1150 Register scratch = scratch0(); 1109 __ Cmp32(r0, Operand::Zero());
1151 __ mullw(scratch, divisor, result);
1152 __ cmpw(dividend, scratch);
1153 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision); 1110 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
1154 } 1111 }
1155 } 1112 }
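Taken together, the guards above deoptimize exactly when int32 division cannot produce a representable, precise result. A C++ predicate capturing them (a sketch; the flag parameters mirror the hydrogen flags tested above):

  bool DivIWouldDeopt(int32_t dividend, int32_t divisor,
                      bool bailout_on_minus_zero, bool all_uses_truncating) {
    if (divisor == 0) return true;                              // x / 0
    if (bailout_on_minus_zero && dividend == 0 && divisor < 0) return true;
    if (dividend == INT32_MIN && divisor == -1) return true;    // kMinInt / -1
    return !all_uses_truncating && dividend % divisor != 0;     // precision
  }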
1156 1113
1157
1158 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) { 1114 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
1159 HBinaryOperation* hdiv = instr->hydrogen(); 1115 HBinaryOperation* hdiv = instr->hydrogen();
1160 Register dividend = ToRegister(instr->dividend()); 1116 Register dividend = ToRegister(instr->dividend());
1161 Register result = ToRegister(instr->result()); 1117 Register result = ToRegister(instr->result());
1162 int32_t divisor = instr->divisor(); 1118 int32_t divisor = instr->divisor();
1163 bool can_overflow = hdiv->CheckFlag(HValue::kLeftCanBeMinInt); 1119 bool can_overflow = hdiv->CheckFlag(HValue::kLeftCanBeMinInt);
1164 1120
1165 // If the divisor is positive, things are easy: There can be no deopts and we 1121 // If the divisor is positive, things are easy: There can be no deopts and we
1166 // can simply do an arithmetic right shift. 1122 // can simply do an arithmetic right shift.
1167 int32_t shift = WhichPowerOf2Abs(divisor); 1123 int32_t shift = WhichPowerOf2Abs(divisor);
1168 if (divisor > 0) { 1124 if (divisor > 0) {
1169 if (shift || !result.is(dividend)) { 1125 if (shift || !result.is(dividend)) {
1170 __ srawi(result, dividend, shift); 1126 __ ShiftRightArith(result, dividend, Operand(shift));
1127 #if V8_TARGET_ARCH_S390X
1128 __ lgfr(result, result);
1129 #endif
1171 } 1130 }
1172 return; 1131 return;
1173 } 1132 }
1174 1133
1175 // If the divisor is negative, we have to negate and handle edge cases. 1134 // If the divisor is negative, we have to negate and handle edge cases.
1176 OEBit oe = LeaveOE; 1135 #if V8_TARGET_ARCH_S390X
1177 #if V8_TARGET_ARCH_PPC64
1178 if (divisor == -1 && can_overflow) { 1136 if (divisor == -1 && can_overflow) {
1179 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000))); 1137 __ Cmp32(dividend, Operand(0x80000000));
1180 __ cmpw(dividend, r0);
1181 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow); 1138 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
1182 } 1139 }
1183 #else
1184 if (can_overflow) {
1185 __ li(r0, Operand::Zero()); // clear xer
1186 __ mtxer(r0);
1187 oe = SetOE;
1188 }
1189 #endif 1140 #endif
1190 1141
1191 __ neg(result, dividend, oe, SetRC); 1142 __ LoadComplementRR(result, dividend);
1192 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { 1143 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1193 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, cr0); 1144 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, cr0);
1194 } 1145 }
1195 1146
1196 // If the negation could not overflow, simply shifting is OK. 1147 // If the negation could not overflow, simply shifting is OK.
1197 #if !V8_TARGET_ARCH_PPC64 1148 #if !V8_TARGET_ARCH_S390X
1198 if (!can_overflow) { 1149 if (!can_overflow) {
1199 #endif 1150 #endif
1200 if (shift) { 1151 if (shift) {
1201 __ ShiftRightArithImm(result, result, shift); 1152 __ ShiftRightArithP(result, result, Operand(shift));
1202 } 1153 }
1203 return; 1154 return;
1204 #if !V8_TARGET_ARCH_PPC64 1155 #if !V8_TARGET_ARCH_S390X
1205 } 1156 }
1206 1157
1207 // Dividing by -1 is basically negation, unless we overflow. 1158 // Dividing by -1 is basically negation, unless we overflow.
1208 if (divisor == -1) { 1159 if (divisor == -1) {
1209 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0); 1160 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
1210 return; 1161 return;
1211 } 1162 }
1212 1163
1213 Label overflow, done; 1164 Label overflow_label, done;
1214 __ boverflow(&overflow, cr0); 1165 __ b(overflow, &overflow_label, Label::kNear);
1215 __ srawi(result, result, shift); 1166 __ ShiftRightArith(result, result, Operand(shift));
1216 __ b(&done); 1167 #if V8_TARGET_ARCH_S390X
1217 __ bind(&overflow); 1168 __ lgfr(result, result);
1169 #endif
1170 __ b(&done, Label::kNear);
1171 __ bind(&overflow_label);
1218 __ mov(result, Operand(kMinInt / divisor)); 1172 __ mov(result, Operand(kMinInt / divisor));
1219 __ bind(&done); 1173 __ bind(&done);
1220 #endif 1174 #endif
1221 } 1175 }
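Why negate-then-shift suffices above: an arithmetic right shift already floors, and n / (-2^shift) equals (-n) / 2^shift exactly, so for a negative power-of-two divisor floor(n / divisor) == (-n) >> shift. A one-line sketch (illustrative name; assumes n != kMinInt, which the guards above handle, and an arithmetic >>):

  int32_t FlooringDivByNegPowerOf2(int32_t n, int shift) {  // divisor == -2^shift
    return -n >> shift;  // LoadComplementRR + ShiftRightArithP above
  }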
1222 1176
1223
1224 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { 1177 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
1225 Register dividend = ToRegister(instr->dividend()); 1178 Register dividend = ToRegister(instr->dividend());
1226 int32_t divisor = instr->divisor(); 1179 int32_t divisor = instr->divisor();
1227 Register result = ToRegister(instr->result()); 1180 Register result = ToRegister(instr->result());
1228 DCHECK(!dividend.is(result)); 1181 DCHECK(!dividend.is(result));
1229 1182
1230 if (divisor == 0) { 1183 if (divisor == 0) {
1231 DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero); 1184 DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
1232 return; 1185 return;
1233 } 1186 }
1234 1187
1235 // Check for (0 / -x) that will produce negative zero. 1188 // Check for (0 / -x) that will produce negative zero.
1236 HMathFloorOfDiv* hdiv = instr->hydrogen(); 1189 HMathFloorOfDiv* hdiv = instr->hydrogen();
1237 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { 1190 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1238 __ cmpwi(dividend, Operand::Zero()); 1191 __ Cmp32(dividend, Operand::Zero());
1239 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); 1192 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
1240 } 1193 }
1241 1194
1242 // Easy case: We need no dynamic check for the dividend and the flooring 1195 // Easy case: We need no dynamic check for the dividend and the flooring
1243 // division is the same as the truncating division. 1196 // division is the same as the truncating division.
1244 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || 1197 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
1245 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { 1198 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
1246 __ TruncatingDiv(result, dividend, Abs(divisor)); 1199 __ TruncatingDiv(result, dividend, Abs(divisor));
1247 if (divisor < 0) __ neg(result, result); 1200 if (divisor < 0) __ LoadComplementRR(result, result);
1248 return; 1201 return;
1249 } 1202 }
1250 1203
1251 // In the general case we may need to adjust before and after the truncating 1204 // In the general case we may need to adjust before and after the truncating
1252 // division to get a flooring division. 1205 // division to get a flooring division.
1253 Register temp = ToRegister(instr->temp()); 1206 Register temp = ToRegister(instr->temp());
1254 DCHECK(!temp.is(dividend) && !temp.is(result)); 1207 DCHECK(!temp.is(dividend) && !temp.is(result));
1255 Label needs_adjustment, done; 1208 Label needs_adjustment, done;
1256 __ cmpwi(dividend, Operand::Zero()); 1209 __ Cmp32(dividend, Operand::Zero());
1257 __ b(divisor > 0 ? lt : gt, &needs_adjustment); 1210 __ b(divisor > 0 ? lt : gt, &needs_adjustment);
1258 __ TruncatingDiv(result, dividend, Abs(divisor)); 1211 __ TruncatingDiv(result, dividend, Abs(divisor));
1259 if (divisor < 0) __ neg(result, result); 1212 if (divisor < 0) __ LoadComplementRR(result, result);
1260 __ b(&done); 1213 __ b(&done, Label::kNear);
1261 __ bind(&needs_adjustment); 1214 __ bind(&needs_adjustment);
1262 __ addi(temp, dividend, Operand(divisor > 0 ? 1 : -1)); 1215 __ AddP(temp, dividend, Operand(divisor > 0 ? 1 : -1));
1263 __ TruncatingDiv(result, temp, Abs(divisor)); 1216 __ TruncatingDiv(result, temp, Abs(divisor));
1264 if (divisor < 0) __ neg(result, result); 1217 if (divisor < 0) __ LoadComplementRR(result, result);
1265 __ subi(result, result, Operand(1)); 1218 __ SubP(result, result, Operand(1));
1266 __ bind(&done); 1219 __ bind(&done);
1267 } 1220 }
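The adjustment path above uses the identity floor(n/d) == trunc((n + s)/d) - 1 with s = 1 for d > 0 and s = -1 for d < 0, valid whenever n lies on the opposite side of zero from d. A C++ sketch (illustrative name; assumes d != 0 and d != kMinInt):

  int32_t FlooringDivByConst(int32_t n, int32_t d) {
    bool needs_adjustment = (d > 0) ? (n < 0) : (n > 0);
    if (!needs_adjustment) return n / d;  // truncation == flooring here
    int32_t t = n + (d > 0 ? 1 : -1);     // the AddP above
    return t / d - 1;                     // TruncatingDiv (+ negate) + SubP
  }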
1268 1221
1269
1270 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI. 1222 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
1271 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) { 1223 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
1272 HBinaryOperation* hdiv = instr->hydrogen(); 1224 HBinaryOperation* hdiv = instr->hydrogen();
1273 const Register dividend = ToRegister(instr->dividend()); 1225 const Register dividend = ToRegister(instr->dividend());
1274 const Register divisor = ToRegister(instr->divisor()); 1226 const Register divisor = ToRegister(instr->divisor());
1275 Register result = ToRegister(instr->result()); 1227 Register result = ToRegister(instr->result());
1276 bool can_overflow = hdiv->CheckFlag(HValue::kCanOverflow);
1277 1228
1278 DCHECK(!dividend.is(result)); 1229 DCHECK(!dividend.is(result));
1279 DCHECK(!divisor.is(result)); 1230 DCHECK(!divisor.is(result));
1280 1231
1281 if (can_overflow) {
1282 __ li(r0, Operand::Zero()); // clear xer
1283 __ mtxer(r0);
1284 }
1285
1286 __ divw(result, dividend, divisor, SetOE, SetRC);
1287
1288 // Check for x / 0. 1232 // Check for x / 0.
1289 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { 1233 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1290 __ cmpwi(divisor, Operand::Zero()); 1234 __ Cmp32(divisor, Operand::Zero());
1291 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero); 1235 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
1292 } 1236 }
1293 1237
1294 // Check for (0 / -x) that will produce negative zero. 1238 // Check for (0 / -x) that will produce negative zero.
1295 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { 1239 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1296 Label dividend_not_zero; 1240 Label dividend_not_zero;
1297 __ cmpwi(dividend, Operand::Zero()); 1241 __ Cmp32(dividend, Operand::Zero());
1298 __ bne(&dividend_not_zero); 1242 __ bne(&dividend_not_zero, Label::kNear);
1299 __ cmpwi(divisor, Operand::Zero()); 1243 __ Cmp32(divisor, Operand::Zero());
1300 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero); 1244 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
1301 __ bind(&dividend_not_zero); 1245 __ bind(&dividend_not_zero);
1302 } 1246 }
1303 1247
1304 // Check for (kMinInt / -1). 1248 // Check for (kMinInt / -1).
1305 if (can_overflow) { 1249 if (hdiv->CheckFlag(HValue::kCanOverflow)) {
1250 Label no_overflow_possible;
1251 __ Cmp32(dividend, Operand(kMinInt));
1252 __ bne(&no_overflow_possible, Label::kNear);
1253 __ Cmp32(divisor, Operand(-1));
1306 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { 1254 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1307 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0); 1255 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
1308 } else { 1256 } else {
1309 // When truncating, we want kMinInt / -1 = kMinInt. 1257 __ bne(&no_overflow_possible, Label::kNear);
1310 if (CpuFeatures::IsSupported(ISELECT)) { 1258 __ LoadRR(result, dividend);
1311 __ isel(overflow, result, dividend, result, cr0);
1312 } else {
1313 Label no_overflow_possible;
1314 __ bnooverflow(&no_overflow_possible, cr0);
1315 __ mr(result, dividend);
1316 __ bind(&no_overflow_possible);
1317 }
1318 } 1259 }
1260 __ bind(&no_overflow_possible);
1319 } 1261 }
1320 1262
1263 __ LoadRR(r0, dividend);
1264 __ srda(r0, Operand(32));
1265 __ dr(r0, divisor); // Divide R0:R1 by divisor; R0 = remainder, R1 = quotient
1266
1267 __ lr(result, r1); // Move quotient to result register
1268
1321 Label done; 1269 Label done;
1322 Register scratch = scratch0(); 1270 Register scratch = scratch0();
1323 // If both operands have the same sign then we are done. 1271 // If both operands have the same sign then we are done.
1324 #if V8_TARGET_ARCH_PPC64 1272 __ Xor(scratch, dividend, divisor);
1325 __ xor_(scratch, dividend, divisor); 1273 __ ltr(scratch, scratch); // use the 32-bit LoadAndTestRR (ltr) even on 64-bit
1326 __ cmpwi(scratch, Operand::Zero()); 1274 __ bge(&done, Label::kNear);
1327 __ bge(&done);
1328 #else
1329 __ xor_(scratch, dividend, divisor, SetRC);
1330 __ bge(&done, cr0);
1331 #endif
1332 1275
1333 // If there is no remainder then we are done. 1276 // If there is no remainder then we are done.
1334 __ mullw(scratch, divisor, result); 1277 __ lr(scratch, result);
1335 __ cmpw(dividend, scratch); 1278 __ msr(scratch, divisor);
1336 __ beq(&done); 1279 __ Cmp32(dividend, scratch);
1280 __ beq(&done, Label::kNear);
1337 1281
1338 // We performed a truncating division. Correct the result. 1282 // We performed a truncating division. Correct the result.
1339 __ subi(result, result, Operand(1)); 1283 __ SubP(result, result, Operand(1));
1340 __ bind(&done); 1284 __ bind(&done);
1341 } 1285 }
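The tail of the routine above turns the truncating quotient into a flooring one: it is one too large exactly when the operands' signs differ (the Xor/ltr test) and the division was inexact (the multiply-back compare). A C++ sketch (illustrative name; assumes the earlier guards already ran):

  int32_t FlooringDivI(int32_t n, int32_t d) {
    int32_t q = n / d;                // dr: truncating quotient
    if ((n ^ d) < 0 && q * d != n) {  // signs differ and remainder != 0
      q -= 1;                         // the SubP above
    }
    return q;
  }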
1342 1286
1343
1344 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) { 1287 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
1345 DoubleRegister addend = ToDoubleRegister(instr->addend()); 1288 DoubleRegister addend = ToDoubleRegister(instr->addend());
1346 DoubleRegister multiplier = ToDoubleRegister(instr->multiplier()); 1289 DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
1347 DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand()); 1290 DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
1348 DoubleRegister result = ToDoubleRegister(instr->result()); 1291 DoubleRegister result = ToDoubleRegister(instr->result());
1349 1292
1350 __ fmadd(result, multiplier, multiplicand, addend); 1293 // Unable to use madbr because its intermediate product is not rounded
1294 // to double precision.
1295 __ ldr(result, multiplier);
1296 __ mdbr(result, multiplicand);
1297 __ adbr(result, addend);
1351 } 1298 }
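The comment above is about rounding, not availability: madbr is a fused multiply-add, so the product would feed the add at full precision, which can differ in the last bit from the multiply-round-then-add-round result these instructions must produce. The emitted mdbr/adbr pair behaves like this sketch:

  double MultiplyAddD(double multiplier, double multiplicand, double addend) {
    double product = multiplier * multiplicand;  // mdbr: rounded to double
    return product + addend;                     // adbr: rounded again
  }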
1352 1299
1353
1354 void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) { 1300 void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
1355 DoubleRegister minuend = ToDoubleRegister(instr->minuend()); 1301 DoubleRegister minuend = ToDoubleRegister(instr->minuend());
1356 DoubleRegister multiplier = ToDoubleRegister(instr->multiplier()); 1302 DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
1357 DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand()); 1303 DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
1358 DoubleRegister result = ToDoubleRegister(instr->result()); 1304 DoubleRegister result = ToDoubleRegister(instr->result());
1359 1305
1360 __ fmsub(result, multiplier, multiplicand, minuend); 1306 // Unable to use msdbr because its intermediate product is not rounded
1307 // to double precision.
1308 __ ldr(result, multiplier);
1309 __ mdbr(result, multiplicand);
1310 __ sdbr(result, minuend);
1361 } 1311 }
1362 1312
1363
1364 void LCodeGen::DoMulI(LMulI* instr) { 1313 void LCodeGen::DoMulI(LMulI* instr) {
1365 Register scratch = scratch0(); 1314 Register scratch = scratch0();
1366 Register result = ToRegister(instr->result()); 1315 Register result = ToRegister(instr->result());
1367 // Note that result may alias left. 1316 // Note that result may alias left.
1368 Register left = ToRegister(instr->left()); 1317 Register left = ToRegister(instr->left());
1369 LOperand* right_op = instr->right(); 1318 LOperand* right_op = instr->right();
1370 1319
1371 bool bailout_on_minus_zero = 1320 bool bailout_on_minus_zero =
1372 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); 1321 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
1373 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 1322 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1374 1323
1375 if (right_op->IsConstantOperand()) { 1324 if (right_op->IsConstantOperand()) {
1376 int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); 1325 int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
1377 1326
1378 if (bailout_on_minus_zero && (constant < 0)) { 1327 if (bailout_on_minus_zero && (constant < 0)) {
1379 // The case of a null constant will be handled separately. 1328 // The case of a null constant will be handled separately.
1380 // If constant is negative and left is null, the result should be -0. 1329 // If constant is negative and left is null, the result should be -0.
1381 __ cmpi(left, Operand::Zero()); 1330 __ CmpP(left, Operand::Zero());
1382 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); 1331 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
1383 } 1332 }
1384 1333
1385 switch (constant) { 1334 switch (constant) {
1386 case -1: 1335 case -1:
1387 if (can_overflow) { 1336 if (can_overflow) {
1388 #if V8_TARGET_ARCH_PPC64 1337 #if V8_TARGET_ARCH_S390X
1389 if (instr->hydrogen()->representation().IsSmi()) { 1338 if (instr->hydrogen()->representation().IsSmi()) {
1390 #endif 1339 #endif
1391 __ li(r0, Operand::Zero()); // clear xer 1340 __ LoadComplementRR(result, left);
1392 __ mtxer(r0); 1341 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
1393 __ neg(result, left, SetOE, SetRC); 1342 #if V8_TARGET_ARCH_S390X
1394 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
1395 #if V8_TARGET_ARCH_PPC64
1396 } else { 1343 } else {
1397 __ neg(result, left); 1344 __ LoadComplementRR(result, left);
1398 __ TestIfInt32(result, r0); 1345 __ TestIfInt32(result, r0);
1399 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow); 1346 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
1400 } 1347 }
1401 #endif 1348 #endif
1402 } else { 1349 } else {
1403 __ neg(result, left); 1350 __ LoadComplementRR(result, left);
1404 } 1351 }
1405 break; 1352 break;
1406 case 0: 1353 case 0:
1407 if (bailout_on_minus_zero) { 1354 if (bailout_on_minus_zero) {
1408 // If left is strictly negative and the constant is null, the 1355 // If left is strictly negative and the constant is null, the
1409 // result is -0. Deoptimize if required, otherwise return 0. 1356 // result is -0. Deoptimize if required, otherwise return 0.
1410 #if V8_TARGET_ARCH_PPC64 1357 #if V8_TARGET_ARCH_S390X
1411 if (instr->hydrogen()->representation().IsSmi()) { 1358 if (instr->hydrogen()->representation().IsSmi()) {
1412 #endif 1359 #endif
1413 __ cmpi(left, Operand::Zero()); 1360 __ Cmp32(left, Operand::Zero());
1414 #if V8_TARGET_ARCH_PPC64 1361 #if V8_TARGET_ARCH_S390X
1415 } else { 1362 } else {
1416 __ cmpwi(left, Operand::Zero()); 1363 __ Cmp32(left, Operand::Zero());
1417 } 1364 }
1418 #endif 1365 #endif
1419 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero); 1366 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
1420 } 1367 }
1421 __ li(result, Operand::Zero()); 1368 __ LoadImmP(result, Operand::Zero());
1422 break; 1369 break;
1423 case 1: 1370 case 1:
1424 __ Move(result, left); 1371 __ Move(result, left);
1425 break; 1372 break;
1426 default: 1373 default:
1427 // Multiplying by powers of two and powers of two plus or minus 1374 // Multiplying by powers of two and powers of two plus or minus
1428 // one can be done faster with shifted operands. 1375 // one can be done faster with shifted operands.
1429 // For other constants we emit standard code. 1376 // For other constants we emit standard code.
1430 int32_t mask = constant >> 31; 1377 int32_t mask = constant >> 31;
1431 uint32_t constant_abs = (constant + mask) ^ mask; 1378 uint32_t constant_abs = (constant + mask) ^ mask;
1432 1379
1433 if (base::bits::IsPowerOfTwo32(constant_abs)) { 1380 if (base::bits::IsPowerOfTwo32(constant_abs)) {
1434 int32_t shift = WhichPowerOf2(constant_abs); 1381 int32_t shift = WhichPowerOf2(constant_abs);
1435 __ ShiftLeftImm(result, left, Operand(shift)); 1382 __ ShiftLeftP(result, left, Operand(shift));
1436 // Correct the sign of the result if the constant is negative. 1383 // Correct the sign of the result if the constant is negative.
1437 if (constant < 0) __ neg(result, result); 1384 if (constant < 0) __ LoadComplementRR(result, result);
1438 } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) { 1385 } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
1439 int32_t shift = WhichPowerOf2(constant_abs - 1); 1386 int32_t shift = WhichPowerOf2(constant_abs - 1);
1440 __ ShiftLeftImm(scratch, left, Operand(shift)); 1387 __ ShiftLeftP(scratch, left, Operand(shift));
1441 __ add(result, scratch, left); 1388 __ AddP(result, scratch, left);
1442 // Correct the sign of the result if the constant is negative. 1389 // Correct the sign of the result if the constant is negative.
1443 if (constant < 0) __ neg(result, result); 1390 if (constant < 0) __ LoadComplementRR(result, result);
1444 } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) { 1391 } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
1445 int32_t shift = WhichPowerOf2(constant_abs + 1); 1392 int32_t shift = WhichPowerOf2(constant_abs + 1);
1446 __ ShiftLeftImm(scratch, left, Operand(shift)); 1393 __ ShiftLeftP(scratch, left, Operand(shift));
1447 __ sub(result, scratch, left); 1394 __ SubP(result, scratch, left);
1448 // Correct the sign of the result if the constant is negative. 1395 // Correct the sign of the result if the constant is negative.
1449 if (constant < 0) __ neg(result, result); 1396 if (constant < 0) __ LoadComplementRR(result, result);
1450 } else { 1397 } else {
1451 // Generate standard code. 1398 // Generate standard code.
1452 __ mov(ip, Operand(constant)); 1399 __ Move(result, left);
1453 __ Mul(result, left, ip); 1400 __ MulP(result, Operand(constant));
1454 } 1401 }
1455 } 1402 }
1456 1403
1457 } else { 1404 } else {
1458 DCHECK(right_op->IsRegister()); 1405 DCHECK(right_op->IsRegister());
1459 Register right = ToRegister(right_op); 1406 Register right = ToRegister(right_op);
1460 1407
1461 if (can_overflow) { 1408 if (can_overflow) {
1462 #if V8_TARGET_ARCH_PPC64 1409 #if V8_TARGET_ARCH_S390X
1463 // result = left * right. 1410 // result = left * right.
1464 if (instr->hydrogen()->representation().IsSmi()) { 1411 if (instr->hydrogen()->representation().IsSmi()) {
1465 __ SmiUntag(result, left); 1412 __ SmiUntag(result, left);
1466 __ SmiUntag(scratch, right); 1413 __ SmiUntag(scratch, right);
1467 __ Mul(result, result, scratch); 1414 __ msgr(result, scratch);
1468 } else { 1415 } else {
1469 __ Mul(result, left, right); 1416 __ LoadRR(result, left);
1417 __ msgr(result, right);
1470 } 1418 }
1471 __ TestIfInt32(result, r0); 1419 __ TestIfInt32(result, r0);
1472 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow); 1420 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
1473 if (instr->hydrogen()->representation().IsSmi()) { 1421 if (instr->hydrogen()->representation().IsSmi()) {
1474 __ SmiTag(result); 1422 __ SmiTag(result);
1475 } 1423 }
1476 #else 1424 #else
1477 // scratch:result = left * right. 1425 // r0:scratch = scratch * right
1478 if (instr->hydrogen()->representation().IsSmi()) { 1426 if (instr->hydrogen()->representation().IsSmi()) {
1479 __ SmiUntag(result, left); 1427 __ SmiUntag(scratch, left);
1480 __ mulhw(scratch, result, right); 1428 __ mr_z(r0, right);
1481 __ mullw(result, result, right); 1429 __ LoadRR(result, scratch);
1482 } else { 1430 } else {
1483 __ mulhw(scratch, left, right); 1431 // r0:scratch = scratch * right
1484 __ mullw(result, left, right); 1432 __ LoadRR(scratch, left);
1433 __ mr_z(r0, right);
1434 __ LoadRR(result, scratch);
1485 } 1435 }
1486 __ TestIfInt32(scratch, result, r0); 1436 __ TestIfInt32(r0, result, scratch);
1487 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow); 1437 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
1488 #endif 1438 #endif
1489 } else { 1439 } else {
1490 if (instr->hydrogen()->representation().IsSmi()) { 1440 if (instr->hydrogen()->representation().IsSmi()) {
1491 __ SmiUntag(result, left); 1441 __ SmiUntag(result, left);
1492 __ Mul(result, result, right); 1442 __ Mul(result, result, right);
1493 } else { 1443 } else {
1494 __ Mul(result, left, right); 1444 __ Mul(result, left, right);
1495 } 1445 }
1496 } 1446 }
1497 1447
1498 if (bailout_on_minus_zero) { 1448 if (bailout_on_minus_zero) {
1499 Label done; 1449 Label done;
1500 #if V8_TARGET_ARCH_PPC64 1450 #if V8_TARGET_ARCH_S390X
1501 if (instr->hydrogen()->representation().IsSmi()) { 1451 if (instr->hydrogen()->representation().IsSmi()) {
1502 #endif 1452 #endif
1503 __ xor_(r0, left, right, SetRC); 1453 __ XorP(r0, left, right);
1504 __ bge(&done, cr0); 1454 __ LoadAndTestRR(r0, r0);
1505 #if V8_TARGET_ARCH_PPC64 1455 __ bge(&done, Label::kNear);
1456 #if V8_TARGET_ARCH_S390X
1506 } else { 1457 } else {
1507 __ xor_(r0, left, right); 1458 __ XorP(r0, left, right);
1508 __ cmpwi(r0, Operand::Zero()); 1459 __ Cmp32(r0, Operand::Zero());
1509 __ bge(&done); 1460 __ bge(&done, Label::kNear);
1510 } 1461 }
1511 #endif 1462 #endif
1512 // Bail out if the result is minus zero. 1463 // Bail out if the result is minus zero.
1513 __ cmpi(result, Operand::Zero()); 1464 __ CmpP(result, Operand::Zero());
1514 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); 1465 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
1515 __ bind(&done); 1466 __ bind(&done);
1516 } 1467 }
1517 } 1468 }
1518 } 1469 }
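The default constant case above strength-reduces the multiply when |c|, |c| - 1, or |c| + 1 is a power of two. A C++ sketch of the three emitted forms (illustrative names; exactly one form is selected per constant, and the generic MulP path covers everything else):

  int32_t MulByConstSketch(int32_t n, int32_t c, int shift, int form) {
    // form 0: |c| == 2^shift      -> n << shift        (ShiftLeftP)
    // form 1: |c| == 2^shift + 1  -> (n << shift) + n  (ShiftLeftP + AddP)
    // form 2: |c| == 2^shift - 1  -> (n << shift) - n  (ShiftLeftP + SubP)
    uint32_t un = static_cast<uint32_t>(n);  // unsigned keeps shifts defined
    uint32_t r = un << shift;
    if (form == 1) r += un;
    if (form == 2) r -= un;
    int32_t result = static_cast<int32_t>(r);
    return c < 0 ? -result : result;  // LoadComplementRR fixes the sign
  }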
1519 1470
1520
1521 void LCodeGen::DoBitI(LBitI* instr) { 1471 void LCodeGen::DoBitI(LBitI* instr) {
1522 LOperand* left_op = instr->left(); 1472 LOperand* left_op = instr->left();
1523 LOperand* right_op = instr->right(); 1473 LOperand* right_op = instr->right();
1524 DCHECK(left_op->IsRegister()); 1474 DCHECK(left_op->IsRegister());
1525 Register left = ToRegister(left_op); 1475 Register left = ToRegister(left_op);
1526 Register result = ToRegister(instr->result()); 1476 Register result = ToRegister(instr->result());
1527 Operand right(no_reg);
1528 1477
1529 if (right_op->IsStackSlot()) { 1478 if (right_op->IsConstantOperand()) {
1530 right = Operand(EmitLoadRegister(right_op, ip)); 1479 switch (instr->op()) {
1480 case Token::BIT_AND:
1481 __ AndP(result, left, Operand(ToOperand(right_op)));
1482 break;
1483 case Token::BIT_OR:
1484 __ OrP(result, left, Operand(ToOperand(right_op)));
1485 break;
1486 case Token::BIT_XOR:
1487 __ XorP(result, left, Operand(ToOperand(right_op)));
1488 break;
1489 default:
1490 UNREACHABLE();
1491 break;
1492 }
1493 } else if (right_op->IsStackSlot()) {
1494 // Reg-Mem instructions clobber the destination, so copy src to dst first.
1495 if (!left.is(result)) __ LoadRR(result, left);
1496 switch (instr->op()) {
1497 case Token::BIT_AND:
1498 __ AndP(result, ToMemOperand(right_op));
1499 break;
1500 case Token::BIT_OR:
1501 __ OrP(result, ToMemOperand(right_op));
1502 break;
1503 case Token::BIT_XOR:
1504 __ XorP(result, ToMemOperand(right_op));
1505 break;
1506 default:
1507 UNREACHABLE();
1508 break;
1509 }
1531 } else { 1510 } else {
1532 DCHECK(right_op->IsRegister() || right_op->IsConstantOperand()); 1511 DCHECK(right_op->IsRegister());
1533 right = ToOperand(right_op);
1534 1512
1535 if (right_op->IsConstantOperand() && is_uint16(right.immediate())) { 1513 switch (instr->op()) {
1536 switch (instr->op()) { 1514 case Token::BIT_AND:
1537 case Token::BIT_AND: 1515 __ AndP(result, left, ToRegister(right_op));
1538 __ andi(result, left, right); 1516 break;
1539 break; 1517 case Token::BIT_OR:
1540 case Token::BIT_OR: 1518 __ OrP(result, left, ToRegister(right_op));
1541 __ ori(result, left, right); 1519 break;
1542 break; 1520 case Token::BIT_XOR:
1543 case Token::BIT_XOR: 1521 __ XorP(result, left, ToRegister(right_op));
1544 __ xori(result, left, right); 1522 break;
1545 break; 1523 default:
1546 default: 1524 UNREACHABLE();
1547 UNREACHABLE(); 1525 break;
1548 break;
1549 }
1550 return;
1551 } 1526 }
1552 } 1527 }
1553
1554 switch (instr->op()) {
1555 case Token::BIT_AND:
1556 __ And(result, left, right);
1557 break;
1558 case Token::BIT_OR:
1559 __ Or(result, left, right);
1560 break;
1561 case Token::BIT_XOR:
1562 if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
1563 __ notx(result, left);
1564 } else {
1565 __ Xor(result, left, right);
1566 }
1567 break;
1568 default:
1569 UNREACHABLE();
1570 break;
1571 }
1572 } 1528 }
1573 1529
1574
1575 void LCodeGen::DoShiftI(LShiftI* instr) { 1530 void LCodeGen::DoShiftI(LShiftI* instr) {
1576 // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so 1531 // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
1577 // result may alias either of them. 1532 // result may alias either of them.
1578 LOperand* right_op = instr->right(); 1533 LOperand* right_op = instr->right();
1579 Register left = ToRegister(instr->left()); 1534 Register left = ToRegister(instr->left());
1580 Register result = ToRegister(instr->result()); 1535 Register result = ToRegister(instr->result());
1581 Register scratch = scratch0(); 1536 Register scratch = scratch0();
1582 if (right_op->IsRegister()) { 1537 if (right_op->IsRegister()) {
1583 // Mask the right_op operand. 1538 // Mask the right_op operand.
1584 __ andi(scratch, ToRegister(right_op), Operand(0x1F)); 1539 __ AndP(scratch, ToRegister(right_op), Operand(0x1F));
1585 switch (instr->op()) { 1540 switch (instr->op()) {
1586 case Token::ROR: 1541 case Token::ROR:
1587 // rotate_right(a, b) == rotate_left(a, 32 - b) 1542 // rotate_right(a, b) == rotate_left(a, 32 - b)
1588 __ subfic(scratch, scratch, Operand(32)); 1543 __ LoadComplementRR(scratch, scratch);
1589 __ rotlw(result, left, scratch); 1544 __ rll(result, left, scratch, Operand(32));
1545 #if V8_TARGET_ARCH_S390X
1546 __ lgfr(result, result);
1547 #endif
1590 break; 1548 break;
1591 case Token::SAR: 1549 case Token::SAR:
1592 __ sraw(result, left, scratch); 1550 __ ShiftRightArith(result, left, scratch);
1551 #if V8_TARGET_ARCH_S390X
1552 __ lgfr(result, result);
1553 #endif
1593 break; 1554 break;
1594 case Token::SHR: 1555 case Token::SHR:
1556 __ ShiftRight(result, left, scratch);
1557 #if V8_TARGET_ARCH_S390X
1558 __ lgfr(result, result);
1559 #endif
1595 if (instr->can_deopt()) { 1560 if (instr->can_deopt()) {
1596 __ srw(result, left, scratch, SetRC); 1561 #if V8_TARGET_ARCH_S390X
1597 #if V8_TARGET_ARCH_PPC64 1562 __ ltgfr(result, result /*, SetRC*/);
1598 __ extsw(result, result, SetRC); 1563 #else
1564 __ ltr(result, result); // Set the <,==,> condition
1599 #endif 1565 #endif
1600 DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, cr0); 1566 DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, cr0);
1601 } else {
1602 __ srw(result, left, scratch);
1603 } 1567 }
1604 break; 1568 break;
1605 case Token::SHL: 1569 case Token::SHL:
1606 __ slw(result, left, scratch); 1570 __ ShiftLeft(result, left, scratch);
1607 #if V8_TARGET_ARCH_PPC64 1571 #if V8_TARGET_ARCH_S390X
1608 __ extsw(result, result); 1572 __ lgfr(result, result);
1609 #endif 1573 #endif
1610 break; 1574 break;
1611 default: 1575 default:
1612 UNREACHABLE(); 1576 UNREACHABLE();
1613 break; 1577 break;
1614 } 1578 }
1615 } else { 1579 } else {
1616 // Mask the right_op operand. 1580 // Mask the right_op operand.
1617 int value = ToInteger32(LConstantOperand::cast(right_op)); 1581 int value = ToInteger32(LConstantOperand::cast(right_op));
1618 uint8_t shift_count = static_cast<uint8_t>(value & 0x1F); 1582 uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1619 switch (instr->op()) { 1583 switch (instr->op()) {
1620 case Token::ROR: 1584 case Token::ROR:
1621 if (shift_count != 0) { 1585 if (shift_count != 0) {
1622 __ rotrwi(result, left, shift_count); 1586 __ rll(result, left, Operand(32 - shift_count));
1587 #if V8_TARGET_ARCH_S390X
1588 __ lgfr(result, result);
1589 #endif
1623 } else { 1590 } else {
1624 __ Move(result, left); 1591 __ Move(result, left);
1625 } 1592 }
1626 break; 1593 break;
1627 case Token::SAR: 1594 case Token::SAR:
1628 if (shift_count != 0) { 1595 if (shift_count != 0) {
1629 __ srawi(result, left, shift_count); 1596 __ ShiftRightArith(result, left, Operand(shift_count));
1597 #if V8_TARGET_ARCH_S390X
1598 __ lgfr(result, result);
1599 #endif
1630 } else { 1600 } else {
1631 __ Move(result, left); 1601 __ Move(result, left);
1632 } 1602 }
1633 break; 1603 break;
1634 case Token::SHR: 1604 case Token::SHR:
1635 if (shift_count != 0) { 1605 if (shift_count != 0) {
1636 __ srwi(result, left, Operand(shift_count)); 1606 __ ShiftRight(result, left, Operand(shift_count));
1607 #if V8_TARGET_ARCH_S390X
1608 __ lgfr(result, result);
1609 #endif
1637 } else { 1610 } else {
1638 if (instr->can_deopt()) { 1611 if (instr->can_deopt()) {
1639 __ cmpwi(left, Operand::Zero()); 1612 __ Cmp32(left, Operand::Zero());
1640 DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue); 1613 DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue);
1641 } 1614 }
1642 __ Move(result, left); 1615 __ Move(result, left);
1643 } 1616 }
1644 break; 1617 break;
1645 case Token::SHL: 1618 case Token::SHL:
1646 if (shift_count != 0) { 1619 if (shift_count != 0) {
1647 #if V8_TARGET_ARCH_PPC64 1620 #if V8_TARGET_ARCH_S390X
1648 if (instr->hydrogen_value()->representation().IsSmi()) { 1621 if (instr->hydrogen_value()->representation().IsSmi()) {
1649 __ sldi(result, left, Operand(shift_count)); 1622 __ ShiftLeftP(result, left, Operand(shift_count));
1650 #else 1623 #else
1651 if (instr->hydrogen_value()->representation().IsSmi() && 1624 if (instr->hydrogen_value()->representation().IsSmi() &&
1652 instr->can_deopt()) { 1625 instr->can_deopt()) {
1653 if (shift_count != 1) { 1626 if (shift_count != 1) {
1654 __ slwi(result, left, Operand(shift_count - 1)); 1627 __ ShiftLeft(result, left, Operand(shift_count - 1));
1628 #if V8_TARGET_ARCH_S390X
1629 __ lgfr(result, result);
1630 #endif
1655 __ SmiTagCheckOverflow(result, result, scratch); 1631 __ SmiTagCheckOverflow(result, result, scratch);
1656 } else { 1632 } else {
1657 __ SmiTagCheckOverflow(result, left, scratch); 1633 __ SmiTagCheckOverflow(result, left, scratch);
1658 } 1634 }
1659 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0); 1635 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
1660 #endif 1636 #endif
1661 } else { 1637 } else {
1662 __ slwi(result, left, Operand(shift_count)); 1638 __ ShiftLeft(result, left, Operand(shift_count));
1663 #if V8_TARGET_ARCH_PPC64 1639 #if V8_TARGET_ARCH_S390X
1664 __ extsw(result, result); 1640 __ lgfr(result, result);
1665 #endif 1641 #endif
1666 } 1642 }
1667 } else { 1643 } else {
1668 __ Move(result, left); 1644 __ Move(result, left);
1669 } 1645 }
1670 break; 1646 break;
1671 default: 1647 default:
1672 UNREACHABLE(); 1648 UNREACHABLE();
1673 break; 1649 break;
1674 } 1650 }
1675 } 1651 }
1676 } 1652 }
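The ROR lowering above exists because s390 only rotates left (rll); it uses rotate_right(a, b) == rotate_left(a, 32 - b), and a rotate by 32 is the identity, so b == 0 needs no special case. A C++ sketch of the register-operand path:

  uint32_t RotateRight32(uint32_t a, int b) {  // b already masked to 0..31
    return (a >> b) | (a << ((32 - b) & 31));  // == rotate_left(a, 32 - b)
  }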
1677 1653
1654 void LCodeGen::DoSubI(LSubI* instr) {
1655 LOperand* left = instr->left();
1656 LOperand* right = instr->right();
1657 LOperand* result = instr->result();
1678 1658
1679 void LCodeGen::DoSubI(LSubI* instr) { 1659 bool isInteger = !(instr->hydrogen()->representation().IsSmi() ||
1680 LOperand* right = instr->right(); 1660 instr->hydrogen()->representation().IsExternal());
1681 Register left = ToRegister(instr->left()); 1661
1682 Register result = ToRegister(instr->result()); 1662 #if V8_TARGET_ARCH_S390X
1683 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 1663 // Overflow needs to be detected on the lower 32 bits.
1684 #if V8_TARGET_ARCH_PPC64 1664 // As a result, on 64-bit, we need to force 32-bit arithmetic operations
1685 const bool isInteger = !instr->hydrogen()->representation().IsSmi(); 1665 // to set the CC overflow bit properly. The result is then sign-extended.
1666 bool checkOverflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1686 #else 1667 #else
1687 const bool isInteger = false; 1668 bool checkOverflow = true;
1688 #endif 1669 #endif
1689 if (!can_overflow || isInteger) { 1670
1690 if (right->IsConstantOperand()) { 1671 if (right->IsConstantOperand()) {
1691 __ Add(result, left, -(ToOperand(right).immediate()), r0); 1672 if (!isInteger || !checkOverflow)
1673 __ SubP(ToRegister(result), ToRegister(left), ToOperand(right));
1674 else
1675 __ Sub32(ToRegister(result), ToRegister(left), ToOperand(right));
1676 } else if (right->IsRegister()) {
1677 if (!isInteger)
1678 __ SubP(ToRegister(result), ToRegister(left), ToRegister(right));
1679 else if (!checkOverflow)
1680 __ SubP_ExtendSrc(ToRegister(result), ToRegister(left),
1681 ToRegister(right));
1682 else
1683 __ Sub32(ToRegister(result), ToRegister(left), ToRegister(right));
1684 } else {
1685 if (!left->Equals(instr->result()))
1686 __ LoadRR(ToRegister(result), ToRegister(left));
1687
1688 MemOperand mem = ToMemOperand(right);
1689 if (!isInteger) {
1690 __ SubP(ToRegister(result), mem);
1692 } else { 1691 } else {
1693 __ sub(result, left, EmitLoadRegister(right, ip)); 1692 #if V8_TARGET_ARCH_S390X && !V8_TARGET_LITTLE_ENDIAN
1693 // We want to read the 32-bits directly from memory
1694 MemOperand Upper32Mem = MemOperand(mem.rb(), mem.rx(), mem.offset() + 4);
1695 #else
1696 MemOperand Upper32Mem = ToMemOperand(right);
1697 #endif
1698 if (checkOverflow) {
1699 __ Sub32(ToRegister(result), Upper32Mem);
1700 } else {
1701 __ SubP_ExtendSrc(ToRegister(result), Upper32Mem);
1702 }
1694 } 1703 }
1695 #if V8_TARGET_ARCH_PPC64 1704 }
1696 if (can_overflow) { 1705
1697 __ TestIfInt32(result, r0); 1706 #if V8_TARGET_ARCH_S390X
1698 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow); 1707 if (isInteger && checkOverflow)
1699 } 1708 __ lgfr(ToRegister(result), ToRegister(result));
1700 #endif 1709 #endif
1701 } else { 1710 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1702 if (right->IsConstantOperand()) { 1711 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
1703 __ AddAndCheckForOverflow(result, left, -(ToOperand(right).immediate()),
1704 scratch0(), r0);
1705 } else {
1706 __ SubAndCheckForOverflow(result, left, EmitLoadRegister(right, ip),
1707 scratch0(), r0);
1708 }
1709 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
1710 } 1712 }
1711 } 1713 }
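The 64-bit comment above is the key subtlety: a 64-bit SubP would never set the overflow condition for values that fit in 32 bits, so when kCanOverflow is set the code subtracts with Sub32 and then sign-extends with lgfr. A sketch of that strategy, using a GCC/Clang builtin to stand in for the condition code:

  int64_t Sub32ThenExtend(int32_t left, int32_t right, bool* overflow) {
    int32_t r;
    *overflow = __builtin_sub_overflow(left, right, &r);  // Sub32 sets CC
    return static_cast<int64_t>(r);                       // lgfr sign-extends
  }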
1712 1714
1713
1714 void LCodeGen::DoRSubI(LRSubI* instr) { 1715 void LCodeGen::DoRSubI(LRSubI* instr) {
1715 LOperand* left = instr->left(); 1716 LOperand* left = instr->left();
1716 LOperand* right = instr->right(); 1717 LOperand* right = instr->right();
1717 LOperand* result = instr->result(); 1718 LOperand* result = instr->result();
1718 1719
1719 DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow) && 1720 DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow) &&
1720 right->IsConstantOperand()); 1721 right->IsConstantOperand());
1721 1722
1723 #if V8_TARGET_ARCH_S390X
1724 // Overflow needs to be detected on the lower 32 bits.
1725 // As a result, on 64-bit, we need to force 32-bit arithmetic operations
1726 // to set the CC overflow bit properly. The result is then sign-extended.
1727 bool checkOverflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1728 #else
1729 bool checkOverflow = true;
1730 #endif
1731
1722 Operand right_operand = ToOperand(right); 1732 Operand right_operand = ToOperand(right);
1723 if (is_int16(right_operand.immediate())) { 1733 __ mov(r0, right_operand);
1724 __ subfic(ToRegister(result), ToRegister(left), right_operand); 1734
1735 if (!checkOverflow) {
1736 __ SubP_ExtendSrc(ToRegister(result), r0, ToRegister(left));
1725 } else { 1737 } else {
1726 __ mov(r0, right_operand); 1738 __ Sub32(ToRegister(result), r0, ToRegister(left));
1727 __ sub(ToRegister(result), r0, ToRegister(left));
1728 } 1739 }
1729 } 1740 }
1730 1741
1731
1732 void LCodeGen::DoConstantI(LConstantI* instr) { 1742 void LCodeGen::DoConstantI(LConstantI* instr) {
1733 __ mov(ToRegister(instr->result()), Operand(instr->value())); 1743 __ mov(ToRegister(instr->result()), Operand(instr->value()));
1734 } 1744 }
1735 1745
1736
1737 void LCodeGen::DoConstantS(LConstantS* instr) { 1746 void LCodeGen::DoConstantS(LConstantS* instr) {
1738 __ LoadSmiLiteral(ToRegister(instr->result()), instr->value()); 1747 __ LoadSmiLiteral(ToRegister(instr->result()), instr->value());
1739 } 1748 }
1740 1749
1741
1742 void LCodeGen::DoConstantD(LConstantD* instr) { 1750 void LCodeGen::DoConstantD(LConstantD* instr) {
1743 DCHECK(instr->result()->IsDoubleRegister()); 1751 DCHECK(instr->result()->IsDoubleRegister());
1744 DoubleRegister result = ToDoubleRegister(instr->result()); 1752 DoubleRegister result = ToDoubleRegister(instr->result());
1745 #if V8_HOST_ARCH_IA32
1746 // Need some crappy work-around for x87 sNaN -> qNaN breakage in simulator
1747 // builds.
1748 uint64_t bits = instr->bits(); 1753 uint64_t bits = instr->bits();
1749 if ((bits & V8_UINT64_C(0x7FF8000000000000)) == 1754 __ LoadDoubleLiteral(result, bits, scratch0());
1750 V8_UINT64_C(0x7FF0000000000000)) {
1751 uint32_t lo = static_cast<uint32_t>(bits);
1752 uint32_t hi = static_cast<uint32_t>(bits >> 32);
1753 __ mov(ip, Operand(lo));
1754 __ mov(scratch0(), Operand(hi));
1755 __ MovInt64ToDouble(result, scratch0(), ip);
1756 return;
1757 }
1758 #endif
1759 double v = instr->value();
1760 __ LoadDoubleLiteral(result, v, scratch0());
1761 } 1755 }
1762 1756
1763
1764 void LCodeGen::DoConstantE(LConstantE* instr) { 1757 void LCodeGen::DoConstantE(LConstantE* instr) {
1765 __ mov(ToRegister(instr->result()), Operand(instr->value())); 1758 __ mov(ToRegister(instr->result()), Operand(instr->value()));
1766 } 1759 }
1767 1760
1768
1769 void LCodeGen::DoConstantT(LConstantT* instr) { 1761 void LCodeGen::DoConstantT(LConstantT* instr) {
1770 Handle<Object> object = instr->value(isolate()); 1762 Handle<Object> object = instr->value(isolate());
1771 AllowDeferredHandleDereference smi_check; 1763 AllowDeferredHandleDereference smi_check;
1772 __ Move(ToRegister(instr->result()), object); 1764 __ Move(ToRegister(instr->result()), object);
1773 } 1765 }
1774 1766
1775
1776 MemOperand LCodeGen::BuildSeqStringOperand(Register string, LOperand* index, 1767 MemOperand LCodeGen::BuildSeqStringOperand(Register string, LOperand* index,
1777 String::Encoding encoding) { 1768 String::Encoding encoding) {
1778 if (index->IsConstantOperand()) { 1769 if (index->IsConstantOperand()) {
1779 int offset = ToInteger32(LConstantOperand::cast(index)); 1770 int offset = ToInteger32(LConstantOperand::cast(index));
1780 if (encoding == String::TWO_BYTE_ENCODING) { 1771 if (encoding == String::TWO_BYTE_ENCODING) {
1781 offset *= kUC16Size; 1772 offset *= kUC16Size;
1782 } 1773 }
1783 STATIC_ASSERT(kCharSize == 1); 1774 STATIC_ASSERT(kCharSize == 1);
1784 return FieldMemOperand(string, SeqString::kHeaderSize + offset); 1775 return FieldMemOperand(string, SeqString::kHeaderSize + offset);
1785 } 1776 }
1786 Register scratch = scratch0(); 1777 Register scratch = scratch0();
1787 DCHECK(!scratch.is(string)); 1778 DCHECK(!scratch.is(string));
1788 DCHECK(!scratch.is(ToRegister(index))); 1779 DCHECK(!scratch.is(ToRegister(index)));
1780 // TODO(joransiu) : Fold Add into FieldMemOperand
1789 if (encoding == String::ONE_BYTE_ENCODING) { 1781 if (encoding == String::ONE_BYTE_ENCODING) {
1790 __ add(scratch, string, ToRegister(index)); 1782 __ AddP(scratch, string, ToRegister(index));
1791 } else { 1783 } else {
1792 STATIC_ASSERT(kUC16Size == 2); 1784 STATIC_ASSERT(kUC16Size == 2);
1793 __ ShiftLeftImm(scratch, ToRegister(index), Operand(1)); 1785 __ ShiftLeftP(scratch, ToRegister(index), Operand(1));
1794 __ add(scratch, string, scratch); 1786 __ AddP(scratch, string, scratch);
1795 } 1787 }
1796 return FieldMemOperand(scratch, SeqString::kHeaderSize); 1788 return FieldMemOperand(scratch, SeqString::kHeaderSize);
1797 } 1789 }
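For reference, the operand built above encodes a simple header-plus-scaled-index byte offset; a minimal sketch using V8's own constants (FieldMemOperand subtracts the heap-object tag from the field offset, which is assumed here):

    // Byte offset of character 'index' in a sequential string:
    // one-byte chars are kCharSize apart, two-byte chars kUC16Size apart.
    intptr_t SeqStringCharOffset(intptr_t index, bool two_byte) {
      intptr_t scaled = two_byte ? index * kUC16Size : index * kCharSize;
      return SeqString::kHeaderSize - kHeapObjectTag + scaled;
    }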
1798 1790
1799
1800 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) { 1791 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
1801 String::Encoding encoding = instr->hydrogen()->encoding(); 1792 String::Encoding encoding = instr->hydrogen()->encoding();
1802 Register string = ToRegister(instr->string()); 1793 Register string = ToRegister(instr->string());
1803 Register result = ToRegister(instr->result()); 1794 Register result = ToRegister(instr->result());
1804 1795
1805 if (FLAG_debug_code) { 1796 if (FLAG_debug_code) {
1806 Register scratch = scratch0(); 1797 Register scratch = scratch0();
1807 __ LoadP(scratch, FieldMemOperand(string, HeapObject::kMapOffset)); 1798 __ LoadP(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
1808 __ lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); 1799 __ llc(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1809 1800
1810 __ andi(scratch, scratch, 1801 __ AndP(scratch, scratch,
1811 Operand(kStringRepresentationMask | kStringEncodingMask)); 1802 Operand(kStringRepresentationMask | kStringEncodingMask));
1812 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; 1803 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1813 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; 1804 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1814 __ cmpi(scratch, 1805 __ CmpP(scratch,
1815 Operand(encoding == String::ONE_BYTE_ENCODING ? one_byte_seq_type 1806 Operand(encoding == String::ONE_BYTE_ENCODING ? one_byte_seq_type
1816 : two_byte_seq_type)); 1807 : two_byte_seq_type));
1817 __ Check(eq, kUnexpectedStringType); 1808 __ Check(eq, kUnexpectedStringType);
1818 } 1809 }
1819 1810
1820 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding); 1811 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1821 if (encoding == String::ONE_BYTE_ENCODING) { 1812 if (encoding == String::ONE_BYTE_ENCODING) {
1822 __ lbz(result, operand); 1813 __ llc(result, operand);
1823 } else { 1814 } else {
1824 __ lhz(result, operand); 1815 __ llh(result, operand);
1825 } 1816 }
1826 } 1817 }
1827 1818
1828
1829 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) { 1819 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
1830 String::Encoding encoding = instr->hydrogen()->encoding(); 1820 String::Encoding encoding = instr->hydrogen()->encoding();
1831 Register string = ToRegister(instr->string()); 1821 Register string = ToRegister(instr->string());
1832 Register value = ToRegister(instr->value()); 1822 Register value = ToRegister(instr->value());
1833 1823
1834 if (FLAG_debug_code) { 1824 if (FLAG_debug_code) {
1835 Register index = ToRegister(instr->index()); 1825 Register index = ToRegister(instr->index());
1836 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; 1826 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1837 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; 1827 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1838 int encoding_mask = 1828 int encoding_mask =
1839 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING 1829 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
1840 ? one_byte_seq_type 1830 ? one_byte_seq_type
1841 : two_byte_seq_type; 1831 : two_byte_seq_type;
1842 __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask); 1832 __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
1843 } 1833 }
1844 1834
1845 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding); 1835 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1846 if (encoding == String::ONE_BYTE_ENCODING) { 1836 if (encoding == String::ONE_BYTE_ENCODING) {
1847 __ stb(value, operand); 1837 __ stc(value, operand);
1848 } else { 1838 } else {
1849 __ sth(value, operand); 1839 __ sth(value, operand);
1850 } 1840 }
1851 } 1841 }
1852 1842
1853
1854 void LCodeGen::DoAddI(LAddI* instr) { 1843 void LCodeGen::DoAddI(LAddI* instr) {
1844 LOperand* left = instr->left();
1855 LOperand* right = instr->right(); 1845 LOperand* right = instr->right();
1856 Register left = ToRegister(instr->left()); 1846 LOperand* result = instr->result();
1857 Register result = ToRegister(instr->result()); 1847 bool isInteger = !(instr->hydrogen()->representation().IsSmi() ||
1858 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 1848 instr->hydrogen()->representation().IsExternal());
1859 #if V8_TARGET_ARCH_PPC64 1849 #if V8_TARGET_ARCH_S390X
1860 const bool isInteger = !(instr->hydrogen()->representation().IsSmi() || 1850 // The overflow detection needs to be tested on the lower 32-bits.
1861 instr->hydrogen()->representation().IsExternal()); 1851 // As a result, on 64-bit, we need to force 32-bit arithmetic operations
1852 // to set the CC overflow bit properly. The result is then sign-extended.
1853 bool checkOverflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1862 #else 1854 #else
1863 const bool isInteger = false; 1855 bool checkOverflow = true;
1864 #endif 1856 #endif
1865 1857
1866 if (!can_overflow || isInteger) { 1858 if (right->IsConstantOperand()) {
1867 if (right->IsConstantOperand()) { 1859 if (!isInteger || !checkOverflow)
1868 __ Add(result, left, ToOperand(right).immediate(), r0); 1860 __ AddP(ToRegister(result), ToRegister(left), ToOperand(right));
1861 else
1862 __ Add32(ToRegister(result), ToRegister(left), ToOperand(right));
1863 } else if (right->IsRegister()) {
1864 if (!isInteger)
1865 __ AddP(ToRegister(result), ToRegister(left), ToRegister(right));
1866 else if (!checkOverflow)
1867 __ AddP_ExtendSrc(ToRegister(result), ToRegister(left),
1868 ToRegister(right));
1869 else
1870 __ Add32(ToRegister(result), ToRegister(left), ToRegister(right));
1871 } else {
1872 if (!left->Equals(instr->result()))
1873 __ LoadRR(ToRegister(result), ToRegister(left));
1874
1875 MemOperand mem = ToMemOperand(right);
1876 if (!isInteger) {
1877 __ AddP(ToRegister(result), mem);
1869 } else { 1878 } else {
1870 __ add(result, left, EmitLoadRegister(right, ip)); 1879 #if V8_TARGET_ARCH_S390X && !V8_TARGET_LITTLE_ENDIAN
1880 // We want to read the 32-bits directly from memory
1881 MemOperand Upper32Mem = MemOperand(mem.rb(), mem.rx(), mem.offset() + 4);
1882 #else
1883 MemOperand Upper32Mem = ToMemOperand(right);
1884 #endif
1885 if (checkOverflow) {
1886 __ Add32(ToRegister(result), Upper32Mem);
1887 } else {
1888 __ AddP_ExtendSrc(ToRegister(result), Upper32Mem);
1889 }
1871 } 1890 }
1872 #if V8_TARGET_ARCH_PPC64 1891 }
1873 if (can_overflow) { 1892
1874 __ TestIfInt32(result, r0); 1893 #if V8_TARGET_ARCH_S390X
1875 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow); 1894 if (isInteger && checkOverflow)
1876 } 1895 __ lgfr(ToRegister(result), ToRegister(result));
1877 #endif 1896 #endif
1878 } else { 1897 // Deoptimize on overflow
1879 if (right->IsConstantOperand()) { 1898 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1880 __ AddAndCheckForOverflow(result, left, ToOperand(right).immediate(), 1899 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
1881 scratch0(), r0);
1882 } else {
1883 __ AddAndCheckForOverflow(result, left, EmitLoadRegister(right, ip),
1884 scratch0(), r0);
1885 }
1886 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
1887 } 1900 }
1888 } 1901 }
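The Upper32Mem adjustment in DoAddI (and the matching one in DoSubI) relies on byte order: on a big-endian 64-bit target the least-significant 32 bits of a 64-bit stack slot sit at byte offset +4, so Add32 can consume them straight from memory. A sketch of the same idea:

    // On big-endian 64-bit, the low 32 bits of a 64-bit slot are the
    // second word, i.e. at byte offset +4 from the slot's address.
    int32_t LoadLow32BigEndian(const int64_t* slot) {
      return reinterpret_cast<const int32_t*>(slot)[1];
    }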
1889 1902
1890
1891 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { 1903 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1892 LOperand* left = instr->left(); 1904 LOperand* left = instr->left();
1893 LOperand* right = instr->right(); 1905 LOperand* right = instr->right();
1894 HMathMinMax::Operation operation = instr->hydrogen()->operation(); 1906 HMathMinMax::Operation operation = instr->hydrogen()->operation();
1895 Condition cond = (operation == HMathMinMax::kMathMin) ? le : ge; 1907 Condition cond = (operation == HMathMinMax::kMathMin) ? le : ge;
1896 if (instr->hydrogen()->representation().IsSmiOrInteger32()) { 1908 if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
1897 Register left_reg = ToRegister(left); 1909 Register left_reg = ToRegister(left);
1898 Register right_reg = EmitLoadRegister(right, ip); 1910 Register right_reg = EmitLoadRegister(right, ip);
1899 Register result_reg = ToRegister(instr->result()); 1911 Register result_reg = ToRegister(instr->result());
1900 Label return_left, done; 1912 Label return_left, done;
1901 #if V8_TARGET_ARCH_PPC64 1913 #if V8_TARGET_ARCH_S390X
1902 if (instr->hydrogen_value()->representation().IsSmi()) { 1914 if (instr->hydrogen_value()->representation().IsSmi()) {
1903 #endif 1915 #endif
1904 __ cmp(left_reg, right_reg); 1916 __ CmpP(left_reg, right_reg);
1905 #if V8_TARGET_ARCH_PPC64 1917 #if V8_TARGET_ARCH_S390X
1906 } else { 1918 } else {
1907 __ cmpw(left_reg, right_reg); 1919 __ Cmp32(left_reg, right_reg);
1908 } 1920 }
1909 #endif 1921 #endif
1910 if (CpuFeatures::IsSupported(ISELECT)) { 1922 __ b(cond, &return_left, Label::kNear);
1911 __ isel(cond, result_reg, left_reg, right_reg); 1923 __ Move(result_reg, right_reg);
1912 } else { 1924 __ b(&done, Label::kNear);
1913 __ b(cond, &return_left); 1925 __ bind(&return_left);
1914 __ Move(result_reg, right_reg); 1926 __ Move(result_reg, left_reg);
1915 __ b(&done); 1927 __ bind(&done);
1916 __ bind(&return_left);
1917 __ Move(result_reg, left_reg);
1918 __ bind(&done);
1919 }
1920 } else { 1928 } else {
1921 DCHECK(instr->hydrogen()->representation().IsDouble()); 1929 DCHECK(instr->hydrogen()->representation().IsDouble());
1922 DoubleRegister left_reg = ToDoubleRegister(left); 1930 DoubleRegister left_reg = ToDoubleRegister(left);
1923 DoubleRegister right_reg = ToDoubleRegister(right); 1931 DoubleRegister right_reg = ToDoubleRegister(right);
1924 DoubleRegister result_reg = ToDoubleRegister(instr->result()); 1932 DoubleRegister result_reg = ToDoubleRegister(instr->result());
1925 Label check_nan_left, check_zero, return_left, return_right, done; 1933 Label check_nan_left, check_zero, return_left, return_right, done;
1926 __ fcmpu(left_reg, right_reg); 1934 __ cdbr(left_reg, right_reg);
1927 __ bunordered(&check_nan_left); 1935 __ bunordered(&check_nan_left, Label::kNear);
1928 __ beq(&check_zero); 1936 __ beq(&check_zero);
1929 __ b(cond, &return_left); 1937 __ b(cond, &return_left, Label::kNear);
1930 __ b(&return_right); 1938 __ b(&return_right, Label::kNear);
1931 1939
1932 __ bind(&check_zero); 1940 __ bind(&check_zero);
1933 __ fcmpu(left_reg, kDoubleRegZero); 1941 __ lzdr(kDoubleRegZero);
1934 __ bne(&return_left); // left == right != 0. 1942 __ cdbr(left_reg, kDoubleRegZero);
1943 __ bne(&return_left, Label::kNear); // left == right != 0.
1935 1944
1936 // At this point, both left and right are either 0 or -0. 1945 // At this point, both left and right are either 0 or -0.
1946 // N.B. The following works because +0 + -0 == +0
1937 if (operation == HMathMinMax::kMathMin) { 1947 if (operation == HMathMinMax::kMathMin) {
1938 // Min: The algorithm is: -((-L) + (-R)), which in case of L and R being 1948 // For min we want logical-or of sign bit: -(-L + -R)
1939 // different registers is most efficiently expressed as -((-L) - R). 1949 __ lcdbr(left_reg, left_reg);
1940 __ fneg(left_reg, left_reg); 1950 __ ldr(result_reg, left_reg);
1941 if (left_reg.is(right_reg)) { 1951 if (left_reg.is(right_reg)) {
1942 __ fadd(result_reg, left_reg, right_reg); 1952 __ adbr(result_reg, right_reg);
1943 } else { 1953 } else {
1944 __ fsub(result_reg, left_reg, right_reg); 1954 __ sdbr(result_reg, right_reg);
1945 } 1955 }
1946 __ fneg(result_reg, result_reg); 1956 __ lcdbr(result_reg, result_reg);
1947 } else { 1957 } else {
1948 // Max: The following works because +0 + -0 == +0 1958 // For max we want logical-and of sign bit: (L + R)
1949 __ fadd(result_reg, left_reg, right_reg); 1959 __ ldr(result_reg, left_reg);
1960 __ adbr(result_reg, right_reg);
1950 } 1961 }
1951 __ b(&done); 1962 __ b(&done, Label::kNear);
1952 1963
1953 __ bind(&check_nan_left); 1964 __ bind(&check_nan_left);
1954 __ fcmpu(left_reg, left_reg); 1965 __ cdbr(left_reg, left_reg);
1955 __ bunordered(&return_left); // left == NaN. 1966 __ bunordered(&return_left, Label::kNear); // left == NaN.
1956 1967
1957 __ bind(&return_right); 1968 __ bind(&return_right);
1958 if (!right_reg.is(result_reg)) { 1969 if (!right_reg.is(result_reg)) {
1959 __ fmr(result_reg, right_reg); 1970 __ ldr(result_reg, right_reg);
1960 } 1971 }
1961 __ b(&done); 1972 __ b(&done, Label::kNear);
1962 1973
1963 __ bind(&return_left); 1974 __ bind(&return_left);
1964 if (!left_reg.is(result_reg)) { 1975 if (!left_reg.is(result_reg)) {
1965 __ fmr(result_reg, left_reg); 1976 __ ldr(result_reg, left_reg);
1966 } 1977 }
1967 __ bind(&done); 1978 __ bind(&done);
1968 } 1979 }
1969 } 1980 }
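The ±0 handling above exploits IEEE-754 sign arithmetic: for two zeros, L + R carries the AND of the sign bits (negative only if both are, giving max), while -(-L + -R) carries the OR (negative if either is, giving min). The identity can be checked standalone (a sketch; std::signbit is from <cmath>):

    #include <cmath>

    // For l, r restricted to {+0.0, -0.0}:
    //   l + r      is negative only if both are  (sign bits ANDed -> max)
    //   -(-l + -r) is negative if either is      (sign bits ORed  -> min)
    bool ZeroMinMaxSignsMatch(double l, double r) {
      return std::signbit(l + r) == (std::signbit(l) && std::signbit(r)) &&
             std::signbit(-(-l + -r)) == (std::signbit(l) || std::signbit(r));
    }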
1970 1981
1971
1972 void LCodeGen::DoArithmeticD(LArithmeticD* instr) { 1982 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1973 DoubleRegister left = ToDoubleRegister(instr->left()); 1983 DoubleRegister left = ToDoubleRegister(instr->left());
1974 DoubleRegister right = ToDoubleRegister(instr->right()); 1984 DoubleRegister right = ToDoubleRegister(instr->right());
1975 DoubleRegister result = ToDoubleRegister(instr->result()); 1985 DoubleRegister result = ToDoubleRegister(instr->result());
1986 // All operations except MOD are computed in-place.
1987 DCHECK(instr->op() == Token::MOD || left.is(result));
1976 switch (instr->op()) { 1988 switch (instr->op()) {
1977 case Token::ADD: 1989 case Token::ADD:
1978 __ fadd(result, left, right); 1990 __ adbr(result, right);
1979 break; 1991 break;
1980 case Token::SUB: 1992 case Token::SUB:
1981 __ fsub(result, left, right); 1993 __ sdbr(result, right);
1982 break; 1994 break;
1983 case Token::MUL: 1995 case Token::MUL:
1984 __ fmul(result, left, right); 1996 __ mdbr(result, right);
1985 break; 1997 break;
1986 case Token::DIV: 1998 case Token::DIV:
1987 __ fdiv(result, left, right); 1999 __ ddbr(result, right);
1988 break; 2000 break;
1989 case Token::MOD: { 2001 case Token::MOD: {
1990 __ PrepareCallCFunction(0, 2, scratch0()); 2002 __ PrepareCallCFunction(0, 2, scratch0());
1991 __ MovToFloatParameters(left, right); 2003 __ MovToFloatParameters(left, right);
1992 __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()), 2004 __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
1993 0, 2); 2005 0, 2);
1994 // Move the result into the double result register. 2006 // Move the result into the double result register.
1995 __ MovFromFloatResult(result); 2007 __ MovFromFloatResult(result);
1996 break; 2008 break;
1997 } 2009 }
1998 default: 2010 default:
1999 UNREACHABLE(); 2011 UNREACHABLE();
2000 break; 2012 break;
2001 } 2013 }
2002 } 2014 }
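Token::MOD is the one case that leaves the FPU: there is no double-remainder machine instruction, so it calls into the runtime. The runtime helper is assumed here to behave like C's fmod (a sketch, not the actual implementation of mod_two_doubles_operation):

    #include <cmath>

    // Assumed equivalent of the mod_two_doubles runtime call:
    double ModTwoDoubles(double x, double y) { return std::fmod(x, y); }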
2003 2015
2004
2005 void LCodeGen::DoArithmeticT(LArithmeticT* instr) { 2016 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
2006 DCHECK(ToRegister(instr->context()).is(cp)); 2017 DCHECK(ToRegister(instr->context()).is(cp));
2007 DCHECK(ToRegister(instr->left()).is(r4)); 2018 DCHECK(ToRegister(instr->left()).is(r3));
2008 DCHECK(ToRegister(instr->right()).is(r3)); 2019 DCHECK(ToRegister(instr->right()).is(r2));
2009 DCHECK(ToRegister(instr->result()).is(r3)); 2020 DCHECK(ToRegister(instr->result()).is(r2));
2010 2021
2011 Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code(); 2022 Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
2012 CallCode(code, RelocInfo::CODE_TARGET, instr); 2023 CallCode(code, RelocInfo::CODE_TARGET, instr);
2013 } 2024 }
2014 2025
2015
2016 template <class InstrType> 2026 template <class InstrType>
2017 void LCodeGen::EmitBranch(InstrType instr, Condition cond, CRegister cr) { 2027 void LCodeGen::EmitBranch(InstrType instr, Condition cond) {
2018 int left_block = instr->TrueDestination(chunk_); 2028 int left_block = instr->TrueDestination(chunk_);
2019 int right_block = instr->FalseDestination(chunk_); 2029 int right_block = instr->FalseDestination(chunk_);
2020 2030
2021 int next_block = GetNextEmittedBlock(); 2031 int next_block = GetNextEmittedBlock();
2022 2032
2023 if (right_block == left_block || cond == al) { 2033 if (right_block == left_block || cond == al) {
2024 EmitGoto(left_block); 2034 EmitGoto(left_block);
2025 } else if (left_block == next_block) { 2035 } else if (left_block == next_block) {
2026 __ b(NegateCondition(cond), chunk_->GetAssemblyLabel(right_block), cr); 2036 __ b(NegateCondition(cond), chunk_->GetAssemblyLabel(right_block));
2027 } else if (right_block == next_block) { 2037 } else if (right_block == next_block) {
2028 __ b(cond, chunk_->GetAssemblyLabel(left_block), cr); 2038 __ b(cond, chunk_->GetAssemblyLabel(left_block));
2029 } else { 2039 } else {
2030 __ b(cond, chunk_->GetAssemblyLabel(left_block), cr); 2040 __ b(cond, chunk_->GetAssemblyLabel(left_block));
2031 __ b(chunk_->GetAssemblyLabel(right_block)); 2041 __ b(chunk_->GetAssemblyLabel(right_block));
2032 } 2042 }
2033 } 2043 }
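EmitBranch is a small layout optimization: when one successor is the fall-through block, a single conditional branch suffices. In outline (a sketch of the logic above; Goto, Branch and NegateCondition are placeholders, not V8's emitters):

    enum Condition { al, eq, ne, lt, gt, le, ge };
    Condition NegateCondition(Condition cond);  // assumed helper
    void Goto(int block);                       // placeholder emitters
    void Branch(Condition cond, int block);

    void EmitBranchSketch(int true_blk, int false_blk, int next_blk,
                          Condition cond) {
      if (true_blk == false_blk || cond == al) {
        Goto(true_blk);                            // one target or always taken
      } else if (true_blk == next_blk) {
        Branch(NegateCondition(cond), false_blk);  // fall through into true_blk
      } else if (false_blk == next_blk) {
        Branch(cond, true_blk);                    // fall through into false_blk
      } else {
        Branch(cond, true_blk);                    // need both: branch + jump
        Goto(false_blk);
      }
    }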
2034 2044
2045 template <class InstrType>
2046 void LCodeGen::EmitTrueBranch(InstrType instr, Condition cond) {
2047 int true_block = instr->TrueDestination(chunk_);
2048 __ b(cond, chunk_->GetAssemblyLabel(true_block));
2049 }
2035 2050
2036 template <class InstrType> 2051 template <class InstrType>
2037 void LCodeGen::EmitTrueBranch(InstrType instr, Condition cond, CRegister cr) { 2052 void LCodeGen::EmitFalseBranch(InstrType instr, Condition cond) {
2038 int true_block = instr->TrueDestination(chunk_); 2053 int false_block = instr->FalseDestination(chunk_);
2039 __ b(cond, chunk_->GetAssemblyLabel(true_block), cr); 2054 __ b(cond, chunk_->GetAssemblyLabel(false_block));
2040 } 2055 }
2041 2056
2042
2043 template <class InstrType>
2044 void LCodeGen::EmitFalseBranch(InstrType instr, Condition cond, CRegister cr) {
2045 int false_block = instr->FalseDestination(chunk_);
2046 __ b(cond, chunk_->GetAssemblyLabel(false_block), cr);
2047 }
2048
2049
2050 void LCodeGen::DoDebugBreak(LDebugBreak* instr) { __ stop("LBreak"); } 2057 void LCodeGen::DoDebugBreak(LDebugBreak* instr) { __ stop("LBreak"); }
2051 2058
2052
2053 void LCodeGen::DoBranch(LBranch* instr) { 2059 void LCodeGen::DoBranch(LBranch* instr) {
2054 Representation r = instr->hydrogen()->value()->representation(); 2060 Representation r = instr->hydrogen()->value()->representation();
2055 DoubleRegister dbl_scratch = double_scratch0(); 2061 DoubleRegister dbl_scratch = double_scratch0();
2056 const uint crZOrNaNBits = (1 << (31 - Assembler::encode_crbit(cr7, CR_EQ)) |
2057 1 << (31 - Assembler::encode_crbit(cr7, CR_FU)));
2058 2062
2059 if (r.IsInteger32()) { 2063 if (r.IsInteger32()) {
2060 DCHECK(!info()->IsStub()); 2064 DCHECK(!info()->IsStub());
2061 Register reg = ToRegister(instr->value()); 2065 Register reg = ToRegister(instr->value());
2062 __ cmpwi(reg, Operand::Zero()); 2066 __ Cmp32(reg, Operand::Zero());
2063 EmitBranch(instr, ne); 2067 EmitBranch(instr, ne);
2064 } else if (r.IsSmi()) { 2068 } else if (r.IsSmi()) {
2065 DCHECK(!info()->IsStub()); 2069 DCHECK(!info()->IsStub());
2066 Register reg = ToRegister(instr->value()); 2070 Register reg = ToRegister(instr->value());
2067 __ cmpi(reg, Operand::Zero()); 2071 __ CmpP(reg, Operand::Zero());
2068 EmitBranch(instr, ne); 2072 EmitBranch(instr, ne);
2069 } else if (r.IsDouble()) { 2073 } else if (r.IsDouble()) {
2070 DCHECK(!info()->IsStub()); 2074 DCHECK(!info()->IsStub());
2071 DoubleRegister reg = ToDoubleRegister(instr->value()); 2075 DoubleRegister reg = ToDoubleRegister(instr->value());
2076 __ lzdr(kDoubleRegZero);
2077 __ cdbr(reg, kDoubleRegZero);
2072 // Test the double value. Zero and NaN are false. 2078 // Test the double value. Zero and NaN are false.
2073 __ fcmpu(reg, kDoubleRegZero, cr7); 2079 Condition lt_gt = static_cast<Condition>(lt | gt);
2074 __ mfcr(r0); 2080
2075 __ andi(r0, r0, Operand(crZOrNaNBits)); 2081 EmitBranch(instr, lt_gt);
2076 EmitBranch(instr, eq, cr0);
2077 } else { 2082 } else {
2078 DCHECK(r.IsTagged()); 2083 DCHECK(r.IsTagged());
2079 Register reg = ToRegister(instr->value()); 2084 Register reg = ToRegister(instr->value());
2080 HType type = instr->hydrogen()->value()->type(); 2085 HType type = instr->hydrogen()->value()->type();
2081 if (type.IsBoolean()) { 2086 if (type.IsBoolean()) {
2082 DCHECK(!info()->IsStub()); 2087 DCHECK(!info()->IsStub());
2083 __ CompareRoot(reg, Heap::kTrueValueRootIndex); 2088 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2084 EmitBranch(instr, eq); 2089 EmitBranch(instr, eq);
2085 } else if (type.IsSmi()) { 2090 } else if (type.IsSmi()) {
2086 DCHECK(!info()->IsStub()); 2091 DCHECK(!info()->IsStub());
2087 __ cmpi(reg, Operand::Zero()); 2092 __ CmpP(reg, Operand::Zero());
2088 EmitBranch(instr, ne); 2093 EmitBranch(instr, ne);
2089 } else if (type.IsJSArray()) { 2094 } else if (type.IsJSArray()) {
2090 DCHECK(!info()->IsStub()); 2095 DCHECK(!info()->IsStub());
2091 EmitBranch(instr, al); 2096 EmitBranch(instr, al);
2092 } else if (type.IsHeapNumber()) { 2097 } else if (type.IsHeapNumber()) {
2093 DCHECK(!info()->IsStub()); 2098 DCHECK(!info()->IsStub());
2094 __ lfd(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset)); 2099 __ ld(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2095 // Test the double value. Zero and NaN are false. 2100 // Test the double value. Zero and NaN are false.
2096 __ fcmpu(dbl_scratch, kDoubleRegZero, cr7); 2101 __ lzdr(kDoubleRegZero);
2097 __ mfcr(r0); 2102 __ cdbr(dbl_scratch, kDoubleRegZero);
2098 __ andi(r0, r0, Operand(crZOrNaNBits)); 2103 Condition lt_gt = static_cast<Condition>(lt | gt);
2099 EmitBranch(instr, eq, cr0); 2104 EmitBranch(instr, lt_gt);
2100 } else if (type.IsString()) { 2105 } else if (type.IsString()) {
2101 DCHECK(!info()->IsStub()); 2106 DCHECK(!info()->IsStub());
2102 __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset)); 2107 __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
2103 __ cmpi(ip, Operand::Zero()); 2108 __ CmpP(ip, Operand::Zero());
2104 EmitBranch(instr, ne); 2109 EmitBranch(instr, ne);
2105 } else { 2110 } else {
2106 ToBooleanICStub::Types expected = 2111 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
2107 instr->hydrogen()->expected_input_types();
2108 // Avoid deopts in the case where we've never executed this path before. 2112 // Avoid deopts in the case where we've never executed this path before.
2109 if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic(); 2113 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
2110 2114
2111 if (expected.Contains(ToBooleanICStub::UNDEFINED)) { 2115 if (expected.Contains(ToBooleanStub::UNDEFINED)) {
2112 // undefined -> false. 2116 // undefined -> false.
2113 __ CompareRoot(reg, Heap::kUndefinedValueRootIndex); 2117 __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
2114 __ beq(instr->FalseLabel(chunk_)); 2118 __ beq(instr->FalseLabel(chunk_));
2115 } 2119 }
2116 if (expected.Contains(ToBooleanICStub::BOOLEAN)) { 2120 if (expected.Contains(ToBooleanStub::BOOLEAN)) {
2117 // Boolean -> its value. 2121 // Boolean -> its value.
2118 __ CompareRoot(reg, Heap::kTrueValueRootIndex); 2122 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2119 __ beq(instr->TrueLabel(chunk_)); 2123 __ beq(instr->TrueLabel(chunk_));
2120 __ CompareRoot(reg, Heap::kFalseValueRootIndex); 2124 __ CompareRoot(reg, Heap::kFalseValueRootIndex);
2121 __ beq(instr->FalseLabel(chunk_)); 2125 __ beq(instr->FalseLabel(chunk_));
2122 } 2126 }
2123 if (expected.Contains(ToBooleanICStub::NULL_TYPE)) { 2127 if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
2124 // 'null' -> false. 2128 // 'null' -> false.
2125 __ CompareRoot(reg, Heap::kNullValueRootIndex); 2129 __ CompareRoot(reg, Heap::kNullValueRootIndex);
2126 __ beq(instr->FalseLabel(chunk_)); 2130 __ beq(instr->FalseLabel(chunk_));
2127 } 2131 }
2128 2132
2129 if (expected.Contains(ToBooleanICStub::SMI)) { 2133 if (expected.Contains(ToBooleanStub::SMI)) {
2130 // Smis: 0 -> false, all other -> true. 2134 // Smis: 0 -> false, all other -> true.
2131 __ cmpi(reg, Operand::Zero()); 2135 __ CmpP(reg, Operand::Zero());
2132 __ beq(instr->FalseLabel(chunk_)); 2136 __ beq(instr->FalseLabel(chunk_));
2133 __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); 2137 __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2134 } else if (expected.NeedsMap()) { 2138 } else if (expected.NeedsMap()) {
2135 // If we need a map later and have a Smi -> deopt. 2139 // If we need a map later and have a Smi -> deopt.
2136 __ TestIfSmi(reg, r0); 2140 __ TestIfSmi(reg);
2137 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0); 2141 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
2138 } 2142 }
2139 2143
2140 const Register map = scratch0(); 2144 const Register map = scratch0();
2141 if (expected.NeedsMap()) { 2145 if (expected.NeedsMap()) {
2142 __ LoadP(map, FieldMemOperand(reg, HeapObject::kMapOffset)); 2146 __ LoadP(map, FieldMemOperand(reg, HeapObject::kMapOffset));
2143 2147
2144 if (expected.CanBeUndetectable()) { 2148 if (expected.CanBeUndetectable()) {
2145 // Undetectable -> false. 2149 // Undetectable -> false.
2146 __ lbz(ip, FieldMemOperand(map, Map::kBitFieldOffset)); 2150 __ tm(FieldMemOperand(map, Map::kBitFieldOffset),
2147 __ TestBit(ip, Map::kIsUndetectable, r0); 2151 Operand(1 << Map::kIsUndetectable));
2148 __ bne(instr->FalseLabel(chunk_), cr0); 2152 __ bne(instr->FalseLabel(chunk_));
2149 } 2153 }
2150 } 2154 }
2151 2155
2152 if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) { 2156 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
2153 // spec object -> true. 2157 // spec object -> true.
2154 __ CompareInstanceType(map, ip, FIRST_JS_RECEIVER_TYPE); 2158 __ CompareInstanceType(map, ip, FIRST_JS_RECEIVER_TYPE);
2155 __ bge(instr->TrueLabel(chunk_)); 2159 __ bge(instr->TrueLabel(chunk_));
2156 } 2160 }
2157 2161
2158 if (expected.Contains(ToBooleanICStub::STRING)) { 2162 if (expected.Contains(ToBooleanStub::STRING)) {
2159 // String value -> false iff empty. 2163 // String value -> false iff empty.
2160 Label not_string; 2164 Label not_string;
2161 __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE); 2165 __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
2162 __ bge(&not_string); 2166 __ bge(&not_string, Label::kNear);
2163 __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset)); 2167 __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
2164 __ cmpi(ip, Operand::Zero()); 2168 __ CmpP(ip, Operand::Zero());
2165 __ bne(instr->TrueLabel(chunk_)); 2169 __ bne(instr->TrueLabel(chunk_));
2166 __ b(instr->FalseLabel(chunk_)); 2170 __ b(instr->FalseLabel(chunk_));
2167 __ bind(&not_string); 2171 __ bind(&not_string);
2168 } 2172 }
2169 2173
2170 if (expected.Contains(ToBooleanICStub::SYMBOL)) { 2174 if (expected.Contains(ToBooleanStub::SYMBOL)) {
2171 // Symbol value -> true. 2175 // Symbol value -> true.
2172 __ CompareInstanceType(map, ip, SYMBOL_TYPE); 2176 __ CompareInstanceType(map, ip, SYMBOL_TYPE);
2173 __ beq(instr->TrueLabel(chunk_)); 2177 __ beq(instr->TrueLabel(chunk_));
2174 } 2178 }
2175 2179
2176 if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) { 2180 if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
2177 // SIMD value -> true. 2181 // SIMD value -> true.
2178 Label not_simd; 2182 Label not_simd;
2179 __ CompareInstanceType(map, ip, SIMD128_VALUE_TYPE); 2183 __ CompareInstanceType(map, ip, SIMD128_VALUE_TYPE);
2180 __ beq(instr->TrueLabel(chunk_)); 2184 __ beq(instr->TrueLabel(chunk_));
2181 } 2185 }
2182 2186
2183 if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) { 2187 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
2184 // heap number -> false iff +0, -0, or NaN. 2188 // heap number -> false iff +0, -0, or NaN.
2185 Label not_heap_number; 2189 Label not_heap_number;
2186 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex); 2190 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
2187 __ bne(&not_heap_number); 2191 __ bne(&not_heap_number, Label::kNear);
2188 __ lfd(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset)); 2192 __ LoadDouble(dbl_scratch,
2189 // Test the double value. Zero and NaN are false. 2193 FieldMemOperand(reg, HeapNumber::kValueOffset));
2190 __ fcmpu(dbl_scratch, kDoubleRegZero, cr7); 2194 __ lzdr(kDoubleRegZero);
2191 __ mfcr(r0); 2195 __ cdbr(dbl_scratch, kDoubleRegZero);
2192 __ andi(r0, r0, Operand(crZOrNaNBits)); 2196 __ bunordered(instr->FalseLabel(chunk_)); // NaN -> false.
2193 __ bne(instr->FalseLabel(chunk_), cr0); 2197 __ beq(instr->FalseLabel(chunk_)); // +0, -0 -> false.
2194 __ b(instr->TrueLabel(chunk_)); 2198 __ b(instr->TrueLabel(chunk_));
2195 __ bind(&not_heap_number); 2199 __ bind(&not_heap_number);
2196 } 2200 }
2197 2201
2198 if (!expected.IsGeneric()) { 2202 if (!expected.IsGeneric()) {
2199 // We've seen something for the first time -> deopt. 2203 // We've seen something for the first time -> deopt.
2200 // This can only happen if we are not generic already. 2204 // This can only happen if we are not generic already.
2201 DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject); 2205 DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject);
2202 } 2206 }
2203 } 2207 }
2204 } 2208 }
2205 } 2209 }
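The lt|gt condition used for the double cases above encodes JavaScript number truthiness after a compare against zero: ±0 compares equal and NaN compares unordered, so only strictly-less or strictly-greater results reach the true branch. The semantics being implemented, in plain C++ (a sketch):

    // JS ToBoolean for a number: +0, -0 and NaN are false, all else true.
    // NaN is unordered, so neither v < 0.0 nor v > 0.0 holds for it --
    // exactly the lt|gt condition above.
    bool DoubleIsTruthy(double v) { return v < 0.0 || v > 0.0; }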
2206 2210
2207
2208 void LCodeGen::EmitGoto(int block) { 2211 void LCodeGen::EmitGoto(int block) {
2209 if (!IsNextEmittedBlock(block)) { 2212 if (!IsNextEmittedBlock(block)) {
2210 __ b(chunk_->GetAssemblyLabel(LookupDestination(block))); 2213 __ b(chunk_->GetAssemblyLabel(LookupDestination(block)));
2211 } 2214 }
2212 } 2215 }
2213 2216
2214
2215 void LCodeGen::DoGoto(LGoto* instr) { EmitGoto(instr->block_id()); } 2217 void LCodeGen::DoGoto(LGoto* instr) { EmitGoto(instr->block_id()); }
2216 2218
2217
2218 Condition LCodeGen::TokenToCondition(Token::Value op) { 2219 Condition LCodeGen::TokenToCondition(Token::Value op) {
2219 Condition cond = kNoCondition; 2220 Condition cond = kNoCondition;
2220 switch (op) { 2221 switch (op) {
2221 case Token::EQ: 2222 case Token::EQ:
2222 case Token::EQ_STRICT: 2223 case Token::EQ_STRICT:
2223 cond = eq; 2224 cond = eq;
2224 break; 2225 break;
2225 case Token::NE: 2226 case Token::NE:
2226 case Token::NE_STRICT: 2227 case Token::NE_STRICT:
2227 cond = ne; 2228 cond = ne;
(...skipping 11 matching lines...)
2239 cond = ge; 2240 cond = ge;
2240 break; 2241 break;
2241 case Token::IN: 2242 case Token::IN:
2242 case Token::INSTANCEOF: 2243 case Token::INSTANCEOF:
2243 default: 2244 default:
2244 UNREACHABLE(); 2245 UNREACHABLE();
2245 } 2246 }
2246 return cond; 2247 return cond;
2247 } 2248 }
2248 2249
2249
2250 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) { 2250 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2251 LOperand* left = instr->left(); 2251 LOperand* left = instr->left();
2252 LOperand* right = instr->right(); 2252 LOperand* right = instr->right();
2253 bool is_unsigned = 2253 bool is_unsigned =
2254 instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) || 2254 instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2255 instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32); 2255 instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2256 Condition cond = TokenToCondition(instr->op()); 2256 Condition cond = TokenToCondition(instr->op());
2257 2257
2258 if (left->IsConstantOperand() && right->IsConstantOperand()) { 2258 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2259 // We can statically evaluate the comparison. 2259 // We can statically evaluate the comparison.
2260 double left_val = ToDouble(LConstantOperand::cast(left)); 2260 double left_val = ToDouble(LConstantOperand::cast(left));
2261 double right_val = ToDouble(LConstantOperand::cast(right)); 2261 double right_val = ToDouble(LConstantOperand::cast(right));
2262 int next_block = Token::EvalComparison(instr->op(), left_val, right_val) 2262 int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
2263 ? instr->TrueDestination(chunk_) 2263 ? instr->TrueDestination(chunk_)
2264 : instr->FalseDestination(chunk_); 2264 : instr->FalseDestination(chunk_);
2265 EmitGoto(next_block); 2265 EmitGoto(next_block);
2266 } else { 2266 } else {
2267 if (instr->is_double()) { 2267 if (instr->is_double()) {
2268 // Compare left and right operands as doubles and load the 2268 // Compare left and right operands as doubles and load the
2269 // resulting flags into the normal status register. 2269 // resulting flags into the normal status register.
2270 __ fcmpu(ToDoubleRegister(left), ToDoubleRegister(right)); 2270 __ cdbr(ToDoubleRegister(left), ToDoubleRegister(right));
2271 // If a NaN is involved, i.e. the result is unordered, 2271 // If a NaN is involved, i.e. the result is unordered,
2272 // jump to false block label. 2272 // jump to false block label.
2273 __ bunordered(instr->FalseLabel(chunk_)); 2273 __ bunordered(instr->FalseLabel(chunk_));
2274 } else { 2274 } else {
2275 if (right->IsConstantOperand()) { 2275 if (right->IsConstantOperand()) {
2276 int32_t value = ToInteger32(LConstantOperand::cast(right)); 2276 int32_t value = ToInteger32(LConstantOperand::cast(right));
2277 if (instr->hydrogen_value()->representation().IsSmi()) { 2277 if (instr->hydrogen_value()->representation().IsSmi()) {
2278 if (is_unsigned) { 2278 if (is_unsigned) {
2279 __ CmplSmiLiteral(ToRegister(left), Smi::FromInt(value), r0); 2279 __ CmpLogicalSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
2280 } else { 2280 } else {
2281 __ CmpSmiLiteral(ToRegister(left), Smi::FromInt(value), r0); 2281 __ CmpSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
2282 } 2282 }
2283 } else { 2283 } else {
2284 if (is_unsigned) { 2284 if (is_unsigned) {
2285 __ Cmplwi(ToRegister(left), Operand(value), r0); 2285 __ CmpLogical32(ToRegister(left), ToOperand(right));
2286 } else { 2286 } else {
2287 __ Cmpwi(ToRegister(left), Operand(value), r0); 2287 __ Cmp32(ToRegister(left), ToOperand(right));
2288 } 2288 }
2289 } 2289 }
2290 } else if (left->IsConstantOperand()) { 2290 } else if (left->IsConstantOperand()) {
2291 int32_t value = ToInteger32(LConstantOperand::cast(left)); 2291 int32_t value = ToInteger32(LConstantOperand::cast(left));
2292 if (instr->hydrogen_value()->representation().IsSmi()) { 2292 if (instr->hydrogen_value()->representation().IsSmi()) {
2293 if (is_unsigned) { 2293 if (is_unsigned) {
2294 __ CmplSmiLiteral(ToRegister(right), Smi::FromInt(value), r0); 2294 __ CmpLogicalSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
2295 } else { 2295 } else {
2296 __ CmpSmiLiteral(ToRegister(right), Smi::FromInt(value), r0); 2296 __ CmpSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
2297 } 2297 }
2298 } else { 2298 } else {
2299 if (is_unsigned) { 2299 if (is_unsigned) {
2300 __ Cmplwi(ToRegister(right), Operand(value), r0); 2300 __ CmpLogical32(ToRegister(right), ToOperand(left));
2301 } else { 2301 } else {
2302 __ Cmpwi(ToRegister(right), Operand(value), r0); 2302 __ Cmp32(ToRegister(right), ToOperand(left));
2303 } 2303 }
2304 } 2304 }
2305 // We commuted the operands, so commute the condition. 2305 // We commuted the operands, so commute the condition.
2306 cond = CommuteCondition(cond); 2306 cond = CommuteCondition(cond);
2307 } else if (instr->hydrogen_value()->representation().IsSmi()) { 2307 } else if (instr->hydrogen_value()->representation().IsSmi()) {
2308 if (is_unsigned) { 2308 if (is_unsigned) {
2309 __ cmpl(ToRegister(left), ToRegister(right)); 2309 __ CmpLogicalP(ToRegister(left), ToRegister(right));
2310 } else { 2310 } else {
2311 __ cmp(ToRegister(left), ToRegister(right)); 2311 __ CmpP(ToRegister(left), ToRegister(right));
2312 } 2312 }
2313 } else { 2313 } else {
2314 if (is_unsigned) { 2314 if (is_unsigned) {
2315 __ cmplw(ToRegister(left), ToRegister(right)); 2315 __ CmpLogical32(ToRegister(left), ToRegister(right));
2316 } else { 2316 } else {
2317 __ cmpw(ToRegister(left), ToRegister(right)); 2317 __ Cmp32(ToRegister(left), ToRegister(right));
2318 } 2318 }
2319 } 2319 }
2320 } 2320 }
2321 EmitBranch(instr, cond); 2321 EmitBranch(instr, cond);
2322 } 2322 }
2323 } 2323 }
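When a constant lands on the left, the operands are swapped and the condition must be commuted, not negated: a < b is b > a, whereas !(a < b) would be a >= b. A sketch of the mapping CommuteCondition is assumed to perform:

    Condition CommuteConditionSketch(Condition cond) {
      switch (cond) {
        case lt: return gt;    // a < b  <=>  b > a
        case gt: return lt;
        case le: return ge;
        case ge: return le;
        default: return cond;  // eq and ne are symmetric
      }
    }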
2324 2324
2325
2326 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) { 2325 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2327 Register left = ToRegister(instr->left()); 2326 Register left = ToRegister(instr->left());
2328 Register right = ToRegister(instr->right()); 2327 Register right = ToRegister(instr->right());
2329 2328
2330 __ cmp(left, right); 2329 __ CmpP(left, right);
2331 EmitBranch(instr, eq); 2330 EmitBranch(instr, eq);
2332 } 2331 }
2333 2332
2334
2335 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) { 2333 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2336 if (instr->hydrogen()->representation().IsTagged()) { 2334 if (instr->hydrogen()->representation().IsTagged()) {
2337 Register input_reg = ToRegister(instr->object()); 2335 Register input_reg = ToRegister(instr->object());
2338 __ mov(ip, Operand(factory()->the_hole_value())); 2336 __ CmpP(input_reg, Operand(factory()->the_hole_value()));
2339 __ cmp(input_reg, ip);
2340 EmitBranch(instr, eq); 2337 EmitBranch(instr, eq);
2341 return; 2338 return;
2342 } 2339 }
2343 2340
2344 DoubleRegister input_reg = ToDoubleRegister(instr->object()); 2341 DoubleRegister input_reg = ToDoubleRegister(instr->object());
2345 __ fcmpu(input_reg, input_reg); 2342 __ cdbr(input_reg, input_reg);
2346 EmitFalseBranch(instr, ordered); 2343 EmitFalseBranch(instr, ordered);
2347 2344
2348 Register scratch = scratch0(); 2345 Register scratch = scratch0();
2349 __ MovDoubleHighToInt(scratch, input_reg); 2346 // Convert to GPR and examine the upper 32 bits
2350 __ Cmpi(scratch, Operand(kHoleNanUpper32), r0); 2347 __ lgdr(scratch, input_reg);
2348 __ srlg(scratch, scratch, Operand(32));
2349 __ Cmp32(scratch, Operand(kHoleNanUpper32));
2351 EmitBranch(instr, eq); 2350 EmitBranch(instr, eq);
2352 } 2351 }
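The hole is one specific NaN bit pattern, so once the self-compare above has established the value is some NaN, its upper word alone distinguishes the hole. The equivalent check in portable C++ (a sketch; kHoleNanUpper32 is V8's constant for that word):

    #include <cstdint>
    #include <cstring>

    bool IsHoleNaN(double v) {
      uint64_t bits;
      std::memcpy(&bits, &v, sizeof bits);
      return static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32;
    }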
2353 2352
2354
2355 Condition LCodeGen::EmitIsString(Register input, Register temp1, 2353 Condition LCodeGen::EmitIsString(Register input, Register temp1,
2356 Label* is_not_string, 2354 Label* is_not_string,
2357 SmiCheck check_needed = INLINE_SMI_CHECK) { 2355 SmiCheck check_needed = INLINE_SMI_CHECK) {
2358 if (check_needed == INLINE_SMI_CHECK) { 2356 if (check_needed == INLINE_SMI_CHECK) {
2359 __ JumpIfSmi(input, is_not_string); 2357 __ JumpIfSmi(input, is_not_string);
2360 } 2358 }
2361 __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE); 2359 __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
2362 2360
2363 return lt; 2361 return lt;
2364 } 2362 }
2365 2363
2366
2367 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) { 2364 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2368 Register reg = ToRegister(instr->value()); 2365 Register reg = ToRegister(instr->value());
2369 Register temp1 = ToRegister(instr->temp()); 2366 Register temp1 = ToRegister(instr->temp());
2370 2367
2371 SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject() 2368 SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject()
2372 ? OMIT_SMI_CHECK 2369 ? OMIT_SMI_CHECK
2373 : INLINE_SMI_CHECK; 2370 : INLINE_SMI_CHECK;
2374 Condition true_cond = 2371 Condition true_cond =
2375 EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed); 2372 EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
2376 2373
2377 EmitBranch(instr, true_cond); 2374 EmitBranch(instr, true_cond);
2378 } 2375 }
2379 2376
2380
2381 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) { 2377 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2382 Register input_reg = EmitLoadRegister(instr->value(), ip); 2378 Register input_reg = EmitLoadRegister(instr->value(), ip);
2383 __ TestIfSmi(input_reg, r0); 2379 __ TestIfSmi(input_reg);
2384 EmitBranch(instr, eq, cr0); 2380 EmitBranch(instr, eq);
2385 } 2381 }
2386 2382
2387
2388 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) { 2383 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2389 Register input = ToRegister(instr->value()); 2384 Register input = ToRegister(instr->value());
2390 Register temp = ToRegister(instr->temp()); 2385 Register temp = ToRegister(instr->temp());
2391 2386
2392 if (!instr->hydrogen()->value()->type().IsHeapObject()) { 2387 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2393 __ JumpIfSmi(input, instr->FalseLabel(chunk_)); 2388 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2394 } 2389 }
2395 __ LoadP(temp, FieldMemOperand(input, HeapObject::kMapOffset)); 2390 __ LoadP(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2396 __ lbz(temp, FieldMemOperand(temp, Map::kBitFieldOffset)); 2391 __ tm(FieldMemOperand(temp, Map::kBitFieldOffset),
2397 __ TestBit(temp, Map::kIsUndetectable, r0); 2392 Operand(1 << Map::kIsUndetectable));
2398 EmitBranch(instr, ne, cr0); 2393 EmitBranch(instr, ne);
2399 } 2394 }
2400 2395
2401
2402 static Condition ComputeCompareCondition(Token::Value op) { 2396 static Condition ComputeCompareCondition(Token::Value op) {
2403 switch (op) { 2397 switch (op) {
2404 case Token::EQ_STRICT: 2398 case Token::EQ_STRICT:
2405 case Token::EQ: 2399 case Token::EQ:
2406 return eq; 2400 return eq;
2407 case Token::LT: 2401 case Token::LT:
2408 return lt; 2402 return lt;
2409 case Token::GT: 2403 case Token::GT:
2410 return gt; 2404 return gt;
2411 case Token::LTE: 2405 case Token::LTE:
2412 return le; 2406 return le;
2413 case Token::GTE: 2407 case Token::GTE:
2414 return ge; 2408 return ge;
2415 default: 2409 default:
2416 UNREACHABLE(); 2410 UNREACHABLE();
2417 return kNoCondition; 2411 return kNoCondition;
2418 } 2412 }
2419 } 2413 }
2420 2414
2421
2422 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) { 2415 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2423 DCHECK(ToRegister(instr->context()).is(cp)); 2416 DCHECK(ToRegister(instr->context()).is(cp));
2424 DCHECK(ToRegister(instr->left()).is(r4)); 2417 DCHECK(ToRegister(instr->left()).is(r3));
2425 DCHECK(ToRegister(instr->right()).is(r3)); 2418 DCHECK(ToRegister(instr->right()).is(r2));
2426 2419
2427 Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code(); 2420 Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
2428 CallCode(code, RelocInfo::CODE_TARGET, instr); 2421 CallCode(code, RelocInfo::CODE_TARGET, instr);
2429 __ CompareRoot(r3, Heap::kTrueValueRootIndex); 2422 __ CmpP(r2, Operand::Zero());
2430 EmitBranch(instr, eq); 2423
2424 EmitBranch(instr, ComputeCompareCondition(instr->op()));
2431 } 2425 }
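The rewritten tail assumes the StringCompare stub now returns a three-way result (negative, zero, positive, strcmp-style) rather than a boolean, so comparing it against zero and reusing the token's condition yields the branch. An illustration of the idea (a sketch; strcmp stands in for the stub):

    #include <cstring>

    bool StringTokenCompare(const char* a, const char* b, Token::Value op) {
      int r = std::strcmp(a, b);  // <0, 0 or >0, like the stub's result
      switch (op) {
        case Token::LT:  return r < 0;
        case Token::LTE: return r <= 0;
        case Token::GT:  return r > 0;
        case Token::GTE: return r >= 0;
        default:         return r == 0;  // EQ / EQ_STRICT
      }
    }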
2432 2426
2433
2434 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) { 2427 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2435 InstanceType from = instr->from(); 2428 InstanceType from = instr->from();
2436 InstanceType to = instr->to(); 2429 InstanceType to = instr->to();
2437 if (from == FIRST_TYPE) return to; 2430 if (from == FIRST_TYPE) return to;
2438 DCHECK(from == to || to == LAST_TYPE); 2431 DCHECK(from == to || to == LAST_TYPE);
2439 return from; 2432 return from;
2440 } 2433 }
2441 2434
2442
2443 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) { 2435 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2444 InstanceType from = instr->from(); 2436 InstanceType from = instr->from();
2445 InstanceType to = instr->to(); 2437 InstanceType to = instr->to();
2446 if (from == to) return eq; 2438 if (from == to) return eq;
2447 if (to == LAST_TYPE) return ge; 2439 if (to == LAST_TYPE) return ge;
2448 if (from == FIRST_TYPE) return le; 2440 if (from == FIRST_TYPE) return le;
2449 UNREACHABLE(); 2441 UNREACHABLE();
2450 return eq; 2442 return eq;
2451 } 2443 }
2452 2444
2453
2454 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) { 2445 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2455 Register scratch = scratch0(); 2446 Register scratch = scratch0();
2456 Register input = ToRegister(instr->value()); 2447 Register input = ToRegister(instr->value());
2457 2448
2458 if (!instr->hydrogen()->value()->type().IsHeapObject()) { 2449 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2459 __ JumpIfSmi(input, instr->FalseLabel(chunk_)); 2450 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2460 } 2451 }
2461 2452
2462 __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen())); 2453 __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
2463 EmitBranch(instr, BranchCondition(instr->hydrogen())); 2454 EmitBranch(instr, BranchCondition(instr->hydrogen()));
2464 } 2455 }
2465 2456
2466
2467 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) { 2457 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2468 Register input = ToRegister(instr->value()); 2458 Register input = ToRegister(instr->value());
2469 Register result = ToRegister(instr->result()); 2459 Register result = ToRegister(instr->result());
2470 2460
2471 __ AssertString(input); 2461 __ AssertString(input);
2472 2462
2473 __ lwz(result, FieldMemOperand(input, String::kHashFieldOffset)); 2463 __ LoadlW(result, FieldMemOperand(input, String::kHashFieldOffset));
2474 __ IndexFromHash(result, result); 2464 __ IndexFromHash(result, result);
2475 } 2465 }
2476 2466
2477
2478 void LCodeGen::DoHasCachedArrayIndexAndBranch( 2467 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2479 LHasCachedArrayIndexAndBranch* instr) { 2468 LHasCachedArrayIndexAndBranch* instr) {
2480 Register input = ToRegister(instr->value()); 2469 Register input = ToRegister(instr->value());
2481 Register scratch = scratch0(); 2470 Register scratch = scratch0();
2482 2471
2483 __ lwz(scratch, FieldMemOperand(input, String::kHashFieldOffset)); 2472 __ LoadlW(scratch, FieldMemOperand(input, String::kHashFieldOffset));
2484 __ mov(r0, Operand(String::kContainsCachedArrayIndexMask)); 2473 __ mov(r0, Operand(String::kContainsCachedArrayIndexMask));
2485 __ and_(r0, scratch, r0, SetRC); 2474 __ AndP(r0, scratch);
2486 EmitBranch(instr, eq, cr0); 2475 EmitBranch(instr, eq);
2487 } 2476 }
2488 2477
2489
2490 // Branches to a label or falls through with the answer in flags. Trashes 2478 // Branches to a label or falls through with the answer in flags. Trashes
2491 // the temp registers, but not the input. 2479 // the temp registers, but not the input.
2492 void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false, 2480 void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false,
2493 Handle<String> class_name, Register input, 2481 Handle<String> class_name, Register input,
2494 Register temp, Register temp2) { 2482 Register temp, Register temp2) {
2495 DCHECK(!input.is(temp)); 2483 DCHECK(!input.is(temp));
2496 DCHECK(!input.is(temp2)); 2484 DCHECK(!input.is(temp2));
2497 DCHECK(!temp.is(temp2)); 2485 DCHECK(!temp.is(temp2));
2498 2486
2499 __ JumpIfSmi(input, is_false); 2487 __ JumpIfSmi(input, is_false);
2500 2488
2501 __ CompareObjectType(input, temp, temp2, JS_FUNCTION_TYPE); 2489 __ CompareObjectType(input, temp, temp2, JS_FUNCTION_TYPE);
2502 if (String::Equals(isolate()->factory()->Function_string(), class_name)) { 2490 if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
2503 __ beq(is_true); 2491 __ beq(is_true);
2504 } else { 2492 } else {
2505 __ beq(is_false); 2493 __ beq(is_false);
2506 } 2494 }
2507 2495
2508 // Check if the constructor in the map is a function. 2496 // Check if the constructor in the map is a function.
2509 Register instance_type = ip; 2497 Register instance_type = ip;
2510 __ GetMapConstructor(temp, temp, temp2, instance_type); 2498 __ GetMapConstructor(temp, temp, temp2, instance_type);
2511 2499
2512 // Objects with a non-function constructor have class 'Object'. 2500 // Objects with a non-function constructor have class 'Object'.
2513 __ cmpi(instance_type, Operand(JS_FUNCTION_TYPE)); 2501 __ CmpP(instance_type, Operand(JS_FUNCTION_TYPE));
2514 if (String::Equals(isolate()->factory()->Object_string(), class_name)) { 2502 if (String::Equals(isolate()->factory()->Object_string(), class_name)) {
2515 __ bne(is_true); 2503 __ bne(is_true);
2516 } else { 2504 } else {
2517 __ bne(is_false); 2505 __ bne(is_false);
2518 } 2506 }
2519 2507
2520 // temp now contains the constructor function. Grab the 2508 // temp now contains the constructor function. Grab the
2521 // instance class name from there. 2509 // instance class name from there.
2522 __ LoadP(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset)); 2510 __ LoadP(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2523 __ LoadP(temp, 2511 __ LoadP(temp,
2524 FieldMemOperand(temp, SharedFunctionInfo::kInstanceClassNameOffset)); 2512 FieldMemOperand(temp, SharedFunctionInfo::kInstanceClassNameOffset));
2525 // The class name we are testing against is internalized since it's a literal. 2513 // The class name we are testing against is internalized since it's a literal.
2526 // The name in the constructor is internalized because of the way the context 2514 // The name in the constructor is internalized because of the way the context
2527 // is booted. This routine isn't expected to work for random API-created 2515 // is booted. This routine isn't expected to work for random API-created
2528 // classes and it doesn't have to because you can't access it with natives 2516 // classes and it doesn't have to because you can't access it with natives
2529 // syntax. Since both sides are internalized it is sufficient to use an 2517 // syntax. Since both sides are internalized it is sufficient to use an
2530 // identity comparison. 2518 // identity comparison.
2531 __ Cmpi(temp, Operand(class_name), r0); 2519 __ CmpP(temp, Operand(class_name));
2532 // End with the answer in flags. 2520 // End with the answer in flags.
2533 } 2521 }
2534 2522
2535
2536 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) { 2523 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2537 Register input = ToRegister(instr->value()); 2524 Register input = ToRegister(instr->value());
2538 Register temp = scratch0(); 2525 Register temp = scratch0();
2539 Register temp2 = ToRegister(instr->temp()); 2526 Register temp2 = ToRegister(instr->temp());
2540 Handle<String> class_name = instr->hydrogen()->class_name(); 2527 Handle<String> class_name = instr->hydrogen()->class_name();
2541 2528
2542 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), 2529 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2543 class_name, input, temp, temp2); 2530 class_name, input, temp, temp2);
2544 2531
2545 EmitBranch(instr, eq); 2532 EmitBranch(instr, eq);
2546 } 2533 }
2547 2534
2548
2549 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) { 2535 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2550 Register reg = ToRegister(instr->value()); 2536 Register reg = ToRegister(instr->value());
2551 Register temp = ToRegister(instr->temp()); 2537 Register temp = ToRegister(instr->temp());
2552 2538
2553 __ LoadP(temp, FieldMemOperand(reg, HeapObject::kMapOffset)); 2539 __ mov(temp, Operand(instr->map()));
2554 __ Cmpi(temp, Operand(instr->map()), r0); 2540 __ CmpP(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
2555 EmitBranch(instr, eq); 2541 EmitBranch(instr, eq);
2556 } 2542 }
2557 2543
2558
2559 void LCodeGen::DoInstanceOf(LInstanceOf* instr) { 2544 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2560 DCHECK(ToRegister(instr->context()).is(cp)); 2545 DCHECK(ToRegister(instr->context()).is(cp));
2561 DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister())); 2546 DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
2562 DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister())); 2547 DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
2563 DCHECK(ToRegister(instr->result()).is(r3)); 2548 DCHECK(ToRegister(instr->result()).is(r2));
2564 InstanceOfStub stub(isolate()); 2549 InstanceOfStub stub(isolate());
2565 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 2550 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2566 } 2551 }
2567 2552
2568
2569 void LCodeGen::DoHasInPrototypeChainAndBranch( 2553 void LCodeGen::DoHasInPrototypeChainAndBranch(
2570 LHasInPrototypeChainAndBranch* instr) { 2554 LHasInPrototypeChainAndBranch* instr) {
2571 Register const object = ToRegister(instr->object()); 2555 Register const object = ToRegister(instr->object());
2572 Register const object_map = scratch0(); 2556 Register const object_map = scratch0();
2573 Register const object_instance_type = ip; 2557 Register const object_instance_type = ip;
2574 Register const object_prototype = object_map; 2558 Register const object_prototype = object_map;
2575 Register const prototype = ToRegister(instr->prototype()); 2559 Register const prototype = ToRegister(instr->prototype());
2576 2560
2577 // The {object} must be a spec object. It's sufficient to know that {object} 2561 // The {object} must be a spec object. It's sufficient to know that {object}
2578 // is not a smi, since all other non-spec objects have {null} prototypes and 2562 // is not a smi, since all other non-spec objects have {null} prototypes and
2579 // will be ruled out below. 2563 // will be ruled out below.
2580 if (instr->hydrogen()->ObjectNeedsSmiCheck()) { 2564 if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
2581 __ TestIfSmi(object, r0); 2565 __ TestIfSmi(object);
2582 EmitFalseBranch(instr, eq, cr0); 2566 EmitFalseBranch(instr, eq);
2583 } 2567 }
2584
2585 // Loop through the {object}s prototype chain looking for the {prototype}. 2568 // Loop through the {object}s prototype chain looking for the {prototype}.
2586 __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset)); 2569 __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
2587 Label loop; 2570 Label loop;
2588 __ bind(&loop); 2571 __ bind(&loop);
2589 2572
2590 // Deoptimize if the object needs to be access checked. 2573 // Deoptimize if the object needs to be access checked.
2591 __ lbz(object_instance_type, 2574 __ LoadlB(object_instance_type,
2592 FieldMemOperand(object_map, Map::kBitFieldOffset)); 2575 FieldMemOperand(object_map, Map::kBitFieldOffset));
2593 __ TestBit(object_instance_type, Map::kIsAccessCheckNeeded, r0); 2576 __ TestBit(object_instance_type, Map::kIsAccessCheckNeeded, r0);
2594 DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck, cr0); 2577 DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck, cr0);
2595 // Deoptimize for proxies. 2578 // Deoptimize for proxies.
2596 __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE); 2579 __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
2597 DeoptimizeIf(eq, instr, Deoptimizer::kProxy); 2580 DeoptimizeIf(eq, instr, Deoptimizer::kProxy);
2598 __ LoadP(object_prototype, 2581 __ LoadP(object_prototype,
2599 FieldMemOperand(object_map, Map::kPrototypeOffset)); 2582 FieldMemOperand(object_map, Map::kPrototypeOffset));
2600 __ cmp(object_prototype, prototype); 2583 __ CmpP(object_prototype, prototype);
2601 EmitTrueBranch(instr, eq); 2584 EmitTrueBranch(instr, eq);
2602 __ CompareRoot(object_prototype, Heap::kNullValueRootIndex); 2585 __ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
2603 EmitFalseBranch(instr, eq); 2586 EmitFalseBranch(instr, eq);
2604 __ LoadP(object_map, 2587 __ LoadP(object_map,
2605 FieldMemOperand(object_prototype, HeapObject::kMapOffset)); 2588 FieldMemOperand(object_prototype, HeapObject::kMapOffset));
2606 __ b(&loop); 2589 __ b(&loop);
2607 } 2590 }
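The loop above amounts to the following walk. A minimal C++ sketch with stand-in types (HeapObject and Map here are simplified placeholders, not the real V8 classes); where this sketch returns false for access-checked maps and proxies, the real code deoptimizes:

  struct Map;
  struct HeapObject { Map* map; };
  struct Map {
    bool access_check_needed;   // models the Map::kIsAccessCheckNeeded bit
    bool is_proxy;              // models instance type == JS_PROXY_TYPE
    HeapObject* prototype;      // nullptr models the null value
  };

  // True if `prototype` occurs on the prototype chain of `object`.
  bool HasInPrototypeChain(HeapObject* object, HeapObject* prototype) {
    for (Map* map = object->map;; map = map->prototype->map) {
      if (map->access_check_needed || map->is_proxy) return false;
      if (map->prototype == prototype) return true;
      if (map->prototype == nullptr) return false;  // chain ends at null
    }
  }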
2608 2591
2609
2610 void LCodeGen::DoCmpT(LCmpT* instr) { 2592 void LCodeGen::DoCmpT(LCmpT* instr) {
2611 DCHECK(ToRegister(instr->context()).is(cp)); 2593 DCHECK(ToRegister(instr->context()).is(cp));
2612 Token::Value op = instr->op(); 2594 Token::Value op = instr->op();
2613 2595
2614 Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code(); 2596 Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
2615 CallCode(ic, RelocInfo::CODE_TARGET, instr); 2597 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2616 // This instruction also signals that no smi code was inlined. 2598 // This instruction also signals that no smi code was inlined.
2617 __ cmpi(r3, Operand::Zero()); 2599 __ CmpP(r2, Operand::Zero());
2618 2600
2619 Condition condition = ComputeCompareCondition(op); 2601 Condition condition = ComputeCompareCondition(op);
2620 if (CpuFeatures::IsSupported(ISELECT)) { 2602 Label true_value, done;
2621 __ LoadRoot(r4, Heap::kTrueValueRootIndex);
2622 __ LoadRoot(r5, Heap::kFalseValueRootIndex);
2623 __ isel(condition, ToRegister(instr->result()), r4, r5);
2624 } else {
2625 Label true_value, done;
2626 2603
2627 __ b(condition, &true_value); 2604 __ b(condition, &true_value, Label::kNear);
2628 2605
2629 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex); 2606 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2630 __ b(&done); 2607 __ b(&done, Label::kNear);
2631 2608
2632 __ bind(&true_value); 2609 __ bind(&true_value);
2633 __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex); 2610 __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2634 2611
2635 __ bind(&done); 2612 __ bind(&done);
2636 }
2637 } 2613 }
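The PPC original selected the result with isel (a branch-free conditional select) when available; the S390 port always materializes it with two short forward branches. Both strategies compute the same value, as in this hedged C++ sketch (Select and its parameters are illustrative, not V8 API):

  const char* Select(bool condition, const char* true_value,
                     const char* false_value) {
    // isel-style: branch-free conditional select.
    const char* a = condition ? true_value : false_value;
    // Branch style, as in the S390 sequence above: load the false value,
    // then conditionally overwrite it with the true value.
    const char* b = false_value;
    if (condition) b = true_value;
    return a == b ? a : nullptr;  // the two strategies always agree
  }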
2638 2614
2639
2640 void LCodeGen::DoReturn(LReturn* instr) { 2615 void LCodeGen::DoReturn(LReturn* instr) {
2641 if (FLAG_trace && info()->IsOptimizing()) { 2616 if (FLAG_trace && info()->IsOptimizing()) {
2642 // Push the return value on the stack as the parameter. 2617 // Push the return value on the stack as the parameter.
2643 // Runtime::TraceExit returns its parameter in r3. We're leaving the code 2618 // Runtime::TraceExit returns its parameter in r2. We're leaving the code
2644 // managed by the register allocator and tearing down the frame, so it's 2619 // managed by the register allocator and tearing down the frame, so it's
2645 // safe to write to the context register. 2620 // safe to write to the context register.
2646 __ push(r3); 2621 __ push(r2);
2647 __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); 2622 __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2648 __ CallRuntime(Runtime::kTraceExit); 2623 __ CallRuntime(Runtime::kTraceExit);
2649 } 2624 }
2650 if (info()->saves_caller_doubles()) { 2625 if (info()->saves_caller_doubles()) {
2651 RestoreCallerDoubles(); 2626 RestoreCallerDoubles();
2652 } 2627 }
2653 if (instr->has_constant_parameter_count()) { 2628 if (instr->has_constant_parameter_count()) {
2654 int parameter_count = ToInteger32(instr->constant_parameter_count()); 2629 int parameter_count = ToInteger32(instr->constant_parameter_count());
2655 int32_t sp_delta = (parameter_count + 1) * kPointerSize; 2630 int32_t sp_delta = (parameter_count + 1) * kPointerSize;
2656 if (NeedsEagerFrame()) { 2631 if (NeedsEagerFrame()) {
2657 masm_->LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta); 2632 masm_->LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta);
2658 } else if (sp_delta != 0) { 2633 } else if (sp_delta != 0) {
2659 __ addi(sp, sp, Operand(sp_delta)); 2634 // TODO(joransiu): Clean this up into the macro assembler.
2635 if (sp_delta >= 0 && sp_delta < 4096)
2636 __ la(sp, MemOperand(sp, sp_delta));
2637 else
2638 __ lay(sp, MemOperand(sp, sp_delta));
2660 } 2639 }
2661 } else { 2640 } else {
2662 DCHECK(info()->IsStub()); // Functions would need to drop one more value. 2641 DCHECK(info()->IsStub()); // Functions would need to drop one more value.
2663 Register reg = ToRegister(instr->parameter_count()); 2642 Register reg = ToRegister(instr->parameter_count());
2664 // The argument count parameter is a smi. 2643 // The argument count parameter is a smi.
2665 if (NeedsEagerFrame()) { 2644 if (NeedsEagerFrame()) {
2666 masm_->LeaveFrame(StackFrame::JAVA_SCRIPT); 2645 masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
2667 } 2646 }
2668 __ SmiToPtrArrayOffset(r0, reg); 2647 __ SmiToPtrArrayOffset(r0, reg);
2669 __ add(sp, sp, r0); 2648 __ AddP(sp, sp, r0);
2670 } 2649 }
2671 2650
2672 __ blr(); 2651 __ Ret();
2673 } 2652 }
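The la/lay split in the constant-parameter-count path above reflects the s390 encodings: la (RX form) encodes a 12-bit unsigned displacement, lay (RXY form) a 20-bit signed one, so the shorter form is only usable for deltas in [0, 4096). A hedged sketch of the same range checks (FitsLa/FitsLay are illustrative names):

  #include <cstdint>

  inline bool FitsLa(int32_t disp) {   // RX form: 12-bit unsigned displacement
    return disp >= 0 && disp < 4096;
  }
  inline bool FitsLay(int32_t disp) {  // RXY form: 20-bit signed displacement
    return disp >= -(1 << 19) && disp < (1 << 19);
  }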
2674 2653
2675
2676 template <class T> 2654 template <class T>
2677 void LCodeGen::EmitVectorLoadICRegisters(T* instr) { 2655 void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
2678 Register vector_register = ToRegister(instr->temp_vector()); 2656 Register vector_register = ToRegister(instr->temp_vector());
2679 Register slot_register = LoadDescriptor::SlotRegister(); 2657 Register slot_register = LoadDescriptor::SlotRegister();
2680 DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister())); 2658 DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
2681 DCHECK(slot_register.is(r3)); 2659 DCHECK(slot_register.is(r2));
2682 2660
2683 AllowDeferredHandleDereference vector_structure_check; 2661 AllowDeferredHandleDereference vector_structure_check;
2684 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector(); 2662 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
2685 __ Move(vector_register, vector); 2663 __ Move(vector_register, vector);
2686 // No need to allocate this register. 2664 // No need to allocate this register.
2687 FeedbackVectorSlot slot = instr->hydrogen()->slot(); 2665 FeedbackVectorSlot slot = instr->hydrogen()->slot();
2688 int index = vector->GetIndex(slot); 2666 int index = vector->GetIndex(slot);
2689 __ LoadSmiLiteral(slot_register, Smi::FromInt(index)); 2667 __ LoadSmiLiteral(slot_register, Smi::FromInt(index));
2690 } 2668 }
2691 2669
2692
2693 template <class T> 2670 template <class T>
2694 void LCodeGen::EmitVectorStoreICRegisters(T* instr) { 2671 void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
2695 Register vector_register = ToRegister(instr->temp_vector()); 2672 Register vector_register = ToRegister(instr->temp_vector());
2696 Register slot_register = ToRegister(instr->temp_slot()); 2673 Register slot_register = ToRegister(instr->temp_slot());
2697 2674
2698 AllowDeferredHandleDereference vector_structure_check; 2675 AllowDeferredHandleDereference vector_structure_check;
2699 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector(); 2676 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
2700 __ Move(vector_register, vector); 2677 __ Move(vector_register, vector);
2701 FeedbackVectorSlot slot = instr->hydrogen()->slot(); 2678 FeedbackVectorSlot slot = instr->hydrogen()->slot();
2702 int index = vector->GetIndex(slot); 2679 int index = vector->GetIndex(slot);
2703 __ LoadSmiLiteral(slot_register, Smi::FromInt(index)); 2680 __ LoadSmiLiteral(slot_register, Smi::FromInt(index));
2704 } 2681 }
2705 2682
2706
2707 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) { 2683 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2708 DCHECK(ToRegister(instr->context()).is(cp)); 2684 DCHECK(ToRegister(instr->context()).is(cp));
2709 DCHECK(ToRegister(instr->global_object()) 2685 DCHECK(ToRegister(instr->global_object())
2710 .is(LoadDescriptor::ReceiverRegister())); 2686 .is(LoadDescriptor::ReceiverRegister()));
2711 DCHECK(ToRegister(instr->result()).is(r3)); 2687 DCHECK(ToRegister(instr->result()).is(r2));
2712 2688
2713 __ mov(LoadDescriptor::NameRegister(), Operand(instr->name())); 2689 __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
2714 EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr); 2690 EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
2715 Handle<Code> ic = CodeFactory::LoadICInOptimizedCode( 2691 Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
2716 isolate(), instr->typeof_mode(), PREMONOMORPHIC) 2692 isolate(), instr->typeof_mode(), PREMONOMORPHIC)
2717 .code(); 2693 .code();
2718 CallCode(ic, RelocInfo::CODE_TARGET, instr); 2694 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2719 } 2695 }
2720 2696
2721
2722 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { 2697 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2723 Register context = ToRegister(instr->context()); 2698 Register context = ToRegister(instr->context());
2724 Register result = ToRegister(instr->result()); 2699 Register result = ToRegister(instr->result());
2725 __ LoadP(result, ContextMemOperand(context, instr->slot_index())); 2700 __ LoadP(result, ContextMemOperand(context, instr->slot_index()));
2726 if (instr->hydrogen()->RequiresHoleCheck()) { 2701 if (instr->hydrogen()->RequiresHoleCheck()) {
2727 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); 2702 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
2728 if (instr->hydrogen()->DeoptimizesOnHole()) { 2703 if (instr->hydrogen()->DeoptimizesOnHole()) {
2729 __ cmp(result, ip);
2730 DeoptimizeIf(eq, instr, Deoptimizer::kHole); 2704 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
2731 } else { 2705 } else {
2732 if (CpuFeatures::IsSupported(ISELECT)) { 2706 Label skip;
2733 Register scratch = scratch0(); 2707 __ bne(&skip, Label::kNear);
2734 __ mov(scratch, Operand(factory()->undefined_value())); 2708 __ mov(result, Operand(factory()->undefined_value()));
2735 __ cmp(result, ip); 2709 __ bind(&skip);
2736 __ isel(eq, result, scratch, result);
2737 } else {
2738 Label skip;
2739 __ cmp(result, ip);
2740 __ bne(&skip);
2741 __ mov(result, Operand(factory()->undefined_value()));
2742 __ bind(&skip);
2743 }
2744 } 2710 }
2745 } 2711 }
2746 } 2712 }
2747 2713
2748
2749 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { 2714 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2750 Register context = ToRegister(instr->context()); 2715 Register context = ToRegister(instr->context());
2751 Register value = ToRegister(instr->value()); 2716 Register value = ToRegister(instr->value());
2752 Register scratch = scratch0(); 2717 Register scratch = scratch0();
2753 MemOperand target = ContextMemOperand(context, instr->slot_index()); 2718 MemOperand target = ContextMemOperand(context, instr->slot_index());
2754 2719
2755 Label skip_assignment; 2720 Label skip_assignment;
2756 2721
2757 if (instr->hydrogen()->RequiresHoleCheck()) { 2722 if (instr->hydrogen()->RequiresHoleCheck()) {
2758 __ LoadP(scratch, target); 2723 __ LoadP(scratch, target);
2759 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); 2724 __ CompareRoot(scratch, Heap::kTheHoleValueRootIndex);
2760 __ cmp(scratch, ip);
2761 if (instr->hydrogen()->DeoptimizesOnHole()) { 2725 if (instr->hydrogen()->DeoptimizesOnHole()) {
2762 DeoptimizeIf(eq, instr, Deoptimizer::kHole); 2726 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
2763 } else { 2727 } else {
2764 __ bne(&skip_assignment); 2728 __ bne(&skip_assignment);
2765 } 2729 }
2766 } 2730 }
2767 2731
2768 __ StoreP(value, target, r0); 2732 __ StoreP(value, target);
2769 if (instr->hydrogen()->NeedsWriteBarrier()) { 2733 if (instr->hydrogen()->NeedsWriteBarrier()) {
2770 SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject() 2734 SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject()
2771 ? OMIT_SMI_CHECK 2735 ? OMIT_SMI_CHECK
2772 : INLINE_SMI_CHECK; 2736 : INLINE_SMI_CHECK;
2773 __ RecordWriteContextSlot(context, target.offset(), value, scratch, 2737 __ RecordWriteContextSlot(context, target.offset(), value, scratch,
2774 GetLinkRegisterState(), kSaveFPRegs, 2738 GetLinkRegisterState(), kSaveFPRegs,
2775 EMIT_REMEMBERED_SET, check_needed); 2739 EMIT_REMEMBERED_SET, check_needed);
2776 } 2740 }
2777 2741
2778 __ bind(&skip_assignment); 2742 __ bind(&skip_assignment);
2779 } 2743 }
2780 2744
2781
2782 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { 2745 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2783 HObjectAccess access = instr->hydrogen()->access(); 2746 HObjectAccess access = instr->hydrogen()->access();
2784 int offset = access.offset(); 2747 int offset = access.offset();
2785 Register object = ToRegister(instr->object()); 2748 Register object = ToRegister(instr->object());
2786 2749
2787 if (access.IsExternalMemory()) { 2750 if (access.IsExternalMemory()) {
2788 Register result = ToRegister(instr->result()); 2751 Register result = ToRegister(instr->result());
2789 MemOperand operand = MemOperand(object, offset); 2752 MemOperand operand = MemOperand(object, offset);
2790 __ LoadRepresentation(result, operand, access.representation(), r0); 2753 __ LoadRepresentation(result, operand, access.representation(), r0);
2791 return; 2754 return;
2792 } 2755 }
2793 2756
2794 if (instr->hydrogen()->representation().IsDouble()) { 2757 if (instr->hydrogen()->representation().IsDouble()) {
2795 DCHECK(access.IsInobject()); 2758 DCHECK(access.IsInobject());
2796 DoubleRegister result = ToDoubleRegister(instr->result()); 2759 DoubleRegister result = ToDoubleRegister(instr->result());
2797 __ lfd(result, FieldMemOperand(object, offset)); 2760 __ ld(result, FieldMemOperand(object, offset));
2798 return; 2761 return;
2799 } 2762 }
2800 2763
2801 Register result = ToRegister(instr->result()); 2764 Register result = ToRegister(instr->result());
2802 if (!access.IsInobject()) { 2765 if (!access.IsInobject()) {
2803 __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); 2766 __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
2804 object = result; 2767 object = result;
2805 } 2768 }
2806 2769
2807 Representation representation = access.representation(); 2770 Representation representation = access.representation();
2808 2771
2809 #if V8_TARGET_ARCH_PPC64 2772 #if V8_TARGET_ARCH_S390X
2810 // 64-bit Smi optimization 2773 // 64-bit Smi optimization
2811 if (representation.IsSmi() && 2774 if (representation.IsSmi() &&
2812 instr->hydrogen()->representation().IsInteger32()) { 2775 instr->hydrogen()->representation().IsInteger32()) {
2813 // Read int value directly from upper half of the smi. 2776 // Read int value directly from upper half of the smi.
2814 offset = SmiWordOffset(offset); 2777 offset = SmiWordOffset(offset);
2815 representation = Representation::Integer32(); 2778 representation = Representation::Integer32();
2816 } 2779 }
2817 #endif 2780 #endif
2818 2781
2819 __ LoadRepresentation(result, FieldMemOperand(object, offset), representation, 2782 __ LoadRepresentation(result, FieldMemOperand(object, offset), representation,
2820 r0); 2783 r0);
2821 } 2784 }
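The 64-bit smi fast path relies on the tagged layout: on 64-bit targets the 32-bit payload sits in one half of the machine word and the tag bits in the other, so SmiWordOffset can aim a 32-bit load directly at the payload half (which half sits at the lower address depends on endianness). A hedged, endian-neutral model of that layout:

  #include <cassert>
  #include <cstdint>

  int main() {
    int32_t value = 42;
    // 64-bit smi: payload in the upper 32 bits, tag bits (zero) below.
    int64_t smi = static_cast<int64_t>(value) << 32;
    // A 32-bit read of the payload half recovers the untagged integer.
    assert(static_cast<int32_t>(smi >> 32) == value);
    return 0;
  }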
2822 2785
2823
2824 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) { 2786 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
2825 DCHECK(ToRegister(instr->context()).is(cp)); 2787 DCHECK(ToRegister(instr->context()).is(cp));
2826 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister())); 2788 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
2827 DCHECK(ToRegister(instr->result()).is(r3)); 2789 DCHECK(ToRegister(instr->result()).is(r2));
2828 2790
2829 // Name is always in r5. 2791 // Name is always in r4.
2830 __ mov(LoadDescriptor::NameRegister(), Operand(instr->name())); 2792 __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
2831 EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr); 2793 EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
2832 Handle<Code> ic = CodeFactory::LoadICInOptimizedCode( 2794 Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
2833 isolate(), NOT_INSIDE_TYPEOF, 2795 isolate(), NOT_INSIDE_TYPEOF,
2834 instr->hydrogen()->initialization_state()) 2796 instr->hydrogen()->initialization_state())
2835 .code(); 2797 .code();
2836 CallCode(ic, RelocInfo::CODE_TARGET, instr); 2798 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2837 } 2799 }
2838 2800
2839
2840 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { 2801 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
2841 Register scratch = scratch0(); 2802 Register scratch = scratch0();
2842 Register function = ToRegister(instr->function()); 2803 Register function = ToRegister(instr->function());
2843 Register result = ToRegister(instr->result()); 2804 Register result = ToRegister(instr->result());
2844 2805
2845 // Get the prototype or initial map from the function. 2806 // Get the prototype or initial map from the function.
2846 __ LoadP(result, 2807 __ LoadP(result,
2847 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); 2808 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2848 2809
2849 // Check that the function has a prototype or an initial map. 2810 // Check that the function has a prototype or an initial map.
2850 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); 2811 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
2851 __ cmp(result, ip);
2852 DeoptimizeIf(eq, instr, Deoptimizer::kHole); 2812 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
2853 2813
2854 // If the function does not have an initial map, we're done. 2814 // If the function does not have an initial map, we're done.
2855 if (CpuFeatures::IsSupported(ISELECT)) { 2815 Label done;
2856 // Get the prototype from the initial map (optimistic). 2816 __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
2857 __ LoadP(ip, FieldMemOperand(result, Map::kPrototypeOffset)); 2817 __ bne(&done, Label::kNear);
2858 __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
2859 __ isel(eq, result, ip, result);
2860 } else {
2861 Label done;
2862 __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
2863 __ bne(&done);
2864 2818
2865 // Get the prototype from the initial map. 2819 // Get the prototype from the initial map.
2866 __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset)); 2820 __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
2867 2821
2868 // All done. 2822 // All done.
2869 __ bind(&done); 2823 __ bind(&done);
2870 }
2871 } 2824 }
2872 2825
2873
2874 void LCodeGen::DoLoadRoot(LLoadRoot* instr) { 2826 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
2875 Register result = ToRegister(instr->result()); 2827 Register result = ToRegister(instr->result());
2876 __ LoadRoot(result, instr->index()); 2828 __ LoadRoot(result, instr->index());
2877 } 2829 }
2878 2830
2879
2880 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) { 2831 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
2881 Register arguments = ToRegister(instr->arguments()); 2832 Register arguments = ToRegister(instr->arguments());
2882 Register result = ToRegister(instr->result()); 2833 Register result = ToRegister(instr->result());
2883 // There are two words between the frame pointer and the last argument. 2834 // There are two words between the frame pointer and the last argument.
2884 // Subtracting from length accounts for one of them; add one more. 2835 // Subtracting from length accounts for one of them; add one more.
2885 if (instr->length()->IsConstantOperand()) { 2836 if (instr->length()->IsConstantOperand()) {
2886 int const_length = ToInteger32(LConstantOperand::cast(instr->length())); 2837 int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
2887 if (instr->index()->IsConstantOperand()) { 2838 if (instr->index()->IsConstantOperand()) {
2888 int const_index = ToInteger32(LConstantOperand::cast(instr->index())); 2839 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
2889 int index = (const_length - const_index) + 1; 2840 int index = (const_length - const_index) + 1;
2890 __ LoadP(result, MemOperand(arguments, index * kPointerSize), r0); 2841 __ LoadP(result, MemOperand(arguments, index * kPointerSize));
2891 } else { 2842 } else {
2892 Register index = ToRegister(instr->index()); 2843 Register index = ToRegister(instr->index());
2893 __ subfic(result, index, Operand(const_length + 1)); 2844 __ SubP(result, index, Operand(const_length + 1));
2894 __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2)); 2845 __ LoadComplementRR(result, result);
2895 __ LoadPX(result, MemOperand(arguments, result)); 2846 __ ShiftLeftP(result, result, Operand(kPointerSizeLog2));
2847 __ LoadP(result, MemOperand(arguments, result));
2896 } 2848 }
2897 } else if (instr->index()->IsConstantOperand()) { 2849 } else if (instr->index()->IsConstantOperand()) {
2898 Register length = ToRegister(instr->length()); 2850 Register length = ToRegister(instr->length());
2899 int const_index = ToInteger32(LConstantOperand::cast(instr->index())); 2851 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
2900 int loc = const_index - 1; 2852 int loc = const_index - 1;
2901 if (loc != 0) { 2853 if (loc != 0) {
2902 __ subi(result, length, Operand(loc)); 2854 __ SubP(result, length, Operand(loc));
2903 __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2)); 2855 __ ShiftLeftP(result, result, Operand(kPointerSizeLog2));
2904 __ LoadPX(result, MemOperand(arguments, result)); 2856 __ LoadP(result, MemOperand(arguments, result));
2905 } else { 2857 } else {
2906 __ ShiftLeftImm(result, length, Operand(kPointerSizeLog2)); 2858 __ ShiftLeftP(result, length, Operand(kPointerSizeLog2));
2907 __ LoadPX(result, MemOperand(arguments, result)); 2859 __ LoadP(result, MemOperand(arguments, result));
2908 } 2860 }
2909 } else { 2861 } else {
2910 Register length = ToRegister(instr->length()); 2862 Register length = ToRegister(instr->length());
2911 Register index = ToRegister(instr->index()); 2863 Register index = ToRegister(instr->index());
2912 __ sub(result, length, index); 2864 __ SubP(result, length, index);
2913 __ addi(result, result, Operand(1)); 2865 __ AddP(result, result, Operand(1));
2914 __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2)); 2866 __ ShiftLeftP(result, result, Operand(kPointerSizeLog2));
2915 __ LoadPX(result, MemOperand(arguments, result)); 2867 __ LoadP(result, MemOperand(arguments, result));
2916 } 2868 }
2917 } 2869 }
2918 2870
2919
2920 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { 2871 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
2921 Register external_pointer = ToRegister(instr->elements()); 2872 Register external_pointer = ToRegister(instr->elements());
2922 Register key = no_reg; 2873 Register key = no_reg;
2923 ElementsKind elements_kind = instr->elements_kind(); 2874 ElementsKind elements_kind = instr->elements_kind();
2924 bool key_is_constant = instr->key()->IsConstantOperand(); 2875 bool key_is_constant = instr->key()->IsConstantOperand();
2925 int constant_key = 0; 2876 int constant_key = 0;
2926 if (key_is_constant) { 2877 if (key_is_constant) {
2927 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); 2878 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
2928 if (constant_key & 0xF0000000) { 2879 if (constant_key & 0xF0000000) {
2929 Abort(kArrayIndexConstantValueTooBig); 2880 Abort(kArrayIndexConstantValueTooBig);
2930 } 2881 }
2931 } else { 2882 } else {
2932 key = ToRegister(instr->key()); 2883 key = ToRegister(instr->key());
2933 } 2884 }
2934 int element_size_shift = ElementsKindToShiftSize(elements_kind); 2885 int element_size_shift = ElementsKindToShiftSize(elements_kind);
2935 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi(); 2886 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
2936 int base_offset = instr->base_offset(); 2887 int base_offset = instr->base_offset();
2888 bool use_scratch = false;
2937 2889
2938 if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) { 2890 if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
2939 DoubleRegister result = ToDoubleRegister(instr->result()); 2891 DoubleRegister result = ToDoubleRegister(instr->result());
2940 if (key_is_constant) { 2892 if (key_is_constant) {
2941 __ Add(scratch0(), external_pointer, constant_key << element_size_shift, 2893 base_offset += constant_key << element_size_shift;
2942 r0); 2894 if (!is_int20(base_offset)) {
2895 __ mov(scratch0(), Operand(base_offset));
2896 base_offset = 0;
2897 use_scratch = true;
2898 }
2943 } else { 2899 } else {
2944 __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi); 2900 __ IndexToArrayOffset(scratch0(), key, element_size_shift, key_is_smi);
2945 __ add(scratch0(), external_pointer, r0); 2901 use_scratch = true;
2946 } 2902 }
2947 if (elements_kind == FLOAT32_ELEMENTS) { 2903 if (elements_kind == FLOAT32_ELEMENTS) {
2948 __ lfs(result, MemOperand(scratch0(), base_offset)); 2904 if (!use_scratch) {
2905 __ ldeb(result, MemOperand(external_pointer, base_offset));
2906 } else {
2907 __ ldeb(result, MemOperand(scratch0(), external_pointer, base_offset));
2908 }
2949 } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS 2909 } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
2950 __ lfd(result, MemOperand(scratch0(), base_offset)); 2910 if (!use_scratch) {
2911 __ ld(result, MemOperand(external_pointer, base_offset));
2912 } else {
2913 __ ld(result, MemOperand(scratch0(), external_pointer, base_offset));
2914 }
2951 } 2915 }
2952 } else { 2916 } else {
2953 Register result = ToRegister(instr->result()); 2917 Register result = ToRegister(instr->result());
2954 MemOperand mem_operand = 2918 MemOperand mem_operand =
2955 PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi, 2919 PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
2956 constant_key, element_size_shift, base_offset); 2920 constant_key, element_size_shift, base_offset);
2957 switch (elements_kind) { 2921 switch (elements_kind) {
2958 case INT8_ELEMENTS: 2922 case INT8_ELEMENTS:
2959 if (key_is_constant) { 2923 __ LoadB(result, mem_operand);
2960 __ LoadByte(result, mem_operand, r0);
2961 } else {
2962 __ lbzx(result, mem_operand);
2963 }
2964 __ extsb(result, result);
2965 break; 2924 break;
2966 case UINT8_ELEMENTS: 2925 case UINT8_ELEMENTS:
2967 case UINT8_CLAMPED_ELEMENTS: 2926 case UINT8_CLAMPED_ELEMENTS:
2968 if (key_is_constant) { 2927 __ LoadlB(result, mem_operand);
2969 __ LoadByte(result, mem_operand, r0);
2970 } else {
2971 __ lbzx(result, mem_operand);
2972 }
2973 break; 2928 break;
2974 case INT16_ELEMENTS: 2929 case INT16_ELEMENTS:
2975 if (key_is_constant) { 2930 __ LoadHalfWordP(result, mem_operand);
2976 __ LoadHalfWordArith(result, mem_operand, r0);
2977 } else {
2978 __ lhax(result, mem_operand);
2979 }
2980 break; 2931 break;
2981 case UINT16_ELEMENTS: 2932 case UINT16_ELEMENTS:
2982 if (key_is_constant) { 2933 __ LoadLogicalHalfWordP(result, mem_operand);
2983 __ LoadHalfWord(result, mem_operand, r0);
2984 } else {
2985 __ lhzx(result, mem_operand);
2986 }
2987 break; 2934 break;
2988 case INT32_ELEMENTS: 2935 case INT32_ELEMENTS:
2989 if (key_is_constant) { 2936 __ LoadW(result, mem_operand, r0);
2990 __ LoadWordArith(result, mem_operand, r0);
2991 } else {
2992 __ lwax(result, mem_operand);
2993 }
2994 break; 2937 break;
2995 case UINT32_ELEMENTS: 2938 case UINT32_ELEMENTS:
2996 if (key_is_constant) { 2939 __ LoadlW(result, mem_operand, r0);
2997 __ LoadWord(result, mem_operand, r0);
2998 } else {
2999 __ lwzx(result, mem_operand);
3000 }
3001 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { 2940 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3002 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000))); 2941 __ CmpLogical32(result, Operand(0x80000000));
3003 __ cmplw(result, r0);
3004 DeoptimizeIf(ge, instr, Deoptimizer::kNegativeValue); 2942 DeoptimizeIf(ge, instr, Deoptimizer::kNegativeValue);
3005 } 2943 }
3006 break; 2944 break;
3007 case FLOAT32_ELEMENTS: 2945 case FLOAT32_ELEMENTS:
3008 case FLOAT64_ELEMENTS: 2946 case FLOAT64_ELEMENTS:
3009 case FAST_HOLEY_DOUBLE_ELEMENTS: 2947 case FAST_HOLEY_DOUBLE_ELEMENTS:
3010 case FAST_HOLEY_ELEMENTS: 2948 case FAST_HOLEY_ELEMENTS:
3011 case FAST_HOLEY_SMI_ELEMENTS: 2949 case FAST_HOLEY_SMI_ELEMENTS:
3012 case FAST_DOUBLE_ELEMENTS: 2950 case FAST_DOUBLE_ELEMENTS:
3013 case FAST_ELEMENTS: 2951 case FAST_ELEMENTS:
3014 case FAST_SMI_ELEMENTS: 2952 case FAST_SMI_ELEMENTS:
3015 case DICTIONARY_ELEMENTS: 2953 case DICTIONARY_ELEMENTS:
3016 case FAST_SLOPPY_ARGUMENTS_ELEMENTS: 2954 case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
3017 case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: 2955 case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
3018 case FAST_STRING_WRAPPER_ELEMENTS: 2956 case FAST_STRING_WRAPPER_ELEMENTS:
3019 case SLOW_STRING_WRAPPER_ELEMENTS: 2957 case SLOW_STRING_WRAPPER_ELEMENTS:
3020 case NO_ELEMENTS: 2958 case NO_ELEMENTS:
3021 UNREACHABLE(); 2959 UNREACHABLE();
3022 break; 2960 break;
3023 } 2961 }
3024 } 2962 }
3025 } 2963 }
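Most of the switch above differs only in extension behavior: the plain loads (LoadB, LoadHalfWordP, LoadW) sign-extend, while the "logical" variants (LoadlB, LoadLogicalHalfWordP, LoadlW) zero-extend. Reading the mnemonics that way, the INT8/UINT8 cases behave like this small sketch:

  #include <cstdint>

  int main() {
    unsigned char raw = 0xFF;                // an INT8/UINT8 element in memory
    int32_t s = static_cast<int8_t>(raw);    // LoadB-style sign-extension: -1
    int32_t u = static_cast<uint8_t>(raw);   // LoadlB-style zero-extension: 255
    return (s == -1 && u == 255) ? 0 : 1;
  }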
3026 2964
3027
3028 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { 2965 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
3029 Register elements = ToRegister(instr->elements()); 2966 Register elements = ToRegister(instr->elements());
3030 bool key_is_constant = instr->key()->IsConstantOperand(); 2967 bool key_is_constant = instr->key()->IsConstantOperand();
3031 Register key = no_reg; 2968 Register key = no_reg;
3032 DoubleRegister result = ToDoubleRegister(instr->result()); 2969 DoubleRegister result = ToDoubleRegister(instr->result());
3033 Register scratch = scratch0(); 2970 Register scratch = scratch0();
3034 2971
3035 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); 2972 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
3036 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi(); 2973 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
3037 int constant_key = 0; 2974 int constant_key = 0;
3038 if (key_is_constant) { 2975 if (key_is_constant) {
3039 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); 2976 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3040 if (constant_key & 0xF0000000) { 2977 if (constant_key & 0xF0000000) {
3041 Abort(kArrayIndexConstantValueTooBig); 2978 Abort(kArrayIndexConstantValueTooBig);
3042 } 2979 }
3043 } else { 2980 } else {
3044 key = ToRegister(instr->key()); 2981 key = ToRegister(instr->key());
3045 } 2982 }
3046 2983
3047 int base_offset = instr->base_offset() + constant_key * kDoubleSize; 2984 bool use_scratch = false;
2985 intptr_t base_offset = instr->base_offset() + constant_key * kDoubleSize;
3048 if (!key_is_constant) { 2986 if (!key_is_constant) {
3049 __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi); 2987 use_scratch = true;
3050 __ add(scratch, elements, r0); 2988 __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
3051 elements = scratch;
3052 } 2989 }
3053 if (!is_int16(base_offset)) { 2990
3054 __ Add(scratch, elements, base_offset, r0); 2991 // Memory references support up to a 20-bit signed displacement in RXY form.
2992 // Include Register::kExponentOffset in the check, so we are guaranteed not to
2993 // overflow the displacement later.
2994 if (!is_int20(base_offset + Register::kExponentOffset)) {
2995 use_scratch = true;
2996 if (key_is_constant) {
2997 __ mov(scratch, Operand(base_offset));
2998 } else {
2999 __ AddP(scratch, Operand(base_offset));
3000 }
3055 base_offset = 0; 3001 base_offset = 0;
3056 elements = scratch;
3057 } 3002 }
3058 __ lfd(result, MemOperand(elements, base_offset)); 3003
3004 if (!use_scratch) {
3005 __ ld(result, MemOperand(elements, base_offset));
3006 } else {
3007 __ ld(result, MemOperand(scratch, elements, base_offset));
3008 }
3059 3009
3060 if (instr->hydrogen()->RequiresHoleCheck()) { 3010 if (instr->hydrogen()->RequiresHoleCheck()) {
3061 if (is_int16(base_offset + Register::kExponentOffset)) { 3011 if (!use_scratch) {
3062 __ lwz(scratch, 3012 __ LoadlW(r0,
3063 MemOperand(elements, base_offset + Register::kExponentOffset)); 3013 MemOperand(elements, base_offset + Register::kExponentOffset));
3064 } else { 3014 } else {
3065 __ addi(scratch, elements, Operand(base_offset)); 3015 __ LoadlW(r0, MemOperand(scratch, elements,
3066 __ lwz(scratch, MemOperand(scratch, Register::kExponentOffset)); 3016 base_offset + Register::kExponentOffset));
3067 } 3017 }
3068 __ Cmpi(scratch, Operand(kHoleNanUpper32), r0); 3018 __ Cmp32(r0, Operand(kHoleNanUpper32));
3069 DeoptimizeIf(eq, instr, Deoptimizer::kHole); 3019 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
3070 } 3020 }
3071 } 3021 }
3072 3022
3073
3074 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { 3023 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3075 HLoadKeyed* hinstr = instr->hydrogen(); 3024 HLoadKeyed* hinstr = instr->hydrogen();
3076 Register elements = ToRegister(instr->elements()); 3025 Register elements = ToRegister(instr->elements());
3077 Register result = ToRegister(instr->result()); 3026 Register result = ToRegister(instr->result());
3078 Register scratch = scratch0(); 3027 Register scratch = scratch0();
3079 Register store_base = scratch;
3080 int offset = instr->base_offset(); 3028 int offset = instr->base_offset();
3081 3029
3082 if (instr->key()->IsConstantOperand()) { 3030 if (instr->key()->IsConstantOperand()) {
3083 LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); 3031 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3084 offset += ToInteger32(const_operand) * kPointerSize; 3032 offset += ToInteger32(const_operand) * kPointerSize;
3085 store_base = elements;
3086 } else { 3033 } else {
3087 Register key = ToRegister(instr->key()); 3034 Register key = ToRegister(instr->key());
3088 // Even though the HLoadKeyed instruction forces the input 3035 // Even though the HLoadKeyed instruction forces the input
3089 // representation for the key to be an integer, the input gets replaced 3036 // representation for the key to be an integer, the input gets replaced
3090 // during bound check elimination with the index argument to the bounds 3037 // during bound check elimination with the index argument to the bounds
3091 // check, which can be tagged, so that case must be handled here, too. 3038 // check, which can be tagged, so that case must be handled here, too.
3092 if (hinstr->key()->representation().IsSmi()) { 3039 if (hinstr->key()->representation().IsSmi()) {
3093 __ SmiToPtrArrayOffset(r0, key); 3040 __ SmiToPtrArrayOffset(scratch, key);
3094 } else { 3041 } else {
3095 __ ShiftLeftImm(r0, key, Operand(kPointerSizeLog2)); 3042 __ ShiftLeftP(scratch, key, Operand(kPointerSizeLog2));
3096 } 3043 }
3097 __ add(scratch, elements, r0);
3098 } 3044 }
3099 3045
3100 bool requires_hole_check = hinstr->RequiresHoleCheck(); 3046 bool requires_hole_check = hinstr->RequiresHoleCheck();
3101 Representation representation = hinstr->representation(); 3047 Representation representation = hinstr->representation();
3102 3048
3103 #if V8_TARGET_ARCH_PPC64 3049 #if V8_TARGET_ARCH_S390X
3104 // 64-bit Smi optimization 3050 // 64-bit Smi optimization
3105 if (representation.IsInteger32() && 3051 if (representation.IsInteger32() &&
3106 hinstr->elements_kind() == FAST_SMI_ELEMENTS) { 3052 hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
3107 DCHECK(!requires_hole_check); 3053 DCHECK(!requires_hole_check);
3108 // Read int value directly from upper half of the smi. 3054 // Read int value directly from upper half of the smi.
3109 offset = SmiWordOffset(offset); 3055 offset = SmiWordOffset(offset);
3110 } 3056 }
3111 #endif 3057 #endif
3112 3058
3113 __ LoadRepresentation(result, MemOperand(store_base, offset), representation, 3059 if (instr->key()->IsConstantOperand()) {
3114 r0); 3060 __ LoadRepresentation(result, MemOperand(elements, offset), representation,
3061 r1);
3062 } else {
3063 __ LoadRepresentation(result, MemOperand(scratch, elements, offset),
3064 representation, r1);
3065 }
3115 3066
3116 // Check for the hole value. 3067 // Check for the hole value.
3117 if (requires_hole_check) { 3068 if (requires_hole_check) {
3118 if (IsFastSmiElementsKind(hinstr->elements_kind())) { 3069 if (IsFastSmiElementsKind(hinstr->elements_kind())) {
3119 __ TestIfSmi(result, r0); 3070 __ TestIfSmi(result);
3120 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0); 3071 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
3121 } else { 3072 } else {
3122 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); 3073 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
3123 __ cmp(result, scratch);
3124 DeoptimizeIf(eq, instr, Deoptimizer::kHole); 3074 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
3125 } 3075 }
3126 } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) { 3076 } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
3127 DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS); 3077 DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
3128 Label done; 3078 Label done;
3129 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); 3079 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
3130 __ cmp(result, scratch); 3080 __ CmpP(result, scratch);
3131 __ bne(&done); 3081 __ bne(&done);
3132 if (info()->IsStub()) { 3082 if (info()->IsStub()) {
3133 // A stub can safely convert the hole to undefined only if the array 3083 // A stub can safely convert the hole to undefined only if the array
3134 // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise 3084 // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
3135 // it needs to bail out. 3085 // it needs to bail out.
3136 __ LoadRoot(result, Heap::kArrayProtectorRootIndex); 3086 __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
3137 __ LoadP(result, FieldMemOperand(result, Cell::kValueOffset)); 3087 __ LoadP(result, FieldMemOperand(result, Cell::kValueOffset));
3138 __ CmpSmiLiteral(result, Smi::FromInt(Isolate::kArrayProtectorValid), r0); 3088 __ CmpSmiLiteral(result, Smi::FromInt(Isolate::kArrayProtectorValid), r0);
3139 DeoptimizeIf(ne, instr, Deoptimizer::kHole); 3089 DeoptimizeIf(ne, instr, Deoptimizer::kHole);
3140 } 3090 }
3141 __ LoadRoot(result, Heap::kUndefinedValueRootIndex); 3091 __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
3142 __ bind(&done); 3092 __ bind(&done);
3143 } 3093 }
3144 } 3094 }
3145 3095
3146
3147 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { 3096 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3148 if (instr->is_fixed_typed_array()) { 3097 if (instr->is_fixed_typed_array()) {
3149 DoLoadKeyedExternalArray(instr); 3098 DoLoadKeyedExternalArray(instr);
3150 } else if (instr->hydrogen()->representation().IsDouble()) { 3099 } else if (instr->hydrogen()->representation().IsDouble()) {
3151 DoLoadKeyedFixedDoubleArray(instr); 3100 DoLoadKeyedFixedDoubleArray(instr);
3152 } else { 3101 } else {
3153 DoLoadKeyedFixedArray(instr); 3102 DoLoadKeyedFixedArray(instr);
3154 } 3103 }
3155 } 3104 }
3156 3105
3157
3158 MemOperand LCodeGen::PrepareKeyedOperand(Register key, Register base, 3106 MemOperand LCodeGen::PrepareKeyedOperand(Register key, Register base,
3159 bool key_is_constant, bool key_is_smi, 3107 bool key_is_constant, bool key_is_smi,
3160 int constant_key, 3108 int constant_key,
3161 int element_size_shift, 3109 int element_size_shift,
3162 int base_offset) { 3110 int base_offset) {
3163 Register scratch = scratch0(); 3111 Register scratch = scratch0();
3164 3112
3165 if (key_is_constant) { 3113 if (key_is_constant) {
3166 return MemOperand(base, (constant_key << element_size_shift) + base_offset); 3114 int offset = (base_offset + (constant_key << element_size_shift));
3115 if (!is_int20(offset)) {
3116 __ mov(scratch, Operand(offset));
3117 return MemOperand(base, scratch);
3118 } else {
3119 return MemOperand(base,
3120 (constant_key << element_size_shift) + base_offset);
3121 }
3167 } 3122 }
3168 3123
3169 bool needs_shift = 3124 bool needs_shift =
3170 (element_size_shift != (key_is_smi ? kSmiTagSize + kSmiShiftSize : 0)); 3125 (element_size_shift != (key_is_smi ? kSmiTagSize + kSmiShiftSize : 0));
3171 3126
3172 if (!(base_offset || needs_shift)) { 3127 if (needs_shift) {
3173 return MemOperand(base, key); 3128 __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
3129 } else {
3130 scratch = key;
3174 } 3131 }
3175 3132
3176 if (needs_shift) { 3133 if (!is_int20(base_offset)) {
3177 __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi); 3134 __ AddP(scratch, Operand(base_offset));
3178 key = scratch; 3135 base_offset = 0;
3179 } 3136 }
3180 3137 return MemOperand(scratch, base, base_offset);
3181 if (base_offset) {
3182 __ Add(scratch, key, base_offset, r0);
3183 }
3184
3185 return MemOperand(base, scratch);
3186 } 3138 }
3187 3139
3188
3189 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { 3140 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3190 DCHECK(ToRegister(instr->context()).is(cp)); 3141 DCHECK(ToRegister(instr->context()).is(cp));
3191 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister())); 3142 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3192 DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister())); 3143 DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
3193 3144
3194 if (instr->hydrogen()->HasVectorAndSlot()) { 3145 if (instr->hydrogen()->HasVectorAndSlot()) {
3195 EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr); 3146 EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
3196 } 3147 }
3197 3148
3198 Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode( 3149 Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
3199 isolate(), instr->hydrogen()->initialization_state()) 3150 isolate(), instr->hydrogen()->initialization_state())
3200 .code(); 3151 .code();
3201 CallCode(ic, RelocInfo::CODE_TARGET, instr); 3152 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3202 } 3153 }
3203 3154
3204
3205 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { 3155 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3206 Register scratch = scratch0(); 3156 Register scratch = scratch0();
3207 Register result = ToRegister(instr->result()); 3157 Register result = ToRegister(instr->result());
3208 3158
3209 if (instr->hydrogen()->from_inlined()) { 3159 if (instr->hydrogen()->from_inlined()) {
3210 __ subi(result, sp, Operand(2 * kPointerSize)); 3160 __ lay(result, MemOperand(sp, -2 * kPointerSize));
3211 } else { 3161 } else {
3212 // Check if the calling frame is an arguments adaptor frame. 3162 // Check if the calling frame is an arguments adaptor frame.
3163 Label done, adapted;
3213 __ LoadP(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); 3164 __ LoadP(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3214 __ LoadP(result, 3165 __ LoadP(result,
3215 MemOperand(scratch, StandardFrameConstants::kContextOffset)); 3166 MemOperand(scratch, StandardFrameConstants::kContextOffset));
3216 __ CmpSmiLiteral(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0); 3167 __ CmpSmiLiteral(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
3217 3168
3218 // Result is the frame pointer for the frame if not adapted, and for the real 3169 // Result is the frame pointer for the frame if not adapted, and for the real
3219 // frame below the adaptor frame if adapted. 3170 // frame below the adaptor frame if adapted.
3220 if (CpuFeatures::IsSupported(ISELECT)) { 3171 __ beq(&adapted, Label::kNear);
3221 __ isel(eq, result, scratch, fp); 3172 __ LoadRR(result, fp);
3222 } else { 3173 __ b(&done, Label::kNear);
3223 Label done, adapted;
3224 __ beq(&adapted);
3225 __ mr(result, fp);
3226 __ b(&done);
3227 3174
3228 __ bind(&adapted); 3175 __ bind(&adapted);
3229 __ mr(result, scratch); 3176 __ LoadRR(result, scratch);
3230 __ bind(&done); 3177 __ bind(&done);
3231 }
3232 } 3178 }
3233 } 3179 }
3234 3180
3235
3236 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) { 3181 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3237 Register elem = ToRegister(instr->elements()); 3182 Register elem = ToRegister(instr->elements());
3238 Register result = ToRegister(instr->result()); 3183 Register result = ToRegister(instr->result());
3239 3184
3240 Label done; 3185 Label done;
3241 3186
3242 // If there is no arguments adaptor frame, the number of arguments is fixed. 3187 // If there is no arguments adaptor frame, the number of arguments is fixed.
3243 __ cmp(fp, elem); 3188 __ CmpP(fp, elem);
3244 __ mov(result, Operand(scope()->num_parameters())); 3189 __ mov(result, Operand(scope()->num_parameters()));
3245 __ beq(&done); 3190 __ beq(&done, Label::kNear);
3246 3191
3247 // Arguments adaptor frame present. Get argument length from there. 3192 // Arguments adaptor frame present. Get argument length from there.
3248 __ LoadP(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); 3193 __ LoadP(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3249 __ LoadP(result, 3194 __ LoadP(result,
3250 MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset)); 3195 MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
3251 __ SmiUntag(result); 3196 __ SmiUntag(result);
3252 3197
3253 // Argument length is in result register. 3198 // Argument length is in result register.
3254 __ bind(&done); 3199 __ bind(&done);
3255 } 3200 }
3256 3201
3257
3258 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) { 3202 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3259 Register receiver = ToRegister(instr->receiver()); 3203 Register receiver = ToRegister(instr->receiver());
3260 Register function = ToRegister(instr->function()); 3204 Register function = ToRegister(instr->function());
3261 Register result = ToRegister(instr->result()); 3205 Register result = ToRegister(instr->result());
3262 Register scratch = scratch0(); 3206 Register scratch = scratch0();
3263 3207
3264 // If the receiver is null or undefined, we have to pass the global 3208 // If the receiver is null or undefined, we have to pass the global
3265 // object as a receiver to normal functions. Values have to be 3209 // object as a receiver to normal functions. Values have to be
3266 // passed unchanged to builtins and strict-mode functions. 3210 // passed unchanged to builtins and strict-mode functions.
3267 Label global_object, result_in_receiver; 3211 Label global_object, result_in_receiver;
3268 3212
3269 if (!instr->hydrogen()->known_function()) { 3213 if (!instr->hydrogen()->known_function()) {
3270 // Do not transform the receiver to object for strict mode 3214 // Do not transform the receiver to object for strict mode
3271 // functions or builtins. 3215 // functions or builtins.
3272 __ LoadP(scratch, 3216 __ LoadP(scratch,
3273 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); 3217 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3274 __ lwz(scratch, 3218 __ LoadlW(scratch, FieldMemOperand(
3275 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset)); 3219 scratch, SharedFunctionInfo::kCompilerHintsOffset));
3276 __ andi(r0, scratch, Operand((1 << SharedFunctionInfo::kStrictModeBit) | 3220 __ AndP(r0, scratch, Operand((1 << SharedFunctionInfo::kStrictModeBit) |
3277 (1 << SharedFunctionInfo::kNativeBit))); 3221 (1 << SharedFunctionInfo::kNativeBit)));
3278 __ bne(&result_in_receiver, cr0); 3222 __ bne(&result_in_receiver, Label::kNear);
3279 } 3223 }
3280 3224
3281 // Normal function. Replace undefined or null with global receiver. 3225 // Normal function. Replace undefined or null with global receiver.
3282 __ LoadRoot(scratch, Heap::kNullValueRootIndex); 3226 __ CompareRoot(receiver, Heap::kNullValueRootIndex);
3283 __ cmp(receiver, scratch); 3227 __ beq(&global_object, Label::kNear);
3284 __ beq(&global_object); 3228 __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
3285 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); 3229 __ beq(&global_object, Label::kNear);
3286 __ cmp(receiver, scratch);
3287 __ beq(&global_object);
3288 3230
3289 // Deoptimize if the receiver is not a JS object. 3231 // Deoptimize if the receiver is not a JS object.
3290 __ TestIfSmi(receiver, r0); 3232 __ TestIfSmi(receiver);
3291 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0); 3233 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
3292 __ CompareObjectType(receiver, scratch, scratch, FIRST_JS_RECEIVER_TYPE); 3234 __ CompareObjectType(receiver, scratch, scratch, FIRST_JS_RECEIVER_TYPE);
3293 DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject); 3235 DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject);
3294 3236
3295 __ b(&result_in_receiver); 3237 __ b(&result_in_receiver, Label::kNear);
3296 __ bind(&global_object); 3238 __ bind(&global_object);
3297 __ LoadP(result, FieldMemOperand(function, JSFunction::kContextOffset)); 3239 __ LoadP(result, FieldMemOperand(function, JSFunction::kContextOffset));
3298 __ LoadP(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX)); 3240 __ LoadP(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
3299 __ LoadP(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX)); 3241 __ LoadP(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
3300 3242
3301 if (result.is(receiver)) { 3243 if (result.is(receiver)) {
3302 __ bind(&result_in_receiver); 3244 __ bind(&result_in_receiver);
3303 } else { 3245 } else {
3304 Label result_ok; 3246 Label result_ok;
3305 __ b(&result_ok); 3247 __ b(&result_ok, Label::kNear);
3306 __ bind(&result_in_receiver); 3248 __ bind(&result_in_receiver);
3307 __ mr(result, receiver); 3249 __ LoadRR(result, receiver);
3308 __ bind(&result_ok); 3250 __ bind(&result_ok);
3309 } 3251 }
3310 } 3252 }
3311 3253
3312
3313 void LCodeGen::DoApplyArguments(LApplyArguments* instr) { 3254 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3314 Register receiver = ToRegister(instr->receiver()); 3255 Register receiver = ToRegister(instr->receiver());
3315 Register function = ToRegister(instr->function()); 3256 Register function = ToRegister(instr->function());
3316 Register length = ToRegister(instr->length()); 3257 Register length = ToRegister(instr->length());
3317 Register elements = ToRegister(instr->elements()); 3258 Register elements = ToRegister(instr->elements());
3318 Register scratch = scratch0(); 3259 Register scratch = scratch0();
3319 DCHECK(receiver.is(r3)); // Used for parameter count. 3260 DCHECK(receiver.is(r2)); // Used for parameter count.
3320 DCHECK(function.is(r4)); // Required by InvokeFunction. 3261 DCHECK(function.is(r3)); // Required by InvokeFunction.
3321 DCHECK(ToRegister(instr->result()).is(r3)); 3262 DCHECK(ToRegister(instr->result()).is(r2));
3322 3263
3323 // Copy the arguments to this function possibly from the 3264 // Copy the arguments to this function possibly from the
3324 // adaptor frame below it. 3265 // adaptor frame below it.
3325 const uint32_t kArgumentsLimit = 1 * KB; 3266 const uint32_t kArgumentsLimit = 1 * KB;
3326 __ cmpli(length, Operand(kArgumentsLimit)); 3267 __ CmpLogicalP(length, Operand(kArgumentsLimit));
3327 DeoptimizeIf(gt, instr, Deoptimizer::kTooManyArguments); 3268 DeoptimizeIf(gt, instr, Deoptimizer::kTooManyArguments);
3328 3269
3329 // Push the receiver and use the register to keep the original 3270 // Push the receiver and use the register to keep the original
3330 // number of arguments. 3271 // number of arguments.
3331 __ push(receiver); 3272 __ push(receiver);
3332 __ mr(receiver, length); 3273 __ LoadRR(receiver, length);
3333 // The arguments are at a one pointer size offset from elements. 3274 // The arguments are at a one pointer size offset from elements.
3334 __ addi(elements, elements, Operand(1 * kPointerSize)); 3275 __ AddP(elements, Operand(1 * kPointerSize));
3335 3276
3336 // Loop through the arguments pushing them onto the execution 3277 // Loop through the arguments pushing them onto the execution
3337 // stack. 3278 // stack.
3338 Label invoke, loop; 3279 Label invoke, loop;
3339 // length is a small non-negative integer, due to the test above. 3280 // length is a small non-negative integer, due to the test above.
3340 __ cmpi(length, Operand::Zero()); 3281 __ CmpP(length, Operand::Zero());
3341 __ beq(&invoke); 3282 __ beq(&invoke, Label::kNear);
3342 __ mtctr(length);
3343 __ bind(&loop); 3283 __ bind(&loop);
3344 __ ShiftLeftImm(r0, length, Operand(kPointerSizeLog2)); 3284 __ ShiftLeftP(r1, length, Operand(kPointerSizeLog2));
3345 __ LoadPX(scratch, MemOperand(elements, r0)); 3285 __ LoadP(scratch, MemOperand(elements, r1));
3346 __ push(scratch); 3286 __ push(scratch);
3347 __ addi(length, length, Operand(-1)); 3287 __ BranchOnCount(length, &loop);
3348 __ bdnz(&loop);
3349 3288
3350 __ bind(&invoke); 3289 __ bind(&invoke);
3351 DCHECK(instr->HasPointerMap()); 3290 DCHECK(instr->HasPointerMap());
3352 LPointerMap* pointers = instr->pointer_map(); 3291 LPointerMap* pointers = instr->pointer_map();
3353 SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt); 3292 SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
3354 // The number of arguments is stored in receiver which is r3, as expected 3293 // The number of arguments is stored in receiver which is r2, as expected
3355 // by InvokeFunction. 3294 // by InvokeFunction.
3356 ParameterCount actual(receiver); 3295 ParameterCount actual(receiver);
3357 __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION, 3296 __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
3358 safepoint_generator); 3297 safepoint_generator);
3359 } 3298 }
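The push loop above walks the arguments from the highest index down, and BranchOnCount (s390 BRCT: decrement, then branch while nonzero) closes the loop. A hedged C++ model of its effect (PushAll is illustrative, not V8 API):

  // `elements` has already been advanced by one slot, as in the code above.
  void PushAll(void** elements, long length, void (*push)(void*)) {
    while (length > 0) {
      push(elements[length]);  // LoadP at elements + length * kPointerSize
      --length;                // BranchOnCount: decrement, loop while != 0
    }
  }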
3360 3299
3361
3362 void LCodeGen::DoPushArgument(LPushArgument* instr) { 3300 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3363 LOperand* argument = instr->value(); 3301 LOperand* argument = instr->value();
3364 if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) { 3302 if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
3365 Abort(kDoPushArgumentNotImplementedForDoubleType); 3303 Abort(kDoPushArgumentNotImplementedForDoubleType);
3366 } else { 3304 } else {
3367 Register argument_reg = EmitLoadRegister(argument, ip); 3305 Register argument_reg = EmitLoadRegister(argument, ip);
3368 __ push(argument_reg); 3306 __ push(argument_reg);
3369 } 3307 }
3370 } 3308 }
3371 3309
3372
3373 void LCodeGen::DoDrop(LDrop* instr) { __ Drop(instr->count()); } 3310 void LCodeGen::DoDrop(LDrop* instr) { __ Drop(instr->count()); }
3374 3311
3375
3376 void LCodeGen::DoThisFunction(LThisFunction* instr) { 3312 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3377 Register result = ToRegister(instr->result()); 3313 Register result = ToRegister(instr->result());
3378 __ LoadP(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); 3314 __ LoadP(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
3379 } 3315 }
3380 3316
3381
3382 void LCodeGen::DoContext(LContext* instr) { 3317 void LCodeGen::DoContext(LContext* instr) {
3383 // If there is a non-return use, the context must be moved to a register. 3318 // If there is a non-return use, the context must be moved to a register.
3384 Register result = ToRegister(instr->result()); 3319 Register result = ToRegister(instr->result());
3385 if (info()->IsOptimizing()) { 3320 if (info()->IsOptimizing()) {
3386 __ LoadP(result, MemOperand(fp, StandardFrameConstants::kContextOffset)); 3321 __ LoadP(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
3387 } else { 3322 } else {
3388 // If there is no frame, the context must be in cp. 3323 // If there is no frame, the context must be in cp.
3389 DCHECK(result.is(cp)); 3324 DCHECK(result.is(cp));
3390 } 3325 }
3391 } 3326 }
3392 3327
3393
3394 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) { 3328 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3395 DCHECK(ToRegister(instr->context()).is(cp)); 3329 DCHECK(ToRegister(instr->context()).is(cp));
3396 __ Move(scratch0(), instr->hydrogen()->pairs()); 3330 __ Move(scratch0(), instr->hydrogen()->pairs());
3397 __ push(scratch0()); 3331 __ push(scratch0());
3398 __ LoadSmiLiteral(scratch0(), Smi::FromInt(instr->hydrogen()->flags())); 3332 __ LoadSmiLiteral(scratch0(), Smi::FromInt(instr->hydrogen()->flags()));
3399 __ push(scratch0()); 3333 __ push(scratch0());
3400 CallRuntime(Runtime::kDeclareGlobals, instr); 3334 CallRuntime(Runtime::kDeclareGlobals, instr);
3401 } 3335 }
3402 3336
3403
3404 void LCodeGen::CallKnownFunction(Handle<JSFunction> function, 3337 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3405 int formal_parameter_count, int arity, 3338 int formal_parameter_count, int arity,
3406 LInstruction* instr) { 3339 LInstruction* instr) {
3407 bool dont_adapt_arguments = 3340 bool dont_adapt_arguments =
3408 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel; 3341 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3409 bool can_invoke_directly = 3342 bool can_invoke_directly =
3410 dont_adapt_arguments || formal_parameter_count == arity; 3343 dont_adapt_arguments || formal_parameter_count == arity;
3411 3344
3412 Register function_reg = r4; 3345 Register function_reg = r3;
3413 3346
3414 LPointerMap* pointers = instr->pointer_map(); 3347 LPointerMap* pointers = instr->pointer_map();
3415 3348
3416 if (can_invoke_directly) { 3349 if (can_invoke_directly) {
3417 // Change context. 3350 // Change context.
3418 __ LoadP(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset)); 3351 __ LoadP(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
3419 3352
3420 // Always initialize new target and number of actual arguments. 3353 // Always initialize new target and number of actual arguments.
3421 __ LoadRoot(r6, Heap::kUndefinedValueRootIndex); 3354 __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
3422 __ mov(r3, Operand(arity)); 3355 __ mov(r2, Operand(arity));
3423 3356
3424 bool is_self_call = function.is_identical_to(info()->closure()); 3357 bool is_self_call = function.is_identical_to(info()->closure());
3425 3358
3426 // Invoke function. 3359 // Invoke function.
3427 if (is_self_call) { 3360 if (is_self_call) {
3428 __ CallSelf(); 3361 __ CallSelf();
3429 } else { 3362 } else {
3430 __ LoadP(ip, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset)); 3363 __ LoadP(ip, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
3431 __ CallJSEntry(ip); 3364 __ CallJSEntry(ip);
3432 } 3365 }
3433 3366
3434 // Set up deoptimization. 3367 // Set up deoptimization.
3435 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); 3368 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3436 } else { 3369 } else {
3437 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); 3370 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3438 ParameterCount count(arity); 3371 ParameterCount count(arity);
3439 ParameterCount expected(formal_parameter_count); 3372 ParameterCount expected(formal_parameter_count);
3440 __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator); 3373 __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
3441 } 3374 }
3442 } 3375 }
3443 3376
3444
3445 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { 3377 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3446 DCHECK(instr->context() != NULL); 3378 DCHECK(instr->context() != NULL);
3447 DCHECK(ToRegister(instr->context()).is(cp)); 3379 DCHECK(ToRegister(instr->context()).is(cp));
3448 Register input = ToRegister(instr->value()); 3380 Register input = ToRegister(instr->value());
3449 Register result = ToRegister(instr->result()); 3381 Register result = ToRegister(instr->result());
3450 Register scratch = scratch0(); 3382 Register scratch = scratch0();
3451 3383
3452 // Deoptimize if not a heap number. 3384 // Deoptimize if not a heap number.
3453 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); 3385 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
3454 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); 3386 __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
3455 __ cmp(scratch, ip);
3456 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber); 3387 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
3457 3388
3458 Label done; 3389 Label done;
3459 Register exponent = scratch0(); 3390 Register exponent = scratch0();
3460 scratch = no_reg; 3391 scratch = no_reg;
3461 __ lwz(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); 3392 __ LoadlW(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3462 // Check the sign of the argument. If the argument is positive, just 3393 // Check the sign of the argument. If the argument is positive, just
3463 // return it. 3394 // return it.
3464 __ cmpwi(exponent, Operand::Zero()); 3395 __ Cmp32(exponent, Operand::Zero());
3465 // Move the input to the result if necessary. 3396 // Move the input to the result if necessary.
3466 __ Move(result, input); 3397 __ Move(result, input);
3467 __ bge(&done); 3398 __ bge(&done);
3468 3399
3469 // Input is negative. Reverse its sign. 3400 // Input is negative. Reverse its sign.
3470 // Preserve the value of all registers. 3401 // Preserve the value of all registers.
3471 { 3402 {
3472 PushSafepointRegistersScope scope(this); 3403 PushSafepointRegistersScope scope(this);
3473 3404
3474 // Registers were saved at the safepoint, so we can use 3405 // Registers were saved at the safepoint, so we can use
3475 // many scratch registers. 3406 // many scratch registers.
3476 Register tmp1 = input.is(r4) ? r3 : r4; 3407 Register tmp1 = input.is(r3) ? r2 : r3;
3477 Register tmp2 = input.is(r5) ? r3 : r5; 3408 Register tmp2 = input.is(r4) ? r2 : r4;
3478 Register tmp3 = input.is(r6) ? r3 : r6; 3409 Register tmp3 = input.is(r5) ? r2 : r5;
3479 Register tmp4 = input.is(r7) ? r3 : r7; 3410 Register tmp4 = input.is(r6) ? r2 : r6;
3480 3411
3481 // exponent: floating point exponent value. 3412 // exponent: floating point exponent value.
3482 3413
3483 Label allocated, slow; 3414 Label allocated, slow;
3484 __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex); 3415 __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
3485 __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow); 3416 __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
3486 __ b(&allocated); 3417 __ b(&allocated);
3487 3418
3488 // Slow case: Call the runtime system to do the number allocation. 3419 // Slow case: Call the runtime system to do the number allocation.
3489 __ bind(&slow); 3420 __ bind(&slow);
3490 3421
3491 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr, 3422 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
3492 instr->context()); 3423 instr->context());
3493 // Set the pointer to the new heap number in tmp1. 3424 // Set the pointer to the new heap number in tmp1.
3494 if (!tmp1.is(r3)) __ mr(tmp1, r3); 3425 if (!tmp1.is(r2)) __ LoadRR(tmp1, r2);
3495 // Restore input_reg after call to runtime. 3426 // Restore input_reg after call to runtime.
3496 __ LoadFromSafepointRegisterSlot(input, input); 3427 __ LoadFromSafepointRegisterSlot(input, input);
3497 __ lwz(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); 3428 __ LoadlW(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3498 3429
3499 __ bind(&allocated); 3430 __ bind(&allocated);
3500 // exponent: floating point exponent value. 3431 // exponent: floating point exponent value.
3501 // tmp1: allocated heap number. 3432 // tmp1: allocated heap number.
3502 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u); 3433
3503 __ clrlwi(exponent, exponent, Operand(1)); // clear sign bit 3434 // Clear the sign bit.
3504 __ stw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset)); 3435 __ nilf(exponent, Operand(~HeapNumber::kSignMask));
3505 __ lwz(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset)); 3436 __ StoreW(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
3506 __ stw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset)); 3437 __ LoadlW(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
3438 __ StoreW(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
3507 3439
3508 __ StoreToSafepointRegisterSlot(tmp1, result); 3440 __ StoreToSafepointRegisterSlot(tmp1, result);
3509 } 3441 }
3510 3442
3511 __ bind(&done); 3443 __ bind(&done);
3512 } 3444 }
3513 3445
3514
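The deferred path above takes the absolute value of a boxed heap number by clearing the sign bit of the exponent word and copying the mantissa word unchanged. A self-contained sketch of the same bit manipulation on an IEEE-754 double:

#include <cstdint>
#include <cstring>

double AbsViaSignBit(double x) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof bits);
  bits &= ~(UINT64_C(1) << 63);  // clear the IEEE-754 sign bit
  std::memcpy(&x, &bits, sizeof bits);
  return x;
}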
3515 void LCodeGen::EmitMathAbs(LMathAbs* instr) { 3446 void LCodeGen::EmitMathAbs(LMathAbs* instr) {
3516 Register input = ToRegister(instr->value()); 3447 Register input = ToRegister(instr->value());
3517 Register result = ToRegister(instr->result()); 3448 Register result = ToRegister(instr->result());
3518 Label done; 3449 Label done;
3519 __ cmpi(input, Operand::Zero()); 3450 __ CmpP(input, Operand::Zero());
3520 __ Move(result, input); 3451 __ Move(result, input);
3521 __ bge(&done); 3452 __ bge(&done, Label::kNear);
3522 __ li(r0, Operand::Zero()); // clear xer 3453 __ LoadComplementRR(result, result);
3523 __ mtxer(r0);
3524 __ neg(result, result, SetOE, SetRC);
3525 // Deoptimize on overflow. 3454 // Deoptimize on overflow.
3526 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0); 3455 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
3527 __ bind(&done); 3456 __ bind(&done);
3528 } 3457 }
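In C++ terms, EmitMathAbs behaves roughly like the sketch below: negate when negative, and deoptimize when the negation overflows (a sketch only; the real code operates on smi- or pointer-width values):

#include <cstdint>
#include <limits>

int32_t MathAbsInt32(int32_t x, bool* deopt) {
  if (x >= 0) return x;
  if (x == std::numeric_limits<int32_t>::min()) {
    *deopt = true;  // -x is not representable: Deoptimizer::kOverflow
    return x;
  }
  return -x;  // neg / LoadComplementRR
}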
3529 3458
3530 3459 #if V8_TARGET_ARCH_S390X
3531 #if V8_TARGET_ARCH_PPC64
3532 void LCodeGen::EmitInteger32MathAbs(LMathAbs* instr) { 3460 void LCodeGen::EmitInteger32MathAbs(LMathAbs* instr) {
3533 Register input = ToRegister(instr->value()); 3461 Register input = ToRegister(instr->value());
3534 Register result = ToRegister(instr->result()); 3462 Register result = ToRegister(instr->result());
3535 Label done; 3463 Label done;
3536 __ cmpwi(input, Operand::Zero()); 3464 __ Cmp32(input, Operand::Zero());
3537 __ Move(result, input); 3465 __ Move(result, input);
3538 __ bge(&done); 3466 __ bge(&done, Label::kNear);
3539 3467
3540 // Deoptimize on overflow. 3468 // Deoptimize on overflow.
3541 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000))); 3469 __ Cmp32(input, Operand(0x80000000));
3542 __ cmpw(input, r0);
3543 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow); 3470 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
3544 3471
3545 __ neg(result, result); 3472 __ LoadComplementRR(result, result);
3546 __ bind(&done); 3473 __ bind(&done);
3547 } 3474 }
3548 #endif 3475 #endif
3549 3476
3550
3551 void LCodeGen::DoMathAbs(LMathAbs* instr) { 3477 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3552 // Class for deferred case. 3478 // Class for deferred case.
3553 class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode { 3479 class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
3554 public: 3480 public:
3555 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr) 3481 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
3556 : LDeferredCode(codegen), instr_(instr) {} 3482 : LDeferredCode(codegen), instr_(instr) {}
3557 void Generate() override { 3483 void Generate() override {
3558 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); 3484 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3559 } 3485 }
3560 LInstruction* instr() override { return instr_; } 3486 LInstruction* instr() override { return instr_; }
3561 3487
3562 private: 3488 private:
3563 LMathAbs* instr_; 3489 LMathAbs* instr_;
3564 }; 3490 };
3565 3491
3566 Representation r = instr->hydrogen()->value()->representation(); 3492 Representation r = instr->hydrogen()->value()->representation();
3567 if (r.IsDouble()) { 3493 if (r.IsDouble()) {
3568 DoubleRegister input = ToDoubleRegister(instr->value()); 3494 DoubleRegister input = ToDoubleRegister(instr->value());
3569 DoubleRegister result = ToDoubleRegister(instr->result()); 3495 DoubleRegister result = ToDoubleRegister(instr->result());
3570 __ fabs(result, input); 3496 __ lpdbr(result, input);
3571 #if V8_TARGET_ARCH_PPC64 3497 #if V8_TARGET_ARCH_S390X
3572 } else if (r.IsInteger32()) { 3498 } else if (r.IsInteger32()) {
3573 EmitInteger32MathAbs(instr); 3499 EmitInteger32MathAbs(instr);
3574 } else if (r.IsSmi()) { 3500 } else if (r.IsSmi()) {
3575 #else 3501 #else
3576 } else if (r.IsSmiOrInteger32()) { 3502 } else if (r.IsSmiOrInteger32()) {
3577 #endif 3503 #endif
3578 EmitMathAbs(instr); 3504 EmitMathAbs(instr);
3579 } else { 3505 } else {
3580 // Representation is tagged. 3506 // Representation is tagged.
3581 DeferredMathAbsTaggedHeapNumber* deferred = 3507 DeferredMathAbsTaggedHeapNumber* deferred =
3582 new (zone()) DeferredMathAbsTaggedHeapNumber(this, instr); 3508 new (zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3583 Register input = ToRegister(instr->value()); 3509 Register input = ToRegister(instr->value());
3584 // Smi check. 3510 // Smi check.
3585 __ JumpIfNotSmi(input, deferred->entry()); 3511 __ JumpIfNotSmi(input, deferred->entry());
3586 // If smi, handle it directly. 3512 // If smi, handle it directly.
3587 EmitMathAbs(instr); 3513 EmitMathAbs(instr);
3588 __ bind(deferred->exit()); 3514 __ bind(deferred->exit());
3589 } 3515 }
3590 } 3516 }
3591 3517
3592
3593 void LCodeGen::DoMathFloor(LMathFloor* instr) { 3518 void LCodeGen::DoMathFloor(LMathFloor* instr) {
3594 DoubleRegister input = ToDoubleRegister(instr->value()); 3519 DoubleRegister input = ToDoubleRegister(instr->value());
3595 Register result = ToRegister(instr->result()); 3520 Register result = ToRegister(instr->result());
3596 Register input_high = scratch0(); 3521 Register input_high = scratch0();
3597 Register scratch = ip; 3522 Register scratch = ip;
3598 Label done, exact; 3523 Label done, exact;
3599 3524
3600 __ TryInt32Floor(result, input, input_high, scratch, double_scratch0(), &done, 3525 __ TryInt32Floor(result, input, input_high, scratch, double_scratch0(), &done,
3601 &exact); 3526 &exact);
3602 DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN); 3527 DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
3603 3528
3604 __ bind(&exact); 3529 __ bind(&exact);
3605 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 3530 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3606 // Test for -0. 3531 // Test for -0.
3607 __ cmpi(result, Operand::Zero()); 3532 __ CmpP(result, Operand::Zero());
3608 __ bne(&done); 3533 __ bne(&done, Label::kNear);
3609 __ cmpwi(input_high, Operand::Zero()); 3534 __ Cmp32(input_high, Operand::Zero());
3610 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero); 3535 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
3611 } 3536 }
3612 __ bind(&done); 3537 __ bind(&done);
3613 } 3538 }
3614 3539
3615
3616 void LCodeGen::DoMathRound(LMathRound* instr) { 3540 void LCodeGen::DoMathRound(LMathRound* instr) {
3617 DoubleRegister input = ToDoubleRegister(instr->value()); 3541 DoubleRegister input = ToDoubleRegister(instr->value());
3618 Register result = ToRegister(instr->result()); 3542 Register result = ToRegister(instr->result());
3619 DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp()); 3543 DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
3620 DoubleRegister input_plus_dot_five = double_scratch1; 3544 DoubleRegister input_plus_dot_five = double_scratch1;
3621 Register scratch1 = scratch0(); 3545 Register scratch1 = scratch0();
3622 Register scratch2 = ip; 3546 Register scratch2 = ip;
3623 DoubleRegister dot_five = double_scratch0(); 3547 DoubleRegister dot_five = double_scratch0();
3624 Label convert, done; 3548 Label convert, done;
3625 3549
3626 __ LoadDoubleLiteral(dot_five, 0.5, r0); 3550 __ LoadDoubleLiteral(dot_five, 0.5, r0);
3627 __ fabs(double_scratch1, input); 3551 __ lpdbr(double_scratch1, input);
3628 __ fcmpu(double_scratch1, dot_five); 3552 __ cdbr(double_scratch1, dot_five);
3629 DeoptimizeIf(unordered, instr, Deoptimizer::kLostPrecisionOrNaN); 3553 DeoptimizeIf(unordered, instr, Deoptimizer::kLostPrecisionOrNaN);
3630 // If input is in [-0.5, -0], the result is -0. 3554 // If input is in [-0.5, -0], the result is -0.
3631 // If input is in [+0, +0.5), the result is +0. 3555 // If input is in [+0, +0.5), the result is +0.
3632 // If the input is +0.5, the result is 1. 3556 // If the input is +0.5, the result is 1.
3633 __ bgt(&convert); // Out of [-0.5, +0.5]. 3557 __ bgt(&convert, Label::kNear); // Out of [-0.5, +0.5].
3634 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 3558 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3635 // [-0.5, -0] (negative) yields minus zero. 3559 // [-0.5, -0] (negative) yields minus zero.
3636 __ TestDoubleSign(input, scratch1); 3560 __ TestDoubleSign(input, scratch1);
3637 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero); 3561 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
3638 } 3562 }
3639 __ fcmpu(input, dot_five); 3563 Label return_zero;
3640 if (CpuFeatures::IsSupported(ISELECT)) { 3564 __ cdbr(input, dot_five);
3641 __ li(result, Operand(1)); 3565 __ bne(&return_zero, Label::kNear);
3642 __ isel(lt, result, r0, result); 3566 __ LoadImmP(result, Operand(1)); // +0.5.
3643 __ b(&done); 3567 __ b(&done, Label::kNear);
3644 } else { 3568 // Remaining cases: [+0, +0.5) or [-0.5, +0.5), depending on
3645 Label return_zero; 3569 // flag kBailoutOnMinusZero.
3646 __ bne(&return_zero); 3570 __ bind(&return_zero);
3647 __ li(result, Operand(1)); // +0.5. 3571 __ LoadImmP(result, Operand::Zero());
3648 __ b(&done); 3572 __ b(&done, Label::kNear);
3649 // Remaining cases: [+0, +0.5) or [-0.5, +0.5), depending on
3650 // flag kBailoutOnMinusZero.
3651 __ bind(&return_zero);
3652 __ li(result, Operand::Zero());
3653 __ b(&done);
3654 }
3655 3573
3656 __ bind(&convert); 3574 __ bind(&convert);
3657 __ fadd(input_plus_dot_five, input, dot_five); 3575 __ ldr(input_plus_dot_five, input);
3576 __ adbr(input_plus_dot_five, dot_five);
3658 // Reuse dot_five (double_scratch0) as we no longer need this value. 3577 // Reuse dot_five (double_scratch0) as we no longer need this value.
3659 __ TryInt32Floor(result, input_plus_dot_five, scratch1, scratch2, 3578 __ TryInt32Floor(result, input_plus_dot_five, scratch1, scratch2,
3660 double_scratch0(), &done, &done); 3579 double_scratch0(), &done, &done);
3661 DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN); 3580 DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
3662 __ bind(&done); 3581 __ bind(&done);
3663 } 3582 }
3664 3583
3665
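The rounding rule implemented by DoMathRound, written out as a C++ sketch (the NaN and -0 deopt cases are omitted, and std::floor stands in for TryInt32Floor):

#include <cmath>

double MathRoundSketch(double x) {
  if (std::fabs(x) <= 0.5) {
    if (x == 0.5) return 1.0;             // exactly +0.5 rounds up to 1
    return std::signbit(x) ? -0.0 : 0.0;  // [-0.5, +0.5) collapses to +/-0
  }
  return std::floor(x + 0.5);             // the &convert path
}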
3666 void LCodeGen::DoMathFround(LMathFround* instr) { 3584 void LCodeGen::DoMathFround(LMathFround* instr) {
3667 DoubleRegister input_reg = ToDoubleRegister(instr->value()); 3585 DoubleRegister input_reg = ToDoubleRegister(instr->value());
3668 DoubleRegister output_reg = ToDoubleRegister(instr->result()); 3586 DoubleRegister output_reg = ToDoubleRegister(instr->result());
3669 __ frsp(output_reg, input_reg); 3587
3588 // Round double to float
3589 __ ledbr(output_reg, input_reg);
3590 // Extend from float to double
3591 __ ldebr(output_reg, output_reg);
3670 } 3592 }
3671 3593
3672
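DoMathFround narrows to single precision and widens back; the equivalent C++:

// ledbr then ldebr: round double to float, then extend float to double.
double MathFround(double x) {
  return static_cast<double>(static_cast<float>(x));
}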
3673 void LCodeGen::DoMathSqrt(LMathSqrt* instr) { 3594 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3674 DoubleRegister input = ToDoubleRegister(instr->value()); 3595 DoubleRegister input = ToDoubleRegister(instr->value());
3675 DoubleRegister result = ToDoubleRegister(instr->result()); 3596 DoubleRegister result = ToDoubleRegister(instr->result());
3676 __ fsqrt(result, input); 3597 __ sqdbr(result, input);
3677 } 3598 }
3678 3599
3679
3680 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { 3600 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3681 DoubleRegister input = ToDoubleRegister(instr->value()); 3601 DoubleRegister input = ToDoubleRegister(instr->value());
3682 DoubleRegister result = ToDoubleRegister(instr->result()); 3602 DoubleRegister result = ToDoubleRegister(instr->result());
3683 DoubleRegister temp = double_scratch0(); 3603 DoubleRegister temp = double_scratch0();
3684 3604
3685 // Note that according to ECMA-262 15.8.2.13: 3605 // Note that according to ECMA-262 15.8.2.13:
3686 // Math.pow(-Infinity, 0.5) == Infinity 3606 // Math.pow(-Infinity, 0.5) == Infinity
3687 // Math.sqrt(-Infinity) == NaN 3607 // Math.sqrt(-Infinity) == NaN
3688 Label skip, done; 3608 Label skip, done;
3689 3609
3690 __ LoadDoubleLiteral(temp, -V8_INFINITY, scratch0()); 3610 __ LoadDoubleLiteral(temp, -V8_INFINITY, scratch0());
3691 __ fcmpu(input, temp); 3611 __ cdbr(input, temp);
3692 __ bne(&skip); 3612 __ bne(&skip, Label::kNear);
3693 __ fneg(result, temp); 3613 __ lcdbr(result, temp);
3694 __ b(&done); 3614 __ b(&done, Label::kNear);
3695 3615
3696 // Add +0 to convert -0 to +0. 3616 // Add +0 to convert -0 to +0.
3697 __ bind(&skip); 3617 __ bind(&skip);
3698 __ fadd(result, input, kDoubleRegZero); 3618 __ ldr(result, input);
3699 __ fsqrt(result, result); 3619 __ lzdr(kDoubleRegZero);
3620 __ adbr(result, kDoubleRegZero);
3621 __ sqdbr(result, result);
3700 __ bind(&done); 3622 __ bind(&done);
3701 } 3623 }
3702 3624
3703
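The special-casing in DoMathPowHalf tracks ECMA-262: Math.pow(-Infinity, 0.5) is +Infinity even though sqrt(-Infinity) is NaN, and adding +0 first folds a -0 input to +0 so sqrt never returns -0. A sketch:

#include <cmath>
#include <limits>

double MathPowHalf(double x) {
  if (x == -std::numeric_limits<double>::infinity()) {
    return std::numeric_limits<double>::infinity();  // lcdbr on -Infinity
  }
  return std::sqrt(x + 0.0);  // IEEE-754: -0 + +0 == +0
}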
3704 void LCodeGen::DoPower(LPower* instr) { 3625 void LCodeGen::DoPower(LPower* instr) {
3705 Representation exponent_type = instr->hydrogen()->right()->representation(); 3626 Representation exponent_type = instr->hydrogen()->right()->representation();
3706 // Having marked this as a call, we can use any registers. 3627 // Having marked this as a call, we can use any registers.
3707 // Just make sure that the input/output registers are the expected ones. 3628 // Just make sure that the input/output registers are the expected ones.
3708 Register tagged_exponent = MathPowTaggedDescriptor::exponent(); 3629 Register tagged_exponent = MathPowTaggedDescriptor::exponent();
3709 DCHECK(!instr->right()->IsDoubleRegister() || 3630 DCHECK(!instr->right()->IsDoubleRegister() ||
3710 ToDoubleRegister(instr->right()).is(d2)); 3631 ToDoubleRegister(instr->right()).is(d2));
3711 DCHECK(!instr->right()->IsRegister() || 3632 DCHECK(!instr->right()->IsRegister() ||
3712 ToRegister(instr->right()).is(tagged_exponent)); 3633 ToRegister(instr->right()).is(tagged_exponent));
3713 DCHECK(ToDoubleRegister(instr->left()).is(d1)); 3634 DCHECK(ToDoubleRegister(instr->left()).is(d1));
3714 DCHECK(ToDoubleRegister(instr->result()).is(d3)); 3635 DCHECK(ToDoubleRegister(instr->result()).is(d3));
3715 3636
3716 if (exponent_type.IsSmi()) { 3637 if (exponent_type.IsSmi()) {
3717 MathPowStub stub(isolate(), MathPowStub::TAGGED); 3638 MathPowStub stub(isolate(), MathPowStub::TAGGED);
3718 __ CallStub(&stub); 3639 __ CallStub(&stub);
3719 } else if (exponent_type.IsTagged()) { 3640 } else if (exponent_type.IsTagged()) {
3720 Label no_deopt; 3641 Label no_deopt;
3721 __ JumpIfSmi(tagged_exponent, &no_deopt); 3642 __ JumpIfSmi(tagged_exponent, &no_deopt);
3722 DCHECK(!r10.is(tagged_exponent)); 3643 __ LoadP(r9, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
3723 __ LoadP(r10, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset)); 3644 __ CompareRoot(r9, Heap::kHeapNumberMapRootIndex);
3724 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
3725 __ cmp(r10, ip);
3726 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber); 3645 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
3727 __ bind(&no_deopt); 3646 __ bind(&no_deopt);
3728 MathPowStub stub(isolate(), MathPowStub::TAGGED); 3647 MathPowStub stub(isolate(), MathPowStub::TAGGED);
3729 __ CallStub(&stub); 3648 __ CallStub(&stub);
3730 } else if (exponent_type.IsInteger32()) { 3649 } else if (exponent_type.IsInteger32()) {
3731 MathPowStub stub(isolate(), MathPowStub::INTEGER); 3650 MathPowStub stub(isolate(), MathPowStub::INTEGER);
3732 __ CallStub(&stub); 3651 __ CallStub(&stub);
3733 } else { 3652 } else {
3734 DCHECK(exponent_type.IsDouble()); 3653 DCHECK(exponent_type.IsDouble());
3735 MathPowStub stub(isolate(), MathPowStub::DOUBLE); 3654 MathPowStub stub(isolate(), MathPowStub::DOUBLE);
3736 __ CallStub(&stub); 3655 __ CallStub(&stub);
3737 } 3656 }
3738 } 3657 }
3739 3658
3740
3741 void LCodeGen::DoMathExp(LMathExp* instr) { 3659 void LCodeGen::DoMathExp(LMathExp* instr) {
3742 DoubleRegister input = ToDoubleRegister(instr->value()); 3660 DoubleRegister input = ToDoubleRegister(instr->value());
3743 DoubleRegister result = ToDoubleRegister(instr->result()); 3661 DoubleRegister result = ToDoubleRegister(instr->result());
3744 DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp()); 3662 DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
3745 DoubleRegister double_scratch2 = double_scratch0(); 3663 DoubleRegister double_scratch2 = double_scratch0();
3746 Register temp1 = ToRegister(instr->temp1()); 3664 Register temp1 = ToRegister(instr->temp1());
3747 Register temp2 = ToRegister(instr->temp2()); 3665 Register temp2 = ToRegister(instr->temp2());
3748 3666
3749 MathExpGenerator::EmitMathExp(masm(), input, result, double_scratch1, 3667 MathExpGenerator::EmitMathExp(masm(), input, result, double_scratch1,
3750 double_scratch2, temp1, temp2, scratch0()); 3668 double_scratch2, temp1, temp2, scratch0());
3751 } 3669 }
3752 3670
3753
3754 void LCodeGen::DoMathLog(LMathLog* instr) { 3671 void LCodeGen::DoMathLog(LMathLog* instr) {
3755 __ PrepareCallCFunction(0, 1, scratch0()); 3672 __ PrepareCallCFunction(0, 1, scratch0());
3756 __ MovToFloatParameter(ToDoubleRegister(instr->value())); 3673 __ MovToFloatParameter(ToDoubleRegister(instr->value()));
3757 __ CallCFunction(ExternalReference::math_log_double_function(isolate()), 0, 3674 __ CallCFunction(ExternalReference::math_log_double_function(isolate()), 0,
3758 1); 3675 1);
3759 __ MovFromFloatResult(ToDoubleRegister(instr->result())); 3676 __ MovFromFloatResult(ToDoubleRegister(instr->result()));
3760 } 3677 }
3761 3678
3762
3763 void LCodeGen::DoMathClz32(LMathClz32* instr) { 3679 void LCodeGen::DoMathClz32(LMathClz32* instr) {
3764 Register input = ToRegister(instr->value()); 3680 Register input = ToRegister(instr->value());
3765 Register result = ToRegister(instr->result()); 3681 Register result = ToRegister(instr->result());
3766 __ cntlzw_(result, input); 3682 Label done;
3683 __ llgfr(result, input);
3684 __ flogr(r0, result);
3685 __ LoadRR(result, r0);
3686 __ CmpP(r0, Operand::Zero());
3687 __ beq(&done, Label::kNear);
3688 __ SubP(result, Operand(32));
3689 __ bind(&done);
3767 } 3690 }
3768 3691
3769
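DoMathClz32 emulates a 32-bit count-leading-zeros with FLOGR, which works on 64 bits and yields 64 for a zero input. A sketch of the arithmetic, using a GCC/Clang-style __builtin_clzll to stand in for flogr:

#include <cstdint>

uint32_t MathClz32(uint32_t x) {
  uint64_t wide = x;  // llgfr: zero-extend to 64 bits
  // flogr: leading-zero count of the 64-bit value; 64 when wide == 0.
  uint32_t clz64 = (wide == 0) ? 64u : __builtin_clzll(wide);
  // A zero-extended 32-bit value has at least 32 leading zeros, so
  // subtract 32; the clz64 == 0 guard mirrors the beq above.
  return (clz64 == 0) ? 0 : clz64 - 32;
}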
3770 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { 3692 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3771 DCHECK(ToRegister(instr->context()).is(cp)); 3693 DCHECK(ToRegister(instr->context()).is(cp));
3772 DCHECK(ToRegister(instr->function()).is(r4)); 3694 DCHECK(ToRegister(instr->function()).is(r3));
3773 DCHECK(instr->HasPointerMap()); 3695 DCHECK(instr->HasPointerMap());
3774 3696
3775 Handle<JSFunction> known_function = instr->hydrogen()->known_function(); 3697 Handle<JSFunction> known_function = instr->hydrogen()->known_function();
3776 if (known_function.is_null()) { 3698 if (known_function.is_null()) {
3777 LPointerMap* pointers = instr->pointer_map(); 3699 LPointerMap* pointers = instr->pointer_map();
3778 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); 3700 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3779 ParameterCount count(instr->arity()); 3701 ParameterCount count(instr->arity());
3780 __ InvokeFunction(r4, no_reg, count, CALL_FUNCTION, generator); 3702 __ InvokeFunction(r3, no_reg, count, CALL_FUNCTION, generator);
3781 } else { 3703 } else {
3782 CallKnownFunction(known_function, 3704 CallKnownFunction(known_function,
3783 instr->hydrogen()->formal_parameter_count(), 3705 instr->hydrogen()->formal_parameter_count(),
3784 instr->arity(), instr); 3706 instr->arity(), instr);
3785 } 3707 }
3786 } 3708 }
3787 3709
3788
3789 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) { 3710 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
3790 DCHECK(ToRegister(instr->result()).is(r3)); 3711 DCHECK(ToRegister(instr->result()).is(r2));
3791 3712
3792 if (instr->hydrogen()->IsTailCall()) { 3713 if (instr->hydrogen()->IsTailCall()) {
3793 if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL); 3714 if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
3794 3715
3795 if (instr->target()->IsConstantOperand()) { 3716 if (instr->target()->IsConstantOperand()) {
3796 LConstantOperand* target = LConstantOperand::cast(instr->target()); 3717 LConstantOperand* target = LConstantOperand::cast(instr->target());
3797 Handle<Code> code = Handle<Code>::cast(ToHandle(target)); 3718 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3798 __ Jump(code, RelocInfo::CODE_TARGET); 3719 __ Jump(code, RelocInfo::CODE_TARGET);
3799 } else { 3720 } else {
3800 DCHECK(instr->target()->IsRegister()); 3721 DCHECK(instr->target()->IsRegister());
3801 Register target = ToRegister(instr->target()); 3722 Register target = ToRegister(instr->target());
3802 __ addi(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag)); 3723 __ AddP(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
3803 __ JumpToJSEntry(ip); 3724 __ JumpToJSEntry(ip);
3804 } 3725 }
3805 } else { 3726 } else {
3806 LPointerMap* pointers = instr->pointer_map(); 3727 LPointerMap* pointers = instr->pointer_map();
3807 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); 3728 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3808 3729
3809 if (instr->target()->IsConstantOperand()) { 3730 if (instr->target()->IsConstantOperand()) {
3810 LConstantOperand* target = LConstantOperand::cast(instr->target()); 3731 LConstantOperand* target = LConstantOperand::cast(instr->target());
3811 Handle<Code> code = Handle<Code>::cast(ToHandle(target)); 3732 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3812 generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET)); 3733 generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
3813 __ Call(code, RelocInfo::CODE_TARGET); 3734 __ Call(code, RelocInfo::CODE_TARGET);
3814 } else { 3735 } else {
3815 DCHECK(instr->target()->IsRegister()); 3736 DCHECK(instr->target()->IsRegister());
3816 Register target = ToRegister(instr->target()); 3737 Register target = ToRegister(instr->target());
3817 generator.BeforeCall(__ CallSize(target)); 3738 generator.BeforeCall(__ CallSize(target));
3818 __ addi(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag)); 3739 __ AddP(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
3819 __ CallJSEntry(ip); 3740 __ CallJSEntry(ip);
3820 } 3741 }
3821 generator.AfterCall(); 3742 generator.AfterCall();
3822 } 3743 }
3823 } 3744 }
3824 3745
3825
3826 void LCodeGen::DoCallNewArray(LCallNewArray* instr) { 3746 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
3827 DCHECK(ToRegister(instr->context()).is(cp)); 3747 DCHECK(ToRegister(instr->context()).is(cp));
3828 DCHECK(ToRegister(instr->constructor()).is(r4)); 3748 DCHECK(ToRegister(instr->constructor()).is(r3));
3829 DCHECK(ToRegister(instr->result()).is(r3)); 3749 DCHECK(ToRegister(instr->result()).is(r2));
3830 3750
3831 __ mov(r3, Operand(instr->arity())); 3751 __ mov(r2, Operand(instr->arity()));
3832 if (instr->arity() == 1) { 3752 if (instr->arity() == 1) {
3833 // We only need the allocation site for the case where we have a length argument. 3753 // We only need the allocation site for the case where we have a length argument.
3834 // The case may bail out to the runtime, which will determine the correct 3754 // The case may bail out to the runtime, which will determine the correct
3835 // elements kind with the site. 3755 // elements kind with the site.
3836 __ Move(r5, instr->hydrogen()->site()); 3756 __ Move(r4, instr->hydrogen()->site());
3837 } else { 3757 } else {
3838 __ LoadRoot(r5, Heap::kUndefinedValueRootIndex); 3758 __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
3839 } 3759 }
3840 ElementsKind kind = instr->hydrogen()->elements_kind(); 3760 ElementsKind kind = instr->hydrogen()->elements_kind();
3841 AllocationSiteOverrideMode override_mode = 3761 AllocationSiteOverrideMode override_mode =
3842 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE) 3762 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
3843 ? DISABLE_ALLOCATION_SITES 3763 ? DISABLE_ALLOCATION_SITES
3844 : DONT_OVERRIDE; 3764 : DONT_OVERRIDE;
3845 3765
3846 if (instr->arity() == 0) { 3766 if (instr->arity() == 0) {
3847 ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode); 3767 ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
3848 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 3768 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3849 } else if (instr->arity() == 1) { 3769 } else if (instr->arity() == 1) {
3850 Label done; 3770 Label done;
3851 if (IsFastPackedElementsKind(kind)) { 3771 if (IsFastPackedElementsKind(kind)) {
3852 Label packed_case; 3772 Label packed_case;
3853 // We might need a holey elements kind here; 3773 // We might need a holey elements kind here;
3854 // look at the first argument (the length). 3774 // look at the first argument (the length).
3855 __ LoadP(r8, MemOperand(sp, 0)); 3775 __ LoadP(r7, MemOperand(sp, 0));
3856 __ cmpi(r8, Operand::Zero()); 3776 __ CmpP(r7, Operand::Zero());
3857 __ beq(&packed_case); 3777 __ beq(&packed_case, Label::kNear);
3858 3778
3859 ElementsKind holey_kind = GetHoleyElementsKind(kind); 3779 ElementsKind holey_kind = GetHoleyElementsKind(kind);
3860 ArraySingleArgumentConstructorStub stub(isolate(), holey_kind, 3780 ArraySingleArgumentConstructorStub stub(isolate(), holey_kind,
3861 override_mode); 3781 override_mode);
3862 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 3782 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3863 __ b(&done); 3783 __ b(&done, Label::kNear);
3864 __ bind(&packed_case); 3784 __ bind(&packed_case);
3865 } 3785 }
3866 3786
3867 ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode); 3787 ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
3868 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 3788 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3869 __ bind(&done); 3789 __ bind(&done);
3870 } else { 3790 } else {
3871 ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode); 3791 ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
3872 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 3792 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3873 } 3793 }
3874 } 3794 }
3875 3795
3876
3877 void LCodeGen::DoCallRuntime(LCallRuntime* instr) { 3796 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
3878 CallRuntime(instr->function(), instr->arity(), instr); 3797 CallRuntime(instr->function(), instr->arity(), instr);
3879 } 3798 }
3880 3799
3881
3882 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) { 3800 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
3883 Register function = ToRegister(instr->function()); 3801 Register function = ToRegister(instr->function());
3884 Register code_object = ToRegister(instr->code_object()); 3802 Register code_object = ToRegister(instr->code_object());
3885 __ addi(code_object, code_object, 3803 __ lay(code_object,
3886 Operand(Code::kHeaderSize - kHeapObjectTag)); 3804 MemOperand(code_object, Code::kHeaderSize - kHeapObjectTag));
3887 __ StoreP(code_object, 3805 __ StoreP(code_object,
3888 FieldMemOperand(function, JSFunction::kCodeEntryOffset), r0); 3806 FieldMemOperand(function, JSFunction::kCodeEntryOffset), r0);
3889 } 3807 }
3890 3808
3891
3892 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) { 3809 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
3893 Register result = ToRegister(instr->result()); 3810 Register result = ToRegister(instr->result());
3894 Register base = ToRegister(instr->base_object()); 3811 Register base = ToRegister(instr->base_object());
3895 if (instr->offset()->IsConstantOperand()) { 3812 if (instr->offset()->IsConstantOperand()) {
3896 LConstantOperand* offset = LConstantOperand::cast(instr->offset()); 3813 LConstantOperand* offset = LConstantOperand::cast(instr->offset());
3897 __ Add(result, base, ToInteger32(offset), r0); 3814 __ lay(result, MemOperand(base, ToInteger32(offset)));
3898 } else { 3815 } else {
3899 Register offset = ToRegister(instr->offset()); 3816 Register offset = ToRegister(instr->offset());
3900 __ add(result, base, offset); 3817 __ lay(result, MemOperand(base, offset));
3901 } 3818 }
3902 } 3819 }
3903 3820
3904
3905 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { 3821 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
3906 HStoreNamedField* hinstr = instr->hydrogen(); 3822 HStoreNamedField* hinstr = instr->hydrogen();
3907 Representation representation = instr->representation(); 3823 Representation representation = instr->representation();
3908 3824
3909 Register object = ToRegister(instr->object()); 3825 Register object = ToRegister(instr->object());
3910 Register scratch = scratch0(); 3826 Register scratch = scratch0();
3911 HObjectAccess access = hinstr->access(); 3827 HObjectAccess access = hinstr->access();
3912 int offset = access.offset(); 3828 int offset = access.offset();
3913 3829
3914 if (access.IsExternalMemory()) { 3830 if (access.IsExternalMemory()) {
3915 Register value = ToRegister(instr->value()); 3831 Register value = ToRegister(instr->value());
3916 MemOperand operand = MemOperand(object, offset); 3832 MemOperand operand = MemOperand(object, offset);
3917 __ StoreRepresentation(value, operand, representation, r0); 3833 __ StoreRepresentation(value, operand, representation, r0);
3918 return; 3834 return;
3919 } 3835 }
3920 3836
3921 __ AssertNotSmi(object); 3837 __ AssertNotSmi(object);
3922 3838
3923 #if V8_TARGET_ARCH_PPC64 3839 #if V8_TARGET_ARCH_S390X
3924 DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() || 3840 DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
3925 IsInteger32(LConstantOperand::cast(instr->value()))); 3841 IsInteger32(LConstantOperand::cast(instr->value())));
3926 #else 3842 #else
3927 DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() || 3843 DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
3928 IsSmi(LConstantOperand::cast(instr->value()))); 3844 IsSmi(LConstantOperand::cast(instr->value())));
3929 #endif 3845 #endif
3930 if (!FLAG_unbox_double_fields && representation.IsDouble()) { 3846 if (!FLAG_unbox_double_fields && representation.IsDouble()) {
3931 DCHECK(access.IsInobject()); 3847 DCHECK(access.IsInobject());
3932 DCHECK(!hinstr->has_transition()); 3848 DCHECK(!hinstr->has_transition());
3933 DCHECK(!hinstr->NeedsWriteBarrier()); 3849 DCHECK(!hinstr->NeedsWriteBarrier());
3934 DoubleRegister value = ToDoubleRegister(instr->value()); 3850 DoubleRegister value = ToDoubleRegister(instr->value());
3935 __ stfd(value, FieldMemOperand(object, offset)); 3851 DCHECK(offset >= 0);
3852 __ std(value, FieldMemOperand(object, offset));
3936 return; 3853 return;
3937 } 3854 }
3938 3855
3939 if (hinstr->has_transition()) { 3856 if (hinstr->has_transition()) {
3940 Handle<Map> transition = hinstr->transition_map(); 3857 Handle<Map> transition = hinstr->transition_map();
3941 AddDeprecationDependency(transition); 3858 AddDeprecationDependency(transition);
3942 __ mov(scratch, Operand(transition)); 3859 __ mov(scratch, Operand(transition));
3943 __ StoreP(scratch, FieldMemOperand(object, HeapObject::kMapOffset), r0); 3860 __ StoreP(scratch, FieldMemOperand(object, HeapObject::kMapOffset), r0);
3944 if (hinstr->NeedsWriteBarrierForMap()) { 3861 if (hinstr->NeedsWriteBarrierForMap()) {
3945 Register temp = ToRegister(instr->temp()); 3862 Register temp = ToRegister(instr->temp());
3946 // Update the write barrier for the map field. 3863 // Update the write barrier for the map field.
3947 __ RecordWriteForMap(object, scratch, temp, GetLinkRegisterState(), 3864 __ RecordWriteForMap(object, scratch, temp, GetLinkRegisterState(),
3948 kSaveFPRegs); 3865 kSaveFPRegs);
3949 } 3866 }
3950 } 3867 }
3951 3868
3952 // Do the store. 3869 // Do the store.
3953 Register record_dest = object; 3870 Register record_dest = object;
3954 Register record_value = no_reg; 3871 Register record_value = no_reg;
3955 Register record_scratch = scratch; 3872 Register record_scratch = scratch;
3956 #if V8_TARGET_ARCH_PPC64 3873 #if V8_TARGET_ARCH_S390X
3957 if (FLAG_unbox_double_fields && representation.IsDouble()) { 3874 if (FLAG_unbox_double_fields && representation.IsDouble()) {
3958 DCHECK(access.IsInobject()); 3875 DCHECK(access.IsInobject());
3959 DoubleRegister value = ToDoubleRegister(instr->value()); 3876 DoubleRegister value = ToDoubleRegister(instr->value());
3960 __ stfd(value, FieldMemOperand(object, offset)); 3877 __ std(value, FieldMemOperand(object, offset));
3961 if (hinstr->NeedsWriteBarrier()) { 3878 if (hinstr->NeedsWriteBarrier()) {
3962 record_value = ToRegister(instr->value()); 3879 record_value = ToRegister(instr->value());
3963 } 3880 }
3964 } else { 3881 } else {
3965 if (representation.IsSmi() && 3882 if (representation.IsSmi() &&
3966 hinstr->value()->representation().IsInteger32()) { 3883 hinstr->value()->representation().IsInteger32()) {
3967 DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY); 3884 DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
3968 // 64-bit Smi optimization 3885 // 64-bit Smi optimization
3969 // Store int value directly to upper half of the smi. 3886 // Store int value directly to upper half of the smi.
3970 offset = SmiWordOffset(offset); 3887 offset = SmiWordOffset(offset);
3971 representation = Representation::Integer32(); 3888 representation = Representation::Integer32();
3972 } 3889 }
3973 #endif 3890 #endif
3974 if (access.IsInobject()) { 3891 if (access.IsInobject()) {
3975 Register value = ToRegister(instr->value()); 3892 Register value = ToRegister(instr->value());
3976 MemOperand operand = FieldMemOperand(object, offset); 3893 MemOperand operand = FieldMemOperand(object, offset);
3977 __ StoreRepresentation(value, operand, representation, r0); 3894 __ StoreRepresentation(value, operand, representation, r0);
3978 record_value = value; 3895 record_value = value;
3979 } else { 3896 } else {
3980 Register value = ToRegister(instr->value()); 3897 Register value = ToRegister(instr->value());
3981 __ LoadP(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset)); 3898 __ LoadP(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
3982 MemOperand operand = FieldMemOperand(scratch, offset); 3899 MemOperand operand = FieldMemOperand(scratch, offset);
3983 __ StoreRepresentation(value, operand, representation, r0); 3900 __ StoreRepresentation(value, operand, representation, r0);
3984 record_dest = scratch; 3901 record_dest = scratch;
3985 record_value = value; 3902 record_value = value;
3986 record_scratch = object; 3903 record_scratch = object;
3987 } 3904 }
3988 #if V8_TARGET_ARCH_PPC64 3905 #if V8_TARGET_ARCH_S390X
3989 } 3906 }
3990 #endif 3907 #endif
3991 3908
3992 if (hinstr->NeedsWriteBarrier()) { 3909 if (hinstr->NeedsWriteBarrier()) {
3993 __ RecordWriteField(record_dest, offset, record_value, record_scratch, 3910 __ RecordWriteField(record_dest, offset, record_value, record_scratch,
3994 GetLinkRegisterState(), kSaveFPRegs, 3911 GetLinkRegisterState(), kSaveFPRegs,
3995 EMIT_REMEMBERED_SET, hinstr->SmiCheckForWriteBarrier(), 3912 EMIT_REMEMBERED_SET, hinstr->SmiCheckForWriteBarrier(),
3996 hinstr->PointersToHereCheckForValue()); 3913 hinstr->PointersToHereCheckForValue());
3997 } 3914 }
3998 } 3915 }
3999 3916
4000
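The "64-bit Smi optimization" in DoStoreNamedField above leans on the smi layout on 64-bit targets, where the payload occupies the upper 32 bits of the word; storing the raw int32 at the smi field's upper-word offset (SmiWordOffset) therefore writes a valid smi without a shift. A sketch of the invariant being exploited:

#include <cstdint>

// On 64-bit targets a smi encodes its payload in the high word.
uint64_t SmiFromInt32(int32_t value) {
  return static_cast<uint64_t>(static_cast<uint32_t>(value)) << 32;
}
// Writing `value` into the high 32 bits of the field yields the same bit
// pattern as storing SmiFromInt32(value) to the full pointer-sized field.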
4001 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { 3917 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
4002 DCHECK(ToRegister(instr->context()).is(cp)); 3918 DCHECK(ToRegister(instr->context()).is(cp));
4003 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister())); 3919 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4004 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister())); 3920 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4005 3921
4006 if (instr->hydrogen()->HasVectorAndSlot()) { 3922 if (instr->hydrogen()->HasVectorAndSlot()) {
4007 EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr); 3923 EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
4008 } 3924 }
4009 3925
4010 __ mov(StoreDescriptor::NameRegister(), Operand(instr->name())); 3926 __ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
4011 Handle<Code> ic = CodeFactory::StoreICInOptimizedCode( 3927 Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
4012 isolate(), instr->language_mode(), 3928 isolate(), instr->language_mode(),
4013 instr->hydrogen()->initialization_state()).code(); 3929 instr->hydrogen()->initialization_state())
3930 .code();
4014 CallCode(ic, RelocInfo::CODE_TARGET, instr); 3931 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4015 } 3932 }
4016 3933
4017
4018 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { 3934 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
4019 Representation representation = instr->hydrogen()->length()->representation(); 3935 Representation representation = instr->hydrogen()->length()->representation();
4020 DCHECK(representation.Equals(instr->hydrogen()->index()->representation())); 3936 DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
4021 DCHECK(representation.IsSmiOrInteger32()); 3937 DCHECK(representation.IsSmiOrInteger32());
4022 3938
4023 Condition cc = instr->hydrogen()->allow_equality() ? lt : le; 3939 Condition cc = instr->hydrogen()->allow_equality() ? lt : le;
4024 if (instr->length()->IsConstantOperand()) { 3940 if (instr->length()->IsConstantOperand()) {
4025 int32_t length = ToInteger32(LConstantOperand::cast(instr->length())); 3941 int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
4026 Register index = ToRegister(instr->index()); 3942 Register index = ToRegister(instr->index());
4027 if (representation.IsSmi()) { 3943 if (representation.IsSmi()) {
4028 __ Cmpli(index, Operand(Smi::FromInt(length)), r0); 3944 __ CmpLogicalP(index, Operand(Smi::FromInt(length)));
4029 } else { 3945 } else {
4030 __ Cmplwi(index, Operand(length), r0); 3946 __ CmpLogical32(index, Operand(length));
4031 } 3947 }
4032 cc = CommuteCondition(cc); 3948 cc = CommuteCondition(cc);
4033 } else if (instr->index()->IsConstantOperand()) { 3949 } else if (instr->index()->IsConstantOperand()) {
4034 int32_t index = ToInteger32(LConstantOperand::cast(instr->index())); 3950 int32_t index = ToInteger32(LConstantOperand::cast(instr->index()));
4035 Register length = ToRegister(instr->length()); 3951 Register length = ToRegister(instr->length());
4036 if (representation.IsSmi()) { 3952 if (representation.IsSmi()) {
4037 __ Cmpli(length, Operand(Smi::FromInt(index)), r0); 3953 __ CmpLogicalP(length, Operand(Smi::FromInt(index)));
4038 } else { 3954 } else {
4039 __ Cmplwi(length, Operand(index), r0); 3955 __ CmpLogical32(length, Operand(index));
4040 } 3956 }
4041 } else { 3957 } else {
4042 Register index = ToRegister(instr->index()); 3958 Register index = ToRegister(instr->index());
4043 Register length = ToRegister(instr->length()); 3959 Register length = ToRegister(instr->length());
4044 if (representation.IsSmi()) { 3960 if (representation.IsSmi()) {
4045 __ cmpl(length, index); 3961 __ CmpLogicalP(length, index);
4046 } else { 3962 } else {
4047 __ cmplw(length, index); 3963 __ CmpLogical32(length, index);
4048 } 3964 }
4049 } 3965 }
4050 if (FLAG_debug_code && instr->hydrogen()->skip_check()) { 3966 if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
4051 Label done; 3967 Label done;
4052 __ b(NegateCondition(cc), &done); 3968 __ b(NegateCondition(cc), &done, Label::kNear);
4053 __ stop("eliminated bounds check failed"); 3969 __ stop("eliminated bounds check failed");
4054 __ bind(&done); 3970 __ bind(&done);
4055 } else { 3971 } else {
4056 DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds); 3972 DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
4057 } 3973 }
4058 } 3974 }
4059 3975
4060
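The condition selection in DoBoundsCheck reduces to the unsigned predicate below; CommuteCondition merely compensates for the swapped operand order when one side is a constant:

#include <cstdint>

bool BoundsCheckFails(uint32_t index, uint32_t length, bool allow_equality) {
  // allow_equality permits index == length; otherwise index must be less.
  return allow_equality ? (index > length) : (index >= length);
}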
4061 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { 3976 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
4062 Register external_pointer = ToRegister(instr->elements()); 3977 Register external_pointer = ToRegister(instr->elements());
4063 Register key = no_reg; 3978 Register key = no_reg;
4064 ElementsKind elements_kind = instr->elements_kind(); 3979 ElementsKind elements_kind = instr->elements_kind();
4065 bool key_is_constant = instr->key()->IsConstantOperand(); 3980 bool key_is_constant = instr->key()->IsConstantOperand();
4066 int constant_key = 0; 3981 int constant_key = 0;
4067 if (key_is_constant) { 3982 if (key_is_constant) {
4068 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); 3983 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4069 if (constant_key & 0xF0000000) { 3984 if (constant_key & 0xF0000000) {
4070 Abort(kArrayIndexConstantValueTooBig); 3985 Abort(kArrayIndexConstantValueTooBig);
4071 } 3986 }
4072 } else { 3987 } else {
4073 key = ToRegister(instr->key()); 3988 key = ToRegister(instr->key());
4074 } 3989 }
4075 int element_size_shift = ElementsKindToShiftSize(elements_kind); 3990 int element_size_shift = ElementsKindToShiftSize(elements_kind);
4076 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi(); 3991 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
4077 int base_offset = instr->base_offset(); 3992 int base_offset = instr->base_offset();
4078 3993
4079 if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) { 3994 if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
4080 Register address = scratch0(); 3995 Register address = scratch0();
4081 DoubleRegister value(ToDoubleRegister(instr->value())); 3996 DoubleRegister value(ToDoubleRegister(instr->value()));
4082 if (key_is_constant) { 3997 if (key_is_constant) {
4083 if (constant_key != 0) { 3998 if (constant_key != 0) {
4084 __ Add(address, external_pointer, constant_key << element_size_shift, 3999 base_offset += constant_key << element_size_shift;
4085 r0); 4000 if (!is_int20(base_offset)) {
4001 __ mov(address, Operand(base_offset));
4002 __ AddP(address, external_pointer);
4003 } else {
4004 __ AddP(address, external_pointer, Operand(base_offset));
4005 }
4006 base_offset = 0;
4086 } else { 4007 } else {
4087 address = external_pointer; 4008 address = external_pointer;
4088 } 4009 }
4089 } else { 4010 } else {
4090 __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi); 4011 __ IndexToArrayOffset(address, key, element_size_shift, key_is_smi);
4091 __ add(address, external_pointer, r0); 4012 __ AddP(address, external_pointer);
4092 } 4013 }
4093 if (elements_kind == FLOAT32_ELEMENTS) { 4014 if (elements_kind == FLOAT32_ELEMENTS) {
4094 __ frsp(double_scratch0(), value); 4015 __ ledbr(double_scratch0(), value);
4095 __ stfs(double_scratch0(), MemOperand(address, base_offset)); 4016 __ StoreFloat32(double_scratch0(), MemOperand(address, base_offset));
4096 } else { // Storing doubles, not floats. 4017 } else { // Storing doubles, not floats.
4097 __ stfd(value, MemOperand(address, base_offset)); 4018 __ StoreDouble(value, MemOperand(address, base_offset));
4098 } 4019 }
4099 } else { 4020 } else {
4100 Register value(ToRegister(instr->value())); 4021 Register value(ToRegister(instr->value()));
4101 MemOperand mem_operand = 4022 MemOperand mem_operand =
4102 PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi, 4023 PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
4103 constant_key, element_size_shift, base_offset); 4024 constant_key, element_size_shift, base_offset);
4104 switch (elements_kind) { 4025 switch (elements_kind) {
4105 case UINT8_ELEMENTS: 4026 case UINT8_ELEMENTS:
4106 case UINT8_CLAMPED_ELEMENTS: 4027 case UINT8_CLAMPED_ELEMENTS:
4107 case INT8_ELEMENTS: 4028 case INT8_ELEMENTS:
4108 if (key_is_constant) { 4029 if (key_is_constant) {
4109 __ StoreByte(value, mem_operand, r0); 4030 __ StoreByte(value, mem_operand, r0);
4110 } else { 4031 } else {
4111 __ stbx(value, mem_operand); 4032 __ StoreByte(value, mem_operand);
4112 } 4033 }
4113 break; 4034 break;
4114 case INT16_ELEMENTS: 4035 case INT16_ELEMENTS:
4115 case UINT16_ELEMENTS: 4036 case UINT16_ELEMENTS:
4116 if (key_is_constant) { 4037 if (key_is_constant) {
4117 __ StoreHalfWord(value, mem_operand, r0); 4038 __ StoreHalfWord(value, mem_operand, r0);
4118 } else { 4039 } else {
4119 __ sthx(value, mem_operand); 4040 __ StoreHalfWord(value, mem_operand);
4120 } 4041 }
4121 break; 4042 break;
4122 case INT32_ELEMENTS: 4043 case INT32_ELEMENTS:
4123 case UINT32_ELEMENTS: 4044 case UINT32_ELEMENTS:
4124 if (key_is_constant) { 4045 if (key_is_constant) {
4125 __ StoreWord(value, mem_operand, r0); 4046 __ StoreW(value, mem_operand, r0);
4126 } else { 4047 } else {
4127 __ stwx(value, mem_operand); 4048 __ StoreW(value, mem_operand);
4128 } 4049 }
4129 break; 4050 break;
4130 case FLOAT32_ELEMENTS: 4051 case FLOAT32_ELEMENTS:
4131 case FLOAT64_ELEMENTS: 4052 case FLOAT64_ELEMENTS:
4132 case FAST_DOUBLE_ELEMENTS: 4053 case FAST_DOUBLE_ELEMENTS:
4133 case FAST_ELEMENTS: 4054 case FAST_ELEMENTS:
4134 case FAST_SMI_ELEMENTS: 4055 case FAST_SMI_ELEMENTS:
4135 case FAST_HOLEY_DOUBLE_ELEMENTS: 4056 case FAST_HOLEY_DOUBLE_ELEMENTS:
4136 case FAST_HOLEY_ELEMENTS: 4057 case FAST_HOLEY_ELEMENTS:
4137 case FAST_HOLEY_SMI_ELEMENTS: 4058 case FAST_HOLEY_SMI_ELEMENTS:
4138 case DICTIONARY_ELEMENTS: 4059 case DICTIONARY_ELEMENTS:
4139 case FAST_SLOPPY_ARGUMENTS_ELEMENTS: 4060 case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
4140 case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: 4061 case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
4141 case FAST_STRING_WRAPPER_ELEMENTS: 4062 case FAST_STRING_WRAPPER_ELEMENTS:
4142 case SLOW_STRING_WRAPPER_ELEMENTS: 4063 case SLOW_STRING_WRAPPER_ELEMENTS:
4143 case NO_ELEMENTS: 4064 case NO_ELEMENTS:
4144 UNREACHABLE(); 4065 UNREACHABLE();
4145 break; 4066 break;
4146 } 4067 }
4147 } 4068 }
4148 } 4069 }
4149 4070
4150
4151 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { 4071 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4152 DoubleRegister value = ToDoubleRegister(instr->value()); 4072 DoubleRegister value = ToDoubleRegister(instr->value());
4153 Register elements = ToRegister(instr->elements()); 4073 Register elements = ToRegister(instr->elements());
4154 Register key = no_reg; 4074 Register key = no_reg;
4155 Register scratch = scratch0(); 4075 Register scratch = scratch0();
4156 DoubleRegister double_scratch = double_scratch0(); 4076 DoubleRegister double_scratch = double_scratch0();
4157 bool key_is_constant = instr->key()->IsConstantOperand(); 4077 bool key_is_constant = instr->key()->IsConstantOperand();
4158 int constant_key = 0; 4078 int constant_key = 0;
4159 4079
4160 // Calculate the effective address of the slot in the array to store the 4080 // Calculate the effective address of the slot in the array to store the
4161 // double value. 4081 // double value.
4162 if (key_is_constant) { 4082 if (key_is_constant) {
4163 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); 4083 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4164 if (constant_key & 0xF0000000) { 4084 if (constant_key & 0xF0000000) {
4165 Abort(kArrayIndexConstantValueTooBig); 4085 Abort(kArrayIndexConstantValueTooBig);
4166 } 4086 }
4167 } else { 4087 } else {
4168 key = ToRegister(instr->key()); 4088 key = ToRegister(instr->key());
4169 } 4089 }
4170 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); 4090 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
4171 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi(); 4091 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
4172 int base_offset = instr->base_offset() + constant_key * kDoubleSize; 4092 int base_offset = instr->base_offset() + constant_key * kDoubleSize;
4173 if (!key_is_constant) { 4093 bool use_scratch = false;
4094 intptr_t address_offset = base_offset;
4095
4096 if (key_is_constant) {
4097 // Memory references support up to a 20-bit signed displacement in RXY form.
4098 if (!is_int20(address_offset)) {
4099 __ mov(scratch, Operand(address_offset));
4100 address_offset = 0;
4101 use_scratch = true;
4102 }
4103 } else {
4104 use_scratch = true;
4174 __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi); 4105 __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
4175 __ add(scratch, elements, scratch); 4106 // Memory references support up to a 20-bit signed displacement in RXY form.
4176 elements = scratch; 4107 if (!is_int20(address_offset)) {
4177 } 4108 __ AddP(scratch, Operand(address_offset));
4178 if (!is_int16(base_offset)) { 4109 address_offset = 0;
4179 __ Add(scratch, elements, base_offset, r0); 4110 }
4180 base_offset = 0;
4181 elements = scratch;
4182 } 4111 }
4183 4112
4184 if (instr->NeedsCanonicalization()) { 4113 if (instr->NeedsCanonicalization()) {
4185 // Turn potential sNaN value into qNaN. 4114 // Turn potential sNaN value into qNaN.
4186 __ CanonicalizeNaN(double_scratch, value); 4115 __ CanonicalizeNaN(double_scratch, value);
4187 __ stfd(double_scratch, MemOperand(elements, base_offset)); 4116 DCHECK(address_offset >= 0);
4117 if (use_scratch)
4118 __ std(double_scratch, MemOperand(scratch, elements, address_offset));
4119 else
4120 __ std(double_scratch, MemOperand(elements, address_offset));
4188 } else { 4121 } else {
4189 __ stfd(value, MemOperand(elements, base_offset)); 4122 if (use_scratch)
4123 __ std(value, MemOperand(scratch, elements, address_offset));
4124 else
4125 __ std(value, MemOperand(elements, address_offset));
4190 } 4126 }
4191 } 4127 }
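A note on the displacement handling above: the z/Architecture RXY instruction format encodes a 20-bit signed displacement, so any effective offset outside that range has to be materialized into a scratch register first. A minimal sketch of the range predicate, assuming the is_int20 helper behaves as its name suggests (the assertion values below are illustrative, not from the patch):

#include <cassert>
#include <cstdint>

// True iff x fits in a 20-bit signed immediate: [-2^19, 2^19 - 1].
static bool is_int20(intptr_t x) {
  return x >= -(1 << 19) && x <= (1 << 19) - 1;
}

int main() {
  assert(is_int20(524287));    // largest encodable displacement
  assert(is_int20(-524288));   // smallest encodable displacement
  assert(!is_int20(524288));   // out of range: load offset into scratch first
  return 0;
}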
4192 4128
4193
4194 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { 4129 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4195 HStoreKeyed* hinstr = instr->hydrogen(); 4130 HStoreKeyed* hinstr = instr->hydrogen();
4196 Register value = ToRegister(instr->value()); 4131 Register value = ToRegister(instr->value());
4197 Register elements = ToRegister(instr->elements()); 4132 Register elements = ToRegister(instr->elements());
4198 Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg; 4133 Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
4199 Register scratch = scratch0(); 4134 Register scratch = scratch0();
4200 Register store_base = scratch;
4201 int offset = instr->base_offset(); 4135 int offset = instr->base_offset();
4202 4136
4203 // Do the store. 4137 // Do the store.
4204 if (instr->key()->IsConstantOperand()) { 4138 if (instr->key()->IsConstantOperand()) {
4205 DCHECK(!hinstr->NeedsWriteBarrier()); 4139 DCHECK(!hinstr->NeedsWriteBarrier());
4206 LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); 4140 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
4207 offset += ToInteger32(const_operand) * kPointerSize; 4141 offset += ToInteger32(const_operand) * kPointerSize;
4208 store_base = elements;
4209 } else { 4142 } else {
4210 // Even though the HLoadKeyed instruction forces the input 4143 // Even though the HLoadKeyed instruction forces the input
4211 // representation for the key to be an integer, the input gets replaced 4144 // representation for the key to be an integer, the input gets replaced
4212 // during bound check elimination with the index argument to the bounds 4145 // during bound check elimination with the index argument to the bounds
4213 // check, which can be tagged, so that case must be handled here, too. 4146 // check, which can be tagged, so that case must be handled here, too.
4214 if (hinstr->key()->representation().IsSmi()) { 4147 if (hinstr->key()->representation().IsSmi()) {
4215 __ SmiToPtrArrayOffset(scratch, key); 4148 __ SmiToPtrArrayOffset(scratch, key);
4216 } else { 4149 } else {
4217 __ ShiftLeftImm(scratch, key, Operand(kPointerSizeLog2)); 4150 __ ShiftLeftP(scratch, key, Operand(kPointerSizeLog2));
4218 } 4151 }
4219 __ add(scratch, elements, scratch);
4220 } 4152 }
4221 4153
4222 Representation representation = hinstr->value()->representation(); 4154 Representation representation = hinstr->value()->representation();
4223 4155
4224 #if V8_TARGET_ARCH_PPC64 4156 #if V8_TARGET_ARCH_S390X
4225 // 64-bit Smi optimization 4157 // 64-bit Smi optimization
4226 if (representation.IsInteger32()) { 4158 if (representation.IsInteger32()) {
4227 DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY); 4159 DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
4228 DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS); 4160 DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
4229 // Store int value directly to upper half of the smi. 4161 // Store int value directly to upper half of the smi.
4230 offset = SmiWordOffset(offset); 4162 offset = SmiWordOffset(offset);
4231 } 4163 }
4232 #endif 4164 #endif
4233 4165
4234 __ StoreRepresentation(value, MemOperand(store_base, offset), representation, 4166 if (instr->key()->IsConstantOperand()) {
4235 r0); 4167 __ StoreRepresentation(value, MemOperand(elements, offset), representation,
4168 scratch);
4169 } else {
4170 __ StoreRepresentation(value, MemOperand(scratch, elements, offset),
4171 representation, r0);
4172 }
4236 4173
4237 if (hinstr->NeedsWriteBarrier()) { 4174 if (hinstr->NeedsWriteBarrier()) {
4238 SmiCheck check_needed = hinstr->value()->type().IsHeapObject() 4175 SmiCheck check_needed = hinstr->value()->type().IsHeapObject()
4239 ? OMIT_SMI_CHECK 4176 ? OMIT_SMI_CHECK
4240 : INLINE_SMI_CHECK; 4177 : INLINE_SMI_CHECK;
4241 // Compute address of modified element and store it into key register. 4178 // Compute address of modified element and store it into key register.
4242 __ Add(key, store_base, offset, r0); 4179 if (instr->key()->IsConstantOperand()) {
4180 __ lay(key, MemOperand(elements, offset));
4181 } else {
4182 __ lay(key, MemOperand(scratch, elements, offset));
4183 }
4243 __ RecordWrite(elements, key, value, GetLinkRegisterState(), kSaveFPRegs, 4184 __ RecordWrite(elements, key, value, GetLinkRegisterState(), kSaveFPRegs,
4244 EMIT_REMEMBERED_SET, check_needed, 4185 EMIT_REMEMBERED_SET, check_needed,
4245 hinstr->PointersToHereCheckForValue()); 4186 hinstr->PointersToHereCheckForValue());
4246 } 4187 }
4247 } 4188 }
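The 64-bit Smi optimization above works because, on 64-bit targets, a Smi carries its 32-bit payload in the upper half of the word with the tag (and zeroes) in the lower half, so storing a raw int32 at SmiWordOffset updates an already-initialized Smi slot in place. A hedged sketch of that layout, assuming the usual 32-bit shift and a zero tag bit:

#include <cassert>
#include <cstdint>

// 64-bit V8-style Smi: payload in bits 32..63, tag bit 0 clear.
static int64_t SmiTag64(int32_t value) {
  return static_cast<int64_t>(
      static_cast<uint64_t>(static_cast<uint32_t>(value)) << 32);
}

static int32_t SmiUntag64(int64_t smi) {
  return static_cast<int32_t>(smi >> 32);
}

int main() {
  int64_t smi = SmiTag64(-7);
  assert(SmiUntag64(smi) == -7);
  assert((smi & 1) == 0);  // low bit clear marks a Smi, not a HeapObject
  return 0;
}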
4248 4189
4249
4250 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) { 4190 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4251 // By cases: external, fast double 4191 // By cases: external, fast double
4252 if (instr->is_fixed_typed_array()) { 4192 if (instr->is_fixed_typed_array()) {
4253 DoStoreKeyedExternalArray(instr); 4193 DoStoreKeyedExternalArray(instr);
4254 } else if (instr->hydrogen()->value()->representation().IsDouble()) { 4194 } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4255 DoStoreKeyedFixedDoubleArray(instr); 4195 DoStoreKeyedFixedDoubleArray(instr);
4256 } else { 4196 } else {
4257 DoStoreKeyedFixedArray(instr); 4197 DoStoreKeyedFixedArray(instr);
4258 } 4198 }
4259 } 4199 }
4260 4200
4261
4262 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { 4201 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4263 DCHECK(ToRegister(instr->context()).is(cp)); 4202 DCHECK(ToRegister(instr->context()).is(cp));
4264 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister())); 4203 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4265 DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister())); 4204 DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
4266 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister())); 4205 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4267 4206
4268 if (instr->hydrogen()->HasVectorAndSlot()) { 4207 if (instr->hydrogen()->HasVectorAndSlot()) {
4269 EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr); 4208 EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
4270 } 4209 }
4271 4210
4272 Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode( 4211 Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
4273 isolate(), instr->language_mode(), 4212 isolate(), instr->language_mode(),
4274 instr->hydrogen()->initialization_state()).code(); 4213 instr->hydrogen()->initialization_state())
4214 .code();
4275 CallCode(ic, RelocInfo::CODE_TARGET, instr); 4215 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4276 } 4216 }
4277 4217
4278
4279 void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) { 4218 void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
4280 class DeferredMaybeGrowElements final : public LDeferredCode { 4219 class DeferredMaybeGrowElements final : public LDeferredCode {
4281 public: 4220 public:
4282 DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr) 4221 DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
4283 : LDeferredCode(codegen), instr_(instr) {} 4222 : LDeferredCode(codegen), instr_(instr) {}
4284 void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); } 4223 void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
4285 LInstruction* instr() override { return instr_; } 4224 LInstruction* instr() override { return instr_; }
4286 4225
4287 private: 4226 private:
4288 LMaybeGrowElements* instr_; 4227 LMaybeGrowElements* instr_;
4289 }; 4228 };
4290 4229
4291 Register result = r3; 4230 Register result = r2;
4292 DeferredMaybeGrowElements* deferred = 4231 DeferredMaybeGrowElements* deferred =
4293 new (zone()) DeferredMaybeGrowElements(this, instr); 4232 new (zone()) DeferredMaybeGrowElements(this, instr);
4294 LOperand* key = instr->key(); 4233 LOperand* key = instr->key();
4295 LOperand* current_capacity = instr->current_capacity(); 4234 LOperand* current_capacity = instr->current_capacity();
4296 4235
4297 DCHECK(instr->hydrogen()->key()->representation().IsInteger32()); 4236 DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
4298 DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32()); 4237 DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
4299 DCHECK(key->IsConstantOperand() || key->IsRegister()); 4238 DCHECK(key->IsConstantOperand() || key->IsRegister());
4300 DCHECK(current_capacity->IsConstantOperand() || 4239 DCHECK(current_capacity->IsConstantOperand() ||
4301 current_capacity->IsRegister()); 4240 current_capacity->IsRegister());
4302 4241
4303 if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) { 4242 if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
4304 int32_t constant_key = ToInteger32(LConstantOperand::cast(key)); 4243 int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
4305 int32_t constant_capacity = 4244 int32_t constant_capacity =
4306 ToInteger32(LConstantOperand::cast(current_capacity)); 4245 ToInteger32(LConstantOperand::cast(current_capacity));
4307 if (constant_key >= constant_capacity) { 4246 if (constant_key >= constant_capacity) {
4308 // Deferred case. 4247 // Deferred case.
4309 __ b(deferred->entry()); 4248 __ b(deferred->entry());
4310 } 4249 }
4311 } else if (key->IsConstantOperand()) { 4250 } else if (key->IsConstantOperand()) {
4312 int32_t constant_key = ToInteger32(LConstantOperand::cast(key)); 4251 int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
4313 __ Cmpwi(ToRegister(current_capacity), Operand(constant_key), r0); 4252 __ Cmp32(ToRegister(current_capacity), Operand(constant_key));
4314 __ ble(deferred->entry()); 4253 __ ble(deferred->entry());
4315 } else if (current_capacity->IsConstantOperand()) { 4254 } else if (current_capacity->IsConstantOperand()) {
4316 int32_t constant_capacity = 4255 int32_t constant_capacity =
4317 ToInteger32(LConstantOperand::cast(current_capacity)); 4256 ToInteger32(LConstantOperand::cast(current_capacity));
4318 __ Cmpwi(ToRegister(key), Operand(constant_capacity), r0); 4257 __ Cmp32(ToRegister(key), Operand(constant_capacity));
4319 __ bge(deferred->entry()); 4258 __ bge(deferred->entry());
4320 } else { 4259 } else {
4321 __ cmpw(ToRegister(key), ToRegister(current_capacity)); 4260 __ Cmp32(ToRegister(key), ToRegister(current_capacity));
4322 __ bge(deferred->entry()); 4261 __ bge(deferred->entry());
4323 } 4262 }
4324 4263
4325 if (instr->elements()->IsRegister()) { 4264 if (instr->elements()->IsRegister()) {
4326 __ Move(result, ToRegister(instr->elements())); 4265 __ Move(result, ToRegister(instr->elements()));
4327 } else { 4266 } else {
4328 __ LoadP(result, ToMemOperand(instr->elements())); 4267 __ LoadP(result, ToMemOperand(instr->elements()));
4329 } 4268 }
4330 4269
4331 __ bind(deferred->exit()); 4270 __ bind(deferred->exit());
4332 } 4271 }
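All four comparison branches above encode the same guard, key >= current_capacity, with the operands folded at compile time when they are constants; only the deferred path then grows the backing store. The scalar condition, for reference:

#include <cassert>
#include <cstdint>

// Take the deferred (grow) path when the store index does not fit in the
// current backing-store capacity.
static bool NeedsGrowth(int32_t key, int32_t capacity) {
  return key >= capacity;
}

int main() {
  assert(NeedsGrowth(8, 8));   // writing one past the end grows
  assert(!NeedsGrowth(7, 8));  // in-bounds store takes the fast path
  return 0;
}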
4333 4272
4334
4335 void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) { 4273 void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
4336 // TODO(3095996): Get rid of this. For now, we need to make the 4274 // TODO(3095996): Get rid of this. For now, we need to make the
4337 // result register contain a valid pointer because it is already 4275 // result register contain a valid pointer because it is already
4338 // contained in the register pointer map. 4276 // contained in the register pointer map.
4339 Register result = r3; 4277 Register result = r2;
4340 __ li(result, Operand::Zero()); 4278 __ LoadImmP(result, Operand::Zero());
4341 4279
4342 // We have to call a stub. 4280 // We have to call a stub.
4343 { 4281 {
4344 PushSafepointRegistersScope scope(this); 4282 PushSafepointRegistersScope scope(this);
4345 if (instr->object()->IsRegister()) { 4283 if (instr->object()->IsRegister()) {
4346 __ Move(result, ToRegister(instr->object())); 4284 __ Move(result, ToRegister(instr->object()));
4347 } else { 4285 } else {
4348 __ LoadP(result, ToMemOperand(instr->object())); 4286 __ LoadP(result, ToMemOperand(instr->object()));
4349 } 4287 }
4350 4288
4351 LOperand* key = instr->key(); 4289 LOperand* key = instr->key();
4352 if (key->IsConstantOperand()) { 4290 if (key->IsConstantOperand()) {
4353 __ LoadSmiLiteral(r6, ToSmi(LConstantOperand::cast(key))); 4291 __ LoadSmiLiteral(r5, ToSmi(LConstantOperand::cast(key)));
4354 } else { 4292 } else {
4355 __ SmiTag(r6, ToRegister(key)); 4293 __ SmiTag(r5, ToRegister(key));
4356 } 4294 }
4357 4295
4358 GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(), 4296 GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
4359 instr->hydrogen()->kind()); 4297 instr->hydrogen()->kind());
4360 __ CallStub(&stub); 4298 __ CallStub(&stub);
4361 RecordSafepointWithLazyDeopt( 4299 RecordSafepointWithLazyDeopt(
4362 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); 4300 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
4363 __ StoreToSafepointRegisterSlot(result, result); 4301 __ StoreToSafepointRegisterSlot(result, result);
4364 } 4302 }
4365 4303
4366 // Deopt on smi, which means the elements array changed to dictionary mode. 4304 // Deopt on smi, which means the elements array changed to dictionary mode.
4367 __ TestIfSmi(result, r0); 4305 __ TestIfSmi(result);
4368 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0); 4306 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
4369 } 4307 }
4370 4308
4371
4372 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { 4309 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4373 Register object_reg = ToRegister(instr->object()); 4310 Register object_reg = ToRegister(instr->object());
4374 Register scratch = scratch0(); 4311 Register scratch = scratch0();
4375 4312
4376 Handle<Map> from_map = instr->original_map(); 4313 Handle<Map> from_map = instr->original_map();
4377 Handle<Map> to_map = instr->transitioned_map(); 4314 Handle<Map> to_map = instr->transitioned_map();
4378 ElementsKind from_kind = instr->from_kind(); 4315 ElementsKind from_kind = instr->from_kind();
4379 ElementsKind to_kind = instr->to_kind(); 4316 ElementsKind to_kind = instr->to_kind();
4380 4317
4381 Label not_applicable; 4318 Label not_applicable;
4382 __ LoadP(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset)); 4319 __ LoadP(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4383 __ Cmpi(scratch, Operand(from_map), r0); 4320 __ CmpP(scratch, Operand(from_map));
4384 __ bne(&not_applicable); 4321 __ bne(&not_applicable);
4385 4322
4386 if (IsSimpleMapChangeTransition(from_kind, to_kind)) { 4323 if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
4387 Register new_map_reg = ToRegister(instr->new_map_temp()); 4324 Register new_map_reg = ToRegister(instr->new_map_temp());
4388 __ mov(new_map_reg, Operand(to_map)); 4325 __ mov(new_map_reg, Operand(to_map));
4389 __ StoreP(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset), 4326 __ StoreP(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4390 r0);
4391 // Write barrier. 4327 // Write barrier.
4392 __ RecordWriteForMap(object_reg, new_map_reg, scratch, 4328 __ RecordWriteForMap(object_reg, new_map_reg, scratch,
4393 GetLinkRegisterState(), kDontSaveFPRegs); 4329 GetLinkRegisterState(), kDontSaveFPRegs);
4394 } else { 4330 } else {
4395 DCHECK(ToRegister(instr->context()).is(cp)); 4331 DCHECK(ToRegister(instr->context()).is(cp));
4396 DCHECK(object_reg.is(r3)); 4332 DCHECK(object_reg.is(r2));
4397 PushSafepointRegistersScope scope(this); 4333 PushSafepointRegistersScope scope(this);
4398 __ Move(r4, to_map); 4334 __ Move(r3, to_map);
4399 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE; 4335 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
4400 TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array); 4336 TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
4401 __ CallStub(&stub); 4337 __ CallStub(&stub);
4402 RecordSafepointWithRegisters(instr->pointer_map(), 0, 4338 RecordSafepointWithRegisters(instr->pointer_map(), 0,
4403 Safepoint::kLazyDeopt); 4339 Safepoint::kLazyDeopt);
4404 } 4340 }
4405 __ bind(&not_applicable); 4341 __ bind(&not_applicable);
4406 } 4342 }
4407 4343
4408
4409 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { 4344 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4410 Register object = ToRegister(instr->object()); 4345 Register object = ToRegister(instr->object());
4411 Register temp = ToRegister(instr->temp()); 4346 Register temp = ToRegister(instr->temp());
4412 Label no_memento_found; 4347 Label no_memento_found;
4413 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found); 4348 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
4414 DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound); 4349 DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
4415 __ bind(&no_memento_found); 4350 __ bind(&no_memento_found);
4416 } 4351 }
4417 4352
4418
4419 void LCodeGen::DoStringAdd(LStringAdd* instr) { 4353 void LCodeGen::DoStringAdd(LStringAdd* instr) {
4420 DCHECK(ToRegister(instr->context()).is(cp)); 4354 DCHECK(ToRegister(instr->context()).is(cp));
4421 DCHECK(ToRegister(instr->left()).is(r4)); 4355 DCHECK(ToRegister(instr->left()).is(r3));
4422 DCHECK(ToRegister(instr->right()).is(r3)); 4356 DCHECK(ToRegister(instr->right()).is(r2));
4423 StringAddStub stub(isolate(), instr->hydrogen()->flags(), 4357 StringAddStub stub(isolate(), instr->hydrogen()->flags(),
4424 instr->hydrogen()->pretenure_flag()); 4358 instr->hydrogen()->pretenure_flag());
4425 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 4359 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4426 } 4360 }
4427 4361
4428
4429 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { 4362 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4430 class DeferredStringCharCodeAt final : public LDeferredCode { 4363 class DeferredStringCharCodeAt final : public LDeferredCode {
4431 public: 4364 public:
4432 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr) 4365 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
4433 : LDeferredCode(codegen), instr_(instr) {} 4366 : LDeferredCode(codegen), instr_(instr) {}
4434 void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); } 4367 void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
4435 LInstruction* instr() override { return instr_; } 4368 LInstruction* instr() override { return instr_; }
4436 4369
4437 private: 4370 private:
4438 LStringCharCodeAt* instr_; 4371 LStringCharCodeAt* instr_;
4439 }; 4372 };
4440 4373
4441 DeferredStringCharCodeAt* deferred = 4374 DeferredStringCharCodeAt* deferred =
4442 new (zone()) DeferredStringCharCodeAt(this, instr); 4375 new (zone()) DeferredStringCharCodeAt(this, instr);
4443 4376
4444 StringCharLoadGenerator::Generate( 4377 StringCharLoadGenerator::Generate(
4445 masm(), ToRegister(instr->string()), ToRegister(instr->index()), 4378 masm(), ToRegister(instr->string()), ToRegister(instr->index()),
4446 ToRegister(instr->result()), deferred->entry()); 4379 ToRegister(instr->result()), deferred->entry());
4447 __ bind(deferred->exit()); 4380 __ bind(deferred->exit());
4448 } 4381 }
4449 4382
4450
4451 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) { 4383 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4452 Register string = ToRegister(instr->string()); 4384 Register string = ToRegister(instr->string());
4453 Register result = ToRegister(instr->result()); 4385 Register result = ToRegister(instr->result());
4454 Register scratch = scratch0(); 4386 Register scratch = scratch0();
4455 4387
4456 // TODO(3095996): Get rid of this. For now, we need to make the 4388 // TODO(3095996): Get rid of this. For now, we need to make the
4457 // result register contain a valid pointer because it is already 4389 // result register contain a valid pointer because it is already
4458 // contained in the register pointer map. 4390 // contained in the register pointer map.
4459 __ li(result, Operand::Zero()); 4391 __ LoadImmP(result, Operand::Zero());
4460 4392
4461 PushSafepointRegistersScope scope(this); 4393 PushSafepointRegistersScope scope(this);
4462 __ push(string); 4394 __ push(string);
4463 // Push the index as a smi. This is safe because of the checks in 4395 // Push the index as a smi. This is safe because of the checks in
4464 // DoStringCharCodeAt above. 4396 // DoStringCharCodeAt above.
4465 if (instr->index()->IsConstantOperand()) { 4397 if (instr->index()->IsConstantOperand()) {
4466 int const_index = ToInteger32(LConstantOperand::cast(instr->index())); 4398 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
4467 __ LoadSmiLiteral(scratch, Smi::FromInt(const_index)); 4399 __ LoadSmiLiteral(scratch, Smi::FromInt(const_index));
4468 __ push(scratch); 4400 __ push(scratch);
4469 } else { 4401 } else {
4470 Register index = ToRegister(instr->index()); 4402 Register index = ToRegister(instr->index());
4471 __ SmiTag(index); 4403 __ SmiTag(index);
4472 __ push(index); 4404 __ push(index);
4473 } 4405 }
4474 CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr, 4406 CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
4475 instr->context()); 4407 instr->context());
4476 __ AssertSmi(r3); 4408 __ AssertSmi(r2);
4477 __ SmiUntag(r3); 4409 __ SmiUntag(r2);
4478 __ StoreToSafepointRegisterSlot(r3, result); 4410 __ StoreToSafepointRegisterSlot(r2, result);
4479 } 4411 }
4480 4412
4481
4482 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { 4413 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4483 class DeferredStringCharFromCode final : public LDeferredCode { 4414 class DeferredStringCharFromCode final : public LDeferredCode {
4484 public: 4415 public:
4485 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr) 4416 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
4486 : LDeferredCode(codegen), instr_(instr) {} 4417 : LDeferredCode(codegen), instr_(instr) {}
4487 void Generate() override { 4418 void Generate() override {
4488 codegen()->DoDeferredStringCharFromCode(instr_); 4419 codegen()->DoDeferredStringCharFromCode(instr_);
4489 } 4420 }
4490 LInstruction* instr() override { return instr_; } 4421 LInstruction* instr() override { return instr_; }
4491 4422
4492 private: 4423 private:
4493 LStringCharFromCode* instr_; 4424 LStringCharFromCode* instr_;
4494 }; 4425 };
4495 4426
4496 DeferredStringCharFromCode* deferred = 4427 DeferredStringCharFromCode* deferred =
4497 new (zone()) DeferredStringCharFromCode(this, instr); 4428 new (zone()) DeferredStringCharFromCode(this, instr);
4498 4429
4499 DCHECK(instr->hydrogen()->value()->representation().IsInteger32()); 4430 DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
4500 Register char_code = ToRegister(instr->char_code()); 4431 Register char_code = ToRegister(instr->char_code());
4501 Register result = ToRegister(instr->result()); 4432 Register result = ToRegister(instr->result());
4502 DCHECK(!char_code.is(result)); 4433 DCHECK(!char_code.is(result));
4503 4434
4504 __ cmpli(char_code, Operand(String::kMaxOneByteCharCode)); 4435 __ CmpLogicalP(char_code, Operand(String::kMaxOneByteCharCode));
4505 __ bgt(deferred->entry()); 4436 __ bgt(deferred->entry());
4506 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex); 4437 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
4507 __ ShiftLeftImm(r0, char_code, Operand(kPointerSizeLog2)); 4438 __ ShiftLeftP(r0, char_code, Operand(kPointerSizeLog2));
4508 __ add(result, result, r0); 4439 __ AddP(result, r0);
4509 __ LoadP(result, FieldMemOperand(result, FixedArray::kHeaderSize)); 4440 __ LoadP(result, FieldMemOperand(result, FixedArray::kHeaderSize));
4510 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); 4441 __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
4511 __ cmp(result, ip);
4512 __ beq(deferred->entry()); 4442 __ beq(deferred->entry());
4513 __ bind(deferred->exit()); 4443 __ bind(deferred->exit());
4514 } 4444 }
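The fast path above is a table lookup: char codes up to String::kMaxOneByteCharCode (the Latin-1 range) index a fixed array of preallocated single-character strings, and only a cache miss (an undefined entry) or a two-byte code falls through to the deferred runtime call. A behavioral sketch with an illustrative stand-in for the cache:

#include <cassert>
#include <string>
#include <vector>

static const int kMaxOneByteCharCode = 0xFF;  // Latin-1 range

// Illustrative stand-in for Heap::kSingleCharacterStringCache.
static std::vector<std::string> BuildCache() {
  std::vector<std::string> cache(kMaxOneByteCharCode + 1);
  for (int i = 0; i <= kMaxOneByteCharCode; ++i)
    cache[i] = std::string(1, static_cast<char>(i));
  return cache;
}

int main() {
  std::vector<std::string> cache = BuildCache();
  assert(cache['A'] == "A");  // fast path: direct index, no allocation
  return 0;
}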
4515 4445
4516
4517 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) { 4446 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4518 Register char_code = ToRegister(instr->char_code()); 4447 Register char_code = ToRegister(instr->char_code());
4519 Register result = ToRegister(instr->result()); 4448 Register result = ToRegister(instr->result());
4520 4449
4521 // TODO(3095996): Get rid of this. For now, we need to make the 4450 // TODO(3095996): Get rid of this. For now, we need to make the
4522 // result register contain a valid pointer because it is already 4451 // result register contain a valid pointer because it is already
4523 // contained in the register pointer map. 4452 // contained in the register pointer map.
4524 __ li(result, Operand::Zero()); 4453 __ LoadImmP(result, Operand::Zero());
4525 4454
4526 PushSafepointRegistersScope scope(this); 4455 PushSafepointRegistersScope scope(this);
4527 __ SmiTag(char_code); 4456 __ SmiTag(char_code);
4528 __ push(char_code); 4457 __ push(char_code);
4529 CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr, 4458 CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
4530 instr->context()); 4459 instr->context());
4531 __ StoreToSafepointRegisterSlot(r3, result); 4460 __ StoreToSafepointRegisterSlot(r2, result);
4532 } 4461 }
4533 4462
4534
4535 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { 4463 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4536 LOperand* input = instr->value(); 4464 LOperand* input = instr->value();
4537 DCHECK(input->IsRegister() || input->IsStackSlot()); 4465 DCHECK(input->IsRegister() || input->IsStackSlot());
4538 LOperand* output = instr->result(); 4466 LOperand* output = instr->result();
4539 DCHECK(output->IsDoubleRegister()); 4467 DCHECK(output->IsDoubleRegister());
4540 if (input->IsStackSlot()) { 4468 if (input->IsStackSlot()) {
4541 Register scratch = scratch0(); 4469 Register scratch = scratch0();
4542 __ LoadP(scratch, ToMemOperand(input)); 4470 __ LoadP(scratch, ToMemOperand(input));
4543 __ ConvertIntToDouble(scratch, ToDoubleRegister(output)); 4471 __ ConvertIntToDouble(scratch, ToDoubleRegister(output));
4544 } else { 4472 } else {
4545 __ ConvertIntToDouble(ToRegister(input), ToDoubleRegister(output)); 4473 __ ConvertIntToDouble(ToRegister(input), ToDoubleRegister(output));
4546 } 4474 }
4547 } 4475 }
4548 4476
4549
4550 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { 4477 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4551 LOperand* input = instr->value(); 4478 LOperand* input = instr->value();
4552 LOperand* output = instr->result(); 4479 LOperand* output = instr->result();
4553 __ ConvertUnsignedIntToDouble(ToRegister(input), ToDoubleRegister(output)); 4480 __ ConvertUnsignedIntToDouble(ToRegister(input), ToDoubleRegister(output));
4554 } 4481 }
4555 4482
4556
4557 void LCodeGen::DoNumberTagI(LNumberTagI* instr) { 4483 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4558 class DeferredNumberTagI final : public LDeferredCode { 4484 class DeferredNumberTagI final : public LDeferredCode {
4559 public: 4485 public:
4560 DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr) 4486 DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
4561 : LDeferredCode(codegen), instr_(instr) {} 4487 : LDeferredCode(codegen), instr_(instr) {}
4562 void Generate() override { 4488 void Generate() override {
4563 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(), 4489 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
4564 instr_->temp2(), SIGNED_INT32); 4490 instr_->temp2(), SIGNED_INT32);
4565 } 4491 }
4566 LInstruction* instr() override { return instr_; } 4492 LInstruction* instr() override { return instr_; }
4567 4493
4568 private: 4494 private:
4569 LNumberTagI* instr_; 4495 LNumberTagI* instr_;
4570 }; 4496 };
4571 4497
4572 Register src = ToRegister(instr->value()); 4498 Register src = ToRegister(instr->value());
4573 Register dst = ToRegister(instr->result()); 4499 Register dst = ToRegister(instr->result());
4574 4500
4575 DeferredNumberTagI* deferred = new (zone()) DeferredNumberTagI(this, instr); 4501 DeferredNumberTagI* deferred = new (zone()) DeferredNumberTagI(this, instr);
4576 #if V8_TARGET_ARCH_PPC64 4502 #if V8_TARGET_ARCH_S390X
4577 __ SmiTag(dst, src); 4503 __ SmiTag(dst, src);
4578 #else 4504 #else
4579 __ SmiTagCheckOverflow(dst, src, r0); 4505 // Add src to itself to detect Smi overflow.
4580 __ BranchOnOverflow(deferred->entry()); 4506 __ Add32(dst, src, src);
4507 __ b(overflow, deferred->entry());
4581 #endif 4508 #endif
4582 __ bind(deferred->exit()); 4509 __ bind(deferred->exit());
4583 } 4510 }
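On 31-bit-Smi targets, tagging is a left shift by one, so the Add32(dst, src, src) above doubles as the range check: the addition sets the overflow condition exactly when the value does not fit in 31 bits. A portable sketch of that check:

#include <cassert>
#include <cstdint>

// Tag 'value' as a 31-bit Smi (value << 1, tag bit 0), or report overflow.
static bool TrySmiTag32(int32_t value, int32_t* tagged) {
  // value + value overflows int32 exactly when value is outside
  // [-2^30, 2^30 - 1], i.e. when the value needs a heap number instead.
  if (value > (INT32_MAX >> 1) || value < (INT32_MIN / 2)) return false;
  *tagged = static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
  return true;
}

int main() {
  int32_t t;
  assert(TrySmiTag32(0x3FFFFFFF, &t) && t == 0x7FFFFFFE);
  assert(!TrySmiTag32(0x40000000, &t));  // deferred path: allocate a HeapNumber
  return 0;
}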
4584 4511
4585
4586 void LCodeGen::DoNumberTagU(LNumberTagU* instr) { 4512 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4587 class DeferredNumberTagU final : public LDeferredCode { 4513 class DeferredNumberTagU final : public LDeferredCode {
4588 public: 4514 public:
4589 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr) 4515 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4590 : LDeferredCode(codegen), instr_(instr) {} 4516 : LDeferredCode(codegen), instr_(instr) {}
4591 void Generate() override { 4517 void Generate() override {
4592 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(), 4518 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
4593 instr_->temp2(), UNSIGNED_INT32); 4519 instr_->temp2(), UNSIGNED_INT32);
4594 } 4520 }
4595 LInstruction* instr() override { return instr_; } 4521 LInstruction* instr() override { return instr_; }
4596 4522
4597 private: 4523 private:
4598 LNumberTagU* instr_; 4524 LNumberTagU* instr_;
4599 }; 4525 };
4600 4526
4601 Register input = ToRegister(instr->value()); 4527 Register input = ToRegister(instr->value());
4602 Register result = ToRegister(instr->result()); 4528 Register result = ToRegister(instr->result());
4603 4529
4604 DeferredNumberTagU* deferred = new (zone()) DeferredNumberTagU(this, instr); 4530 DeferredNumberTagU* deferred = new (zone()) DeferredNumberTagU(this, instr);
4605 __ Cmpli(input, Operand(Smi::kMaxValue), r0); 4531 __ CmpLogicalP(input, Operand(Smi::kMaxValue));
4606 __ bgt(deferred->entry()); 4532 __ bgt(deferred->entry());
4607 __ SmiTag(result, input); 4533 __ SmiTag(result, input);
4608 __ bind(deferred->exit()); 4534 __ bind(deferred->exit());
4609 } 4535 }
4610 4536
4611
4612 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, LOperand* value, 4537 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, LOperand* value,
4613 LOperand* temp1, LOperand* temp2, 4538 LOperand* temp1, LOperand* temp2,
4614 IntegerSignedness signedness) { 4539 IntegerSignedness signedness) {
4615 Label done, slow; 4540 Label done, slow;
4616 Register src = ToRegister(value); 4541 Register src = ToRegister(value);
4617 Register dst = ToRegister(instr->result()); 4542 Register dst = ToRegister(instr->result());
4618 Register tmp1 = scratch0(); 4543 Register tmp1 = scratch0();
4619 Register tmp2 = ToRegister(temp1); 4544 Register tmp2 = ToRegister(temp1);
4620 Register tmp3 = ToRegister(temp2); 4545 Register tmp3 = ToRegister(temp2);
4621 DoubleRegister dbl_scratch = double_scratch0(); 4546 DoubleRegister dbl_scratch = double_scratch0();
4622 4547
4623 if (signedness == SIGNED_INT32) { 4548 if (signedness == SIGNED_INT32) {
4624 // There was overflow, so bits 30 and 31 of the original integer 4549 // There was overflow, so bits 30 and 31 of the original integer
4625 // disagree. Try to allocate a heap number in new space and store 4550 // disagree. Try to allocate a heap number in new space and store
4626 // the value in there. If that fails, call the runtime system. 4551 // the value in there. If that fails, call the runtime system.
4627 if (dst.is(src)) { 4552 if (dst.is(src)) {
4628 __ SmiUntag(src, dst); 4553 __ SmiUntag(src, dst);
4629 __ xoris(src, src, Operand(HeapNumber::kSignMask >> 16)); 4554 __ xilf(src, Operand(HeapNumber::kSignMask));
4630 } 4555 }
4631 __ ConvertIntToDouble(src, dbl_scratch); 4556 __ ConvertIntToDouble(src, dbl_scratch);
4632 } else { 4557 } else {
4633 __ ConvertUnsignedIntToDouble(src, dbl_scratch); 4558 __ ConvertUnsignedIntToDouble(src, dbl_scratch);
4634 } 4559 }
4635 4560
4636 if (FLAG_inline_new) { 4561 if (FLAG_inline_new) {
4637 __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex); 4562 __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
4638 __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow); 4563 __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow);
4639 __ b(&done); 4564 __ b(&done);
4640 } 4565 }
4641 4566
4642 // Slow case: Call the runtime system to do the number allocation. 4567 // Slow case: Call the runtime system to do the number allocation.
4643 __ bind(&slow); 4568 __ bind(&slow);
4644 { 4569 {
4645 // TODO(3095996): Put a valid pointer value in the stack slot where the 4570 // TODO(3095996): Put a valid pointer value in the stack slot where the
4646 // result register is stored, as this register is in the pointer map, but 4571 // result register is stored, as this register is in the pointer map, but
4647 // contains an integer value. 4572 // contains an integer value.
4648 __ li(dst, Operand::Zero()); 4573 __ LoadImmP(dst, Operand::Zero());
4649 4574
4650 // Preserve the value of all registers. 4575 // Preserve the value of all registers.
4651 PushSafepointRegistersScope scope(this); 4576 PushSafepointRegistersScope scope(this);
4652 4577
4653 // NumberTagI and NumberTagD use the context from the frame, rather than 4578 // NumberTagI and NumberTagD use the context from the frame, rather than
4654 // the environment's HContext or HInlinedContext value. 4579 // the environment's HContext or HInlinedContext value.
4655 // They only call Runtime::kAllocateHeapNumber. 4580 // They only call Runtime::kAllocateHeapNumber.
4656 // The corresponding HChange instructions are added in a phase that does 4581 // The corresponding HChange instructions are added in a phase that does
4657 // not have easy access to the local context. 4582 // not have easy access to the local context.
4658 __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); 4583 __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4659 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); 4584 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4660 RecordSafepointWithRegisters(instr->pointer_map(), 0, 4585 RecordSafepointWithRegisters(instr->pointer_map(), 0,
4661 Safepoint::kNoLazyDeopt); 4586 Safepoint::kNoLazyDeopt);
4662 __ StoreToSafepointRegisterSlot(r3, dst); 4587 __ StoreToSafepointRegisterSlot(r2, dst);
4663 } 4588 }
4664 4589
4665 // Done. Put the value in dbl_scratch into the value of the allocated heap 4590 // Done. Put the value in dbl_scratch into the value of the allocated heap
4666 // number. 4591 // number.
4667 __ bind(&done); 4592 __ bind(&done);
4668 __ stfd(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset)); 4593 __ StoreDouble(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
4669 } 4594 }
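The xilf(src, Operand(HeapNumber::kSignMask)) above undoes the failed tag when dst aliases src: after the wrapped addition, SmiUntag (an arithmetic shift right) restores bits 0..30 of the original value, and since overflow means bits 30 and 31 disagreed, flipping the sign bit restores bit 31. A sketch of the recovery, assuming 32-bit two's-complement arithmetic:

#include <cassert>
#include <cstdint>

static int32_t RecoverAfterOverflowedTag(int32_t v) {
  uint32_t tagged = static_cast<uint32_t>(v) << 1;       // wrapped Smi tag
  int32_t untagged = static_cast<int32_t>(tagged) >> 1;  // SmiUntag
  return static_cast<int32_t>(static_cast<uint32_t>(untagged) ^ 0x80000000u);
}

int main() {
  // 0x40000000 does not fit in a 31-bit Smi; tagging it overflows.
  assert(RecoverAfterOverflowedTag(0x40000000) == 0x40000000);
  assert(RecoverAfterOverflowedTag(INT32_MIN) == INT32_MIN);
  return 0;
}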
4670 4595
4671
4672 void LCodeGen::DoNumberTagD(LNumberTagD* instr) { 4596 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4673 class DeferredNumberTagD final : public LDeferredCode { 4597 class DeferredNumberTagD final : public LDeferredCode {
4674 public: 4598 public:
4675 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) 4599 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4676 : LDeferredCode(codegen), instr_(instr) {} 4600 : LDeferredCode(codegen), instr_(instr) {}
4677 void Generate() override { codegen()->DoDeferredNumberTagD(instr_); } 4601 void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
4678 LInstruction* instr() override { return instr_; } 4602 LInstruction* instr() override { return instr_; }
4679 4603
4680 private: 4604 private:
4681 LNumberTagD* instr_; 4605 LNumberTagD* instr_;
4682 }; 4606 };
4683 4607
4684 DoubleRegister input_reg = ToDoubleRegister(instr->value()); 4608 DoubleRegister input_reg = ToDoubleRegister(instr->value());
4685 Register scratch = scratch0(); 4609 Register scratch = scratch0();
4686 Register reg = ToRegister(instr->result()); 4610 Register reg = ToRegister(instr->result());
4687 Register temp1 = ToRegister(instr->temp()); 4611 Register temp1 = ToRegister(instr->temp());
4688 Register temp2 = ToRegister(instr->temp2()); 4612 Register temp2 = ToRegister(instr->temp2());
4689 4613
4690 DeferredNumberTagD* deferred = new (zone()) DeferredNumberTagD(this, instr); 4614 DeferredNumberTagD* deferred = new (zone()) DeferredNumberTagD(this, instr);
4691 if (FLAG_inline_new) { 4615 if (FLAG_inline_new) {
4692 __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex); 4616 __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
4693 __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry()); 4617 __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
4694 } else { 4618 } else {
4695 __ b(deferred->entry()); 4619 __ b(deferred->entry());
4696 } 4620 }
4697 __ bind(deferred->exit()); 4621 __ bind(deferred->exit());
4698 __ stfd(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset)); 4622 __ StoreDouble(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
4699 } 4623 }
4700 4624
4701
4702 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { 4625 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4703 // TODO(3095996): Get rid of this. For now, we need to make the 4626 // TODO(3095996): Get rid of this. For now, we need to make the
4704 // result register contain a valid pointer because it is already 4627 // result register contain a valid pointer because it is already
4705 // contained in the register pointer map. 4628 // contained in the register pointer map.
4706 Register reg = ToRegister(instr->result()); 4629 Register reg = ToRegister(instr->result());
4707 __ li(reg, Operand::Zero()); 4630 __ LoadImmP(reg, Operand::Zero());
4708 4631
4709 PushSafepointRegistersScope scope(this); 4632 PushSafepointRegistersScope scope(this);
4710 // NumberTagI and NumberTagD use the context from the frame, rather than 4633 // NumberTagI and NumberTagD use the context from the frame, rather than
4711 // the environment's HContext or HInlinedContext value. 4634 // the environment's HContext or HInlinedContext value.
4712 // They only call Runtime::kAllocateHeapNumber. 4635 // They only call Runtime::kAllocateHeapNumber.
4713 // The corresponding HChange instructions are added in a phase that does 4636 // The corresponding HChange instructions are added in a phase that does
4714 // not have easy access to the local context. 4637 // not have easy access to the local context.
4715 __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); 4638 __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4716 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); 4639 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4717 RecordSafepointWithRegisters(instr->pointer_map(), 0, 4640 RecordSafepointWithRegisters(instr->pointer_map(), 0,
4718 Safepoint::kNoLazyDeopt); 4641 Safepoint::kNoLazyDeopt);
4719 __ StoreToSafepointRegisterSlot(r3, reg); 4642 __ StoreToSafepointRegisterSlot(r2, reg);
4720 } 4643 }
4721 4644
4722
4723 void LCodeGen::DoSmiTag(LSmiTag* instr) { 4645 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4724 HChange* hchange = instr->hydrogen(); 4646 HChange* hchange = instr->hydrogen();
4725 Register input = ToRegister(instr->value()); 4647 Register input = ToRegister(instr->value());
4726 Register output = ToRegister(instr->result()); 4648 Register output = ToRegister(instr->result());
4727 if (hchange->CheckFlag(HValue::kCanOverflow) && 4649 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4728 hchange->value()->CheckFlag(HValue::kUint32)) { 4650 hchange->value()->CheckFlag(HValue::kUint32)) {
4729 __ TestUnsignedSmiCandidate(input, r0); 4651 __ TestUnsignedSmiCandidate(input, r0);
4730 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, cr0); 4652 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, cr0);
4731 } 4653 }
4732 #if !V8_TARGET_ARCH_PPC64 4654 #if !V8_TARGET_ARCH_S390X
4733 if (hchange->CheckFlag(HValue::kCanOverflow) && 4655 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4734 !hchange->value()->CheckFlag(HValue::kUint32)) { 4656 !hchange->value()->CheckFlag(HValue::kUint32)) {
4735 __ SmiTagCheckOverflow(output, input, r0); 4657 __ SmiTagCheckOverflow(output, input, r0);
4736 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0); 4658 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
4737 } else { 4659 } else {
4738 #endif 4660 #endif
4739 __ SmiTag(output, input); 4661 __ SmiTag(output, input);
4740 #if !V8_TARGET_ARCH_PPC64 4662 #if !V8_TARGET_ARCH_S390X
4741 } 4663 }
4742 #endif 4664 #endif
4743 } 4665 }
4744 4666
4745
4746 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { 4667 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4747 Register scratch = scratch0();
4748 Register input = ToRegister(instr->value()); 4668 Register input = ToRegister(instr->value());
4749 Register result = ToRegister(instr->result()); 4669 Register result = ToRegister(instr->result());
4750 if (instr->needs_check()) { 4670 if (instr->needs_check()) {
4751 // If the input is a HeapObject, the value of scratch won't be zero. 4671 __ tmll(input, Operand(kHeapObjectTag));
4752 __ andi(scratch, input, Operand(kHeapObjectTag)); 4672 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
4753 __ SmiUntag(result, input); 4673 __ SmiUntag(result, input);
4754 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
4755 } else { 4674 } else {
4756 __ SmiUntag(result, input); 4675 __ SmiUntag(result, input);
4757 } 4676 }
4758 } 4677 }
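The needs_check path above tests the low tag bit with tmll before untagging: a Smi always has bit 0 clear, while a set kHeapObjectTag bit marks a heap pointer, so a nonzero test result deopts. A sketch, assuming the 32-bit layout with a 1-bit tag:

#include <cassert>
#include <cstdint>

const intptr_t kHeapObjectTag = 1;

static bool IsSmi(intptr_t tagged) { return (tagged & kHeapObjectTag) == 0; }

static intptr_t SmiUntag(intptr_t tagged) {
  assert(IsSmi(tagged));  // the generated code deopts here instead
  return tagged >> 1;     // arithmetic shift drops the tag bit
}

int main() {
  assert(SmiUntag(42 << 1) == 42);
  assert(!IsSmi(0x1001));  // odd word: a tagged HeapObject pointer
  return 0;
}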
4759 4678
4760
4761 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg, 4679 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
4762 DoubleRegister result_reg, 4680 DoubleRegister result_reg,
4763 NumberUntagDMode mode) { 4681 NumberUntagDMode mode) {
4764 bool can_convert_undefined_to_nan = 4682 bool can_convert_undefined_to_nan =
4765 instr->hydrogen()->can_convert_undefined_to_nan(); 4683 instr->hydrogen()->can_convert_undefined_to_nan();
4766 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero(); 4684 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
4767 4685
4768 Register scratch = scratch0(); 4686 Register scratch = scratch0();
4769 DCHECK(!result_reg.is(double_scratch0())); 4687 DCHECK(!result_reg.is(double_scratch0()));
4770 4688
4771 Label convert, load_smi, done; 4689 Label convert, load_smi, done;
4772 4690
4773 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { 4691 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
4774 // Smi check. 4692 // Smi check.
4775 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); 4693 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
4776 4694
4777 // Heap number map check. 4695 // Heap number map check.
4778 __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); 4696 __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4779 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); 4697 __ CmpP(scratch, RootMemOperand(Heap::kHeapNumberMapRootIndex));
4780 __ cmp(scratch, ip); 4698
4781 if (can_convert_undefined_to_nan) { 4699 if (can_convert_undefined_to_nan) {
4782 __ bne(&convert); 4700 __ bne(&convert, Label::kNear);
4783 } else { 4701 } else {
4784 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber); 4702 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
4785 } 4703 }
4786 // load heap number 4704 // load heap number
4787 __ lfd(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); 4705 __ ld(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
4788 if (deoptimize_on_minus_zero) { 4706 if (deoptimize_on_minus_zero) {
4789 __ TestDoubleIsMinusZero(result_reg, scratch, ip); 4707 __ TestDoubleIsMinusZero(result_reg, scratch, ip);
4790 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); 4708 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
4791 } 4709 }
4792 __ b(&done); 4710 __ b(&done, Label::kNear);
4793 if (can_convert_undefined_to_nan) { 4711 if (can_convert_undefined_to_nan) {
4794 __ bind(&convert); 4712 __ bind(&convert);
4795 // Convert undefined (and hole) to NaN. 4713 // Convert undefined (and hole) to NaN.
4796 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); 4714 __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
4797 __ cmp(input_reg, ip);
4798 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined); 4715 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
4799 __ LoadRoot(scratch, Heap::kNanValueRootIndex); 4716 __ LoadRoot(scratch, Heap::kNanValueRootIndex);
4800 __ lfd(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset)); 4717 __ ld(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
4801 __ b(&done); 4718 __ b(&done, Label::kNear);
4802 } 4719 }
4803 } else { 4720 } else {
4804 __ SmiUntag(scratch, input_reg); 4721 __ SmiUntag(scratch, input_reg);
4805 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); 4722 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
4806 } 4723 }
4807 // Smi to double register conversion 4724 // Smi to double register conversion
4808 __ bind(&load_smi); 4725 __ bind(&load_smi);
4809 // scratch: untagged value of input_reg 4726 // scratch: untagged value of input_reg
4810 __ ConvertIntToDouble(scratch, result_reg); 4727 __ ConvertIntToDouble(scratch, result_reg);
4811 __ bind(&done); 4728 __ bind(&done);
4812 } 4729 }
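The deoptimize_on_minus_zero check above cannot use a plain floating-point compare, since 0.0 == -0.0 is true; TestDoubleIsMinusZero therefore inspects the raw IEEE-754 bits. A sketch of the same predicate, assuming IEEE-754 doubles:

#include <cassert>
#include <cstdint>
#include <cstring>

static bool IsMinusZero(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  return bits == 0x8000000000000000ULL;  // sign bit set, all else zero
}

int main() {
  assert(IsMinusZero(-0.0));
  assert(!IsMinusZero(0.0));
  assert(!IsMinusZero(-1.0));
  return 0;
}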
4813 4730
4814
4815 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { 4731 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
4816 Register input_reg = ToRegister(instr->value()); 4732 Register input_reg = ToRegister(instr->value());
4817 Register scratch1 = scratch0(); 4733 Register scratch1 = scratch0();
4818 Register scratch2 = ToRegister(instr->temp()); 4734 Register scratch2 = ToRegister(instr->temp());
4819 DoubleRegister double_scratch = double_scratch0(); 4735 DoubleRegister double_scratch = double_scratch0();
4820 DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2()); 4736 DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());
4821 4737
4822 DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2)); 4738 DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
4823 DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1)); 4739 DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));
4824 4740
4825 Label done; 4741 Label done;
4826 4742
4827 // Heap number map check. 4743 // Heap number map check.
4828 __ LoadP(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset)); 4744 __ LoadP(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4829 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); 4745 __ CompareRoot(scratch1, Heap::kHeapNumberMapRootIndex);
4830 __ cmp(scratch1, ip);
4831 4746
4832 if (instr->truncating()) { 4747 if (instr->truncating()) {
4833 // Performs a truncating conversion of a floating point number as used by 4748 // Performs a truncating conversion of a floating point number as used by
4834 // the JS bitwise operations. 4749 // the JS bitwise operations.
4835 Label no_heap_number, check_bools, check_false; 4750 Label no_heap_number, check_bools, check_false;
4836 __ bne(&no_heap_number); 4751 __ bne(&no_heap_number, Label::kNear);
4837 __ mr(scratch2, input_reg); 4752 __ LoadRR(scratch2, input_reg);
4838 __ TruncateHeapNumberToI(input_reg, scratch2); 4753 __ TruncateHeapNumberToI(input_reg, scratch2);
4839 __ b(&done); 4754 __ b(&done, Label::kNear);
4840 4755
4841 // Check for Oddballs. Undefined/False is converted to zero and True to one 4756 // Check for Oddballs. Undefined/False is converted to zero and True to one
4842 // for truncating conversions. 4757 // for truncating conversions.
4843 __ bind(&no_heap_number); 4758 __ bind(&no_heap_number);
4844 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); 4759 __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
4845 __ cmp(input_reg, ip);
4846 __ bne(&check_bools); 4760 __ bne(&check_bools);
4847 __ li(input_reg, Operand::Zero()); 4761 __ LoadImmP(input_reg, Operand::Zero());
4848 __ b(&done); 4762 __ b(&done, Label::kNear);
4849 4763
4850 __ bind(&check_bools); 4764 __ bind(&check_bools);
4851 __ LoadRoot(ip, Heap::kTrueValueRootIndex); 4765 __ CompareRoot(input_reg, Heap::kTrueValueRootIndex);
4852 __ cmp(input_reg, ip); 4766 __ bne(&check_false, Label::kNear);
4853 __ bne(&check_false); 4767 __ LoadImmP(input_reg, Operand(1));
4854 __ li(input_reg, Operand(1)); 4768 __ b(&done, Label::kNear);
4855 __ b(&done);
4856 4769
4857 __ bind(&check_false); 4770 __ bind(&check_false);
4858 __ LoadRoot(ip, Heap::kFalseValueRootIndex); 4771 __ CompareRoot(input_reg, Heap::kFalseValueRootIndex);
4859 __ cmp(input_reg, ip);
4860 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean); 4772 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean);
4861 __ li(input_reg, Operand::Zero()); 4773 __ LoadImmP(input_reg, Operand::Zero());
4862 } else { 4774 } else {
4775 // Deoptimize if we don't have a heap number.
4863 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber); 4776 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
4864 4777
4865 __ lfd(double_scratch2, 4778 __ ld(double_scratch2,
4866 FieldMemOperand(input_reg, HeapNumber::kValueOffset)); 4779 FieldMemOperand(input_reg, HeapNumber::kValueOffset));
4867 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 4780 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4868 // preserve heap number pointer in scratch2 for minus zero check below 4781 // preserve heap number pointer in scratch2 for minus zero check below
4869 __ mr(scratch2, input_reg); 4782 __ LoadRR(scratch2, input_reg);
4870 } 4783 }
4871 __ TryDoubleToInt32Exact(input_reg, double_scratch2, scratch1, 4784 __ TryDoubleToInt32Exact(input_reg, double_scratch2, scratch1,
4872 double_scratch); 4785 double_scratch);
4873 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN); 4786 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
4874 4787
4875 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 4788 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4876 __ cmpi(input_reg, Operand::Zero()); 4789 __ CmpP(input_reg, Operand::Zero());
4877 __ bne(&done); 4790 __ bne(&done, Label::kNear);
4878 __ TestHeapNumberSign(scratch2, scratch1); 4791 __ TestHeapNumberSign(scratch2, scratch1);
4879 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero); 4792 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
4880 } 4793 }
4881 } 4794 }
4882 __ bind(&done); 4795 __ bind(&done);
4883 } 4796 }
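The truncating branch above mirrors JavaScript's bitwise-operator coercion: undefined and false map to 0, true to 1, and a heap number is truncated modulo 2^32. A behavioral sketch of the numeric part (the helper name is illustrative, not V8 API):

#include <cassert>
#include <cmath>
#include <cstdint>

// Truncate a double the way JS bitwise operators do: drop the fraction,
// reduce modulo 2^32, map NaN and infinities to 0.
static int32_t TruncateDouble(double d) {
  if (!std::isfinite(d)) return 0;
  double m = std::fmod(std::trunc(d), 4294967296.0);  // (-2^32, 2^32)
  if (m < 0) m += 4294967296.0;                       // [0, 2^32)
  return static_cast<int32_t>(static_cast<uint32_t>(m));
}

int main() {
  assert(TruncateDouble(3.9) == 3);
  assert(TruncateDouble(-1.5) == -1);
  assert(TruncateDouble(4294967301.0) == 5);  // wraps modulo 2^32
  assert(TruncateDouble(NAN) == 0);
  return 0;
}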
4884 4797
4885
4886 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { 4798 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4887 class DeferredTaggedToI final : public LDeferredCode { 4799 class DeferredTaggedToI final : public LDeferredCode {
4888 public: 4800 public:
4889 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) 4801 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
4890 : LDeferredCode(codegen), instr_(instr) {} 4802 : LDeferredCode(codegen), instr_(instr) {}
4891 void Generate() override { codegen()->DoDeferredTaggedToI(instr_); } 4803 void Generate() override { codegen()->DoDeferredTaggedToI(instr_); }
4892 LInstruction* instr() override { return instr_; } 4804 LInstruction* instr() override { return instr_; }
4893 4805
4894 private: 4806 private:
4895 LTaggedToI* instr_; 4807 LTaggedToI* instr_;
(...skipping 11 matching lines...) Expand all
4907 DeferredTaggedToI* deferred = new (zone()) DeferredTaggedToI(this, instr); 4819 DeferredTaggedToI* deferred = new (zone()) DeferredTaggedToI(this, instr);
4908 4820
4909 // Branch to deferred code if the input is a HeapObject. 4821 // Branch to deferred code if the input is a HeapObject.
4910 __ JumpIfNotSmi(input_reg, deferred->entry()); 4822 __ JumpIfNotSmi(input_reg, deferred->entry());
4911 4823
4912 __ SmiUntag(input_reg); 4824 __ SmiUntag(input_reg);
4913 __ bind(deferred->exit()); 4825 __ bind(deferred->exit());
4914 } 4826 }
4915 } 4827 }
4916 4828
4917
4918 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { 4829 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4919 LOperand* input = instr->value(); 4830 LOperand* input = instr->value();
4920 DCHECK(input->IsRegister()); 4831 DCHECK(input->IsRegister());
4921 LOperand* result = instr->result(); 4832 LOperand* result = instr->result();
4922 DCHECK(result->IsDoubleRegister()); 4833 DCHECK(result->IsDoubleRegister());
4923 4834
4924 Register input_reg = ToRegister(input); 4835 Register input_reg = ToRegister(input);
4925 DoubleRegister result_reg = ToDoubleRegister(result); 4836 DoubleRegister result_reg = ToDoubleRegister(result);
4926 4837
4927 HValue* value = instr->hydrogen()->value(); 4838 HValue* value = instr->hydrogen()->value();
4928 NumberUntagDMode mode = value->representation().IsSmi() 4839 NumberUntagDMode mode = value->representation().IsSmi()
4929 ? NUMBER_CANDIDATE_IS_SMI 4840 ? NUMBER_CANDIDATE_IS_SMI
4930 : NUMBER_CANDIDATE_IS_ANY_TAGGED; 4841 : NUMBER_CANDIDATE_IS_ANY_TAGGED;
4931 4842
4932 EmitNumberUntagD(instr, input_reg, result_reg, mode); 4843 EmitNumberUntagD(instr, input_reg, result_reg, mode);
4933 } 4844 }
4934 4845
4935
4936 void LCodeGen::DoDoubleToI(LDoubleToI* instr) { 4846 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
4937 Register result_reg = ToRegister(instr->result()); 4847 Register result_reg = ToRegister(instr->result());
4938 Register scratch1 = scratch0(); 4848 Register scratch1 = scratch0();
4939 DoubleRegister double_input = ToDoubleRegister(instr->value()); 4849 DoubleRegister double_input = ToDoubleRegister(instr->value());
4940 DoubleRegister double_scratch = double_scratch0(); 4850 DoubleRegister double_scratch = double_scratch0();
4941 4851
4942 if (instr->truncating()) { 4852 if (instr->truncating()) {
4943 __ TruncateDoubleToI(result_reg, double_input); 4853 __ TruncateDoubleToI(result_reg, double_input);
4944 } else { 4854 } else {
4945 __ TryDoubleToInt32Exact(result_reg, double_input, scratch1, 4855 __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
4946 double_scratch); 4856 double_scratch);
4947 // Deoptimize if the input wasn't an int32 (inside a double). 4857 // Deoptimize if the input wasn't an int32 (inside a double).
4948 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN); 4858 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
4949 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 4859 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4950 Label done; 4860 Label done;
4951 __ cmpi(result_reg, Operand::Zero()); 4861 __ CmpP(result_reg, Operand::Zero());
4952 __ bne(&done); 4862 __ bne(&done, Label::kNear);
4953 __ TestDoubleSign(double_input, scratch1); 4863 __ TestDoubleSign(double_input, scratch1);
4954 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero); 4864 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
4955 __ bind(&done); 4865 __ bind(&done);
4956 } 4866 }
4957 } 4867 }
4958 } 4868 }
4959 4869
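
For reference, the non-truncating path above deoptimizes on three conditions: lost precision, NaN, and a minus-zero result. A minimal C++ sketch of that contract (the helper name and signature are illustrative, not V8 API):

    #include <cmath>
    #include <cstdint>

    // Returns false exactly where the generated code would deoptimize.
    bool DoubleToInt32Exact(double input, bool bailout_on_minus_zero, int32_t* out) {
      // NaN compares false against everything, so this also rejects NaN.
      if (!(input >= INT32_MIN && input <= INT32_MAX)) return false;  // kLostPrecisionOrNaN
      int32_t truncated = static_cast<int32_t>(input);                // truncate toward zero
      if (static_cast<double>(truncated) != input) return false;      // kLostPrecisionOrNaN
      // Only a zero result can conceal -0.0, hence the CmpP against zero first.
      if (bailout_on_minus_zero && truncated == 0 && std::signbit(input)) return false;  // kMinusZero
      *out = truncated;
      return true;
    }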
4960
4961 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { 4870 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
4962 Register result_reg = ToRegister(instr->result()); 4871 Register result_reg = ToRegister(instr->result());
4963 Register scratch1 = scratch0(); 4872 Register scratch1 = scratch0();
4964 DoubleRegister double_input = ToDoubleRegister(instr->value()); 4873 DoubleRegister double_input = ToDoubleRegister(instr->value());
4965 DoubleRegister double_scratch = double_scratch0(); 4874 DoubleRegister double_scratch = double_scratch0();
4966 4875
4967 if (instr->truncating()) { 4876 if (instr->truncating()) {
4968 __ TruncateDoubleToI(result_reg, double_input); 4877 __ TruncateDoubleToI(result_reg, double_input);
4969 } else { 4878 } else {
4970 __ TryDoubleToInt32Exact(result_reg, double_input, scratch1, 4879 __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
4971 double_scratch); 4880 double_scratch);
4972 // Deoptimize if the input wasn't an int32 (inside a double). 4881 // Deoptimize if the input wasn't an int32 (inside a double).
4973 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN); 4882 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
4974 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 4883 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4975 Label done; 4884 Label done;
4976 __ cmpi(result_reg, Operand::Zero()); 4885 __ CmpP(result_reg, Operand::Zero());
4977 __ bne(&done); 4886 __ bne(&done, Label::kNear);
4978 __ TestDoubleSign(double_input, scratch1); 4887 __ TestDoubleSign(double_input, scratch1);
4979 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero); 4888 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
4980 __ bind(&done); 4889 __ bind(&done);
4981 } 4890 }
4982 } 4891 }
4983 #if V8_TARGET_ARCH_PPC64 4892 #if V8_TARGET_ARCH_S390X
4984 __ SmiTag(result_reg); 4893 __ SmiTag(result_reg);
4985 #else 4894 #else
4986 __ SmiTagCheckOverflow(result_reg, r0); 4895 __ SmiTagCheckOverflow(result_reg, r0);
4987 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0); 4896 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
4988 #endif 4897 #endif
4989 } 4898 }
4990 4899
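
DoDoubleToSmi is the same conversion followed by Smi tagging. On S390X the 32-bit payload always fits, so SmiTag is a plain shift; only the 32-bit build must check for overflow of the 31-bit smi range, roughly as in this sketch (assuming the usual smi = value << 1 layout):

    #include <cstdint>

    // Sketch of SmiTagCheckOverflow on a 32-bit target.
    bool SmiTag31(int32_t value, int32_t* tagged) {
      int64_t shifted = static_cast<int64_t>(value) * 2;             // avoid signed-overflow UB
      if (shifted < INT32_MIN || shifted > INT32_MAX) return false;  // kOverflow -> deopt
      *tagged = static_cast<int32_t>(shifted);
      return true;
    }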
4991
4992 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { 4900 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
4993 LOperand* input = instr->value(); 4901 LOperand* input = instr->value();
4994 __ TestIfSmi(ToRegister(input), r0); 4902 __ TestIfSmi(ToRegister(input));
4995 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0); 4903 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
4996 } 4904 }
4997 4905
4998
4999 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { 4906 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
5000 if (!instr->hydrogen()->value()->type().IsHeapObject()) { 4907 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
5001 LOperand* input = instr->value(); 4908 LOperand* input = instr->value();
5002 __ TestIfSmi(ToRegister(input), r0); 4909 __ TestIfSmi(ToRegister(input));
5003 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0); 4910 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
5004 } 4911 }
5005 } 4912 }
5006 4913
5007
5008 void LCodeGen::DoCheckArrayBufferNotNeutered( 4914 void LCodeGen::DoCheckArrayBufferNotNeutered(
5009 LCheckArrayBufferNotNeutered* instr) { 4915 LCheckArrayBufferNotNeutered* instr) {
5010 Register view = ToRegister(instr->view()); 4916 Register view = ToRegister(instr->view());
5011 Register scratch = scratch0(); 4917 Register scratch = scratch0();
5012 4918
5013 __ LoadP(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset)); 4919 __ LoadP(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
5014 __ lwz(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset)); 4920 __ LoadlW(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
5015 __ andi(r0, scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift)); 4921 __ And(r0, scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift));
5016 DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds, cr0); 4922 DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds, cr0);
5017 } 4923 }
5018 4924
5019
5020 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { 4925 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
5021 Register input = ToRegister(instr->value()); 4926 Register input = ToRegister(instr->value());
5022 Register scratch = scratch0(); 4927 Register scratch = scratch0();
5023 4928
5024 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); 4929 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5025 __ lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
5026 4930
5027 if (instr->hydrogen()->is_interval_check()) { 4931 if (instr->hydrogen()->is_interval_check()) {
5028 InstanceType first; 4932 InstanceType first;
5029 InstanceType last; 4933 InstanceType last;
5030 instr->hydrogen()->GetCheckInterval(&first, &last); 4934 instr->hydrogen()->GetCheckInterval(&first, &last);
5031 4935
5032 __ cmpli(scratch, Operand(first)); 4936 __ CmpLogicalByte(FieldMemOperand(scratch, Map::kInstanceTypeOffset),
4937 Operand(first));
5033 4938
5034 // If there is only one type in the interval, check for equality. 4939 // If there is only one type in the interval, check for equality.
5035 if (first == last) { 4940 if (first == last) {
5036 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType); 4941 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
5037 } else { 4942 } else {
5038 DeoptimizeIf(lt, instr, Deoptimizer::kWrongInstanceType); 4943 DeoptimizeIf(lt, instr, Deoptimizer::kWrongInstanceType);
5039 // Omit check for the last type. 4944 // Omit check for the last type.
5040 if (last != LAST_TYPE) { 4945 if (last != LAST_TYPE) {
5041 __ cmpli(scratch, Operand(last)); 4946 __ CmpLogicalByte(FieldMemOperand(scratch, Map::kInstanceTypeOffset),
4947 Operand(last));
5042 DeoptimizeIf(gt, instr, Deoptimizer::kWrongInstanceType); 4948 DeoptimizeIf(gt, instr, Deoptimizer::kWrongInstanceType);
5043 } 4949 }
5044 } 4950 }
5045 } else { 4951 } else {
5046 uint8_t mask; 4952 uint8_t mask;
5047 uint8_t tag; 4953 uint8_t tag;
5048 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); 4954 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
5049 4955
4956 __ LoadlB(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
4957
5050 if (base::bits::IsPowerOfTwo32(mask)) { 4958 if (base::bits::IsPowerOfTwo32(mask)) {
5051 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag)); 4959 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
5052 __ andi(r0, scratch, Operand(mask)); 4960 __ AndP(scratch, Operand(mask));
5053 DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType, 4961 DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType);
5054 cr0);
5055 } else { 4962 } else {
5056 __ andi(scratch, scratch, Operand(mask)); 4963 __ AndP(scratch, Operand(mask));
5057 __ cmpi(scratch, Operand(tag)); 4964 __ CmpP(scratch, Operand(tag));
5058 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType); 4965 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
5059 } 4966 }
5060 } 4967 }
5061 } 4968 }
5062 4969
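
Stripped of addressing details, the two shapes of instance-type check reduce to the following sketch (parameter names are illustrative):

    #include <cstdint>

    // Interval check over [first, last]; the upper compare is omitted when last is LAST_TYPE.
    bool IntervalCheck(uint8_t type, uint8_t first, uint8_t last, uint8_t last_type) {
      if (first == last) return type == first;     // single-type interval: equality
      if (type < first) return false;              // DeoptimizeIf(lt, ...)
      return last == last_type || type <= last;    // DeoptimizeIf(gt, ...) when emitted
    }

    // Mask/tag check; a power-of-two mask with tag == 0 degenerates to one bit test.
    bool MaskTagCheck(uint8_t type, uint8_t mask, uint8_t tag) {
      return (type & mask) == tag;
    }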
5063
5064 void LCodeGen::DoCheckValue(LCheckValue* instr) { 4970 void LCodeGen::DoCheckValue(LCheckValue* instr) {
5065 Register reg = ToRegister(instr->value()); 4971 Register reg = ToRegister(instr->value());
5066 Handle<HeapObject> object = instr->hydrogen()->object().handle(); 4972 Handle<HeapObject> object = instr->hydrogen()->object().handle();
5067 AllowDeferredHandleDereference smi_check; 4973 AllowDeferredHandleDereference smi_check;
5068 if (isolate()->heap()->InNewSpace(*object)) { 4974 if (isolate()->heap()->InNewSpace(*object)) {
5069 Register reg = ToRegister(instr->value()); 4975 Register reg = ToRegister(instr->value());
5070 Handle<Cell> cell = isolate()->factory()->NewCell(object); 4976 Handle<Cell> cell = isolate()->factory()->NewCell(object);
5071 __ mov(ip, Operand(cell)); 4977 __ mov(ip, Operand(cell));
5072 __ LoadP(ip, FieldMemOperand(ip, Cell::kValueOffset)); 4978 __ CmpP(reg, FieldMemOperand(ip, Cell::kValueOffset));
5073 __ cmp(reg, ip);
5074 } else { 4979 } else {
5075 __ Cmpi(reg, Operand(object), r0); 4980 __ CmpP(reg, Operand(object));
5076 } 4981 }
5077 DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch); 4982 DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
5078 } 4983 }
5079 4984
5080
5081 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { 4985 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
5082 Register temp = ToRegister(instr->temp()); 4986 Register temp = ToRegister(instr->temp());
5083 { 4987 {
5084 PushSafepointRegistersScope scope(this); 4988 PushSafepointRegistersScope scope(this);
5085 __ push(object); 4989 __ push(object);
5086 __ li(cp, Operand::Zero()); 4990 __ LoadImmP(cp, Operand::Zero());
5087 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); 4991 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
5088 RecordSafepointWithRegisters(instr->pointer_map(), 1, 4992 RecordSafepointWithRegisters(instr->pointer_map(), 1,
5089 Safepoint::kNoLazyDeopt); 4993 Safepoint::kNoLazyDeopt);
5090 __ StoreToSafepointRegisterSlot(r3, temp); 4994 __ StoreToSafepointRegisterSlot(r2, temp);
5091 } 4995 }
5092 __ TestIfSmi(temp, r0); 4996 __ TestIfSmi(temp);
5093 DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, cr0); 4997 DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, cr0);
5094 } 4998 }
5095 4999
5096
5097 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { 5000 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
5098 class DeferredCheckMaps final : public LDeferredCode { 5001 class DeferredCheckMaps final : public LDeferredCode {
5099 public: 5002 public:
5100 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) 5003 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
5101 : LDeferredCode(codegen), instr_(instr), object_(object) { 5004 : LDeferredCode(codegen), instr_(instr), object_(object) {
5102 SetExit(check_maps()); 5005 SetExit(check_maps());
5103 } 5006 }
5104 void Generate() override { 5007 void Generate() override {
5105 codegen()->DoDeferredInstanceMigration(instr_, object_); 5008 codegen()->DoDeferredInstanceMigration(instr_, object_);
5106 } 5009 }
5107 Label* check_maps() { return &check_maps_; } 5010 Label* check_maps() { return &check_maps_; }
5108 LInstruction* instr() override { return instr_; } 5011 LInstruction* instr() override { return instr_; }
5109 5012
5110 private: 5013 private:
5111 LCheckMaps* instr_; 5014 LCheckMaps* instr_;
5112 Label check_maps_; 5015 Label check_maps_;
5113 Register object_; 5016 Register object_;
5114 }; 5017 };
5115 5018
5116 if (instr->hydrogen()->IsStabilityCheck()) { 5019 if (instr->hydrogen()->IsStabilityCheck()) {
5117 const UniqueSet<Map>* maps = instr->hydrogen()->maps(); 5020 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5118 for (int i = 0; i < maps->size(); ++i) { 5021 for (int i = 0; i < maps->size(); ++i) {
5119 AddStabilityDependency(maps->at(i).handle()); 5022 AddStabilityDependency(maps->at(i).handle());
5120 } 5023 }
5121 return; 5024 return;
5122 } 5025 }
5123 5026
5124 Register object = ToRegister(instr->value()); 5027 LOperand* input = instr->value();
5125 Register map_reg = ToRegister(instr->temp()); 5028 DCHECK(input->IsRegister());
5126 5029 Register reg = ToRegister(input);
5127 __ LoadP(map_reg, FieldMemOperand(object, HeapObject::kMapOffset));
5128 5030
5129 DeferredCheckMaps* deferred = NULL; 5031 DeferredCheckMaps* deferred = NULL;
5130 if (instr->hydrogen()->HasMigrationTarget()) { 5032 if (instr->hydrogen()->HasMigrationTarget()) {
5131 deferred = new (zone()) DeferredCheckMaps(this, instr, object); 5033 deferred = new (zone()) DeferredCheckMaps(this, instr, reg);
5132 __ bind(deferred->check_maps()); 5034 __ bind(deferred->check_maps());
5133 } 5035 }
5134 5036
5135 const UniqueSet<Map>* maps = instr->hydrogen()->maps(); 5037 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5136 Label success; 5038 Label success;
5137 for (int i = 0; i < maps->size() - 1; i++) { 5039 for (int i = 0; i < maps->size() - 1; i++) {
5138 Handle<Map> map = maps->at(i).handle(); 5040 Handle<Map> map = maps->at(i).handle();
5139 __ CompareMap(map_reg, map, &success); 5041 __ CompareMap(reg, map, &success);
5140 __ beq(&success); 5042 __ beq(&success);
5141 } 5043 }
5142 5044
5143 Handle<Map> map = maps->at(maps->size() - 1).handle(); 5045 Handle<Map> map = maps->at(maps->size() - 1).handle();
5144 __ CompareMap(map_reg, map, &success); 5046 __ CompareMap(reg, map, &success);
5145 if (instr->hydrogen()->HasMigrationTarget()) { 5047 if (instr->hydrogen()->HasMigrationTarget()) {
5146 __ bne(deferred->entry()); 5048 __ bne(deferred->entry());
5147 } else { 5049 } else {
5148 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap); 5050 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
5149 } 5051 }
5150 5052
5151 __ bind(&success); 5053 __ bind(&success);
5152 } 5054 }
5153 5055
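
The compare chain above is equivalent to a linear scan over the expected maps; only a miss on the final compare falls through to the deferred migration stub (which retries from check_maps) or to a kWrongMap deopt. A sketch:

    #include <vector>

    struct Map;  // opaque stand-in

    bool MapCheckPasses(const Map* object_map, const std::vector<const Map*>& maps) {
      for (const Map* expected : maps) {
        if (object_map == expected) return true;   // __ beq(&success)
      }
      return false;  // deferred->entry() (migration) or Deoptimizer::kWrongMap
    }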
5154
5155 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { 5056 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5156 DoubleRegister value_reg = ToDoubleRegister(instr->unclamped()); 5057 DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
5157 Register result_reg = ToRegister(instr->result()); 5058 Register result_reg = ToRegister(instr->result());
5158 __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0()); 5059 __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
5159 } 5060 }
5160 5061
5161
5162 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { 5062 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
5163 Register unclamped_reg = ToRegister(instr->unclamped()); 5063 Register unclamped_reg = ToRegister(instr->unclamped());
5164 Register result_reg = ToRegister(instr->result()); 5064 Register result_reg = ToRegister(instr->result());
5165 __ ClampUint8(result_reg, unclamped_reg); 5065 __ ClampUint8(result_reg, unclamped_reg);
5166 } 5066 }
5167 5067
5168
5169 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { 5068 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
5170 Register scratch = scratch0(); 5069 Register scratch = scratch0();
5171 Register input_reg = ToRegister(instr->unclamped()); 5070 Register input_reg = ToRegister(instr->unclamped());
5172 Register result_reg = ToRegister(instr->result()); 5071 Register result_reg = ToRegister(instr->result());
5173 DoubleRegister temp_reg = ToDoubleRegister(instr->temp()); 5072 DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
5174 Label is_smi, done, heap_number; 5073 Label is_smi, done, heap_number;
5175 5074
5176 // Both smi and heap number cases are handled. 5075 // Both smi and heap number cases are handled.
5177 __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi); 5076 __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);
5178 5077
5179 // Check for heap number 5078 // Check for heap number
5180 __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); 5079 __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
5181 __ Cmpi(scratch, Operand(factory()->heap_number_map()), r0); 5080 __ CmpP(scratch, Operand(factory()->heap_number_map()));
5182 __ beq(&heap_number); 5081 __ beq(&heap_number, Label::kNear);
5183 5082
5184 // Check for undefined. Undefined is converted to zero for clamping 5083 // Check for undefined. Undefined is converted to zero for clamping
5185 // conversions. 5084 // conversions.
5186 __ Cmpi(input_reg, Operand(factory()->undefined_value()), r0); 5085 __ CmpP(input_reg, Operand(factory()->undefined_value()));
5187 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined); 5086 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
5188 __ li(result_reg, Operand::Zero()); 5087 __ LoadImmP(result_reg, Operand::Zero());
5189 __ b(&done); 5088 __ b(&done, Label::kNear);
5190 5089
5191 // Heap number 5090 // Heap number
5192 __ bind(&heap_number); 5091 __ bind(&heap_number);
5193 __ lfd(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); 5092 __ ld(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
5194 __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0()); 5093 __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
5195 __ b(&done); 5094 __ b(&done, Label::kNear);
5196 5095
5197 // smi 5096 // smi
5198 __ bind(&is_smi); 5097 __ bind(&is_smi);
5199 __ ClampUint8(result_reg, result_reg); 5098 __ ClampUint8(result_reg, result_reg);
5200 5099
5201 __ bind(&done); 5100 __ bind(&done);
5202 } 5101 }
5203 5102
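
Semantically, the three inputs (smi, heap number, undefined) clamp to [0, 255] as sketched below; round-to-nearest on the double path is an assumption matching ClampDoubleToUint8's usual behavior:

    #include <cmath>
    #include <cstdint>

    // Heap-number path; the smi path clamps an integer, and undefined yields 0.
    uint8_t ClampToUint8(double value) {
      if (std::isnan(value) || value <= 0.0) return 0;   // NaN and negatives -> 0
      if (value >= 255.0) return 255;
      return static_cast<uint8_t>(std::lrint(value));    // round to nearest
    }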
5204
5205 void LCodeGen::DoDoubleBits(LDoubleBits* instr) { 5103 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
5206 DoubleRegister value_reg = ToDoubleRegister(instr->value()); 5104 DoubleRegister value_reg = ToDoubleRegister(instr->value());
5207 Register result_reg = ToRegister(instr->result()); 5105 Register result_reg = ToRegister(instr->result());
5208 5106 // TODO(joransiu): Use non-memory version.
5107 __ stdy(value_reg, MemOperand(sp, -kDoubleSize));
5209 if (instr->hydrogen()->bits() == HDoubleBits::HIGH) { 5108 if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
5210 __ MovDoubleHighToInt(result_reg, value_reg); 5109 __ LoadlW(result_reg,
5110 MemOperand(sp, -kDoubleSize + Register::kExponentOffset));
5211 } else { 5111 } else {
5212 __ MovDoubleLowToInt(result_reg, value_reg); 5112 __ LoadlW(result_reg,
5113 MemOperand(sp, -kDoubleSize + Register::kMantissaOffset));
5213 } 5114 }
5214 } 5115 }
5215 5116
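
The spill below sp is only a bit-pattern extraction; kExponentOffset/kMantissaOffset absorb the fact that s390 is big-endian. An endian-neutral sketch:

    #include <cstdint>
    #include <cstring>

    // HIGH word holds sign + exponent (+ mantissa top bits); LOW holds the rest.
    uint32_t DoubleBits(double value, bool high) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));  // reinterpret FPR contents as an integer
      return high ? static_cast<uint32_t>(bits >> 32) : static_cast<uint32_t>(bits);
    }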
5216
5217 void LCodeGen::DoConstructDouble(LConstructDouble* instr) { 5117 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
5218 Register hi_reg = ToRegister(instr->hi()); 5118 Register hi_reg = ToRegister(instr->hi());
5219 Register lo_reg = ToRegister(instr->lo()); 5119 Register lo_reg = ToRegister(instr->lo());
5220 DoubleRegister result_reg = ToDoubleRegister(instr->result()); 5120 DoubleRegister result_reg = ToDoubleRegister(instr->result());
5221 #if V8_TARGET_ARCH_PPC64 5121 // TODO(joransiu): Construct with ldgr
5222 __ MovInt64ComponentsToDouble(result_reg, hi_reg, lo_reg, r0); 5122 Register scratch = scratch0();
5223 #else 5123
5224 __ MovInt64ToDouble(result_reg, hi_reg, lo_reg); 5124 // Combine hi_reg:lo_reg into a single 64-bit register.
5225 #endif 5125 __ sllg(scratch, hi_reg, Operand(32));
5126 __ lr(scratch, lo_reg);
5127
5128 // Bitwise convert from GPR to FPR
5129 __ ldgr(result_reg, scratch);
5226 } 5130 }
5227 5131
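
The sllg/lr/ldgr sequence performs the inverse: concatenate the two 32-bit halves and move the 64-bit pattern into an FPR without conversion. A portable sketch:

    #include <cstdint>
    #include <cstring>

    double ConstructDouble(uint32_t hi, uint32_t lo) {
      const uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;  // sllg + lr
      double result;
      std::memcpy(&result, &bits, sizeof(result));                   // ldgr, bitwise move
      return result;
    }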
5228
5229 void LCodeGen::DoAllocate(LAllocate* instr) { 5132 void LCodeGen::DoAllocate(LAllocate* instr) {
5230 class DeferredAllocate final : public LDeferredCode { 5133 class DeferredAllocate final : public LDeferredCode {
5231 public: 5134 public:
5232 DeferredAllocate(LCodeGen* codegen, LAllocate* instr) 5135 DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
5233 : LDeferredCode(codegen), instr_(instr) {} 5136 : LDeferredCode(codegen), instr_(instr) {}
5234 void Generate() override { codegen()->DoDeferredAllocate(instr_); } 5137 void Generate() override { codegen()->DoDeferredAllocate(instr_); }
5235 LInstruction* instr() override { return instr_; } 5138 LInstruction* instr() override { return instr_; }
5236 5139
5237 private: 5140 private:
5238 LAllocate* instr_; 5141 LAllocate* instr_;
(...skipping 22 matching lines...)
5261 } else { 5164 } else {
5262 Register size = ToRegister(instr->size()); 5165 Register size = ToRegister(instr->size());
5263 __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags); 5166 __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
5264 } 5167 }
5265 5168
5266 __ bind(deferred->exit()); 5169 __ bind(deferred->exit());
5267 5170
5268 if (instr->hydrogen()->MustPrefillWithFiller()) { 5171 if (instr->hydrogen()->MustPrefillWithFiller()) {
5269 if (instr->size()->IsConstantOperand()) { 5172 if (instr->size()->IsConstantOperand()) {
5270 int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); 5173 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5271 __ LoadIntLiteral(scratch, size - kHeapObjectTag); 5174 __ LoadIntLiteral(scratch, size);
5272 } else { 5175 } else {
5273 __ subi(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag)); 5176 scratch = ToRegister(instr->size());
5274 } 5177 }
5178 __ lay(scratch, MemOperand(scratch, -kPointerSize));
5179 Label loop;
5275 __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map())); 5180 __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
5276 Label loop;
5277 __ bind(&loop); 5181 __ bind(&loop);
5278 __ subi(scratch, scratch, Operand(kPointerSize)); 5182 __ StoreP(scratch2, MemOperand(scratch, result, -kHeapObjectTag));
5279 __ StorePX(scratch2, MemOperand(result, scratch)); 5183 #if V8_TARGET_ARCH_S390X
5280 __ cmpi(scratch, Operand::Zero()); 5184 __ lay(scratch, MemOperand(scratch, -kPointerSize));
5185 #else
5186 // TODO(joransiu): Improve the following sequence.
5187 // Need to use AHI instead of LAY, as the top nibble is not set with LAY,
5188 // causing an incorrect result with the signed compare.
5189 __ AddP(scratch, Operand(-kPointerSize));
5190 #endif
5191 __ CmpP(scratch, Operand::Zero());
5281 __ bge(&loop); 5192 __ bge(&loop);
5282 } 5193 }
5283 } 5194 }
5284 5195
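
The prefill loop stores the one-pointer filler map into every slot, walking offsets from size - kPointerSize down to zero, which is why it exits on the first negative offset (bge). In outline:

    #include <cstdint>

    constexpr int kPointerSize = sizeof(void*);  // assumption: target pointer width

    void PrefillWithFiller(uintptr_t* object_start, int size_in_bytes, uintptr_t filler_map) {
      for (int offset = size_in_bytes - kPointerSize; offset >= 0; offset -= kPointerSize) {
        object_start[offset / kPointerSize] = filler_map;  // StoreP(scratch2, ...)
      }
    }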
5285
5286 void LCodeGen::DoDeferredAllocate(LAllocate* instr) { 5196 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
5287 Register result = ToRegister(instr->result()); 5197 Register result = ToRegister(instr->result());
5288 5198
5289 // TODO(3095996): Get rid of this. For now, we need to make the 5199 // TODO(3095996): Get rid of this. For now, we need to make the
5290 // result register contain a valid pointer because it is already 5200 // result register contain a valid pointer because it is already
5291 // contained in the register pointer map. 5201 // contained in the register pointer map.
5292 __ LoadSmiLiteral(result, Smi::FromInt(0)); 5202 __ LoadSmiLiteral(result, Smi::FromInt(0));
5293 5203
5294 PushSafepointRegistersScope scope(this); 5204 PushSafepointRegistersScope scope(this);
5295 if (instr->size()->IsRegister()) { 5205 if (instr->size()->IsRegister()) {
5296 Register size = ToRegister(instr->size()); 5206 Register size = ToRegister(instr->size());
5297 DCHECK(!size.is(result)); 5207 DCHECK(!size.is(result));
5298 __ SmiTag(size); 5208 __ SmiTag(size);
5299 __ push(size); 5209 __ push(size);
5300 } else { 5210 } else {
5301 int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); 5211 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5302 #if !V8_TARGET_ARCH_PPC64 5212 #if !V8_TARGET_ARCH_S390X
5303 if (size >= 0 && size <= Smi::kMaxValue) { 5213 if (size >= 0 && size <= Smi::kMaxValue) {
5304 #endif 5214 #endif
5305 __ Push(Smi::FromInt(size)); 5215 __ Push(Smi::FromInt(size));
5306 #if !V8_TARGET_ARCH_PPC64 5216 #if !V8_TARGET_ARCH_S390X
5307 } else { 5217 } else {
5308 // We should never get here at runtime => abort 5218 // We should never get here at runtime => abort
5309 __ stop("invalid allocation size"); 5219 __ stop("invalid allocation size");
5310 return; 5220 return;
5311 } 5221 }
5312 #endif 5222 #endif
5313 } 5223 }
5314 5224
5315 int flags = AllocateDoubleAlignFlag::encode( 5225 int flags = AllocateDoubleAlignFlag::encode(
5316 instr->hydrogen()->MustAllocateDoubleAligned()); 5226 instr->hydrogen()->MustAllocateDoubleAligned());
5317 if (instr->hydrogen()->IsOldSpaceAllocation()) { 5227 if (instr->hydrogen()->IsOldSpaceAllocation()) {
5318 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); 5228 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5319 flags = AllocateTargetSpace::update(flags, OLD_SPACE); 5229 flags = AllocateTargetSpace::update(flags, OLD_SPACE);
5320 } else { 5230 } else {
5321 flags = AllocateTargetSpace::update(flags, NEW_SPACE); 5231 flags = AllocateTargetSpace::update(flags, NEW_SPACE);
5322 } 5232 }
5323 __ Push(Smi::FromInt(flags)); 5233 __ Push(Smi::FromInt(flags));
5324 5234
5325 CallRuntimeFromDeferred(Runtime::kAllocateInTargetSpace, 2, instr, 5235 CallRuntimeFromDeferred(Runtime::kAllocateInTargetSpace, 2, instr,
5326 instr->context()); 5236 instr->context());
5327 __ StoreToSafepointRegisterSlot(r3, result); 5237 __ StoreToSafepointRegisterSlot(r2, result);
5328 } 5238 }
5329 5239
5330
5331 void LCodeGen::DoToFastProperties(LToFastProperties* instr) { 5240 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
5332 DCHECK(ToRegister(instr->value()).is(r3)); 5241 DCHECK(ToRegister(instr->value()).is(r2));
5333 __ push(r3); 5242 __ push(r2);
5334 CallRuntime(Runtime::kToFastProperties, 1, instr); 5243 CallRuntime(Runtime::kToFastProperties, 1, instr);
5335 } 5244 }
5336 5245
5337
5338 void LCodeGen::DoTypeof(LTypeof* instr) { 5246 void LCodeGen::DoTypeof(LTypeof* instr) {
5339 DCHECK(ToRegister(instr->value()).is(r6)); 5247 DCHECK(ToRegister(instr->value()).is(r5));
5340 DCHECK(ToRegister(instr->result()).is(r3)); 5248 DCHECK(ToRegister(instr->result()).is(r2));
5341 Label end, do_call; 5249 Label end, do_call;
5342 Register value_register = ToRegister(instr->value()); 5250 Register value_register = ToRegister(instr->value());
5343 __ JumpIfNotSmi(value_register, &do_call); 5251 __ JumpIfNotSmi(value_register, &do_call);
5344 __ mov(r3, Operand(isolate()->factory()->number_string())); 5252 __ mov(r2, Operand(isolate()->factory()->number_string()));
5345 __ b(&end); 5253 __ b(&end);
5346 __ bind(&do_call); 5254 __ bind(&do_call);
5347 TypeofStub stub(isolate()); 5255 TypeofStub stub(isolate());
5348 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 5256 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5349 __ bind(&end); 5257 __ bind(&end);
5350 } 5258 }
5351 5259
5352
5353 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) { 5260 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5354 Register input = ToRegister(instr->value()); 5261 Register input = ToRegister(instr->value());
5355 5262
5356 Condition final_branch_condition = 5263 Condition final_branch_condition =
5357 EmitTypeofIs(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), input, 5264 EmitTypeofIs(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), input,
5358 instr->type_literal()); 5265 instr->type_literal());
5359 if (final_branch_condition != kNoCondition) { 5266 if (final_branch_condition != kNoCondition) {
5360 EmitBranch(instr, final_branch_condition); 5267 EmitBranch(instr, final_branch_condition);
5361 } 5268 }
5362 } 5269 }
5363 5270
5364
5365 Condition LCodeGen::EmitTypeofIs(Label* true_label, Label* false_label, 5271 Condition LCodeGen::EmitTypeofIs(Label* true_label, Label* false_label,
5366 Register input, Handle<String> type_name) { 5272 Register input, Handle<String> type_name) {
5367 Condition final_branch_condition = kNoCondition; 5273 Condition final_branch_condition = kNoCondition;
5368 Register scratch = scratch0(); 5274 Register scratch = scratch0();
5369 Factory* factory = isolate()->factory(); 5275 Factory* factory = isolate()->factory();
5370 if (String::Equals(type_name, factory->number_string())) { 5276 if (String::Equals(type_name, factory->number_string())) {
5371 __ JumpIfSmi(input, true_label); 5277 __ JumpIfSmi(input, true_label);
5372 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); 5278 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5373 __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex); 5279 __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
5374 final_branch_condition = eq; 5280 final_branch_condition = eq;
(...skipping 13 matching lines...)
5388 __ beq(true_label); 5294 __ beq(true_label);
5389 __ CompareRoot(input, Heap::kFalseValueRootIndex); 5295 __ CompareRoot(input, Heap::kFalseValueRootIndex);
5390 final_branch_condition = eq; 5296 final_branch_condition = eq;
5391 5297
5392 } else if (String::Equals(type_name, factory->undefined_string())) { 5298 } else if (String::Equals(type_name, factory->undefined_string())) {
5393 __ CompareRoot(input, Heap::kNullValueRootIndex); 5299 __ CompareRoot(input, Heap::kNullValueRootIndex);
5394 __ beq(false_label); 5300 __ beq(false_label);
5395 __ JumpIfSmi(input, false_label); 5301 __ JumpIfSmi(input, false_label);
5396 // Check for undetectable objects => true. 5302 // Check for undetectable objects => true.
5397 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); 5303 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5398 __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset)); 5304 __ LoadlB(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
5399 __ ExtractBit(r0, scratch, Map::kIsUndetectable); 5305 __ ExtractBit(r0, scratch, Map::kIsUndetectable);
5400 __ cmpi(r0, Operand::Zero()); 5306 __ CmpP(r0, Operand::Zero());
5401 final_branch_condition = ne; 5307 final_branch_condition = ne;
5402 5308
5403 } else if (String::Equals(type_name, factory->function_string())) { 5309 } else if (String::Equals(type_name, factory->function_string())) {
5404 __ JumpIfSmi(input, false_label); 5310 __ JumpIfSmi(input, false_label);
5405 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); 5311 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5406 __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset)); 5312 __ LoadlB(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
5407 __ andi(scratch, scratch, 5313 __ AndP(scratch, scratch,
5408 Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable))); 5314 Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
5409 __ cmpi(scratch, Operand(1 << Map::kIsCallable)); 5315 __ CmpP(scratch, Operand(1 << Map::kIsCallable));
5410 final_branch_condition = eq; 5316 final_branch_condition = eq;
5411 5317
5412 } else if (String::Equals(type_name, factory->object_string())) { 5318 } else if (String::Equals(type_name, factory->object_string())) {
5413 __ JumpIfSmi(input, false_label); 5319 __ JumpIfSmi(input, false_label);
5414 __ CompareRoot(input, Heap::kNullValueRootIndex); 5320 __ CompareRoot(input, Heap::kNullValueRootIndex);
5415 __ beq(true_label); 5321 __ beq(true_label);
5416 STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE); 5322 STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
5417 __ CompareObjectType(input, scratch, ip, FIRST_JS_RECEIVER_TYPE); 5323 __ CompareObjectType(input, scratch, ip, FIRST_JS_RECEIVER_TYPE);
5418 __ blt(false_label); 5324 __ blt(false_label);
5419 // Check for callable or undetectable objects => false. 5325 // Check for callable or undetectable objects => false.
5420 __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset)); 5326 __ LoadlB(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
5421 __ andi(r0, scratch, 5327 __ AndP(r0, scratch,
5422 Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable))); 5328 Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
5423 __ cmpi(r0, Operand::Zero()); 5329 __ CmpP(r0, Operand::Zero());
5424 final_branch_condition = eq; 5330 final_branch_condition = eq;
5425 5331
5426 // clang-format off 5332 // clang-format off
5427 #define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \ 5333 #define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
5428 } else if (String::Equals(type_name, factory->type##_string())) { \ 5334 } else if (String::Equals(type_name, factory->type##_string())) { \
5429 __ JumpIfSmi(input, false_label); \ 5335 __ JumpIfSmi(input, false_label); \
5430 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); \ 5336 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); \
5431 __ CompareRoot(scratch, Heap::k##Type##MapRootIndex); \ 5337 __ CompareRoot(scratch, Heap::k##Type##MapRootIndex); \
5432 final_branch_condition = eq; 5338 final_branch_condition = eq;
5433 SIMD128_TYPES(SIMD128_TYPE) 5339 SIMD128_TYPES(SIMD128_TYPE)
5434 #undef SIMD128_TYPE 5340 #undef SIMD128_TYPE
5435 // clang-format on 5341 // clang-format on
5436 5342
5437 } else { 5343 } else {
5438 __ b(false_label); 5344 __ b(false_label);
5439 } 5345 }
5440 5346
5441 return final_branch_condition; 5347 return final_branch_condition;
5442 } 5348 }
5443 5349
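
Most arms of EmitTypeofIs are root or bit-field tests; the "function" and "object" arms examine the same two map bits with opposite expectations. A sketch (bit positions are stand-ins for Map::kIsCallable and Map::kIsUndetectable):

    #include <cstdint>

    constexpr uint8_t kIsCallableBit = 1 << 0;      // stand-in position
    constexpr uint8_t kIsUndetectableBit = 1 << 1;  // stand-in position

    // typeof x == "function": callable and not undetectable.
    bool TypeofIsFunction(uint8_t bit_field) {
      return (bit_field & (kIsCallableBit | kIsUndetectableBit)) == kIsCallableBit;
    }

    // The "object" arm requires neither bit to be set.
    bool ObjectArmBitsClear(uint8_t bit_field) {
      return (bit_field & (kIsCallableBit | kIsUndetectableBit)) == 0;
    }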
5444
5445 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) { 5350 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
5446 if (info()->ShouldEnsureSpaceForLazyDeopt()) { 5351 if (info()->ShouldEnsureSpaceForLazyDeopt()) {
5447 // Ensure that we have enough space after the previous lazy-bailout 5352 // Ensure that we have enough space after the previous lazy-bailout
5448 // instruction for patching the code here. 5353 // instruction for patching the code here.
5449 int current_pc = masm()->pc_offset(); 5354 int current_pc = masm()->pc_offset();
5450 if (current_pc < last_lazy_deopt_pc_ + space_needed) { 5355 if (current_pc < last_lazy_deopt_pc_ + space_needed) {
5451 int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc; 5356 int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
5452 DCHECK_EQ(0, padding_size % Assembler::kInstrSize); 5357 DCHECK_EQ(0, padding_size % 2);
5453 while (padding_size > 0) { 5358 while (padding_size > 0) {
5454 __ nop(); 5359 __ nop();
5455 padding_size -= Assembler::kInstrSize; 5360 padding_size -= 2;
5456 } 5361 }
5457 } 5362 }
5458 } 5363 }
5459 last_lazy_deopt_pc_ = masm()->pc_offset(); 5364 last_lazy_deopt_pc_ = masm()->pc_offset();
5460 } 5365 }
5461 5366
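
The padding arithmetic tracks the architecture's minimum instruction size: PPC pads in Assembler::kInstrSize (4-byte) units, while s390 instruction lengths are multiples of 2 bytes, hence the % 2 DCHECK and 2-byte nops. The byte count itself is simply:

    // Padding bytes still owed; emitted above as pad / 2 two-byte nops.
    int LazyDeoptPaddingBytes(int current_pc, int last_lazy_deopt_pc, int space_needed) {
      int pad = last_lazy_deopt_pc + space_needed - current_pc;
      return pad > 0 ? pad : 0;
    }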
5462
5463 void LCodeGen::DoLazyBailout(LLazyBailout* instr) { 5367 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
5464 last_lazy_deopt_pc_ = masm()->pc_offset(); 5368 last_lazy_deopt_pc_ = masm()->pc_offset();
5465 DCHECK(instr->HasEnvironment()); 5369 DCHECK(instr->HasEnvironment());
5466 LEnvironment* env = instr->environment(); 5370 LEnvironment* env = instr->environment();
5467 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); 5371 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5468 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); 5372 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5469 } 5373 }
5470 5374
5471
5472 void LCodeGen::DoDeoptimize(LDeoptimize* instr) { 5375 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
5473 Deoptimizer::BailoutType type = instr->hydrogen()->type(); 5376 Deoptimizer::BailoutType type = instr->hydrogen()->type();
5474 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the 5377 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
5475 // needed return address), even though the implementation of LAZY and EAGER is 5378 // needed return address), even though the implementation of LAZY and EAGER is
5476 // now identical. When LAZY is eventually completely folded into EAGER, remove 5379 // now identical. When LAZY is eventually completely folded into EAGER, remove
5477 // the special case below. 5380 // the special case below.
5478 if (info()->IsStub() && type == Deoptimizer::EAGER) { 5381 if (info()->IsStub() && type == Deoptimizer::EAGER) {
5479 type = Deoptimizer::LAZY; 5382 type = Deoptimizer::LAZY;
5480 } 5383 }
5481 5384
5482 DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type); 5385 DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type);
5483 } 5386 }
5484 5387
5485
5486 void LCodeGen::DoDummy(LDummy* instr) { 5388 void LCodeGen::DoDummy(LDummy* instr) {
5487 // Nothing to see here, move on! 5389 // Nothing to see here, move on!
5488 } 5390 }
5489 5391
5490
5491 void LCodeGen::DoDummyUse(LDummyUse* instr) { 5392 void LCodeGen::DoDummyUse(LDummyUse* instr) {
5492 // Nothing to see here, move on! 5393 // Nothing to see here, move on!
5493 } 5394 }
5494 5395
5495
5496 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) { 5396 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
5497 PushSafepointRegistersScope scope(this); 5397 PushSafepointRegistersScope scope(this);
5498 LoadContextFromDeferred(instr->context()); 5398 LoadContextFromDeferred(instr->context());
5499 __ CallRuntimeSaveDoubles(Runtime::kStackGuard); 5399 __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
5500 RecordSafepointWithLazyDeopt( 5400 RecordSafepointWithLazyDeopt(
5501 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); 5401 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
5502 DCHECK(instr->HasEnvironment()); 5402 DCHECK(instr->HasEnvironment());
5503 LEnvironment* env = instr->environment(); 5403 LEnvironment* env = instr->environment();
5504 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); 5404 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5505 } 5405 }
5506 5406
5507
5508 void LCodeGen::DoStackCheck(LStackCheck* instr) { 5407 void LCodeGen::DoStackCheck(LStackCheck* instr) {
5509 class DeferredStackCheck final : public LDeferredCode { 5408 class DeferredStackCheck final : public LDeferredCode {
5510 public: 5409 public:
5511 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr) 5410 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
5512 : LDeferredCode(codegen), instr_(instr) {} 5411 : LDeferredCode(codegen), instr_(instr) {}
5513 void Generate() override { codegen()->DoDeferredStackCheck(instr_); } 5412 void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
5514 LInstruction* instr() override { return instr_; } 5413 LInstruction* instr() override { return instr_; }
5515 5414
5516 private: 5415 private:
5517 LStackCheck* instr_; 5416 LStackCheck* instr_;
5518 }; 5417 };
5519 5418
5520 DCHECK(instr->HasEnvironment()); 5419 DCHECK(instr->HasEnvironment());
5521 LEnvironment* env = instr->environment(); 5420 LEnvironment* env = instr->environment();
5522 // There is no LLazyBailout instruction for stack-checks. We have to 5421 // There is no LLazyBailout instruction for stack-checks. We have to
5523 // prepare for lazy deoptimization explicitly here. 5422 // prepare for lazy deoptimization explicitly here.
5524 if (instr->hydrogen()->is_function_entry()) { 5423 if (instr->hydrogen()->is_function_entry()) {
5525 // Perform stack overflow check. 5424 // Perform stack overflow check.
5526 Label done; 5425 Label done;
5527 __ LoadRoot(ip, Heap::kStackLimitRootIndex); 5426 __ CmpLogicalP(sp, RootMemOperand(Heap::kStackLimitRootIndex));
5528 __ cmpl(sp, ip); 5427 __ bge(&done, Label::kNear);
5529 __ bge(&done);
5530 DCHECK(instr->context()->IsRegister()); 5428 DCHECK(instr->context()->IsRegister());
5531 DCHECK(ToRegister(instr->context()).is(cp)); 5429 DCHECK(ToRegister(instr->context()).is(cp));
5532 CallCode(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET, 5430 CallCode(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET,
5533 instr); 5431 instr);
5534 __ bind(&done); 5432 __ bind(&done);
5535 } else { 5433 } else {
5536 DCHECK(instr->hydrogen()->is_backwards_branch()); 5434 DCHECK(instr->hydrogen()->is_backwards_branch());
5537 // Perform stack overflow check if this goto needs it before jumping. 5435 // Perform stack overflow check if this goto needs it before jumping.
5538 DeferredStackCheck* deferred_stack_check = 5436 DeferredStackCheck* deferred_stack_check =
5539 new (zone()) DeferredStackCheck(this, instr); 5437 new (zone()) DeferredStackCheck(this, instr);
5540 __ LoadRoot(ip, Heap::kStackLimitRootIndex); 5438 __ CmpLogicalP(sp, RootMemOperand(Heap::kStackLimitRootIndex));
5541 __ cmpl(sp, ip);
5542 __ blt(deferred_stack_check->entry()); 5439 __ blt(deferred_stack_check->entry());
5543 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); 5440 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
5544 __ bind(instr->done_label()); 5441 __ bind(instr->done_label());
5545 deferred_stack_check->SetExit(instr->done_label()); 5442 deferred_stack_check->SetExit(instr->done_label());
5546 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); 5443 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5547 // Don't record a deoptimization index for the safepoint here. 5444 // Don't record a deoptimization index for the safepoint here.
5548 // This will be done explicitly when emitting call and the safepoint in 5445 // This will be done explicitly when emitting call and the safepoint in
5549 // the deferred code. 5446 // the deferred code.
5550 } 5447 }
5551 } 5448 }
5552 5449
5553
5554 void LCodeGen::DoOsrEntry(LOsrEntry* instr) { 5450 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
5555 // This is a pseudo-instruction that ensures that the environment here is 5451 // This is a pseudo-instruction that ensures that the environment here is
5556 // properly registered for deoptimization and records the assembler's PC 5452 // properly registered for deoptimization and records the assembler's PC
5557 // offset. 5453 // offset.
5558 LEnvironment* environment = instr->environment(); 5454 LEnvironment* environment = instr->environment();
5559 5455
5560 // If the environment were already registered, we would have no way of 5456 // If the environment were already registered, we would have no way of
5561 // backpatching it with the spill slot operands. 5457 // backpatching it with the spill slot operands.
5562 DCHECK(!environment->HasBeenRegistered()); 5458 DCHECK(!environment->HasBeenRegistered());
5563 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); 5459 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
5564 5460
5565 GenerateOsrPrologue(); 5461 GenerateOsrPrologue();
5566 } 5462 }
5567 5463
5568
5569 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { 5464 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5570 Label use_cache, call_runtime; 5465 Label use_cache, call_runtime;
5571 __ CheckEnumCache(&call_runtime); 5466 __ CheckEnumCache(&call_runtime);
5572 5467
5573 __ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset)); 5468 __ LoadP(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
5574 __ b(&use_cache); 5469 __ b(&use_cache);
5575 5470
5576 // Get the set of properties to enumerate. 5471 // Get the set of properties to enumerate.
5577 __ bind(&call_runtime); 5472 __ bind(&call_runtime);
5578 __ push(r3); 5473 __ push(r2);
5579 CallRuntime(Runtime::kForInEnumerate, instr); 5474 CallRuntime(Runtime::kForInEnumerate, instr);
5580 __ bind(&use_cache); 5475 __ bind(&use_cache);
5581 } 5476 }
5582 5477
5583
5584 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { 5478 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5585 Register map = ToRegister(instr->map()); 5479 Register map = ToRegister(instr->map());
5586 Register result = ToRegister(instr->result()); 5480 Register result = ToRegister(instr->result());
5587 Label load_cache, done; 5481 Label load_cache, done;
5588 __ EnumLength(result, map); 5482 __ EnumLength(result, map);
5589 __ CmpSmiLiteral(result, Smi::FromInt(0), r0); 5483 __ CmpSmiLiteral(result, Smi::FromInt(0), r0);
5590 __ bne(&load_cache); 5484 __ bne(&load_cache, Label::kNear);
5591 __ mov(result, Operand(isolate()->factory()->empty_fixed_array())); 5485 __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
5592 __ b(&done); 5486 __ b(&done, Label::kNear);
5593 5487
5594 __ bind(&load_cache); 5488 __ bind(&load_cache);
5595 __ LoadInstanceDescriptors(map, result); 5489 __ LoadInstanceDescriptors(map, result);
5596 __ LoadP(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); 5490 __ LoadP(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
5597 __ LoadP(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); 5491 __ LoadP(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
5598 __ cmpi(result, Operand::Zero()); 5492 __ CmpP(result, Operand::Zero());
5599 DeoptimizeIf(eq, instr, Deoptimizer::kNoCache); 5493 DeoptimizeIf(eq, instr, Deoptimizer::kNoCache);
5600 5494
5601 __ bind(&done); 5495 __ bind(&done);
5602 } 5496 }
5603 5497
5604
5605 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { 5498 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5606 Register object = ToRegister(instr->value()); 5499 Register object = ToRegister(instr->value());
5607 Register map = ToRegister(instr->map()); 5500 Register map = ToRegister(instr->map());
5608 __ LoadP(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset)); 5501 __ LoadP(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
5609 __ cmp(map, scratch0()); 5502 __ CmpP(map, scratch0());
5610 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap); 5503 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
5611 } 5504 }
5612 5505
5613
5614 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, 5506 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
5615 Register result, Register object, 5507 Register result, Register object,
5616 Register index) { 5508 Register index) {
5617 PushSafepointRegistersScope scope(this); 5509 PushSafepointRegistersScope scope(this);
5618 __ Push(object, index); 5510 __ Push(object, index);
5619 __ li(cp, Operand::Zero()); 5511 __ LoadImmP(cp, Operand::Zero());
5620 __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble); 5512 __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
5621 RecordSafepointWithRegisters(instr->pointer_map(), 2, 5513 RecordSafepointWithRegisters(instr->pointer_map(), 2,
5622 Safepoint::kNoLazyDeopt); 5514 Safepoint::kNoLazyDeopt);
5623 __ StoreToSafepointRegisterSlot(r3, result); 5515 __ StoreToSafepointRegisterSlot(r2, result);
5624 } 5516 }
5625 5517
5626
5627 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) { 5518 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5628 class DeferredLoadMutableDouble final : public LDeferredCode { 5519 class DeferredLoadMutableDouble final : public LDeferredCode {
5629 public: 5520 public:
5630 DeferredLoadMutableDouble(LCodeGen* codegen, LLoadFieldByIndex* instr, 5521 DeferredLoadMutableDouble(LCodeGen* codegen, LLoadFieldByIndex* instr,
5631 Register result, Register object, Register index) 5522 Register result, Register object, Register index)
5632 : LDeferredCode(codegen), 5523 : LDeferredCode(codegen),
5633 instr_(instr), 5524 instr_(instr),
5634 result_(result), 5525 result_(result),
5635 object_(object), 5526 object_(object),
5636 index_(index) {} 5527 index_(index) {}
(...skipping 14 matching lines...)
5651 Register result = ToRegister(instr->result()); 5542 Register result = ToRegister(instr->result());
5652 Register scratch = scratch0(); 5543 Register scratch = scratch0();
5653 5544
5654 DeferredLoadMutableDouble* deferred; 5545 DeferredLoadMutableDouble* deferred;
5655 deferred = new (zone()) 5546 deferred = new (zone())
5656 DeferredLoadMutableDouble(this, instr, result, object, index); 5547 DeferredLoadMutableDouble(this, instr, result, object, index);
5657 5548
5658 Label out_of_object, done; 5549 Label out_of_object, done;
5659 5550
5660 __ TestBitMask(index, reinterpret_cast<uintptr_t>(Smi::FromInt(1)), r0); 5551 __ TestBitMask(index, reinterpret_cast<uintptr_t>(Smi::FromInt(1)), r0);
5661 __ bne(deferred->entry(), cr0); 5552 __ bne(deferred->entry());
5662 __ ShiftRightArithImm(index, index, 1); 5553 __ ShiftRightArithP(index, index, Operand(1));
5663 5554
5664 __ cmpi(index, Operand::Zero()); 5555 __ CmpP(index, Operand::Zero());
5665 __ blt(&out_of_object); 5556 __ blt(&out_of_object, Label::kNear);
5666 5557
5667 __ SmiToPtrArrayOffset(r0, index); 5558 __ SmiToPtrArrayOffset(r0, index);
5668 __ add(scratch, object, r0); 5559 __ AddP(scratch, object, r0);
5669 __ LoadP(result, FieldMemOperand(scratch, JSObject::kHeaderSize)); 5560 __ LoadP(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
5670 5561
5671 __ b(&done); 5562 __ b(&done, Label::kNear);
5672 5563
5673 __ bind(&out_of_object); 5564 __ bind(&out_of_object);
5674 __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); 5565 __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
5675 // Index is equal to the negated out-of-object property index plus 1. 5566 // Index is equal to the negated out-of-object property index plus 1.
5676 __ SmiToPtrArrayOffset(r0, index); 5567 __ SmiToPtrArrayOffset(r0, index);
5677 __ sub(scratch, result, r0); 5568 __ SubP(scratch, result, r0);
5678 __ LoadP(result, 5569 __ LoadP(result,
5679 FieldMemOperand(scratch, FixedArray::kHeaderSize - kPointerSize)); 5570 FieldMemOperand(scratch, FixedArray::kHeaderSize - kPointerSize));
5680 __ bind(deferred->exit()); 5571 __ bind(deferred->exit());
5681 __ bind(&done); 5572 __ bind(&done);
5682 } 5573 }
5683 5574
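
The smi index packs three pieces of information: a low flag bit that routes mutable HeapNumber fields to the deferred runtime call, a sign that selects in-object versus backing-store, and the slot number. On the untagged value the decode is roughly the following sketch (names illustrative; the minus-one bias matches the kHeaderSize - kPointerSize load above):

    #include <cstdint>

    enum class FieldLocation { kMutableDouble, kInObject, kOutOfObject };

    FieldLocation DecodeFieldIndex(intptr_t index, intptr_t* slot) {
      if (index & 1) return FieldLocation::kMutableDouble;  // deferred runtime path
      index >>= 1;                      // drop the flag bit (arithmetic shift assumed)
      if (index >= 0) {
        *slot = index;                  // field at JSObject::kHeaderSize + slot * kPointerSize
        return FieldLocation::kInObject;
      }
      *slot = -index - 1;               // slot in the properties FixedArray
      return FieldLocation::kOutOfObject;
    }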
5684
5685 void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) { 5575 void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
5686 Register context = ToRegister(instr->context()); 5576 Register context = ToRegister(instr->context());
5687 __ StoreP(context, MemOperand(fp, StandardFrameConstants::kContextOffset)); 5577 __ StoreP(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
5688 } 5578 }
5689 5579
5690
5691 #undef __ 5580 #undef __
5692 } // namespace internal 5581 } // namespace internal
5693 } // namespace v8 5582 } // namespace v8