Chromium Code Reviews

Side by Side Diff: src/ppc/lithium-codegen-ppc.cc

Issue 714093002: PowerPC specific sub-directories. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 1 month ago
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/v8.h" 5 #include "src/v8.h"
6 6
7 #include "src/arm/lithium-codegen-arm.h"
8 #include "src/arm/lithium-gap-resolver-arm.h"
9 #include "src/base/bits.h" 7 #include "src/base/bits.h"
10 #include "src/code-factory.h" 8 #include "src/code-factory.h"
11 #include "src/code-stubs.h" 9 #include "src/code-stubs.h"
12 #include "src/hydrogen-osr.h" 10 #include "src/hydrogen-osr.h"
13 #include "src/ic/ic.h" 11 #include "src/ic/ic.h"
14 #include "src/ic/stub-cache.h" 12 #include "src/ic/stub-cache.h"
13 #include "src/ppc/lithium-codegen-ppc.h"
14 #include "src/ppc/lithium-gap-resolver-ppc.h"
15 15
16 namespace v8 { 16 namespace v8 {
17 namespace internal { 17 namespace internal {
18 18
19 19
20 class SafepointGenerator FINAL : public CallWrapper { 20 class SafepointGenerator FINAL : public CallWrapper {
21 public: 21 public:
22 SafepointGenerator(LCodeGen* codegen, 22 SafepointGenerator(LCodeGen* codegen, LPointerMap* pointers,
23 LPointerMap* pointers,
24 Safepoint::DeoptMode mode) 23 Safepoint::DeoptMode mode)
25 : codegen_(codegen), 24 : codegen_(codegen), pointers_(pointers), deopt_mode_(mode) {}
26 pointers_(pointers),
27 deopt_mode_(mode) { }
28 virtual ~SafepointGenerator() {} 25 virtual ~SafepointGenerator() {}
29 26
30 virtual void BeforeCall(int call_size) const OVERRIDE {} 27 virtual void BeforeCall(int call_size) const OVERRIDE {}
31 28
32 virtual void AfterCall() const OVERRIDE { 29 virtual void AfterCall() const OVERRIDE {
33 codegen_->RecordSafepoint(pointers_, deopt_mode_); 30 codegen_->RecordSafepoint(pointers_, deopt_mode_);
34 } 31 }
35 32
36 private: 33 private:
37 LCodeGen* codegen_; 34 LCodeGen* codegen_;
(...skipping 29 matching lines...)
67 64
68 65
69 void LCodeGen::SaveCallerDoubles() { 66 void LCodeGen::SaveCallerDoubles() {
70 DCHECK(info()->saves_caller_doubles()); 67 DCHECK(info()->saves_caller_doubles());
71 DCHECK(NeedsEagerFrame()); 68 DCHECK(NeedsEagerFrame());
72 Comment(";;; Save clobbered callee double registers"); 69 Comment(";;; Save clobbered callee double registers");
73 int count = 0; 70 int count = 0;
74 BitVector* doubles = chunk()->allocated_double_registers(); 71 BitVector* doubles = chunk()->allocated_double_registers();
75 BitVector::Iterator save_iterator(doubles); 72 BitVector::Iterator save_iterator(doubles);
76 while (!save_iterator.Done()) { 73 while (!save_iterator.Done()) {
77 __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()), 74 __ stfd(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
78 MemOperand(sp, count * kDoubleSize)); 75 MemOperand(sp, count * kDoubleSize));
79 save_iterator.Advance(); 76 save_iterator.Advance();
80 count++; 77 count++;
81 } 78 }
82 } 79 }
83 80
84 81
85 void LCodeGen::RestoreCallerDoubles() { 82 void LCodeGen::RestoreCallerDoubles() {
86 DCHECK(info()->saves_caller_doubles()); 83 DCHECK(info()->saves_caller_doubles());
87 DCHECK(NeedsEagerFrame()); 84 DCHECK(NeedsEagerFrame());
88 Comment(";;; Restore clobbered callee double registers"); 85 Comment(";;; Restore clobbered callee double registers");
89 BitVector* doubles = chunk()->allocated_double_registers(); 86 BitVector* doubles = chunk()->allocated_double_registers();
90 BitVector::Iterator save_iterator(doubles); 87 BitVector::Iterator save_iterator(doubles);
91 int count = 0; 88 int count = 0;
92 while (!save_iterator.Done()) { 89 while (!save_iterator.Done()) {
93 __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()), 90 __ lfd(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
94 MemOperand(sp, count * kDoubleSize)); 91 MemOperand(sp, count * kDoubleSize));
95 save_iterator.Advance(); 92 save_iterator.Advance();
96 count++; 93 count++;
97 } 94 }
98 } 95 }
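
The hunk above swaps ARM's VFP spill instructions (vstr/vldr on DwVfpRegister) for the PPC equivalents (stfd/lfd on DoubleRegister); the layout logic is untouched: each allocated double register is spilled to a contiguous slot at sp + count * kDoubleSize in allocation-index order. A minimal standalone sketch of that layout, with std::bitset standing in for V8's BitVector and register indices chosen purely for illustration:

    #include <bitset>
    #include <cstdio>

    constexpr int kDoubleSize = 8;

    int main() {
      std::bitset<32> allocated;  // stand-in for chunk()->allocated_double_registers()
      allocated.set(1);
      allocated.set(5);
      allocated.set(12);

      int count = 0;
      for (int reg = 0; reg < 32; ++reg) {  // stand-in for BitVector::Iterator
        if (!allocated.test(reg)) continue;
        // PPC emits: stfd d<reg>, count * kDoubleSize(sp); ARM used vstr.
        std::printf("d%d -> MemOperand(sp, %d)\n", reg, count * kDoubleSize);
        ++count;
      }
      return 0;
    }
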
99 96
100 97
101 bool LCodeGen::GeneratePrologue() { 98 bool LCodeGen::GeneratePrologue() {
102 DCHECK(is_generating()); 99 DCHECK(is_generating());
103 100
104 if (info()->IsOptimizing()) { 101 if (info()->IsOptimizing()) {
105 ProfileEntryHookStub::MaybeCallEntryHook(masm_); 102 ProfileEntryHookStub::MaybeCallEntryHook(masm_);
106 103
107 #ifdef DEBUG 104 #ifdef DEBUG
108 if (strlen(FLAG_stop_at) > 0 && 105 if (strlen(FLAG_stop_at) > 0 &&
109 info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) { 106 info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
110 __ stop("stop_at"); 107 __ stop("stop_at");
111 } 108 }
112 #endif 109 #endif
113 110
114 // r1: Callee's JS function. 111 // r4: Callee's JS function.
115 // cp: Callee's context. 112 // cp: Callee's context.
116 // pp: Callee's constant pool pointer (if FLAG_enable_ool_constant_pool) 113 // pp: Callee's constant pool pointer (if FLAG_enable_ool_constant_pool)
117 // fp: Caller's frame pointer. 114 // fp: Caller's frame pointer.
118 // lr: Caller's pc. 115 // lr: Caller's pc.
116 // ip: Our own function entry (required by the prologue)
119 117
120 // Sloppy mode functions and builtins need to replace the receiver with the 118 // Sloppy mode functions and builtins need to replace the receiver with the
121 // global proxy when called as functions (without an explicit receiver 119 // global proxy when called as functions (without an explicit receiver
122 // object). 120 // object).
123 if (info_->this_has_uses() && 121 if (info_->this_has_uses() && info_->strict_mode() == SLOPPY &&
124 info_->strict_mode() == SLOPPY &&
125 !info_->is_native()) { 122 !info_->is_native()) {
126 Label ok; 123 Label ok;
127 int receiver_offset = info_->scope()->num_parameters() * kPointerSize; 124 int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
128 __ ldr(r2, MemOperand(sp, receiver_offset)); 125 __ LoadP(r5, MemOperand(sp, receiver_offset));
129 __ CompareRoot(r2, Heap::kUndefinedValueRootIndex); 126 __ CompareRoot(r5, Heap::kUndefinedValueRootIndex);
130 __ b(ne, &ok); 127 __ bne(&ok);
131 128
132 __ ldr(r2, GlobalObjectOperand()); 129 __ LoadP(r5, GlobalObjectOperand());
133 __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalProxyOffset)); 130 __ LoadP(r5, FieldMemOperand(r5, GlobalObject::kGlobalProxyOffset));
134 131
135 __ str(r2, MemOperand(sp, receiver_offset)); 132 __ StoreP(r5, MemOperand(sp, receiver_offset));
136 133
137 __ bind(&ok); 134 __ bind(&ok);
138 } 135 }
139 } 136 }
140 137
141 info()->set_prologue_offset(masm_->pc_offset()); 138 int prologue_offset = masm_->pc_offset();
139
140 if (prologue_offset) {
141 // Prologue logic requires its starting address in ip and the
142 // corresponding offset from the function entry.
143 prologue_offset += Instruction::kInstrSize;
144 __ addi(ip, ip, Operand(prologue_offset));
145 }
146 info()->set_prologue_offset(prologue_offset);
142 if (NeedsEagerFrame()) { 147 if (NeedsEagerFrame()) {
143 if (info()->IsStub()) { 148 if (info()->IsStub()) {
144 __ StubPrologue(); 149 __ StubPrologue(prologue_offset);
145 } else { 150 } else {
146 __ Prologue(info()->IsCodePreAgingActive()); 151 __ Prologue(info()->IsCodePreAgingActive(), prologue_offset);
147 } 152 }
148 frame_is_built_ = true; 153 frame_is_built_ = true;
149 info_->AddNoFrameRange(0, masm_->pc_offset()); 154 info_->AddNoFrameRange(0, masm_->pc_offset());
150 } 155 }
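
The prologue_offset fixup above is PPC-specific: the prologue expects its own start address in ip, so when any instructions precede it, the generated addi advances ip past them, counting the addi itself (Instruction::kInstrSize). A self-contained sketch of that address arithmetic, assuming only the fixed 4-byte PPC instruction width:

    #include <cassert>
    #include <cstdint>

    constexpr int kInstrSize = 4;  // PPC instructions are fixed-width

    // Address the addi leaves in ip: the first instruction of the prologue.
    uintptr_t PrologueStart(uintptr_t function_entry, int prologue_offset) {
      if (prologue_offset) {
        prologue_offset += kInstrSize;  // account for the addi itself
      }
      return function_entry + prologue_offset;
    }

    int main() {
      assert(PrologueStart(0x1000, 0) == 0x1000);  // entry is the prologue
      assert(PrologueStart(0x1000, 8) == 0x1000 + 8 + kInstrSize);
      return 0;
    }
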
151 156
152 // Reserve space for the stack slots needed by the code. 157 // Reserve space for the stack slots needed by the code.
153 int slots = GetStackSlotCount(); 158 int slots = GetStackSlotCount();
154 if (slots > 0) { 159 if (slots > 0) {
160 __ subi(sp, sp, Operand(slots * kPointerSize));
155 if (FLAG_debug_code) { 161 if (FLAG_debug_code) {
156 __ sub(sp, sp, Operand(slots * kPointerSize)); 162 __ Push(r3, r4);
157 __ push(r0); 163 __ li(r0, Operand(slots));
158 __ push(r1); 164 __ mtctr(r0);
159 __ add(r0, sp, Operand(slots * kPointerSize)); 165 __ addi(r3, sp, Operand((slots + 2) * kPointerSize));
160 __ mov(r1, Operand(kSlotsZapValue)); 166 __ mov(r4, Operand(kSlotsZapValue));
161 Label loop; 167 Label loop;
162 __ bind(&loop); 168 __ bind(&loop);
163 __ sub(r0, r0, Operand(kPointerSize)); 169 __ StorePU(r4, MemOperand(r3, -kPointerSize));
164 __ str(r1, MemOperand(r0, 2 * kPointerSize)); 170 __ bdnz(&loop);
165 __ cmp(r0, sp); 171 __ Pop(r3, r4);
166 __ b(ne, &loop);
167 __ pop(r1);
168 __ pop(r0);
169 } else {
170 __ sub(sp, sp, Operand(slots * kPointerSize));
171 } 172 }
172 } 173 }
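
In debug builds the freshly reserved slots are zapped. The ARM version used an explicit compare-and-branch loop; the PPC version loads the slot count into the count register (mtctr) and walks downward with a store-with-update (StorePU) until bdnz falls through, saving r3/r4 around the loop (hence the slots + 2 starting offset). A rough portable model of what the emitted loop does, with an illustrative zap value:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    constexpr uintptr_t kZapValue = 0xbeefdead;  // illustrative value only

    // One pre-decrement store per iteration, mirroring StorePU/bdnz.
    void ZapSlots(std::vector<uintptr_t>* stack, int slots) {
      for (int remaining = slots; remaining > 0; --remaining) {  // bdnz
        (*stack)[remaining - 1] = kZapValue;  // StorePU r4, -kPointerSize(r3)
      }
    }

    int main() {
      std::vector<uintptr_t> stack(4, 0);
      ZapSlots(&stack, 4);
      for (uintptr_t word : stack) {
        std::printf("%#lx\n", static_cast<unsigned long>(word));
      }
      return 0;
    }
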
173 174
174 if (info()->saves_caller_doubles()) { 175 if (info()->saves_caller_doubles()) {
175 SaveCallerDoubles(); 176 SaveCallerDoubles();
176 } 177 }
177 178
178 // Possibly allocate a local context. 179 // Possibly allocate a local context.
179 int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; 180 int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
180 if (heap_slots > 0) { 181 if (heap_slots > 0) {
181 Comment(";;; Allocate local context"); 182 Comment(";;; Allocate local context");
182 bool need_write_barrier = true; 183 bool need_write_barrier = true;
183 // Argument to NewContext is the function, which is in r1. 184 // Argument to NewContext is the function, which is in r4.
184 if (heap_slots <= FastNewContextStub::kMaximumSlots) { 185 if (heap_slots <= FastNewContextStub::kMaximumSlots) {
185 FastNewContextStub stub(isolate(), heap_slots); 186 FastNewContextStub stub(isolate(), heap_slots);
186 __ CallStub(&stub); 187 __ CallStub(&stub);
187 // Result of FastNewContextStub is always in new space. 188 // Result of FastNewContextStub is always in new space.
188 need_write_barrier = false; 189 need_write_barrier = false;
189 } else { 190 } else {
190 __ push(r1); 191 __ push(r4);
191 __ CallRuntime(Runtime::kNewFunctionContext, 1); 192 __ CallRuntime(Runtime::kNewFunctionContext, 1);
192 } 193 }
193 RecordSafepoint(Safepoint::kNoLazyDeopt); 194 RecordSafepoint(Safepoint::kNoLazyDeopt);
194 // Context is returned in both r0 and cp. It replaces the context 195 // Context is returned in both r3 and cp. It replaces the context
195 // passed to us. It's saved in the stack and kept live in cp. 196 // passed to us. It's saved in the stack and kept live in cp.
196 __ mov(cp, r0); 197 __ mr(cp, r3);
197 __ str(r0, MemOperand(fp, StandardFrameConstants::kContextOffset)); 198 __ StoreP(r3, MemOperand(fp, StandardFrameConstants::kContextOffset));
198 // Copy any necessary parameters into the context. 199 // Copy any necessary parameters into the context.
199 int num_parameters = scope()->num_parameters(); 200 int num_parameters = scope()->num_parameters();
200 for (int i = 0; i < num_parameters; i++) { 201 for (int i = 0; i < num_parameters; i++) {
201 Variable* var = scope()->parameter(i); 202 Variable* var = scope()->parameter(i);
202 if (var->IsContextSlot()) { 203 if (var->IsContextSlot()) {
203 int parameter_offset = StandardFrameConstants::kCallerSPOffset + 204 int parameter_offset = StandardFrameConstants::kCallerSPOffset +
204 (num_parameters - 1 - i) * kPointerSize; 205 (num_parameters - 1 - i) * kPointerSize;
205 // Load parameter from stack. 206 // Load parameter from stack.
206 __ ldr(r0, MemOperand(fp, parameter_offset)); 207 __ LoadP(r3, MemOperand(fp, parameter_offset));
207 // Store it in the context. 208 // Store it in the context.
208 MemOperand target = ContextOperand(cp, var->index()); 209 MemOperand target = ContextOperand(cp, var->index());
209 __ str(r0, target); 210 __ StoreP(r3, target, r0);
210 // Update the write barrier. This clobbers r3 and r0. 211 // Update the write barrier. This clobbers r6 and r3.
211 if (need_write_barrier) { 212 if (need_write_barrier) {
212 __ RecordWriteContextSlot( 213 __ RecordWriteContextSlot(cp, target.offset(), r3, r6,
213 cp, 214 GetLinkRegisterState(), kSaveFPRegs);
214 target.offset(),
215 r0,
216 r3,
217 GetLinkRegisterState(),
218 kSaveFPRegs);
219 } else if (FLAG_debug_code) { 215 } else if (FLAG_debug_code) {
220 Label done; 216 Label done;
221 __ JumpIfInNewSpace(cp, r0, &done); 217 __ JumpIfInNewSpace(cp, r3, &done);
222 __ Abort(kExpectedNewSpaceObject); 218 __ Abort(kExpectedNewSpaceObject);
223 __ bind(&done); 219 __ bind(&done);
224 } 220 }
225 } 221 }
226 } 222 }
227 Comment(";;; End allocate local context"); 223 Comment(";;; End allocate local context");
228 } 224 }
229 225
230 // Trace the call. 226 // Trace the call.
231 if (FLAG_trace && info()->IsOptimizing()) { 227 if (FLAG_trace && info()->IsOptimizing()) {
232 // We have not executed any compiled code yet, so cp still holds the 228 // We have not executed any compiled code yet, so cp still holds the
233 // incoming context. 229 // incoming context.
234 __ CallRuntime(Runtime::kTraceEnter, 0); 230 __ CallRuntime(Runtime::kTraceEnter, 0);
235 } 231 }
236 return !is_aborted(); 232 return !is_aborted();
237 } 233 }
238 234
239 235
240 void LCodeGen::GenerateOsrPrologue() { 236 void LCodeGen::GenerateOsrPrologue() {
241 // Generate the OSR entry prologue at the first unknown OSR value, or if there 237 // Generate the OSR entry prologue at the first unknown OSR value, or if there
242 // are none, at the OSR entrypoint instruction. 238 // are none, at the OSR entrypoint instruction.
243 if (osr_pc_offset_ >= 0) return; 239 if (osr_pc_offset_ >= 0) return;
244 240
245 osr_pc_offset_ = masm()->pc_offset(); 241 osr_pc_offset_ = masm()->pc_offset();
246 242
247 // Adjust the frame size, subsuming the unoptimized frame into the 243 // Adjust the frame size, subsuming the unoptimized frame into the
248 // optimized frame. 244 // optimized frame.
249 int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots(); 245 int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
250 DCHECK(slots >= 0); 246 DCHECK(slots >= 0);
251 __ sub(sp, sp, Operand(slots * kPointerSize)); 247 __ subi(sp, sp, Operand(slots * kPointerSize));
252 } 248 }
253 249
254 250
255 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) { 251 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
256 if (instr->IsCall()) { 252 if (instr->IsCall()) {
257 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); 253 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
258 } 254 }
259 if (!instr->IsLazyBailout() && !instr->IsGap()) { 255 if (!instr->IsLazyBailout() && !instr->IsGap()) {
260 safepoints_.BumpLastLazySafepointIndex(); 256 safepoints_.BumpLastLazySafepointIndex();
261 } 257 }
262 } 258 }
263 259
264 260
265 bool LCodeGen::GenerateDeferredCode() { 261 bool LCodeGen::GenerateDeferredCode() {
266 DCHECK(is_generating()); 262 DCHECK(is_generating());
267 if (deferred_.length() > 0) { 263 if (deferred_.length() > 0) {
268 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { 264 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
269 LDeferredCode* code = deferred_[i]; 265 LDeferredCode* code = deferred_[i];
270 266
271 HValue* value = 267 HValue* value =
272 instructions_->at(code->instruction_index())->hydrogen_value(); 268 instructions_->at(code->instruction_index())->hydrogen_value();
273 RecordAndWritePosition( 269 RecordAndWritePosition(
274 chunk()->graph()->SourcePositionToScriptPosition(value->position())); 270 chunk()->graph()->SourcePositionToScriptPosition(value->position()));
275 271
276 Comment(";;; <@%d,#%d> " 272 Comment(
277 "-------------------- Deferred %s --------------------", 273 ";;; <@%d,#%d> "
278 code->instruction_index(), 274 "-------------------- Deferred %s --------------------",
279 code->instr()->hydrogen_value()->id(), 275 code->instruction_index(), code->instr()->hydrogen_value()->id(),
280 code->instr()->Mnemonic()); 276 code->instr()->Mnemonic());
281 __ bind(code->entry()); 277 __ bind(code->entry());
282 if (NeedsDeferredFrame()) { 278 if (NeedsDeferredFrame()) {
283 Comment(";;; Build frame"); 279 Comment(";;; Build frame");
284 DCHECK(!frame_is_built_); 280 DCHECK(!frame_is_built_);
285 DCHECK(info()->IsStub()); 281 DCHECK(info()->IsStub());
286 frame_is_built_ = true; 282 frame_is_built_ = true;
287 __ PushFixedFrame(); 283 __ LoadSmiLiteral(scratch0(), Smi::FromInt(StackFrame::STUB));
288 __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB))); 284 __ PushFixedFrame(scratch0());
289 __ push(scratch0()); 285 __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
290 __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
291 Comment(";;; Deferred code"); 286 Comment(";;; Deferred code");
292 } 287 }
293 code->Generate(); 288 code->Generate();
294 if (NeedsDeferredFrame()) { 289 if (NeedsDeferredFrame()) {
295 Comment(";;; Destroy frame"); 290 Comment(";;; Destroy frame");
296 DCHECK(frame_is_built_); 291 DCHECK(frame_is_built_);
297 __ pop(ip); 292 __ PopFixedFrame(ip);
298 __ PopFixedFrame();
299 frame_is_built_ = false; 293 frame_is_built_ = false;
300 } 294 }
301 __ jmp(code->exit()); 295 __ b(code->exit());
302 } 296 }
303 } 297 }
304 298
305 // Force constant pool emission at the end of the deferred code to make
306 // sure that no constant pools are emitted after.
307 masm()->CheckConstPool(true, false);
308
309 return !is_aborted(); 299 return !is_aborted();
310 } 300 }
311 301
312 302
313 bool LCodeGen::GenerateJumpTable() { 303 bool LCodeGen::GenerateJumpTable() {
314 // Check that the jump table is accessible from everywhere in the function 304 // Check that the jump table is accessible from everywhere in the function
315 // code, i.e. that offsets to the table can be encoded in the 24bit signed 305 // code, i.e. that offsets to the table can be encoded in the 24bit signed
316 // immediate of a branch instruction. 306 // immediate of a branch instruction.
317 // To simplify we consider the code size from the first instruction to the 307 // To simplify we consider the code size from the first instruction to the
318 // end of the jump table. We also don't consider the pc load delta. 308 // end of the jump table. We also don't consider the pc load delta.
(...skipping 26 matching lines...)
345 // offset which will be added to the base address later. 335 // offset which will be added to the base address later.
346 __ mov(entry_offset, Operand(entry - base)); 336 __ mov(entry_offset, Operand(entry - base));
347 337
348 if (table_entry->needs_frame) { 338 if (table_entry->needs_frame) {
349 DCHECK(!info()->saves_caller_doubles()); 339 DCHECK(!info()->saves_caller_doubles());
350 if (needs_frame.is_bound()) { 340 if (needs_frame.is_bound()) {
351 __ b(&needs_frame); 341 __ b(&needs_frame);
352 } else { 342 } else {
353 __ bind(&needs_frame); 343 __ bind(&needs_frame);
354 Comment(";;; call deopt with frame"); 344 Comment(";;; call deopt with frame");
355 __ PushFixedFrame();
356 // This variant of deopt can only be used with stubs. Since we don't 345 // This variant of deopt can only be used with stubs. Since we don't
357 // have a function pointer to install in the stack frame that we're 346 // have a function pointer to install in the stack frame that we're
358 // building, install a special marker there instead. 347 // building, install a special marker there instead.
359 DCHECK(info()->IsStub()); 348 DCHECK(info()->IsStub());
360 __ mov(ip, Operand(Smi::FromInt(StackFrame::STUB))); 349 __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::STUB));
361 __ push(ip); 350 __ PushFixedFrame(ip);
362 __ add(fp, sp, 351 __ addi(fp, sp,
363 Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); 352 Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
364 __ bind(&call_deopt_entry); 353 __ bind(&call_deopt_entry);
365 // Add the base address to the offset previously loaded in 354 // Add the base address to the offset previously loaded in
366 // entry_offset. 355 // entry_offset.
367 __ add(entry_offset, entry_offset, 356 __ mov(ip, Operand(ExternalReference::ForDeoptEntry(base)));
368 Operand(ExternalReference::ForDeoptEntry(base))); 357 __ add(ip, entry_offset, ip);
369 __ blx(entry_offset); 358 __ Call(ip);
370 } 359 }
371
372 masm()->CheckConstPool(false, false);
373 } else { 360 } else {
374 // The last entry can fall through into `call_deopt_entry`, avoiding a 361 // The last entry can fall through into `call_deopt_entry`, avoiding a
375 // branch. 362 // branch.
376 bool need_branch = ((i + 1) != length) || call_deopt_entry.is_bound(); 363 bool need_branch = ((i + 1) != length) || call_deopt_entry.is_bound();
377 364
378 if (need_branch) __ b(&call_deopt_entry); 365 if (need_branch) __ b(&call_deopt_entry);
379
380 masm()->CheckConstPool(false, !need_branch);
381 } 366 }
382 } 367 }
383 368
384 if (!call_deopt_entry.is_bound()) { 369 if (!call_deopt_entry.is_bound()) {
385 Comment(";;; call deopt"); 370 Comment(";;; call deopt");
386 __ bind(&call_deopt_entry); 371 __ bind(&call_deopt_entry);
387 372
388 if (info()->saves_caller_doubles()) { 373 if (info()->saves_caller_doubles()) {
389 DCHECK(info()->IsStub()); 374 DCHECK(info()->IsStub());
390 RestoreCallerDoubles(); 375 RestoreCallerDoubles();
391 } 376 }
392 377
393 // Add the base address to the offset previously loaded in entry_offset. 378 // Add the base address to the offset previously loaded in entry_offset.
394 __ add(entry_offset, entry_offset, 379 __ mov(ip, Operand(ExternalReference::ForDeoptEntry(base)));
395 Operand(ExternalReference::ForDeoptEntry(base))); 380 __ add(ip, entry_offset, ip);
396 __ blx(entry_offset); 381 __ Call(ip);
397 } 382 }
398 } 383 }
399 384
400 // Force constant pool emission at the end of the deopt jump table to make
401 // sure that no constant pools are emitted after.
402 masm()->CheckConstPool(true, false);
403
404 // The deoptimization jump table is the last part of the instruction 385 // The deoptimization jump table is the last part of the instruction
405 // sequence. Mark the generated code as done unless we bailed out. 386 // sequence. Mark the generated code as done unless we bailed out.
406 if (!is_aborted()) status_ = DONE; 387 if (!is_aborted()) status_ = DONE;
407 return !is_aborted(); 388 return !is_aborted();
408 } 389 }
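
Both ports keep the jump table compact by materializing only entry - base at each deopt site and sharing one tail that adds the base back; on PPC the add-and-call goes through ip (mov/add/Call) rather than ARM's blx. A loose standalone model of that base-plus-offset dispatch, using a function-pointer table as a stand-in for the deopt entry area:

    #include <cstdint>
    #include <cstdio>

    using DeoptEntry = void (*)();
    void Entry0() { std::puts("deopt entry 0"); }
    void Entry1() { std::puts("deopt entry 1"); }

    DeoptEntry table[] = {Entry0, Entry1};  // plays the role of `base`

    // Per site only the offset is materialized; the shared tail adds the
    // base and calls through, like the ip sequence above.
    void CallDeopt(uintptr_t entry_offset) {
      uintptr_t ip = reinterpret_cast<uintptr_t>(table) + entry_offset;
      (*reinterpret_cast<DeoptEntry*>(ip))();
    }

    int main() {
      CallDeopt(0 * sizeof(DeoptEntry));
      CallDeopt(1 * sizeof(DeoptEntry));
      return 0;
    }
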
409 390
410 391
411 bool LCodeGen::GenerateSafepointTable() { 392 bool LCodeGen::GenerateSafepointTable() {
412 DCHECK(is_done()); 393 DCHECK(is_done());
413 safepoints_.Emit(masm(), GetStackSlotCount()); 394 safepoints_.Emit(masm(), GetStackSlotCount());
414 return !is_aborted(); 395 return !is_aborted();
415 } 396 }
416 397
417 398
418 Register LCodeGen::ToRegister(int index) const { 399 Register LCodeGen::ToRegister(int index) const {
419 return Register::FromAllocationIndex(index); 400 return Register::FromAllocationIndex(index);
420 } 401 }
421 402
422 403
423 DwVfpRegister LCodeGen::ToDoubleRegister(int index) const { 404 DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
424 return DwVfpRegister::FromAllocationIndex(index); 405 return DoubleRegister::FromAllocationIndex(index);
425 } 406 }
426 407
427 408
428 Register LCodeGen::ToRegister(LOperand* op) const { 409 Register LCodeGen::ToRegister(LOperand* op) const {
429 DCHECK(op->IsRegister()); 410 DCHECK(op->IsRegister());
430 return ToRegister(op->index()); 411 return ToRegister(op->index());
431 } 412 }
432 413
433 414
434 Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) { 415 Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
435 if (op->IsRegister()) { 416 if (op->IsRegister()) {
436 return ToRegister(op->index()); 417 return ToRegister(op->index());
437 } else if (op->IsConstantOperand()) { 418 } else if (op->IsConstantOperand()) {
438 LConstantOperand* const_op = LConstantOperand::cast(op); 419 LConstantOperand* const_op = LConstantOperand::cast(op);
439 HConstant* constant = chunk_->LookupConstant(const_op); 420 HConstant* constant = chunk_->LookupConstant(const_op);
440 Handle<Object> literal = constant->handle(isolate()); 421 Handle<Object> literal = constant->handle(isolate());
441 Representation r = chunk_->LookupLiteralRepresentation(const_op); 422 Representation r = chunk_->LookupLiteralRepresentation(const_op);
442 if (r.IsInteger32()) { 423 if (r.IsInteger32()) {
443 DCHECK(literal->IsNumber()); 424 DCHECK(literal->IsNumber());
444 __ mov(scratch, Operand(static_cast<int32_t>(literal->Number()))); 425 __ LoadIntLiteral(scratch, static_cast<int32_t>(literal->Number()));
445 } else if (r.IsDouble()) { 426 } else if (r.IsDouble()) {
446 Abort(kEmitLoadRegisterUnsupportedDoubleImmediate); 427 Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
447 } else { 428 } else {
448 DCHECK(r.IsSmiOrTagged()); 429 DCHECK(r.IsSmiOrTagged());
449 __ Move(scratch, literal); 430 __ Move(scratch, literal);
450 } 431 }
451 return scratch; 432 return scratch;
452 } else if (op->IsStackSlot()) { 433 } else if (op->IsStackSlot()) {
453 __ ldr(scratch, ToMemOperand(op)); 434 __ LoadP(scratch, ToMemOperand(op));
454 return scratch; 435 return scratch;
455 } 436 }
456 UNREACHABLE(); 437 UNREACHABLE();
457 return scratch; 438 return scratch;
458 } 439 }
459 440
460 441
461 DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const { 442 void LCodeGen::EmitLoadIntegerConstant(LConstantOperand* const_op,
443 Register dst) {
444 DCHECK(IsInteger32(const_op));
445 HConstant* constant = chunk_->LookupConstant(const_op);
446 int32_t value = constant->Integer32Value();
447 if (IsSmi(const_op)) {
448 __ LoadSmiLiteral(dst, Smi::FromInt(value));
449 } else {
450 __ LoadIntLiteral(dst, value);
451 }
452 }
453
454
455 DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
462 DCHECK(op->IsDoubleRegister()); 456 DCHECK(op->IsDoubleRegister());
463 return ToDoubleRegister(op->index()); 457 return ToDoubleRegister(op->index());
464 } 458 }
465 459
466 460
467 DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
468 SwVfpRegister flt_scratch,
469 DwVfpRegister dbl_scratch) {
470 if (op->IsDoubleRegister()) {
471 return ToDoubleRegister(op->index());
472 } else if (op->IsConstantOperand()) {
473 LConstantOperand* const_op = LConstantOperand::cast(op);
474 HConstant* constant = chunk_->LookupConstant(const_op);
475 Handle<Object> literal = constant->handle(isolate());
476 Representation r = chunk_->LookupLiteralRepresentation(const_op);
477 if (r.IsInteger32()) {
478 DCHECK(literal->IsNumber());
479 __ mov(ip, Operand(static_cast<int32_t>(literal->Number())));
480 __ vmov(flt_scratch, ip);
481 __ vcvt_f64_s32(dbl_scratch, flt_scratch);
482 return dbl_scratch;
483 } else if (r.IsDouble()) {
484 Abort(kUnsupportedDoubleImmediate);
485 } else if (r.IsTagged()) {
486 Abort(kUnsupportedTaggedImmediate);
487 }
488 } else if (op->IsStackSlot()) {
489 // TODO(regis): Why is vldr not taking a MemOperand?
490 // __ vldr(dbl_scratch, ToMemOperand(op));
491 MemOperand mem_op = ToMemOperand(op);
492 __ vldr(dbl_scratch, mem_op.rn(), mem_op.offset());
493 return dbl_scratch;
494 }
495 UNREACHABLE();
496 return dbl_scratch;
497 }
498
499
500 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const { 461 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
501 HConstant* constant = chunk_->LookupConstant(op); 462 HConstant* constant = chunk_->LookupConstant(op);
502 DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged()); 463 DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
503 return constant->handle(isolate()); 464 return constant->handle(isolate());
504 } 465 }
505 466
506 467
507 bool LCodeGen::IsInteger32(LConstantOperand* op) const { 468 bool LCodeGen::IsInteger32(LConstantOperand* op) const {
508 return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32(); 469 return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
509 } 470 }
510 471
511 472
512 bool LCodeGen::IsSmi(LConstantOperand* op) const { 473 bool LCodeGen::IsSmi(LConstantOperand* op) const {
513 return chunk_->LookupLiteralRepresentation(op).IsSmi(); 474 return chunk_->LookupLiteralRepresentation(op).IsSmi();
514 } 475 }
515 476
516 477
517 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const { 478 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
518 return ToRepresentation(op, Representation::Integer32()); 479 return ToRepresentation(op, Representation::Integer32());
519 } 480 }
520 481
521 482
522 int32_t LCodeGen::ToRepresentation(LConstantOperand* op, 483 intptr_t LCodeGen::ToRepresentation(LConstantOperand* op,
523 const Representation& r) const { 484 const Representation& r) const {
524 HConstant* constant = chunk_->LookupConstant(op); 485 HConstant* constant = chunk_->LookupConstant(op);
525 int32_t value = constant->Integer32Value(); 486 int32_t value = constant->Integer32Value();
526 if (r.IsInteger32()) return value; 487 if (r.IsInteger32()) return value;
527 DCHECK(r.IsSmiOrTagged()); 488 DCHECK(r.IsSmiOrTagged());
528 return reinterpret_cast<int32_t>(Smi::FromInt(value)); 489 return reinterpret_cast<intptr_t>(Smi::FromInt(value));
529 } 490 }
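
Widening ToRepresentation's return type to intptr_t matters on a 64-bit target: a tagged Smi there keeps its 32-bit payload in the upper half of a 64-bit word, so the tagged pattern no longer fits in the int32_t that the 32-bit ARM code assumed. A short sketch of that layout (kSmiShift = 32 is the assumed 64-bit smi shift):

    #include <cassert>
    #include <cstdint>

    constexpr int kSmiShift = 32;  // assumed 64-bit smi layout

    int64_t TagSmi(int32_t value) {
      return static_cast<int64_t>(value) << kSmiShift;  // Smi::FromInt
    }

    int main() {
      int64_t tagged = TagSmi(7);
      assert(tagged == (static_cast<int64_t>(7) << 32));       // > INT32_MAX
      assert(static_cast<int32_t>(tagged >> kSmiShift) == 7);  // untag
      return 0;
    }
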
530 491
531 492
532 Smi* LCodeGen::ToSmi(LConstantOperand* op) const { 493 Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
533 HConstant* constant = chunk_->LookupConstant(op); 494 HConstant* constant = chunk_->LookupConstant(op);
534 return Smi::FromInt(constant->Integer32Value()); 495 return Smi::FromInt(constant->Integer32Value());
535 } 496 }
536 497
537 498
538 double LCodeGen::ToDouble(LConstantOperand* op) const { 499 double LCodeGen::ToDouble(LConstantOperand* op) const {
(...skipping 51 matching lines...)
590 } 551 }
591 552
592 553
593 MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const { 554 MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
594 DCHECK(op->IsDoubleStackSlot()); 555 DCHECK(op->IsDoubleStackSlot());
595 if (NeedsEagerFrame()) { 556 if (NeedsEagerFrame()) {
596 return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize); 557 return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
597 } else { 558 } else {
598 // Retrieve parameter without eager stack-frame relative to the 559 // Retrieve parameter without eager stack-frame relative to the
599 // stack-pointer. 560 // stack-pointer.
600 return MemOperand( 561 return MemOperand(sp,
601 sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize); 562 ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
602 } 563 }
603 } 564 }
604 565
605 566
606 void LCodeGen::WriteTranslation(LEnvironment* environment, 567 void LCodeGen::WriteTranslation(LEnvironment* environment,
607 Translation* translation) { 568 Translation* translation) {
608 if (environment == NULL) return; 569 if (environment == NULL) return;
609 570
610 // The translation includes one command per value in the environment. 571 // The translation includes one command per value in the environment.
611 int translation_size = environment->translation_size(); 572 int translation_size = environment->translation_size();
612 // The output frame height does not include the parameters. 573 // The output frame height does not include the parameters.
613 int height = translation_size - environment->parameter_count(); 574 int height = translation_size - environment->parameter_count();
614 575
615 WriteTranslation(environment->outer(), translation); 576 WriteTranslation(environment->outer(), translation);
616 bool has_closure_id = !info()->closure().is_null() && 577 bool has_closure_id =
578 !info()->closure().is_null() &&
617 !info()->closure().is_identical_to(environment->closure()); 579 !info()->closure().is_identical_to(environment->closure());
618 int closure_id = has_closure_id 580 int closure_id = has_closure_id
619 ? DefineDeoptimizationLiteral(environment->closure()) 581 ? DefineDeoptimizationLiteral(environment->closure())
620 : Translation::kSelfLiteralId; 582 : Translation::kSelfLiteralId;
621 583
622 switch (environment->frame_type()) { 584 switch (environment->frame_type()) {
623 case JS_FUNCTION: 585 case JS_FUNCTION:
624 translation->BeginJSFrame(environment->ast_id(), closure_id, height); 586 translation->BeginJSFrame(environment->ast_id(), closure_id, height);
625 break; 587 break;
626 case JS_CONSTRUCT: 588 case JS_CONSTRUCT:
627 translation->BeginConstructStubFrame(closure_id, translation_size); 589 translation->BeginConstructStubFrame(closure_id, translation_size);
628 break; 590 break;
629 case JS_GETTER: 591 case JS_GETTER:
630 DCHECK(translation_size == 1); 592 DCHECK(translation_size == 1);
(...skipping 10 matching lines...)
641 break; 603 break;
642 case ARGUMENTS_ADAPTOR: 604 case ARGUMENTS_ADAPTOR:
643 translation->BeginArgumentsAdaptorFrame(closure_id, translation_size); 605 translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
644 break; 606 break;
645 } 607 }
646 608
647 int object_index = 0; 609 int object_index = 0;
648 int dematerialized_index = 0; 610 int dematerialized_index = 0;
649 for (int i = 0; i < translation_size; ++i) { 611 for (int i = 0; i < translation_size; ++i) {
650 LOperand* value = environment->values()->at(i); 612 LOperand* value = environment->values()->at(i);
651 AddToTranslation(environment, 613 AddToTranslation(
652 translation, 614 environment, translation, value, environment->HasTaggedValueAt(i),
653 value, 615 environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
654 environment->HasTaggedValueAt(i),
655 environment->HasUint32ValueAt(i),
656 &object_index,
657 &dematerialized_index);
658 } 616 }
659 } 617 }
660 618
661 619
662 void LCodeGen::AddToTranslation(LEnvironment* environment, 620 void LCodeGen::AddToTranslation(LEnvironment* environment,
663 Translation* translation, 621 Translation* translation, LOperand* op,
664 LOperand* op, 622 bool is_tagged, bool is_uint32,
665 bool is_tagged,
666 bool is_uint32,
667 int* object_index_pointer, 623 int* object_index_pointer,
668 int* dematerialized_index_pointer) { 624 int* dematerialized_index_pointer) {
669 if (op == LEnvironment::materialization_marker()) { 625 if (op == LEnvironment::materialization_marker()) {
670 int object_index = (*object_index_pointer)++; 626 int object_index = (*object_index_pointer)++;
671 if (environment->ObjectIsDuplicateAt(object_index)) { 627 if (environment->ObjectIsDuplicateAt(object_index)) {
672 int dupe_of = environment->ObjectDuplicateOfAt(object_index); 628 int dupe_of = environment->ObjectDuplicateOfAt(object_index);
673 translation->DuplicateObject(dupe_of); 629 translation->DuplicateObject(dupe_of);
674 return; 630 return;
675 } 631 }
676 int object_length = environment->ObjectLengthAt(object_index); 632 int object_length = environment->ObjectLengthAt(object_index);
677 if (environment->ObjectIsArgumentsAt(object_index)) { 633 if (environment->ObjectIsArgumentsAt(object_index)) {
678 translation->BeginArgumentsObject(object_length); 634 translation->BeginArgumentsObject(object_length);
679 } else { 635 } else {
680 translation->BeginCapturedObject(object_length); 636 translation->BeginCapturedObject(object_length);
681 } 637 }
682 int dematerialized_index = *dematerialized_index_pointer; 638 int dematerialized_index = *dematerialized_index_pointer;
683 int env_offset = environment->translation_size() + dematerialized_index; 639 int env_offset = environment->translation_size() + dematerialized_index;
684 *dematerialized_index_pointer += object_length; 640 *dematerialized_index_pointer += object_length;
685 for (int i = 0; i < object_length; ++i) { 641 for (int i = 0; i < object_length; ++i) {
686 LOperand* value = environment->values()->at(env_offset + i); 642 LOperand* value = environment->values()->at(env_offset + i);
687 AddToTranslation(environment, 643 AddToTranslation(environment, translation, value,
688 translation,
689 value,
690 environment->HasTaggedValueAt(env_offset + i), 644 environment->HasTaggedValueAt(env_offset + i),
691 environment->HasUint32ValueAt(env_offset + i), 645 environment->HasUint32ValueAt(env_offset + i),
692 object_index_pointer, 646 object_index_pointer, dematerialized_index_pointer);
693 dematerialized_index_pointer);
694 } 647 }
695 return; 648 return;
696 } 649 }
697 650
698 if (op->IsStackSlot()) { 651 if (op->IsStackSlot()) {
699 if (is_tagged) { 652 if (is_tagged) {
700 translation->StoreStackSlot(op->index()); 653 translation->StoreStackSlot(op->index());
701 } else if (is_uint32) { 654 } else if (is_uint32) {
702 translation->StoreUint32StackSlot(op->index()); 655 translation->StoreUint32StackSlot(op->index());
703 } else { 656 } else {
(...skipping 16 matching lines...)
720 } else if (op->IsConstantOperand()) { 673 } else if (op->IsConstantOperand()) {
721 HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op)); 674 HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
722 int src_index = DefineDeoptimizationLiteral(constant->handle(isolate())); 675 int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
723 translation->StoreLiteral(src_index); 676 translation->StoreLiteral(src_index);
724 } else { 677 } else {
725 UNREACHABLE(); 678 UNREACHABLE();
726 } 679 }
727 } 680 }
728 681
729 682
730 int LCodeGen::CallCodeSize(Handle<Code> code, RelocInfo::Mode mode) { 683 void LCodeGen::CallCode(Handle<Code> code, RelocInfo::Mode mode,
731 int size = masm()->CallSize(code, mode); 684 LInstruction* instr) {
732 if (code->kind() == Code::BINARY_OP_IC || 685 CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
733 code->kind() == Code::COMPARE_IC) {
734 size += Assembler::kInstrSize; // extra nop() added in CallCodeGeneric.
735 }
736 return size;
737 } 686 }
738 687
739 688
740 void LCodeGen::CallCode(Handle<Code> code, 689 void LCodeGen::CallCodeGeneric(Handle<Code> code, RelocInfo::Mode mode,
741 RelocInfo::Mode mode,
742 LInstruction* instr,
743 TargetAddressStorageMode storage_mode) {
744 CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, storage_mode);
745 }
746
747
748 void LCodeGen::CallCodeGeneric(Handle<Code> code,
749 RelocInfo::Mode mode,
750 LInstruction* instr, 690 LInstruction* instr,
751 SafepointMode safepoint_mode, 691 SafepointMode safepoint_mode) {
752 TargetAddressStorageMode storage_mode) {
753 DCHECK(instr != NULL); 692 DCHECK(instr != NULL);
754 // Block literal pool emission to ensure nop indicating no inlined smi code 693 __ Call(code, mode);
755 // is in the correct position.
756 Assembler::BlockConstPoolScope block_const_pool(masm());
757 __ Call(code, mode, TypeFeedbackId::None(), al, storage_mode);
758 RecordSafepointWithLazyDeopt(instr, safepoint_mode); 694 RecordSafepointWithLazyDeopt(instr, safepoint_mode);
759 695
760 // Signal that we don't inline smi code before these stubs in the 696 // Signal that we don't inline smi code before these stubs in the
761 // optimizing code generator. 697 // optimizing code generator.
762 if (code->kind() == Code::BINARY_OP_IC || 698 if (code->kind() == Code::BINARY_OP_IC || code->kind() == Code::COMPARE_IC) {
763 code->kind() == Code::COMPARE_IC) {
764 __ nop(); 699 __ nop();
765 } 700 }
766 } 701 }
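
The trailing nop is a marker: the inlined-smi-code patcher inspects the instruction immediately after a BINARY_OP_IC or COMPARE_IC call site, and a nop there signals that no inlined smi fast path precedes the stub. A toy model of that convention (the enum and helper are illustrative only):

    #include <cstdio>

    enum class Instr { kNop, kOther };

    // A nop after the call site means: nothing inlined here to patch.
    bool HasInlinedSmiCode(Instr instr_after_call) {
      return instr_after_call != Instr::kNop;
    }

    int main() {
      std::printf("inlined smi code after IC call: %s\n",
                  HasInlinedSmiCode(Instr::kNop) ? "yes" : "no");
      return 0;
    }
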
767 702
768 703
769 void LCodeGen::CallRuntime(const Runtime::Function* function, 704 void LCodeGen::CallRuntime(const Runtime::Function* function, int num_arguments,
770 int num_arguments, 705 LInstruction* instr, SaveFPRegsMode save_doubles) {
771 LInstruction* instr,
772 SaveFPRegsMode save_doubles) {
773 DCHECK(instr != NULL); 706 DCHECK(instr != NULL);
774 707
775 __ CallRuntime(function, num_arguments, save_doubles); 708 __ CallRuntime(function, num_arguments, save_doubles);
776 709
777 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); 710 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
778 } 711 }
779 712
780 713
781 void LCodeGen::LoadContextFromDeferred(LOperand* context) { 714 void LCodeGen::LoadContextFromDeferred(LOperand* context) {
782 if (context->IsRegister()) { 715 if (context->IsRegister()) {
783 __ Move(cp, ToRegister(context)); 716 __ Move(cp, ToRegister(context));
784 } else if (context->IsStackSlot()) { 717 } else if (context->IsStackSlot()) {
785 __ ldr(cp, ToMemOperand(context)); 718 __ LoadP(cp, ToMemOperand(context));
786 } else if (context->IsConstantOperand()) { 719 } else if (context->IsConstantOperand()) {
787 HConstant* constant = 720 HConstant* constant =
788 chunk_->LookupConstant(LConstantOperand::cast(context)); 721 chunk_->LookupConstant(LConstantOperand::cast(context));
789 __ Move(cp, Handle<Object>::cast(constant->handle(isolate()))); 722 __ Move(cp, Handle<Object>::cast(constant->handle(isolate())));
790 } else { 723 } else {
791 UNREACHABLE(); 724 UNREACHABLE();
792 } 725 }
793 } 726 }
794 727
795 728
796 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, 729 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, int argc,
797 int argc, 730 LInstruction* instr, LOperand* context) {
798 LInstruction* instr,
799 LOperand* context) {
800 LoadContextFromDeferred(context); 731 LoadContextFromDeferred(context);
801 __ CallRuntimeSaveDoubles(id); 732 __ CallRuntimeSaveDoubles(id);
802 RecordSafepointWithRegisters( 733 RecordSafepointWithRegisters(instr->pointer_map(), argc,
803 instr->pointer_map(), argc, Safepoint::kNoLazyDeopt); 734 Safepoint::kNoLazyDeopt);
804 } 735 }
805 736
806 737
807 void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment, 738 void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
808 Safepoint::DeoptMode mode) { 739 Safepoint::DeoptMode mode) {
809 environment->set_has_been_used(); 740 environment->set_has_been_used();
810 if (!environment->HasBeenRegistered()) { 741 if (!environment->HasBeenRegistered()) {
811 // Physical stack frame layout: 742 // Physical stack frame layout:
812 // -x ............. -4 0 ..................................... y 743 // -x ............. -4 0 ..................................... y
813 // [incoming arguments] [spill slots] [pushed outgoing arguments] 744 // [incoming arguments] [spill slots] [pushed outgoing arguments]
(...skipping 12 matching lines...)
826 for (LEnvironment* e = environment; e != NULL; e = e->outer()) { 757 for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
827 ++frame_count; 758 ++frame_count;
828 if (e->frame_type() == JS_FUNCTION) { 759 if (e->frame_type() == JS_FUNCTION) {
829 ++jsframe_count; 760 ++jsframe_count;
830 } 761 }
831 } 762 }
832 Translation translation(&translations_, frame_count, jsframe_count, zone()); 763 Translation translation(&translations_, frame_count, jsframe_count, zone());
833 WriteTranslation(environment, &translation); 764 WriteTranslation(environment, &translation);
834 int deoptimization_index = deoptimizations_.length(); 765 int deoptimization_index = deoptimizations_.length();
835 int pc_offset = masm()->pc_offset(); 766 int pc_offset = masm()->pc_offset();
836 environment->Register(deoptimization_index, 767 environment->Register(deoptimization_index, translation.index(),
837 translation.index(),
838 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); 768 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
839 deoptimizations_.Add(environment, zone()); 769 deoptimizations_.Add(environment, zone());
840 } 770 }
841 } 771 }
842 772
843 773
844 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, 774 void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
845 const char* detail, 775 const char* detail,
846 Deoptimizer::BailoutType bailout_type) { 776 Deoptimizer::BailoutType bailout_type,
777 CRegister cr) {
847 LEnvironment* environment = instr->environment(); 778 LEnvironment* environment = instr->environment();
848 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); 779 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
849 DCHECK(environment->HasBeenRegistered()); 780 DCHECK(environment->HasBeenRegistered());
850 int id = environment->deoptimization_index(); 781 int id = environment->deoptimization_index();
851 DCHECK(info()->IsOptimizing() || info()->IsStub()); 782 DCHECK(info()->IsOptimizing() || info()->IsStub());
852 Address entry = 783 Address entry =
853 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); 784 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
854 if (entry == NULL) { 785 if (entry == NULL) {
855 Abort(kBailoutWasNotPrepared); 786 Abort(kBailoutWasNotPrepared);
856 return; 787 return;
857 } 788 }
858 789
859 if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) { 790 if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
791 CRegister alt_cr = cr6;
860 Register scratch = scratch0(); 792 Register scratch = scratch0();
861 ExternalReference count = ExternalReference::stress_deopt_count(isolate()); 793 ExternalReference count = ExternalReference::stress_deopt_count(isolate());
794 Label no_deopt;
795 DCHECK(!alt_cr.is(cr));
796 __ Push(r4, scratch);
797 __ mov(scratch, Operand(count));
798 __ lwz(r4, MemOperand(scratch));
799 __ subi(r4, r4, Operand(1));
800 __ cmpi(r4, Operand::Zero(), alt_cr);
801 __ bne(&no_deopt, alt_cr);
802 __ li(r4, Operand(FLAG_deopt_every_n_times));
803 __ stw(r4, MemOperand(scratch));
804 __ Pop(r4, scratch);
862 805
863 // Store the condition on the stack if necessary 806 __ Call(entry, RelocInfo::RUNTIME_ENTRY);
864 if (condition != al) { 807 __ bind(&no_deopt);
865 __ mov(scratch, Operand::Zero(), LeaveCC, NegateCondition(condition)); 808 __ stw(r4, MemOperand(scratch));
866 __ mov(scratch, Operand(1), LeaveCC, condition); 809 __ Pop(r4, scratch);
867 __ push(scratch);
868 }
869
870 __ push(r1);
871 __ mov(scratch, Operand(count));
872 __ ldr(r1, MemOperand(scratch));
873 __ sub(r1, r1, Operand(1), SetCC);
874 __ mov(r1, Operand(FLAG_deopt_every_n_times), LeaveCC, eq);
875 __ str(r1, MemOperand(scratch));
876 __ pop(r1);
877
878 if (condition != al) {
879 // Clean up the stack before the deoptimizer call
880 __ pop(scratch);
881 }
882
883 __ Call(entry, RelocInfo::RUNTIME_ENTRY, eq);
884
885 // 'Restore' the condition in a slightly hacky way. (It would be better
886 // to use 'msr' and 'mrs' instructions here, but they are not supported by
887 // our ARM simulator).
888 if (condition != al) {
889 condition = ne;
890 __ cmp(scratch, Operand::Zero());
891 }
892 } 810 }
893 811
894 if (info()->ShouldTrapOnDeopt()) { 812 if (info()->ShouldTrapOnDeopt()) {
895 __ stop("trap_on_deopt", condition); 813 __ stop("trap_on_deopt", cond, kDefaultStopCode, cr);
896 } 814 }
897 815
898 Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(), 816 Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
899 instr->Mnemonic(), detail); 817 instr->Mnemonic(), detail);
900 DCHECK(info()->IsStub() || frame_is_built_); 818 DCHECK(info()->IsStub() || frame_is_built_);
901 // Go through jump table if we need to handle condition, build frame, or 819 // Go through jump table if we need to handle condition, build frame, or
902 // restore caller doubles. 820 // restore caller doubles.
903 if (condition == al && frame_is_built_ && 821 if (cond == al && frame_is_built_ && !info()->saves_caller_doubles()) {
904 !info()->saves_caller_doubles()) {
905 DeoptComment(reason); 822 DeoptComment(reason);
906 __ Call(entry, RelocInfo::RUNTIME_ENTRY); 823 __ Call(entry, RelocInfo::RUNTIME_ENTRY);
907 } else { 824 } else {
908 Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type, 825 Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
909 !frame_is_built_); 826 !frame_is_built_);
910 // We often have several deopts to the same entry, reuse the last 827 // We often have several deopts to the same entry, reuse the last
911 // jump entry if this is the case. 828 // jump entry if this is the case.
912 if (jump_table_.is_empty() || 829 if (jump_table_.is_empty() ||
913 !table_entry.IsEquivalentTo(jump_table_.last())) { 830 !table_entry.IsEquivalentTo(jump_table_.last())) {
914 jump_table_.Add(table_entry, zone()); 831 jump_table_.Add(table_entry, zone());
915 } 832 }
916 __ b(condition, &jump_table_.last().label); 833 __ b(cond, &jump_table_.last().label, cr);
917 } 834 }
918 } 835 }
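
Under --deopt-every-n-times, every deopt site decrements a shared counter reached through an ExternalReference; when it hits zero the counter is reset from the flag and a deoptimization is forced. The PPC sequence compares in a spare condition register (cr6) so the caller's condition field is left intact, avoiding the ARM version's push/pop of the condition onto the stack. A compact portable model of the counter logic (values illustrative):

    #include <cstdio>

    int FLAG_deopt_every_n_times = 3;
    int stress_deopt_count = 3;  // the cell behind the ExternalReference

    // Decrement per site; on zero, reset and force a deoptimization.
    bool ShouldForceDeopt() {
      if (--stress_deopt_count != 0) return false;  // bne &no_deopt
      stress_deopt_count = FLAG_deopt_every_n_times;
      return true;  // __ Call(entry, RelocInfo::RUNTIME_ENTRY)
    }

    int main() {
      for (int site = 1; site <= 7; ++site) {
        std::printf("site %d: %s\n", site, ShouldForceDeopt() ? "deopt" : "run");
      }
      return 0;
    }
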
919 836
920 837
921 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, 838 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
922 const char* detail) { 839 const char* detail, CRegister cr) {
923 Deoptimizer::BailoutType bailout_type = info()->IsStub() 840 Deoptimizer::BailoutType bailout_type =
924 ? Deoptimizer::LAZY 841 info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
925 : Deoptimizer::EAGER; 842 DeoptimizeIf(condition, instr, detail, bailout_type, cr);
926 DeoptimizeIf(condition, instr, detail, bailout_type);
927 } 843 }
928 844
929 845
930 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { 846 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
931 int length = deoptimizations_.length(); 847 int length = deoptimizations_.length();
932 if (length == 0) return; 848 if (length == 0) return;
933 Handle<DeoptimizationInputData> data = 849 Handle<DeoptimizationInputData> data =
934 DeoptimizationInputData::New(isolate(), length, TENURED); 850 DeoptimizationInputData::New(isolate(), length, TENURED);
935 851
936 Handle<ByteArray> translations = 852 Handle<ByteArray> translations =
937 translations_.CreateByteArray(isolate()->factory()); 853 translations_.CreateByteArray(isolate()->factory());
938 data->SetTranslationByteArray(*translations); 854 data->SetTranslationByteArray(*translations);
939 data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_)); 855 data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
940 data->SetOptimizationId(Smi::FromInt(info_->optimization_id())); 856 data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
941 if (info_->IsOptimizing()) { 857 if (info_->IsOptimizing()) {
942 // Reference to shared function info does not change between phases. 858 // Reference to shared function info does not change between phases.
943 AllowDeferredHandleDereference allow_handle_dereference; 859 AllowDeferredHandleDereference allow_handle_dereference;
944 data->SetSharedFunctionInfo(*info_->shared_info()); 860 data->SetSharedFunctionInfo(*info_->shared_info());
945 } else { 861 } else {
946 data->SetSharedFunctionInfo(Smi::FromInt(0)); 862 data->SetSharedFunctionInfo(Smi::FromInt(0));
947 } 863 }
948 864
949 Handle<FixedArray> literals = 865 Handle<FixedArray> literals =
950 factory()->NewFixedArray(deoptimization_literals_.length(), TENURED); 866 factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
951 { AllowDeferredHandleDereference copy_handles; 867 {
868 AllowDeferredHandleDereference copy_handles;
952 for (int i = 0; i < deoptimization_literals_.length(); i++) { 869 for (int i = 0; i < deoptimization_literals_.length(); i++) {
953 literals->set(i, *deoptimization_literals_[i]); 870 literals->set(i, *deoptimization_literals_[i]);
954 } 871 }
955 data->SetLiteralArray(*literals); 872 data->SetLiteralArray(*literals);
956 } 873 }
957 874
958 data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt())); 875 data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
959 data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_)); 876 data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
960 877
961 // Populate the deoptimization entries. 878 // Populate the deoptimization entries.
(...skipping 18 matching lines...)
980 return result; 897 return result;
981 } 898 }
982 899
983 900
984 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() { 901 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
985 DCHECK(deoptimization_literals_.length() == 0); 902 DCHECK(deoptimization_literals_.length() == 0);
986 903
987 const ZoneList<Handle<JSFunction> >* inlined_closures = 904 const ZoneList<Handle<JSFunction> >* inlined_closures =
988 chunk()->inlined_closures(); 905 chunk()->inlined_closures();
989 906
990 for (int i = 0, length = inlined_closures->length(); 907 for (int i = 0, length = inlined_closures->length(); i < length; i++) {
991 i < length;
992 i++) {
993 DefineDeoptimizationLiteral(inlined_closures->at(i)); 908 DefineDeoptimizationLiteral(inlined_closures->at(i));
994 } 909 }
995 910
996 inlined_function_count_ = deoptimization_literals_.length(); 911 inlined_function_count_ = deoptimization_literals_.length();
997 } 912 }
998 913
999 914
1000 void LCodeGen::RecordSafepointWithLazyDeopt( 915 void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
1001 LInstruction* instr, SafepointMode safepoint_mode) { 916 SafepointMode safepoint_mode) {
1002 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) { 917 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
1003 RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt); 918 RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
1004 } else { 919 } else {
1005 DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); 920 DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
1006 RecordSafepointWithRegisters( 921 RecordSafepointWithRegisters(instr->pointer_map(), 0,
1007 instr->pointer_map(), 0, Safepoint::kLazyDeopt); 922 Safepoint::kLazyDeopt);
1008 } 923 }
1009 } 924 }
1010 925
1011 926
1012 void LCodeGen::RecordSafepoint( 927 void LCodeGen::RecordSafepoint(LPointerMap* pointers, Safepoint::Kind kind,
1013 LPointerMap* pointers, 928 int arguments, Safepoint::DeoptMode deopt_mode) {
1014 Safepoint::Kind kind,
1015 int arguments,
1016 Safepoint::DeoptMode deopt_mode) {
1017 DCHECK(expected_safepoint_kind_ == kind); 929 DCHECK(expected_safepoint_kind_ == kind);
1018 930
1019 const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands(); 931 const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
1020 Safepoint safepoint = safepoints_.DefineSafepoint(masm(), 932 Safepoint safepoint =
1021 kind, arguments, deopt_mode); 933 safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
1022 for (int i = 0; i < operands->length(); i++) { 934 for (int i = 0; i < operands->length(); i++) {
1023 LOperand* pointer = operands->at(i); 935 LOperand* pointer = operands->at(i);
1024 if (pointer->IsStackSlot()) { 936 if (pointer->IsStackSlot()) {
1025 safepoint.DefinePointerSlot(pointer->index(), zone()); 937 safepoint.DefinePointerSlot(pointer->index(), zone());
1026 } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) { 938 } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
1027 safepoint.DefinePointerRegister(ToRegister(pointer), zone()); 939 safepoint.DefinePointerRegister(ToRegister(pointer), zone());
1028 } 940 }
1029 } 941 }
1030 if (FLAG_enable_ool_constant_pool && (kind & Safepoint::kWithRegisters)) { 942 #if V8_OOL_CONSTANT_POOL
1031 // Register pp always contains a pointer to the constant pool. 943 if (kind & Safepoint::kWithRegisters) {
1032 safepoint.DefinePointerRegister(pp, zone()); 944 // Register always contains a pointer to the constant pool.
1032 safepoint.DefinePointerRegister(pp, zone()); 944 // kConstantPoolRegister always contains a pointer to the constant pool.
1033 } 946 }
947 #endif
1034 } 948 }
1035 949
1036 950
1037 void LCodeGen::RecordSafepoint(LPointerMap* pointers, 951 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
1038 Safepoint::DeoptMode deopt_mode) { 952 Safepoint::DeoptMode deopt_mode) {
1039 RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode); 953 RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
1040 } 954 }
1041 955
1042 956
1043 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) { 957 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
1044 LPointerMap empty_pointers(zone()); 958 LPointerMap empty_pointers(zone());
1045 RecordSafepoint(&empty_pointers, deopt_mode); 959 RecordSafepoint(&empty_pointers, deopt_mode);
1046 } 960 }
1047 961
1048 962
1049 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers, 963 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
1050 int arguments, 964 int arguments,
1051 Safepoint::DeoptMode deopt_mode) { 965 Safepoint::DeoptMode deopt_mode) {
1052 RecordSafepoint( 966 RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
1053 pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
1054 } 967 }
1055 968
1056 969
1057 void LCodeGen::RecordAndWritePosition(int position) { 970 void LCodeGen::RecordAndWritePosition(int position) {
1058 if (position == RelocInfo::kNoPosition) return; 971 if (position == RelocInfo::kNoPosition) return;
1059 masm()->positions_recorder()->RecordPosition(position); 972 masm()->positions_recorder()->RecordPosition(position);
1060 masm()->positions_recorder()->WriteRecordedPositions(); 973 masm()->positions_recorder()->WriteRecordedPositions();
1061 } 974 }
1062 975
1063 976
1064 static const char* LabelType(LLabel* label) { 977 static const char* LabelType(LLabel* label) {
1065 if (label->is_loop_header()) return " (loop header)"; 978 if (label->is_loop_header()) return " (loop header)";
1066 if (label->is_osr_entry()) return " (OSR entry)"; 979 if (label->is_osr_entry()) return " (OSR entry)";
1067 return ""; 980 return "";
1068 } 981 }
1069 982
1070 983
1071 void LCodeGen::DoLabel(LLabel* label) { 984 void LCodeGen::DoLabel(LLabel* label) {
1072 Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------", 985 Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
1073 current_instruction_, 986 current_instruction_, label->hydrogen_value()->id(),
1074 label->hydrogen_value()->id(), 987 label->block_id(), LabelType(label));
1075 label->block_id(),
1076 LabelType(label));
1077 __ bind(label->label()); 988 __ bind(label->label());
1078 current_block_ = label->block_id(); 989 current_block_ = label->block_id();
1079 DoGap(label); 990 DoGap(label);
1080 } 991 }
1081 992
1082 993
1083 void LCodeGen::DoParallelMove(LParallelMove* move) { 994 void LCodeGen::DoParallelMove(LParallelMove* move) { resolver_.Resolve(move); }
1084 resolver_.Resolve(move);
1085 }
1086 995
1087 996
1088 void LCodeGen::DoGap(LGap* gap) { 997 void LCodeGen::DoGap(LGap* gap) {
1089 for (int i = LGap::FIRST_INNER_POSITION; 998 for (int i = LGap::FIRST_INNER_POSITION; i <= LGap::LAST_INNER_POSITION;
1090 i <= LGap::LAST_INNER_POSITION;
1091 i++) { 999 i++) {
1092 LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i); 1000 LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
1093 LParallelMove* move = gap->GetParallelMove(inner_pos); 1001 LParallelMove* move = gap->GetParallelMove(inner_pos);
1094 if (move != NULL) DoParallelMove(move); 1002 if (move != NULL) DoParallelMove(move);
1095 } 1003 }
1096 } 1004 }
1097 1005
1098 1006
1099 void LCodeGen::DoInstructionGap(LInstructionGap* instr) { 1007 void LCodeGen::DoInstructionGap(LInstructionGap* instr) { DoGap(instr); }
1100 DoGap(instr);
1101 }
1102 1008
1103 1009
1104 void LCodeGen::DoParameter(LParameter* instr) { 1010 void LCodeGen::DoParameter(LParameter* instr) {
1105 // Nothing to do. 1011 // Nothing to do.
1106 } 1012 }
1107 1013
1108 1014
1109 void LCodeGen::DoCallStub(LCallStub* instr) { 1015 void LCodeGen::DoCallStub(LCallStub* instr) {
1110 DCHECK(ToRegister(instr->context()).is(cp)); 1016 DCHECK(ToRegister(instr->context()).is(cp));
1111 DCHECK(ToRegister(instr->result()).is(r0)); 1017 DCHECK(ToRegister(instr->result()).is(r3));
1112 switch (instr->hydrogen()->major_key()) { 1018 switch (instr->hydrogen()->major_key()) {
1113 case CodeStub::RegExpExec: { 1019 case CodeStub::RegExpExec: {
1114 RegExpExecStub stub(isolate()); 1020 RegExpExecStub stub(isolate());
1115 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 1021 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1116 break; 1022 break;
1117 } 1023 }
1118 case CodeStub::SubString: { 1024 case CodeStub::SubString: {
1119 SubStringStub stub(isolate()); 1025 SubStringStub stub(isolate());
1120 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 1026 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1121 break; 1027 break;
(...skipping 19 matching lines...)
1141 int32_t divisor = instr->divisor(); 1047 int32_t divisor = instr->divisor();
1142 DCHECK(dividend.is(ToRegister(instr->result()))); 1048 DCHECK(dividend.is(ToRegister(instr->result())));
1143 1049
1144 // Theoretically, a variation of the branch-free code for integer division by 1050 // Theoretically, a variation of the branch-free code for integer division by
1145 // a power of 2 (calculating the remainder via an additional multiplication 1051 // a power of 2 (calculating the remainder via an additional multiplication
1146 // (which gets simplified to an 'and') and subtraction) should be faster, and 1052 // (which gets simplified to an 'and') and subtraction) should be faster, and
1147 // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to 1053 // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
1148 // indicate that positive dividends are heavily favored, so the branching 1054 // indicate that positive dividends are heavily favored, so the branching
1149 // version performs better. 1055 // version performs better.
1150 HMod* hmod = instr->hydrogen(); 1056 HMod* hmod = instr->hydrogen();
1151 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); 1057 int32_t shift = WhichPowerOf2Abs(divisor);
1152 Label dividend_is_not_negative, done; 1058 Label dividend_is_not_negative, done;
1153 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { 1059 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
1154 __ cmp(dividend, Operand::Zero()); 1060 __ cmpwi(dividend, Operand::Zero());
1155 __ b(pl, &dividend_is_not_negative); 1061 __ bge(&dividend_is_not_negative);
1156 // Note that this is correct even for kMinInt operands. 1062 if (shift) {
1157 __ rsb(dividend, dividend, Operand::Zero()); 1063 // Note that this is correct even for kMinInt operands.
1158 __ and_(dividend, dividend, Operand(mask)); 1064 __ neg(dividend, dividend);
1159 __ rsb(dividend, dividend, Operand::Zero(), SetCC); 1065 __ ExtractBitRange(dividend, dividend, shift - 1, 0);
1160 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { 1066 __ neg(dividend, dividend, LeaveOE, SetRC);
1161 DeoptimizeIf(eq, instr, "minus zero"); 1067 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1068 DeoptimizeIf(eq, instr, "minus zero", cr0);
1069 }
1070 } else if (!hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1071 __ li(dividend, Operand::Zero());
1072 } else {
1073 DeoptimizeIf(al, instr, "minus zero");
1162 } 1074 }
1163 __ b(&done); 1075 __ b(&done);
1164 } 1076 }
1165 1077
1166 __ bind(&dividend_is_not_negative); 1078 __ bind(&dividend_is_not_negative);
1167 __ and_(dividend, dividend, Operand(mask)); 1079 if (shift) {
1080 __ ExtractBitRange(dividend, dividend, shift - 1, 0);
1081 } else {
1082 __ li(dividend, Operand::Zero());
1083 }
1168 __ bind(&done); 1084 __ bind(&done);
1169 } 1085 }
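For reference, the branching power-of-two modulus that DoModByPowerOf2I emits can be written as a standalone C++ sketch (illustrative only, not part of the patch; the helper name is made up). The negation trick stays correct for kMinInt because the arithmetic wraps modulo 2^32:

#include <cstdint>

// n % d for d == +/-2^shift, with the sign of the result following the
// dividend (JS semantics). Mirrors the neg / ExtractBitRange / neg
// sequence emitted above; correct even for n == INT32_MIN because the
// negation is done in unsigned (wrapping) arithmetic.
int32_t ModByPowerOf2(int32_t n, int shift) {  // shift in [1, 31]
  uint32_t mask = (uint32_t{1} << shift) - 1;
  if (n < 0) {
    uint32_t m = (0u - static_cast<uint32_t>(n)) & mask;
    return -static_cast<int32_t>(m);  // 0 here is the "minus zero" deopt case
  }
  return static_cast<int32_t>(static_cast<uint32_t>(n) & mask);
}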
1170 1086
1171 1087
1172 void LCodeGen::DoModByConstI(LModByConstI* instr) { 1088 void LCodeGen::DoModByConstI(LModByConstI* instr) {
1173 Register dividend = ToRegister(instr->dividend()); 1089 Register dividend = ToRegister(instr->dividend());
1174 int32_t divisor = instr->divisor(); 1090 int32_t divisor = instr->divisor();
1175 Register result = ToRegister(instr->result()); 1091 Register result = ToRegister(instr->result());
1176 DCHECK(!dividend.is(result)); 1092 DCHECK(!dividend.is(result));
1177 1093
1178 if (divisor == 0) { 1094 if (divisor == 0) {
1179 DeoptimizeIf(al, instr, "division by zero"); 1095 DeoptimizeIf(al, instr, "division by zero");
1180 return; 1096 return;
1181 } 1097 }
1182 1098
1183 __ TruncatingDiv(result, dividend, Abs(divisor)); 1099 __ TruncatingDiv(result, dividend, Abs(divisor));
1184 __ mov(ip, Operand(Abs(divisor))); 1100 __ mov(ip, Operand(Abs(divisor)));
1185 __ smull(result, ip, result, ip); 1101 __ mullw(result, result, ip);
1186 __ sub(result, dividend, result, SetCC); 1102 __ sub(result, dividend, result, LeaveOE, SetRC);
1187 1103
1188 // Check for negative zero. 1104 // Check for negative zero.
1189 HMod* hmod = instr->hydrogen(); 1105 HMod* hmod = instr->hydrogen();
1190 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { 1106 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1191 Label remainder_not_zero; 1107 Label remainder_not_zero;
1192 __ b(ne, &remainder_not_zero); 1108 __ bne(&remainder_not_zero, cr0);
1193 __ cmp(dividend, Operand::Zero()); 1109 __ cmpwi(dividend, Operand::Zero());
1194 DeoptimizeIf(lt, instr, "minus zero"); 1110 DeoptimizeIf(lt, instr, "minus zero");
1195 __ bind(&remainder_not_zero); 1111 __ bind(&remainder_not_zero);
1196 } 1112 }
1197 } 1113 }
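The structure of DoModByConstI, with plain '/' standing in for TruncatingDiv's multiply-by-magic-number sequence, is roughly this sketch (hypothetical helper, not part of the patch):

#include <cstdint>
#include <cstdlib>

// r = a - trunc(a / |b|) * |b|. Truncating division rounds toward zero,
// so the remainder's sign follows the dividend and the divisor's sign
// drops out, exactly as with the mullw + sub above.
int32_t ModByConst(int32_t a, int32_t b) {  // assumes b != 0 and b != INT32_MIN
  int32_t abs_b = std::abs(b);
  return a - (a / abs_b) * abs_b;
}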
1198 1114
1199 1115
1200 void LCodeGen::DoModI(LModI* instr) { 1116 void LCodeGen::DoModI(LModI* instr) {
1201 HMod* hmod = instr->hydrogen(); 1117 HMod* hmod = instr->hydrogen();
1202 if (CpuFeatures::IsSupported(SUDIV)) { 1118 Register left_reg = ToRegister(instr->left());
1203 CpuFeatureScope scope(masm(), SUDIV); 1119 Register right_reg = ToRegister(instr->right());
1120 Register result_reg = ToRegister(instr->result());
1121 Register scratch = scratch0();
1122 Label done;
1204 1123
1205 Register left_reg = ToRegister(instr->left()); 1124 if (hmod->CheckFlag(HValue::kCanOverflow)) {
1206 Register right_reg = ToRegister(instr->right()); 1125 __ li(r0, Operand::Zero()); // clear xer
1207 Register result_reg = ToRegister(instr->result()); 1126 __ mtxer(r0);
1127 }
1208 1128
1209 Label done; 1129 __ divw(scratch, left_reg, right_reg, SetOE, SetRC);
1210 // Check for x % 0, sdiv might signal an exception. We have to deopt in this 1130
1211 // case because we can't return a NaN. 1131 // Check for x % 0.
1212 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { 1132 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
1213 __ cmp(right_reg, Operand::Zero()); 1133 __ cmpwi(right_reg, Operand::Zero());
1214 DeoptimizeIf(eq, instr, "division by zero"); 1134 DeoptimizeIf(eq, instr, "division by zero");
1135 }
1136
1137 // Check for kMinInt % -1, divw will return undefined, which is not what we
1138 // want. We have to deopt if we care about -0, because we can't return that.
1139 if (hmod->CheckFlag(HValue::kCanOverflow)) {
1140 Label no_overflow_possible;
1141 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1142 DeoptimizeIf(overflow, instr, "minus zero", cr0);
1143 } else {
1144 __ bnooverflow(&no_overflow_possible, cr0);
1145 __ li(result_reg, Operand::Zero());
1146 __ b(&done);
1215 } 1147 }
1148 __ bind(&no_overflow_possible);
1149 }
1216 1150
1217 // Check for kMinInt % -1, sdiv will return kMinInt, which is not what we 1151 __ mullw(scratch, right_reg, scratch);
1218 // want. We have to deopt if we care about -0, because we can't return that. 1152 __ sub(result_reg, left_reg, scratch, LeaveOE, SetRC);
1219 if (hmod->CheckFlag(HValue::kCanOverflow)) {
1220 Label no_overflow_possible;
1221 __ cmp(left_reg, Operand(kMinInt));
1222 __ b(ne, &no_overflow_possible);
1223 __ cmp(right_reg, Operand(-1));
1224 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1225 DeoptimizeIf(eq, instr, "minus zero");
1226 } else {
1227 __ b(ne, &no_overflow_possible);
1228 __ mov(result_reg, Operand::Zero());
1229 __ jmp(&done);
1230 }
1231 __ bind(&no_overflow_possible);
1232 }
1233 1153
1234 // For 'r3 = r1 % r2' we can have the following ARM code: 1154 // If we care about -0, test if the dividend is <0 and the result is 0.
1235 // sdiv r3, r1, r2 1155 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1236 // mls r3, r3, r2, r1 1156 __ bne(&done, cr0);
1157 __ cmpwi(left_reg, Operand::Zero());
1158 DeoptimizeIf(lt, instr, "minus zero");
1159 }
1237 1160
1238 __ sdiv(result_reg, left_reg, right_reg); 1161 __ bind(&done);
1239 __ Mls(result_reg, result_reg, right_reg, left_reg);
1240
1241 // If we care about -0, test if the dividend is <0 and the result is 0.
1242 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1243 __ cmp(result_reg, Operand::Zero());
1244 __ b(ne, &done);
1245 __ cmp(left_reg, Operand::Zero());
1246 DeoptimizeIf(lt, instr, "minus zero");
1247 }
1248 __ bind(&done);
1249
1250 } else {
1251 // General case, without any SDIV support.
1252 Register left_reg = ToRegister(instr->left());
1253 Register right_reg = ToRegister(instr->right());
1254 Register result_reg = ToRegister(instr->result());
1255 Register scratch = scratch0();
1256 DCHECK(!scratch.is(left_reg));
1257 DCHECK(!scratch.is(right_reg));
1258 DCHECK(!scratch.is(result_reg));
1259 DwVfpRegister dividend = ToDoubleRegister(instr->temp());
1260 DwVfpRegister divisor = ToDoubleRegister(instr->temp2());
1261 DCHECK(!divisor.is(dividend));
1262 LowDwVfpRegister quotient = double_scratch0();
1263 DCHECK(!quotient.is(dividend));
1264 DCHECK(!quotient.is(divisor));
1265
1266 Label done;
1267 // Check for x % 0, we have to deopt in this case because we can't return a
1268 // NaN.
1269 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
1270 __ cmp(right_reg, Operand::Zero());
1271 DeoptimizeIf(eq, instr, "division by zero");
1272 }
1273
1274 __ Move(result_reg, left_reg);
1275 // Load the arguments in VFP registers. The divisor value is preloaded
1276 // before. Be careful that 'right_reg' is only live on entry.
1277 // TODO(svenpanne) The last comments seems to be wrong nowadays.
1278 __ vmov(double_scratch0().low(), left_reg);
1279 __ vcvt_f64_s32(dividend, double_scratch0().low());
1280 __ vmov(double_scratch0().low(), right_reg);
1281 __ vcvt_f64_s32(divisor, double_scratch0().low());
1282
1283 // We do not care about the sign of the divisor. Note that we still handle
1284 // the kMinInt % -1 case correctly, though.
1285 __ vabs(divisor, divisor);
1286 // Compute the quotient and round it to a 32bit integer.
1287 __ vdiv(quotient, dividend, divisor);
1288 __ vcvt_s32_f64(quotient.low(), quotient);
1289 __ vcvt_f64_s32(quotient, quotient.low());
1290
1291 // Compute the remainder in result.
1292 __ vmul(double_scratch0(), divisor, quotient);
1293 __ vcvt_s32_f64(double_scratch0().low(), double_scratch0());
1294 __ vmov(scratch, double_scratch0().low());
1295 __ sub(result_reg, left_reg, scratch, SetCC);
1296
1297 // If we care about -0, test if the dividend is <0 and the result is 0.
1298 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1299 __ b(ne, &done);
1300 __ cmp(left_reg, Operand::Zero());
1301 DeoptimizeIf(mi, instr, "minus zero");
1302 }
1303 __ bind(&done);
1304 }
1305 } 1162 }
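The divide/multiply/subtract scheme of DoModI corresponds to the following C++ sketch, with the two cases where divw's result is undefined guarded explicitly (function and flag names are illustrative; the bailouts are reported through a flag here rather than a deopt):

#include <climits>
#include <cstdint>

int32_t ModI(int32_t left, int32_t right, bool* bailout) {
  if (right == 0) { *bailout = true; return 0; }    // "division by zero"
  if (left == INT32_MIN && right == -1) return 0;   // divw result undefined here
  int32_t q = left / right;                         // divw
  return left - q * right;                          // mullw + sub
}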
1306 1163
1307 1164
1308 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { 1165 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
1309 Register dividend = ToRegister(instr->dividend()); 1166 Register dividend = ToRegister(instr->dividend());
1310 int32_t divisor = instr->divisor(); 1167 int32_t divisor = instr->divisor();
1311 Register result = ToRegister(instr->result()); 1168 Register result = ToRegister(instr->result());
1312 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); 1169 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
1313 DCHECK(!result.is(dividend)); 1170 DCHECK(!result.is(dividend));
1314 1171
1315 // Check for (0 / -x) that will produce negative zero. 1172 // Check for (0 / -x) that will produce negative zero.
1316 HDiv* hdiv = instr->hydrogen(); 1173 HDiv* hdiv = instr->hydrogen();
1317 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { 1174 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1318 __ cmp(dividend, Operand::Zero()); 1175 __ cmpwi(dividend, Operand::Zero());
1319 DeoptimizeIf(eq, instr, "minus zero"); 1176 DeoptimizeIf(eq, instr, "minus zero");
1320 } 1177 }
1321 // Check for (kMinInt / -1). 1178 // Check for (kMinInt / -1).
1322 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { 1179 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
1323 __ cmp(dividend, Operand(kMinInt)); 1180 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
1181 __ cmpw(dividend, r0);
1324 DeoptimizeIf(eq, instr, "overflow"); 1182 DeoptimizeIf(eq, instr, "overflow");
1325 } 1183 }
1184
1185 int32_t shift = WhichPowerOf2Abs(divisor);
1186
1326 // Deoptimize if remainder will not be 0. 1187 // Deoptimize if remainder will not be 0.
1327 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && 1188 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && shift) {
1328 divisor != 1 && divisor != -1) { 1189 __ TestBitRange(dividend, shift - 1, 0, r0);
1329 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); 1190 DeoptimizeIf(ne, instr, "lost precision", cr0);
1330 __ tst(dividend, Operand(mask));
1331 DeoptimizeIf(ne, instr, "lost precision");
1332 } 1191 }
1333 1192
1334 if (divisor == -1) { // Nice shortcut, not needed for correctness. 1193 if (divisor == -1) { // Nice shortcut, not needed for correctness.
1335 __ rsb(result, dividend, Operand(0)); 1194 __ neg(result, dividend);
1336 return; 1195 return;
1337 } 1196 }
1338 int32_t shift = WhichPowerOf2Abs(divisor);
1339 if (shift == 0) { 1197 if (shift == 0) {
1340 __ mov(result, dividend); 1198 __ mr(result, dividend);
1341 } else if (shift == 1) {
1342 __ add(result, dividend, Operand(dividend, LSR, 31));
1343 } else { 1199 } else {
1344 __ mov(result, Operand(dividend, ASR, 31)); 1200 if (shift == 1) {
1345 __ add(result, dividend, Operand(result, LSR, 32 - shift)); 1201 __ srwi(result, dividend, Operand(31));
1202 } else {
1203 __ srawi(result, dividend, 31);
1204 __ srwi(result, result, Operand(32 - shift));
1205 }
1206 __ add(result, dividend, result);
1207 __ srawi(result, result, shift);
1346 } 1208 }
1347 if (shift > 0) __ mov(result, Operand(result, ASR, shift)); 1209 if (divisor < 0) __ neg(result, result);
1348 if (divisor < 0) __ rsb(result, result, Operand(0));
1349 } 1210 }
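The srawi/srwi/add sequence above is the classic bias trick for truncating division by a power of two. A standalone sketch, assuming arithmetic right shift on signed values (guaranteed since C++20, and true on PPC):

#include <cstdint>

// trunc(n / 2^shift): negative dividends get 2^shift - 1 added before
// the arithmetic shift so the result rounds toward zero, not -infinity.
int32_t TruncDivByPowerOf2(int32_t n, int shift) {  // shift in [1, 31]
  int32_t sign = n >> 31;                            // srawi: 0 or -1
  int32_t bias = static_cast<int32_t>(
      static_cast<uint32_t>(sign) >> (32 - shift));  // srwi: 0 or 2^shift - 1
  return (n + bias) >> shift;                        // add + srawi
}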
1350 1211
1351 1212
1352 void LCodeGen::DoDivByConstI(LDivByConstI* instr) { 1213 void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
1353 Register dividend = ToRegister(instr->dividend()); 1214 Register dividend = ToRegister(instr->dividend());
1354 int32_t divisor = instr->divisor(); 1215 int32_t divisor = instr->divisor();
1355 Register result = ToRegister(instr->result()); 1216 Register result = ToRegister(instr->result());
1356 DCHECK(!dividend.is(result)); 1217 DCHECK(!dividend.is(result));
1357 1218
1358 if (divisor == 0) { 1219 if (divisor == 0) {
1359 DeoptimizeIf(al, instr, "division by zero"); 1220 DeoptimizeIf(al, instr, "division by zero");
1360 return; 1221 return;
1361 } 1222 }
1362 1223
1363 // Check for (0 / -x) that will produce negative zero. 1224 // Check for (0 / -x) that will produce negative zero.
1364 HDiv* hdiv = instr->hydrogen(); 1225 HDiv* hdiv = instr->hydrogen();
1365 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { 1226 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1366 __ cmp(dividend, Operand::Zero()); 1227 __ cmpwi(dividend, Operand::Zero());
1367 DeoptimizeIf(eq, instr, "minus zero"); 1228 DeoptimizeIf(eq, instr, "minus zero");
1368 } 1229 }
1369 1230
1370 __ TruncatingDiv(result, dividend, Abs(divisor)); 1231 __ TruncatingDiv(result, dividend, Abs(divisor));
1371 if (divisor < 0) __ rsb(result, result, Operand::Zero()); 1232 if (divisor < 0) __ neg(result, result);
1372 1233
1373 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { 1234 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
1235 Register scratch = scratch0();
1374 __ mov(ip, Operand(divisor)); 1236 __ mov(ip, Operand(divisor));
1375 __ smull(scratch0(), ip, result, ip); 1237 __ mullw(scratch, result, ip);
1376 __ sub(scratch0(), scratch0(), dividend, SetCC); 1238 __ cmpw(scratch, dividend);
1377 DeoptimizeIf(ne, instr, "lost precision"); 1239 DeoptimizeIf(ne, instr, "lost precision");
1378 } 1240 }
1379 } 1241 }
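The "lost precision" check at the end is a multiply-back comparison; in C++ terms, with the wrap-around 32-bit multiply that mullw performs made explicit (sketch, not part of the patch):

#include <cstdint>

// Exactness test for q = trunc(a / b): multiply the quotient back and
// compare with the dividend. Any difference means a nonzero remainder.
bool DivisionWasExact(int32_t a, int32_t b, int32_t q) {
  uint32_t product = static_cast<uint32_t>(q) * static_cast<uint32_t>(b);  // mullw
  return static_cast<int32_t>(product) == a;                               // cmpw
}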
1380 1242
1381 1243
1382 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. 1244 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
1383 void LCodeGen::DoDivI(LDivI* instr) { 1245 void LCodeGen::DoDivI(LDivI* instr) {
1384 HBinaryOperation* hdiv = instr->hydrogen(); 1246 HBinaryOperation* hdiv = instr->hydrogen();
1385 Register dividend = ToRegister(instr->dividend()); 1247 const Register dividend = ToRegister(instr->dividend());
1386 Register divisor = ToRegister(instr->divisor()); 1248 const Register divisor = ToRegister(instr->divisor());
1387 Register result = ToRegister(instr->result()); 1249 Register result = ToRegister(instr->result());
1388 1250
1251 DCHECK(!dividend.is(result));
1252 DCHECK(!divisor.is(result));
1253
1254 if (hdiv->CheckFlag(HValue::kCanOverflow)) {
1255 __ li(r0, Operand::Zero()); // clear xer
1256 __ mtxer(r0);
1257 }
1258
1259 __ divw(result, dividend, divisor, SetOE, SetRC);
1260
1389 // Check for x / 0. 1261 // Check for x / 0.
1390 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { 1262 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1391 __ cmp(divisor, Operand::Zero()); 1263 __ cmpwi(divisor, Operand::Zero());
1392 DeoptimizeIf(eq, instr, "division by zero"); 1264 DeoptimizeIf(eq, instr, "division by zero");
1393 } 1265 }
1394 1266
1395 // Check for (0 / -x) that will produce negative zero. 1267 // Check for (0 / -x) that will produce negative zero.
1396 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { 1268 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1397 Label positive; 1269 Label dividend_not_zero;
1398 if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) { 1270 __ cmpwi(dividend, Operand::Zero());
1399 // Do the test only if it hadn't been done above. 1271 __ bne(&dividend_not_zero);

1400 __ cmp(divisor, Operand::Zero()); 1272 __ cmpwi(divisor, Operand::Zero());
1401 } 1273 DeoptimizeIf(lt, instr, "minus zero");
1402 __ b(pl, &positive); 1274 __ bind(&dividend_not_zero);
1403 __ cmp(dividend, Operand::Zero());
1404 DeoptimizeIf(eq, instr, "minus zero");
1405 __ bind(&positive);
1406 } 1275 }
1407 1276
1408 // Check for (kMinInt / -1). 1277 // Check for (kMinInt / -1).
1409 if (hdiv->CheckFlag(HValue::kCanOverflow) && 1278 if (hdiv->CheckFlag(HValue::kCanOverflow)) {
1410 (!CpuFeatures::IsSupported(SUDIV) || 1279 Label no_overflow_possible;
1411 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) { 1280 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1412 // We don't need to check for overflow when truncating with sdiv 1281 DeoptimizeIf(overflow, instr, "overflow", cr0);
1413 // support because, on ARM, sdiv kMinInt, -1 -> kMinInt. 1282 } else {
1414 __ cmp(dividend, Operand(kMinInt)); 1283 // When truncating, we want kMinInt / -1 = kMinInt.
1415 __ cmp(divisor, Operand(-1), eq); 1284 __ bnooverflow(&no_overflow_possible, cr0);
1416 DeoptimizeIf(eq, instr, "overflow"); 1285 __ mr(result, dividend);
1286 }
1287 __ bind(&no_overflow_possible);
1417 } 1288 }
1418 1289
1419 if (CpuFeatures::IsSupported(SUDIV)) { 1290 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
1420 CpuFeatureScope scope(masm(), SUDIV); 1291 // Deoptimize if remainder is not 0.
1421 __ sdiv(result, dividend, divisor); 1292 Register scratch = scratch0();
1422 } else { 1293 __ mullw(scratch, divisor, result);
1423 DoubleRegister vleft = ToDoubleRegister(instr->temp()); 1294 __ cmpw(dividend, scratch);
1424 DoubleRegister vright = double_scratch0();
1425 __ vmov(double_scratch0().low(), dividend);
1426 __ vcvt_f64_s32(vleft, double_scratch0().low());
1427 __ vmov(double_scratch0().low(), divisor);
1428 __ vcvt_f64_s32(vright, double_scratch0().low());
1429 __ vdiv(vleft, vleft, vright); // vleft now contains the result.
1430 __ vcvt_s32_f64(double_scratch0().low(), vleft);
1431 __ vmov(result, double_scratch0().low());
1432 }
1433
1434 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1435 // Compute remainder and deopt if it's not zero.
1436 Register remainder = scratch0();
1437 __ Mls(remainder, result, divisor, dividend);
1438 __ cmp(remainder, Operand::Zero());
1439 DeoptimizeIf(ne, instr, "lost precision"); 1295 DeoptimizeIf(ne, instr, "lost precision");
1440 } 1296 }
1441 } 1297 }
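Collected in one place, the bailout conditions DoDivI checks around divw look like this sketch (illustrative names; 'truncating' is kAllUsesTruncatingToInt32, and the -0 check is shown unconditionally although the real code emits it only under kBailoutOnMinusZero):

#include <climits>
#include <cstdint>

int32_t DivI(int32_t a, int32_t b, bool truncating, bool* bailout) {
  if (b == 0) { *bailout = true; return 0; }           // division by zero
  if (a == 0 && b < 0) { *bailout = true; return 0; }  // result would be -0
  if (a == INT32_MIN && b == -1) {                     // divw overflow case
    if (!truncating) { *bailout = true; return 0; }
    return INT32_MIN;                                  // kMinInt / -1 truncates to kMinInt
  }
  int32_t q = a / b;
  if (!truncating && q * b != a) { *bailout = true; return 0; }  // lost precision
  return q;
}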
1442 1298
1443 1299
1444 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
1445 DwVfpRegister addend = ToDoubleRegister(instr->addend());
1446 DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
1447 DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());
1448
1449 // This is computed in-place.
1450 DCHECK(addend.is(ToDoubleRegister(instr->result())));
1451
1452 __ vmla(addend, multiplier, multiplicand);
1453 }
1454
1455
1456 void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
1457 DwVfpRegister minuend = ToDoubleRegister(instr->minuend());
1458 DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
1459 DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());
1460
1461 // This is computed in-place.
1462 DCHECK(minuend.is(ToDoubleRegister(instr->result())));
1463
1464 __ vmls(minuend, multiplier, multiplicand);
1465 }
1466
1467
1468 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) { 1300 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
1301 HBinaryOperation* hdiv = instr->hydrogen();
1469 Register dividend = ToRegister(instr->dividend()); 1302 Register dividend = ToRegister(instr->dividend());
1470 Register result = ToRegister(instr->result()); 1303 Register result = ToRegister(instr->result());
1471 int32_t divisor = instr->divisor(); 1304 int32_t divisor = instr->divisor();
1472 1305
1473 // If the divisor is 1, return the dividend.
1474 if (divisor == 1) {
1475 __ Move(result, dividend);
1476 return;
1477 }
1478
1479 // If the divisor is positive, things are easy: There can be no deopts and we 1306 // If the divisor is positive, things are easy: There can be no deopts and we
1480 // can simply do an arithmetic right shift. 1307 // can simply do an arithmetic right shift.
1481 int32_t shift = WhichPowerOf2Abs(divisor); 1308 int32_t shift = WhichPowerOf2Abs(divisor);
1482 if (divisor > 1) { 1309 if (divisor > 0) {
1483 __ mov(result, Operand(dividend, ASR, shift)); 1310 if (shift || !result.is(dividend)) {
1311 __ srawi(result, dividend, shift);
1312 }
1484 return; 1313 return;
1485 } 1314 }
1486 1315
1487 // If the divisor is negative, we have to negate and handle edge cases. 1316 // If the divisor is negative, we have to negate and handle edge cases.
1488 __ rsb(result, dividend, Operand::Zero(), SetCC); 1317 OEBit oe = LeaveOE;
1489 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 1318 #if V8_TARGET_ARCH_PPC64
1490 DeoptimizeIf(eq, instr, "minus zero"); 1319 if (divisor == -1 && hdiv->CheckFlag(HValue::kLeftCanBeMinInt)) {
1320 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
1321 __ cmpw(dividend, r0);
1322 DeoptimizeIf(eq, instr, "overflow");
1323 }
1324 #else
1325 if (hdiv->CheckFlag(HValue::kLeftCanBeMinInt)) {
1326 __ li(r0, Operand::Zero()); // clear xer
1327 __ mtxer(r0);
1328 oe = SetOE;
1329 }
1330 #endif
1331
1332 __ neg(result, dividend, oe, SetRC);
1333 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1334 DeoptimizeIf(eq, instr, "minus zero", cr0);
1335 }
1336
1337 // If the negation could not overflow, simply shifting is OK.
1338 #if !V8_TARGET_ARCH_PPC64
1339 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1340 #endif
1341 if (shift) {
1342 __ ShiftRightArithImm(result, result, shift);
1343 }
1344 return;
1345 #if !V8_TARGET_ARCH_PPC64
1491 } 1346 }
1492 1347
1493 // Dividing by -1 is basically negation, unless we overflow. 1348 // Dividing by -1 is basically negation, unless we overflow.
1494 if (divisor == -1) { 1349 if (divisor == -1) {
1495 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { 1350 DeoptimizeIf(overflow, instr, "overflow", cr0);
1496 DeoptimizeIf(vs, instr, "overflow");
1497 }
1498 return; 1351 return;
1499 } 1352 }
1500 1353
1501 // If the negation could not overflow, simply shifting is OK. 1354 Label overflow, done;
1502 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { 1355 __ boverflow(&overflow, cr0);
1503 __ mov(result, Operand(result, ASR, shift)); 1356 __ srawi(result, result, shift);
1504 return; 1357 __ b(&done);
1505 } 1358 __ bind(&overflow);
1506 1359 __ mov(result, Operand(kMinInt / divisor));
1507 __ mov(result, Operand(kMinInt / divisor), LeaveCC, vs); 1360 __ bind(&done);
1508 __ mov(result, Operand(result, ASR, shift), LeaveCC, vc); 1361 #endif
1509 } 1362 }
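For the flooring variant, an arithmetic shift already floors when the divisor is positive; only negative divisors need the negate-first dance. A sketch of the semantics, with the kBailoutOnMinusZero path left out for brevity (names illustrative):

#include <climits>
#include <cstdint>

int32_t FlooringDivByPowerOf2(int32_t n, int32_t divisor, int shift,
                              bool* bailout) {
  if (divisor > 0) return n >> shift;   // arithmetic shift == floor here
  if (n == INT32_MIN) {                 // negation would overflow
    if (divisor == -1) { *bailout = true; return 0; }
    return INT32_MIN / divisor;         // exact: +2^(31 - shift)
  }
  return -n >> shift;                   // floor(n / -(2^shift))
}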
1510 1363
1511 1364
1512 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { 1365 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
1513 Register dividend = ToRegister(instr->dividend()); 1366 Register dividend = ToRegister(instr->dividend());
1514 int32_t divisor = instr->divisor(); 1367 int32_t divisor = instr->divisor();
1515 Register result = ToRegister(instr->result()); 1368 Register result = ToRegister(instr->result());
1516 DCHECK(!dividend.is(result)); 1369 DCHECK(!dividend.is(result));
1517 1370
1518 if (divisor == 0) { 1371 if (divisor == 0) {
1519 DeoptimizeIf(al, instr, "division by zero"); 1372 DeoptimizeIf(al, instr, "division by zero");
1520 return; 1373 return;
1521 } 1374 }
1522 1375
1523 // Check for (0 / -x) that will produce negative zero. 1376 // Check for (0 / -x) that will produce negative zero.
1524 HMathFloorOfDiv* hdiv = instr->hydrogen(); 1377 HMathFloorOfDiv* hdiv = instr->hydrogen();
1525 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { 1378 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1526 __ cmp(dividend, Operand::Zero()); 1379 __ cmpwi(dividend, Operand::Zero());
1527 DeoptimizeIf(eq, instr, "minus zero"); 1380 DeoptimizeIf(eq, instr, "minus zero");
1528 } 1381 }
1529 1382
1530 // Easy case: We need no dynamic check for the dividend and the flooring 1383 // Easy case: We need no dynamic check for the dividend and the flooring
1531 // division is the same as the truncating division. 1384 // division is the same as the truncating division.
1532 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || 1385 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
1533 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { 1386 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
1534 __ TruncatingDiv(result, dividend, Abs(divisor)); 1387 __ TruncatingDiv(result, dividend, Abs(divisor));
1535 if (divisor < 0) __ rsb(result, result, Operand::Zero()); 1388 if (divisor < 0) __ neg(result, result);
1536 return; 1389 return;
1537 } 1390 }
1538 1391
1539 // In the general case we may need to adjust before and after the truncating 1392 // In the general case we may need to adjust before and after the truncating
1540 // division to get a flooring division. 1393 // division to get a flooring division.
1541 Register temp = ToRegister(instr->temp()); 1394 Register temp = ToRegister(instr->temp());
1542 DCHECK(!temp.is(dividend) && !temp.is(result)); 1395 DCHECK(!temp.is(dividend) && !temp.is(result));
1543 Label needs_adjustment, done; 1396 Label needs_adjustment, done;
1544 __ cmp(dividend, Operand::Zero()); 1397 __ cmpwi(dividend, Operand::Zero());
1545 __ b(divisor > 0 ? lt : gt, &needs_adjustment); 1398 __ b(divisor > 0 ? lt : gt, &needs_adjustment);
1546 __ TruncatingDiv(result, dividend, Abs(divisor)); 1399 __ TruncatingDiv(result, dividend, Abs(divisor));
1547 if (divisor < 0) __ rsb(result, result, Operand::Zero()); 1400 if (divisor < 0) __ neg(result, result);
1548 __ jmp(&done); 1401 __ b(&done);
1549 __ bind(&needs_adjustment); 1402 __ bind(&needs_adjustment);
1550 __ add(temp, dividend, Operand(divisor > 0 ? 1 : -1)); 1403 __ addi(temp, dividend, Operand(divisor > 0 ? 1 : -1));
1551 __ TruncatingDiv(result, temp, Abs(divisor)); 1404 __ TruncatingDiv(result, temp, Abs(divisor));
1552 if (divisor < 0) __ rsb(result, result, Operand::Zero()); 1405 if (divisor < 0) __ neg(result, result);
1553 __ sub(result, result, Operand(1)); 1406 __ subi(result, result, Operand(1));
1554 __ bind(&done); 1407 __ bind(&done);
1555 } 1408 }
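The adjust-before-and-after trick in the general case is easiest to see in scalar form. A sketch with '/' once more standing in for TruncatingDiv (assumes b != 0 and b != INT32_MIN; helper name is made up):

#include <cstdint>
#include <cstdlib>

// Flooring division via truncating division: when the dividend's sign
// makes the two differ, nudge the dividend toward zero by one, truncate,
// then subtract one, mirroring the addi / TruncatingDiv / subi above.
int32_t FlooringDivByConst(int32_t a, int32_t b) {
  bool needs_adjustment = (b > 0) ? (a < 0) : (a > 0);
  if (!needs_adjustment) {
    int32_t q = a / std::abs(b);
    return (b < 0) ? -q : q;
  }
  int32_t t = a + ((b > 0) ? 1 : -1);
  int32_t q = t / std::abs(b);
  if (b < 0) q = -q;
  return q - 1;
}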
1556 1409
1557 1410
1558 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI. 1411 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
1559 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) { 1412 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
1560 HBinaryOperation* hdiv = instr->hydrogen(); 1413 HBinaryOperation* hdiv = instr->hydrogen();
1561 Register left = ToRegister(instr->dividend()); 1414 const Register dividend = ToRegister(instr->dividend());
1562 Register right = ToRegister(instr->divisor()); 1415 const Register divisor = ToRegister(instr->divisor());
1563 Register result = ToRegister(instr->result()); 1416 Register result = ToRegister(instr->result());
1564 1417
1418 DCHECK(!dividend.is(result));
1419 DCHECK(!divisor.is(result));
1420
1421 if (hdiv->CheckFlag(HValue::kCanOverflow)) {
1422 __ li(r0, Operand::Zero()); // clear xer
1423 __ mtxer(r0);
1424 }
1425
1426 __ divw(result, dividend, divisor, SetOE, SetRC);
1427
1565 // Check for x / 0. 1428 // Check for x / 0.
1566 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { 1429 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1567 __ cmp(right, Operand::Zero()); 1430 __ cmpwi(divisor, Operand::Zero());
1568 DeoptimizeIf(eq, instr, "division by zero"); 1431 DeoptimizeIf(eq, instr, "division by zero");
1569 } 1432 }
1570 1433
1571 // Check for (0 / -x) that will produce negative zero. 1434 // Check for (0 / -x) that will produce negative zero.
1572 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { 1435 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1573 Label positive; 1436 Label dividend_not_zero;
1574 if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) { 1437 __ cmpwi(dividend, Operand::Zero());
1575 // Do the test only if it hadn't been done above. 1438 __ bne(&dividend_not_zero);
1576 __ cmp(right, Operand::Zero()); 1439 __ cmpwi(divisor, Operand::Zero());
1577 } 1440 DeoptimizeIf(lt, instr, "minus zero");
1578 __ b(pl, &positive); 1441 __ bind(&dividend_not_zero);
1579 __ cmp(left, Operand::Zero());
1580 DeoptimizeIf(eq, instr, "minus zero");
1581 __ bind(&positive);
1582 } 1442 }
1583 1443
1584 // Check for (kMinInt / -1). 1444 // Check for (kMinInt / -1).
1585 if (hdiv->CheckFlag(HValue::kCanOverflow) && 1445 if (hdiv->CheckFlag(HValue::kCanOverflow)) {
1586 (!CpuFeatures::IsSupported(SUDIV) || 1446 Label no_overflow_possible;
1587 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) { 1447 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1588 // We don't need to check for overflow when truncating with sdiv 1448 DeoptimizeIf(overflow, instr, "overflow", cr0);
1589 // support because, on ARM, sdiv kMinInt, -1 -> kMinInt. 1449 } else {
1590 __ cmp(left, Operand(kMinInt)); 1450 // When truncating, we want kMinInt / -1 = kMinInt.
1591 __ cmp(right, Operand(-1), eq); 1451 __ bnooverflow(&no_overflow_possible, cr0);
1592 DeoptimizeIf(eq, instr, "overflow"); 1452 __ mr(result, dividend);
1593 } 1453 }
1594 1454 __ bind(&no_overflow_possible);
1595 if (CpuFeatures::IsSupported(SUDIV)) {
1596 CpuFeatureScope scope(masm(), SUDIV);
1597 __ sdiv(result, left, right);
1598 } else {
1599 DoubleRegister vleft = ToDoubleRegister(instr->temp());
1600 DoubleRegister vright = double_scratch0();
1601 __ vmov(double_scratch0().low(), left);
1602 __ vcvt_f64_s32(vleft, double_scratch0().low());
1603 __ vmov(double_scratch0().low(), right);
1604 __ vcvt_f64_s32(vright, double_scratch0().low());
1605 __ vdiv(vleft, vleft, vright); // vleft now contains the result.
1606 __ vcvt_s32_f64(double_scratch0().low(), vleft);
1607 __ vmov(result, double_scratch0().low());
1608 } 1455 }
1609 1456
1610 Label done; 1457 Label done;
1611 Register remainder = scratch0(); 1458 Register scratch = scratch0();
1612 __ Mls(remainder, result, right, left); 1459 // If both operands have the same sign then we are done.
1613 __ cmp(remainder, Operand::Zero()); 1460 #if V8_TARGET_ARCH_PPC64
1614 __ b(eq, &done); 1461 __ xor_(scratch, dividend, divisor);
1615 __ eor(remainder, remainder, Operand(right)); 1462 __ cmpwi(scratch, Operand::Zero());
1616 __ add(result, result, Operand(remainder, ASR, 31)); 1463 __ bge(&done);
1464 #else
1465 __ xor_(scratch, dividend, divisor, SetRC);
1466 __ bge(&done, cr0);
1467 #endif
1468
1469 // If there is no remainder then we are done.
1470 __ mullw(scratch, divisor, result);
1471 __ cmpw(dividend, scratch);
1472 __ beq(&done);
1473
1474 // We performed a truncating division. Correct the result.
1475 __ subi(result, result, Operand(1));
1617 __ bind(&done); 1476 __ bind(&done);
1618 } 1477 }
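The tail of DoFlooringDivI is the standard truncating-to-flooring correction; as a sketch (zero-divisor and kMinInt/-1 guards omitted since they bail out earlier):

#include <cstdint>

int32_t FloorOfTruncatingDiv(int32_t a, int32_t b) {
  int32_t q = a / b;                  // divw (truncates toward zero)
  bool signs_differ = (a ^ b) < 0;    // xor + sign test
  bool inexact = q * b != a;          // mullw + cmpw
  if (signs_differ && inexact) q -= 1;  // subi
  return q;
}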
1619 1478
1620 1479
1480 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
1481 DoubleRegister addend = ToDoubleRegister(instr->addend());
1482 DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
1483 DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
1484 DoubleRegister result = ToDoubleRegister(instr->result());
1485
1486 __ fmadd(result, multiplier, multiplicand, addend);
1487 }
1488
1489
1490 void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
1491 DoubleRegister minuend = ToDoubleRegister(instr->minuend());
1492 DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
1493 DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
1494 DoubleRegister result = ToDoubleRegister(instr->result());
1495
1496 __ fmsub(result, multiplier, multiplicand, minuend);
1497 }
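Both helpers map one hydrogen instruction to one fused multiply: fmadd computes multiplier * multiplicand + addend and fmsub computes multiplier * multiplicand - minuend, each with a single rounding step. std::fma expresses the same contract portably (sketch, not part of the patch):

#include <cmath>

double MultiplyAddD(double addend, double multiplier, double multiplicand) {
  return std::fma(multiplier, multiplicand, addend);    // like fmadd
}

double MultiplySubD(double minuend, double multiplier, double multiplicand) {
  return std::fma(multiplier, multiplicand, -minuend);  // like fmsub
}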
1498
1499
1621 void LCodeGen::DoMulI(LMulI* instr) { 1500 void LCodeGen::DoMulI(LMulI* instr) {
1501 Register scratch = scratch0();
1622 Register result = ToRegister(instr->result()); 1502 Register result = ToRegister(instr->result());
1623 // Note that result may alias left. 1503 // Note that result may alias left.
1624 Register left = ToRegister(instr->left()); 1504 Register left = ToRegister(instr->left());
1625 LOperand* right_op = instr->right(); 1505 LOperand* right_op = instr->right();
1626 1506
1627 bool bailout_on_minus_zero = 1507 bool bailout_on_minus_zero =
1628 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); 1508 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
1629 bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 1509 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1630 1510
1631 if (right_op->IsConstantOperand()) { 1511 if (right_op->IsConstantOperand()) {
1632 int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); 1512 int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
1633 1513
1634 if (bailout_on_minus_zero && (constant < 0)) { 1514 if (bailout_on_minus_zero && (constant < 0)) {
1635 // The case of a null constant will be handled separately. 1515 // The case of a null constant will be handled separately.
1636 // If constant is negative and left is null, the result should be -0. 1516 // If constant is negative and left is null, the result should be -0.
1637 __ cmp(left, Operand::Zero()); 1517 __ cmpi(left, Operand::Zero());
1638 DeoptimizeIf(eq, instr, "minus zero"); 1518 DeoptimizeIf(eq, instr, "minus zero");
1639 } 1519 }
1640 1520
1641 switch (constant) { 1521 switch (constant) {
1642 case -1: 1522 case -1:
1643 if (overflow) { 1523 if (can_overflow) {
1644 __ rsb(result, left, Operand::Zero(), SetCC); 1524 #if V8_TARGET_ARCH_PPC64
1645 DeoptimizeIf(vs, instr, "overflow"); 1525 if (instr->hydrogen()->representation().IsSmi()) {
1526 #endif
1527 __ li(r0, Operand::Zero()); // clear xer
1528 __ mtxer(r0);
1529 __ neg(result, left, SetOE, SetRC);
1530 DeoptimizeIf(overflow, instr, "overflow", cr0);
1531 #if V8_TARGET_ARCH_PPC64
1532 } else {
1533 __ neg(result, left);
1534 __ TestIfInt32(result, scratch, r0);
1535 DeoptimizeIf(ne, instr, "overflow");
1536 }
1537 #endif
1646 } else { 1538 } else {
1647 __ rsb(result, left, Operand::Zero()); 1539 __ neg(result, left);
1648 } 1540 }
1649 break; 1541 break;
1650 case 0: 1542 case 0:
1651 if (bailout_on_minus_zero) { 1543 if (bailout_on_minus_zero) {
1652 // If left is strictly negative and the constant is null, the 1544 // If left is strictly negative and the constant is null, the
1653 // result is -0. Deoptimize if required, otherwise return 0. 1545 // result is -0. Deoptimize if required, otherwise return 0.
1654 __ cmp(left, Operand::Zero()); 1546 #if V8_TARGET_ARCH_PPC64
1655 DeoptimizeIf(mi, instr, "minus zero"); 1547 if (instr->hydrogen()->representation().IsSmi()) {
1548 #endif
1549 __ cmpi(left, Operand::Zero());
1550 #if V8_TARGET_ARCH_PPC64
1551 } else {
1552 __ cmpwi(left, Operand::Zero());
1553 }
1554 #endif
1555 DeoptimizeIf(lt, instr, "minus zero");
1656 } 1556 }
1657 __ mov(result, Operand::Zero()); 1557 __ li(result, Operand::Zero());
1658 break; 1558 break;
1659 case 1: 1559 case 1:
1660 __ Move(result, left); 1560 __ Move(result, left);
1661 break; 1561 break;
1662 default: 1562 default:
1663 // Multiplying by powers of two and powers of two plus or minus 1563 // Multiplying by powers of two and powers of two plus or minus
1664 // one can be done faster with shifted operands. 1564 // one can be done faster with shifted operands.
1665 // For other constants we emit standard code. 1565 // For other constants we emit standard code.
1666 int32_t mask = constant >> 31; 1566 int32_t mask = constant >> 31;
1667 uint32_t constant_abs = (constant + mask) ^ mask; 1567 uint32_t constant_abs = (constant + mask) ^ mask;
1668 1568
1669 if (base::bits::IsPowerOfTwo32(constant_abs)) { 1569 if (base::bits::IsPowerOfTwo32(constant_abs)) {
1670 int32_t shift = WhichPowerOf2(constant_abs); 1570 int32_t shift = WhichPowerOf2(constant_abs);
1671 __ mov(result, Operand(left, LSL, shift)); 1571 __ ShiftLeftImm(result, left, Operand(shift));
1672 // Correct the sign of the result is the constant is negative. 1572 // Correct the sign of the result if the constant is negative.
1673 if (constant < 0) __ rsb(result, result, Operand::Zero()); 1573 if (constant < 0) __ neg(result, result);
1674 } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) { 1574 } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
1675 int32_t shift = WhichPowerOf2(constant_abs - 1); 1575 int32_t shift = WhichPowerOf2(constant_abs - 1);
1676 __ add(result, left, Operand(left, LSL, shift)); 1576 __ ShiftLeftImm(scratch, left, Operand(shift));
1677 // Correct the sign of the result is the constant is negative. 1577 __ add(result, scratch, left);
1678 if (constant < 0) __ rsb(result, result, Operand::Zero()); 1578 // Correct the sign of the result if the constant is negative.
1579 if (constant < 0) __ neg(result, result);
1679 } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) { 1580 } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
1680 int32_t shift = WhichPowerOf2(constant_abs + 1); 1581 int32_t shift = WhichPowerOf2(constant_abs + 1);
1681 __ rsb(result, left, Operand(left, LSL, shift)); 1582 __ ShiftLeftImm(scratch, left, Operand(shift));
1682 // Correct the sign of the result is the constant is negative. 1583 __ sub(result, scratch, left);
1683 if (constant < 0) __ rsb(result, result, Operand::Zero()); 1584 // Correct the sign of the result if the constant is negative.
1585 if (constant < 0) __ neg(result, result);
1684 } else { 1586 } else {
1685 // Generate standard code. 1587 // Generate standard code.
1686 __ mov(ip, Operand(constant)); 1588 __ mov(ip, Operand(constant));
1687 __ mul(result, left, ip); 1589 __ Mul(result, left, ip);
1688 } 1590 }
1689 } 1591 }
1690 1592
1691 } else { 1593 } else {
1692 DCHECK(right_op->IsRegister()); 1594 DCHECK(right_op->IsRegister());
1693 Register right = ToRegister(right_op); 1595 Register right = ToRegister(right_op);
1694 1596
1695 if (overflow) { 1597 if (can_overflow) {
1696 Register scratch = scratch0(); 1598 #if V8_TARGET_ARCH_PPC64
1599 // result = left * right.
1600 if (instr->hydrogen()->representation().IsSmi()) {
1601 __ SmiUntag(result, left);
1602 __ SmiUntag(scratch, right);
1603 __ Mul(result, result, scratch);
1604 } else {
1605 __ Mul(result, left, right);
1606 }
1607 __ TestIfInt32(result, scratch, r0);
1608 DeoptimizeIf(ne, instr, "overflow");
1609 if (instr->hydrogen()->representation().IsSmi()) {
1610 __ SmiTag(result);
1611 }
1612 #else
1697 // scratch:result = left * right. 1613 // scratch:result = left * right.
1698 if (instr->hydrogen()->representation().IsSmi()) { 1614 if (instr->hydrogen()->representation().IsSmi()) {
1699 __ SmiUntag(result, left); 1615 __ SmiUntag(result, left);
1700 __ smull(result, scratch, result, right); 1616 __ mulhw(scratch, result, right);
1617 __ mullw(result, result, right);
1701 } else { 1618 } else {
1702 __ smull(result, scratch, left, right); 1619 __ mulhw(scratch, left, right);
1620 __ mullw(result, left, right);
1703 } 1621 }
1704 __ cmp(scratch, Operand(result, ASR, 31)); 1622 __ TestIfInt32(scratch, result, r0);
1705 DeoptimizeIf(ne, instr, "overflow"); 1623 DeoptimizeIf(ne, instr, "overflow");
1624 #endif
1706 } else { 1625 } else {
1707 if (instr->hydrogen()->representation().IsSmi()) { 1626 if (instr->hydrogen()->representation().IsSmi()) {
1708 __ SmiUntag(result, left); 1627 __ SmiUntag(result, left);
1709 __ mul(result, result, right); 1628 __ Mul(result, result, right);
1710 } else { 1629 } else {
1711 __ mul(result, left, right); 1630 __ Mul(result, left, right);
1712 } 1631 }
1713 } 1632 }
1714 1633
1715 if (bailout_on_minus_zero) { 1634 if (bailout_on_minus_zero) {
1716 Label done; 1635 Label done;
1717 __ teq(left, Operand(right)); 1636 #if V8_TARGET_ARCH_PPC64
1718 __ b(pl, &done); 1637 if (instr->hydrogen()->representation().IsSmi()) {
1638 #endif
1639 __ xor_(r0, left, right, SetRC);
1640 __ bge(&done, cr0);
1641 #if V8_TARGET_ARCH_PPC64
1642 } else {
1643 __ xor_(r0, left, right);
1644 __ cmpwi(r0, Operand::Zero());
1645 __ bge(&done);
1646 }
1647 #endif
1719 // Bail out if the result is minus zero. 1648 // Bail out if the result is minus zero.
1720 __ cmp(result, Operand::Zero()); 1649 __ cmpi(result, Operand::Zero());
1721 DeoptimizeIf(eq, instr, "minus zero"); 1650 DeoptimizeIf(eq, instr, "minus zero");
1722 __ bind(&done); 1651 __ bind(&done);
1723 } 1652 }
1724 } 1653 }
1725 } 1654 }
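Two things above are worth spelling out: the strength reduction for constants whose magnitude is 2^k or 2^k +/- 1, and the widening overflow test that mulhw + TestIfInt32 implement. A sketch (the shifted form assumes the front end proved the multiply cannot overflow, as kCanOverflow is clear on that path):

#include <cstdint>

// |c| == 2^shift + 1: (n << shift) + n; |c| == 2^shift - 1: (n << shift) - n;
// negate afterwards when c < 0, matching the ShiftLeftImm/add/sub/neg above.
int32_t MulByShiftedConstant(int32_t n, int shift, bool plus_one, bool negative) {
  int32_t shifted = static_cast<int32_t>(static_cast<uint32_t>(n) << shift);
  int32_t r = plus_one ? shifted + n : shifted - n;
  return negative ? -r : r;
}

// A 32x32 product fits in 32 bits iff its high word equals the
// sign-extension of its low word: the mulhw / TestIfInt32 check.
bool MulOverflows32(int32_t a, int32_t b) {
  int64_t wide = static_cast<int64_t>(a) * b;
  return wide != static_cast<int32_t>(wide);
}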
1726 1655
1727 1656
1728 void LCodeGen::DoBitI(LBitI* instr) { 1657 void LCodeGen::DoBitI(LBitI* instr) {
1729 LOperand* left_op = instr->left(); 1658 LOperand* left_op = instr->left();
1730 LOperand* right_op = instr->right(); 1659 LOperand* right_op = instr->right();
1731 DCHECK(left_op->IsRegister()); 1660 DCHECK(left_op->IsRegister());
1732 Register left = ToRegister(left_op); 1661 Register left = ToRegister(left_op);
1733 Register result = ToRegister(instr->result()); 1662 Register result = ToRegister(instr->result());
1734 Operand right(no_reg); 1663 Operand right(no_reg);
1735 1664
1736 if (right_op->IsStackSlot()) { 1665 if (right_op->IsStackSlot()) {
1737 right = Operand(EmitLoadRegister(right_op, ip)); 1666 right = Operand(EmitLoadRegister(right_op, ip));
1738 } else { 1667 } else {
1739 DCHECK(right_op->IsRegister() || right_op->IsConstantOperand()); 1668 DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
1740 right = ToOperand(right_op); 1669 right = ToOperand(right_op);
1670
1671 if (right_op->IsConstantOperand() && is_uint16(right.immediate())) {
1672 switch (instr->op()) {
1673 case Token::BIT_AND:
1674 __ andi(result, left, right);
1675 break;
1676 case Token::BIT_OR:
1677 __ ori(result, left, right);
1678 break;
1679 case Token::BIT_XOR:
1680 __ xori(result, left, right);
1681 break;
1682 default:
1683 UNREACHABLE();
1684 break;
1685 }
1686 return;
1687 }
1741 } 1688 }
1742 1689
1743 switch (instr->op()) { 1690 switch (instr->op()) {
1744 case Token::BIT_AND: 1691 case Token::BIT_AND:
1745 __ and_(result, left, right); 1692 __ And(result, left, right);
1746 break; 1693 break;
1747 case Token::BIT_OR: 1694 case Token::BIT_OR:
1748 __ orr(result, left, right); 1695 __ Or(result, left, right);
1749 break; 1696 break;
1750 case Token::BIT_XOR: 1697 case Token::BIT_XOR:
1751 if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) { 1698 if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
1752 __ mvn(result, Operand(left)); 1699 __ notx(result, left);
1753 } else { 1700 } else {
1754 __ eor(result, left, right); 1701 __ Xor(result, left, right);
1755 } 1702 }
1756 break; 1703 break;
1757 default: 1704 default:
1758 UNREACHABLE(); 1705 UNREACHABLE();
1759 break; 1706 break;
1760 } 1707 }
1761 } 1708 }
1762 1709
1763 1710
1764 void LCodeGen::DoShiftI(LShiftI* instr) { 1711 void LCodeGen::DoShiftI(LShiftI* instr) {
1765 // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so 1712 // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
1766 // result may alias either of them. 1713 // result may alias either of them.
1767 LOperand* right_op = instr->right(); 1714 LOperand* right_op = instr->right();
1768 Register left = ToRegister(instr->left()); 1715 Register left = ToRegister(instr->left());
1769 Register result = ToRegister(instr->result()); 1716 Register result = ToRegister(instr->result());
1770 Register scratch = scratch0(); 1717 Register scratch = scratch0();
1771 if (right_op->IsRegister()) { 1718 if (right_op->IsRegister()) {
1772 // Mask the right_op operand. 1719 // Mask the right_op operand.
1773 __ and_(scratch, ToRegister(right_op), Operand(0x1F)); 1720 __ andi(scratch, ToRegister(right_op), Operand(0x1F));
1774 switch (instr->op()) { 1721 switch (instr->op()) {
1775 case Token::ROR: 1722 case Token::ROR:
1776 __ mov(result, Operand(left, ROR, scratch)); 1723 // rotate_right(a, b) == rotate_left(a, 32 - b)
1724 __ subfic(scratch, scratch, Operand(32));
1725 __ rotlw(result, left, scratch);
1777 break; 1726 break;
1778 case Token::SAR: 1727 case Token::SAR:
1779 __ mov(result, Operand(left, ASR, scratch)); 1728 __ sraw(result, left, scratch);
1780 break; 1729 break;
1781 case Token::SHR: 1730 case Token::SHR:
1782 if (instr->can_deopt()) { 1731 if (instr->can_deopt()) {
1783 __ mov(result, Operand(left, LSR, scratch), SetCC); 1732 __ srw(result, left, scratch, SetRC);
1784 DeoptimizeIf(mi, instr, "negative value"); 1733 #if V8_TARGET_ARCH_PPC64
1734 __ extsw(result, result, SetRC);
1735 #endif
1736 DeoptimizeIf(lt, instr, "negative value", cr0);
1785 } else { 1737 } else {
1786 __ mov(result, Operand(left, LSR, scratch)); 1738 __ srw(result, left, scratch);
1787 } 1739 }
1788 break; 1740 break;
1789 case Token::SHL: 1741 case Token::SHL:
1790 __ mov(result, Operand(left, LSL, scratch)); 1742 __ slw(result, left, scratch);
1743 #if V8_TARGET_ARCH_PPC64
1744 __ extsw(result, result);
1745 #endif
1791 break; 1746 break;
1792 default: 1747 default:
1793 UNREACHABLE(); 1748 UNREACHABLE();
1794 break; 1749 break;
1795 } 1750 }
1796 } else { 1751 } else {
1797 // Mask the right_op operand. 1752 // Mask the right_op operand.
1798 int value = ToInteger32(LConstantOperand::cast(right_op)); 1753 int value = ToInteger32(LConstantOperand::cast(right_op));
1799 uint8_t shift_count = static_cast<uint8_t>(value & 0x1F); 1754 uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1800 switch (instr->op()) { 1755 switch (instr->op()) {
1801 case Token::ROR: 1756 case Token::ROR:
1802 if (shift_count != 0) { 1757 if (shift_count != 0) {
1803 __ mov(result, Operand(left, ROR, shift_count)); 1758 __ rotrwi(result, left, shift_count);
1804 } else { 1759 } else {
1805 __ Move(result, left); 1760 __ Move(result, left);
1806 } 1761 }
1807 break; 1762 break;
1808 case Token::SAR: 1763 case Token::SAR:
1809 if (shift_count != 0) { 1764 if (shift_count != 0) {
1810 __ mov(result, Operand(left, ASR, shift_count)); 1765 __ srawi(result, left, shift_count);
1811 } else { 1766 } else {
1812 __ Move(result, left); 1767 __ Move(result, left);
1813 } 1768 }
1814 break; 1769 break;
1815 case Token::SHR: 1770 case Token::SHR:
1816 if (shift_count != 0) { 1771 if (shift_count != 0) {
1817 __ mov(result, Operand(left, LSR, shift_count)); 1772 __ srwi(result, left, Operand(shift_count));
1818 } else { 1773 } else {
1819 if (instr->can_deopt()) { 1774 if (instr->can_deopt()) {
1820 __ tst(left, Operand(0x80000000)); 1775 __ cmpwi(left, Operand::Zero());
1821 DeoptimizeIf(ne, instr, "negative value"); 1776 DeoptimizeIf(lt, instr, "negative value");
1822 } 1777 }
1823 __ Move(result, left); 1778 __ Move(result, left);
1824 } 1779 }
1825 break; 1780 break;
1826 case Token::SHL: 1781 case Token::SHL:
1827 if (shift_count != 0) { 1782 if (shift_count != 0) {
1783 #if V8_TARGET_ARCH_PPC64
1784 if (instr->hydrogen_value()->representation().IsSmi()) {
1785 __ sldi(result, left, Operand(shift_count));
1786 #else
1828 if (instr->hydrogen_value()->representation().IsSmi() && 1787 if (instr->hydrogen_value()->representation().IsSmi() &&
1829 instr->can_deopt()) { 1788 instr->can_deopt()) {
1830 if (shift_count != 1) { 1789 if (shift_count != 1) {
1831 __ mov(result, Operand(left, LSL, shift_count - 1)); 1790 __ slwi(result, left, Operand(shift_count - 1));
1832 __ SmiTag(result, result, SetCC); 1791 __ SmiTagCheckOverflow(result, result, scratch);
1833 } else { 1792 } else {
1834 __ SmiTag(result, left, SetCC); 1793 __ SmiTagCheckOverflow(result, left, scratch);
1835 } 1794 }
1836 DeoptimizeIf(vs, instr, "overflow"); 1795 DeoptimizeIf(lt, instr, "overflow", cr0);
1796 #endif
1837 } else { 1797 } else {
1838 __ mov(result, Operand(left, LSL, shift_count)); 1798 __ slwi(result, left, Operand(shift_count));
1799 #if V8_TARGET_ARCH_PPC64
1800 __ extsw(result, result);
1801 #endif
1839 } 1802 }
1840 } else { 1803 } else {
1841 __ Move(result, left); 1804 __ Move(result, left);
1842 } 1805 }
1843 break; 1806 break;
1844 default: 1807 default:
1845 UNREACHABLE(); 1808 UNREACHABLE();
1846 break; 1809 break;
1847 } 1810 }
1848 } 1811 }
1849 } 1812 }
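One non-obvious lowering above: PPC only rotates left, so ROR by a register amount becomes subfic + rotlw via the identity in the comment. As plain C++ (the masking mirrors the andi and rotlw's mod-32 behavior):

#include <cstdint>

uint32_t RotateRight(uint32_t x, uint32_t s) {
  s &= 0x1F;                        // andi ..., 0x1F
  uint32_t left = (32 - s) & 0x1F;  // subfic: 32 - s, taken mod 32
  return (x << left) | (x >> ((32 - left) & 0x1F));
}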
1850 1813
1851 1814
1852 void LCodeGen::DoSubI(LSubI* instr) { 1815 void LCodeGen::DoSubI(LSubI* instr) {
1853 LOperand* left = instr->left();
1854 LOperand* right = instr->right(); 1816 LOperand* right = instr->right();
1855 LOperand* result = instr->result(); 1817 Register left = ToRegister(instr->left());
1818 Register result = ToRegister(instr->result());
1856 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 1819 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1857 SBit set_cond = can_overflow ? SetCC : LeaveCC; 1820 if (!can_overflow) {
1858 1821 if (right->IsConstantOperand()) {
1859 if (right->IsStackSlot()) { 1822 __ Add(result, left, -(ToOperand(right).immediate()), r0);
1860 Register right_reg = EmitLoadRegister(right, ip); 1823 } else {
1861 __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); 1824 __ sub(result, left, EmitLoadRegister(right, ip));
1825 }
1862 } else { 1826 } else {
1863 DCHECK(right->IsRegister() || right->IsConstantOperand()); 1827 if (right->IsConstantOperand()) {
1864 __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); 1828 __ AddAndCheckForOverflow(result, left, -(ToOperand(right).immediate()),
1829 scratch0(), r0);
1830 } else {
1831 __ SubAndCheckForOverflow(result, left, EmitLoadRegister(right, ip),
1832 scratch0(), r0);
1833 }
1834 // Deoptimize on overflow
1835 #if V8_TARGET_ARCH_PPC64
1836 if (!instr->hydrogen()->representation().IsSmi()) {
1837 __ extsw(scratch0(), scratch0(), SetRC);
1838 }
1839 #endif
1840 DeoptimizeIf(lt, instr, "overflow", cr0);
1865 } 1841 }
1866 1842
1867 if (can_overflow) { 1843 #if V8_TARGET_ARCH_PPC64
1868 DeoptimizeIf(vs, instr, "overflow"); 1844 if (!instr->hydrogen()->representation().IsSmi()) {
1845 __ extsw(result, result);
1869 } 1846 }
1847 #endif
1870 } 1848 }
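One portable way to express the overflow condition that SubAndCheckForOverflow and the PPC64 extsw test detect: widen, subtract, and check that the result still sign-extends from 32 bits (sketch, with the bailout reported through the return value):

#include <cstdint>

bool SubWouldOverflow(int32_t a, int32_t b, int32_t* out) {
  int64_t wide = static_cast<int64_t>(a) - b;
  *out = static_cast<int32_t>(wide);
  return wide != *out;  // true => DeoptimizeIf(..., "overflow")
}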
1871 1849
1872 1850
1873 void LCodeGen::DoRSubI(LRSubI* instr) { 1851 void LCodeGen::DoRSubI(LRSubI* instr) {
1874 LOperand* left = instr->left(); 1852 LOperand* left = instr->left();
1875 LOperand* right = instr->right(); 1853 LOperand* right = instr->right();
1876 LOperand* result = instr->result(); 1854 LOperand* result = instr->result();
1877 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1878 SBit set_cond = can_overflow ? SetCC : LeaveCC;
1879 1855
1880 if (right->IsStackSlot()) { 1856 DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow) &&
1881 Register right_reg = EmitLoadRegister(right, ip); 1857 right->IsConstantOperand());
1882 __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); 1858
1859 Operand right_operand = ToOperand(right);
1860 if (is_int16(right_operand.immediate())) {
1861 __ subfic(ToRegister(result), ToRegister(left), right_operand);
1883 } else { 1862 } else {
1884 DCHECK(right->IsRegister() || right->IsConstantOperand()); 1863 __ mov(r0, right_operand);
1885 __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); 1864 __ sub(ToRegister(result), r0, ToRegister(left));
1886 }
1887
1888 if (can_overflow) {
1889 DeoptimizeIf(vs, instr, "overflow");
1890 } 1865 }
1891 } 1866 }
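
For context on subfic: it computes immediate - register, the reverse of a normal subtract, and its immediate must fit in 16 signed bits, hence the is_int16 split above. A trivial sketch of the operand order:

// Reverse subtraction, mirroring PPC subfic (subtract-from immediate).
int ReverseSub(int reg_value, int immediate) {
  return immediate - reg_value;  // the immediate is the minuend
}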
1892 1867
1893 1868
1894 void LCodeGen::DoConstantI(LConstantI* instr) { 1869 void LCodeGen::DoConstantI(LConstantI* instr) {
1895 __ mov(ToRegister(instr->result()), Operand(instr->value())); 1870 __ mov(ToRegister(instr->result()), Operand(instr->value()));
1896 } 1871 }
1897 1872
1898 1873
1899 void LCodeGen::DoConstantS(LConstantS* instr) { 1874 void LCodeGen::DoConstantS(LConstantS* instr) {
1900 __ mov(ToRegister(instr->result()), Operand(instr->value())); 1875 __ LoadSmiLiteral(ToRegister(instr->result()), instr->value());
1901 } 1876 }
1902 1877
1903 1878
1879 // TODO(penguin): put the constant into the constant pool instead
1880 // of storing the double to the stack
1904 void LCodeGen::DoConstantD(LConstantD* instr) { 1881 void LCodeGen::DoConstantD(LConstantD* instr) {
1905 DCHECK(instr->result()->IsDoubleRegister()); 1882 DCHECK(instr->result()->IsDoubleRegister());
1906 DwVfpRegister result = ToDoubleRegister(instr->result()); 1883 DoubleRegister result = ToDoubleRegister(instr->result());
1907 double v = instr->value(); 1884 double v = instr->value();
1908 __ Vmov(result, v, scratch0()); 1885 __ LoadDoubleLiteral(result, v, scratch0());
1909 } 1886 }
1910 1887
1911 1888
1912 void LCodeGen::DoConstantE(LConstantE* instr) { 1889 void LCodeGen::DoConstantE(LConstantE* instr) {
1913 __ mov(ToRegister(instr->result()), Operand(instr->value())); 1890 __ mov(ToRegister(instr->result()), Operand(instr->value()));
1914 } 1891 }
1915 1892
1916 1893
1917 void LCodeGen::DoConstantT(LConstantT* instr) { 1894 void LCodeGen::DoConstantT(LConstantT* instr) {
1918 Handle<Object> object = instr->value(isolate()); 1895 Handle<Object> object = instr->value(isolate());
1919 AllowDeferredHandleDereference smi_check; 1896 AllowDeferredHandleDereference smi_check;
1920 __ Move(ToRegister(instr->result()), object); 1897 __ Move(ToRegister(instr->result()), object);
1921 } 1898 }
1922 1899
1923 1900
1924 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) { 1901 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
1925 Register result = ToRegister(instr->result()); 1902 Register result = ToRegister(instr->result());
1926 Register map = ToRegister(instr->value()); 1903 Register map = ToRegister(instr->value());
1927 __ EnumLength(result, map); 1904 __ EnumLength(result, map);
1928 } 1905 }
1929 1906
1930 1907
1931 void LCodeGen::DoDateField(LDateField* instr) { 1908 void LCodeGen::DoDateField(LDateField* instr) {
1932 Register object = ToRegister(instr->date()); 1909 Register object = ToRegister(instr->date());
1933 Register result = ToRegister(instr->result()); 1910 Register result = ToRegister(instr->result());
1934 Register scratch = ToRegister(instr->temp()); 1911 Register scratch = ToRegister(instr->temp());
1935 Smi* index = instr->index(); 1912 Smi* index = instr->index();
1936 Label runtime, done; 1913 Label runtime, done;
1937 DCHECK(object.is(result)); 1914 DCHECK(object.is(result));
1938 DCHECK(object.is(r0)); 1915 DCHECK(object.is(r3));
1939 DCHECK(!scratch.is(scratch0())); 1916 DCHECK(!scratch.is(scratch0()));
1940 DCHECK(!scratch.is(object)); 1917 DCHECK(!scratch.is(object));
1941 1918
1942 __ SmiTst(object); 1919 __ TestIfSmi(object, r0);
1943 DeoptimizeIf(eq, instr, "Smi"); 1920 DeoptimizeIf(eq, instr, "Smi", cr0);
1944 __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE); 1921 __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
1945 DeoptimizeIf(ne, instr, "not a date object"); 1922 DeoptimizeIf(ne, instr, "not a date object");
1946 1923
1947 if (index->value() == 0) { 1924 if (index->value() == 0) {
1948 __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset)); 1925 __ LoadP(result, FieldMemOperand(object, JSDate::kValueOffset));
1949 } else { 1926 } else {
1950 if (index->value() < JSDate::kFirstUncachedField) { 1927 if (index->value() < JSDate::kFirstUncachedField) {
1951 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); 1928 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
1952 __ mov(scratch, Operand(stamp)); 1929 __ mov(scratch, Operand(stamp));
1953 __ ldr(scratch, MemOperand(scratch)); 1930 __ LoadP(scratch, MemOperand(scratch));
1954 __ ldr(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset)); 1931 __ LoadP(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
1955 __ cmp(scratch, scratch0()); 1932 __ cmp(scratch, scratch0());
1956 __ b(ne, &runtime); 1933 __ bne(&runtime);
1957 __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset + 1934 __ LoadP(result,
1958 kPointerSize * index->value())); 1935 FieldMemOperand(object, JSDate::kValueOffset +
1959 __ jmp(&done); 1936 kPointerSize * index->value()));
1937 __ b(&done);
1960 } 1938 }
1961 __ bind(&runtime); 1939 __ bind(&runtime);
1962 __ PrepareCallCFunction(2, scratch); 1940 __ PrepareCallCFunction(2, scratch);
1963 __ mov(r1, Operand(index)); 1941 __ LoadSmiLiteral(r4, index);
1964 __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2); 1942 __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
1965 __ bind(&done); 1943 __ bind(&done);
1966 } 1944 }
1967 } 1945 }
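
The fast path above trusts a cached date field only while the object's cache stamp equals the isolate-wide date_cache_stamp. A hedged sketch of that protocol in plain C++ (struct layout and names are illustrative stand-ins, not V8's actual JSDate layout):

#include <cstdint>

struct DateLike {
  int64_t cache_stamp;       // stamp captured when the fields were cached
  int64_t cached_fields[8];  // year, month, day, ... (hypothetical slots)
};

int64_t GetDateField(const DateLike& date, int index, int64_t global_stamp,
                     int64_t (*runtime_get)(const DateLike&, int)) {
  if (date.cache_stamp == global_stamp) {
    return date.cached_fields[index];  // fast path: cache still valid
  }
  return runtime_get(date, index);  // slow path: recompute via the runtime
}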
1968 1946
1969 1947
1970 MemOperand LCodeGen::BuildSeqStringOperand(Register string, 1948 MemOperand LCodeGen::BuildSeqStringOperand(Register string, LOperand* index,
1971 LOperand* index,
1972 String::Encoding encoding) { 1949 String::Encoding encoding) {
1973 if (index->IsConstantOperand()) { 1950 if (index->IsConstantOperand()) {
1974 int offset = ToInteger32(LConstantOperand::cast(index)); 1951 int offset = ToInteger32(LConstantOperand::cast(index));
1975 if (encoding == String::TWO_BYTE_ENCODING) { 1952 if (encoding == String::TWO_BYTE_ENCODING) {
1976 offset *= kUC16Size; 1953 offset *= kUC16Size;
1977 } 1954 }
1978 STATIC_ASSERT(kCharSize == 1); 1955 STATIC_ASSERT(kCharSize == 1);
1979 return FieldMemOperand(string, SeqString::kHeaderSize + offset); 1956 return FieldMemOperand(string, SeqString::kHeaderSize + offset);
1980 } 1957 }
1981 Register scratch = scratch0(); 1958 Register scratch = scratch0();
1982 DCHECK(!scratch.is(string)); 1959 DCHECK(!scratch.is(string));
1983 DCHECK(!scratch.is(ToRegister(index))); 1960 DCHECK(!scratch.is(ToRegister(index)));
1984 if (encoding == String::ONE_BYTE_ENCODING) { 1961 if (encoding == String::ONE_BYTE_ENCODING) {
1985 __ add(scratch, string, Operand(ToRegister(index))); 1962 __ add(scratch, string, ToRegister(index));
1986 } else { 1963 } else {
1987 STATIC_ASSERT(kUC16Size == 2); 1964 STATIC_ASSERT(kUC16Size == 2);
1988 __ add(scratch, string, Operand(ToRegister(index), LSL, 1)); 1965 __ ShiftLeftImm(scratch, ToRegister(index), Operand(1));
1966 __ add(scratch, string, scratch);
1989 } 1967 }
1990 return FieldMemOperand(scratch, SeqString::kHeaderSize); 1968 return FieldMemOperand(scratch, SeqString::kHeaderSize);
1991 } 1969 }
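
The addressing math above reduces to a fixed header offset plus the index scaled by the character width; the STATIC_ASSERTs pin kCharSize == 1 and kUC16Size == 2. As a one-function sketch:

// Byte offset of character `index` within a sequential string's payload.
int SeqStringCharOffset(int header_size, int index, bool two_byte) {
  return header_size + index * (two_byte ? 2 : 1);
}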
1992 1970
1993 1971
1994 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) { 1972 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
1995 String::Encoding encoding = instr->hydrogen()->encoding(); 1973 String::Encoding encoding = instr->hydrogen()->encoding();
1996 Register string = ToRegister(instr->string()); 1974 Register string = ToRegister(instr->string());
1997 Register result = ToRegister(instr->result()); 1975 Register result = ToRegister(instr->result());
1998 1976
1999 if (FLAG_debug_code) { 1977 if (FLAG_debug_code) {
2000 Register scratch = scratch0(); 1978 Register scratch = scratch0();
2001 __ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset)); 1979 __ LoadP(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
2002 __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); 1980 __ lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
2003 1981
2004 __ and_(scratch, scratch, 1982 __ andi(scratch, scratch,
2005 Operand(kStringRepresentationMask | kStringEncodingMask)); 1983 Operand(kStringRepresentationMask | kStringEncodingMask));
2006 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; 1984 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
2007 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; 1985 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
2008 __ cmp(scratch, Operand(encoding == String::ONE_BYTE_ENCODING 1986 __ cmpi(scratch,
2009 ? one_byte_seq_type : two_byte_seq_type)); 1987 Operand(encoding == String::ONE_BYTE_ENCODING ? one_byte_seq_type
1988 : two_byte_seq_type));
2010 __ Check(eq, kUnexpectedStringType); 1989 __ Check(eq, kUnexpectedStringType);
2011 } 1990 }
2012 1991
2013 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding); 1992 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
2014 if (encoding == String::ONE_BYTE_ENCODING) { 1993 if (encoding == String::ONE_BYTE_ENCODING) {
2015 __ ldrb(result, operand); 1994 __ lbz(result, operand);
2016 } else { 1995 } else {
2017 __ ldrh(result, operand); 1996 __ lhz(result, operand);
2018 } 1997 }
2019 } 1998 }
2020 1999
2021 2000
2022 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) { 2001 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
2023 String::Encoding encoding = instr->hydrogen()->encoding(); 2002 String::Encoding encoding = instr->hydrogen()->encoding();
2024 Register string = ToRegister(instr->string()); 2003 Register string = ToRegister(instr->string());
2025 Register value = ToRegister(instr->value()); 2004 Register value = ToRegister(instr->value());
2026 2005
2027 if (FLAG_debug_code) { 2006 if (FLAG_debug_code) {
2028 Register index = ToRegister(instr->index()); 2007 Register index = ToRegister(instr->index());
2029 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; 2008 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
2030 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; 2009 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
2031 int encoding_mask = 2010 int encoding_mask =
2032 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING 2011 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
2033 ? one_byte_seq_type : two_byte_seq_type; 2012 ? one_byte_seq_type
2013 : two_byte_seq_type;
2034 __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask); 2014 __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
2035 } 2015 }
2036 2016
2037 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding); 2017 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
2038 if (encoding == String::ONE_BYTE_ENCODING) { 2018 if (encoding == String::ONE_BYTE_ENCODING) {
2039 __ strb(value, operand); 2019 __ stb(value, operand);
2040 } else { 2020 } else {
2041 __ strh(value, operand); 2021 __ sth(value, operand);
2042 } 2022 }
2043 } 2023 }
2044 2024
2045 2025
2046 void LCodeGen::DoAddI(LAddI* instr) { 2026 void LCodeGen::DoAddI(LAddI* instr) {
2047 LOperand* left = instr->left();
2048 LOperand* right = instr->right(); 2027 LOperand* right = instr->right();
2049 LOperand* result = instr->result(); 2028 Register left = ToRegister(instr->left());
2029 Register result = ToRegister(instr->result());
2050 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 2030 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
2051 SBit set_cond = can_overflow ? SetCC : LeaveCC; 2031 #if V8_TARGET_ARCH_PPC64
2032 bool isInteger = !(instr->hydrogen()->representation().IsSmi() ||
2033 instr->hydrogen()->representation().IsExternal());
2034 #endif
2052 2035
2053 if (right->IsStackSlot()) { 2036 if (!can_overflow) {
2054 Register right_reg = EmitLoadRegister(right, ip); 2037 if (right->IsConstantOperand()) {
2055 __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); 2038 __ Add(result, left, ToOperand(right).immediate(), r0);
2039 } else {
2040 __ add(result, left, EmitLoadRegister(right, ip));
2041 }
2056 } else { 2042 } else {
2057 DCHECK(right->IsRegister() || right->IsConstantOperand()); 2043 if (right->IsConstantOperand()) {
2058 __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); 2044 __ AddAndCheckForOverflow(result, left, ToOperand(right).immediate(),
2045 scratch0(), r0);
2046 } else {
2047 __ AddAndCheckForOverflow(result, left, EmitLoadRegister(right, ip),
2048 scratch0(), r0);
2049 }
2050 // Deoptimize on overflow
2051 #if V8_TARGET_ARCH_PPC64
2052 if (isInteger) {
2053 __ extsw(scratch0(), scratch0(), SetRC);
2054 }
2055 #endif
2056 DeoptimizeIf(lt, instr, "overflow", cr0);
2059 } 2057 }
2060 2058
2061 if (can_overflow) { 2059 #if V8_TARGET_ARCH_PPC64
2062 DeoptimizeIf(vs, instr, "overflow"); 2060 if (isInteger) {
2061 __ extsw(result, result);
2063 } 2062 }
2063 #endif
2064 } 2064 }
2065 2065
2066 2066
2067 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { 2067 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
2068 LOperand* left = instr->left(); 2068 LOperand* left = instr->left();
2069 LOperand* right = instr->right(); 2069 LOperand* right = instr->right();
2070 HMathMinMax::Operation operation = instr->hydrogen()->operation(); 2070 HMathMinMax::Operation operation = instr->hydrogen()->operation();
2071 Condition cond = (operation == HMathMinMax::kMathMin) ? le : ge;
2071 if (instr->hydrogen()->representation().IsSmiOrInteger32()) { 2072 if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
2072 Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
2073 Register left_reg = ToRegister(left); 2073 Register left_reg = ToRegister(left);
2074 Operand right_op = (right->IsRegister() || right->IsConstantOperand()) 2074 Register right_reg = EmitLoadRegister(right, ip);
2075 ? ToOperand(right)
2076 : Operand(EmitLoadRegister(right, ip));
2077 Register result_reg = ToRegister(instr->result()); 2075 Register result_reg = ToRegister(instr->result());
2078 __ cmp(left_reg, right_op); 2076 Label return_left, done;
2079 __ Move(result_reg, left_reg, condition); 2077 #if V8_TARGET_ARCH_PPC64
2080 __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition)); 2078 if (instr->hydrogen_value()->representation().IsSmi()) {
2079 #endif
2080 __ cmp(left_reg, right_reg);
2081 #if V8_TARGET_ARCH_PPC64
2082 } else {
2083 __ cmpw(left_reg, right_reg);
2084 }
2085 #endif
2086 __ b(cond, &return_left);
2087 __ Move(result_reg, right_reg);
2088 __ b(&done);
2089 __ bind(&return_left);
2090 __ Move(result_reg, left_reg);
2091 __ bind(&done);
2081 } else { 2092 } else {
2082 DCHECK(instr->hydrogen()->representation().IsDouble()); 2093 DCHECK(instr->hydrogen()->representation().IsDouble());
2083 DwVfpRegister left_reg = ToDoubleRegister(left); 2094 DoubleRegister left_reg = ToDoubleRegister(left);
2084 DwVfpRegister right_reg = ToDoubleRegister(right); 2095 DoubleRegister right_reg = ToDoubleRegister(right);
2085 DwVfpRegister result_reg = ToDoubleRegister(instr->result()); 2096 DoubleRegister result_reg = ToDoubleRegister(instr->result());
2086 Label result_is_nan, return_left, return_right, check_zero, done; 2097 Label check_nan_left, check_zero, return_left, return_right, done;
2087 __ VFPCompareAndSetFlags(left_reg, right_reg); 2098 __ fcmpu(left_reg, right_reg);
2099 __ bunordered(&check_nan_left);
2100 __ beq(&check_zero);
2101 __ b(cond, &return_left);
2102 __ b(&return_right);
2103
2104 __ bind(&check_zero);
2105 __ fcmpu(left_reg, kDoubleRegZero);
2106 __ bne(&return_left); // left == right != 0.
2107
2108 // At this point, both left and right are either 0 or -0.
2109 // N.B. The following works because +0 + -0 == +0
2088 if (operation == HMathMinMax::kMathMin) { 2110 if (operation == HMathMinMax::kMathMin) {
2089 __ b(mi, &return_left); 2111 // For min we want logical-or of sign bit: -(-L + -R)
2090 __ b(gt, &return_right); 2112 __ fneg(left_reg, left_reg);
2113 __ fsub(result_reg, left_reg, right_reg);
2114 __ fneg(result_reg, result_reg);
2091 } else { 2115 } else {
2092 __ b(mi, &return_right); 2116 // For max we want logical-and of sign bit: (L + R)
2093 __ b(gt, &return_left); 2117 __ fadd(result_reg, left_reg, right_reg);
2094 }
2095 __ b(vs, &result_is_nan);
2096 // Left equals right => check for -0.
2097 __ VFPCompareAndSetFlags(left_reg, 0.0);
2098 if (left_reg.is(result_reg) || right_reg.is(result_reg)) {
2099 __ b(ne, &done); // left == right != 0.
2100 } else {
2101 __ b(ne, &return_left); // left == right != 0.
2102 }
2103 // At this point, both left and right are either 0 or -0.
2104 if (operation == HMathMinMax::kMathMin) {
2105 // We could use a single 'vorr' instruction here if we had NEON support.
2106 __ vneg(left_reg, left_reg);
2107 __ vsub(result_reg, left_reg, right_reg);
2108 __ vneg(result_reg, result_reg);
2109 } else {
2110 // Since we operate on +0 and/or -0, vadd and vand have the same effect;
2111 // the decision for vadd is easy because vand is a NEON instruction.
2112 __ vadd(result_reg, left_reg, right_reg);
2113 } 2118 }
2114 __ b(&done); 2119 __ b(&done);
2115 2120
2116 __ bind(&result_is_nan); 2121 __ bind(&check_nan_left);
2117 __ vadd(result_reg, left_reg, right_reg); 2122 __ fcmpu(left_reg, left_reg);
2123 __ bunordered(&return_left); // left == NaN.
2124
2125 __ bind(&return_right);
2126 if (!right_reg.is(result_reg)) {
2127 __ fmr(result_reg, right_reg);
2128 }
2118 __ b(&done); 2129 __ b(&done);
2119 2130
2120 __ bind(&return_right); 2131 __ bind(&return_left);
2121 __ Move(result_reg, right_reg);
2122 if (!left_reg.is(result_reg)) { 2132 if (!left_reg.is(result_reg)) {
2123 __ b(&done); 2133 __ fmr(result_reg, left_reg);
2124 } 2134 }
2125
2126 __ bind(&return_left);
2127 __ Move(result_reg, left_reg);
2128
2129 __ bind(&done); 2135 __ bind(&done);
2130 } 2136 }
2131 } 2137 }
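
The signed-zero handling merits a worked check: once both inputs are known to be zeros, IEEE addition acts as a logical AND of the sign bits, and -((-L) + (-R)) as a logical OR, which is exactly what the fadd/fneg/fsub sequences compute. A standalone C++ sketch, valid only for ±0 inputs:

#include <cstdio>

double ZeroMax(double l, double r) { return l + r; }       // -0 only if both are -0
double ZeroMin(double l, double r) { return -(-l + -r); }  // -0 if either is -0

int main() {
  std::printf("%g %g\n", ZeroMax(0.0, -0.0), ZeroMin(0.0, -0.0));  // 0 -0
  std::printf("%g %g\n", ZeroMax(-0.0, -0.0), ZeroMin(0.0, 0.0));  // -0 0
}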
2132 2138
2133 2139
2134 void LCodeGen::DoArithmeticD(LArithmeticD* instr) { 2140 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
2135 DwVfpRegister left = ToDoubleRegister(instr->left()); 2141 DoubleRegister left = ToDoubleRegister(instr->left());
2136 DwVfpRegister right = ToDoubleRegister(instr->right()); 2142 DoubleRegister right = ToDoubleRegister(instr->right());
2137 DwVfpRegister result = ToDoubleRegister(instr->result()); 2143 DoubleRegister result = ToDoubleRegister(instr->result());
2138 switch (instr->op()) { 2144 switch (instr->op()) {
2139 case Token::ADD: 2145 case Token::ADD:
2140 __ vadd(result, left, right); 2146 __ fadd(result, left, right);
2141 break; 2147 break;
2142 case Token::SUB: 2148 case Token::SUB:
2143 __ vsub(result, left, right); 2149 __ fsub(result, left, right);
2144 break; 2150 break;
2145 case Token::MUL: 2151 case Token::MUL:
2146 __ vmul(result, left, right); 2152 __ fmul(result, left, right);
2147 break; 2153 break;
2148 case Token::DIV: 2154 case Token::DIV:
2149 __ vdiv(result, left, right); 2155 __ fdiv(result, left, right);
2150 break; 2156 break;
2151 case Token::MOD: { 2157 case Token::MOD: {
2152 __ PrepareCallCFunction(0, 2, scratch0()); 2158 __ PrepareCallCFunction(0, 2, scratch0());
2153 __ MovToFloatParameters(left, right); 2159 __ MovToFloatParameters(left, right);
2154 __ CallCFunction( 2160 __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
2155 ExternalReference::mod_two_doubles_operation(isolate()), 2161 0, 2);
2156 0, 2);
2157 // Move the result into the double result register. 2162 // Move the result into the double result register.
2158 __ MovFromFloatResult(result); 2163 __ MovFromFloatResult(result);
2159 break; 2164 break;
2160 } 2165 }
2161 default: 2166 default:
2162 UNREACHABLE(); 2167 UNREACHABLE();
2163 break; 2168 break;
2164 } 2169 }
2165 } 2170 }
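
Token::MOD is the one case without a PPC instruction; it calls through to the runtime's mod_two_doubles_operation, which (judging by the name, an assumption here) computes the remainder in the fmod sense:

#include <cmath>

// Approximately what the MOD slow path evaluates.
double ModTwoDoubles(double left, double right) {
  return std::fmod(left, right);
}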
2166 2171
2167 2172
2168 void LCodeGen::DoArithmeticT(LArithmeticT* instr) { 2173 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
2169 DCHECK(ToRegister(instr->context()).is(cp)); 2174 DCHECK(ToRegister(instr->context()).is(cp));
2170 DCHECK(ToRegister(instr->left()).is(r1)); 2175 DCHECK(ToRegister(instr->left()).is(r4));
2171 DCHECK(ToRegister(instr->right()).is(r0)); 2176 DCHECK(ToRegister(instr->right()).is(r3));
2172 DCHECK(ToRegister(instr->result()).is(r0)); 2177 DCHECK(ToRegister(instr->result()).is(r3));
2173 2178
2174 Handle<Code> code = 2179 Handle<Code> code =
2175 CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code(); 2180 CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
2176 // Block literal pool emission to ensure nop indicating no inlined smi code
2177 // is in the correct position.
2178 Assembler::BlockConstPoolScope block_const_pool(masm());
2179 CallCode(code, RelocInfo::CODE_TARGET, instr); 2181 CallCode(code, RelocInfo::CODE_TARGET, instr);
2180 } 2182 }
2181 2183
2182 2184
2183 template<class InstrType> 2185 template <class InstrType>
2184 void LCodeGen::EmitBranch(InstrType instr, Condition condition) { 2186 void LCodeGen::EmitBranch(InstrType instr, Condition cond, CRegister cr) {
2185 int left_block = instr->TrueDestination(chunk_); 2187 int left_block = instr->TrueDestination(chunk_);
2186 int right_block = instr->FalseDestination(chunk_); 2188 int right_block = instr->FalseDestination(chunk_);
2187 2189
2188 int next_block = GetNextEmittedBlock(); 2190 int next_block = GetNextEmittedBlock();
2189 2191
2190 if (right_block == left_block || condition == al) { 2192 if (right_block == left_block || cond == al) {
2191 EmitGoto(left_block); 2193 EmitGoto(left_block);
2192 } else if (left_block == next_block) { 2194 } else if (left_block == next_block) {
2193 __ b(NegateCondition(condition), chunk_->GetAssemblyLabel(right_block)); 2195 __ b(NegateCondition(cond), chunk_->GetAssemblyLabel(right_block), cr);
2194 } else if (right_block == next_block) { 2196 } else if (right_block == next_block) {
2195 __ b(condition, chunk_->GetAssemblyLabel(left_block)); 2197 __ b(cond, chunk_->GetAssemblyLabel(left_block), cr);
2196 } else { 2198 } else {
2197 __ b(condition, chunk_->GetAssemblyLabel(left_block)); 2199 __ b(cond, chunk_->GetAssemblyLabel(left_block), cr);
2198 __ b(chunk_->GetAssemblyLabel(right_block)); 2200 __ b(chunk_->GetAssemblyLabel(right_block));
2199 } 2201 }
2200 } 2202 }
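
EmitBranch picks the cheapest layout by checking both successors against the next block to be emitted, saving one branch wherever fall-through works. A hedged pseudocode sketch of that decision (stand-in names, not the LCodeGen API):

enum class Layout { kGotoOnly, kFallThroughTrue, kFallThroughFalse, kTwoBranches };

Layout ChooseBranchLayout(int true_block, int false_block, int next_block,
                          bool cond_is_always_true) {
  if (true_block == false_block || cond_is_always_true) return Layout::kGotoOnly;
  if (true_block == next_block) return Layout::kFallThroughTrue;    // b(!cond, false)
  if (false_block == next_block) return Layout::kFallThroughFalse;  // b(cond, true)
  return Layout::kTwoBranches;  // b(cond, true); b(false)
}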
2201 2203
2202 2204
2203 template<class InstrType> 2205 template <class InstrType>
2204 void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition) { 2206 void LCodeGen::EmitFalseBranch(InstrType instr, Condition cond, CRegister cr) {
2205 int false_block = instr->FalseDestination(chunk_); 2207 int false_block = instr->FalseDestination(chunk_);
2206 __ b(condition, chunk_->GetAssemblyLabel(false_block)); 2208 __ b(cond, chunk_->GetAssemblyLabel(false_block), cr);
2207 } 2209 }
2208 2210
2209 2211
2210 void LCodeGen::DoDebugBreak(LDebugBreak* instr) { 2212 void LCodeGen::DoDebugBreak(LDebugBreak* instr) { __ stop("LBreak"); }
2211 __ stop("LBreak");
2212 }
2213 2213
2214 2214
2215 void LCodeGen::DoBranch(LBranch* instr) { 2215 void LCodeGen::DoBranch(LBranch* instr) {
2216 Representation r = instr->hydrogen()->value()->representation(); 2216 Representation r = instr->hydrogen()->value()->representation();
2217 if (r.IsInteger32() || r.IsSmi()) { 2217 DoubleRegister dbl_scratch = double_scratch0();
2218 const uint crZOrNaNBits = (1 << (31 - Assembler::encode_crbit(cr7, CR_EQ)) |
2219 1 << (31 - Assembler::encode_crbit(cr7, CR_FU)));
2220
2221 if (r.IsInteger32()) {
2218 DCHECK(!info()->IsStub()); 2222 DCHECK(!info()->IsStub());
2219 Register reg = ToRegister(instr->value()); 2223 Register reg = ToRegister(instr->value());
2220 __ cmp(reg, Operand::Zero()); 2224 __ cmpwi(reg, Operand::Zero());
2225 EmitBranch(instr, ne);
2226 } else if (r.IsSmi()) {
2227 DCHECK(!info()->IsStub());
2228 Register reg = ToRegister(instr->value());
2229 __ cmpi(reg, Operand::Zero());
2221 EmitBranch(instr, ne); 2230 EmitBranch(instr, ne);
2222 } else if (r.IsDouble()) { 2231 } else if (r.IsDouble()) {
2223 DCHECK(!info()->IsStub()); 2232 DCHECK(!info()->IsStub());
2224 DwVfpRegister reg = ToDoubleRegister(instr->value()); 2233 DoubleRegister reg = ToDoubleRegister(instr->value());
2225 // Test the double value. Zero and NaN are false. 2234 // Test the double value. Zero and NaN are false.
2226 __ VFPCompareAndSetFlags(reg, 0.0); 2235 __ fcmpu(reg, kDoubleRegZero, cr7);
2227 __ cmp(r0, r0, vs); // If NaN, set the Z flag. (NaN -> false) 2236 __ mfcr(r0);
2228 EmitBranch(instr, ne); 2237 __ andi(r0, r0, Operand(crZOrNaNBits));
2238 EmitBranch(instr, eq, cr0);
2229 } else { 2239 } else {
2230 DCHECK(r.IsTagged()); 2240 DCHECK(r.IsTagged());
2231 Register reg = ToRegister(instr->value()); 2241 Register reg = ToRegister(instr->value());
2232 HType type = instr->hydrogen()->value()->type(); 2242 HType type = instr->hydrogen()->value()->type();
2233 if (type.IsBoolean()) { 2243 if (type.IsBoolean()) {
2234 DCHECK(!info()->IsStub()); 2244 DCHECK(!info()->IsStub());
2235 __ CompareRoot(reg, Heap::kTrueValueRootIndex); 2245 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2236 EmitBranch(instr, eq); 2246 EmitBranch(instr, eq);
2237 } else if (type.IsSmi()) { 2247 } else if (type.IsSmi()) {
2238 DCHECK(!info()->IsStub()); 2248 DCHECK(!info()->IsStub());
2239 __ cmp(reg, Operand::Zero()); 2249 __ cmpi(reg, Operand::Zero());
2240 EmitBranch(instr, ne); 2250 EmitBranch(instr, ne);
2241 } else if (type.IsJSArray()) { 2251 } else if (type.IsJSArray()) {
2242 DCHECK(!info()->IsStub()); 2252 DCHECK(!info()->IsStub());
2243 EmitBranch(instr, al); 2253 EmitBranch(instr, al);
2244 } else if (type.IsHeapNumber()) { 2254 } else if (type.IsHeapNumber()) {
2245 DCHECK(!info()->IsStub()); 2255 DCHECK(!info()->IsStub());
2246 DwVfpRegister dbl_scratch = double_scratch0(); 2256 __ lfd(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2247 __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2248 // Test the double value. Zero and NaN are false. 2257 // Test the double value. Zero and NaN are false.
2249 __ VFPCompareAndSetFlags(dbl_scratch, 0.0); 2258 __ fcmpu(dbl_scratch, kDoubleRegZero, cr7);
2250 __ cmp(r0, r0, vs); // If NaN, set the Z flag. (NaN) 2259 __ mfcr(r0);
2251 EmitBranch(instr, ne); 2260 __ andi(r0, r0, Operand(crZOrNaNBits));
2261 EmitBranch(instr, eq, cr0);
2252 } else if (type.IsString()) { 2262 } else if (type.IsString()) {
2253 DCHECK(!info()->IsStub()); 2263 DCHECK(!info()->IsStub());
2254 __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset)); 2264 __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
2255 __ cmp(ip, Operand::Zero()); 2265 __ cmpi(ip, Operand::Zero());
2256 EmitBranch(instr, ne); 2266 EmitBranch(instr, ne);
2257 } else { 2267 } else {
2258 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types(); 2268 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
2259 // Avoid deopts in the case where we've never executed this path before. 2269 // Avoid deopts in the case where we've never executed this path before.
2260 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic(); 2270 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
2261 2271
2262 if (expected.Contains(ToBooleanStub::UNDEFINED)) { 2272 if (expected.Contains(ToBooleanStub::UNDEFINED)) {
2263 // undefined -> false. 2273 // undefined -> false.
2264 __ CompareRoot(reg, Heap::kUndefinedValueRootIndex); 2274 __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
2265 __ b(eq, instr->FalseLabel(chunk_)); 2275 __ beq(instr->FalseLabel(chunk_));
2266 } 2276 }
2267 if (expected.Contains(ToBooleanStub::BOOLEAN)) { 2277 if (expected.Contains(ToBooleanStub::BOOLEAN)) {
2268 // Boolean -> its value. 2278 // Boolean -> its value.
2269 __ CompareRoot(reg, Heap::kTrueValueRootIndex); 2279 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2270 __ b(eq, instr->TrueLabel(chunk_)); 2280 __ beq(instr->TrueLabel(chunk_));
2271 __ CompareRoot(reg, Heap::kFalseValueRootIndex); 2281 __ CompareRoot(reg, Heap::kFalseValueRootIndex);
2272 __ b(eq, instr->FalseLabel(chunk_)); 2282 __ beq(instr->FalseLabel(chunk_));
2273 } 2283 }
2274 if (expected.Contains(ToBooleanStub::NULL_TYPE)) { 2284 if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
2275 // 'null' -> false. 2285 // 'null' -> false.
2276 __ CompareRoot(reg, Heap::kNullValueRootIndex); 2286 __ CompareRoot(reg, Heap::kNullValueRootIndex);
2277 __ b(eq, instr->FalseLabel(chunk_)); 2287 __ beq(instr->FalseLabel(chunk_));
2278 } 2288 }
2279 2289
2280 if (expected.Contains(ToBooleanStub::SMI)) { 2290 if (expected.Contains(ToBooleanStub::SMI)) {
2281 // Smis: 0 -> false, all other -> true. 2291 // Smis: 0 -> false, all other -> true.
2282 __ cmp(reg, Operand::Zero()); 2292 __ cmpi(reg, Operand::Zero());
2283 __ b(eq, instr->FalseLabel(chunk_)); 2293 __ beq(instr->FalseLabel(chunk_));
2284 __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); 2294 __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2285 } else if (expected.NeedsMap()) { 2295 } else if (expected.NeedsMap()) {
2286 // If we need a map later and have a Smi -> deopt. 2296 // If we need a map later and have a Smi -> deopt.
2287 __ SmiTst(reg); 2297 __ TestIfSmi(reg, r0);
2288 DeoptimizeIf(eq, instr, "Smi"); 2298 DeoptimizeIf(eq, instr, "Smi", cr0);
2289 } 2299 }
2290 2300
2291 const Register map = scratch0(); 2301 const Register map = scratch0();
2292 if (expected.NeedsMap()) { 2302 if (expected.NeedsMap()) {
2293 __ ldr(map, FieldMemOperand(reg, HeapObject::kMapOffset)); 2303 __ LoadP(map, FieldMemOperand(reg, HeapObject::kMapOffset));
2294 2304
2295 if (expected.CanBeUndetectable()) { 2305 if (expected.CanBeUndetectable()) {
2296 // Undetectable -> false. 2306 // Undetectable -> false.
2297 __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset)); 2307 __ lbz(ip, FieldMemOperand(map, Map::kBitFieldOffset));
2298 __ tst(ip, Operand(1 << Map::kIsUndetectable)); 2308 __ TestBit(ip, Map::kIsUndetectable, r0);
2299 __ b(ne, instr->FalseLabel(chunk_)); 2309 __ bne(instr->FalseLabel(chunk_), cr0);
2300 } 2310 }
2301 } 2311 }
2302 2312
2303 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) { 2313 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
2304 // spec object -> true. 2314 // spec object -> true.
2305 __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE); 2315 __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
2306 __ b(ge, instr->TrueLabel(chunk_)); 2316 __ bge(instr->TrueLabel(chunk_));
2307 } 2317 }
2308 2318
2309 if (expected.Contains(ToBooleanStub::STRING)) { 2319 if (expected.Contains(ToBooleanStub::STRING)) {
2310 // String value -> false iff empty. 2320 // String value -> false iff empty.
2311 Label not_string; 2321 Label not_string;
2312 __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE); 2322 __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
2313 __ b(ge, &not_string); 2323 __ bge(&not_string);
2314 __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset)); 2324 __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
2315 __ cmp(ip, Operand::Zero()); 2325 __ cmpi(ip, Operand::Zero());
2316 __ b(ne, instr->TrueLabel(chunk_)); 2326 __ bne(instr->TrueLabel(chunk_));
2317 __ b(instr->FalseLabel(chunk_)); 2327 __ b(instr->FalseLabel(chunk_));
2318 __ bind(&not_string); 2328 __ bind(&not_string);
2319 } 2329 }
2320 2330
2321 if (expected.Contains(ToBooleanStub::SYMBOL)) { 2331 if (expected.Contains(ToBooleanStub::SYMBOL)) {
2322 // Symbol value -> true. 2332 // Symbol value -> true.
2323 __ CompareInstanceType(map, ip, SYMBOL_TYPE); 2333 __ CompareInstanceType(map, ip, SYMBOL_TYPE);
2324 __ b(eq, instr->TrueLabel(chunk_)); 2334 __ beq(instr->TrueLabel(chunk_));
2325 } 2335 }
2326 2336
2327 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) { 2337 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
2328 // heap number -> false iff +0, -0, or NaN. 2338 // heap number -> false iff +0, -0, or NaN.
2329 DwVfpRegister dbl_scratch = double_scratch0();
2330 Label not_heap_number; 2339 Label not_heap_number;
2331 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex); 2340 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
2332 __ b(ne, &not_heap_number); 2341 __ bne(&not_heap_number);
2333 __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset)); 2342 __ lfd(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2334 __ VFPCompareAndSetFlags(dbl_scratch, 0.0); 2343 // Test the double value. Zero and NaN are false.
2335 __ cmp(r0, r0, vs); // NaN -> false. 2344 __ fcmpu(dbl_scratch, kDoubleRegZero, cr7);
2336 __ b(eq, instr->FalseLabel(chunk_)); // +0, -0 -> false. 2345 __ mfcr(r0);
2346 __ andi(r0, r0, Operand(crZOrNaNBits));
2347 __ bne(instr->FalseLabel(chunk_), cr0);
2337 __ b(instr->TrueLabel(chunk_)); 2348 __ b(instr->TrueLabel(chunk_));
2338 __ bind(&not_heap_number); 2349 __ bind(&not_heap_number);
2339 } 2350 }
2340 2351
2341 if (!expected.IsGeneric()) { 2352 if (!expected.IsGeneric()) {
2342 // We've seen something for the first time -> deopt. 2353 // We've seen something for the first time -> deopt.
2343 // This can only happen if we are not generic already. 2354 // This can only happen if we are not generic already.
2344 DeoptimizeIf(al, instr, "unexpected object"); 2355 DeoptimizeIf(al, instr, "unexpected object");
2345 } 2356 }
2346 } 2357 }
2347 } 2358 }
2348 } 2359 }
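
The double tests in DoBranch avoid a second compare by reading the whole condition register: fcmpu into cr7 sets EQ for ±0 and FU for NaN, mfcr copies the 32-bit CR image into a GPR, and andi keeps just those two cr7 bits. A standalone sketch of the mask arithmetic, with encode_crbit re-derived as a hypothetical helper (CR bits are numbered from the MSB; field n occupies bits 4n..4n+3 with LT=0, GT=1, EQ=2, SO/FU=3):

#include <cstdint>
#include <cstdio>

int EncodeCrBit(int cr_field, int bit_in_field) {
  return cr_field * 4 + bit_in_field;
}

int main() {
  const int CR_EQ = 2, CR_FU = 3;
  uint32_t mask = (1u << (31 - EncodeCrBit(7, CR_EQ))) |
                  (1u << (31 - EncodeCrBit(7, CR_FU)));
  std::printf("0x%x\n", mask);  // 0x3: the low two bits, reachable by andi
}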
2349 2360
2350 2361
2351 void LCodeGen::EmitGoto(int block) { 2362 void LCodeGen::EmitGoto(int block) {
2352 if (!IsNextEmittedBlock(block)) { 2363 if (!IsNextEmittedBlock(block)) {
2353 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block))); 2364 __ b(chunk_->GetAssemblyLabel(LookupDestination(block)));
2354 } 2365 }
2355 } 2366 }
2356 2367
2357 2368
2358 void LCodeGen::DoGoto(LGoto* instr) { 2369 void LCodeGen::DoGoto(LGoto* instr) { EmitGoto(instr->block_id()); }
2359 EmitGoto(instr->block_id());
2360 }
2361 2370
2362 2371
2363 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) { 2372 Condition LCodeGen::TokenToCondition(Token::Value op) {
2364 Condition cond = kNoCondition; 2373 Condition cond = kNoCondition;
2365 switch (op) { 2374 switch (op) {
2366 case Token::EQ: 2375 case Token::EQ:
2367 case Token::EQ_STRICT: 2376 case Token::EQ_STRICT:
2368 cond = eq; 2377 cond = eq;
2369 break; 2378 break;
2370 case Token::NE: 2379 case Token::NE:
2371 case Token::NE_STRICT: 2380 case Token::NE_STRICT:
2372 cond = ne; 2381 cond = ne;
2373 break; 2382 break;
2374 case Token::LT: 2383 case Token::LT:
2375 cond = is_unsigned ? lo : lt; 2384 cond = lt;
2376 break; 2385 break;
2377 case Token::GT: 2386 case Token::GT:
2378 cond = is_unsigned ? hi : gt; 2387 cond = gt;
2379 break; 2388 break;
2380 case Token::LTE: 2389 case Token::LTE:
2381 cond = is_unsigned ? ls : le; 2390 cond = le;
2382 break; 2391 break;
2383 case Token::GTE: 2392 case Token::GTE:
2384 cond = is_unsigned ? hs : ge; 2393 cond = ge;
2385 break; 2394 break;
2386 case Token::IN: 2395 case Token::IN:
2387 case Token::INSTANCEOF: 2396 case Token::INSTANCEOF:
2388 default: 2397 default:
2389 UNREACHABLE(); 2398 UNREACHABLE();
2390 } 2399 }
2391 return cond; 2400 return cond;
2392 } 2401 }
2393 2402
2394 2403
2395 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) { 2404 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2396 LOperand* left = instr->left(); 2405 LOperand* left = instr->left();
2397 LOperand* right = instr->right(); 2406 LOperand* right = instr->right();
2398 bool is_unsigned = 2407 bool is_unsigned =
2399 instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) || 2408 instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2400 instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32); 2409 instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2401 Condition cond = TokenToCondition(instr->op(), is_unsigned); 2410 Condition cond = TokenToCondition(instr->op());
2402 2411
2403 if (left->IsConstantOperand() && right->IsConstantOperand()) { 2412 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2404 // We can statically evaluate the comparison. 2413 // We can statically evaluate the comparison.
2405 double left_val = ToDouble(LConstantOperand::cast(left)); 2414 double left_val = ToDouble(LConstantOperand::cast(left));
2406 double right_val = ToDouble(LConstantOperand::cast(right)); 2415 double right_val = ToDouble(LConstantOperand::cast(right));
2407 int next_block = EvalComparison(instr->op(), left_val, right_val) ? 2416 int next_block = EvalComparison(instr->op(), left_val, right_val)
2408 instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_); 2417 ? instr->TrueDestination(chunk_)
2418 : instr->FalseDestination(chunk_);
2409 EmitGoto(next_block); 2419 EmitGoto(next_block);
2410 } else { 2420 } else {
2411 if (instr->is_double()) { 2421 if (instr->is_double()) {
2412 // Compare left and right operands as doubles and load the 2422 // Compare left and right operands as doubles and load the
2413 // resulting flags into the normal status register. 2423 // resulting flags into the normal status register.
2414 __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right)); 2424 __ fcmpu(ToDoubleRegister(left), ToDoubleRegister(right));
2415 // If a NaN is involved, i.e. the result is unordered (V set), 2425 // If a NaN is involved, i.e. the result is unordered,
2416 // jump to false block label. 2426 // jump to false block label.
2417 __ b(vs, instr->FalseLabel(chunk_)); 2427 __ bunordered(instr->FalseLabel(chunk_));
2418 } else { 2428 } else {
2419 if (right->IsConstantOperand()) { 2429 if (right->IsConstantOperand()) {
2420 int32_t value = ToInteger32(LConstantOperand::cast(right)); 2430 int32_t value = ToInteger32(LConstantOperand::cast(right));
2421 if (instr->hydrogen_value()->representation().IsSmi()) { 2431 if (instr->hydrogen_value()->representation().IsSmi()) {
2422 __ cmp(ToRegister(left), Operand(Smi::FromInt(value))); 2432 if (is_unsigned) {
2433 __ CmplSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
2434 } else {
2435 __ CmpSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
2436 }
2423 } else { 2437 } else {
2424 __ cmp(ToRegister(left), Operand(value)); 2438 if (is_unsigned) {
2439 __ Cmplwi(ToRegister(left), Operand(value), r0);
2440 } else {
2441 __ Cmpwi(ToRegister(left), Operand(value), r0);
2442 }
2425 } 2443 }
2426 } else if (left->IsConstantOperand()) { 2444 } else if (left->IsConstantOperand()) {
2427 int32_t value = ToInteger32(LConstantOperand::cast(left)); 2445 int32_t value = ToInteger32(LConstantOperand::cast(left));
2428 if (instr->hydrogen_value()->representation().IsSmi()) { 2446 if (instr->hydrogen_value()->representation().IsSmi()) {
2429 __ cmp(ToRegister(right), Operand(Smi::FromInt(value))); 2447 if (is_unsigned) {
2448 __ CmplSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
2449 } else {
2450 __ CmpSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
2451 }
2430 } else { 2452 } else {
2431 __ cmp(ToRegister(right), Operand(value)); 2453 if (is_unsigned) {
2454 __ Cmplwi(ToRegister(right), Operand(value), r0);
2455 } else {
2456 __ Cmpwi(ToRegister(right), Operand(value), r0);
2457 }
2432 } 2458 }
2433 // We commuted the operands, so commute the condition. 2459 // We commuted the operands, so commute the condition.
2434 cond = CommuteCondition(cond); 2460 cond = CommuteCondition(cond);
2461 } else if (instr->hydrogen_value()->representation().IsSmi()) {
2462 if (is_unsigned) {
2463 __ cmpl(ToRegister(left), ToRegister(right));
2464 } else {
2465 __ cmp(ToRegister(left), ToRegister(right));
2466 }
2435 } else { 2467 } else {
2436 __ cmp(ToRegister(left), ToRegister(right)); 2468 if (is_unsigned) {
2469 __ cmplw(ToRegister(left), ToRegister(right));
2470 } else {
2471 __ cmpw(ToRegister(left), ToRegister(right));
2472 }
2437 } 2473 }
2438 } 2474 }
2439 EmitBranch(instr, cond); 2475 EmitBranch(instr, cond);
2440 } 2476 }
2441 } 2477 }
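
When the constant lands on the left, the code compares the register against it and commutes the condition instead of materializing the constant into a register first. A small sketch of why commuting is sound (mirror the ordering relation; eq and ne are symmetric):

enum Cond { kEq, kNe, kLt, kLe, kGt, kGe };

// "c OP r" was requested but cmp(r, c) was emitted: test the mirror image,
// since c < r <=> r > c, c <= r <=> r >= c, and so on.
Cond Commute(Cond cond) {
  switch (cond) {
    case kLt: return kGt;
    case kLe: return kGe;
    case kGt: return kLt;
    case kGe: return kLe;
    default:  return cond;
  }
}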
2442 2478
2443 2479
2444 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) { 2480 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2445 Register left = ToRegister(instr->left()); 2481 Register left = ToRegister(instr->left());
2446 Register right = ToRegister(instr->right()); 2482 Register right = ToRegister(instr->right());
2447 2483
2448 __ cmp(left, Operand(right)); 2484 __ cmp(left, right);
2449 EmitBranch(instr, eq); 2485 EmitBranch(instr, eq);
2450 } 2486 }
2451 2487
2452 2488
2453 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) { 2489 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2454 if (instr->hydrogen()->representation().IsTagged()) { 2490 if (instr->hydrogen()->representation().IsTagged()) {
2455 Register input_reg = ToRegister(instr->object()); 2491 Register input_reg = ToRegister(instr->object());
2456 __ mov(ip, Operand(factory()->the_hole_value())); 2492 __ mov(ip, Operand(factory()->the_hole_value()));
2457 __ cmp(input_reg, ip); 2493 __ cmp(input_reg, ip);
2458 EmitBranch(instr, eq); 2494 EmitBranch(instr, eq);
2459 return; 2495 return;
2460 } 2496 }
2461 2497
2462 DwVfpRegister input_reg = ToDoubleRegister(instr->object()); 2498 DoubleRegister input_reg = ToDoubleRegister(instr->object());
2463 __ VFPCompareAndSetFlags(input_reg, input_reg); 2499 __ fcmpu(input_reg, input_reg);
2464 EmitFalseBranch(instr, vc); 2500 EmitFalseBranch(instr, ordered);
2465 2501
2466 Register scratch = scratch0(); 2502 Register scratch = scratch0();
2467 __ VmovHigh(scratch, input_reg); 2503 __ MovDoubleHighToInt(scratch, input_reg);
2468 __ cmp(scratch, Operand(kHoleNanUpper32)); 2504 __ Cmpi(scratch, Operand(kHoleNanUpper32), r0);
2469 EmitBranch(instr, eq); 2505 EmitBranch(instr, eq);
2470 } 2506 }
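
The hole is a single reserved NaN bit pattern, so once EmitFalseBranch on `ordered` has ruled out non-NaNs, comparing the upper 32 bits of the value against kHoleNanUpper32 suffices. The same test in portable C++ (the constant's actual value is V8's and is not restated here):

#include <cstdint>
#include <cstring>

bool IsHoleNan(double value, uint32_t hole_nan_upper32) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);  // type-pun without UB
  return static_cast<uint32_t>(bits >> 32) == hole_nan_upper32;
}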
2471 2507
2472 2508
2473 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) { 2509 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2474 Representation rep = instr->hydrogen()->value()->representation(); 2510 Representation rep = instr->hydrogen()->value()->representation();
2475 DCHECK(!rep.IsInteger32()); 2511 DCHECK(!rep.IsInteger32());
2476 Register scratch = ToRegister(instr->temp()); 2512 Register scratch = ToRegister(instr->temp());
2477 2513
2478 if (rep.IsDouble()) { 2514 if (rep.IsDouble()) {
2479 DwVfpRegister value = ToDoubleRegister(instr->value()); 2515 DoubleRegister value = ToDoubleRegister(instr->value());
2480 __ VFPCompareAndSetFlags(value, 0.0); 2516 __ fcmpu(value, kDoubleRegZero);
2481 EmitFalseBranch(instr, ne); 2517 EmitFalseBranch(instr, ne);
2482 __ VmovHigh(scratch, value); 2518 #if V8_TARGET_ARCH_PPC64
2483 __ cmp(scratch, Operand(0x80000000)); 2519 __ MovDoubleToInt64(scratch, value);
2520 #else
2521 __ MovDoubleHighToInt(scratch, value);
2522 #endif
2523 __ cmpi(scratch, Operand::Zero());
2524 EmitBranch(instr, lt);
2484 } else { 2525 } else {
2485 Register value = ToRegister(instr->value()); 2526 Register value = ToRegister(instr->value());
2486 __ CheckMap(value, 2527 __ CheckMap(value, scratch, Heap::kHeapNumberMapRootIndex,
2487 scratch, 2528 instr->FalseLabel(chunk()), DO_SMI_CHECK);
2488 Heap::kHeapNumberMapRootIndex, 2529 #if V8_TARGET_ARCH_PPC64
2489 instr->FalseLabel(chunk()), 2530 __ LoadP(scratch, FieldMemOperand(value, HeapNumber::kValueOffset));
2490 DO_SMI_CHECK); 2531 __ li(ip, Operand(1));
2491 __ ldr(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset)); 2532 __ rotrdi(ip, ip, 1); // ip = 0x80000000_00000000
2492 __ ldr(ip, FieldMemOperand(value, HeapNumber::kMantissaOffset)); 2533 __ cmp(scratch, ip);
2493 __ cmp(scratch, Operand(0x80000000)); 2534 #else
2494 __ cmp(ip, Operand(0x00000000), eq); 2535 __ lwz(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
2536 __ lwz(ip, FieldMemOperand(value, HeapNumber::kMantissaOffset));
2537 Label skip;
2538 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
2539 __ cmp(scratch, r0);
2540 __ bne(&skip);
2541 __ cmpi(ip, Operand::Zero());
2542 __ bind(&skip);
2543 #endif
2544 EmitBranch(instr, eq);
2495 } 2545 }
2496 EmitBranch(instr, eq);
2497 } 2546 }
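
Both branches reduce to the same bit test: -0.0 is the unique double whose 64-bit pattern is just the sign bit, which the li/rotrdi pair constructs on PPC64 and the exponent/mantissa word pair checks on 32-bit. In portable C++:

#include <cstdint>
#include <cstring>

bool IsMinusZero(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);
  return bits == (UINT64_C(1) << 63);  // sign bit set, everything else clear
}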
2498 2547
2499 2548
2500 Condition LCodeGen::EmitIsObject(Register input, 2549 Condition LCodeGen::EmitIsObject(Register input, Register temp1,
2501 Register temp1, 2550 Label* is_not_object, Label* is_object) {
2502 Label* is_not_object,
2503 Label* is_object) {
2504 Register temp2 = scratch0(); 2551 Register temp2 = scratch0();
2505 __ JumpIfSmi(input, is_not_object); 2552 __ JumpIfSmi(input, is_not_object);
2506 2553
2507 __ LoadRoot(temp2, Heap::kNullValueRootIndex); 2554 __ LoadRoot(temp2, Heap::kNullValueRootIndex);
2508 __ cmp(input, temp2); 2555 __ cmp(input, temp2);
2509 __ b(eq, is_object); 2556 __ beq(is_object);
2510 2557
2511 // Load map. 2558 // Load map.
2512 __ ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset)); 2559 __ LoadP(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
2513 // Undetectable objects behave like undefined. 2560 // Undetectable objects behave like undefined.
2514 __ ldrb(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset)); 2561 __ lbz(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
2515 __ tst(temp2, Operand(1 << Map::kIsUndetectable)); 2562 __ TestBit(temp2, Map::kIsUndetectable, r0);
2516 __ b(ne, is_not_object); 2563 __ bne(is_not_object, cr0);
2517 2564
2518 // Load instance type and check that it is in object type range. 2565 // Load instance type and check that it is in object type range.
2519 __ ldrb(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset)); 2566 __ lbz(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
2520 __ cmp(temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); 2567 __ cmpi(temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2521 __ b(lt, is_not_object); 2568 __ blt(is_not_object);
2522 __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE)); 2569 __ cmpi(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
2523 return le; 2570 return le;
2524 } 2571 }
2525 2572
2526 2573
2527 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) { 2574 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
2528 Register reg = ToRegister(instr->value()); 2575 Register reg = ToRegister(instr->value());
2529 Register temp1 = ToRegister(instr->temp()); 2576 Register temp1 = ToRegister(instr->temp());
2530 2577
2531 Condition true_cond = 2578 Condition true_cond = EmitIsObject(reg, temp1, instr->FalseLabel(chunk_),
2532 EmitIsObject(reg, temp1, 2579 instr->TrueLabel(chunk_));
2533 instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
2534 2580
2535 EmitBranch(instr, true_cond); 2581 EmitBranch(instr, true_cond);
2536 } 2582 }
2537 2583
2538 2584
2539 Condition LCodeGen::EmitIsString(Register input, 2585 Condition LCodeGen::EmitIsString(Register input, Register temp1,
2540 Register temp1,
2541 Label* is_not_string, 2586 Label* is_not_string,
2542 SmiCheck check_needed = INLINE_SMI_CHECK) { 2587 SmiCheck check_needed = INLINE_SMI_CHECK) {
2543 if (check_needed == INLINE_SMI_CHECK) { 2588 if (check_needed == INLINE_SMI_CHECK) {
2544 __ JumpIfSmi(input, is_not_string); 2589 __ JumpIfSmi(input, is_not_string);
2545 } 2590 }
2546 __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE); 2591 __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
2547 2592
2548 return lt; 2593 return lt;
2549 } 2594 }
2550 2595
2551 2596
2552 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) { 2597 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2553 Register reg = ToRegister(instr->value()); 2598 Register reg = ToRegister(instr->value());
2554 Register temp1 = ToRegister(instr->temp()); 2599 Register temp1 = ToRegister(instr->temp());
2555 2600
2556 SmiCheck check_needed = 2601 SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject()
2557 instr->hydrogen()->value()->type().IsHeapObject() 2602 ? OMIT_SMI_CHECK
2558 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; 2603 : INLINE_SMI_CHECK;
2559 Condition true_cond = 2604 Condition true_cond =
2560 EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed); 2605 EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
2561 2606
2562 EmitBranch(instr, true_cond); 2607 EmitBranch(instr, true_cond);
2563 } 2608 }
2564 2609
2565 2610
2566 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) { 2611 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2567 Register input_reg = EmitLoadRegister(instr->value(), ip); 2612 Register input_reg = EmitLoadRegister(instr->value(), ip);
2568 __ SmiTst(input_reg); 2613 __ TestIfSmi(input_reg, r0);
2569 EmitBranch(instr, eq); 2614 EmitBranch(instr, eq, cr0);
2570 } 2615 }
2571 2616
2572 2617
2573 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) { 2618 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2574 Register input = ToRegister(instr->value()); 2619 Register input = ToRegister(instr->value());
2575 Register temp = ToRegister(instr->temp()); 2620 Register temp = ToRegister(instr->temp());
2576 2621
2577 if (!instr->hydrogen()->value()->type().IsHeapObject()) { 2622 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2578 __ JumpIfSmi(input, instr->FalseLabel(chunk_)); 2623 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2579 } 2624 }
2580 __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset)); 2625 __ LoadP(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2581 __ ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset)); 2626 __ lbz(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
2582 __ tst(temp, Operand(1 << Map::kIsUndetectable)); 2627 __ TestBit(temp, Map::kIsUndetectable, r0);
2583 EmitBranch(instr, ne); 2628 EmitBranch(instr, ne, cr0);
2584 } 2629 }
2585 2630
2586 2631
2587 static Condition ComputeCompareCondition(Token::Value op) { 2632 static Condition ComputeCompareCondition(Token::Value op) {
2588 switch (op) { 2633 switch (op) {
2589 case Token::EQ_STRICT: 2634 case Token::EQ_STRICT:
2590 case Token::EQ: 2635 case Token::EQ:
2591 return eq; 2636 return eq;
2592 case Token::LT: 2637 case Token::LT:
2593 return lt; 2638 return lt;
2594 case Token::GT: 2639 case Token::GT:
2595 return gt; 2640 return gt;
2596 case Token::LTE: 2641 case Token::LTE:
2597 return le; 2642 return le;
2598 case Token::GTE: 2643 case Token::GTE:
2599 return ge; 2644 return ge;
2600 default: 2645 default:
2601 UNREACHABLE(); 2646 UNREACHABLE();
2602 return kNoCondition; 2647 return kNoCondition;
2603 } 2648 }
2604 } 2649 }
2605 2650
2606 2651
2607 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) { 2652 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2608 DCHECK(ToRegister(instr->context()).is(cp)); 2653 DCHECK(ToRegister(instr->context()).is(cp));
2609 Token::Value op = instr->op(); 2654 Token::Value op = instr->op();
2610 2655
2611 Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code(); 2656 Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
2612 CallCode(ic, RelocInfo::CODE_TARGET, instr); 2657 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2613 // This instruction also signals no smi code inlined. 2658 // This instruction also signals no smi code inlined.
2614 __ cmp(r0, Operand::Zero()); 2659 __ cmpi(r3, Operand::Zero());
2615 2660
2616 Condition condition = ComputeCompareCondition(op); 2661 Condition condition = ComputeCompareCondition(op);
2617 2662
2618 EmitBranch(instr, condition); 2663 EmitBranch(instr, condition);
2619 } 2664 }
2620 2665
2621 2666
2622 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) { 2667 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2623 InstanceType from = instr->from(); 2668 InstanceType from = instr->from();
2624 InstanceType to = instr->to(); 2669 InstanceType to = instr->to();
2625 if (from == FIRST_TYPE) return to; 2670 if (from == FIRST_TYPE) return to;
2626 DCHECK(from == to || to == LAST_TYPE); 2671 DCHECK(from == to || to == LAST_TYPE);
2627 return from; 2672 return from;
2628 } 2673 }
2629 2674
2630 2675
2631 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) { 2676 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2632 InstanceType from = instr->from(); 2677 InstanceType from = instr->from();
2633 InstanceType to = instr->to(); 2678 InstanceType to = instr->to();
2634 if (from == to) return eq; 2679 if (from == to) return eq;
2635 if (to == LAST_TYPE) return hs; 2680 if (to == LAST_TYPE) return ge;
2636 if (from == FIRST_TYPE) return ls; 2681 if (from == FIRST_TYPE) return le;
2637 UNREACHABLE(); 2682 UNREACHABLE();
2638 return eq; 2683 return eq;
2639 } 2684 }
2640 2685
2641 2686
2642 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) { 2687 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2643 Register scratch = scratch0(); 2688 Register scratch = scratch0();
2644 Register input = ToRegister(instr->value()); 2689 Register input = ToRegister(instr->value());
2645 2690
2646 if (!instr->hydrogen()->value()->type().IsHeapObject()) { 2691 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2647 __ JumpIfSmi(input, instr->FalseLabel(chunk_)); 2692 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2648 } 2693 }
2649 2694
2650 __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen())); 2695 __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
2651 EmitBranch(instr, BranchCondition(instr->hydrogen())); 2696 EmitBranch(instr, BranchCondition(instr->hydrogen()));
2652 } 2697 }
2653 2698
2654 2699
2655 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) { 2700 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2656 Register input = ToRegister(instr->value()); 2701 Register input = ToRegister(instr->value());
2657 Register result = ToRegister(instr->result()); 2702 Register result = ToRegister(instr->result());
2658 2703
2659 __ AssertString(input); 2704 __ AssertString(input);
2660 2705
2661 __ ldr(result, FieldMemOperand(input, String::kHashFieldOffset)); 2706 __ lwz(result, FieldMemOperand(input, String::kHashFieldOffset));
2662 __ IndexFromHash(result, result); 2707 __ IndexFromHash(result, result);
2663 } 2708 }
2664 2709
2665 2710
2666 void LCodeGen::DoHasCachedArrayIndexAndBranch( 2711 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2667 LHasCachedArrayIndexAndBranch* instr) { 2712 LHasCachedArrayIndexAndBranch* instr) {
2668 Register input = ToRegister(instr->value()); 2713 Register input = ToRegister(instr->value());
2669 Register scratch = scratch0(); 2714 Register scratch = scratch0();
2670 2715
2671 __ ldr(scratch, 2716 __ lwz(scratch, FieldMemOperand(input, String::kHashFieldOffset));
2672 FieldMemOperand(input, String::kHashFieldOffset)); 2717 __ mov(r0, Operand(String::kContainsCachedArrayIndexMask));
2673 __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask)); 2718 __ and_(r0, scratch, r0, SetRC);
2674 EmitBranch(instr, eq); 2719 EmitBranch(instr, eq, cr0);
2675 } 2720 }
2676 2721
2677 2722
2678 // Branches to a label or falls through with the answer in flags. Trashes 2723 // Branches to a label or falls through with the answer in flags. Trashes
2679 // the temp registers, but not the input. 2724 // the temp registers, but not the input.
2680 void LCodeGen::EmitClassOfTest(Label* is_true, 2725 void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false,
2681 Label* is_false, 2726 Handle<String> class_name, Register input,
2682 Handle<String>class_name, 2727 Register temp, Register temp2) {
2683 Register input,
2684 Register temp,
2685 Register temp2) {
2686 DCHECK(!input.is(temp)); 2728 DCHECK(!input.is(temp));
2687 DCHECK(!input.is(temp2)); 2729 DCHECK(!input.is(temp2));
2688 DCHECK(!temp.is(temp2)); 2730 DCHECK(!temp.is(temp2));
2689 2731
2690 __ JumpIfSmi(input, is_false); 2732 __ JumpIfSmi(input, is_false);
2691 2733
2692 if (String::Equals(isolate()->factory()->Function_string(), class_name)) { 2734 if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
2693 // Assuming the following assertions, we can use the same compares to test 2735 // Assuming the following assertions, we can use the same compares to test
2694 // for both being a function type and being in the object type range. 2736 // for both being a function type and being in the object type range.
2695 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); 2737 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2696 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE == 2738 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2697 FIRST_SPEC_OBJECT_TYPE + 1); 2739 FIRST_SPEC_OBJECT_TYPE + 1);
2698 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == 2740 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2699 LAST_SPEC_OBJECT_TYPE - 1); 2741 LAST_SPEC_OBJECT_TYPE - 1);
2700 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); 2742 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2701 __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE); 2743 __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
2702 __ b(lt, is_false); 2744 __ blt(is_false);
2703 __ b(eq, is_true); 2745 __ beq(is_true);
2704 __ cmp(temp2, Operand(LAST_SPEC_OBJECT_TYPE)); 2746 __ cmpi(temp2, Operand(LAST_SPEC_OBJECT_TYPE));
2705 __ b(eq, is_true); 2747 __ beq(is_true);
2706 } else { 2748 } else {
2707 // Faster code path to avoid two compares: subtract lower bound from the 2749 // Faster code path to avoid two compares: subtract lower bound from the
2708 // actual type and do a signed compare with the width of the type range. 2750 // actual type and do a signed compare with the width of the type range.
2709 __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset)); 2751 __ LoadP(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2710 __ ldrb(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset)); 2752 __ lbz(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset));
2711 __ sub(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); 2753 __ subi(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2712 __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE - 2754 __ cmpi(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
2713 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); 2755 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2714 __ b(gt, is_false); 2756 __ bgt(is_false);
2715 } 2757 }
2716 2758
2717 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range. 2759 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2718 // Check if the constructor in the map is a function. 2760 // Check if the constructor in the map is a function.
2719 __ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset)); 2761 __ LoadP(temp, FieldMemOperand(temp, Map::kConstructorOffset));
2720 2762
2721 // Objects with a non-function constructor have class 'Object'. 2763 // Objects with a non-function constructor have class 'Object'.
2722 __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE); 2764 __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
2723 if (class_name->IsOneByteEqualTo(STATIC_CHAR_VECTOR("Object"))) { 2765 if (class_name->IsOneByteEqualTo(STATIC_CHAR_VECTOR("Object"))) {
2724 __ b(ne, is_true); 2766 __ bne(is_true);
2725 } else { 2767 } else {
2726 __ b(ne, is_false); 2768 __ bne(is_false);
2727 } 2769 }
2728 2770
2729 // temp now contains the constructor function. Grab the 2771 // temp now contains the constructor function. Grab the
2730 // instance class name from there. 2772 // instance class name from there.
2731 __ ldr(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset)); 2773 __ LoadP(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2732 __ ldr(temp, FieldMemOperand(temp, 2774 __ LoadP(temp,
2733 SharedFunctionInfo::kInstanceClassNameOffset)); 2775 FieldMemOperand(temp, SharedFunctionInfo::kInstanceClassNameOffset));
2734 // The class name we are testing against is internalized since it's a literal. 2776 // The class name we are testing against is internalized since it's a literal.
2735 // The name in the constructor is internalized because of the way the context 2777 // The name in the constructor is internalized because of the way the context
2736 // is booted. This routine isn't expected to work for random API-created 2778 // is booted. This routine isn't expected to work for random API-created
2737 // classes and it doesn't have to because you can't access it with natives 2779 // classes and it doesn't have to because you can't access it with natives
2738 // syntax. Since both sides are internalized it is sufficient to use an 2780 // syntax. Since both sides are internalized it is sufficient to use an
2739 // identity comparison. 2781 // identity comparison.
2740 __ cmp(temp, Operand(class_name)); 2782 __ Cmpi(temp, Operand(class_name), r0);
2741 // End with the answer in flags. 2783 // End with the answer in flags.
2742 } 2784 }
2743 2785
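The fast path above folds a two-sided range test into a single subtract-and-compare. A minimal standalone C++ sketch of the idea, with kFirst and kLast as hypothetical stand-ins for the FIRST_/LAST_NONCALLABLE_SPEC_OBJECT_TYPE pair (the generated code uses the signed variant of the same trick):

// Sketch only, not V8 code: one-compare membership test for [kFirst, kLast],
// assuming the type constants are consecutive integers.
#include <cstdio>

constexpr int kFirst = 10;  // hypothetical lower bound
constexpr int kLast = 14;   // hypothetical upper bound

bool InRange(int type) {
  // After subtracting the lower bound, an unsigned compare against the
  // window width rejects both too-small inputs (which wrap to huge values)
  // and too-large inputs with a single branch.
  return static_cast<unsigned>(type - kFirst) <=
         static_cast<unsigned>(kLast - kFirst);
}

int main() {
  std::printf("%d %d %d\n", InRange(9), InRange(12), InRange(15));  // 0 1 0
  return 0;
}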
2744 2786
2745 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) { 2787 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2746 Register input = ToRegister(instr->value()); 2788 Register input = ToRegister(instr->value());
2747 Register temp = scratch0(); 2789 Register temp = scratch0();
2748 Register temp2 = ToRegister(instr->temp()); 2790 Register temp2 = ToRegister(instr->temp());
2749 Handle<String> class_name = instr->hydrogen()->class_name(); 2791 Handle<String> class_name = instr->hydrogen()->class_name();
2750 2792
2751 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), 2793 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2752 class_name, input, temp, temp2); 2794 class_name, input, temp, temp2);
2753 2795
2754 EmitBranch(instr, eq); 2796 EmitBranch(instr, eq);
2755 } 2797 }
2756 2798
2757 2799
2758 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) { 2800 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2759 Register reg = ToRegister(instr->value()); 2801 Register reg = ToRegister(instr->value());
2760 Register temp = ToRegister(instr->temp()); 2802 Register temp = ToRegister(instr->temp());
2761 2803
2762 __ ldr(temp, FieldMemOperand(reg, HeapObject::kMapOffset)); 2804 __ LoadP(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
2763 __ cmp(temp, Operand(instr->map())); 2805 __ Cmpi(temp, Operand(instr->map()), r0);
2764 EmitBranch(instr, eq); 2806 EmitBranch(instr, eq);
2765 } 2807 }
2766 2808
2767 2809
2768 void LCodeGen::DoInstanceOf(LInstanceOf* instr) { 2810 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2769 DCHECK(ToRegister(instr->context()).is(cp)); 2811 DCHECK(ToRegister(instr->context()).is(cp));
2770 DCHECK(ToRegister(instr->left()).is(r0)); // Object is in r0. 2812 DCHECK(ToRegister(instr->left()).is(r3)); // Object is in r3.
2771 DCHECK(ToRegister(instr->right()).is(r1)); // Function is in r1. 2813 DCHECK(ToRegister(instr->right()).is(r4)); // Function is in r4.
2772 2814
2773 InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters); 2815 InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
2774 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 2816 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2775 2817
2776 __ cmp(r0, Operand::Zero()); 2818 Label equal, done;
2777 __ mov(r0, Operand(factory()->false_value()), LeaveCC, ne); 2819 __ cmpi(r3, Operand::Zero());
2778 __ mov(r0, Operand(factory()->true_value()), LeaveCC, eq); 2820 __ beq(&equal);
2821 __ mov(r3, Operand(factory()->false_value()));
2822 __ b(&done);
2823
2824 __ bind(&equal);
2825 __ mov(r3, Operand(factory()->true_value()));
2826 __ bind(&done);
2779 } 2827 }
2780 2828
2781 2829
2782 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { 2830 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2783 class DeferredInstanceOfKnownGlobal FINAL : public LDeferredCode { 2831 class DeferredInstanceOfKnownGlobal FINAL : public LDeferredCode {
2784 public: 2832 public:
2785 DeferredInstanceOfKnownGlobal(LCodeGen* codegen, 2833 DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2786 LInstanceOfKnownGlobal* instr) 2834 LInstanceOfKnownGlobal* instr)
2787 : LDeferredCode(codegen), instr_(instr) { } 2835 : LDeferredCode(codegen), instr_(instr) {}
2788 virtual void Generate() OVERRIDE { 2836 virtual void Generate() OVERRIDE {
2789 codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_, 2837 codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
2790 &load_bool_);
2791 } 2838 }
2792 virtual LInstruction* instr() OVERRIDE { return instr_; } 2839 virtual LInstruction* instr() OVERRIDE { return instr_; }
2793 Label* map_check() { return &map_check_; } 2840 Label* map_check() { return &map_check_; }
2794 Label* load_bool() { return &load_bool_; }
2795 2841
2796 private: 2842 private:
2797 LInstanceOfKnownGlobal* instr_; 2843 LInstanceOfKnownGlobal* instr_;
2798 Label map_check_; 2844 Label map_check_;
2799 Label load_bool_;
2800 }; 2845 };
2801 2846
2802 DeferredInstanceOfKnownGlobal* deferred; 2847 DeferredInstanceOfKnownGlobal* deferred;
2803 deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr); 2848 deferred = new (zone()) DeferredInstanceOfKnownGlobal(this, instr);
2804 2849
2805 Label done, false_result; 2850 Label done, false_result;
2806 Register object = ToRegister(instr->value()); 2851 Register object = ToRegister(instr->value());
2807 Register temp = ToRegister(instr->temp()); 2852 Register temp = ToRegister(instr->temp());
2808 Register result = ToRegister(instr->result()); 2853 Register result = ToRegister(instr->result());
2809 2854
2810 // A Smi is not an instance of anything. 2855 // A Smi is not an instance of anything.
2811 __ JumpIfSmi(object, &false_result); 2856 __ JumpIfSmi(object, &false_result);
2812 2857
2813 // This is the inlined call site instanceof cache. The two occurrences of the 2858 // This is the inlined call site instanceof cache. The two occurrences of the
2814 // hole value will be patched to the last map/result pair generated by the 2859 // hole value will be patched to the last map/result pair generated by the
2815 // instanceof stub. 2860 // instanceof stub.
2816 Label cache_miss; 2861 Label cache_miss;
2817 Register map = temp; 2862 Register map = temp;
2818 __ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset)); 2863 __ LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
2819 { 2864 {
2820 // Block constant pool emission to ensure the positions of instructions are 2865 // Block constant pool emission to ensure the positions of instructions are
2821 // as expected by the patcher. See InstanceofStub::Generate(). 2866 // as expected by the patcher. See InstanceofStub::Generate().
2822 Assembler::BlockConstPoolScope block_const_pool(masm()); 2867 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
2823 __ bind(deferred->map_check()); // Label for calculating code patching. 2868 __ bind(deferred->map_check()); // Label for calculating code patching.
2824 // We use Factory::the_hole_value() on purpose instead of loading from the 2869 // We use Factory::the_hole_value() on purpose instead of loading from the
2825 // root array to force relocation to be able to later patch with 2870 // root array to force relocation to be able to later patch with
2826 // the cached map. 2871 // the cached map.
2827 Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value()); 2872 Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
2828 __ mov(ip, Operand(Handle<Object>(cell))); 2873 __ mov(ip, Operand(Handle<Object>(cell)));
2829 __ ldr(ip, FieldMemOperand(ip, PropertyCell::kValueOffset)); 2874 __ LoadP(ip, FieldMemOperand(ip, PropertyCell::kValueOffset));
2830 __ cmp(map, Operand(ip)); 2875 __ cmp(map, ip);
2831 __ b(ne, &cache_miss); 2876 __ bne(&cache_miss);
2832 __ bind(deferred->load_bool()); // Label for calculating code patching.
2833 // We use Factory::the_hole_value() on purpose instead of loading from the 2877 // We use Factory::the_hole_value() on purpose instead of loading from the
2834 // root array to force relocation to be able to later patch 2878 // root array to force relocation to be able to later patch
2835 // with true or false. 2879 // with true or false.
2836 __ mov(result, Operand(factory()->the_hole_value())); 2880 __ mov(result, Operand(factory()->the_hole_value()));
2837 } 2881 }
2838 __ b(&done); 2882 __ b(&done);
2839 2883
2840 // The inlined call site cache did not match. Check null and string before 2884 // The inlined call site cache did not match. Check null and string before
2841 // calling the deferred code. 2885 // calling the deferred code.
2842 __ bind(&cache_miss); 2886 __ bind(&cache_miss);
2843 // Null is not an instance of anything. 2887 // Null is not an instance of anything.
2844 __ LoadRoot(ip, Heap::kNullValueRootIndex); 2888 __ LoadRoot(ip, Heap::kNullValueRootIndex);
2845 __ cmp(object, Operand(ip)); 2889 __ cmp(object, ip);
2846 __ b(eq, &false_result); 2890 __ beq(&false_result);
2847 2891
2848 // String values are not instances of anything. 2892 // String values are not instances of anything.
2849 Condition is_string = masm_->IsObjectStringType(object, temp); 2893 Condition is_string = masm_->IsObjectStringType(object, temp);
2850 __ b(is_string, &false_result); 2894 __ b(is_string, &false_result, cr0);
2851 2895
2852 // Go to the deferred code. 2896 // Go to the deferred code.
2853 __ b(deferred->entry()); 2897 __ b(deferred->entry());
2854 2898
2855 __ bind(&false_result); 2899 __ bind(&false_result);
2856 __ LoadRoot(result, Heap::kFalseValueRootIndex); 2900 __ LoadRoot(result, Heap::kFalseValueRootIndex);
2857 2901
2858 // Here result holds either true or false. The deferred code also produces 2902 // Here result holds either true or false. The deferred code also produces
2859 // a true or false object. 2903 // a true or false object.
2860 __ bind(deferred->exit()); 2904 __ bind(deferred->exit());
2861 __ bind(&done); 2905 __ bind(&done);
2862 } 2906 }
2863 2907
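The comments in DoInstanceOfKnownGlobal describe patching an inlined (map, result) cache. As a hedged reading aid, here is a software model of that cache shape in standalone C++ (not V8 code; all names are hypothetical). The real implementation rewrites the embedded hole constants in place, which is why the mov of the cell must sit at a fixed offset from the call:

#include <functional>

// Sketch only: the call site caches a (map, result) pair, seeded empty,
// and the slow path patches it so later checks on the same map stay inline.
struct InstanceofSite {
  const void* cached_map = nullptr;  // stands in for the patchable hole cell
  bool cached_result = false;
};

bool InstanceofWithCache(InstanceofSite* site, const void* object_map,
                         const std::function<bool()>& slow_path) {
  if (site->cached_map == object_map) return site->cached_result;  // fast hit
  bool result = slow_path();       // the stub computes the real answer...
  site->cached_map = object_map;   // ...then patches the site for next time
  site->cached_result = result;
  return result;
}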
2864 2908
2865 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, 2909 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2866 Label* map_check, 2910 Label* map_check) {
2867 Label* bool_load) {
2868 InstanceofStub::Flags flags = InstanceofStub::kNoFlags; 2911 InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
2869 flags = static_cast<InstanceofStub::Flags>( 2912 flags = static_cast<InstanceofStub::Flags>(flags |
2870 flags | InstanceofStub::kArgsInRegisters); 2913 InstanceofStub::kArgsInRegisters);
2871 flags = static_cast<InstanceofStub::Flags>( 2914 flags = static_cast<InstanceofStub::Flags>(
2872 flags | InstanceofStub::kCallSiteInlineCheck); 2915 flags | InstanceofStub::kCallSiteInlineCheck);
2873 flags = static_cast<InstanceofStub::Flags>( 2916 flags = static_cast<InstanceofStub::Flags>(
2874 flags | InstanceofStub::kReturnTrueFalseObject); 2917 flags | InstanceofStub::kReturnTrueFalseObject);
2875 InstanceofStub stub(isolate(), flags); 2918 InstanceofStub stub(isolate(), flags);
2876 2919
2877 PushSafepointRegistersScope scope(this); 2920 PushSafepointRegistersScope scope(this);
2878 LoadContextFromDeferred(instr->context()); 2921 LoadContextFromDeferred(instr->context());
2879 2922
2880 __ Move(InstanceofStub::right(), instr->function()); 2923 __ Move(InstanceofStub::right(), instr->function());
2881 2924 // Include instructions below in delta: mov + call = mov + (mov + 2)
2882 int call_size = CallCodeSize(stub.GetCode(), RelocInfo::CODE_TARGET); 2925 static const int kAdditionalDelta = (2 * Assembler::kMovInstructions) + 2;
2883 int additional_delta = (call_size / Assembler::kInstrSize) + 4; 2926 int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
2884 // Make sure that code size is predictable, since we use specific constant
2885 // offsets in the code to find embedded values.
2886 PredictableCodeSizeScope predictable(
2887 masm_, (additional_delta + 1) * Assembler::kInstrSize);
2888 // Make sure we don't emit any additional entries in the constant pool before
2889 // the call to ensure that the CallCodeSize() calculated the correct number of
2890 // instructions for the constant pool load.
2891 { 2927 {
2892 ConstantPoolUnavailableScope constant_pool_unavailable(masm_); 2928 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
2893 int map_check_delta = 2929 // r8 is used to communicate the offset to the location of the map check.
2894 masm_->InstructionsGeneratedSince(map_check) + additional_delta; 2930 __ mov(r8, Operand(delta * Instruction::kInstrSize));
2895 int bool_load_delta =
2896 masm_->InstructionsGeneratedSince(bool_load) + additional_delta;
2897 Label before_push_delta;
2898 __ bind(&before_push_delta);
2899 __ BlockConstPoolFor(additional_delta);
2900 // r5 is used to communicate the offset to the location of the map check.
2901 __ mov(r5, Operand(map_check_delta * kPointerSize));
2902 // r6 is used to communicate the offset to the location of the bool load.
2903 __ mov(r6, Operand(bool_load_delta * kPointerSize));
2904 // The mov above can generate one or two instructions. The delta was
2905 // computed for two instructions, so we need to pad here in case of one
2906 // instruction.
2907 while (masm_->InstructionsGeneratedSince(&before_push_delta) != 4) {
2908 __ nop();
2909 }
2910 } 2931 }
2911 CallCodeGeneric(stub.GetCode(), 2932 CallCodeGeneric(stub.GetCode(), RelocInfo::CODE_TARGET, instr,
2912 RelocInfo::CODE_TARGET,
2913 instr,
2914 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); 2933 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
2934 DCHECK(delta == masm_->InstructionsGeneratedSince(map_check));
2915 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment(); 2935 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
2916 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); 2936 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2917 // Put the result value (r0) into the result register slot and 2937 // Put the result value (r3) into the result register slot and
2918 // restore all registers. 2938 // restore all registers.
2919 __ StoreToSafepointRegisterSlot(r0, ToRegister(instr->result())); 2939 __ StoreToSafepointRegisterSlot(r3, ToRegister(instr->result()));
2920 } 2940 }
2921 2941
2922 2942
2923 void LCodeGen::DoCmpT(LCmpT* instr) { 2943 void LCodeGen::DoCmpT(LCmpT* instr) {
2924 DCHECK(ToRegister(instr->context()).is(cp)); 2944 DCHECK(ToRegister(instr->context()).is(cp));
2925 Token::Value op = instr->op(); 2945 Token::Value op = instr->op();
2926 2946
2927 Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code(); 2947 Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
2928 CallCode(ic, RelocInfo::CODE_TARGET, instr); 2948 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2929 // This instruction also signals no smi code inlined. 2949 // This instruction also signals no smi code inlined.
2930 __ cmp(r0, Operand::Zero()); 2950 __ cmpi(r3, Operand::Zero());
2931 2951
2932 Condition condition = ComputeCompareCondition(op); 2952 Condition condition = ComputeCompareCondition(op);
2933 __ LoadRoot(ToRegister(instr->result()), 2953 Label true_value, done;
2934 Heap::kTrueValueRootIndex, 2954
2935 condition); 2955 __ b(condition, &true_value);
2936 __ LoadRoot(ToRegister(instr->result()), 2956
2937 Heap::kFalseValueRootIndex, 2957 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2938 NegateCondition(condition)); 2958 __ b(&done);
2959
2960 __ bind(&true_value);
2961 __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2962
2963 __ bind(&done);
2939 } 2964 }
2940 2965
2941 2966
2942 void LCodeGen::DoReturn(LReturn* instr) { 2967 void LCodeGen::DoReturn(LReturn* instr) {
2943 if (FLAG_trace && info()->IsOptimizing()) { 2968 if (FLAG_trace && info()->IsOptimizing()) {
2944 // Push the return value on the stack as the parameter. 2969 // Push the return value on the stack as the parameter.
2945 // Runtime::TraceExit returns its parameter in r0. We're leaving the code 2970 // Runtime::TraceExit returns its parameter in r3. We're leaving the code
2946 // managed by the register allocator and tearing down the frame, it's 2971 // managed by the register allocator and tearing down the frame, it's
2947 // safe to write to the context register. 2972 // safe to write to the context register.
2948 __ push(r0); 2973 __ push(r3);
2949 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); 2974 __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2950 __ CallRuntime(Runtime::kTraceExit, 1); 2975 __ CallRuntime(Runtime::kTraceExit, 1);
2951 } 2976 }
2952 if (info()->saves_caller_doubles()) { 2977 if (info()->saves_caller_doubles()) {
2953 RestoreCallerDoubles(); 2978 RestoreCallerDoubles();
2954 } 2979 }
2955 int no_frame_start = -1; 2980 int no_frame_start = -1;
2956 if (NeedsEagerFrame()) { 2981 if (instr->has_constant_parameter_count()) {
2957 no_frame_start = masm_->LeaveFrame(StackFrame::JAVA_SCRIPT); 2982 int parameter_count = ToInteger32(instr->constant_parameter_count());
2983 int32_t sp_delta = (parameter_count + 1) * kPointerSize;
2984 if (NeedsEagerFrame()) {
2985 no_frame_start = masm_->LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta);
2986 } else if (sp_delta != 0) {
2987 __ addi(sp, sp, Operand(sp_delta));
2988 }
2989 } else {
2990 Register reg = ToRegister(instr->parameter_count());
2991 // The argument count parameter is a smi.
2992 if (NeedsEagerFrame()) {
2993 no_frame_start = masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
2994 }
2995 __ SmiToPtrArrayOffset(r0, reg);
2996 __ add(sp, sp, r0);
2958 } 2997 }
2959 { ConstantPoolUnavailableScope constant_pool_unavailable(masm());
2960 if (instr->has_constant_parameter_count()) {
2961 int parameter_count = ToInteger32(instr->constant_parameter_count());
2962 int32_t sp_delta = (parameter_count + 1) * kPointerSize;
2963 if (sp_delta != 0) {
2964 __ add(sp, sp, Operand(sp_delta));
2965 }
2966 } else {
2967 DCHECK(info()->IsStub()); // Functions would need to drop one more value.
2968 Register reg = ToRegister(instr->parameter_count());
2969 // The argument count parameter is a smi.
2970 __ SmiUntag(reg);
2971 __ add(sp, sp, Operand(reg, LSL, kPointerSizeLog2));
2972 }
2973 2998
2974 __ Jump(lr); 2999 __ blr();
2975 3000
2976 if (no_frame_start != -1) { 3001 if (no_frame_start != -1) {
2977 info_->AddNoFrameRange(no_frame_start, masm_->pc_offset()); 3002 info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
2978 }
2979 } 3003 }
2980 } 3004 }
2981 3005
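On the constant-parameter-count path in DoReturn above, the stack delta is (parameter_count + 1) * kPointerSize; the +1 drops the implicit receiver along with the declared parameters. A trivial standalone C++ sketch (not V8 code):

#include <cstdio>

// Sketch only: bytes popped on return for a known parameter count.
int ReturnSpDelta(int parameter_count, int pointer_size) {
  return (parameter_count + 1) * pointer_size;  // parameters + receiver
}

int main() {
  std::printf("%d\n", ReturnSpDelta(2, 8));  // 24 bytes on a 64-bit target
  return 0;
}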
2982 3006
2983 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { 3007 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
2984 Register result = ToRegister(instr->result()); 3008 Register result = ToRegister(instr->result());
2985 __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell().handle()))); 3009 __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
2986 __ ldr(result, FieldMemOperand(ip, Cell::kValueOffset)); 3010 __ LoadP(result, FieldMemOperand(ip, Cell::kValueOffset));
2987 if (instr->hydrogen()->RequiresHoleCheck()) { 3011 if (instr->hydrogen()->RequiresHoleCheck()) {
2988 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); 3012 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2989 __ cmp(result, ip); 3013 __ cmp(result, ip);
2990 DeoptimizeIf(eq, instr, "hole"); 3014 DeoptimizeIf(eq, instr, "hole");
2991 } 3015 }
2992 } 3016 }
2993 3017
2994 3018
2995 template <class T> 3019 template <class T>
2996 void LCodeGen::EmitVectorLoadICRegisters(T* instr) { 3020 void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
2997 DCHECK(FLAG_vector_ics); 3021 DCHECK(FLAG_vector_ics);
2998 Register vector_register = ToRegister(instr->temp_vector()); 3022 Register vector = ToRegister(instr->temp_vector());
2999 DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister())); 3023 DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister()));
3000 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector(); 3024 __ Move(vector, instr->hydrogen()->feedback_vector());
3001 __ Move(vector_register, vector);
3002 // No need to allocate this register. 3025 // No need to allocate this register.
3003 DCHECK(VectorLoadICDescriptor::SlotRegister().is(r0)); 3026 DCHECK(VectorLoadICDescriptor::SlotRegister().is(r3));
3004 int index = vector->GetIndex(instr->hydrogen()->slot()); 3027 __ mov(VectorLoadICDescriptor::SlotRegister(),
3005 __ mov(VectorLoadICDescriptor::SlotRegister(), Operand(Smi::FromInt(index))); 3028 Operand(Smi::FromInt(instr->hydrogen()->slot())));
3006 } 3029 }
3007 3030
3008 3031
3009 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) { 3032 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
3010 DCHECK(ToRegister(instr->context()).is(cp)); 3033 DCHECK(ToRegister(instr->context()).is(cp));
3011 DCHECK(ToRegister(instr->global_object()) 3034 DCHECK(ToRegister(instr->global_object())
3012 .is(LoadDescriptor::ReceiverRegister())); 3035 .is(LoadDescriptor::ReceiverRegister()));
3013 DCHECK(ToRegister(instr->result()).is(r0)); 3036 DCHECK(ToRegister(instr->result()).is(r3));
3014 3037
3015 __ mov(LoadDescriptor::NameRegister(), Operand(instr->name())); 3038 __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
3016 if (FLAG_vector_ics) { 3039 if (FLAG_vector_ics) {
3017 EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr); 3040 EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
3018 } 3041 }
3019 ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL; 3042 ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
3020 Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode).code(); 3043 Handle<Code> ic = CodeFactory::LoadIC(isolate(), mode).code();
3021 CallCode(ic, RelocInfo::CODE_TARGET, instr); 3044 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3022 } 3045 }
3023 3046
3024 3047
3025 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { 3048 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
3026 Register value = ToRegister(instr->value()); 3049 Register value = ToRegister(instr->value());
3027 Register cell = scratch0(); 3050 Register cell = scratch0();
3028 3051
3029 // Load the cell. 3052 // Load the cell.
3030 __ mov(cell, Operand(instr->hydrogen()->cell().handle())); 3053 __ mov(cell, Operand(instr->hydrogen()->cell().handle()));
3031 3054
3032 // If the cell we are storing to contains the hole it could have 3055 // If the cell we are storing to contains the hole it could have
3033 // been deleted from the property dictionary. In that case, we need 3056 // been deleted from the property dictionary. In that case, we need
3034 // to update the property details in the property dictionary to mark 3057 // to update the property details in the property dictionary to mark
3035 // it as no longer deleted. 3058 // it as no longer deleted.
3036 if (instr->hydrogen()->RequiresHoleCheck()) { 3059 if (instr->hydrogen()->RequiresHoleCheck()) {
3037 // We use a temp to check the payload (CompareRoot might clobber ip). 3060 // We use a temp to check the payload (CompareRoot might clobber ip).
3038 Register payload = ToRegister(instr->temp()); 3061 Register payload = ToRegister(instr->temp());
3039 __ ldr(payload, FieldMemOperand(cell, Cell::kValueOffset)); 3062 __ LoadP(payload, FieldMemOperand(cell, Cell::kValueOffset));
3040 __ CompareRoot(payload, Heap::kTheHoleValueRootIndex); 3063 __ CompareRoot(payload, Heap::kTheHoleValueRootIndex);
3041 DeoptimizeIf(eq, instr, "hole"); 3064 DeoptimizeIf(eq, instr, "hole");
3042 } 3065 }
3043 3066
3044 // Store the value. 3067 // Store the value.
3045 __ str(value, FieldMemOperand(cell, Cell::kValueOffset)); 3068 __ StoreP(value, FieldMemOperand(cell, Cell::kValueOffset), r0);
3046 // Cells are always rescanned, so no write barrier here. 3069 // Cells are always rescanned, so no write barrier here.
3047 } 3070 }
3048 3071
3049 3072
3050 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { 3073 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
3051 Register context = ToRegister(instr->context()); 3074 Register context = ToRegister(instr->context());
3052 Register result = ToRegister(instr->result()); 3075 Register result = ToRegister(instr->result());
3053 __ ldr(result, ContextOperand(context, instr->slot_index())); 3076 __ LoadP(result, ContextOperand(context, instr->slot_index()));
3054 if (instr->hydrogen()->RequiresHoleCheck()) { 3077 if (instr->hydrogen()->RequiresHoleCheck()) {
3055 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); 3078 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3056 __ cmp(result, ip); 3079 __ cmp(result, ip);
3057 if (instr->hydrogen()->DeoptimizesOnHole()) { 3080 if (instr->hydrogen()->DeoptimizesOnHole()) {
3058 DeoptimizeIf(eq, instr, "hole"); 3081 DeoptimizeIf(eq, instr, "hole");
3059 } else { 3082 } else {
3060 __ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq); 3083 Label skip;
3084 __ bne(&skip);
3085 __ mov(result, Operand(factory()->undefined_value()));
3086 __ bind(&skip);
3061 } 3087 }
3062 } 3088 }
3063 } 3089 }
3064 3090
3065 3091
3066 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { 3092 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
3067 Register context = ToRegister(instr->context()); 3093 Register context = ToRegister(instr->context());
3068 Register value = ToRegister(instr->value()); 3094 Register value = ToRegister(instr->value());
3069 Register scratch = scratch0(); 3095 Register scratch = scratch0();
3070 MemOperand target = ContextOperand(context, instr->slot_index()); 3096 MemOperand target = ContextOperand(context, instr->slot_index());
3071 3097
3072 Label skip_assignment; 3098 Label skip_assignment;
3073 3099
3074 if (instr->hydrogen()->RequiresHoleCheck()) { 3100 if (instr->hydrogen()->RequiresHoleCheck()) {
3075 __ ldr(scratch, target); 3101 __ LoadP(scratch, target);
3076 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); 3102 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3077 __ cmp(scratch, ip); 3103 __ cmp(scratch, ip);
3078 if (instr->hydrogen()->DeoptimizesOnHole()) { 3104 if (instr->hydrogen()->DeoptimizesOnHole()) {
3079 DeoptimizeIf(eq, instr, "hole"); 3105 DeoptimizeIf(eq, instr, "hole");
3080 } else { 3106 } else {
3081 __ b(ne, &skip_assignment); 3107 __ bne(&skip_assignment);
3082 } 3108 }
3083 } 3109 }
3084 3110
3085 __ str(value, target); 3111 __ StoreP(value, target, r0);
3086 if (instr->hydrogen()->NeedsWriteBarrier()) { 3112 if (instr->hydrogen()->NeedsWriteBarrier()) {
3087 SmiCheck check_needed = 3113 SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject()
3088 instr->hydrogen()->value()->type().IsHeapObject() 3114 ? OMIT_SMI_CHECK
3089 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; 3115 : INLINE_SMI_CHECK;
3090 __ RecordWriteContextSlot(context, 3116 __ RecordWriteContextSlot(context, target.offset(), value, scratch,
3091 target.offset(), 3117 GetLinkRegisterState(), kSaveFPRegs,
3092 value, 3118 EMIT_REMEMBERED_SET, check_needed);
3093 scratch,
3094 GetLinkRegisterState(),
3095 kSaveFPRegs,
3096 EMIT_REMEMBERED_SET,
3097 check_needed);
3098 } 3119 }
3099 3120
3100 __ bind(&skip_assignment); 3121 __ bind(&skip_assignment);
3101 } 3122 }
3102 3123
3103 3124
3104 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { 3125 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
3105 HObjectAccess access = instr->hydrogen()->access(); 3126 HObjectAccess access = instr->hydrogen()->access();
3106 int offset = access.offset(); 3127 int offset = access.offset();
3107 Register object = ToRegister(instr->object()); 3128 Register object = ToRegister(instr->object());
3108 3129
3109 if (access.IsExternalMemory()) { 3130 if (access.IsExternalMemory()) {
3110 Register result = ToRegister(instr->result()); 3131 Register result = ToRegister(instr->result());
3111 MemOperand operand = MemOperand(object, offset); 3132 MemOperand operand = MemOperand(object, offset);
3112 __ Load(result, operand, access.representation()); 3133 __ LoadRepresentation(result, operand, access.representation(), r0);
3113 return; 3134 return;
3114 } 3135 }
3115 3136
3116 if (instr->hydrogen()->representation().IsDouble()) { 3137 if (instr->hydrogen()->representation().IsDouble()) {
3117 DwVfpRegister result = ToDoubleRegister(instr->result()); 3138 DoubleRegister result = ToDoubleRegister(instr->result());
3118 __ vldr(result, FieldMemOperand(object, offset)); 3139 __ lfd(result, FieldMemOperand(object, offset));
3119 return; 3140 return;
3120 } 3141 }
3121 3142
3122 Register result = ToRegister(instr->result()); 3143 Register result = ToRegister(instr->result());
3123 if (!access.IsInobject()) { 3144 if (!access.IsInobject()) {
3124 __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); 3145 __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
3125 object = result; 3146 object = result;
3126 } 3147 }
3127 MemOperand operand = FieldMemOperand(object, offset); 3148
3128 __ Load(result, operand, access.representation()); 3149 Representation representation = access.representation();
3150
3151 #if V8_TARGET_ARCH_PPC64
3152 // 64-bit Smi optimization
3153 if (representation.IsSmi() &&
3154 instr->hydrogen()->representation().IsInteger32()) {
3155 // Read int value directly from upper half of the smi.
3156 STATIC_ASSERT(kSmiTag == 0);
3157 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
3158 #if V8_TARGET_LITTLE_ENDIAN
3159 offset += kPointerSize / 2;
3160 #endif
3161 representation = Representation::Integer32();
3162 }
3163 #endif
3164
3165 __ LoadRepresentation(result, FieldMemOperand(object, offset), representation,
3166 r0);
3129 } 3167 }
3130 3168
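The 64-bit Smi optimization above relies on the layout pinned down by the STATIC_ASSERTs: with kSmiTagSize + kSmiShiftSize == 32, the 32-bit payload lives in the upper word of the 8-byte smi, so on little-endian targets a 4-byte load at offset + kPointerSize / 2 reads the untagged integer directly. A hedged standalone C++ sketch (not V8 code):

#include <cstdint>
#include <cstring>

// Sketch only: read the int32 payload out of a 64-bit smi slot,
// assuming the little-endian layout handled by the #if branch above.
int32_t LoadSmiPayloadLE(const void* smi_slot) {
  int32_t payload;
  std::memcpy(&payload, static_cast<const char*>(smi_slot) + 4,
              sizeof(payload));  // upper word starts 4 bytes in
  return payload;
}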
3131 3169
3132 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) { 3170 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
3133 DCHECK(ToRegister(instr->context()).is(cp)); 3171 DCHECK(ToRegister(instr->context()).is(cp));
3134 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister())); 3172 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3135 DCHECK(ToRegister(instr->result()).is(r0)); 3173 DCHECK(ToRegister(instr->result()).is(r3));
3136 3174
3137 // Name is always in r2. 3175 // Name is always in r5.
3138 __ mov(LoadDescriptor::NameRegister(), Operand(instr->name())); 3176 __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
3139 if (FLAG_vector_ics) { 3177 if (FLAG_vector_ics) {
3140 EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr); 3178 EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
3141 } 3179 }
3142 Handle<Code> ic = 3180 Handle<Code> ic = CodeFactory::LoadIC(isolate(), NOT_CONTEXTUAL).code();
3143 CodeFactory::LoadICInOptimizedCode(isolate(), NOT_CONTEXTUAL).code(); 3181 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3144 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
3145 } 3182 }
3146 3183
3147 3184
3148 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { 3185 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
3149 Register scratch = scratch0(); 3186 Register scratch = scratch0();
3150 Register function = ToRegister(instr->function()); 3187 Register function = ToRegister(instr->function());
3151 Register result = ToRegister(instr->result()); 3188 Register result = ToRegister(instr->result());
3152 3189
3153 // Get the prototype or initial map from the function. 3190 // Get the prototype or initial map from the function.
3154 __ ldr(result, 3191 __ LoadP(result,
3155 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); 3192 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3156 3193
3157 // Check that the function has a prototype or an initial map. 3194 // Check that the function has a prototype or an initial map.
3158 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); 3195 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3159 __ cmp(result, ip); 3196 __ cmp(result, ip);
3160 DeoptimizeIf(eq, instr, "hole"); 3197 DeoptimizeIf(eq, instr, "hole");
3161 3198
3162 // If the function does not have an initial map, we're done. 3199 // If the function does not have an initial map, we're done.
3163 Label done; 3200 Label done;
3164 __ CompareObjectType(result, scratch, scratch, MAP_TYPE); 3201 __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
3165 __ b(ne, &done); 3202 __ bne(&done);
3166 3203
3167 // Get the prototype from the initial map. 3204 // Get the prototype from the initial map.
3168 __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset)); 3205 __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
3169 3206
3170 // All done. 3207 // All done.
3171 __ bind(&done); 3208 __ bind(&done);
3172 } 3209 }
3173 3210
3174 3211
3175 void LCodeGen::DoLoadRoot(LLoadRoot* instr) { 3212 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
3176 Register result = ToRegister(instr->result()); 3213 Register result = ToRegister(instr->result());
3177 __ LoadRoot(result, instr->index()); 3214 __ LoadRoot(result, instr->index());
3178 } 3215 }
3179 3216
3180 3217
3181 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) { 3218 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
3182 Register arguments = ToRegister(instr->arguments()); 3219 Register arguments = ToRegister(instr->arguments());
3183 Register result = ToRegister(instr->result()); 3220 Register result = ToRegister(instr->result());
3184 // There are two words between the frame pointer and the last argument. 3221 // There are two words between the frame pointer and the last argument.
3185 // Subtracting from length accounts for one of them; add one more. 3222 // Subtracting from length accounts for one of them; add one more.
3186 if (instr->length()->IsConstantOperand()) { 3223 if (instr->length()->IsConstantOperand()) {
3187 int const_length = ToInteger32(LConstantOperand::cast(instr->length())); 3224 int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
3188 if (instr->index()->IsConstantOperand()) { 3225 if (instr->index()->IsConstantOperand()) {
3189 int const_index = ToInteger32(LConstantOperand::cast(instr->index())); 3226 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3190 int index = (const_length - const_index) + 1; 3227 int index = (const_length - const_index) + 1;
3191 __ ldr(result, MemOperand(arguments, index * kPointerSize)); 3228 __ LoadP(result, MemOperand(arguments, index * kPointerSize), r0);
3192 } else { 3229 } else {
3193 Register index = ToRegister(instr->index()); 3230 Register index = ToRegister(instr->index());
3194 __ rsb(result, index, Operand(const_length + 1)); 3231 __ subfic(result, index, Operand(const_length + 1));
3195 __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2)); 3232 __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
3233 __ LoadPX(result, MemOperand(arguments, result));
3196 } 3234 }
3197 } else if (instr->index()->IsConstantOperand()) { 3235 } else if (instr->index()->IsConstantOperand()) {
3198 Register length = ToRegister(instr->length()); 3236 Register length = ToRegister(instr->length());
3199 int const_index = ToInteger32(LConstantOperand::cast(instr->index())); 3237 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3200 int loc = const_index - 1; 3238 int loc = const_index - 1;
3201 if (loc != 0) { 3239 if (loc != 0) {
3202 __ sub(result, length, Operand(loc)); 3240 __ subi(result, length, Operand(loc));
3203 __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2)); 3241 __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
3204 } else { 3242 __ LoadPX(result, MemOperand(arguments, result));
3205 __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
3206 }
3207 } else { 3243 } else {
3244 __ ShiftLeftImm(result, length, Operand(kPointerSizeLog2));
3245 __ LoadPX(result, MemOperand(arguments, result));
3246 }
3247 } else {
3208 Register length = ToRegister(instr->length()); 3248 Register length = ToRegister(instr->length());
3209 Register index = ToRegister(instr->index()); 3249 Register index = ToRegister(instr->index());
3210 __ sub(result, length, index); 3250 __ sub(result, length, index);
3211 __ add(result, result, Operand(1)); 3251 __ addi(result, result, Operand(1));
3212 __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2)); 3252 __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
3253 __ LoadPX(result, MemOperand(arguments, result));
3213 } 3254 }
3214 } 3255 }
3215 3256
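On the index arithmetic in DoAccessArgumentsAt: with two words between the frame pointer and the last argument, argument `index` sits (length - index) + 1 slots from the base. A minimal standalone C++ sketch (not V8 code):

#include <cstdint>

// Sketch only: slot address for argument `index`; the subtraction from
// length covers one of the two frame words, the +1 covers the other.
uintptr_t ArgumentSlotAddress(uintptr_t arguments_base, int length, int index,
                              int pointer_size) {
  int slot = (length - index) + 1;
  return arguments_base + static_cast<uintptr_t>(slot) * pointer_size;
}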
3216 3257
3217 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { 3258 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
3218 Register external_pointer = ToRegister(instr->elements()); 3259 Register external_pointer = ToRegister(instr->elements());
3219 Register key = no_reg; 3260 Register key = no_reg;
3220 ElementsKind elements_kind = instr->elements_kind(); 3261 ElementsKind elements_kind = instr->elements_kind();
3221 bool key_is_constant = instr->key()->IsConstantOperand(); 3262 bool key_is_constant = instr->key()->IsConstantOperand();
3222 int constant_key = 0; 3263 int constant_key = 0;
3223 if (key_is_constant) { 3264 if (key_is_constant) {
3224 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); 3265 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3225 if (constant_key & 0xF0000000) { 3266 if (constant_key & 0xF0000000) {
3226 Abort(kArrayIndexConstantValueTooBig); 3267 Abort(kArrayIndexConstantValueTooBig);
3227 } 3268 }
3228 } else { 3269 } else {
3229 key = ToRegister(instr->key()); 3270 key = ToRegister(instr->key());
3230 } 3271 }
3231 int element_size_shift = ElementsKindToShiftSize(elements_kind); 3272 int element_size_shift = ElementsKindToShiftSize(elements_kind);
3232 int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) 3273 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
3233 ? (element_size_shift - kSmiTagSize) : element_size_shift;
3234 int base_offset = instr->base_offset(); 3274 int base_offset = instr->base_offset();
3235 3275
3236 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS || 3276 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3237 elements_kind == FLOAT32_ELEMENTS || 3277 elements_kind == FLOAT32_ELEMENTS ||
3238 elements_kind == EXTERNAL_FLOAT64_ELEMENTS || 3278 elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
3239 elements_kind == FLOAT64_ELEMENTS) { 3279 elements_kind == FLOAT64_ELEMENTS) {
3240 int base_offset = instr->base_offset(); 3280 DoubleRegister result = ToDoubleRegister(instr->result());
3241 DwVfpRegister result = ToDoubleRegister(instr->result()); 3281 if (key_is_constant) {
3242 Operand operand = key_is_constant 3282 __ Add(scratch0(), external_pointer, constant_key << element_size_shift,
3243 ? Operand(constant_key << element_size_shift) 3283 r0);
3244 : Operand(key, LSL, shift_size); 3284 } else {
3245 __ add(scratch0(), external_pointer, operand); 3285 __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
3286 __ add(scratch0(), external_pointer, r0);
3287 }
3246 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS || 3288 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3247 elements_kind == FLOAT32_ELEMENTS) { 3289 elements_kind == FLOAT32_ELEMENTS) {
3248 __ vldr(double_scratch0().low(), scratch0(), base_offset); 3290 __ lfs(result, MemOperand(scratch0(), base_offset));
3249 __ vcvt_f64_f32(result, double_scratch0().low()); 3291 } else { // i.e. elements_kind == EXTERNAL_FLOAT64_ELEMENTS
3250 } else { // i.e. elements_kind == EXTERNAL_FLOAT64_ELEMENTS 3292 __ lfd(result, MemOperand(scratch0(), base_offset));
3251 __ vldr(result, scratch0(), base_offset);
3252 } 3293 }
3253 } else { 3294 } else {
3254 Register result = ToRegister(instr->result()); 3295 Register result = ToRegister(instr->result());
3255 MemOperand mem_operand = PrepareKeyedOperand( 3296 MemOperand mem_operand =
3256 key, external_pointer, key_is_constant, constant_key, 3297 PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
3257 element_size_shift, shift_size, base_offset); 3298 constant_key, element_size_shift, base_offset);
3258 switch (elements_kind) { 3299 switch (elements_kind) {
3259 case EXTERNAL_INT8_ELEMENTS: 3300 case EXTERNAL_INT8_ELEMENTS:
3260 case INT8_ELEMENTS: 3301 case INT8_ELEMENTS:
3261 __ ldrsb(result, mem_operand); 3302 if (key_is_constant) {
3303 __ LoadByte(result, mem_operand, r0);
3304 } else {
3305 __ lbzx(result, mem_operand);
3306 }
3307 __ extsb(result, result);
3262 break; 3308 break;
3263 case EXTERNAL_UINT8_CLAMPED_ELEMENTS: 3309 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
3264 case EXTERNAL_UINT8_ELEMENTS: 3310 case EXTERNAL_UINT8_ELEMENTS:
3265 case UINT8_ELEMENTS: 3311 case UINT8_ELEMENTS:
3266 case UINT8_CLAMPED_ELEMENTS: 3312 case UINT8_CLAMPED_ELEMENTS:
3267 __ ldrb(result, mem_operand); 3313 if (key_is_constant) {
3314 __ LoadByte(result, mem_operand, r0);
3315 } else {
3316 __ lbzx(result, mem_operand);
3317 }
3268 break; 3318 break;
3269 case EXTERNAL_INT16_ELEMENTS: 3319 case EXTERNAL_INT16_ELEMENTS:
3270 case INT16_ELEMENTS: 3320 case INT16_ELEMENTS:
3271 __ ldrsh(result, mem_operand); 3321 if (key_is_constant) {
3322 __ LoadHalfWord(result, mem_operand, r0);
3323 } else {
3324 __ lhzx(result, mem_operand);
3325 }
3326 __ extsh(result, result);
3272 break; 3327 break;
3273 case EXTERNAL_UINT16_ELEMENTS: 3328 case EXTERNAL_UINT16_ELEMENTS:
3274 case UINT16_ELEMENTS: 3329 case UINT16_ELEMENTS:
3275 __ ldrh(result, mem_operand); 3330 if (key_is_constant) {
3331 __ LoadHalfWord(result, mem_operand, r0);
3332 } else {
3333 __ lhzx(result, mem_operand);
3334 }
3276 break; 3335 break;
3277 case EXTERNAL_INT32_ELEMENTS: 3336 case EXTERNAL_INT32_ELEMENTS:
3278 case INT32_ELEMENTS: 3337 case INT32_ELEMENTS:
3279 __ ldr(result, mem_operand); 3338 if (key_is_constant) {
3339 __ LoadWord(result, mem_operand, r0);
3340 } else {
3341 __ lwzx(result, mem_operand);
3342 }
3343 #if V8_TARGET_ARCH_PPC64
3344 __ extsw(result, result);
3345 #endif
3280 break; 3346 break;
3281 case EXTERNAL_UINT32_ELEMENTS: 3347 case EXTERNAL_UINT32_ELEMENTS:
3282 case UINT32_ELEMENTS: 3348 case UINT32_ELEMENTS:
3283 __ ldr(result, mem_operand); 3349 if (key_is_constant) {
3350 __ LoadWord(result, mem_operand, r0);
3351 } else {
3352 __ lwzx(result, mem_operand);
3353 }
3284 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { 3354 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3285 __ cmp(result, Operand(0x80000000)); 3355 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
3286 DeoptimizeIf(cs, instr, "negative value"); 3356 __ cmplw(result, r0);
3357 DeoptimizeIf(ge, instr, "negative value");
3287 } 3358 }
3288 break; 3359 break;
3289 case FLOAT32_ELEMENTS: 3360 case FLOAT32_ELEMENTS:
3290 case FLOAT64_ELEMENTS: 3361 case FLOAT64_ELEMENTS:
3291 case EXTERNAL_FLOAT32_ELEMENTS: 3362 case EXTERNAL_FLOAT32_ELEMENTS:
3292 case EXTERNAL_FLOAT64_ELEMENTS: 3363 case EXTERNAL_FLOAT64_ELEMENTS:
3293 case FAST_HOLEY_DOUBLE_ELEMENTS: 3364 case FAST_HOLEY_DOUBLE_ELEMENTS:
3294 case FAST_HOLEY_ELEMENTS: 3365 case FAST_HOLEY_ELEMENTS:
3295 case FAST_HOLEY_SMI_ELEMENTS: 3366 case FAST_HOLEY_SMI_ELEMENTS:
3296 case FAST_DOUBLE_ELEMENTS: 3367 case FAST_DOUBLE_ELEMENTS:
3297 case FAST_ELEMENTS: 3368 case FAST_ELEMENTS:
3298 case FAST_SMI_ELEMENTS: 3369 case FAST_SMI_ELEMENTS:
3299 case DICTIONARY_ELEMENTS: 3370 case DICTIONARY_ELEMENTS:
3300 case SLOPPY_ARGUMENTS_ELEMENTS: 3371 case SLOPPY_ARGUMENTS_ELEMENTS:
3301 UNREACHABLE(); 3372 UNREACHABLE();
3302 break; 3373 break;
3303 } 3374 }
3304 } 3375 }
3305 } 3376 }
3306 3377
3307 3378
3308 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { 3379 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
3309 Register elements = ToRegister(instr->elements()); 3380 Register elements = ToRegister(instr->elements());
3310 bool key_is_constant = instr->key()->IsConstantOperand(); 3381 bool key_is_constant = instr->key()->IsConstantOperand();
3311 Register key = no_reg; 3382 Register key = no_reg;
3312 DwVfpRegister result = ToDoubleRegister(instr->result()); 3383 DoubleRegister result = ToDoubleRegister(instr->result());
3313 Register scratch = scratch0(); 3384 Register scratch = scratch0();
3314 3385
3315 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); 3386 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
3316 3387 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
3317 int base_offset = instr->base_offset(); 3388 int constant_key = 0;
3318 if (key_is_constant) { 3389 if (key_is_constant) {
3319 int constant_key = ToInteger32(LConstantOperand::cast(instr->key())); 3390 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3320 if (constant_key & 0xF0000000) { 3391 if (constant_key & 0xF0000000) {
3321 Abort(kArrayIndexConstantValueTooBig); 3392 Abort(kArrayIndexConstantValueTooBig);
3322 } 3393 }
3323 base_offset += constant_key * kDoubleSize; 3394 } else {
3324 }
3325 __ add(scratch, elements, Operand(base_offset));
3326
3327 if (!key_is_constant) {
3328 key = ToRegister(instr->key()); 3395 key = ToRegister(instr->key());
3329 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3330 ? (element_size_shift - kSmiTagSize) : element_size_shift;
3331 __ add(scratch, scratch, Operand(key, LSL, shift_size));
3332 } 3396 }
3333 3397
3334 __ vldr(result, scratch, 0); 3398 int base_offset = instr->base_offset() + constant_key * kDoubleSize;
3399 if (!key_is_constant) {
3400 __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
3401 __ add(scratch, elements, r0);
3402 elements = scratch;
3403 }
3404 if (!is_int16(base_offset)) {
3405 __ Add(scratch, elements, base_offset, r0);
3406 base_offset = 0;
3407 elements = scratch;
3408 }
3409 __ lfd(result, MemOperand(elements, base_offset));
3335 3410
3336 if (instr->hydrogen()->RequiresHoleCheck()) { 3411 if (instr->hydrogen()->RequiresHoleCheck()) {
3337 __ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32))); 3412 if (is_int16(base_offset + Register::kExponentOffset)) {
3338 __ cmp(scratch, Operand(kHoleNanUpper32)); 3413 __ lwz(scratch,
3414 MemOperand(elements, base_offset + Register::kExponentOffset));
3415 } else {
3416 __ addi(scratch, elements, Operand(base_offset));
3417 __ lwz(scratch, MemOperand(scratch, Register::kExponentOffset));
3418 }
3419 __ Cmpi(scratch, Operand(kHoleNanUpper32), r0);
3339 DeoptimizeIf(eq, instr, "hole"); 3420 DeoptimizeIf(eq, instr, "hole");
3340 } 3421 }
3341 } 3422 }
3342 3423
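On the hole check above: the hole in a holey double array is stored as a NaN with a reserved bit pattern, so comparing only the upper (exponent) word against kHoleNanUpper32 suffices. A hedged standalone C++ sketch (not V8 code; the sentinel value below is hypothetical):

#include <cstdint>
#include <cstring>

constexpr uint32_t kHoleUpper = 0x7FFFFFFFu;  // hypothetical stand-in value

// Sketch only: detect the hole sentinel by its upper 32 bits alone.
bool IsHoleDouble(double element) {
  uint64_t bits;
  std::memcpy(&bits, &element, sizeof(bits));
  return static_cast<uint32_t>(bits >> 32) == kHoleUpper;
}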
3343 3424
3344 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { 3425 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3426 HLoadKeyed* hinstr = instr->hydrogen();
3345 Register elements = ToRegister(instr->elements()); 3427 Register elements = ToRegister(instr->elements());
3346 Register result = ToRegister(instr->result()); 3428 Register result = ToRegister(instr->result());
3347 Register scratch = scratch0(); 3429 Register scratch = scratch0();
3348 Register store_base = scratch; 3430 Register store_base = scratch;
3349 int offset = instr->base_offset(); 3431 int offset = instr->base_offset();
3350 3432
3351 if (instr->key()->IsConstantOperand()) { 3433 if (instr->key()->IsConstantOperand()) {
3352 LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); 3434 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3353 offset += ToInteger32(const_operand) * kPointerSize; 3435 offset += ToInteger32(const_operand) * kPointerSize;
3354 store_base = elements; 3436 store_base = elements;
3355 } else { 3437 } else {
3356 Register key = ToRegister(instr->key()); 3438 Register key = ToRegister(instr->key());
3357 // Even though the HLoadKeyed instruction forces the input 3439 // Even though the HLoadKeyed instruction forces the input
3358 // representation for the key to be an integer, the input gets replaced 3440 // representation for the key to be an integer, the input gets replaced
3359 // during bound check elimination with the index argument to the bounds 3441 // during bound check elimination with the index argument to the bounds
3360 // check, which can be tagged, so that case must be handled here, too. 3442 // check, which can be tagged, so that case must be handled here, too.
3361 if (instr->hydrogen()->key()->representation().IsSmi()) { 3443 if (hinstr->key()->representation().IsSmi()) {
3362 __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key)); 3444 __ SmiToPtrArrayOffset(r0, key);
3363 } else { 3445 } else {
3364 __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2)); 3446 __ ShiftLeftImm(r0, key, Operand(kPointerSizeLog2));
3365 } 3447 }
3448 __ add(scratch, elements, r0);
3366 } 3449 }
3367 __ ldr(result, MemOperand(store_base, offset)); 3450
3451 bool requires_hole_check = hinstr->RequiresHoleCheck();
3452 Representation representation = hinstr->representation();
3453
3454 #if V8_TARGET_ARCH_PPC64
3455 // 64-bit Smi optimization
3456 if (representation.IsInteger32() &&
3457 hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
3458 DCHECK(!requires_hole_check);
3459 // Read int value directly from upper half of the smi.
3460 STATIC_ASSERT(kSmiTag == 0);
3461 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
3462 #if V8_TARGET_LITTLE_ENDIAN
3463 offset += kPointerSize / 2;
3464 #endif
3465 }
3466 #endif
3467
3468 __ LoadRepresentation(result, MemOperand(store_base, offset), representation,
3469 r0);
3368 3470
3369 // Check for the hole value. 3471 // Check for the hole value.
3370 if (instr->hydrogen()->RequiresHoleCheck()) { 3472 if (requires_hole_check) {
3371 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { 3473 if (IsFastSmiElementsKind(hinstr->elements_kind())) {
3372 __ SmiTst(result); 3474 __ TestIfSmi(result, r0);
3373 DeoptimizeIf(ne, instr, "not a Smi"); 3475 DeoptimizeIf(ne, instr, "not a Smi", cr0);
3374 } else { 3476 } else {
3375 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); 3477 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
3376 __ cmp(result, scratch); 3478 __ cmp(result, scratch);
3377 DeoptimizeIf(eq, instr, "hole"); 3479 DeoptimizeIf(eq, instr, "hole");
3378 } 3480 }
3379 } 3481 }
3380 } 3482 }
3381 3483
3382 3484
3383 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { 3485 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3384 if (instr->is_typed_elements()) { 3486 if (instr->is_typed_elements()) {
3385 DoLoadKeyedExternalArray(instr); 3487 DoLoadKeyedExternalArray(instr);
3386 } else if (instr->hydrogen()->representation().IsDouble()) { 3488 } else if (instr->hydrogen()->representation().IsDouble()) {
3387 DoLoadKeyedFixedDoubleArray(instr); 3489 DoLoadKeyedFixedDoubleArray(instr);
3388 } else { 3490 } else {
3389 DoLoadKeyedFixedArray(instr); 3491 DoLoadKeyedFixedArray(instr);
3390 } 3492 }
3391 } 3493 }
3392 3494
3393 3495
3394 MemOperand LCodeGen::PrepareKeyedOperand(Register key, 3496 MemOperand LCodeGen::PrepareKeyedOperand(Register key, Register base,
3395 Register base, 3497 bool key_is_constant, bool key_is_smi,
3396 bool key_is_constant,
3397 int constant_key, 3498 int constant_key,
3398 int element_size, 3499 int element_size_shift,
3399 int shift_size,
3400 int base_offset) { 3500 int base_offset) {
3501 Register scratch = scratch0();
3502
3401 if (key_is_constant) { 3503 if (key_is_constant) {
3402 return MemOperand(base, (constant_key << element_size) + base_offset); 3504 return MemOperand(base, (constant_key << element_size_shift) + base_offset);
3403 } 3505 }
3404 3506
3405 if (base_offset == 0) { 3507 bool needs_shift =
3406 if (shift_size >= 0) { 3508 (element_size_shift != (key_is_smi ? kSmiTagSize + kSmiShiftSize : 0));
3407 return MemOperand(base, key, LSL, shift_size); 3509
3408 } else { 3510 if (!(base_offset || needs_shift)) {
3409 DCHECK_EQ(-1, shift_size); 3511 return MemOperand(base, key);
3410 return MemOperand(base, key, LSR, 1);
3411 }
3412 } 3512 }
3413 3513
3414 if (shift_size >= 0) { 3514 if (needs_shift) {
3415 __ add(scratch0(), base, Operand(key, LSL, shift_size)); 3515 __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
3416 return MemOperand(scratch0(), base_offset); 3516 key = scratch;
3417 } else {
3418 DCHECK_EQ(-1, shift_size);
3419 __ add(scratch0(), base, Operand(key, ASR, 1));
3420 return MemOperand(scratch0(), base_offset);
3421 } 3517 }
3518
3519 if (base_offset) {
3520 __ Add(scratch, key, base_offset, r0);
3521 }
3522
3523 return MemOperand(base, scratch);
3422 } 3524 }
3423 3525
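PrepareKeyedOperand above materializes base + (key << shift) + base_offset, where a smi key arrives pre-shifted by the tag width and so needs correspondingly less shifting (modeled below as a plain untag). A hedged standalone C++ sketch (not V8 code):

#include <cstdint>

// Sketch only: effective address of element `key` in a keyed access.
// smi_shift stands in for kSmiTagSize + kSmiShiftSize on the target.
uintptr_t KeyedElementAddress(uintptr_t base, intptr_t key, bool key_is_smi,
                              int element_size_shift, int base_offset,
                              int smi_shift) {
  intptr_t index = key_is_smi ? (key >> smi_shift) : key;  // untag smi keys
  return base + (static_cast<uintptr_t>(index) << element_size_shift) +
         base_offset;
}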
3424 3526
3425 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { 3527 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3426 DCHECK(ToRegister(instr->context()).is(cp)); 3528 DCHECK(ToRegister(instr->context()).is(cp));
3427 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister())); 3529 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3428 DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister())); 3530 DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
3429 3531
3430 if (FLAG_vector_ics) { 3532 if (FLAG_vector_ics) {
3431 EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr); 3533 EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
3432 } 3534 }
3433 3535
3434 Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code(); 3536 Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
3435 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS); 3537 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3436 } 3538 }
3437 3539
3438 3540
3439 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { 3541 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3440 Register scratch = scratch0(); 3542 Register scratch = scratch0();
3441 Register result = ToRegister(instr->result()); 3543 Register result = ToRegister(instr->result());
3442 3544
3443 if (instr->hydrogen()->from_inlined()) { 3545 if (instr->hydrogen()->from_inlined()) {
3444 __ sub(result, sp, Operand(2 * kPointerSize)); 3546 __ subi(result, sp, Operand(2 * kPointerSize));
3445 } else { 3547 } else {
3446 // Check if the calling frame is an arguments adaptor frame. 3548 // Check if the calling frame is an arguments adaptor frame.
3447 Label done, adapted; 3549 Label done, adapted;
3448 __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); 3550 __ LoadP(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3449 __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset)); 3551 __ LoadP(result,
3450 __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); 3552 MemOperand(scratch, StandardFrameConstants::kContextOffset));
3553 __ CmpSmiLiteral(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
3451 3554
3452 // Result is the frame pointer for the current frame if it is not adapted, 3555 // Result is the frame pointer for the current frame if it is not adapted,
3453 // or for the real frame below the adaptor frame if it is. 3556 // or for the real frame below the adaptor frame if it is.
3454 __ mov(result, fp, LeaveCC, ne); 3557 __ beq(&adapted);
3455 __ mov(result, scratch, LeaveCC, eq); 3558 __ mr(result, fp);
3559 __ b(&done);
3560
3561 __ bind(&adapted);
3562 __ mr(result, scratch);
3563 __ bind(&done);
3456 } 3564 }
3457 } 3565 }
3458 3566
3459 3567
3460 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) { 3568 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3461 Register elem = ToRegister(instr->elements()); 3569 Register elem = ToRegister(instr->elements());
3462 Register result = ToRegister(instr->result()); 3570 Register result = ToRegister(instr->result());
3463 3571
3464 Label done; 3572 Label done;
3465 3573
3466 // If there is no arguments adaptor frame, the number of arguments is fixed. 3574 // If there is no arguments adaptor frame, the number of arguments is fixed.
3467 __ cmp(fp, elem); 3575 __ cmp(fp, elem);
3468 __ mov(result, Operand(scope()->num_parameters())); 3576 __ mov(result, Operand(scope()->num_parameters()));
3469 __ b(eq, &done); 3577 __ beq(&done);
3470 3578
3471 // Arguments adaptor frame present. Get argument length from there. 3579 // Arguments adaptor frame present. Get argument length from there.
3472 __ ldr(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); 3580 __ LoadP(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3473 __ ldr(result, 3581 __ LoadP(result,
3474 MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset)); 3582 MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
3475 __ SmiUntag(result); 3583 __ SmiUntag(result);
3476 3584
3477 // Argument length is in result register. 3585 // Argument length is in result register.
3478 __ bind(&done); 3586 __ bind(&done);
3479 } 3587 }
3480 3588
3481 3589
3482 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) { 3590 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3483 Register receiver = ToRegister(instr->receiver()); 3591 Register receiver = ToRegister(instr->receiver());
3484 Register function = ToRegister(instr->function()); 3592 Register function = ToRegister(instr->function());
3485 Register result = ToRegister(instr->result()); 3593 Register result = ToRegister(instr->result());
3486 Register scratch = scratch0(); 3594 Register scratch = scratch0();
3487 3595
3488 // If the receiver is null or undefined, we have to pass the global 3596 // If the receiver is null or undefined, we have to pass the global
3489 // object as a receiver to normal functions. Values have to be 3597 // object as a receiver to normal functions. Values have to be
3490 // passed unchanged to builtins and strict-mode functions. 3598 // passed unchanged to builtins and strict-mode functions.
3491 Label global_object, result_in_receiver; 3599 Label global_object, result_in_receiver;
3492 3600
3493 if (!instr->hydrogen()->known_function()) { 3601 if (!instr->hydrogen()->known_function()) {
3494 // Do not transform the receiver to object for strict mode 3602 // Do not transform the receiver to object for strict mode
3495 // functions. 3603 // functions.
3496 __ ldr(scratch, 3604 __ LoadP(scratch,
3497 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); 3605 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3498 __ ldr(scratch, 3606 __ lwz(scratch,
3499 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset)); 3607 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
3500 int mask = 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize); 3608 __ TestBit(scratch,
3501 __ tst(scratch, Operand(mask)); 3609 #if V8_TARGET_ARCH_PPC64
3502 __ b(ne, &result_in_receiver); 3610 SharedFunctionInfo::kStrictModeFunction,
3611 #else
3612 SharedFunctionInfo::kStrictModeFunction + kSmiTagSize,
3613 #endif
3614 r0);
3615 __ bne(&result_in_receiver, cr0);
3503 3616
3504 // Do not transform the receiver to object for builtins. 3617 // Do not transform the receiver to object for builtins.
3505 __ tst(scratch, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize))); 3618 __ TestBit(scratch,
3506 __ b(ne, &result_in_receiver); 3619 #if V8_TARGET_ARCH_PPC64
3620 SharedFunctionInfo::kNative,
3621 #else
3622 SharedFunctionInfo::kNative + kSmiTagSize,
3623 #endif
3624 r0);
3625 __ bne(&result_in_receiver, cr0);
3507 } 3626 }
3508 3627
3509 // Normal function. Replace undefined or null with global receiver. 3628 // Normal function. Replace undefined or null with global receiver.
3510 __ LoadRoot(scratch, Heap::kNullValueRootIndex); 3629 __ LoadRoot(scratch, Heap::kNullValueRootIndex);
3511 __ cmp(receiver, scratch); 3630 __ cmp(receiver, scratch);
3512 __ b(eq, &global_object); 3631 __ beq(&global_object);
3513 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); 3632 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
3514 __ cmp(receiver, scratch); 3633 __ cmp(receiver, scratch);
3515 __ b(eq, &global_object); 3634 __ beq(&global_object);
3516 3635
3517 // Deoptimize if the receiver is not a JS object. 3636 // Deoptimize if the receiver is not a JS object.
3518 __ SmiTst(receiver); 3637 __ TestIfSmi(receiver, r0);
3519 DeoptimizeIf(eq, instr, "Smi"); 3638 DeoptimizeIf(eq, instr, "Smi");
3520 __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE); 3639 __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
3521 DeoptimizeIf(lt, instr, "not a JavaScript object"); 3640 DeoptimizeIf(lt, instr, "not a JavaScript object");
3522 3641
3523 __ b(&result_in_receiver); 3642 __ b(&result_in_receiver);
3524 __ bind(&global_object); 3643 __ bind(&global_object);
3525 __ ldr(result, FieldMemOperand(function, JSFunction::kContextOffset)); 3644 __ LoadP(result, FieldMemOperand(function, JSFunction::kContextOffset));
3526 __ ldr(result, 3645 __ LoadP(result, ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
3527 ContextOperand(result, Context::GLOBAL_OBJECT_INDEX)); 3646 __ LoadP(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
3528 __ ldr(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
3529
3530 if (result.is(receiver)) { 3647 if (result.is(receiver)) {
3531 __ bind(&result_in_receiver); 3648 __ bind(&result_in_receiver);
3532 } else { 3649 } else {
3533 Label result_ok; 3650 Label result_ok;
3534 __ b(&result_ok); 3651 __ b(&result_ok);
3535 __ bind(&result_in_receiver); 3652 __ bind(&result_in_receiver);
3536 __ mov(result, receiver); 3653 __ mr(result, receiver);
3537 __ bind(&result_ok); 3654 __ bind(&result_ok);
3538 } 3655 }
3539 } 3656 }
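The receiver rules here are easier to audit as cases. A toy model of the semantics in plain C++ (all names hypothetical; this mirrors the branch structure, not the codegen):

  #include <cassert>

  enum class Receiver { Null, Undefined, Smi, JSObject, GlobalProxy, Deopt };

  // Strict-mode and native callees receive the value unchanged; for sloppy
  // callees, null/undefined become the global proxy, and anything that is
  // not a JS object deoptimizes.
  Receiver WrapReceiver(Receiver r, bool strict_or_native) {
    if (strict_or_native) return r;
    if (r == Receiver::Null || r == Receiver::Undefined)
      return Receiver::GlobalProxy;
    if (r != Receiver::JSObject && r != Receiver::GlobalProxy)
      return Receiver::Deopt;
    return r;
  }

  int main() {
    assert(WrapReceiver(Receiver::Undefined, false) == Receiver::GlobalProxy);
    assert(WrapReceiver(Receiver::Undefined, true) == Receiver::Undefined);
    assert(WrapReceiver(Receiver::Smi, false) == Receiver::Deopt);
  }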
3540 3657
3541 3658
3542 void LCodeGen::DoApplyArguments(LApplyArguments* instr) { 3659 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3543 Register receiver = ToRegister(instr->receiver()); 3660 Register receiver = ToRegister(instr->receiver());
3544 Register function = ToRegister(instr->function()); 3661 Register function = ToRegister(instr->function());
3545 Register length = ToRegister(instr->length()); 3662 Register length = ToRegister(instr->length());
3546 Register elements = ToRegister(instr->elements()); 3663 Register elements = ToRegister(instr->elements());
3547 Register scratch = scratch0(); 3664 Register scratch = scratch0();
3548 DCHECK(receiver.is(r0)); // Used for parameter count. 3665 DCHECK(receiver.is(r3)); // Used for parameter count.
3549 DCHECK(function.is(r1)); // Required by InvokeFunction. 3666 DCHECK(function.is(r4)); // Required by InvokeFunction.
3550 DCHECK(ToRegister(instr->result()).is(r0)); 3667 DCHECK(ToRegister(instr->result()).is(r3));
3551 3668
3552 // Copy the arguments to this function possibly from the 3669 // Copy the arguments to this function possibly from the
3553 // adaptor frame below it. 3670 // adaptor frame below it.
3554 const uint32_t kArgumentsLimit = 1 * KB; 3671 const uint32_t kArgumentsLimit = 1 * KB;
3555 __ cmp(length, Operand(kArgumentsLimit)); 3672 __ cmpli(length, Operand(kArgumentsLimit));
3556 DeoptimizeIf(hi, instr, "too many arguments"); 3673 DeoptimizeIf(gt, instr, "too many arguments");
3557 3674
3558 // Push the receiver and use the register to keep the original 3675 // Push the receiver and use the register to keep the original
3559 // number of arguments. 3676 // number of arguments.
3560 __ push(receiver); 3677 __ push(receiver);
3561 __ mov(receiver, length); 3678 __ mr(receiver, length);
3562 // The arguments are located one pointer size past elements. 3679 // The arguments are located one pointer size past elements.
3563 __ add(elements, elements, Operand(1 * kPointerSize)); 3680 __ addi(elements, elements, Operand(1 * kPointerSize));
3564 3681
3565 // Loop through the arguments pushing them onto the execution 3682 // Loop through the arguments pushing them onto the execution
3566 // stack. 3683 // stack.
3567 Label invoke, loop; 3684 Label invoke, loop;
3568 // length is a small non-negative integer, due to the test above. 3685 // length is a small non-negative integer, due to the test above.
3569 __ cmp(length, Operand::Zero()); 3686 __ cmpi(length, Operand::Zero());
3570 __ b(eq, &invoke); 3687 __ beq(&invoke);
3688 __ mtctr(length);
3571 __ bind(&loop); 3689 __ bind(&loop);
3572 __ ldr(scratch, MemOperand(elements, length, LSL, 2)); 3690 __ ShiftLeftImm(r0, length, Operand(kPointerSizeLog2));
3691 __ LoadPX(scratch, MemOperand(elements, r0));
3573 __ push(scratch); 3692 __ push(scratch);
3574 __ sub(length, length, Operand(1), SetCC); 3693 __ addi(length, length, Operand(-1));
3575 __ b(ne, &loop); 3694 __ bdnz(&loop);
3576 3695
3577 __ bind(&invoke); 3696 __ bind(&invoke);
3578 DCHECK(instr->HasPointerMap()); 3697 DCHECK(instr->HasPointerMap());
3579 LPointerMap* pointers = instr->pointer_map(); 3698 LPointerMap* pointers = instr->pointer_map();
3580 SafepointGenerator safepoint_generator( 3699 SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
3581 this, pointers, Safepoint::kLazyDeopt); 3700 // The number of arguments is stored in receiver which is r3, as expected
3582 // The number of arguments is stored in receiver which is r0, as expected
3583 // by InvokeFunction. 3701 // by InvokeFunction.
3584 ParameterCount actual(receiver); 3702 ParameterCount actual(receiver);
3585 __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator); 3703 __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
3586 } 3704 }
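The mtctr/bdnz pair replaces ARM's subtract-with-flags loop: the trip count is moved into the CTR register once, and bdnz decrements it and branches in a single instruction. The copy it performs, sketched in plain C++ (index bookkeeping simplified; the real code pre-biases elements by one pointer):

  #include <cstdint>
  #include <vector>

  // Sketch of the ctr-driven loop above: one argument is pushed per
  // bdnz iteration, walking from the highest index down.
  void PushArguments(const intptr_t* args, int length,
                     std::vector<intptr_t>* stack) {
    for (int i = length; i > 0; --i) {  // mtctr(length) ... bdnz(&loop)
      stack->push_back(args[i - 1]);    // ShiftLeftImm + LoadPX + push
    }
  }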
3587 3705
3588 3706
3589 void LCodeGen::DoPushArgument(LPushArgument* instr) { 3707 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3590 LOperand* argument = instr->value(); 3708 LOperand* argument = instr->value();
3591 if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) { 3709 if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
3592 Abort(kDoPushArgumentNotImplementedForDoubleType); 3710 Abort(kDoPushArgumentNotImplementedForDoubleType);
3593 } else { 3711 } else {
3594 Register argument_reg = EmitLoadRegister(argument, ip); 3712 Register argument_reg = EmitLoadRegister(argument, ip);
3595 __ push(argument_reg); 3713 __ push(argument_reg);
3596 } 3714 }
3597 } 3715 }
3598 3716
3599 3717
3600 void LCodeGen::DoDrop(LDrop* instr) { 3718 void LCodeGen::DoDrop(LDrop* instr) { __ Drop(instr->count()); }
3601 __ Drop(instr->count());
3602 }
3603 3719
3604 3720
3605 void LCodeGen::DoThisFunction(LThisFunction* instr) { 3721 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3606 Register result = ToRegister(instr->result()); 3722 Register result = ToRegister(instr->result());
3607 __ ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); 3723 __ LoadP(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
3608 } 3724 }
3609 3725
3610 3726
3611 void LCodeGen::DoContext(LContext* instr) { 3727 void LCodeGen::DoContext(LContext* instr) {
3612 // If there is a non-return use, the context must be moved to a register. 3728 // If there is a non-return use, the context must be moved to a register.
3613 Register result = ToRegister(instr->result()); 3729 Register result = ToRegister(instr->result());
3614 if (info()->IsOptimizing()) { 3730 if (info()->IsOptimizing()) {
3615 __ ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset)); 3731 __ LoadP(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
3616 } else { 3732 } else {
3617 // If there is no frame, the context must be in cp. 3733 // If there is no frame, the context must be in cp.
3618 DCHECK(result.is(cp)); 3734 DCHECK(result.is(cp));
3619 } 3735 }
3620 } 3736 }
3621 3737
3622 3738
3623 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) { 3739 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3624 DCHECK(ToRegister(instr->context()).is(cp)); 3740 DCHECK(ToRegister(instr->context()).is(cp));
3625 __ push(cp); // The context is the first argument. 3741 __ push(cp); // The context is the first argument.
3626 __ Move(scratch0(), instr->hydrogen()->pairs()); 3742 __ Move(scratch0(), instr->hydrogen()->pairs());
3627 __ push(scratch0()); 3743 __ push(scratch0());
3628 __ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags()))); 3744 __ LoadSmiLiteral(scratch0(), Smi::FromInt(instr->hydrogen()->flags()));
3629 __ push(scratch0()); 3745 __ push(scratch0());
3630 CallRuntime(Runtime::kDeclareGlobals, 3, instr); 3746 CallRuntime(Runtime::kDeclareGlobals, 3, instr);
3631 } 3747 }
3632 3748
3633 3749
3634 void LCodeGen::CallKnownFunction(Handle<JSFunction> function, 3750 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3635 int formal_parameter_count, 3751 int formal_parameter_count, int arity,
3636 int arity, 3752 LInstruction* instr, R4State r4_state) {
3637 LInstruction* instr,
3638 R1State r1_state) {
3639 bool dont_adapt_arguments = 3753 bool dont_adapt_arguments =
3640 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel; 3754 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3641 bool can_invoke_directly = 3755 bool can_invoke_directly =
3642 dont_adapt_arguments || formal_parameter_count == arity; 3756 dont_adapt_arguments || formal_parameter_count == arity;
3643 3757
3644 LPointerMap* pointers = instr->pointer_map(); 3758 LPointerMap* pointers = instr->pointer_map();
3645 3759
3646 if (can_invoke_directly) { 3760 if (can_invoke_directly) {
3647 if (r1_state == R1_UNINITIALIZED) { 3761 if (r4_state == R4_UNINITIALIZED) {
3648 __ Move(r1, function); 3762 __ Move(r4, function);
3649 } 3763 }
3650 3764
3651 // Change context. 3765 // Change context.
3652 __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); 3766 __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
3653 3767
3654 // Set r0 to arguments count if adaptation is not needed. Assumes that r0 3768 // Set r3 to arguments count if adaptation is not needed. Assumes that r3
3655 // is available to write to at this point. 3769 // is available to write to at this point.
3656 if (dont_adapt_arguments) { 3770 if (dont_adapt_arguments) {
3657 __ mov(r0, Operand(arity)); 3771 __ mov(r3, Operand(arity));
3658 } 3772 }
3659 3773
3774 bool is_self_call = function.is_identical_to(info()->closure());
3775
3660 // Invoke function. 3776 // Invoke function.
3661 __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); 3777 if (is_self_call) {
3662 __ Call(ip); 3778 __ CallSelf();
3779 } else {
3780 __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
3781 __ CallJSEntry(ip);
3782 }
3663 3783
3664 // Set up deoptimization. 3784 // Set up deoptimization.
3665 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); 3785 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3666 } else { 3786 } else {
3667 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); 3787 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3668 ParameterCount count(arity); 3788 ParameterCount count(arity);
3669 ParameterCount expected(formal_parameter_count); 3789 ParameterCount expected(formal_parameter_count);
3670 __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator); 3790 __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
3671 } 3791 }
3672 } 3792 }
3673 3793
3674 3794
3675 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { 3795 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3676 DCHECK(instr->context() != NULL); 3796 DCHECK(instr->context() != NULL);
3677 DCHECK(ToRegister(instr->context()).is(cp)); 3797 DCHECK(ToRegister(instr->context()).is(cp));
3678 Register input = ToRegister(instr->value()); 3798 Register input = ToRegister(instr->value());
3679 Register result = ToRegister(instr->result()); 3799 Register result = ToRegister(instr->result());
3680 Register scratch = scratch0(); 3800 Register scratch = scratch0();
3681 3801
3682 // Deoptimize if not a heap number. 3802 // Deoptimize if not a heap number.
3683 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); 3803 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
3684 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); 3804 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
3685 __ cmp(scratch, Operand(ip)); 3805 __ cmp(scratch, ip);
3686 DeoptimizeIf(ne, instr, "not a heap number"); 3806 DeoptimizeIf(ne, instr, "not a heap number");
3687 3807
3688 Label done; 3808 Label done;
3689 Register exponent = scratch0(); 3809 Register exponent = scratch0();
3690 scratch = no_reg; 3810 scratch = no_reg;
3691 __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); 3811 __ lwz(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3692 // Check the sign of the argument. If the argument is positive, just 3812 // Check the sign of the argument. If the argument is positive, just
3693 // return it. 3813 // return it.
3694 __ tst(exponent, Operand(HeapNumber::kSignMask)); 3814 __ cmpwi(exponent, Operand::Zero());
3695 // Move the input to the result if necessary. 3815 // Move the input to the result if necessary.
3696 __ Move(result, input); 3816 __ Move(result, input);
3697 __ b(eq, &done); 3817 __ bge(&done);
3698 3818
3699 // Input is negative. Reverse its sign. 3819 // Input is negative. Reverse its sign.
3700 // Preserve the value of all registers. 3820 // Preserve the value of all registers.
3701 { 3821 {
3702 PushSafepointRegistersScope scope(this); 3822 PushSafepointRegistersScope scope(this);
3703 3823
3704 // Registers were saved at the safepoint, so we can use 3824 // Registers were saved at the safepoint, so we can use
3705 // many scratch registers. 3825 // many scratch registers.
3706 Register tmp1 = input.is(r1) ? r0 : r1; 3826 Register tmp1 = input.is(r4) ? r3 : r4;
3707 Register tmp2 = input.is(r2) ? r0 : r2; 3827 Register tmp2 = input.is(r5) ? r3 : r5;
3708 Register tmp3 = input.is(r3) ? r0 : r3; 3828 Register tmp3 = input.is(r6) ? r3 : r6;
3709 Register tmp4 = input.is(r4) ? r0 : r4; 3829 Register tmp4 = input.is(r7) ? r3 : r7;
3710 3830
3711 // exponent: floating point exponent value. 3831 // exponent: floating point exponent value.
3712 3832
3713 Label allocated, slow; 3833 Label allocated, slow;
3714 __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex); 3834 __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
3715 __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow); 3835 __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
3716 __ b(&allocated); 3836 __ b(&allocated);
3717 3837
3718 // Slow case: Call the runtime system to do the number allocation. 3838 // Slow case: Call the runtime system to do the number allocation.
3719 __ bind(&slow); 3839 __ bind(&slow);
3720 3840
3721 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr, 3841 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
3722 instr->context()); 3842 instr->context());
3723 // Set the pointer to the new heap number in tmp. 3843 // Set the pointer to the new heap number in tmp.
3724 if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0)); 3844 if (!tmp1.is(r3)) __ mr(tmp1, r3);
3725 // Restore input_reg after call to runtime. 3845 // Restore input_reg after call to runtime.
3726 __ LoadFromSafepointRegisterSlot(input, input); 3846 __ LoadFromSafepointRegisterSlot(input, input);
3727 __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); 3847 __ lwz(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3728 3848
3729 __ bind(&allocated); 3849 __ bind(&allocated);
3730 // exponent: floating point exponent value. 3850 // exponent: floating point exponent value.
3731 // tmp1: allocated heap number. 3851 // tmp1: allocated heap number.
3732 __ bic(exponent, exponent, Operand(HeapNumber::kSignMask)); 3852 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
3733 __ str(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset)); 3853 __ clrlwi(exponent, exponent, Operand(1)); // clear sign bit
3734 __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset)); 3854 __ stw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
3735 __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset)); 3855 __ lwz(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
3856 __ stw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
3736 3857
3737 __ StoreToSafepointRegisterSlot(tmp1, result); 3858 __ StoreToSafepointRegisterSlot(tmp1, result);
3738 } 3859 }
3739 3860
3740 __ bind(&done); 3861 __ bind(&done);
3741 } 3862 }
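The clrlwi here replaces ARM's bic with kSignMask; both simply clear the IEEE sign bit in the heap number's exponent word. The same trick in portable C++, applied to the whole 64-bit pattern (a sketch, not V8 code):

  #include <cassert>
  #include <cstdint>
  #include <cstring>

  // abs() of an IEEE-754 double is clearing its top bit.
  double AbsViaBits(double x) {
    uint64_t bits;
    std::memcpy(&bits, &x, sizeof bits);
    bits &= ~(1ULL << 63);  // the sign bit; kSignMask covers its high word
    std::memcpy(&x, &bits, sizeof x);
    return x;
  }

  int main() {
    assert(AbsViaBits(-2.5) == 2.5);
    assert(AbsViaBits(3.0) == 3.0);
  }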
3742 3863
3743 3864
3744 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) { 3865 void LCodeGen::EmitMathAbs(LMathAbs* instr) {
3745 Register input = ToRegister(instr->value()); 3866 Register input = ToRegister(instr->value());
3746 Register result = ToRegister(instr->result()); 3867 Register result = ToRegister(instr->result());
3747 __ cmp(input, Operand::Zero()); 3868 Label done;
3748 __ Move(result, input, pl); 3869 __ cmpi(input, Operand::Zero());
3749 // We can make rsb conditional because the previous cmp instruction 3870 __ Move(result, input);
3750 // will clear the V (overflow) flag and rsb won't set this flag 3871 __ bge(&done);
3751 // if input is positive. 3872 __ li(r0, Operand::Zero()); // clear xer
3752 __ rsb(result, input, Operand::Zero(), SetCC, mi); 3873 __ mtxer(r0);
3874 __ neg(result, result, SetOE, SetRC);
3753 // Deoptimize on overflow. 3875 // Deoptimize on overflow.
3754 DeoptimizeIf(vs, instr, "overflow"); 3876 DeoptimizeIf(overflow, instr, "overflow", cr0);
3877 __ bind(&done);
3755 } 3878 }
3756 3879
3757 3880
3881 #if V8_TARGET_ARCH_PPC64
3882 void LCodeGen::EmitInteger32MathAbs(LMathAbs* instr) {
3883 Register input = ToRegister(instr->value());
3884 Register result = ToRegister(instr->result());
3885 Label done;
3886 __ cmpwi(input, Operand::Zero());
3887 __ Move(result, input);
3888 __ bge(&done);
3889
3890 // Deoptimize on overflow.
3891 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
3892 __ cmpw(input, r0);
3893 DeoptimizeIf(eq, instr, "overflow");
3894
3895 __ neg(result, result);
3896 __ bind(&done);
3897 }
3898 #endif
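Both integer paths deoptimize on the one input whose absolute value is unrepresentable: the most negative integer. The generic path detects it via XER[OV] set by neg; the PPC64 Integer32 path compares against 0x80000000 up front. The invariant, stated in C++:

  #include <cassert>
  #include <cstdint>

  // Negating INT32_MIN overflows 32 bits; every other input is safe.
  bool AbsWouldOverflow(int32_t v) { return v == INT32_MIN; }

  int main() {
    assert(AbsWouldOverflow(INT32_MIN));
    assert(!AbsWouldOverflow(INT32_MIN + 1));
  }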
3899
3900
3758 void LCodeGen::DoMathAbs(LMathAbs* instr) { 3901 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3759 // Class for deferred case. 3902 // Class for deferred case.
3760 class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode { 3903 class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode {
3761 public: 3904 public:
3762 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr) 3905 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
3763 : LDeferredCode(codegen), instr_(instr) { } 3906 : LDeferredCode(codegen), instr_(instr) {}
3764 virtual void Generate() OVERRIDE { 3907 virtual void Generate() OVERRIDE {
3765 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); 3908 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3766 } 3909 }
3767 virtual LInstruction* instr() OVERRIDE { return instr_; } 3910 virtual LInstruction* instr() OVERRIDE { return instr_; }
3911
3768 private: 3912 private:
3769 LMathAbs* instr_; 3913 LMathAbs* instr_;
3770 }; 3914 };
3771 3915
3772 Representation r = instr->hydrogen()->value()->representation(); 3916 Representation r = instr->hydrogen()->value()->representation();
3773 if (r.IsDouble()) { 3917 if (r.IsDouble()) {
3774 DwVfpRegister input = ToDoubleRegister(instr->value()); 3918 DoubleRegister input = ToDoubleRegister(instr->value());
3775 DwVfpRegister result = ToDoubleRegister(instr->result()); 3919 DoubleRegister result = ToDoubleRegister(instr->result());
3776 __ vabs(result, input); 3920 __ fabs(result, input);
3921 #if V8_TARGET_ARCH_PPC64
3922 } else if (r.IsInteger32()) {
3923 EmitInteger32MathAbs(instr);
3924 } else if (r.IsSmi()) {
3925 #else
3777 } else if (r.IsSmiOrInteger32()) { 3926 } else if (r.IsSmiOrInteger32()) {
3778 EmitIntegerMathAbs(instr); 3927 #endif
3928 EmitMathAbs(instr);
3779 } else { 3929 } else {
3780 // Representation is tagged. 3930 // Representation is tagged.
3781 DeferredMathAbsTaggedHeapNumber* deferred = 3931 DeferredMathAbsTaggedHeapNumber* deferred =
3782 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr); 3932 new (zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3783 Register input = ToRegister(instr->value()); 3933 Register input = ToRegister(instr->value());
3784 // Smi check. 3934 // Smi check.
3785 __ JumpIfNotSmi(input, deferred->entry()); 3935 __ JumpIfNotSmi(input, deferred->entry());
3786 // If smi, handle it directly. 3936 // If smi, handle it directly.
3787 EmitIntegerMathAbs(instr); 3937 EmitMathAbs(instr);
3788 __ bind(deferred->exit()); 3938 __ bind(deferred->exit());
3789 } 3939 }
3790 } 3940 }
3791 3941
3792 3942
3793 void LCodeGen::DoMathFloor(LMathFloor* instr) { 3943 void LCodeGen::DoMathFloor(LMathFloor* instr) {
3794 DwVfpRegister input = ToDoubleRegister(instr->value()); 3944 DoubleRegister input = ToDoubleRegister(instr->value());
3795 Register result = ToRegister(instr->result()); 3945 Register result = ToRegister(instr->result());
3796 Register input_high = scratch0(); 3946 Register input_high = scratch0();
3947 Register scratch = ip;
3797 Label done, exact; 3948 Label done, exact;
3798 3949
3799 __ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact); 3950 __ TryInt32Floor(result, input, input_high, scratch, double_scratch0(), &done,
3951 &exact);
3800 DeoptimizeIf(al, instr, "lost precision or NaN"); 3952 DeoptimizeIf(al, instr, "lost precision or NaN");
3801 3953
3802 __ bind(&exact); 3954 __ bind(&exact);
3803 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 3955 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3804 // Test for -0. 3956 // Test for -0.
3805 __ cmp(result, Operand::Zero()); 3957 __ cmpi(result, Operand::Zero());
3806 __ b(ne, &done); 3958 __ bne(&done);
3807 __ cmp(input_high, Operand::Zero()); 3959 __ cmpwi(input_high, Operand::Zero());
3808 DeoptimizeIf(mi, instr, "minus zero"); 3960 DeoptimizeIf(lt, instr, "minus zero");
3809 } 3961 }
3810 __ bind(&done); 3962 __ bind(&done);
3811 } 3963 }
3812 3964
3813 3965
3814 void LCodeGen::DoMathRound(LMathRound* instr) { 3966 void LCodeGen::DoMathRound(LMathRound* instr) {
3815 DwVfpRegister input = ToDoubleRegister(instr->value()); 3967 DoubleRegister input = ToDoubleRegister(instr->value());
3816 Register result = ToRegister(instr->result()); 3968 Register result = ToRegister(instr->result());
3817 DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp()); 3969 DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
3818 DwVfpRegister input_plus_dot_five = double_scratch1; 3970 DoubleRegister input_plus_dot_five = double_scratch1;
3819 Register input_high = scratch0(); 3971 Register scratch1 = scratch0();
3820 DwVfpRegister dot_five = double_scratch0(); 3972 Register scratch2 = ip;
3973 DoubleRegister dot_five = double_scratch0();
3821 Label convert, done; 3974 Label convert, done;
3822 3975
3823 __ Vmov(dot_five, 0.5, scratch0()); 3976 __ LoadDoubleLiteral(dot_five, 0.5, r0);
3824 __ vabs(double_scratch1, input); 3977 __ fabs(double_scratch1, input);
3825 __ VFPCompareAndSetFlags(double_scratch1, dot_five); 3978 __ fcmpu(double_scratch1, dot_five);
3979 DeoptimizeIf(unordered, instr, "lost precision or NaN");
3826 // If input is in [-0.5, -0], the result is -0. 3980 // If input is in [-0.5, -0], the result is -0.
3827 // If input is in [+0, +0.5[, the result is +0. 3981 // If input is in [+0, +0.5[, the result is +0.
3828 // If the input is +0.5, the result is 1. 3982 // If the input is +0.5, the result is 1.
3829 __ b(hi, &convert); // Out of [-0.5, +0.5]. 3983 __ bgt(&convert); // Out of [-0.5, +0.5].
3830 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 3984 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3831 __ VmovHigh(input_high, input); 3985 #if V8_TARGET_ARCH_PPC64
3832 __ cmp(input_high, Operand::Zero()); 3986 __ MovDoubleToInt64(scratch1, input);
3987 #else
3988 __ MovDoubleHighToInt(scratch1, input);
3989 #endif
3990 __ cmpi(scratch1, Operand::Zero());
3833 // [-0.5, -0]. 3991 // [-0.5, -0].
3834 DeoptimizeIf(mi, instr, "minus zero"); 3992 DeoptimizeIf(lt, instr, "minus zero");
3835 } 3993 }
3836 __ VFPCompareAndSetFlags(input, dot_five); 3994 Label return_zero;
3837 __ mov(result, Operand(1), LeaveCC, eq); // +0.5. 3995 __ fcmpu(input, dot_five);
3996 __ bne(&return_zero);
3997 __ li(result, Operand(1)); // +0.5.
3998 __ b(&done);
3838 // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on 3999 // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
3839 // flag kBailoutOnMinusZero. 4000 // flag kBailoutOnMinusZero.
3840 __ mov(result, Operand::Zero(), LeaveCC, ne); 4001 __ bind(&return_zero);
4002 __ li(result, Operand::Zero());
3841 __ b(&done); 4003 __ b(&done);
3842 4004
3843 __ bind(&convert); 4005 __ bind(&convert);
3844 __ vadd(input_plus_dot_five, input, dot_five); 4006 __ fadd(input_plus_dot_five, input, dot_five);
3845 // Reuse dot_five (double_scratch0) as we no longer need this value. 4007 // Reuse dot_five (double_scratch0) as we no longer need this value.
3846 __ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(), 4008 __ TryInt32Floor(result, input_plus_dot_five, scratch1, scratch2,
3847 &done, &done); 4009 double_scratch0(), &done, &done);
3848 DeoptimizeIf(al, instr, "lost precision or NaN"); 4010 DeoptimizeIf(al, instr, "lost precision or NaN");
3849 __ bind(&done); 4011 __ bind(&done);
3850 } 4012 }
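To check the rounding semantics: Math.round(x) is floor(x + 0.5), with the band around zero short-circuited so that [-0.5, -0] yields -0 (or deopts under kBailoutOnMinusZero) and +0.5 yields 1. A behavioural sketch in plain C++, NaN and deopt paths omitted:

  #include <cassert>
  #include <cmath>

  double MathRound(double x) {
    if (std::fabs(x) <= 0.5)                    // the short-circuited band
      return (x == 0.5) ? 1.0 : (std::signbit(x) ? -0.0 : 0.0);
    return std::floor(x + 0.5);                 // the convert path
  }

  int main() {
    assert(MathRound(0.5) == 1.0);
    assert(MathRound(2.5) == 3.0);
    assert(MathRound(-2.5) == -2.0);            // floor(-2.0)
    assert(std::signbit(MathRound(-0.25)));     // -0 preserved
  }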
3851 4013
3852 4014
3853 void LCodeGen::DoMathFround(LMathFround* instr) { 4015 void LCodeGen::DoMathFround(LMathFround* instr) {
3854 DwVfpRegister input_reg = ToDoubleRegister(instr->value()); 4016 DoubleRegister input_reg = ToDoubleRegister(instr->value());
3855 DwVfpRegister output_reg = ToDoubleRegister(instr->result()); 4017 DoubleRegister output_reg = ToDoubleRegister(instr->result());
3856 LowDwVfpRegister scratch = double_scratch0(); 4018 __ frsp(output_reg, input_reg);
3857 __ vcvt_f32_f64(scratch.low(), input_reg);
3858 __ vcvt_f64_f32(output_reg, scratch.low());
3859 } 4019 }
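frsp performs the double-to-single-to-double round trip in one instruction, where the ARM code needed two vcvt steps. It is equivalent to:

  #include <cassert>

  // Math.fround: round to the nearest float, then widen back.
  double Fround(double x) { return static_cast<double>(static_cast<float>(x)); }

  int main() {
    assert(Fround(1.5) == 1.5);   // exactly representable as float
    assert(Fround(0.1) != 0.1);   // 0.1 is rounded at single precision
  }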
3860 4020
3861 4021
3862 void LCodeGen::DoMathSqrt(LMathSqrt* instr) { 4022 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3863 DwVfpRegister input = ToDoubleRegister(instr->value()); 4023 DoubleRegister input = ToDoubleRegister(instr->value());
3864 DwVfpRegister result = ToDoubleRegister(instr->result()); 4024 DoubleRegister result = ToDoubleRegister(instr->result());
3865 __ vsqrt(result, input); 4025 __ fsqrt(result, input);
3866 } 4026 }
3867 4027
3868 4028
3869 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { 4029 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3870 DwVfpRegister input = ToDoubleRegister(instr->value()); 4030 DoubleRegister input = ToDoubleRegister(instr->value());
3871 DwVfpRegister result = ToDoubleRegister(instr->result()); 4031 DoubleRegister result = ToDoubleRegister(instr->result());
3872 DwVfpRegister temp = double_scratch0(); 4032 DoubleRegister temp = double_scratch0();
3873 4033
3874 // Note that according to ECMA-262 15.8.2.13: 4034 // Note that according to ECMA-262 15.8.2.13:
3875 // Math.pow(-Infinity, 0.5) == Infinity 4035 // Math.pow(-Infinity, 0.5) == Infinity
3876 // Math.sqrt(-Infinity) == NaN 4036 // Math.sqrt(-Infinity) == NaN
3877 Label done; 4037 Label skip, done;
3878 __ vmov(temp, -V8_INFINITY, scratch0()); 4038
3879 __ VFPCompareAndSetFlags(input, temp); 4039 __ LoadDoubleLiteral(temp, -V8_INFINITY, scratch0());
3880 __ vneg(result, temp, eq); 4040 __ fcmpu(input, temp);
3881 __ b(&done, eq); 4041 __ bne(&skip);
4042 __ fneg(result, temp);
4043 __ b(&done);
3882 4044
3883 // Add +0 to convert -0 to +0. 4045 // Add +0 to convert -0 to +0.
3884 __ vadd(result, input, kDoubleRegZero); 4046 __ bind(&skip);
3885 __ vsqrt(result, result); 4047 __ fadd(result, input, kDoubleRegZero);
4048 __ fsqrt(result, result);
3886 __ bind(&done); 4049 __ bind(&done);
3887 } 4050 }
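A compact statement of the ECMA-262 special-casing above, in plain C++ (a sketch; the fneg produces the +Infinity result from the loaded -Infinity constant):

  #include <cassert>
  #include <cmath>

  // pow(x, 0.5): -Infinity maps to +Infinity (unlike sqrt), and adding
  // +0 first turns a -0 input into +0.
  double PowHalf(double x) {
    if (x == -INFINITY) return INFINITY;
    return std::sqrt(x + 0.0);
  }

  int main() {
    assert(PowHalf(-INFINITY) == INFINITY);
    assert(!std::signbit(PowHalf(-0.0)));
    assert(PowHalf(4.0) == 2.0);
  }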
3888 4051
3889 4052
3890 void LCodeGen::DoPower(LPower* instr) { 4053 void LCodeGen::DoPower(LPower* instr) {
3891 Representation exponent_type = instr->hydrogen()->right()->representation(); 4054 Representation exponent_type = instr->hydrogen()->right()->representation();
3892 // Having marked this as a call, we can use any registers. 4055 // Having marked this as a call, we can use any registers.
3893 // Just make sure that the input/output registers are the expected ones. 4056 // Just make sure that the input/output registers are the expected ones.
4057 #ifdef DEBUG
3894 Register tagged_exponent = MathPowTaggedDescriptor::exponent(); 4058 Register tagged_exponent = MathPowTaggedDescriptor::exponent();
4059 #endif
3895 DCHECK(!instr->right()->IsDoubleRegister() || 4060 DCHECK(!instr->right()->IsDoubleRegister() ||
3896 ToDoubleRegister(instr->right()).is(d1)); 4061 ToDoubleRegister(instr->right()).is(d2));
3897 DCHECK(!instr->right()->IsRegister() || 4062 DCHECK(!instr->right()->IsRegister() ||
3898 ToRegister(instr->right()).is(tagged_exponent)); 4063 ToRegister(instr->right()).is(tagged_exponent));
3899 DCHECK(ToDoubleRegister(instr->left()).is(d0)); 4064 DCHECK(ToDoubleRegister(instr->left()).is(d1));
3900 DCHECK(ToDoubleRegister(instr->result()).is(d2)); 4065 DCHECK(ToDoubleRegister(instr->result()).is(d3));
3901 4066
3902 if (exponent_type.IsSmi()) { 4067 if (exponent_type.IsSmi()) {
3903 MathPowStub stub(isolate(), MathPowStub::TAGGED); 4068 MathPowStub stub(isolate(), MathPowStub::TAGGED);
3904 __ CallStub(&stub); 4069 __ CallStub(&stub);
3905 } else if (exponent_type.IsTagged()) { 4070 } else if (exponent_type.IsTagged()) {
3906 Label no_deopt; 4071 Label no_deopt;
3907 __ JumpIfSmi(tagged_exponent, &no_deopt); 4072 __ JumpIfSmi(r5, &no_deopt);
3908 DCHECK(!r6.is(tagged_exponent)); 4073 __ LoadP(r10, FieldMemOperand(r5, HeapObject::kMapOffset));
3909 __ ldr(r6, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
3910 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); 4074 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
3911 __ cmp(r6, Operand(ip)); 4075 __ cmp(r10, ip);
3912 DeoptimizeIf(ne, instr, "not a heap number"); 4076 DeoptimizeIf(ne, instr, "not a heap number");
3913 __ bind(&no_deopt); 4077 __ bind(&no_deopt);
3914 MathPowStub stub(isolate(), MathPowStub::TAGGED); 4078 MathPowStub stub(isolate(), MathPowStub::TAGGED);
3915 __ CallStub(&stub); 4079 __ CallStub(&stub);
3916 } else if (exponent_type.IsInteger32()) { 4080 } else if (exponent_type.IsInteger32()) {
3917 MathPowStub stub(isolate(), MathPowStub::INTEGER); 4081 MathPowStub stub(isolate(), MathPowStub::INTEGER);
3918 __ CallStub(&stub); 4082 __ CallStub(&stub);
3919 } else { 4083 } else {
3920 DCHECK(exponent_type.IsDouble()); 4084 DCHECK(exponent_type.IsDouble());
3921 MathPowStub stub(isolate(), MathPowStub::DOUBLE); 4085 MathPowStub stub(isolate(), MathPowStub::DOUBLE);
3922 __ CallStub(&stub); 4086 __ CallStub(&stub);
3923 } 4087 }
3924 } 4088 }
3925 4089
3926 4090
3927 void LCodeGen::DoMathExp(LMathExp* instr) { 4091 void LCodeGen::DoMathExp(LMathExp* instr) {
3928 DwVfpRegister input = ToDoubleRegister(instr->value()); 4092 DoubleRegister input = ToDoubleRegister(instr->value());
3929 DwVfpRegister result = ToDoubleRegister(instr->result()); 4093 DoubleRegister result = ToDoubleRegister(instr->result());
3930 DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp()); 4094 DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
3931 DwVfpRegister double_scratch2 = double_scratch0(); 4095 DoubleRegister double_scratch2 = double_scratch0();
3932 Register temp1 = ToRegister(instr->temp1()); 4096 Register temp1 = ToRegister(instr->temp1());
3933 Register temp2 = ToRegister(instr->temp2()); 4097 Register temp2 = ToRegister(instr->temp2());
3934 4098
3935 MathExpGenerator::EmitMathExp( 4099 MathExpGenerator::EmitMathExp(masm(), input, result, double_scratch1,
3936 masm(), input, result, double_scratch1, double_scratch2, 4100 double_scratch2, temp1, temp2, scratch0());
3937 temp1, temp2, scratch0());
3938 } 4101 }
3939 4102
3940 4103
3941 void LCodeGen::DoMathLog(LMathLog* instr) { 4104 void LCodeGen::DoMathLog(LMathLog* instr) {
3942 __ PrepareCallCFunction(0, 1, scratch0()); 4105 __ PrepareCallCFunction(0, 1, scratch0());
3943 __ MovToFloatParameter(ToDoubleRegister(instr->value())); 4106 __ MovToFloatParameter(ToDoubleRegister(instr->value()));
3944 __ CallCFunction(ExternalReference::math_log_double_function(isolate()), 4107 __ CallCFunction(ExternalReference::math_log_double_function(isolate()), 0,
3945 0, 1); 4108 1);
3946 __ MovFromFloatResult(ToDoubleRegister(instr->result())); 4109 __ MovFromFloatResult(ToDoubleRegister(instr->result()));
3947 } 4110 }
3948 4111
3949 4112
3950 void LCodeGen::DoMathClz32(LMathClz32* instr) { 4113 void LCodeGen::DoMathClz32(LMathClz32* instr) {
3951 Register input = ToRegister(instr->value()); 4114 Register input = ToRegister(instr->value());
3952 Register result = ToRegister(instr->result()); 4115 Register result = ToRegister(instr->result());
3953 __ clz(result, input); 4116 __ cntlzw_(result, input);
3954 } 4117 }
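cntlzw is PPC's count-leading-zeros; like ARM's clz it yields 32 for a zero input, matching Math.clz32. A reference implementation of the semantics:

  #include <cassert>
  #include <cstdint>

  int Clz32(uint32_t x) {
    int n = 0;
    while (n < 32 && !(x & 0x80000000u)) { x <<= 1; ++n; }
    return n;
  }

  int main() {
    assert(Clz32(0) == 32);
    assert(Clz32(1) == 31);
    assert(Clz32(0x80000000u) == 0);
  }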
3955 4118
3956 4119
3957 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { 4120 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3958 DCHECK(ToRegister(instr->context()).is(cp)); 4121 DCHECK(ToRegister(instr->context()).is(cp));
3959 DCHECK(ToRegister(instr->function()).is(r1)); 4122 DCHECK(ToRegister(instr->function()).is(r4));
3960 DCHECK(instr->HasPointerMap()); 4123 DCHECK(instr->HasPointerMap());
3961 4124
3962 Handle<JSFunction> known_function = instr->hydrogen()->known_function(); 4125 Handle<JSFunction> known_function = instr->hydrogen()->known_function();
3963 if (known_function.is_null()) { 4126 if (known_function.is_null()) {
3964 LPointerMap* pointers = instr->pointer_map(); 4127 LPointerMap* pointers = instr->pointer_map();
3965 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); 4128 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3966 ParameterCount count(instr->arity()); 4129 ParameterCount count(instr->arity());
3967 __ InvokeFunction(r1, count, CALL_FUNCTION, generator); 4130 __ InvokeFunction(r4, count, CALL_FUNCTION, generator);
3968 } else { 4131 } else {
3969 CallKnownFunction(known_function, 4132 CallKnownFunction(known_function,
3970 instr->hydrogen()->formal_parameter_count(), 4133 instr->hydrogen()->formal_parameter_count(),
3971 instr->arity(), 4134 instr->arity(), instr, R4_CONTAINS_TARGET);
3972 instr,
3973 R1_CONTAINS_TARGET);
3974 } 4135 }
3975 } 4136 }
3976 4137
3977 4138
3978 void LCodeGen::DoTailCallThroughMegamorphicCache( 4139 void LCodeGen::DoTailCallThroughMegamorphicCache(
3979 LTailCallThroughMegamorphicCache* instr) { 4140 LTailCallThroughMegamorphicCache* instr) {
3980 Register receiver = ToRegister(instr->receiver()); 4141 Register receiver = ToRegister(instr->receiver());
3981 Register name = ToRegister(instr->name()); 4142 Register name = ToRegister(instr->name());
3982 DCHECK(receiver.is(LoadDescriptor::ReceiverRegister())); 4143 DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
3983 DCHECK(name.is(LoadDescriptor::NameRegister())); 4144 DCHECK(name.is(LoadDescriptor::NameRegister()));
3984 DCHECK(receiver.is(r1)); 4145 DCHECK(receiver.is(r4));
3985 DCHECK(name.is(r2)); 4146 DCHECK(name.is(r5));
3986 4147
3987 Register scratch = r3; 4148 Register scratch = r6;
3988 Register extra = r4; 4149 Register extra = r7;
3989 Register extra2 = r5; 4150 Register extra2 = r8;
3990 Register extra3 = r6; 4151 Register extra3 = r9;
3991 4152
3992 // Important for the tail-call. 4153 // Important for the tail-call.
3993 bool must_teardown_frame = NeedsEagerFrame(); 4154 bool must_teardown_frame = NeedsEagerFrame();
3994 4155
3995 // The probe will tail call to a handler if found. 4156 // The probe will tail call to a handler if found.
3996 isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(), 4157 isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
3997 must_teardown_frame, receiver, name, 4158 must_teardown_frame, receiver, name,
3998 scratch, extra, extra2, extra3); 4159 scratch, extra, extra2, extra3);
3999 4160
4000 // Tail call to miss if we ended up here. 4161 // Tail call to miss if we ended up here.
4001 if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL); 4162 if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
4002 LoadIC::GenerateMiss(masm()); 4163 LoadIC::GenerateMiss(masm());
4003 } 4164 }
4004 4165
4005 4166
4006 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) { 4167 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
4007 DCHECK(ToRegister(instr->result()).is(r0)); 4168 DCHECK(ToRegister(instr->result()).is(r3));
4008 4169
4009 LPointerMap* pointers = instr->pointer_map(); 4170 LPointerMap* pointers = instr->pointer_map();
4010 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); 4171 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
4011 4172
4012 if (instr->target()->IsConstantOperand()) { 4173 if (instr->target()->IsConstantOperand()) {
4013 LConstantOperand* target = LConstantOperand::cast(instr->target()); 4174 LConstantOperand* target = LConstantOperand::cast(instr->target());
4014 Handle<Code> code = Handle<Code>::cast(ToHandle(target)); 4175 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
4015 generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET)); 4176 generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
4016 PlatformInterfaceDescriptor* call_descriptor = 4177 __ Call(code, RelocInfo::CODE_TARGET);
4017 instr->descriptor().platform_specific_descriptor();
4018 __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None(), al,
4019 call_descriptor->storage_mode());
4020 } else { 4178 } else {
4021 DCHECK(instr->target()->IsRegister()); 4179 DCHECK(instr->target()->IsRegister());
4022 Register target = ToRegister(instr->target()); 4180 Register target = ToRegister(instr->target());
4023 generator.BeforeCall(__ CallSize(target)); 4181 generator.BeforeCall(__ CallSize(target));
4024 // Make sure we don't emit any additional entries in the constant pool 4182 __ addi(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
4025 // before the call to ensure that the CallCodeSize() calculated the correct 4183 __ CallJSEntry(ip);
4026 // number of instructions for the constant pool load.
4027 {
4028 ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
4029 __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
4030 }
4031 __ Call(target);
4032 } 4184 }
4033 generator.AfterCall(); 4185 generator.AfterCall();
4034 } 4186 }
4035 4187
4036 4188
4037 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) { 4189 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
4038 DCHECK(ToRegister(instr->function()).is(r1)); 4190 DCHECK(ToRegister(instr->function()).is(r4));
4039 DCHECK(ToRegister(instr->result()).is(r0)); 4191 DCHECK(ToRegister(instr->result()).is(r3));
4040 4192
4041 if (instr->hydrogen()->pass_argument_count()) { 4193 if (instr->hydrogen()->pass_argument_count()) {
4042 __ mov(r0, Operand(instr->arity())); 4194 __ mov(r3, Operand(instr->arity()));
4043 } 4195 }
4044 4196
4045 // Change context. 4197 // Change context.
4046 __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); 4198 __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
4047 4199
4048 // Load the code entry address 4200 bool is_self_call = false;
4049 __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); 4201 if (instr->hydrogen()->function()->IsConstant()) {
4050 __ Call(ip); 4202 HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
4203 Handle<JSFunction> jsfun =
4204 Handle<JSFunction>::cast(fun_const->handle(isolate()));
4205 is_self_call = jsfun.is_identical_to(info()->closure());
4206 }
4207
4208 if (is_self_call) {
4209 __ CallSelf();
4210 } else {
4211 __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
4212 __ CallJSEntry(ip);
4213 }
4051 4214
4052 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); 4215 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
4053 } 4216 }
4054 4217
4055 4218
4056 void LCodeGen::DoCallFunction(LCallFunction* instr) { 4219 void LCodeGen::DoCallFunction(LCallFunction* instr) {
4057 DCHECK(ToRegister(instr->context()).is(cp)); 4220 DCHECK(ToRegister(instr->context()).is(cp));
4058 DCHECK(ToRegister(instr->function()).is(r1)); 4221 DCHECK(ToRegister(instr->function()).is(r4));
4059 DCHECK(ToRegister(instr->result()).is(r0)); 4222 DCHECK(ToRegister(instr->result()).is(r3));
4060 4223
4061 int arity = instr->arity(); 4224 int arity = instr->arity();
4062 CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags()); 4225 CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
4063 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 4226 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4064 } 4227 }
4065 4228
4066 4229
4067 void LCodeGen::DoCallNew(LCallNew* instr) { 4230 void LCodeGen::DoCallNew(LCallNew* instr) {
4068 DCHECK(ToRegister(instr->context()).is(cp)); 4231 DCHECK(ToRegister(instr->context()).is(cp));
4069 DCHECK(ToRegister(instr->constructor()).is(r1)); 4232 DCHECK(ToRegister(instr->constructor()).is(r4));
4070 DCHECK(ToRegister(instr->result()).is(r0)); 4233 DCHECK(ToRegister(instr->result()).is(r3));
4071 4234
4072 __ mov(r0, Operand(instr->arity())); 4235 __ mov(r3, Operand(instr->arity()));
4073 // No cell in r2 for construct type feedback in optimized code 4236 // No cell in r5 for construct type feedback in optimized code
4074 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); 4237 __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
4075 CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS); 4238 CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
4076 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); 4239 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4077 } 4240 }
4078 4241
4079 4242
4080 void LCodeGen::DoCallNewArray(LCallNewArray* instr) { 4243 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
4081 DCHECK(ToRegister(instr->context()).is(cp)); 4244 DCHECK(ToRegister(instr->context()).is(cp));
4082 DCHECK(ToRegister(instr->constructor()).is(r1)); 4245 DCHECK(ToRegister(instr->constructor()).is(r4));
4083 DCHECK(ToRegister(instr->result()).is(r0)); 4246 DCHECK(ToRegister(instr->result()).is(r3));
4084 4247
4085 __ mov(r0, Operand(instr->arity())); 4248 __ mov(r3, Operand(instr->arity()));
4086 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); 4249 __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
4087 ElementsKind kind = instr->hydrogen()->elements_kind(); 4250 ElementsKind kind = instr->hydrogen()->elements_kind();
4088 AllocationSiteOverrideMode override_mode = 4251 AllocationSiteOverrideMode override_mode =
4089 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE) 4252 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
4090 ? DISABLE_ALLOCATION_SITES 4253 ? DISABLE_ALLOCATION_SITES
4091 : DONT_OVERRIDE; 4254 : DONT_OVERRIDE;
4092 4255
4093 if (instr->arity() == 0) { 4256 if (instr->arity() == 0) {
4094 ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode); 4257 ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
4095 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); 4258 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4096 } else if (instr->arity() == 1) { 4259 } else if (instr->arity() == 1) {
4097 Label done; 4260 Label done;
4098 if (IsFastPackedElementsKind(kind)) { 4261 if (IsFastPackedElementsKind(kind)) {
4099 Label packed_case; 4262 Label packed_case;
4100 // The elements kind may need to change here; 4263 // The elements kind may need to change here;
4101 // look at the first argument to decide. 4264 // look at the first argument to decide.
4102 __ ldr(r5, MemOperand(sp, 0)); 4265 __ LoadP(r8, MemOperand(sp, 0));
4103 __ cmp(r5, Operand::Zero()); 4266 __ cmpi(r8, Operand::Zero());
4104 __ b(eq, &packed_case); 4267 __ beq(&packed_case);
4105 4268
4106 ElementsKind holey_kind = GetHoleyElementsKind(kind); 4269 ElementsKind holey_kind = GetHoleyElementsKind(kind);
4107 ArraySingleArgumentConstructorStub stub(isolate(), 4270 ArraySingleArgumentConstructorStub stub(isolate(), holey_kind,
4108 holey_kind,
4109 override_mode); 4271 override_mode);
4110 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); 4272 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4111 __ jmp(&done); 4273 __ b(&done);
4112 __ bind(&packed_case); 4274 __ bind(&packed_case);
4113 } 4275 }
4114 4276
4115 ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode); 4277 ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
4116 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); 4278 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4117 __ bind(&done); 4279 __ bind(&done);
4118 } else { 4280 } else {
4119 ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode); 4281 ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
4120 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); 4282 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4121 } 4283 }
4122 } 4284 }
4123 4285
4124 4286
4125 void LCodeGen::DoCallRuntime(LCallRuntime* instr) { 4287 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
4126 CallRuntime(instr->function(), instr->arity(), instr); 4288 CallRuntime(instr->function(), instr->arity(), instr);
4127 } 4289 }
4128 4290
4129 4291
4130 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) { 4292 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
4131 Register function = ToRegister(instr->function()); 4293 Register function = ToRegister(instr->function());
4132 Register code_object = ToRegister(instr->code_object()); 4294 Register code_object = ToRegister(instr->code_object());
4133 __ add(code_object, code_object, Operand(Code::kHeaderSize - kHeapObjectTag)); 4295 __ addi(code_object, code_object,
4134 __ str(code_object, 4296 Operand(Code::kHeaderSize - kHeapObjectTag));
4135 FieldMemOperand(function, JSFunction::kCodeEntryOffset)); 4297 __ StoreP(code_object,
4298 FieldMemOperand(function, JSFunction::kCodeEntryOffset), r0);
4136 } 4299 }
4137 4300
4138 4301
4139 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) { 4302 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
4140 Register result = ToRegister(instr->result()); 4303 Register result = ToRegister(instr->result());
4141 Register base = ToRegister(instr->base_object()); 4304 Register base = ToRegister(instr->base_object());
4142 if (instr->offset()->IsConstantOperand()) { 4305 if (instr->offset()->IsConstantOperand()) {
4143 LConstantOperand* offset = LConstantOperand::cast(instr->offset()); 4306 LConstantOperand* offset = LConstantOperand::cast(instr->offset());
4144 __ add(result, base, Operand(ToInteger32(offset))); 4307 __ Add(result, base, ToInteger32(offset), r0);
4145 } else { 4308 } else {
4146 Register offset = ToRegister(instr->offset()); 4309 Register offset = ToRegister(instr->offset());
4147 __ add(result, base, offset); 4310 __ add(result, base, offset);
4148 } 4311 }
4149 } 4312 }
4150 4313
4151 4314
4152 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { 4315 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
4316 HStoreNamedField* hinstr = instr->hydrogen();
4153 Representation representation = instr->representation(); 4317 Representation representation = instr->representation();
4154 4318
4155 Register object = ToRegister(instr->object()); 4319 Register object = ToRegister(instr->object());
4156 Register scratch = scratch0(); 4320 Register scratch = scratch0();
4157 HObjectAccess access = instr->hydrogen()->access(); 4321 HObjectAccess access = hinstr->access();
4158 int offset = access.offset(); 4322 int offset = access.offset();
4159 4323
4160 if (access.IsExternalMemory()) { 4324 if (access.IsExternalMemory()) {
4161 Register value = ToRegister(instr->value()); 4325 Register value = ToRegister(instr->value());
4162 MemOperand operand = MemOperand(object, offset); 4326 MemOperand operand = MemOperand(object, offset);
4163 __ Store(value, operand, representation); 4327 __ StoreRepresentation(value, operand, representation, r0);
4164 return; 4328 return;
4165 } 4329 }
4166 4330
4167 __ AssertNotSmi(object); 4331 __ AssertNotSmi(object);
4168 4332
4169 DCHECK(!representation.IsSmi() || 4333 #if V8_TARGET_ARCH_PPC64
4170 !instr->value()->IsConstantOperand() || 4334 DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
4335 IsInteger32(LConstantOperand::cast(instr->value())));
4336 #else
4337 DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
4171 IsSmi(LConstantOperand::cast(instr->value()))); 4338 IsSmi(LConstantOperand::cast(instr->value())));
4339 #endif
4172 if (representation.IsDouble()) { 4340 if (representation.IsDouble()) {
4173 DCHECK(access.IsInobject()); 4341 DCHECK(access.IsInobject());
4174 DCHECK(!instr->hydrogen()->has_transition()); 4342 DCHECK(!hinstr->has_transition());
4175 DCHECK(!instr->hydrogen()->NeedsWriteBarrier()); 4343 DCHECK(!hinstr->NeedsWriteBarrier());
4176 DwVfpRegister value = ToDoubleRegister(instr->value()); 4344 DoubleRegister value = ToDoubleRegister(instr->value());
4177 __ vstr(value, FieldMemOperand(object, offset)); 4345 __ stfd(value, FieldMemOperand(object, offset));
4178 return; 4346 return;
4179 } 4347 }
4180 4348
4181 if (instr->hydrogen()->has_transition()) { 4349 if (hinstr->has_transition()) {
4182 Handle<Map> transition = instr->hydrogen()->transition_map(); 4350 Handle<Map> transition = hinstr->transition_map();
4183 AddDeprecationDependency(transition); 4351 AddDeprecationDependency(transition);
4184 __ mov(scratch, Operand(transition)); 4352 __ mov(scratch, Operand(transition));
4185 __ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); 4353 __ StoreP(scratch, FieldMemOperand(object, HeapObject::kMapOffset), r0);
4186 if (instr->hydrogen()->NeedsWriteBarrierForMap()) { 4354 if (hinstr->NeedsWriteBarrierForMap()) {
4187 Register temp = ToRegister(instr->temp()); 4355 Register temp = ToRegister(instr->temp());
4188 // Update the write barrier for the map field. 4356 // Update the write barrier for the map field.
4189 __ RecordWriteForMap(object, 4357 __ RecordWriteForMap(object, scratch, temp, GetLinkRegisterState(),
4190 scratch,
4191 temp,
4192 GetLinkRegisterState(),
4193 kSaveFPRegs); 4358 kSaveFPRegs);
4194 } 4359 }
4195 } 4360 }
4196 4361
4197 // Do the store. 4362 // Do the store.
4198 Register value = ToRegister(instr->value()); 4363 Register value = ToRegister(instr->value());
4364
4365 #if V8_TARGET_ARCH_PPC64
4366 // 64-bit Smi optimization
4367 if (representation.IsSmi() &&
4368 hinstr->value()->representation().IsInteger32()) {
4369 DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
4370 // Store int value directly to upper half of the smi.
4371 STATIC_ASSERT(kSmiTag == 0);
4372 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
4373 #if V8_TARGET_LITTLE_ENDIAN
4374 offset += kPointerSize / 2;
4375 #endif
4376 representation = Representation::Integer32();
4377 }
4378 #endif
4379
4199 if (access.IsInobject()) { 4380 if (access.IsInobject()) {
4200 MemOperand operand = FieldMemOperand(object, offset); 4381 MemOperand operand = FieldMemOperand(object, offset);
4201 __ Store(value, operand, representation); 4382 __ StoreRepresentation(value, operand, representation, r0);
4202 if (instr->hydrogen()->NeedsWriteBarrier()) { 4383 if (hinstr->NeedsWriteBarrier()) {
4203 // Update the write barrier for the object for in-object properties. 4384 // Update the write barrier for the object for in-object properties.
4204 __ RecordWriteField(object, 4385 __ RecordWriteField(
4205 offset, 4386 object, offset, value, scratch, GetLinkRegisterState(), kSaveFPRegs,
4206 value, 4387 EMIT_REMEMBERED_SET, hinstr->SmiCheckForWriteBarrier(),
4207 scratch, 4388 hinstr->PointersToHereCheckForValue());
4208 GetLinkRegisterState(),
4209 kSaveFPRegs,
4210 EMIT_REMEMBERED_SET,
4211 instr->hydrogen()->SmiCheckForWriteBarrier(),
4212 instr->hydrogen()->PointersToHereCheckForValue());
4213 } 4389 }
4214 } else { 4390 } else {
4215 __ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset)); 4391 __ LoadP(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
4216 MemOperand operand = FieldMemOperand(scratch, offset); 4392 MemOperand operand = FieldMemOperand(scratch, offset);
4217 __ Store(value, operand, representation); 4393 __ StoreRepresentation(value, operand, representation, r0);
4218 if (instr->hydrogen()->NeedsWriteBarrier()) { 4394 if (hinstr->NeedsWriteBarrier()) {
4219 // Update the write barrier for the properties array. 4395 // Update the write barrier for the properties array.
4220 // object is used as a scratch register. 4396 // object is used as a scratch register.
4221 __ RecordWriteField(scratch, 4397 __ RecordWriteField(
4222 offset, 4398 scratch, offset, value, object, GetLinkRegisterState(), kSaveFPRegs,
4223 value, 4399 EMIT_REMEMBERED_SET, hinstr->SmiCheckForWriteBarrier(),
4224 object, 4400 hinstr->PointersToHereCheckForValue());
4225 GetLinkRegisterState(),
4226 kSaveFPRegs,
4227 EMIT_REMEMBERED_SET,
4228 instr->hydrogen()->SmiCheckForWriteBarrier(),
4229 instr->hydrogen()->PointersToHereCheckForValue());
4230 } 4401 }
4231 } 4402 }
4232 } 4403 }
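On PPC64 (the V8_TARGET_ARCH_PPC64 block above), kSmiTag is 0 and kSmiTagSize + kSmiShiftSize is 32, so a Smi is simply the 32-bit integer shifted into the upper word. That is what lets the store write the raw int32 into the Smi's upper half instead of tagging first. A worked sketch under those assumptions, independent of the V8 headers:

    #include <cassert>
    #include <cstdint>

    int main() {
      // PPC64-style Smi: value shifted into the upper 32 bits, tag == 0.
      int32_t value = -7;
      int64_t smi = static_cast<int64_t>(value) << 32;
      assert((smi >> 32) == value);        // untagging recovers the int
      assert((smi & 0xffffffffLL) == 0);   // low word carries only the tag
      return 0;
    }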
4233 4404
4234 4405
4235 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { 4406 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
4236 DCHECK(ToRegister(instr->context()).is(cp)); 4407 DCHECK(ToRegister(instr->context()).is(cp));
4237 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister())); 4408 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4238 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister())); 4409 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4239 4410
4240 __ mov(StoreDescriptor::NameRegister(), Operand(instr->name())); 4411 __ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
4241 Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode()); 4412 Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
4242 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS); 4413 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4243 } 4414 }
4244 4415
4245 4416
4246 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { 4417 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
4247 Condition cc = instr->hydrogen()->allow_equality() ? hi : hs; 4418 Representation representation = instr->hydrogen()->length()->representation();
4248 if (instr->index()->IsConstantOperand()) { 4419 DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
4249 Operand index = ToOperand(instr->index()); 4420 DCHECK(representation.IsSmiOrInteger32());
4421
4422 Condition cc = instr->hydrogen()->allow_equality() ? lt : le;
4423 if (instr->length()->IsConstantOperand()) {
4424 int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
4425 Register index = ToRegister(instr->index());
4426 if (representation.IsSmi()) {
4427 __ Cmpli(index, Operand(Smi::FromInt(length)), r0);
4428 } else {
4429 __ Cmplwi(index, Operand(length), r0);
4430 }
4431 cc = CommuteCondition(cc);
4432 } else if (instr->index()->IsConstantOperand()) {
4433 int32_t index = ToInteger32(LConstantOperand::cast(instr->index()));
4250 Register length = ToRegister(instr->length()); 4434 Register length = ToRegister(instr->length());
4251 __ cmp(length, index); 4435 if (representation.IsSmi()) {
4252 cc = CommuteCondition(cc); 4436 __ Cmpli(length, Operand(Smi::FromInt(index)), r0);
4437 } else {
4438 __ Cmplwi(length, Operand(index), r0);
4439 }
4253 } else { 4440 } else {
4254 Register index = ToRegister(instr->index()); 4441 Register index = ToRegister(instr->index());
4255 Operand length = ToOperand(instr->length()); 4442 Register length = ToRegister(instr->length());
4256 __ cmp(index, length); 4443 if (representation.IsSmi()) {
4444 __ cmpl(length, index);
4445 } else {
4446 __ cmplw(length, index);
4447 }
4257 } 4448 }
4258 if (FLAG_debug_code && instr->hydrogen()->skip_check()) { 4449 if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
4259 Label done; 4450 Label done;
4260 __ b(NegateCondition(cc), &done); 4451 __ b(NegateCondition(cc), &done);
4261 __ stop("eliminated bounds check failed"); 4452 __ stop("eliminated bounds check failed");
4262 __ bind(&done); 4453 __ bind(&done);
4263 } else { 4454 } else {
4264 DeoptimizeIf(cc, instr, "out of bounds"); 4455 DeoptimizeIf(cc, instr, "out of bounds");
4265 } 4456 }
4266 } 4457 }
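When a constant operand forces the compare to be emitted with the operands swapped (constant length against the index register, or constant index against the length register), the branch condition must be commuted rather than negated: a < b is the same test as b > a. A tiny sketch of that mapping, with names local to this example:

    #include <cassert>

    enum Cond { kLt, kLe, kGt, kGe };

    // Swapping compare operands exchanges lt<->gt and le<->ge, mirroring
    // CommuteCondition above; note this is not condition negation.
    Cond Commute(Cond c) {
      switch (c) {
        case kLt: return kGt;
        case kLe: return kGe;
        case kGt: return kLt;
        case kGe: return kLe;
      }
      return c;
    }

    int main() {
      int index = 3, length = 7;
      assert((index < length) == (length > index));  // same predicate, swapped
      assert(Commute(kLt) == kGt);
      return 0;
    }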
4267 4458
4268 4459
4269 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { 4460 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
4270 Register external_pointer = ToRegister(instr->elements()); 4461 Register external_pointer = ToRegister(instr->elements());
4271 Register key = no_reg; 4462 Register key = no_reg;
4272 ElementsKind elements_kind = instr->elements_kind(); 4463 ElementsKind elements_kind = instr->elements_kind();
4273 bool key_is_constant = instr->key()->IsConstantOperand(); 4464 bool key_is_constant = instr->key()->IsConstantOperand();
4274 int constant_key = 0; 4465 int constant_key = 0;
4275 if (key_is_constant) { 4466 if (key_is_constant) {
4276 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); 4467 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4277 if (constant_key & 0xF0000000) { 4468 if (constant_key & 0xF0000000) {
4278 Abort(kArrayIndexConstantValueTooBig); 4469 Abort(kArrayIndexConstantValueTooBig);
4279 } 4470 }
4280 } else { 4471 } else {
4281 key = ToRegister(instr->key()); 4472 key = ToRegister(instr->key());
4282 } 4473 }
4283 int element_size_shift = ElementsKindToShiftSize(elements_kind); 4474 int element_size_shift = ElementsKindToShiftSize(elements_kind);
4284 int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) 4475 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
4285 ? (element_size_shift - kSmiTagSize) : element_size_shift;
4286 int base_offset = instr->base_offset(); 4476 int base_offset = instr->base_offset();
4287 4477
4288 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS || 4478 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
4289 elements_kind == FLOAT32_ELEMENTS || 4479 elements_kind == FLOAT32_ELEMENTS ||
4290 elements_kind == EXTERNAL_FLOAT64_ELEMENTS || 4480 elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
4291 elements_kind == FLOAT64_ELEMENTS) { 4481 elements_kind == FLOAT64_ELEMENTS) {
4292 Register address = scratch0(); 4482 Register address = scratch0();
4293 DwVfpRegister value(ToDoubleRegister(instr->value())); 4483 DoubleRegister value(ToDoubleRegister(instr->value()));
4294 if (key_is_constant) { 4484 if (key_is_constant) {
4295 if (constant_key != 0) { 4485 if (constant_key != 0) {
4296 __ add(address, external_pointer, 4486 __ Add(address, external_pointer, constant_key << element_size_shift,
4297 Operand(constant_key << element_size_shift)); 4487 r0);
4298 } else { 4488 } else {
4299 address = external_pointer; 4489 address = external_pointer;
4300 } 4490 }
4301 } else { 4491 } else {
4302 __ add(address, external_pointer, Operand(key, LSL, shift_size)); 4492 __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
4493 __ add(address, external_pointer, r0);
4303 } 4494 }
4304 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS || 4495 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
4305 elements_kind == FLOAT32_ELEMENTS) { 4496 elements_kind == FLOAT32_ELEMENTS) {
4306 __ vcvt_f32_f64(double_scratch0().low(), value); 4497 __ frsp(double_scratch0(), value);
4307 __ vstr(double_scratch0().low(), address, base_offset); 4498 __ stfs(double_scratch0(), MemOperand(address, base_offset));
4308 } else { // Storing doubles, not floats. 4499 } else { // Storing doubles, not floats.
4309 __ vstr(value, address, base_offset); 4500 __ stfd(value, MemOperand(address, base_offset));
4310 } 4501 }
4311 } else { 4502 } else {
4312 Register value(ToRegister(instr->value())); 4503 Register value(ToRegister(instr->value()));
4313 MemOperand mem_operand = PrepareKeyedOperand( 4504 MemOperand mem_operand =
4314 key, external_pointer, key_is_constant, constant_key, 4505 PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
4315 element_size_shift, shift_size, 4506 constant_key, element_size_shift, base_offset);
4316 base_offset);
4317 switch (elements_kind) { 4507 switch (elements_kind) {
4318 case EXTERNAL_UINT8_CLAMPED_ELEMENTS: 4508 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
4319 case EXTERNAL_INT8_ELEMENTS: 4509 case EXTERNAL_INT8_ELEMENTS:
4320 case EXTERNAL_UINT8_ELEMENTS: 4510 case EXTERNAL_UINT8_ELEMENTS:
4321 case UINT8_ELEMENTS: 4511 case UINT8_ELEMENTS:
4322 case UINT8_CLAMPED_ELEMENTS: 4512 case UINT8_CLAMPED_ELEMENTS:
4323 case INT8_ELEMENTS: 4513 case INT8_ELEMENTS:
4324 __ strb(value, mem_operand); 4514 if (key_is_constant) {
4515 __ StoreByte(value, mem_operand, r0);
4516 } else {
4517 __ stbx(value, mem_operand);
4518 }
4325 break; 4519 break;
4326 case EXTERNAL_INT16_ELEMENTS: 4520 case EXTERNAL_INT16_ELEMENTS:
4327 case EXTERNAL_UINT16_ELEMENTS: 4521 case EXTERNAL_UINT16_ELEMENTS:
4328 case INT16_ELEMENTS: 4522 case INT16_ELEMENTS:
4329 case UINT16_ELEMENTS: 4523 case UINT16_ELEMENTS:
4330 __ strh(value, mem_operand); 4524 if (key_is_constant) {
4525 __ StoreHalfWord(value, mem_operand, r0);
4526 } else {
4527 __ sthx(value, mem_operand);
4528 }
4331 break; 4529 break;
4332 case EXTERNAL_INT32_ELEMENTS: 4530 case EXTERNAL_INT32_ELEMENTS:
4333 case EXTERNAL_UINT32_ELEMENTS: 4531 case EXTERNAL_UINT32_ELEMENTS:
4334 case INT32_ELEMENTS: 4532 case INT32_ELEMENTS:
4335 case UINT32_ELEMENTS: 4533 case UINT32_ELEMENTS:
4336 __ str(value, mem_operand); 4534 if (key_is_constant) {
4535 __ StoreWord(value, mem_operand, r0);
4536 } else {
4537 __ stwx(value, mem_operand);
4538 }
4337 break; 4539 break;
4338 case FLOAT32_ELEMENTS: 4540 case FLOAT32_ELEMENTS:
4339 case FLOAT64_ELEMENTS: 4541 case FLOAT64_ELEMENTS:
4340 case EXTERNAL_FLOAT32_ELEMENTS: 4542 case EXTERNAL_FLOAT32_ELEMENTS:
4341 case EXTERNAL_FLOAT64_ELEMENTS: 4543 case EXTERNAL_FLOAT64_ELEMENTS:
4342 case FAST_DOUBLE_ELEMENTS: 4544 case FAST_DOUBLE_ELEMENTS:
4343 case FAST_ELEMENTS: 4545 case FAST_ELEMENTS:
4344 case FAST_SMI_ELEMENTS: 4546 case FAST_SMI_ELEMENTS:
4345 case FAST_HOLEY_DOUBLE_ELEMENTS: 4547 case FAST_HOLEY_DOUBLE_ELEMENTS:
4346 case FAST_HOLEY_ELEMENTS: 4548 case FAST_HOLEY_ELEMENTS:
4347 case FAST_HOLEY_SMI_ELEMENTS: 4549 case FAST_HOLEY_SMI_ELEMENTS:
4348 case DICTIONARY_ELEMENTS: 4550 case DICTIONARY_ELEMENTS:
4349 case SLOPPY_ARGUMENTS_ELEMENTS: 4551 case SLOPPY_ARGUMENTS_ELEMENTS:
4350 UNREACHABLE(); 4552 UNREACHABLE();
4351 break; 4553 break;
4352 } 4554 }
4353 } 4555 }
4354 } 4556 }
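The IndexToArrayOffset step above exists because a tagged (Smi) key already carries a left shift of kSmiTagSize, so it needs one less shift than an untagged key to become a byte offset. A hedged 32-bit-style sketch (on PPC64 the Smi shift is larger, which is why the real helper takes the key representation into account):

    #include <cassert>
    #include <cstdint>

    // 32-bit-style sketch: kSmiTagSize == 1, names local to this example.
    int32_t IndexToByteOffset(int32_t key, int element_size_shift, bool key_is_smi) {
      const int kSmiTagSize = 1;
      int shift = key_is_smi ? element_size_shift - kSmiTagSize : element_size_shift;
      return key << shift;
    }

    int main() {
      int32_t key = 5;
      int32_t smi_key = key << 1;                          // tagged key
      assert(IndexToByteOffset(key, 3, false) == 40);      // 5 * sizeof(double)
      assert(IndexToByteOffset(smi_key, 3, true) == 40);   // same byte offset
      return 0;
    }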
4355 4557
4356 4558
4357 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { 4559 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4358 DwVfpRegister value = ToDoubleRegister(instr->value()); 4560 DoubleRegister value = ToDoubleRegister(instr->value());
4359 Register elements = ToRegister(instr->elements()); 4561 Register elements = ToRegister(instr->elements());
4562 Register key = no_reg;
4360 Register scratch = scratch0(); 4563 Register scratch = scratch0();
4361 DwVfpRegister double_scratch = double_scratch0(); 4564 DoubleRegister double_scratch = double_scratch0();
4362 bool key_is_constant = instr->key()->IsConstantOperand(); 4565 bool key_is_constant = instr->key()->IsConstantOperand();
4363 int base_offset = instr->base_offset(); 4566 int constant_key = 0;
4364 4567
4365 // Calculate the effective address of the slot in the array to store the 4568 // Calculate the effective address of the slot in the array to store the
4366 // double value. 4569 // double value.
4367 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
4368 if (key_is_constant) { 4570 if (key_is_constant) {
4369 int constant_key = ToInteger32(LConstantOperand::cast(instr->key())); 4571 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4370 if (constant_key & 0xF0000000) { 4572 if (constant_key & 0xF0000000) {
4371 Abort(kArrayIndexConstantValueTooBig); 4573 Abort(kArrayIndexConstantValueTooBig);
4372 } 4574 }
4373 __ add(scratch, elements,
4374 Operand((constant_key << element_size_shift) + base_offset));
4375 } else { 4575 } else {
4376 int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) 4576 key = ToRegister(instr->key());
4377 ? (element_size_shift - kSmiTagSize) : element_size_shift; 4577 }
4378 __ add(scratch, elements, Operand(base_offset)); 4578 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
4379 __ add(scratch, scratch, 4579 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
4380 Operand(ToRegister(instr->key()), LSL, shift_size)); 4580 int base_offset = instr->base_offset() + constant_key * kDoubleSize;
4581 if (!key_is_constant) {
4582 __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
4583 __ add(scratch, elements, scratch);
4584 elements = scratch;
4585 }
4586 if (!is_int16(base_offset)) {
4587 __ Add(scratch, elements, base_offset, r0);
4588 base_offset = 0;
4589 elements = scratch;
4381 } 4590 }
4382 4591
4383 if (instr->NeedsCanonicalization()) { 4592 if (instr->NeedsCanonicalization()) {
4384 // Force a canonical NaN. 4593 // Force a canonical NaN.
4385 if (masm()->emit_debug_code()) { 4594 __ CanonicalizeNaN(double_scratch, value);
4386 __ vmrs(ip); 4595 __ stfd(double_scratch, MemOperand(elements, base_offset));
4387 __ tst(ip, Operand(kVFPDefaultNaNModeControlBit));
4388 __ Assert(ne, kDefaultNaNModeNotSet);
4389 }
4390 __ VFPCanonicalizeNaN(double_scratch, value);
4391 __ vstr(double_scratch, scratch, 0);
4392 } else { 4596 } else {
4393 __ vstr(value, scratch, 0); 4597 __ stfd(value, MemOperand(elements, base_offset));
4394 } 4598 }
4395 } 4599 }
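CanonicalizeNaN is needed because a fast-double backing store must never hold an arbitrary NaN payload; every NaN written back should be the single canonical quiet NaN so later reads cannot be confused with other boxed bit patterns. A portable sketch of the idea (not the V8 routine itself):

    #include <cassert>
    #include <cmath>
    #include <limits>

    // Replace any NaN payload with the canonical quiet NaN before storing
    // into a double array; all other values pass through unchanged.
    double CanonicalizeForStore(double v) {
      return std::isnan(v) ? std::numeric_limits<double>::quiet_NaN() : v;
    }

    int main() {
      assert(std::isnan(CanonicalizeForStore(std::nan("42"))));  // still a NaN
      assert(CanonicalizeForStore(1.5) == 1.5);                  // untouched
      return 0;
    }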
4396 4600
4397 4601
4398 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { 4602 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4603 HStoreKeyed* hinstr = instr->hydrogen();
4399 Register value = ToRegister(instr->value()); 4604 Register value = ToRegister(instr->value());
4400 Register elements = ToRegister(instr->elements()); 4605 Register elements = ToRegister(instr->elements());
4401 Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) 4606 Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
4402 : no_reg;
4403 Register scratch = scratch0(); 4607 Register scratch = scratch0();
4404 Register store_base = scratch; 4608 Register store_base = scratch;
4405 int offset = instr->base_offset(); 4609 int offset = instr->base_offset();
4406 4610
4407 // Do the store. 4611 // Do the store.
4408 if (instr->key()->IsConstantOperand()) { 4612 if (instr->key()->IsConstantOperand()) {
4409 DCHECK(!instr->hydrogen()->NeedsWriteBarrier()); 4613 DCHECK(!hinstr->NeedsWriteBarrier());
4410 LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); 4614 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
4411 offset += ToInteger32(const_operand) * kPointerSize; 4615 offset += ToInteger32(const_operand) * kPointerSize;
4412 store_base = elements; 4616 store_base = elements;
4413 } else { 4617 } else {
 4414 // Even though the HStoreKeyed instruction forces the input 4618 // Even though the HStoreKeyed instruction forces the input
 4415 // representation for the key to be an integer, the input gets replaced 4619 // representation for the key to be an integer, the input gets replaced
 4416 // during bounds check elimination with the index argument to the bounds 4620 // during bounds check elimination with the index argument to the bounds
4417 // check, which can be tagged, so that case must be handled here, too. 4621 // check, which can be tagged, so that case must be handled here, too.
4418 if (instr->hydrogen()->key()->representation().IsSmi()) { 4622 if (hinstr->key()->representation().IsSmi()) {
4419 __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key)); 4623 __ SmiToPtrArrayOffset(scratch, key);
4420 } else { 4624 } else {
4421 __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2)); 4625 __ ShiftLeftImm(scratch, key, Operand(kPointerSizeLog2));
4422 } 4626 }
4627 __ add(scratch, elements, scratch);
4423 } 4628 }
4424 __ str(value, MemOperand(store_base, offset));
4425 4629
4426 if (instr->hydrogen()->NeedsWriteBarrier()) { 4630 Representation representation = hinstr->value()->representation();
4427 SmiCheck check_needed = 4631
4428 instr->hydrogen()->value()->type().IsHeapObject() 4632 #if V8_TARGET_ARCH_PPC64
4429 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; 4633 // 64-bit Smi optimization
4634 if (representation.IsInteger32()) {
4635 DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
4636 DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
4637 // Store int value directly to upper half of the smi.
4638 STATIC_ASSERT(kSmiTag == 0);
4639 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
4640 #if V8_TARGET_LITTLE_ENDIAN
4641 offset += kPointerSize / 2;
4642 #endif
4643 }
4644 #endif
4645
4646 __ StoreRepresentation(value, MemOperand(store_base, offset), representation,
4647 r0);
4648
4649 if (hinstr->NeedsWriteBarrier()) {
4650 SmiCheck check_needed = hinstr->value()->type().IsHeapObject()
4651 ? OMIT_SMI_CHECK
4652 : INLINE_SMI_CHECK;
4430 // Compute address of modified element and store it into key register. 4653 // Compute address of modified element and store it into key register.
4431 __ add(key, store_base, Operand(offset)); 4654 __ Add(key, store_base, offset, r0);
4432 __ RecordWrite(elements, 4655 __ RecordWrite(elements, key, value, GetLinkRegisterState(), kSaveFPRegs,
4433 key, 4656 EMIT_REMEMBERED_SET, check_needed,
4434 value, 4657 hinstr->PointersToHereCheckForValue());
4435 GetLinkRegisterState(),
4436 kSaveFPRegs,
4437 EMIT_REMEMBERED_SET,
4438 check_needed,
4439 instr->hydrogen()->PointersToHereCheckForValue());
4440 } 4658 }
4441 } 4659 }
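The `offset += kPointerSize / 2` adjustment under V8_TARGET_LITTLE_ENDIAN (here and in DoStoreNamedField above) selects which half of the 64-bit slot holds the Smi payload: on a little-endian machine the upper 32 bits live at the higher byte address. A standalone demonstration, assuming a little-endian host:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      int32_t value = 42;
      int64_t smi = static_cast<int64_t>(value) << 32;   // PPC64-style Smi

      // Store only the 32-bit payload at byte offset kPointerSize / 2 == 4,
      // i.e. into the upper half of the slot on a little-endian host.
      int64_t slot = 0;
      std::memcpy(reinterpret_cast<char*>(&slot) + 4, &value, sizeof(value));
      assert(slot == smi);   // the half-store produced a valid tagged Smi
      return 0;
    }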
4442 4660
4443 4661
4444 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) { 4662 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
 4445 // Dispatch by case: external/typed array, fast double, or fixed array. 4663 // Dispatch by case: external/typed array, fast double, or fixed array.
4446 if (instr->is_typed_elements()) { 4664 if (instr->is_typed_elements()) {
4447 DoStoreKeyedExternalArray(instr); 4665 DoStoreKeyedExternalArray(instr);
4448 } else if (instr->hydrogen()->value()->representation().IsDouble()) { 4666 } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4449 DoStoreKeyedFixedDoubleArray(instr); 4667 DoStoreKeyedFixedDoubleArray(instr);
4450 } else { 4668 } else {
4451 DoStoreKeyedFixedArray(instr); 4669 DoStoreKeyedFixedArray(instr);
4452 } 4670 }
4453 } 4671 }
4454 4672
4455 4673
4456 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { 4674 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4457 DCHECK(ToRegister(instr->context()).is(cp)); 4675 DCHECK(ToRegister(instr->context()).is(cp));
4458 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister())); 4676 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4459 DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister())); 4677 DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
4460 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister())); 4678 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4461 4679
4462 Handle<Code> ic = 4680 Handle<Code> ic =
4463 CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code(); 4681 CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
4464 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS); 4682 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4465 } 4683 }
4466 4684
4467 4685
4468 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { 4686 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4469 Register object_reg = ToRegister(instr->object()); 4687 Register object_reg = ToRegister(instr->object());
4470 Register scratch = scratch0(); 4688 Register scratch = scratch0();
4471 4689
4472 Handle<Map> from_map = instr->original_map(); 4690 Handle<Map> from_map = instr->original_map();
4473 Handle<Map> to_map = instr->transitioned_map(); 4691 Handle<Map> to_map = instr->transitioned_map();
4474 ElementsKind from_kind = instr->from_kind(); 4692 ElementsKind from_kind = instr->from_kind();
4475 ElementsKind to_kind = instr->to_kind(); 4693 ElementsKind to_kind = instr->to_kind();
4476 4694
4477 Label not_applicable; 4695 Label not_applicable;
4478 __ ldr(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset)); 4696 __ LoadP(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4479 __ cmp(scratch, Operand(from_map)); 4697 __ Cmpi(scratch, Operand(from_map), r0);
4480 __ b(ne, &not_applicable); 4698 __ bne(&not_applicable);
4481 4699
4482 if (IsSimpleMapChangeTransition(from_kind, to_kind)) { 4700 if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
4483 Register new_map_reg = ToRegister(instr->new_map_temp()); 4701 Register new_map_reg = ToRegister(instr->new_map_temp());
4484 __ mov(new_map_reg, Operand(to_map)); 4702 __ mov(new_map_reg, Operand(to_map));
4485 __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset)); 4703 __ StoreP(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset),
4704 r0);
4486 // Write barrier. 4705 // Write barrier.
4487 __ RecordWriteForMap(object_reg, 4706 __ RecordWriteForMap(object_reg, new_map_reg, scratch,
4488 new_map_reg, 4707 GetLinkRegisterState(), kDontSaveFPRegs);
4489 scratch,
4490 GetLinkRegisterState(),
4491 kDontSaveFPRegs);
4492 } else { 4708 } else {
4493 DCHECK(ToRegister(instr->context()).is(cp)); 4709 DCHECK(ToRegister(instr->context()).is(cp));
4494 DCHECK(object_reg.is(r0)); 4710 DCHECK(object_reg.is(r3));
4495 PushSafepointRegistersScope scope(this); 4711 PushSafepointRegistersScope scope(this);
4496 __ Move(r1, to_map); 4712 __ Move(r4, to_map);
4497 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE; 4713 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
4498 TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array); 4714 TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
4499 __ CallStub(&stub); 4715 __ CallStub(&stub);
4500 RecordSafepointWithRegisters( 4716 RecordSafepointWithRegisters(instr->pointer_map(), 0,
4501 instr->pointer_map(), 0, Safepoint::kLazyDeopt); 4717 Safepoint::kLazyDeopt);
4502 } 4718 }
4503 __ bind(&not_applicable); 4719 __ bind(&not_applicable);
4504 } 4720 }
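The shape of the fast path above: compare the object's map pointer against the expected source map, and for a "simple" transition just overwrite the map pointer (followed by a map write barrier, which this sketch omits). A conceptual, self-contained illustration only; the real maps and barriers are far richer:

    #include <cassert>

    struct Map { int elements_kind; };   // stand-in for the real v8 Map
    struct Object { const Map* map; };

    // Simple elements-kind transition: swap the map pointer if, and only
    // if, the object still has the map this code was specialized for.
    void TransitionIfMatches(Object* o, const Map* from, const Map* to) {
      if (o->map != from) return;        // the "not_applicable" path
      o->map = to;                       // real code adds a write barrier
    }

    int main() {
      Map smi_elements{0}, object_elements{2};
      Object o{&smi_elements};
      TransitionIfMatches(&o, &smi_elements, &object_elements);
      assert(o.map == &object_elements);
      return 0;
    }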
4505 4721
4506 4722
4507 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { 4723 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4508 Register object = ToRegister(instr->object()); 4724 Register object = ToRegister(instr->object());
4509 Register temp = ToRegister(instr->temp()); 4725 Register temp = ToRegister(instr->temp());
4510 Label no_memento_found; 4726 Label no_memento_found;
4511 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found); 4727 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
4512 DeoptimizeIf(eq, instr, "memento found"); 4728 DeoptimizeIf(eq, instr, "memento found");
4513 __ bind(&no_memento_found); 4729 __ bind(&no_memento_found);
4514 } 4730 }
4515 4731
4516 4732
4517 void LCodeGen::DoStringAdd(LStringAdd* instr) { 4733 void LCodeGen::DoStringAdd(LStringAdd* instr) {
4518 DCHECK(ToRegister(instr->context()).is(cp)); 4734 DCHECK(ToRegister(instr->context()).is(cp));
4519 DCHECK(ToRegister(instr->left()).is(r1)); 4735 DCHECK(ToRegister(instr->left()).is(r4));
4520 DCHECK(ToRegister(instr->right()).is(r0)); 4736 DCHECK(ToRegister(instr->right()).is(r3));
4521 StringAddStub stub(isolate(), 4737 StringAddStub stub(isolate(), instr->hydrogen()->flags(),
4522 instr->hydrogen()->flags(),
4523 instr->hydrogen()->pretenure_flag()); 4738 instr->hydrogen()->pretenure_flag());
4524 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 4739 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4525 } 4740 }
4526 4741
4527 4742
4528 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { 4743 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4529 class DeferredStringCharCodeAt FINAL : public LDeferredCode { 4744 class DeferredStringCharCodeAt FINAL : public LDeferredCode {
4530 public: 4745 public:
4531 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr) 4746 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
4532 : LDeferredCode(codegen), instr_(instr) { } 4747 : LDeferredCode(codegen), instr_(instr) {}
4533 virtual void Generate() OVERRIDE { 4748 virtual void Generate() OVERRIDE {
4534 codegen()->DoDeferredStringCharCodeAt(instr_); 4749 codegen()->DoDeferredStringCharCodeAt(instr_);
4535 } 4750 }
4536 virtual LInstruction* instr() OVERRIDE { return instr_; } 4751 virtual LInstruction* instr() OVERRIDE { return instr_; }
4752
4537 private: 4753 private:
4538 LStringCharCodeAt* instr_; 4754 LStringCharCodeAt* instr_;
4539 }; 4755 };
4540 4756
4541 DeferredStringCharCodeAt* deferred = 4757 DeferredStringCharCodeAt* deferred =
4542 new(zone()) DeferredStringCharCodeAt(this, instr); 4758 new (zone()) DeferredStringCharCodeAt(this, instr);
4543 4759
4544 StringCharLoadGenerator::Generate(masm(), 4760 StringCharLoadGenerator::Generate(
4545 ToRegister(instr->string()), 4761 masm(), ToRegister(instr->string()), ToRegister(instr->index()),
4546 ToRegister(instr->index()), 4762 ToRegister(instr->result()), deferred->entry());
4547 ToRegister(instr->result()),
4548 deferred->entry());
4549 __ bind(deferred->exit()); 4763 __ bind(deferred->exit());
4550 } 4764 }
4551 4765
4552 4766
4553 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) { 4767 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4554 Register string = ToRegister(instr->string()); 4768 Register string = ToRegister(instr->string());
4555 Register result = ToRegister(instr->result()); 4769 Register result = ToRegister(instr->result());
4556 Register scratch = scratch0(); 4770 Register scratch = scratch0();
4557 4771
4558 // TODO(3095996): Get rid of this. For now, we need to make the 4772 // TODO(3095996): Get rid of this. For now, we need to make the
4559 // result register contain a valid pointer because it is already 4773 // result register contain a valid pointer because it is already
4560 // contained in the register pointer map. 4774 // contained in the register pointer map.
4561 __ mov(result, Operand::Zero()); 4775 __ li(result, Operand::Zero());
4562 4776
4563 PushSafepointRegistersScope scope(this); 4777 PushSafepointRegistersScope scope(this);
4564 __ push(string); 4778 __ push(string);
4565 // Push the index as a smi. This is safe because of the checks in 4779 // Push the index as a smi. This is safe because of the checks in
4566 // DoStringCharCodeAt above. 4780 // DoStringCharCodeAt above.
4567 if (instr->index()->IsConstantOperand()) { 4781 if (instr->index()->IsConstantOperand()) {
4568 int const_index = ToInteger32(LConstantOperand::cast(instr->index())); 4782 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
4569 __ mov(scratch, Operand(Smi::FromInt(const_index))); 4783 __ LoadSmiLiteral(scratch, Smi::FromInt(const_index));
4570 __ push(scratch); 4784 __ push(scratch);
4571 } else { 4785 } else {
4572 Register index = ToRegister(instr->index()); 4786 Register index = ToRegister(instr->index());
4573 __ SmiTag(index); 4787 __ SmiTag(index);
4574 __ push(index); 4788 __ push(index);
4575 } 4789 }
4576 CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr, 4790 CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
4577 instr->context()); 4791 instr->context());
4578 __ AssertSmi(r0); 4792 __ AssertSmi(r3);
4579 __ SmiUntag(r0); 4793 __ SmiUntag(r3);
4580 __ StoreToSafepointRegisterSlot(r0, result); 4794 __ StoreToSafepointRegisterSlot(r3, result);
4581 } 4795 }
4582 4796
4583 4797
4584 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { 4798 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4585 class DeferredStringCharFromCode FINAL : public LDeferredCode { 4799 class DeferredStringCharFromCode FINAL : public LDeferredCode {
4586 public: 4800 public:
4587 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr) 4801 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
4588 : LDeferredCode(codegen), instr_(instr) { } 4802 : LDeferredCode(codegen), instr_(instr) {}
4589 virtual void Generate() OVERRIDE { 4803 virtual void Generate() OVERRIDE {
4590 codegen()->DoDeferredStringCharFromCode(instr_); 4804 codegen()->DoDeferredStringCharFromCode(instr_);
4591 } 4805 }
4592 virtual LInstruction* instr() OVERRIDE { return instr_; } 4806 virtual LInstruction* instr() OVERRIDE { return instr_; }
4807
4593 private: 4808 private:
4594 LStringCharFromCode* instr_; 4809 LStringCharFromCode* instr_;
4595 }; 4810 };
4596 4811
4597 DeferredStringCharFromCode* deferred = 4812 DeferredStringCharFromCode* deferred =
4598 new(zone()) DeferredStringCharFromCode(this, instr); 4813 new (zone()) DeferredStringCharFromCode(this, instr);
4599 4814
4600 DCHECK(instr->hydrogen()->value()->representation().IsInteger32()); 4815 DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
4601 Register char_code = ToRegister(instr->char_code()); 4816 Register char_code = ToRegister(instr->char_code());
4602 Register result = ToRegister(instr->result()); 4817 Register result = ToRegister(instr->result());
4603 DCHECK(!char_code.is(result)); 4818 DCHECK(!char_code.is(result));
4604 4819
4605 __ cmp(char_code, Operand(String::kMaxOneByteCharCode)); 4820 __ cmpli(char_code, Operand(String::kMaxOneByteCharCode));
4606 __ b(hi, deferred->entry()); 4821 __ bgt(deferred->entry());
4607 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex); 4822 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
4608 __ add(result, result, Operand(char_code, LSL, kPointerSizeLog2)); 4823 __ ShiftLeftImm(r0, char_code, Operand(kPointerSizeLog2));
4609 __ ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize)); 4824 __ add(result, result, r0);
4825 __ LoadP(result, FieldMemOperand(result, FixedArray::kHeaderSize));
4610 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); 4826 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4611 __ cmp(result, ip); 4827 __ cmp(result, ip);
4612 __ b(eq, deferred->entry()); 4828 __ beq(deferred->entry());
4613 __ bind(deferred->exit()); 4829 __ bind(deferred->exit());
4614 } 4830 }
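The fast path above indexes a single-character string cache by char code and bails to the deferred runtime call when the cached slot is the undefined sentinel (or the code exceeds kMaxOneByteCharCode). The shape of that lookup, with a null pointer standing in for undefined:

    #include <cassert>
    #include <string>

    static const int kMaxOneByteCharCode = 0xff;              // as used above
    static std::string* cache[kMaxOneByteCharCode + 1] = {};  // nullptr == undefined

    // Sketch: return the cached one-character string, or fall back to the
    // slow path (which would normally also populate the cache).
    std::string CharFromCode(int code) {
      if (code > kMaxOneByteCharCode || cache[code] == nullptr) {
        return std::string(1, static_cast<char>(code));       // deferred path
      }
      return *cache[code];                                    // fast path
    }

    int main() {
      assert(CharFromCode('A') == "A");
      return 0;
    }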
4615 4831
4616 4832
4617 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) { 4833 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4618 Register char_code = ToRegister(instr->char_code()); 4834 Register char_code = ToRegister(instr->char_code());
4619 Register result = ToRegister(instr->result()); 4835 Register result = ToRegister(instr->result());
4620 4836
4621 // TODO(3095996): Get rid of this. For now, we need to make the 4837 // TODO(3095996): Get rid of this. For now, we need to make the
4622 // result register contain a valid pointer because it is already 4838 // result register contain a valid pointer because it is already
4623 // contained in the register pointer map. 4839 // contained in the register pointer map.
4624 __ mov(result, Operand::Zero()); 4840 __ li(result, Operand::Zero());
4625 4841
4626 PushSafepointRegistersScope scope(this); 4842 PushSafepointRegistersScope scope(this);
4627 __ SmiTag(char_code); 4843 __ SmiTag(char_code);
4628 __ push(char_code); 4844 __ push(char_code);
4629 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context()); 4845 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
4630 __ StoreToSafepointRegisterSlot(r0, result); 4846 __ StoreToSafepointRegisterSlot(r3, result);
4631 } 4847 }
4632 4848
4633 4849
4634 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { 4850 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4635 LOperand* input = instr->value(); 4851 LOperand* input = instr->value();
4636 DCHECK(input->IsRegister() || input->IsStackSlot()); 4852 DCHECK(input->IsRegister() || input->IsStackSlot());
4637 LOperand* output = instr->result(); 4853 LOperand* output = instr->result();
4638 DCHECK(output->IsDoubleRegister()); 4854 DCHECK(output->IsDoubleRegister());
4639 SwVfpRegister single_scratch = double_scratch0().low();
4640 if (input->IsStackSlot()) { 4855 if (input->IsStackSlot()) {
4641 Register scratch = scratch0(); 4856 Register scratch = scratch0();
4642 __ ldr(scratch, ToMemOperand(input)); 4857 __ LoadP(scratch, ToMemOperand(input));
4643 __ vmov(single_scratch, scratch); 4858 __ ConvertIntToDouble(scratch, ToDoubleRegister(output));
4644 } else { 4859 } else {
4645 __ vmov(single_scratch, ToRegister(input)); 4860 __ ConvertIntToDouble(ToRegister(input), ToDoubleRegister(output));
4646 } 4861 }
4647 __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch);
4648 } 4862 }
4649 4863
4650 4864
4651 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { 4865 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4652 LOperand* input = instr->value(); 4866 LOperand* input = instr->value();
4653 LOperand* output = instr->result(); 4867 LOperand* output = instr->result();
4654 4868 __ ConvertUnsignedIntToDouble(ToRegister(input), ToDoubleRegister(output));
4655 SwVfpRegister flt_scratch = double_scratch0().low();
4656 __ vmov(flt_scratch, ToRegister(input));
4657 __ vcvt_f64_u32(ToDoubleRegister(output), flt_scratch);
4658 } 4869 }
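Separate signed and unsigned conversion helpers are required because the same 32-bit pattern maps to different doubles once the top bit is set. A one-line check of the divergence:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t bits = 0x80000000u;
      // As uint32 this is 2^31; reinterpreted as int32 it is -2^31.
      assert(static_cast<double>(bits) == 2147483648.0);
      assert(static_cast<double>(static_cast<int32_t>(bits)) == -2147483648.0);
      return 0;
    }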
4659 4870
4660 4871
4661 void LCodeGen::DoNumberTagI(LNumberTagI* instr) { 4872 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4662 class DeferredNumberTagI FINAL : public LDeferredCode { 4873 class DeferredNumberTagI FINAL : public LDeferredCode {
4663 public: 4874 public:
4664 DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr) 4875 DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
4665 : LDeferredCode(codegen), instr_(instr) { } 4876 : LDeferredCode(codegen), instr_(instr) {}
4666 virtual void Generate() OVERRIDE { 4877 virtual void Generate() OVERRIDE {
4667 codegen()->DoDeferredNumberTagIU(instr_, 4878 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
4668 instr_->value(), 4879 instr_->temp2(), SIGNED_INT32);
4669 instr_->temp1(),
4670 instr_->temp2(),
4671 SIGNED_INT32);
4672 } 4880 }
4673 virtual LInstruction* instr() OVERRIDE { return instr_; } 4881 virtual LInstruction* instr() OVERRIDE { return instr_; }
4882
4674 private: 4883 private:
4675 LNumberTagI* instr_; 4884 LNumberTagI* instr_;
4676 }; 4885 };
4677 4886
4678 Register src = ToRegister(instr->value()); 4887 Register src = ToRegister(instr->value());
4679 Register dst = ToRegister(instr->result()); 4888 Register dst = ToRegister(instr->result());
4680 4889
4681 DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr); 4890 DeferredNumberTagI* deferred = new (zone()) DeferredNumberTagI(this, instr);
4682 __ SmiTag(dst, src, SetCC); 4891 #if V8_TARGET_ARCH_PPC64
4683 __ b(vs, deferred->entry()); 4892 __ SmiTag(dst, src);
4893 #else
4894 __ SmiTagCheckOverflow(dst, src, r0);
4895 __ BranchOnOverflow(deferred->entry());
4896 #endif
4684 __ bind(deferred->exit()); 4897 __ bind(deferred->exit());
4685 } 4898 }
4686 4899
4687 4900
4688 void LCodeGen::DoNumberTagU(LNumberTagU* instr) { 4901 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4689 class DeferredNumberTagU FINAL : public LDeferredCode { 4902 class DeferredNumberTagU FINAL : public LDeferredCode {
4690 public: 4903 public:
4691 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr) 4904 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4692 : LDeferredCode(codegen), instr_(instr) { } 4905 : LDeferredCode(codegen), instr_(instr) {}
4693 virtual void Generate() OVERRIDE { 4906 virtual void Generate() OVERRIDE {
4694 codegen()->DoDeferredNumberTagIU(instr_, 4907 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
4695 instr_->value(), 4908 instr_->temp2(), UNSIGNED_INT32);
4696 instr_->temp1(),
4697 instr_->temp2(),
4698 UNSIGNED_INT32);
4699 } 4909 }
4700 virtual LInstruction* instr() OVERRIDE { return instr_; } 4910 virtual LInstruction* instr() OVERRIDE { return instr_; }
4911
4701 private: 4912 private:
4702 LNumberTagU* instr_; 4913 LNumberTagU* instr_;
4703 }; 4914 };
4704 4915
4705 Register input = ToRegister(instr->value()); 4916 Register input = ToRegister(instr->value());
4706 Register result = ToRegister(instr->result()); 4917 Register result = ToRegister(instr->result());
4707 4918
4708 DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr); 4919 DeferredNumberTagU* deferred = new (zone()) DeferredNumberTagU(this, instr);
4709 __ cmp(input, Operand(Smi::kMaxValue)); 4920 __ Cmpli(input, Operand(Smi::kMaxValue), r0);
4710 __ b(hi, deferred->entry()); 4921 __ bgt(deferred->entry());
4711 __ SmiTag(result, input); 4922 __ SmiTag(result, input);
4712 __ bind(deferred->exit()); 4923 __ bind(deferred->exit());
4713 } 4924 }
4714 4925
4715 4926
4716 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, 4927 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, LOperand* value,
4717 LOperand* value, 4928 LOperand* temp1, LOperand* temp2,
4718 LOperand* temp1,
4719 LOperand* temp2,
4720 IntegerSignedness signedness) { 4929 IntegerSignedness signedness) {
4721 Label done, slow; 4930 Label done, slow;
4722 Register src = ToRegister(value); 4931 Register src = ToRegister(value);
4723 Register dst = ToRegister(instr->result()); 4932 Register dst = ToRegister(instr->result());
4724 Register tmp1 = scratch0(); 4933 Register tmp1 = scratch0();
4725 Register tmp2 = ToRegister(temp1); 4934 Register tmp2 = ToRegister(temp1);
4726 Register tmp3 = ToRegister(temp2); 4935 Register tmp3 = ToRegister(temp2);
4727 LowDwVfpRegister dbl_scratch = double_scratch0(); 4936 DoubleRegister dbl_scratch = double_scratch0();
4728 4937
4729 if (signedness == SIGNED_INT32) { 4938 if (signedness == SIGNED_INT32) {
4730 // There was overflow, so bits 30 and 31 of the original integer 4939 // There was overflow, so bits 30 and 31 of the original integer
4731 // disagree. Try to allocate a heap number in new space and store 4940 // disagree. Try to allocate a heap number in new space and store
4732 // the value in there. If that fails, call the runtime system. 4941 // the value in there. If that fails, call the runtime system.
4733 if (dst.is(src)) { 4942 if (dst.is(src)) {
4734 __ SmiUntag(src, dst); 4943 __ SmiUntag(src, dst);
4735 __ eor(src, src, Operand(0x80000000)); 4944 __ xoris(src, src, Operand(HeapNumber::kSignMask >> 16));
4736 } 4945 }
4737 __ vmov(dbl_scratch.low(), src); 4946 __ ConvertIntToDouble(src, dbl_scratch);
4738 __ vcvt_f64_s32(dbl_scratch, dbl_scratch.low());
4739 } else { 4947 } else {
4740 __ vmov(dbl_scratch.low(), src); 4948 __ ConvertUnsignedIntToDouble(src, dbl_scratch);
4741 __ vcvt_f64_u32(dbl_scratch, dbl_scratch.low());
4742 } 4949 }
4743 4950
4744 if (FLAG_inline_new) { 4951 if (FLAG_inline_new) {
4745 __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex); 4952 __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
4746 __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, DONT_TAG_RESULT); 4953 __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow);
4747 __ b(&done); 4954 __ b(&done);
4748 } 4955 }
4749 4956
4750 // Slow case: Call the runtime system to do the number allocation. 4957 // Slow case: Call the runtime system to do the number allocation.
4751 __ bind(&slow); 4958 __ bind(&slow);
4752 { 4959 {
4753 // TODO(3095996): Put a valid pointer value in the stack slot where the 4960 // TODO(3095996): Put a valid pointer value in the stack slot where the
4754 // result register is stored, as this register is in the pointer map, but 4961 // result register is stored, as this register is in the pointer map, but
4755 // contains an integer value. 4962 // contains an integer value.
4756 __ mov(dst, Operand::Zero()); 4963 __ li(dst, Operand::Zero());
4757 4964
4758 // Preserve the value of all registers. 4965 // Preserve the value of all registers.
4759 PushSafepointRegistersScope scope(this); 4966 PushSafepointRegistersScope scope(this);
4760 4967
4761 // NumberTagI and NumberTagD use the context from the frame, rather than 4968 // NumberTagI and NumberTagD use the context from the frame, rather than
4762 // the environment's HContext or HInlinedContext value. 4969 // the environment's HContext or HInlinedContext value.
4763 // They only call Runtime::kAllocateHeapNumber. 4970 // They only call Runtime::kAllocateHeapNumber.
4764 // The corresponding HChange instructions are added in a phase that does 4971 // The corresponding HChange instructions are added in a phase that does
4765 // not have easy access to the local context. 4972 // not have easy access to the local context.
4766 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); 4973 __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4767 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); 4974 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4768 RecordSafepointWithRegisters( 4975 RecordSafepointWithRegisters(instr->pointer_map(), 0,
4769 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); 4976 Safepoint::kNoLazyDeopt);
4770 __ sub(r0, r0, Operand(kHeapObjectTag)); 4977 __ StoreToSafepointRegisterSlot(r3, dst);
4771 __ StoreToSafepointRegisterSlot(r0, dst);
4772 } 4978 }
4773 4979
4774 // Done. Put the value in dbl_scratch into the value of the allocated heap 4980 // Done. Put the value in dbl_scratch into the value of the allocated heap
4775 // number. 4981 // number.
4776 __ bind(&done); 4982 __ bind(&done);
4777 __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset); 4983 __ stfd(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
4778 __ add(dst, dst, Operand(kHeapObjectTag));
4779 } 4984 }
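The xoris in the SIGNED_INT32 path (xor of the upper halfword with HeapNumber::kSignMask >> 16, i.e. flipping bit 31) recovers the original value when dst aliases src: a 32-bit SmiTag is a left shift by one, so overflow corrupts exactly the sign bit, and untagging then flipping that bit restores the input. A worked example of the recovery, under those 32-bit assumptions:

    #include <cassert>
    #include <cstdint>

    int main() {
      int32_t src = 0x40000000;   // 2^30: SmiTag overflows 31 bits
      int32_t tagged = static_cast<int32_t>(static_cast<uint32_t>(src) << 1);
      int32_t untagged = tagged >> 1;             // arithmetic: 0xC0000000
      int32_t recovered = untagged ^ INT32_MIN;   // flip the corrupted bit 31
      assert(recovered == src);
      return 0;
    }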
4780 4985
4781 4986
4782 void LCodeGen::DoNumberTagD(LNumberTagD* instr) { 4987 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4783 class DeferredNumberTagD FINAL : public LDeferredCode { 4988 class DeferredNumberTagD FINAL : public LDeferredCode {
4784 public: 4989 public:
4785 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) 4990 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4786 : LDeferredCode(codegen), instr_(instr) { } 4991 : LDeferredCode(codegen), instr_(instr) {}
4787 virtual void Generate() OVERRIDE { 4992 virtual void Generate() OVERRIDE {
4788 codegen()->DoDeferredNumberTagD(instr_); 4993 codegen()->DoDeferredNumberTagD(instr_);
4789 } 4994 }
4790 virtual LInstruction* instr() OVERRIDE { return instr_; } 4995 virtual LInstruction* instr() OVERRIDE { return instr_; }
4996
4791 private: 4997 private:
4792 LNumberTagD* instr_; 4998 LNumberTagD* instr_;
4793 }; 4999 };
4794 5000
4795 DwVfpRegister input_reg = ToDoubleRegister(instr->value()); 5001 DoubleRegister input_reg = ToDoubleRegister(instr->value());
4796 Register scratch = scratch0(); 5002 Register scratch = scratch0();
4797 Register reg = ToRegister(instr->result()); 5003 Register reg = ToRegister(instr->result());
4798 Register temp1 = ToRegister(instr->temp()); 5004 Register temp1 = ToRegister(instr->temp());
4799 Register temp2 = ToRegister(instr->temp2()); 5005 Register temp2 = ToRegister(instr->temp2());
4800 5006
4801 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); 5007 DeferredNumberTagD* deferred = new (zone()) DeferredNumberTagD(this, instr);
4802 if (FLAG_inline_new) { 5008 if (FLAG_inline_new) {
4803 __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex); 5009 __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
4804 // We want the untagged address first for performance 5010 __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
4805 __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
4806 DONT_TAG_RESULT);
4807 } else { 5011 } else {
4808 __ jmp(deferred->entry()); 5012 __ b(deferred->entry());
4809 } 5013 }
4810 __ bind(deferred->exit()); 5014 __ bind(deferred->exit());
4811 __ vstr(input_reg, reg, HeapNumber::kValueOffset); 5015 __ stfd(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
4812 // Now that we have finished with the object's real address tag it
4813 __ add(reg, reg, Operand(kHeapObjectTag));
4814 } 5016 }
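The ARM version allocated the heap number untagged (DONT_TAG_RESULT) and re-tagged after the vstr; the PPC version keeps the tagged pointer throughout and lets FieldMemOperand fold the -kHeapObjectTag adjustment into the load/store displacement. The address arithmetic, with kHeapObjectTag == 1 and a hypothetical field offset:

    #include <cassert>
    #include <cstdint>

    int main() {
      const intptr_t kHeapObjectTag = 1;
      intptr_t object = 0x1000;                    // untagged object address
      intptr_t tagged = object + kHeapObjectTag;   // how heap pointers are passed
      const intptr_t kValueOffset = 8;             // hypothetical field offset

      // FieldMemOperand(reg, off) addresses reg + off - kHeapObjectTag, so a
      // tagged base plus the folded displacement reaches the real field.
      assert(tagged + (kValueOffset - kHeapObjectTag) == object + kValueOffset);
      return 0;
    }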
4815 5017
4816 5018
4817 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { 5019 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4818 // TODO(3095996): Get rid of this. For now, we need to make the 5020 // TODO(3095996): Get rid of this. For now, we need to make the
4819 // result register contain a valid pointer because it is already 5021 // result register contain a valid pointer because it is already
4820 // contained in the register pointer map. 5022 // contained in the register pointer map.
4821 Register reg = ToRegister(instr->result()); 5023 Register reg = ToRegister(instr->result());
4822 __ mov(reg, Operand::Zero()); 5024 __ li(reg, Operand::Zero());
4823 5025
4824 PushSafepointRegistersScope scope(this); 5026 PushSafepointRegistersScope scope(this);
4825 // NumberTagI and NumberTagD use the context from the frame, rather than 5027 // NumberTagI and NumberTagD use the context from the frame, rather than
4826 // the environment's HContext or HInlinedContext value. 5028 // the environment's HContext or HInlinedContext value.
4827 // They only call Runtime::kAllocateHeapNumber. 5029 // They only call Runtime::kAllocateHeapNumber.
4828 // The corresponding HChange instructions are added in a phase that does 5030 // The corresponding HChange instructions are added in a phase that does
4829 // not have easy access to the local context. 5031 // not have easy access to the local context.
4830 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); 5032 __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4831 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); 5033 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4832 RecordSafepointWithRegisters( 5034 RecordSafepointWithRegisters(instr->pointer_map(), 0,
4833 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); 5035 Safepoint::kNoLazyDeopt);
4834 __ sub(r0, r0, Operand(kHeapObjectTag)); 5036 __ StoreToSafepointRegisterSlot(r3, reg);
4835 __ StoreToSafepointRegisterSlot(r0, reg);
4836 } 5037 }
4837 5038
4838 5039
4839 void LCodeGen::DoSmiTag(LSmiTag* instr) { 5040 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4840 HChange* hchange = instr->hydrogen(); 5041 HChange* hchange = instr->hydrogen();
4841 Register input = ToRegister(instr->value()); 5042 Register input = ToRegister(instr->value());
4842 Register output = ToRegister(instr->result()); 5043 Register output = ToRegister(instr->result());
4843 if (hchange->CheckFlag(HValue::kCanOverflow) && 5044 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4844 hchange->value()->CheckFlag(HValue::kUint32)) { 5045 hchange->value()->CheckFlag(HValue::kUint32)) {
4845 __ tst(input, Operand(0xc0000000)); 5046 __ TestUnsignedSmiCandidate(input, r0);
4846 DeoptimizeIf(ne, instr, "overflow"); 5047 DeoptimizeIf(ne, instr, "overflow", cr0);
4847 } 5048 }
5049 #if !V8_TARGET_ARCH_PPC64
4848 if (hchange->CheckFlag(HValue::kCanOverflow) && 5050 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4849 !hchange->value()->CheckFlag(HValue::kUint32)) { 5051 !hchange->value()->CheckFlag(HValue::kUint32)) {
4850 __ SmiTag(output, input, SetCC); 5052 __ SmiTagCheckOverflow(output, input, r0);
4851 DeoptimizeIf(vs, instr, "overflow"); 5053 DeoptimizeIf(lt, instr, "overflow", cr0);
4852 } else { 5054 } else {
5055 #endif
4853 __ SmiTag(output, input); 5056 __ SmiTag(output, input);
5057 #if !V8_TARGET_ARCH_PPC64
4854 } 5058 }
5059 #endif
4855 } 5060 }
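TestUnsignedSmiCandidate plays the role of ARM's `tst input, 0xc0000000`: a uint32 fits a 32-bit Smi only below 2^30, since one bit goes to the tag and one to the sign, so both top bits must be clear. A sketch of that 32-bit predicate:

    #include <cassert>
    #include <cstdint>

    // A uint32 is a valid 32-bit Smi candidate only if its top two bits
    // are clear (one bit lost to the tag, one to the sign).
    bool IsUnsignedSmiCandidate(uint32_t v) { return (v & 0xC0000000u) == 0; }

    int main() {
      assert(IsUnsignedSmiCandidate(0x3FFFFFFFu));    // 2^30 - 1 fits
      assert(!IsUnsignedSmiCandidate(0x40000000u));   // 2^30 deopts ("overflow")
      return 0;
    }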
4856 5061
4857 5062
4858 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { 5063 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
5064 Register scratch = scratch0();
4859 Register input = ToRegister(instr->value()); 5065 Register input = ToRegister(instr->value());
4860 Register result = ToRegister(instr->result()); 5066 Register result = ToRegister(instr->result());
4861 if (instr->needs_check()) { 5067 if (instr->needs_check()) {
4862 STATIC_ASSERT(kHeapObjectTag == 1); 5068 STATIC_ASSERT(kHeapObjectTag == 1);
 4863 // If the input is a HeapObject, SmiUntag will set the carry flag. 5069 // If the input is a HeapObject, the value of scratch won't be zero.
4864 __ SmiUntag(result, input, SetCC); 5070 __ andi(scratch, input, Operand(kHeapObjectTag));
4865 DeoptimizeIf(cs, instr, "not a Smi"); 5071 __ SmiUntag(result, input);
5072 DeoptimizeIf(ne, instr, "not a Smi", cr0);
4866 } else { 5073 } else {
4867 __ SmiUntag(result, input); 5074 __ SmiUntag(result, input);
4868 } 5075 }
4869 } 5076 }
4870 5077
4871 5078
4872 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg, 5079 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
4873 DwVfpRegister result_reg, 5080 DoubleRegister result_reg,
4874 NumberUntagDMode mode) { 5081 NumberUntagDMode mode) {
4875 bool can_convert_undefined_to_nan = 5082 bool can_convert_undefined_to_nan =
4876 instr->hydrogen()->can_convert_undefined_to_nan(); 5083 instr->hydrogen()->can_convert_undefined_to_nan();
4877 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero(); 5084 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
4878 5085
4879 Register scratch = scratch0(); 5086 Register scratch = scratch0();
4880 SwVfpRegister flt_scratch = double_scratch0().low();
4881 DCHECK(!result_reg.is(double_scratch0())); 5087 DCHECK(!result_reg.is(double_scratch0()));
5088
4882 Label convert, load_smi, done; 5089 Label convert, load_smi, done;
5090
4883 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { 5091 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
4884 // Smi check. 5092 // Smi check.
4885 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); 5093 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
5094
4886 // Heap number map check. 5095 // Heap number map check.
4887 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); 5096 __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4888 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); 5097 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4889 __ cmp(scratch, Operand(ip)); 5098 __ cmp(scratch, ip);
4890 if (can_convert_undefined_to_nan) { 5099 if (can_convert_undefined_to_nan) {
4891 __ b(ne, &convert); 5100 __ bne(&convert);
4892 } else { 5101 } else {
4893 DeoptimizeIf(ne, instr, "not a heap number"); 5102 DeoptimizeIf(ne, instr, "not a heap number");
4894 } 5103 }
 4895 // Load the heap number value. 5104 // Load the heap number value.
4896 __ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag); 5105 __ lfd(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
4897 if (deoptimize_on_minus_zero) { 5106 if (deoptimize_on_minus_zero) {
4898 __ VmovLow(scratch, result_reg); 5107 #if V8_TARGET_ARCH_PPC64
4899 __ cmp(scratch, Operand::Zero()); 5108 __ MovDoubleToInt64(scratch, result_reg);
4900 __ b(ne, &done); 5109 // rotate left by one for simple compare.
4901 __ VmovHigh(scratch, result_reg); 5110 __ rldicl(scratch, scratch, 1, 0);
4902 __ cmp(scratch, Operand(HeapNumber::kSignMask)); 5111 __ cmpi(scratch, Operand(1));
5112 #else
5113 __ MovDoubleToInt64(scratch, ip, result_reg);
5114 __ cmpi(ip, Operand::Zero());
5115 __ bne(&done);
5116 __ Cmpi(scratch, Operand(HeapNumber::kSignMask), r0);
5117 #endif
4903 DeoptimizeIf(eq, instr, "minus zero"); 5118 DeoptimizeIf(eq, instr, "minus zero");
4904 } 5119 }
4905 __ jmp(&done); 5120 __ b(&done);
4906 if (can_convert_undefined_to_nan) { 5121 if (can_convert_undefined_to_nan) {
4907 __ bind(&convert); 5122 __ bind(&convert);
4908 // Convert undefined (and hole) to NaN. 5123 // Convert undefined (and hole) to NaN.
4909 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); 5124 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4910 __ cmp(input_reg, Operand(ip)); 5125 __ cmp(input_reg, ip);
4911 DeoptimizeIf(ne, instr, "not a heap number/undefined"); 5126 DeoptimizeIf(ne, instr, "not a heap number/undefined");
4912 __ LoadRoot(scratch, Heap::kNanValueRootIndex); 5127 __ LoadRoot(scratch, Heap::kNanValueRootIndex);
4913 __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag); 5128 __ lfd(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
4914 __ jmp(&done); 5129 __ b(&done);
4915 } 5130 }
4916 } else { 5131 } else {
4917 __ SmiUntag(scratch, input_reg); 5132 __ SmiUntag(scratch, input_reg);
4918 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); 5133 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
4919 } 5134 }
4920 // Smi to double register conversion 5135 // Smi to double register conversion
4921 __ bind(&load_smi); 5136 __ bind(&load_smi);
4922 // scratch: untagged value of input_reg 5137 // scratch: untagged value of input_reg
4923 __ vmov(flt_scratch, scratch); 5138 __ ConvertIntToDouble(scratch, result_reg);
4924 __ vcvt_f64_s32(result_reg, flt_scratch);
4925 __ bind(&done); 5139 __ bind(&done);
4926 } 5140 }
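The PPC64 minus-zero check above is a neat bit trick: -0.0 is the only double whose bit pattern is 0x8000000000000000, and rotating that left by one (the rldicl) yields exactly 1, so a single compare-with-1 detects it while +0.0 rotates to 0. A portable verification:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Rotate a 64-bit value left by one, as rldicl scratch, scratch, 1, 0 does.
    uint64_t RotL1(uint64_t v) { return (v << 1) | (v >> 63); }

    int main() {
      double minus_zero = -0.0, plus_zero = 0.0;
      uint64_t bits;
      std::memcpy(&bits, &minus_zero, sizeof bits);
      assert(RotL1(bits) == 1);   // only -0.0 rotates to exactly 1
      std::memcpy(&bits, &plus_zero, sizeof bits);
      assert(RotL1(bits) == 0);   // +0.0 fails the compare, no deopt
      return 0;
    }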
4927 5141
4928 5142
4929 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { 5143 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
4930 Register input_reg = ToRegister(instr->value()); 5144 Register input_reg = ToRegister(instr->value());
4931 Register scratch1 = scratch0(); 5145 Register scratch1 = scratch0();
4932 Register scratch2 = ToRegister(instr->temp()); 5146 Register scratch2 = ToRegister(instr->temp());
4933 LowDwVfpRegister double_scratch = double_scratch0(); 5147 DoubleRegister double_scratch = double_scratch0();
4934 DwVfpRegister double_scratch2 = ToDoubleRegister(instr->temp2()); 5148 DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());
4935 5149
4936 DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2)); 5150 DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
4937 DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1)); 5151 DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));
4938 5152
4939 Label done; 5153 Label done;
4940 5154
4941 // The input was optimistically untagged; revert it.
4942 // The carry flag is set when we reach this deferred code as we just executed
4943 // SmiUntag(heap_object, SetCC)
4944 STATIC_ASSERT(kHeapObjectTag == 1);
4945 __ adc(scratch2, input_reg, Operand(input_reg));
4946
4947 // Heap number map check. 5155 // Heap number map check.
4948 __ ldr(scratch1, FieldMemOperand(scratch2, HeapObject::kMapOffset)); 5156 __ LoadP(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4949 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); 5157 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4950 __ cmp(scratch1, Operand(ip)); 5158 __ cmp(scratch1, ip);
4951 5159
4952 if (instr->truncating()) { 5160 if (instr->truncating()) {
4953 // Performs a truncating conversion of a floating point number as used by 5161 // Performs a truncating conversion of a floating point number as used by
4954 // the JS bitwise operations. 5162 // the JS bitwise operations.
4955 Label no_heap_number, check_bools, check_false; 5163 Label no_heap_number, check_bools, check_false;
4956 __ b(ne, &no_heap_number); 5164 __ bne(&no_heap_number);
5165 __ mr(scratch2, input_reg);
4957 __ TruncateHeapNumberToI(input_reg, scratch2); 5166 __ TruncateHeapNumberToI(input_reg, scratch2);
4958 __ b(&done); 5167 __ b(&done);
4959 5168
4960 // Check for Oddballs. Undefined/False is converted to zero and True to one 5169 // Check for Oddballs. Undefined/False is converted to zero and True to one
4961 // for truncating conversions. 5170 // for truncating conversions.
4962 __ bind(&no_heap_number); 5171 __ bind(&no_heap_number);
4963 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); 5172 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4964 __ cmp(scratch2, Operand(ip)); 5173 __ cmp(input_reg, ip);
4965 __ b(ne, &check_bools); 5174 __ bne(&check_bools);
4966 __ mov(input_reg, Operand::Zero()); 5175 __ li(input_reg, Operand::Zero());
4967 __ b(&done); 5176 __ b(&done);
4968 5177
4969 __ bind(&check_bools); 5178 __ bind(&check_bools);
4970 __ LoadRoot(ip, Heap::kTrueValueRootIndex); 5179 __ LoadRoot(ip, Heap::kTrueValueRootIndex);
4971 __ cmp(scratch2, Operand(ip)); 5180 __ cmp(input_reg, ip);
4972 __ b(ne, &check_false); 5181 __ bne(&check_false);
4973 __ mov(input_reg, Operand(1)); 5182 __ li(input_reg, Operand(1));
4974 __ b(&done); 5183 __ b(&done);
4975 5184
4976 __ bind(&check_false); 5185 __ bind(&check_false);
4977 __ LoadRoot(ip, Heap::kFalseValueRootIndex); 5186 __ LoadRoot(ip, Heap::kFalseValueRootIndex);
4978 __ cmp(scratch2, Operand(ip)); 5187 __ cmp(input_reg, ip);
4979 DeoptimizeIf(ne, instr, "not a heap number/undefined/true/false"); 5188 DeoptimizeIf(ne, instr, "not a heap number/undefined/true/false", cr7);
4980 __ mov(input_reg, Operand::Zero()); 5189 __ li(input_reg, Operand::Zero());
4981 } else { 5190 } else {
4982 DeoptimizeIf(ne, instr, "not a heap number"); 5191 DeoptimizeIf(ne, instr, "not a heap number", cr7);
4983 5192
4984 __ sub(ip, scratch2, Operand(kHeapObjectTag)); 5193 __ lfd(double_scratch2,
4985 __ vldr(double_scratch2, ip, HeapNumber::kValueOffset); 5194 FieldMemOperand(input_reg, HeapNumber::kValueOffset));
4986 __ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch); 5195 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4987 DeoptimizeIf(ne, instr, "lost precision or NaN"); 5196 // Preserve the heap number pointer in scratch2 for the minus-zero check below.
5197 __ mr(scratch2, input_reg);
5198 }
5199 __ TryDoubleToInt32Exact(input_reg, double_scratch2, scratch1,
5200 double_scratch);
5201 DeoptimizeIf(ne, instr, "lost precision or NaN", cr7);
4988 5202
4989 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 5203 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4990 __ cmp(input_reg, Operand::Zero()); 5204 __ cmpi(input_reg, Operand::Zero());
4991 __ b(ne, &done); 5205 __ bne(&done);
4992 __ VmovHigh(scratch1, double_scratch2); 5206 __ lwz(scratch1,
4993 __ tst(scratch1, Operand(HeapNumber::kSignMask)); 5207 FieldMemOperand(scratch2, HeapNumber::kValueOffset +
4994 DeoptimizeIf(ne, instr, "minus zero"); 5208 Register::kExponentOffset));
5209 __ cmpwi(scratch1, Operand::Zero());
5210 DeoptimizeIf(lt, instr, "minus zero", cr7);
4995 } 5211 }
4996 } 5212 }
4997 __ bind(&done); 5213 __ bind(&done);
4998 } 5214 }
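
Note: the truncating branch above implements ECMAScript ToInt32 for the bitwise operators: the double is reduced modulo 2^32 and reinterpreted as signed; NaN and the infinities go to 0; and the oddballs undefined/false/true map to 0/0/1, exactly as the three compare-and-load blocks materialize. A rough standalone model (assumed semantics, not TruncateHeapNumberToI itself):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    static int32_t ToInt32(double value) {
      if (!std::isfinite(value)) return 0;    // NaN and +/-Infinity -> 0.
      double t = std::trunc(value);           // Drop the fractional part.
      double m = std::fmod(t, 4294967296.0);  // Reduce modulo 2^32 ...
      if (m < 0) m += 4294967296.0;           // ... into [0, 2^32).
      return static_cast<int32_t>(static_cast<uint32_t>(m));  // Wrap to signed.
    }

    int main() {
      std::printf("%d %d\n", ToInt32(4294967298.5), ToInt32(-1.5));  // 2 -1
    }
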
4999 5215
5000 5216
5001 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { 5217 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
5002 class DeferredTaggedToI FINAL : public LDeferredCode { 5218 class DeferredTaggedToI FINAL : public LDeferredCode {
5003 public: 5219 public:
5004 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) 5220 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
5005 : LDeferredCode(codegen), instr_(instr) { } 5221 : LDeferredCode(codegen), instr_(instr) {}
5006 virtual void Generate() OVERRIDE { 5222 virtual void Generate() OVERRIDE { codegen()->DoDeferredTaggedToI(instr_); }
5007 codegen()->DoDeferredTaggedToI(instr_);
5008 }
5009 virtual LInstruction* instr() OVERRIDE { return instr_; } 5223 virtual LInstruction* instr() OVERRIDE { return instr_; }
5224
5010 private: 5225 private:
5011 LTaggedToI* instr_; 5226 LTaggedToI* instr_;
5012 }; 5227 };
5013 5228
5014 LOperand* input = instr->value(); 5229 LOperand* input = instr->value();
5015 DCHECK(input->IsRegister()); 5230 DCHECK(input->IsRegister());
5016 DCHECK(input->Equals(instr->result())); 5231 DCHECK(input->Equals(instr->result()));
5017 5232
5018 Register input_reg = ToRegister(input); 5233 Register input_reg = ToRegister(input);
5019 5234
5020 if (instr->hydrogen()->value()->representation().IsSmi()) { 5235 if (instr->hydrogen()->value()->representation().IsSmi()) {
5021 __ SmiUntag(input_reg); 5236 __ SmiUntag(input_reg);
5022 } else { 5237 } else {
5023 DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr); 5238 DeferredTaggedToI* deferred = new (zone()) DeferredTaggedToI(this, instr);
5024 5239
5025 // Optimistically untag the input. 5240 // Branch to deferred code if the input is a HeapObject.
5026 // If the input is a HeapObject, SmiUntag will set the carry flag. 5241 __ JumpIfNotSmi(input_reg, deferred->entry());
5027 __ SmiUntag(input_reg, SetCC); 5242
5028 // Branch to deferred code if the input was tagged. 5243 __ SmiUntag(input_reg);
5029 // The deferred code will take care of restoring the tag.
5030 __ b(cs, deferred->entry());
5031 __ bind(deferred->exit()); 5244 __ bind(deferred->exit());
5032 } 5245 }
5033 } 5246 }
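
Note: JumpIfNotSmi/SmiUntag above rely on V8's pointer-tagging scheme: a small integer ("smi") is stored shifted left one bit with tag 0, while heap pointers carry tag 1 (hence the STATIC_ASSERT(kHeapObjectTag == 1) in the removed ARM path), so smi-ness is a single low-bit test. Sketch with illustrative constants (the real definitions live in V8's headers):

    #include <cstdint>
    #include <cstdio>

    const int kSmiTagSize = 1;
    const intptr_t kSmiTag = 0;
    const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;  // Low bit.

    static bool IsSmi(intptr_t tagged) { return (tagged & kSmiTagMask) == kSmiTag; }
    static intptr_t SmiTag(intptr_t value) {
      return static_cast<intptr_t>(static_cast<uintptr_t>(value) << kSmiTagSize);
    }
    static intptr_t SmiUntag(intptr_t tagged) { return tagged >> kSmiTagSize; }

    int main() {
      intptr_t t = SmiTag(42);
      std::printf("%d %ld\n", IsSmi(t), static_cast<long>(SmiUntag(t)));  // 1 42
    }
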
5034 5247
5035 5248
5036 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { 5249 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
5037 LOperand* input = instr->value(); 5250 LOperand* input = instr->value();
5038 DCHECK(input->IsRegister()); 5251 DCHECK(input->IsRegister());
5039 LOperand* result = instr->result(); 5252 LOperand* result = instr->result();
5040 DCHECK(result->IsDoubleRegister()); 5253 DCHECK(result->IsDoubleRegister());
5041 5254
5042 Register input_reg = ToRegister(input); 5255 Register input_reg = ToRegister(input);
5043 DwVfpRegister result_reg = ToDoubleRegister(result); 5256 DoubleRegister result_reg = ToDoubleRegister(result);
5044 5257
5045 HValue* value = instr->hydrogen()->value(); 5258 HValue* value = instr->hydrogen()->value();
5046 NumberUntagDMode mode = value->representation().IsSmi() 5259 NumberUntagDMode mode = value->representation().IsSmi()
5047 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED; 5260 ? NUMBER_CANDIDATE_IS_SMI
5261 : NUMBER_CANDIDATE_IS_ANY_TAGGED;
5048 5262
5049 EmitNumberUntagD(instr, input_reg, result_reg, mode); 5263 EmitNumberUntagD(instr, input_reg, result_reg, mode);
5050 } 5264 }
5051 5265
5052 5266
5053 void LCodeGen::DoDoubleToI(LDoubleToI* instr) { 5267 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
5054 Register result_reg = ToRegister(instr->result()); 5268 Register result_reg = ToRegister(instr->result());
5055 Register scratch1 = scratch0(); 5269 Register scratch1 = scratch0();
5056 DwVfpRegister double_input = ToDoubleRegister(instr->value()); 5270 DoubleRegister double_input = ToDoubleRegister(instr->value());
5057 LowDwVfpRegister double_scratch = double_scratch0(); 5271 DoubleRegister double_scratch = double_scratch0();
5058 5272
5059 if (instr->truncating()) { 5273 if (instr->truncating()) {
5060 __ TruncateDoubleToI(result_reg, double_input); 5274 __ TruncateDoubleToI(result_reg, double_input);
5061 } else { 5275 } else {
5062 __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch); 5276 __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
5277 double_scratch);
5063 // Deoptimize if the input wasn't an int32 (inside a double). 5278 // Deoptimize if the input wasn't an int32 (inside a double).
5064 DeoptimizeIf(ne, instr, "lost precision or NaN"); 5279 DeoptimizeIf(ne, instr, "lost precision or NaN");
5065 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 5280 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
5066 Label done; 5281 Label done;
5067 __ cmp(result_reg, Operand::Zero()); 5282 __ cmpi(result_reg, Operand::Zero());
5068 __ b(ne, &done); 5283 __ bne(&done);
5069 __ VmovHigh(scratch1, double_input); 5284 #if V8_TARGET_ARCH_PPC64
5070 __ tst(scratch1, Operand(HeapNumber::kSignMask)); 5285 __ MovDoubleToInt64(scratch1, double_input);
5071 DeoptimizeIf(ne, instr, "minus zero"); 5286 #else
5287 __ MovDoubleHighToInt(scratch1, double_input);
5288 #endif
5289 __ cmpi(scratch1, Operand::Zero());
5290 DeoptimizeIf(lt, instr, "minus zero");
5072 __ bind(&done); 5291 __ bind(&done);
5073 } 5292 }
5074 } 5293 }
5075 } 5294 }
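
Note: TryDoubleToInt32Exact succeeds only when the double round-trips through int32 unchanged; -0.0 round-trips to 0, which is why the separate sign-word test follows it in the non-truncating path. A model of the check (a sketch, not the macro-assembler implementation):

    #include <cstdint>

    static bool DoubleToInt32Exact(double input, int32_t* out) {
      // Guard first: casting NaN or an out-of-range double to int32_t is
      // undefined behavior in C++ (this also rejects NaN, which fails both
      // comparisons below).
      if (!(input >= -2147483648.0 && input <= 2147483647.0)) return false;
      int32_t candidate = static_cast<int32_t>(input);           // Truncates.
      if (static_cast<double>(candidate) != input) return false;  // e.g. 1.5.
      *out = candidate;  // -0.0 passes here as 0, hence the extra sign check.
      return true;
    }
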
5076 5295
5077 5296
5078 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { 5297 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
5079 Register result_reg = ToRegister(instr->result()); 5298 Register result_reg = ToRegister(instr->result());
5080 Register scratch1 = scratch0(); 5299 Register scratch1 = scratch0();
5081 DwVfpRegister double_input = ToDoubleRegister(instr->value()); 5300 DoubleRegister double_input = ToDoubleRegister(instr->value());
5082 LowDwVfpRegister double_scratch = double_scratch0(); 5301 DoubleRegister double_scratch = double_scratch0();
5083 5302
5084 if (instr->truncating()) { 5303 if (instr->truncating()) {
5085 __ TruncateDoubleToI(result_reg, double_input); 5304 __ TruncateDoubleToI(result_reg, double_input);
5086 } else { 5305 } else {
5087 __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch); 5306 __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
5307 double_scratch);
5088 // Deoptimize if the input wasn't an int32 (inside a double). 5308 // Deoptimize if the input wasn't an int32 (inside a double).
5089 DeoptimizeIf(ne, instr, "lost precision or NaN"); 5309 DeoptimizeIf(ne, instr, "lost precision or NaN");
5090 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 5310 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
5091 Label done; 5311 Label done;
5092 __ cmp(result_reg, Operand::Zero()); 5312 __ cmpi(result_reg, Operand::Zero());
5093 __ b(ne, &done); 5313 __ bne(&done);
5094 __ VmovHigh(scratch1, double_input); 5314 #if V8_TARGET_ARCH_PPC64
5095 __ tst(scratch1, Operand(HeapNumber::kSignMask)); 5315 __ MovDoubleToInt64(scratch1, double_input);
5096 DeoptimizeIf(ne, instr, "minus zero"); 5316 #else
5317 __ MovDoubleHighToInt(scratch1, double_input);
5318 #endif
5319 __ cmpi(scratch1, Operand::Zero());
5320 DeoptimizeIf(lt, instr, "minus zero");
5097 __ bind(&done); 5321 __ bind(&done);
5098 } 5322 }
5099 } 5323 }
5100 __ SmiTag(result_reg, SetCC); 5324 #if V8_TARGET_ARCH_PPC64
5101 DeoptimizeIf(vs, instr, "overflow"); 5325 __ SmiTag(result_reg);
5326 #else
5327 __ SmiTagCheckOverflow(result_reg, r0);
5328 DeoptimizeIf(lt, instr, "overflow", cr0);
5329 #endif
5102 } 5330 }
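
Note on the #if at the end: a 32-bit smi has only a 31-bit payload, so tagging can overflow and must be checked (SmiTagCheckOverflow), while on PPC64 the smi payload is a full 32 bits wide and tagging an int32 can never overflow. Sketch of the 32-bit check (illustrative range, assumed semantics):

    #include <cstdint>

    static bool SmiTagWithOverflowCheck(int32_t value, int32_t* tagged) {
      // 31-bit payload: representable smis are [-2^30, 2^30 - 1].
      if (value < -(1 << 30) || value > (1 << 30) - 1) return false;
      *tagged = static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
      return true;
    }
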
5103 5331
5104 5332
5105 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { 5333 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
5106 LOperand* input = instr->value(); 5334 LOperand* input = instr->value();
5107 __ SmiTst(ToRegister(input)); 5335 __ TestIfSmi(ToRegister(input), r0);
5108 DeoptimizeIf(ne, instr, "not a Smi"); 5336 DeoptimizeIf(ne, instr, "not a Smi", cr0);
5109 } 5337 }
5110 5338
5111 5339
5112 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { 5340 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
5113 if (!instr->hydrogen()->value()->type().IsHeapObject()) { 5341 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
5114 LOperand* input = instr->value(); 5342 LOperand* input = instr->value();
5115 __ SmiTst(ToRegister(input)); 5343 __ TestIfSmi(ToRegister(input), r0);
5116 DeoptimizeIf(eq, instr, "Smi"); 5344 DeoptimizeIf(eq, instr, "Smi", cr0);
5117 } 5345 }
5118 } 5346 }
5119 5347
5120 5348
5121 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { 5349 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
5122 Register input = ToRegister(instr->value()); 5350 Register input = ToRegister(instr->value());
5123 Register scratch = scratch0(); 5351 Register scratch = scratch0();
5124 5352
5125 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); 5353 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5126 __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); 5354 __ lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
5127 5355
5128 if (instr->hydrogen()->is_interval_check()) { 5356 if (instr->hydrogen()->is_interval_check()) {
5129 InstanceType first; 5357 InstanceType first;
5130 InstanceType last; 5358 InstanceType last;
5131 instr->hydrogen()->GetCheckInterval(&first, &last); 5359 instr->hydrogen()->GetCheckInterval(&first, &last);
5132 5360
5133 __ cmp(scratch, Operand(first)); 5361 __ cmpli(scratch, Operand(first));
5134 5362
5135 // If there is only one type in the interval check for equality. 5363 // If there is only one type in the interval check for equality.
5136 if (first == last) { 5364 if (first == last) {
5137 DeoptimizeIf(ne, instr, "wrong instance type"); 5365 DeoptimizeIf(ne, instr, "wrong instance type");
5138 } else { 5366 } else {
5139 DeoptimizeIf(lo, instr, "wrong instance type"); 5367 DeoptimizeIf(lt, instr, "wrong instance type");
5140 // Omit check for the last type. 5368 // Omit check for the last type.
5141 if (last != LAST_TYPE) { 5369 if (last != LAST_TYPE) {
5142 __ cmp(scratch, Operand(last)); 5370 __ cmpli(scratch, Operand(last));
5143 DeoptimizeIf(hi, instr, "wrong instance type"); 5371 DeoptimizeIf(gt, instr, "wrong instance type");
5144 } 5372 }
5145 } 5373 }
5146 } else { 5374 } else {
5147 uint8_t mask; 5375 uint8_t mask;
5148 uint8_t tag; 5376 uint8_t tag;
5149 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); 5377 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
5150 5378
5151 if (base::bits::IsPowerOfTwo32(mask)) { 5379 if (base::bits::IsPowerOfTwo32(mask)) {
5152 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag)); 5380 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
5153 __ tst(scratch, Operand(mask)); 5381 __ andi(r0, scratch, Operand(mask));
5154 DeoptimizeIf(tag == 0 ? ne : eq, instr, "wrong instance type"); 5382 DeoptimizeIf(tag == 0 ? ne : eq, instr, "wrong instance type", cr0);
5155 } else { 5383 } else {
5156 __ and_(scratch, scratch, Operand(mask)); 5384 __ andi(scratch, scratch, Operand(mask));
5157 __ cmp(scratch, Operand(tag)); 5385 __ cmpi(scratch, Operand(tag));
5158 DeoptimizeIf(ne, instr, "wrong instance type"); 5386 DeoptimizeIf(ne, instr, "wrong instance type");
5159 } 5387 }
5160 } 5388 }
5161 } 5389 }
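
Note: the IsPowerOfTwo32 special case above saves an instruction: when the mask has a single bit set and the tag is 0 or the mask itself, (type & mask) == tag collapses to a zero/non-zero test on the AND result, so andi. alone sets the needed condition bits. The general form needs the AND plus an explicit compare:

    #include <cstdint>
    #include <cstdio>

    static bool MatchesMaskAndTag(uint8_t type, uint8_t mask, uint8_t tag) {
      return (type & mask) == tag;  // General two-step form: AND, then compare.
    }

    int main() {
      const uint8_t kSingleBitMask = 0x80;  // Illustrative single-bit mask.
      // With tag == 0 this is just "is bit 7 clear?"; a single test suffices.
      std::printf("%d\n", MatchesMaskAndTag(0x05, kSingleBitMask, 0));  // 1
    }
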
5162 5390
5163 5391
5164 void LCodeGen::DoCheckValue(LCheckValue* instr) { 5392 void LCodeGen::DoCheckValue(LCheckValue* instr) {
5165 Register reg = ToRegister(instr->value()); 5393 Register reg = ToRegister(instr->value());
5166 Handle<HeapObject> object = instr->hydrogen()->object().handle(); 5394 Handle<HeapObject> object = instr->hydrogen()->object().handle();
5167 AllowDeferredHandleDereference smi_check; 5395 AllowDeferredHandleDereference smi_check;
5168 if (isolate()->heap()->InNewSpace(*object)) { 5396 if (isolate()->heap()->InNewSpace(*object)) {
5169 Register reg = ToRegister(instr->value()); 5397 Register reg = ToRegister(instr->value());
5170 Handle<Cell> cell = isolate()->factory()->NewCell(object); 5398 Handle<Cell> cell = isolate()->factory()->NewCell(object);
5171 __ mov(ip, Operand(Handle<Object>(cell))); 5399 __ mov(ip, Operand(Handle<Object>(cell)));
5172 __ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset)); 5400 __ LoadP(ip, FieldMemOperand(ip, Cell::kValueOffset));
5173 __ cmp(reg, ip); 5401 __ cmp(reg, ip);
5174 } else { 5402 } else {
5175 __ cmp(reg, Operand(object)); 5403 __ Cmpi(reg, Operand(object), r0);
5176 } 5404 }
5177 DeoptimizeIf(ne, instr, "value mismatch"); 5405 DeoptimizeIf(ne, instr, "value mismatch");
5178 } 5406 }
5179 5407
5180 5408
5181 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { 5409 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
5182 { 5410 {
5183 PushSafepointRegistersScope scope(this); 5411 PushSafepointRegistersScope scope(this);
5184 __ push(object); 5412 __ push(object);
5185 __ mov(cp, Operand::Zero()); 5413 __ li(cp, Operand::Zero());
5186 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); 5414 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
5187 RecordSafepointWithRegisters( 5415 RecordSafepointWithRegisters(instr->pointer_map(), 1,
5188 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); 5416 Safepoint::kNoLazyDeopt);
5189 __ StoreToSafepointRegisterSlot(r0, scratch0()); 5417 __ StoreToSafepointRegisterSlot(r3, scratch0());
5190 } 5418 }
5191 __ tst(scratch0(), Operand(kSmiTagMask)); 5419 __ TestIfSmi(scratch0(), r0);
5192 DeoptimizeIf(eq, instr, "instance migration failed"); 5420 DeoptimizeIf(eq, instr, "instance migration failed", cr0);
5193 } 5421 }
5194 5422
5195 5423
5196 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { 5424 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
5197 class DeferredCheckMaps FINAL : public LDeferredCode { 5425 class DeferredCheckMaps FINAL : public LDeferredCode {
5198 public: 5426 public:
5199 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) 5427 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
5200 : LDeferredCode(codegen), instr_(instr), object_(object) { 5428 : LDeferredCode(codegen), instr_(instr), object_(object) {
5201 SetExit(check_maps()); 5429 SetExit(check_maps());
5202 } 5430 }
5203 virtual void Generate() OVERRIDE { 5431 virtual void Generate() OVERRIDE {
5204 codegen()->DoDeferredInstanceMigration(instr_, object_); 5432 codegen()->DoDeferredInstanceMigration(instr_, object_);
5205 } 5433 }
5206 Label* check_maps() { return &check_maps_; } 5434 Label* check_maps() { return &check_maps_; }
5207 virtual LInstruction* instr() OVERRIDE { return instr_; } 5435 virtual LInstruction* instr() OVERRIDE { return instr_; }
5436
5208 private: 5437 private:
5209 LCheckMaps* instr_; 5438 LCheckMaps* instr_;
5210 Label check_maps_; 5439 Label check_maps_;
5211 Register object_; 5440 Register object_;
5212 }; 5441 };
5213 5442
5214 if (instr->hydrogen()->IsStabilityCheck()) { 5443 if (instr->hydrogen()->IsStabilityCheck()) {
5215 const UniqueSet<Map>* maps = instr->hydrogen()->maps(); 5444 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5216 for (int i = 0; i < maps->size(); ++i) { 5445 for (int i = 0; i < maps->size(); ++i) {
5217 AddStabilityDependency(maps->at(i).handle()); 5446 AddStabilityDependency(maps->at(i).handle());
5218 } 5447 }
5219 return; 5448 return;
5220 } 5449 }
5221 5450
5222 Register map_reg = scratch0(); 5451 Register map_reg = scratch0();
5223 5452
5224 LOperand* input = instr->value(); 5453 LOperand* input = instr->value();
5225 DCHECK(input->IsRegister()); 5454 DCHECK(input->IsRegister());
5226 Register reg = ToRegister(input); 5455 Register reg = ToRegister(input);
5227 5456
5228 __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset)); 5457 __ LoadP(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
5229 5458
5230 DeferredCheckMaps* deferred = NULL; 5459 DeferredCheckMaps* deferred = NULL;
5231 if (instr->hydrogen()->HasMigrationTarget()) { 5460 if (instr->hydrogen()->HasMigrationTarget()) {
5232 deferred = new(zone()) DeferredCheckMaps(this, instr, reg); 5461 deferred = new (zone()) DeferredCheckMaps(this, instr, reg);
5233 __ bind(deferred->check_maps()); 5462 __ bind(deferred->check_maps());
5234 } 5463 }
5235 5464
5236 const UniqueSet<Map>* maps = instr->hydrogen()->maps(); 5465 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5237 Label success; 5466 Label success;
5238 for (int i = 0; i < maps->size() - 1; i++) { 5467 for (int i = 0; i < maps->size() - 1; i++) {
5239 Handle<Map> map = maps->at(i).handle(); 5468 Handle<Map> map = maps->at(i).handle();
5240 __ CompareMap(map_reg, map, &success); 5469 __ CompareMap(map_reg, map, &success);
5241 __ b(eq, &success); 5470 __ beq(&success);
5242 } 5471 }
5243 5472
5244 Handle<Map> map = maps->at(maps->size() - 1).handle(); 5473 Handle<Map> map = maps->at(maps->size() - 1).handle();
5245 __ CompareMap(map_reg, map, &success); 5474 __ CompareMap(map_reg, map, &success);
5246 if (instr->hydrogen()->HasMigrationTarget()) { 5475 if (instr->hydrogen()->HasMigrationTarget()) {
5247 __ b(ne, deferred->entry()); 5476 __ bne(deferred->entry());
5248 } else { 5477 } else {
5249 DeoptimizeIf(ne, instr, "wrong map"); 5478 DeoptimizeIf(ne, instr, "wrong map");
5250 } 5479 }
5251 5480
5252 __ bind(&success); 5481 __ bind(&success);
5253 } 5482 }
5254 5483
5255 5484
5256 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { 5485 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5257 DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped()); 5486 DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
5258 Register result_reg = ToRegister(instr->result()); 5487 Register result_reg = ToRegister(instr->result());
5259 __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0()); 5488 __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
5260 } 5489 }
5261 5490
5262 5491
5263 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { 5492 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
5264 Register unclamped_reg = ToRegister(instr->unclamped()); 5493 Register unclamped_reg = ToRegister(instr->unclamped());
5265 Register result_reg = ToRegister(instr->result()); 5494 Register result_reg = ToRegister(instr->result());
5266 __ ClampUint8(result_reg, unclamped_reg); 5495 __ ClampUint8(result_reg, unclamped_reg);
5267 } 5496 }
5268 5497
5269 5498
5270 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { 5499 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
5271 Register scratch = scratch0(); 5500 Register scratch = scratch0();
5272 Register input_reg = ToRegister(instr->unclamped()); 5501 Register input_reg = ToRegister(instr->unclamped());
5273 Register result_reg = ToRegister(instr->result()); 5502 Register result_reg = ToRegister(instr->result());
5274 DwVfpRegister temp_reg = ToDoubleRegister(instr->temp()); 5503 DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
5275 Label is_smi, done, heap_number; 5504 Label is_smi, done, heap_number;
5276 5505
5277 // Both smi and heap number cases are handled. 5506 // Both smi and heap number cases are handled.
5278 __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi); 5507 __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);
5279 5508
5280 // Check for heap number 5509 // Check for heap number
5281 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); 5510 __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
5282 __ cmp(scratch, Operand(factory()->heap_number_map())); 5511 __ Cmpi(scratch, Operand(factory()->heap_number_map()), r0);
5283 __ b(eq, &heap_number); 5512 __ beq(&heap_number);
5284 5513
5285 // Check for undefined. Undefined is converted to zero for clamping 5514 // Check for undefined. Undefined is converted to zero for clamping
5286 // conversions. 5515 // conversions.
5287 __ cmp(input_reg, Operand(factory()->undefined_value())); 5516 __ Cmpi(input_reg, Operand(factory()->undefined_value()), r0);
5288 DeoptimizeIf(ne, instr, "not a heap number/undefined"); 5517 DeoptimizeIf(ne, instr, "not a heap number/undefined");
5289 __ mov(result_reg, Operand::Zero()); 5518 __ li(result_reg, Operand::Zero());
5290 __ jmp(&done); 5519 __ b(&done);
5291 5520
5292 // Heap number 5521 // Heap number
5293 __ bind(&heap_number); 5522 __ bind(&heap_number);
5294 __ vldr(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); 5523 __ lfd(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
5295 __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0()); 5524 __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
5296 __ jmp(&done); 5525 __ b(&done);
5297 5526
5298 // smi 5527 // smi
5299 __ bind(&is_smi); 5528 __ bind(&is_smi);
5300 __ ClampUint8(result_reg, result_reg); 5529 __ ClampUint8(result_reg, result_reg);
5301 5530
5302 __ bind(&done); 5531 __ bind(&done);
5303 } 5532 }
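
Note: the three clamp paths above implement Uint8ClampedArray-style saturation: out-of-range values pin to 0/255 rather than wrapping, NaN and undefined become 0, and doubles round to nearest with ties to even. A sketch of the assumed semantics (not the ClampUint8/ClampDoubleToUint8 internals):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    static uint8_t ClampIntToUint8(int32_t v) {
      if (v < 0) return 0;
      if (v > 255) return 255;
      return static_cast<uint8_t>(v);
    }

    static uint8_t ClampDoubleToUint8(double v) {
      if (!(v > 0.0)) return 0;    // NaN and non-positive values -> 0.
      if (v >= 255.0) return 255;
      return static_cast<uint8_t>(std::nearbyint(v));  // Ties round to even.
    }

    int main() {
      std::printf("%u %u %u\n", ClampIntToUint8(300), ClampDoubleToUint8(2.5),
                  ClampDoubleToUint8(-4.0));  // Prints: 255 2 0
    }
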
5304 5533
5305 5534
5306 void LCodeGen::DoDoubleBits(LDoubleBits* instr) { 5535 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
5307 DwVfpRegister value_reg = ToDoubleRegister(instr->value()); 5536 DoubleRegister value_reg = ToDoubleRegister(instr->value());
5308 Register result_reg = ToRegister(instr->result()); 5537 Register result_reg = ToRegister(instr->result());
5538
5309 if (instr->hydrogen()->bits() == HDoubleBits::HIGH) { 5539 if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
5310 __ VmovHigh(result_reg, value_reg); 5540 __ MovDoubleHighToInt(result_reg, value_reg);
5311 } else { 5541 } else {
5312 __ VmovLow(result_reg, value_reg); 5542 __ MovDoubleLowToInt(result_reg, value_reg);
5313 } 5543 }
5314 } 5544 }
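
Note: MovDoubleHighToInt/MovDoubleLowToInt move the sign-and-exponent word and the low mantissa word of the IEEE-754 encoding, which is what HDoubleBits asks for; the Register::kExponentOffset indirection seen earlier exists because the in-memory word order flips with endianness. Standalone model of the two moves:

    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    static uint32_t DoubleHighWord(double v) {  // Sign + exponent + mantissa top.
      uint64_t bits;
      std::memcpy(&bits, &v, sizeof(bits));
      return static_cast<uint32_t>(bits >> 32);
    }

    static uint32_t DoubleLowWord(double v) {   // Low 32 mantissa bits.
      uint64_t bits;
      std::memcpy(&bits, &v, sizeof(bits));
      return static_cast<uint32_t>(bits);
    }

    int main() {
      std::printf("%08x %08x\n", DoubleHighWord(1.0), DoubleLowWord(1.0));
      // Prints: 3ff00000 00000000
    }
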
5315 5545
5316 5546
5317 void LCodeGen::DoConstructDouble(LConstructDouble* instr) { 5547 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
5318 Register hi_reg = ToRegister(instr->hi()); 5548 Register hi_reg = ToRegister(instr->hi());
5319 Register lo_reg = ToRegister(instr->lo()); 5549 Register lo_reg = ToRegister(instr->lo());
5320 DwVfpRegister result_reg = ToDoubleRegister(instr->result()); 5550 DoubleRegister result_reg = ToDoubleRegister(instr->result());
5321 __ VmovHigh(result_reg, hi_reg); 5551 #if V8_TARGET_ARCH_PPC64
5322 __ VmovLow(result_reg, lo_reg); 5552 __ MovInt64ComponentsToDouble(result_reg, hi_reg, lo_reg, r0);
5553 #else
5554 __ MovInt64ToDouble(result_reg, hi_reg, lo_reg);
5555 #endif
5323 } 5556 }
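
And the inverse used by DoConstructDouble: pack the two 32-bit halves back into one 64-bit pattern and reinterpret it as a double (a sketch of the effect of MovInt64ToDouble / MovInt64ComponentsToDouble, not their register-level implementation):

    #include <cstdint>
    #include <cstring>

    static double ConstructDouble(uint32_t hi, uint32_t lo) {
      uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
      double result;
      std::memcpy(&result, &bits, sizeof(result));
      return result;  // ConstructDouble(0x3ff00000, 0) == 1.0
    }
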
5324 5557
5325 5558
5326 void LCodeGen::DoAllocate(LAllocate* instr) { 5559 void LCodeGen::DoAllocate(LAllocate* instr) {
5327 class DeferredAllocate FINAL : public LDeferredCode { 5560 class DeferredAllocate FINAL : public LDeferredCode {
5328 public: 5561 public:
5329 DeferredAllocate(LCodeGen* codegen, LAllocate* instr) 5562 DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
5330 : LDeferredCode(codegen), instr_(instr) { } 5563 : LDeferredCode(codegen), instr_(instr) {}
5331 virtual void Generate() OVERRIDE { 5564 virtual void Generate() OVERRIDE { codegen()->DoDeferredAllocate(instr_); }
5332 codegen()->DoDeferredAllocate(instr_);
5333 }
5334 virtual LInstruction* instr() OVERRIDE { return instr_; } 5565 virtual LInstruction* instr() OVERRIDE { return instr_; }
5566
5335 private: 5567 private:
5336 LAllocate* instr_; 5568 LAllocate* instr_;
5337 }; 5569 };
5338 5570
5339 DeferredAllocate* deferred = 5571 DeferredAllocate* deferred = new (zone()) DeferredAllocate(this, instr);
5340 new(zone()) DeferredAllocate(this, instr);
5341 5572
5342 Register result = ToRegister(instr->result()); 5573 Register result = ToRegister(instr->result());
5343 Register scratch = ToRegister(instr->temp1()); 5574 Register scratch = ToRegister(instr->temp1());
5344 Register scratch2 = ToRegister(instr->temp2()); 5575 Register scratch2 = ToRegister(instr->temp2());
5345 5576
5346 // Allocate memory for the object. 5577 // Allocate memory for the object.
5347 AllocationFlags flags = TAG_OBJECT; 5578 AllocationFlags flags = TAG_OBJECT;
5348 if (instr->hydrogen()->MustAllocateDoubleAligned()) { 5579 if (instr->hydrogen()->MustAllocateDoubleAligned()) {
5349 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT); 5580 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
5350 } 5581 }
5351 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) { 5582 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5352 DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation()); 5583 DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
5353 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); 5584 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5354 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE); 5585 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
5355 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) { 5586 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5356 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); 5587 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5357 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE); 5588 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
5358 } 5589 }
5359 5590
5360 if (instr->size()->IsConstantOperand()) { 5591 if (instr->size()->IsConstantOperand()) {
5361 int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); 5592 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5362 if (size <= Page::kMaxRegularHeapObjectSize) { 5593 if (size <= Page::kMaxRegularHeapObjectSize) {
5363 __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags); 5594 __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
5364 } else { 5595 } else {
5365 __ jmp(deferred->entry()); 5596 __ b(deferred->entry());
5366 } 5597 }
5367 } else { 5598 } else {
5368 Register size = ToRegister(instr->size()); 5599 Register size = ToRegister(instr->size());
5369 __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags); 5600 __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
5370 } 5601 }
5371 5602
5372 __ bind(deferred->exit()); 5603 __ bind(deferred->exit());
5373 5604
5374 if (instr->hydrogen()->MustPrefillWithFiller()) { 5605 if (instr->hydrogen()->MustPrefillWithFiller()) {
5375 STATIC_ASSERT(kHeapObjectTag == 1); 5606 STATIC_ASSERT(kHeapObjectTag == 1);
5376 if (instr->size()->IsConstantOperand()) { 5607 if (instr->size()->IsConstantOperand()) {
5377 int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); 5608 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5378 __ mov(scratch, Operand(size - kHeapObjectTag)); 5609 __ LoadIntLiteral(scratch, size - kHeapObjectTag);
5379 } else { 5610 } else {
5380 __ sub(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag)); 5611 __ subi(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
5381 } 5612 }
5382 __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map())); 5613 __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
5383 Label loop; 5614 Label loop;
5384 __ bind(&loop); 5615 __ bind(&loop);
5385 __ sub(scratch, scratch, Operand(kPointerSize), SetCC); 5616 __ subi(scratch, scratch, Operand(kPointerSize));
5386 __ str(scratch2, MemOperand(result, scratch)); 5617 __ StorePX(scratch2, MemOperand(result, scratch));
5387 __ b(ge, &loop); 5618 __ cmpi(scratch, Operand::Zero());
5619 __ bge(&loop);
5388 } 5620 }
5389 } 5621 }
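
Note: the MustPrefillWithFiller loop walks backwards from the end of the allocation, storing the one-pointer filler map into every slot, so the GC always sees well-formed heap words even before the real object contents are written. A plain C++ model of the loop (a sketch; it ignores the kHeapObjectTag bias visible in the generated code):

    #include <cstddef>
    #include <cstdint>

    static void PrefillWithFiller(intptr_t* object, size_t size_in_bytes,
                                  intptr_t filler_map) {
      for (size_t off = size_in_bytes; off > 0; off -= sizeof(intptr_t)) {
        object[off / sizeof(intptr_t) - 1] = filler_map;  // Highest slot first.
      }
    }
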
5390 5622
5391 5623
5392 void LCodeGen::DoDeferredAllocate(LAllocate* instr) { 5624 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
5393 Register result = ToRegister(instr->result()); 5625 Register result = ToRegister(instr->result());
5394 5626
5395 // TODO(3095996): Get rid of this. For now, we need to make the 5627 // TODO(3095996): Get rid of this. For now, we need to make the
5396 // result register contain a valid pointer because it is already 5628 // result register contain a valid pointer because it is already
5397 // contained in the register pointer map. 5629 // contained in the register pointer map.
5398 __ mov(result, Operand(Smi::FromInt(0))); 5630 __ LoadSmiLiteral(result, Smi::FromInt(0));
5399 5631
5400 PushSafepointRegistersScope scope(this); 5632 PushSafepointRegistersScope scope(this);
5401 if (instr->size()->IsRegister()) { 5633 if (instr->size()->IsRegister()) {
5402 Register size = ToRegister(instr->size()); 5634 Register size = ToRegister(instr->size());
5403 DCHECK(!size.is(result)); 5635 DCHECK(!size.is(result));
5404 __ SmiTag(size); 5636 __ SmiTag(size);
5405 __ push(size); 5637 __ push(size);
5406 } else { 5638 } else {
5407 int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); 5639 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5640 #if !V8_TARGET_ARCH_PPC64
5408 if (size >= 0 && size <= Smi::kMaxValue) { 5641 if (size >= 0 && size <= Smi::kMaxValue) {
5642 #endif
5409 __ Push(Smi::FromInt(size)); 5643 __ Push(Smi::FromInt(size));
5644 #if !V8_TARGET_ARCH_PPC64
5410 } else { 5645 } else {
5411 // We should never get here at runtime => abort 5646 // We should never get here at runtime => abort
5412 __ stop("invalid allocation size"); 5647 __ stop("invalid allocation size");
5413 return; 5648 return;
5414 } 5649 }
5650 #endif
5415 } 5651 }
5416 5652
5417 int flags = AllocateDoubleAlignFlag::encode( 5653 int flags = AllocateDoubleAlignFlag::encode(
5418 instr->hydrogen()->MustAllocateDoubleAligned()); 5654 instr->hydrogen()->MustAllocateDoubleAligned());
5419 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) { 5655 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5420 DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation()); 5656 DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
5421 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); 5657 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5422 flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE); 5658 flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
5423 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) { 5659 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5424 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); 5660 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5425 flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE); 5661 flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
5426 } else { 5662 } else {
5427 flags = AllocateTargetSpace::update(flags, NEW_SPACE); 5663 flags = AllocateTargetSpace::update(flags, NEW_SPACE);
5428 } 5664 }
5429 __ Push(Smi::FromInt(flags)); 5665 __ Push(Smi::FromInt(flags));
5430 5666
5431 CallRuntimeFromDeferred( 5667 CallRuntimeFromDeferred(Runtime::kAllocateInTargetSpace, 2, instr,
5432 Runtime::kAllocateInTargetSpace, 2, instr, instr->context()); 5668 instr->context());
5433 __ StoreToSafepointRegisterSlot(r0, result); 5669 __ StoreToSafepointRegisterSlot(r3, result);
5434 } 5670 }
5435 5671
5436 5672
5437 void LCodeGen::DoToFastProperties(LToFastProperties* instr) { 5673 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
5438 DCHECK(ToRegister(instr->value()).is(r0)); 5674 DCHECK(ToRegister(instr->value()).is(r3));
5439 __ push(r0); 5675 __ push(r3);
5440 CallRuntime(Runtime::kToFastProperties, 1, instr); 5676 CallRuntime(Runtime::kToFastProperties, 1, instr);
5441 } 5677 }
5442 5678
5443 5679
5444 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) { 5680 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
5445 DCHECK(ToRegister(instr->context()).is(cp)); 5681 DCHECK(ToRegister(instr->context()).is(cp));
5446 Label materialized; 5682 Label materialized;
5447 // Registers will be used as follows: 5683 // Registers will be used as follows:
5448 // r6 = literals array. 5684 // r10 = literals array.
5449 // r1 = regexp literal. 5685 // r4 = regexp literal.
5450 // r0 = regexp literal clone. 5686 // r3 = regexp literal clone.
5451 // r2-5 are used as temporaries. 5687 // r5 and r7-r9 are used as temporaries.
5452 int literal_offset = 5688 int literal_offset =
5453 FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index()); 5689 FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
5454 __ Move(r6, instr->hydrogen()->literals()); 5690 __ Move(r10, instr->hydrogen()->literals());
5455 __ ldr(r1, FieldMemOperand(r6, literal_offset)); 5691 __ LoadP(r4, FieldMemOperand(r10, literal_offset));
5456 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); 5692 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
5457 __ cmp(r1, ip); 5693 __ cmp(r4, ip);
5458 __ b(ne, &materialized); 5694 __ bne(&materialized);
5459 5695
5460 // Create regexp literal using runtime function 5696 // Create regexp literal using runtime function
5461 // Result will be in r0. 5697 // Result will be in r3.
5462 __ mov(r5, Operand(Smi::FromInt(instr->hydrogen()->literal_index()))); 5698 __ LoadSmiLiteral(r9, Smi::FromInt(instr->hydrogen()->literal_index()));
5463 __ mov(r4, Operand(instr->hydrogen()->pattern())); 5699 __ mov(r8, Operand(instr->hydrogen()->pattern()));
5464 __ mov(r3, Operand(instr->hydrogen()->flags())); 5700 __ mov(r7, Operand(instr->hydrogen()->flags()));
5465 __ Push(r6, r5, r4, r3); 5701 __ Push(r10, r9, r8, r7);
5466 CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr); 5702 CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
5467 __ mov(r1, r0); 5703 __ mr(r4, r3);
5468 5704
5469 __ bind(&materialized); 5705 __ bind(&materialized);
5470 int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize; 5706 int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
5471 Label allocated, runtime_allocate; 5707 Label allocated, runtime_allocate;
5472 5708
5473 __ Allocate(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT); 5709 __ Allocate(size, r3, r5, r6, &runtime_allocate, TAG_OBJECT);
5474 __ jmp(&allocated); 5710 __ b(&allocated);
5475 5711
5476 __ bind(&runtime_allocate); 5712 __ bind(&runtime_allocate);
5477 __ mov(r0, Operand(Smi::FromInt(size))); 5713 __ LoadSmiLiteral(r3, Smi::FromInt(size));
5478 __ Push(r1, r0); 5714 __ Push(r4, r3);
5479 CallRuntime(Runtime::kAllocateInNewSpace, 1, instr); 5715 CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
5480 __ pop(r1); 5716 __ pop(r4);
5481 5717
5482 __ bind(&allocated); 5718 __ bind(&allocated);
5483 // Copy the content into the newly allocated memory. 5719 // Copy the content into the newly allocated memory.
5484 __ CopyFields(r0, r1, double_scratch0(), size / kPointerSize); 5720 __ CopyFields(r3, r4, r5.bit(), size / kPointerSize);
5485 } 5721 }
5486 5722
5487 5723
5488 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { 5724 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
5489 DCHECK(ToRegister(instr->context()).is(cp)); 5725 DCHECK(ToRegister(instr->context()).is(cp));
5490 // Use the fast case closure allocation code that allocates in new 5726 // Use the fast case closure allocation code that allocates in new
5491 // space for nested functions that don't need literals cloning. 5727 // space for nested functions that don't need literals cloning.
5492 bool pretenure = instr->hydrogen()->pretenure(); 5728 bool pretenure = instr->hydrogen()->pretenure();
5493 if (!pretenure && instr->hydrogen()->has_no_literals()) { 5729 if (!pretenure && instr->hydrogen()->has_no_literals()) {
5494 FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(), 5730 FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
5495 instr->hydrogen()->kind()); 5731 instr->hydrogen()->kind());
5496 __ mov(r2, Operand(instr->hydrogen()->shared_info())); 5732 __ mov(r5, Operand(instr->hydrogen()->shared_info()));
5497 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 5733 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5498 } else { 5734 } else {
5499 __ mov(r2, Operand(instr->hydrogen()->shared_info())); 5735 __ mov(r5, Operand(instr->hydrogen()->shared_info()));
5500 __ mov(r1, Operand(pretenure ? factory()->true_value() 5736 __ mov(r4, Operand(pretenure ? factory()->true_value()
5501 : factory()->false_value())); 5737 : factory()->false_value()));
5502 __ Push(cp, r2, r1); 5738 __ Push(cp, r5, r4);
5503 CallRuntime(Runtime::kNewClosure, 3, instr); 5739 CallRuntime(Runtime::kNewClosure, 3, instr);
5504 } 5740 }
5505 } 5741 }
5506 5742
5507 5743
5508 void LCodeGen::DoTypeof(LTypeof* instr) { 5744 void LCodeGen::DoTypeof(LTypeof* instr) {
5509 Register input = ToRegister(instr->value()); 5745 Register input = ToRegister(instr->value());
5510 __ push(input); 5746 __ push(input);
5511 CallRuntime(Runtime::kTypeof, 1, instr); 5747 CallRuntime(Runtime::kTypeof, 1, instr);
5512 } 5748 }
5513 5749
5514 5750
5515 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) { 5751 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5516 Register input = ToRegister(instr->value()); 5752 Register input = ToRegister(instr->value());
5517 5753
5518 Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_), 5754 Condition final_branch_condition =
5519 instr->FalseLabel(chunk_), 5755 EmitTypeofIs(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), input,
5520 input, 5756 instr->type_literal());
5521 instr->type_literal());
5522 if (final_branch_condition != kNoCondition) { 5757 if (final_branch_condition != kNoCondition) {
5523 EmitBranch(instr, final_branch_condition); 5758 EmitBranch(instr, final_branch_condition);
5524 } 5759 }
5525 } 5760 }
5526 5761
5527 5762
5528 Condition LCodeGen::EmitTypeofIs(Label* true_label, 5763 Condition LCodeGen::EmitTypeofIs(Label* true_label, Label* false_label,
5529 Label* false_label, 5764 Register input, Handle<String> type_name) {
5530 Register input,
5531 Handle<String> type_name) {
5532 Condition final_branch_condition = kNoCondition; 5765 Condition final_branch_condition = kNoCondition;
5533 Register scratch = scratch0(); 5766 Register scratch = scratch0();
5534 Factory* factory = isolate()->factory(); 5767 Factory* factory = isolate()->factory();
5535 if (String::Equals(type_name, factory->number_string())) { 5768 if (String::Equals(type_name, factory->number_string())) {
5536 __ JumpIfSmi(input, true_label); 5769 __ JumpIfSmi(input, true_label);
5537 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); 5770 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5538 __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex); 5771 __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
5539 final_branch_condition = eq; 5772 final_branch_condition = eq;
5540 5773
5541 } else if (String::Equals(type_name, factory->string_string())) { 5774 } else if (String::Equals(type_name, factory->string_string())) {
5542 __ JumpIfSmi(input, false_label); 5775 __ JumpIfSmi(input, false_label);
5543 __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE); 5776 __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
5544 __ b(ge, false_label); 5777 __ bge(false_label);
5545 __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset)); 5778 __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
5546 __ tst(scratch, Operand(1 << Map::kIsUndetectable)); 5779 __ ExtractBit(r0, scratch, Map::kIsUndetectable);
5780 __ cmpi(r0, Operand::Zero());
5547 final_branch_condition = eq; 5781 final_branch_condition = eq;
5548 5782
5549 } else if (String::Equals(type_name, factory->symbol_string())) { 5783 } else if (String::Equals(type_name, factory->symbol_string())) {
5550 __ JumpIfSmi(input, false_label); 5784 __ JumpIfSmi(input, false_label);
5551 __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE); 5785 __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE);
5552 final_branch_condition = eq; 5786 final_branch_condition = eq;
5553 5787
5554 } else if (String::Equals(type_name, factory->boolean_string())) { 5788 } else if (String::Equals(type_name, factory->boolean_string())) {
5555 __ CompareRoot(input, Heap::kTrueValueRootIndex); 5789 __ CompareRoot(input, Heap::kTrueValueRootIndex);
5556 __ b(eq, true_label); 5790 __ beq(true_label);
5557 __ CompareRoot(input, Heap::kFalseValueRootIndex); 5791 __ CompareRoot(input, Heap::kFalseValueRootIndex);
5558 final_branch_condition = eq; 5792 final_branch_condition = eq;
5559 5793
5560 } else if (String::Equals(type_name, factory->undefined_string())) { 5794 } else if (String::Equals(type_name, factory->undefined_string())) {
5561 __ CompareRoot(input, Heap::kUndefinedValueRootIndex); 5795 __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
5562 __ b(eq, true_label); 5796 __ beq(true_label);
5563 __ JumpIfSmi(input, false_label); 5797 __ JumpIfSmi(input, false_label);
5564 // Check for undetectable objects => true. 5798 // Check for undetectable objects => true.
5565 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); 5799 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5566 __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset)); 5800 __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
5567 __ tst(scratch, Operand(1 << Map::kIsUndetectable)); 5801 __ ExtractBit(r0, scratch, Map::kIsUndetectable);
5802 __ cmpi(r0, Operand::Zero());
5568 final_branch_condition = ne; 5803 final_branch_condition = ne;
5569 5804
5570 } else if (String::Equals(type_name, factory->function_string())) { 5805 } else if (String::Equals(type_name, factory->function_string())) {
5571 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); 5806 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
5572 Register type_reg = scratch; 5807 Register type_reg = scratch;
5573 __ JumpIfSmi(input, false_label); 5808 __ JumpIfSmi(input, false_label);
5574 __ CompareObjectType(input, scratch, type_reg, JS_FUNCTION_TYPE); 5809 __ CompareObjectType(input, scratch, type_reg, JS_FUNCTION_TYPE);
5575 __ b(eq, true_label); 5810 __ beq(true_label);
5576 __ cmp(type_reg, Operand(JS_FUNCTION_PROXY_TYPE)); 5811 __ cmpi(type_reg, Operand(JS_FUNCTION_PROXY_TYPE));
5577 final_branch_condition = eq; 5812 final_branch_condition = eq;
5578 5813
5579 } else if (String::Equals(type_name, factory->object_string())) { 5814 } else if (String::Equals(type_name, factory->object_string())) {
5580 Register map = scratch; 5815 Register map = scratch;
5581 __ JumpIfSmi(input, false_label); 5816 __ JumpIfSmi(input, false_label);
5582 __ CompareRoot(input, Heap::kNullValueRootIndex); 5817 __ CompareRoot(input, Heap::kNullValueRootIndex);
5583 __ b(eq, true_label); 5818 __ beq(true_label);
5584 __ CheckObjectTypeRange(input, 5819 __ CheckObjectTypeRange(input, map, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
5585 map, 5820 LAST_NONCALLABLE_SPEC_OBJECT_TYPE, false_label);
5586 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
5587 LAST_NONCALLABLE_SPEC_OBJECT_TYPE,
5588 false_label);
5589 // Check for undetectable objects => false. 5821 // Check for undetectable objects => false.
5590 __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset)); 5822 __ lbz(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
5591 __ tst(scratch, Operand(1 << Map::kIsUndetectable)); 5823 __ ExtractBit(r0, scratch, Map::kIsUndetectable);
5824 __ cmpi(r0, Operand::Zero());
5592 final_branch_condition = eq; 5825 final_branch_condition = eq;
5593 5826
5594 } else { 5827 } else {
5595 __ b(false_label); 5828 __ b(false_label);
5596 } 5829 }
5597 5830
5598 return final_branch_condition; 5831 return final_branch_condition;
5599 } 5832 }
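
Note: EmitTypeofIs compiles one fixed typeof comparison per literal, and several branches re-read Map::kBitField because undetectable objects (the document.all case) must report "undefined" and fail the "string"/"object" tests. A plain C++ model of the dispatch order (illustrative stand-ins for V8's instance-type checks, not its API):

    #include <string>

    struct Tagged {  // Hypothetical flags standing in for map/instance checks.
      bool smi, heap_number, string, symbol, boolean_value, undefined;
      bool function_or_proxy, undetectable;
    };

    static std::string TypeOf(const Tagged& v) {
      if (v.smi || v.heap_number) return "number";
      if (v.string && !v.undetectable) return "string";
      if (v.symbol) return "symbol";
      if (v.boolean_value) return "boolean";
      if (v.undefined || v.undetectable) return "undefined";
      if (v.function_or_proxy) return "function";
      return "object";  // null and the non-callable spec-object range land here.
    }
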
5600 5833
5601 5834
5602 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) { 5835 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
5603 Register temp1 = ToRegister(instr->temp()); 5836 Register temp1 = ToRegister(instr->temp());
5604 5837
5605 EmitIsConstructCall(temp1, scratch0()); 5838 EmitIsConstructCall(temp1, scratch0());
5606 EmitBranch(instr, eq); 5839 EmitBranch(instr, eq);
5607 } 5840 }
5608 5841
5609 5842
5610 void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) { 5843 void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
5611 DCHECK(!temp1.is(temp2)); 5844 DCHECK(!temp1.is(temp2));
5612 // Get the frame pointer for the calling frame. 5845 // Get the frame pointer for the calling frame.
5613 __ ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); 5846 __ LoadP(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
5614 5847
5615 // Skip the arguments adaptor frame if it exists. 5848 // Skip the arguments adaptor frame if it exists.
5616 __ ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset)); 5849 Label check_frame_marker;
5617 __ cmp(temp2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); 5850 __ LoadP(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
5618 __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset), eq); 5851 __ CmpSmiLiteral(temp2, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
5852 __ bne(&check_frame_marker);
5853 __ LoadP(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
5619 5854
5620 // Check the marker in the calling frame. 5855 // Check the marker in the calling frame.
5621 __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset)); 5856 __ bind(&check_frame_marker);
5622 __ cmp(temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT))); 5857 __ LoadP(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
5858 __ CmpSmiLiteral(temp1, Smi::FromInt(StackFrame::CONSTRUCT), r0);
5623 } 5859 }
5624 5860
5625 5861
5626 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) { 5862 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
5627 if (!info()->IsStub()) { 5863 if (!info()->IsStub()) {
5628 // Ensure that we have enough space after the previous lazy-bailout 5864 // Ensure that we have enough space after the previous lazy-bailout
5629 // instruction for patching the code here. 5865 // instruction for patching the code here.
5630 int current_pc = masm()->pc_offset(); 5866 int current_pc = masm()->pc_offset();
5631 if (current_pc < last_lazy_deopt_pc_ + space_needed) { 5867 if (current_pc < last_lazy_deopt_pc_ + space_needed) {
5632 // Block literal pool emission for duration of padding.
5633 Assembler::BlockConstPoolScope block_const_pool(masm());
5634 int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc; 5868 int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
5635 DCHECK_EQ(0, padding_size % Assembler::kInstrSize); 5869 DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
5636 while (padding_size > 0) { 5870 while (padding_size > 0) {
5637 __ nop(); 5871 __ nop();
5638 padding_size -= Assembler::kInstrSize; 5872 padding_size -= Assembler::kInstrSize;
5639 } 5873 }
5640 } 5874 }
5641 } 5875 }
5642 last_lazy_deopt_pc_ = masm()->pc_offset(); 5876 last_lazy_deopt_pc_ = masm()->pc_offset();
5643 } 5877 }
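
Note: the padding loop can use plain nops because PPC is a fixed-width encoding; the ARM version additionally had to block constant-pool emission (the BlockConstPoolScope removed above), which the PPC port does not need here. The arithmetic it performs, as a trivial sketch:

    #include <cstdio>

    int main() {
      const int kInstrSize = 4;  // Fixed 4-byte PPC instruction encoding.
      int last_lazy_deopt_pc = 100, space_needed = 16, current_pc = 108;
      for (int pad = last_lazy_deopt_pc + space_needed - current_pc;  // 8 bytes.
           pad > 0; pad -= kInstrSize) {
        std::printf("nop\n");  // Emits two nops in this example.
      }
    }
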
(...skipping 41 matching lines...)
5685 DCHECK(instr->HasEnvironment()); 5919 DCHECK(instr->HasEnvironment());
5686 LEnvironment* env = instr->environment(); 5920 LEnvironment* env = instr->environment();
5687 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); 5921 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5688 } 5922 }
5689 5923
5690 5924
5691 void LCodeGen::DoStackCheck(LStackCheck* instr) { 5925 void LCodeGen::DoStackCheck(LStackCheck* instr) {
5692 class DeferredStackCheck FINAL : public LDeferredCode { 5926 class DeferredStackCheck FINAL : public LDeferredCode {
5693 public: 5927 public:
5694 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr) 5928 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
5695 : LDeferredCode(codegen), instr_(instr) { } 5929 : LDeferredCode(codegen), instr_(instr) {}
5696 virtual void Generate() OVERRIDE { 5930 virtual void Generate() OVERRIDE {
5697 codegen()->DoDeferredStackCheck(instr_); 5931 codegen()->DoDeferredStackCheck(instr_);
5698 } 5932 }
5699 virtual LInstruction* instr() OVERRIDE { return instr_; } 5933 virtual LInstruction* instr() OVERRIDE { return instr_; }
5934
5700 private: 5935 private:
5701 LStackCheck* instr_; 5936 LStackCheck* instr_;
5702 }; 5937 };
5703 5938
5704 DCHECK(instr->HasEnvironment()); 5939 DCHECK(instr->HasEnvironment());
5705 LEnvironment* env = instr->environment(); 5940 LEnvironment* env = instr->environment();
5706 // There is no LLazyBailout instruction for stack-checks. We have to 5941 // There is no LLazyBailout instruction for stack-checks. We have to
5707 // prepare for lazy deoptimization explicitly here. 5942 // prepare for lazy deoptimization explicitly here.
5708 if (instr->hydrogen()->is_function_entry()) { 5943 if (instr->hydrogen()->is_function_entry()) {
5709 // Perform stack overflow check. 5944 // Perform stack overflow check.
5710 Label done; 5945 Label done;
5711 __ LoadRoot(ip, Heap::kStackLimitRootIndex); 5946 __ LoadRoot(ip, Heap::kStackLimitRootIndex);
5712 __ cmp(sp, Operand(ip)); 5947 __ cmpl(sp, ip);
5713 __ b(hs, &done); 5948 __ bge(&done);
5714 Handle<Code> stack_check = isolate()->builtins()->StackCheck();
5715 PredictableCodeSizeScope predictable(masm(),
5716 CallCodeSize(stack_check, RelocInfo::CODE_TARGET));
5717 DCHECK(instr->context()->IsRegister()); 5949 DCHECK(instr->context()->IsRegister());
5718 DCHECK(ToRegister(instr->context()).is(cp)); 5950 DCHECK(ToRegister(instr->context()).is(cp));
5719 CallCode(stack_check, RelocInfo::CODE_TARGET, instr); 5951 CallCode(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET,
5952 instr);
5720 __ bind(&done); 5953 __ bind(&done);
5721 } else { 5954 } else {
5722 DCHECK(instr->hydrogen()->is_backwards_branch()); 5955 DCHECK(instr->hydrogen()->is_backwards_branch());
5723 // Perform stack overflow check if this goto needs it before jumping. 5956 // Perform stack overflow check if this goto needs it before jumping.
5724 DeferredStackCheck* deferred_stack_check = 5957 DeferredStackCheck* deferred_stack_check =
5725 new(zone()) DeferredStackCheck(this, instr); 5958 new (zone()) DeferredStackCheck(this, instr);
5726 __ LoadRoot(ip, Heap::kStackLimitRootIndex); 5959 __ LoadRoot(ip, Heap::kStackLimitRootIndex);
5727 __ cmp(sp, Operand(ip)); 5960 __ cmpl(sp, ip);
5728 __ b(lo, deferred_stack_check->entry()); 5961 __ blt(deferred_stack_check->entry());
5729 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); 5962 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
5730 __ bind(instr->done_label()); 5963 __ bind(instr->done_label());
5731 deferred_stack_check->SetExit(instr->done_label()); 5964 deferred_stack_check->SetExit(instr->done_label());
5732 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); 5965 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5733 // Don't record a deoptimization index for the safepoint here. 5966 // Don't record a deoptimization index for the safepoint here.
5734 // This will be done explicitly when emitting call and the safepoint in 5967 // This will be done explicitly when emitting call and the safepoint in
5735 // the deferred code. 5968 // the deferred code.
5736 } 5969 }
5737 } 5970 }
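
Note: both stack checks compare the stack pointer against the limit with an unsigned compare (cmpl) because addresses are unsigned and the stack grows downward: overflow means sp has dropped below the limit. A one-line model:

    #include <cstdint>

    static bool StackHasRoom(uintptr_t sp, uintptr_t stack_limit) {
      return sp >= stack_limit;  // bge(&done) / blt(deferred->entry()) above.
    }
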
5738 5971
5739 5972
5740 void LCodeGen::DoOsrEntry(LOsrEntry* instr) { 5973 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
5741 // This is a pseudo-instruction that ensures that the environment here is 5974 // This is a pseudo-instruction that ensures that the environment here is
5742 // properly registered for deoptimization and records the assembler's PC 5975 // properly registered for deoptimization and records the assembler's PC
5743 // offset. 5976 // offset.
5744 LEnvironment* environment = instr->environment(); 5977 LEnvironment* environment = instr->environment();
5745 5978
5746 // If the environment were already registered, we would have no way of 5979 // If the environment were already registered, we would have no way of
5747 // backpatching it with the spill slot operands. 5980 // backpatching it with the spill slot operands.
5748 DCHECK(!environment->HasBeenRegistered()); 5981 DCHECK(!environment->HasBeenRegistered());
5749 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); 5982 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
5750 5983
5751 GenerateOsrPrologue(); 5984 GenerateOsrPrologue();
5752 } 5985 }
5753 5986
5754 5987
5755 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { 5988 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5756 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); 5989 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
5757 __ cmp(r0, ip); 5990 __ cmp(r3, ip);
5758 DeoptimizeIf(eq, instr, "undefined"); 5991 DeoptimizeIf(eq, instr, "undefined");
5759 5992
5760 Register null_value = r5; 5993 Register null_value = r8;
5761 __ LoadRoot(null_value, Heap::kNullValueRootIndex); 5994 __ LoadRoot(null_value, Heap::kNullValueRootIndex);
5762 __ cmp(r0, null_value); 5995 __ cmp(r3, null_value);
5763 DeoptimizeIf(eq, instr, "null"); 5996 DeoptimizeIf(eq, instr, "null");
5764 5997
5765 __ SmiTst(r0); 5998 __ TestIfSmi(r3, r0);
5766 DeoptimizeIf(eq, instr, "Smi"); 5999 DeoptimizeIf(eq, instr, "Smi", cr0);
5767 6000
5768 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); 6001 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
5769 __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE); 6002 __ CompareObjectType(r3, r4, r4, LAST_JS_PROXY_TYPE);
5770 DeoptimizeIf(le, instr, "wrong instance type"); 6003 DeoptimizeIf(le, instr, "wrong instance type");
5771 6004
5772 Label use_cache, call_runtime; 6005 Label use_cache, call_runtime;
5773 __ CheckEnumCache(null_value, &call_runtime); 6006 __ CheckEnumCache(null_value, &call_runtime);
5774 6007
5775 __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset)); 6008 __ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
5776 __ b(&use_cache); 6009 __ b(&use_cache);
5777 6010
5778 // Get the set of properties to enumerate. 6011 // Get the set of properties to enumerate.
5779 __ bind(&call_runtime); 6012 __ bind(&call_runtime);
5780 __ push(r0); 6013 __ push(r3);
5781 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr); 6014 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
5782 6015
5783 __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); 6016 __ LoadP(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
5784 __ LoadRoot(ip, Heap::kMetaMapRootIndex); 6017 __ LoadRoot(ip, Heap::kMetaMapRootIndex);
5785 __ cmp(r1, ip); 6018 __ cmp(r4, ip);
5786 DeoptimizeIf(ne, instr, "wrong map"); 6019 DeoptimizeIf(ne, instr, "wrong map");
5787 __ bind(&use_cache); 6020 __ bind(&use_cache);
5788 } 6021 }
5789 6022
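The checks above filter for-in receivers before enumeration: undefined, null, smis, and proxy-like objects deoptimize; for everything else the map's enum cache is preferred, with Runtime::kGetPropertyNamesFast as the fallback. A value-level sketch of that decision, assuming hypothetical predicate names that stand in for the emitted cmp/TestIfSmi/CompareObjectType/CheckEnumCache sequence:

enum class ForInPrep { kDeopt, kUseEnumCache, kCallRuntime };

// Illustrative only; each flag corresponds to one of the deopt checks or
// to the CheckEnumCache branch in the generated code above.
ForInPrep ClassifyReceiver(bool is_undefined, bool is_null, bool is_smi,
                           bool is_proxy, bool enum_cache_ok) {
  if (is_undefined || is_null || is_smi || is_proxy) return ForInPrep::kDeopt;
  return enum_cache_ok ? ForInPrep::kUseEnumCache : ForInPrep::kCallRuntime;
}
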
5790 6023
5791 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { 6024 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5792 Register map = ToRegister(instr->map()); 6025 Register map = ToRegister(instr->map());
5793 Register result = ToRegister(instr->result()); 6026 Register result = ToRegister(instr->result());
5794 Label load_cache, done; 6027 Label load_cache, done;
5795 __ EnumLength(result, map); 6028 __ EnumLength(result, map);
5796 __ cmp(result, Operand(Smi::FromInt(0))); 6029 __ CmpSmiLiteral(result, Smi::FromInt(0), r0);
5797 __ b(ne, &load_cache); 6030 __ bne(&load_cache);
5798 __ mov(result, Operand(isolate()->factory()->empty_fixed_array())); 6031 __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
5799 __ jmp(&done); 6032 __ b(&done);
5800 6033
5801 __ bind(&load_cache); 6034 __ bind(&load_cache);
5802 __ LoadInstanceDescriptors(map, result); 6035 __ LoadInstanceDescriptors(map, result);
5803 __ ldr(result, 6036 __ LoadP(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
5804 FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); 6037 __ LoadP(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
5805 __ ldr(result, 6038 __ cmpi(result, Operand::Zero());
5806 FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
5807 __ cmp(result, Operand::Zero());
5808 DeoptimizeIf(eq, instr, "no cache"); 6039 DeoptimizeIf(eq, instr, "no cache");
5809 6040
5810 __ bind(&done); 6041 __ bind(&done);
5811 } 6042 }
5812 6043
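DoForInCacheArray reduces to: an EnumLength of zero yields the empty fixed array, otherwise the enum cache is loaded from the instance descriptors, and a zero (null) entry deoptimizes with "no cache". A sketch of that control flow under illustrative types (FixedArrayStub is a placeholder, not a V8 class):

struct FixedArrayStub;

// Models only the branch structure of the code above.
const FixedArrayStub* SelectEnumCache(int enum_length,
                                      const FixedArrayStub* cache_at_idx,
                                      const FixedArrayStub* empty_array) {
  if (enum_length == 0) return empty_array;  // nothing to enumerate
  // A null cache entry corresponds to the "no cache" deoptimization.
  return cache_at_idx;
}
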
5813 6044
5814 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { 6045 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5815 Register object = ToRegister(instr->value()); 6046 Register object = ToRegister(instr->value());
5816 Register map = ToRegister(instr->map()); 6047 Register map = ToRegister(instr->map());
5817 __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset)); 6048 __ LoadP(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
5818 __ cmp(map, scratch0()); 6049 __ cmp(map, scratch0());
5819 DeoptimizeIf(ne, instr, "wrong map"); 6050 DeoptimizeIf(ne, instr, "wrong map");
5820 } 6051 }
5821 6052
5822 6053
5823 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, 6054 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
5824 Register result, 6055 Register result, Register object,
5825 Register object,
5826 Register index) { 6056 Register index) {
5827 PushSafepointRegistersScope scope(this); 6057 PushSafepointRegistersScope scope(this);
5828 __ Push(object); 6058 __ Push(object, index);
5829 __ Push(index); 6059 __ li(cp, Operand::Zero());
5830 __ mov(cp, Operand::Zero());
5831 __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble); 6060 __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
5832 RecordSafepointWithRegisters( 6061 RecordSafepointWithRegisters(instr->pointer_map(), 2,
5833 instr->pointer_map(), 2, Safepoint::kNoLazyDeopt); 6062 Safepoint::kNoLazyDeopt);
5834 __ StoreToSafepointRegisterSlot(r0, result); 6063 __ StoreToSafepointRegisterSlot(r3, result);
5835 } 6064 }
5836 6065
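The deferred path runs inside PushSafepointRegistersScope: registers are saved so the context can be cleared, the runtime call can clobber them freely, and the result is written back into r3's safepoint slot. A hypothetical RAII sketch of the save/restore idea only (the real scope also feeds the GC's safepoint tables; this is not the V8 implementation):

#include <array>
#include <cstdint>

// Snapshot the register file on entry, restore it on scope exit.
class ScopedRegisterSave {
 public:
  explicit ScopedRegisterSave(std::array<uint64_t, 32>& regs)
      : regs_(regs), saved_(regs) {}
  ~ScopedRegisterSave() { regs_ = saved_; }

 private:
  std::array<uint64_t, 32>& regs_;
  std::array<uint64_t, 32> saved_;
};
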
5837 6066
5838 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) { 6067 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5839 class DeferredLoadMutableDouble FINAL : public LDeferredCode { 6068 class DeferredLoadMutableDouble FINAL : public LDeferredCode {
5840 public: 6069 public:
5841 DeferredLoadMutableDouble(LCodeGen* codegen, 6070 DeferredLoadMutableDouble(LCodeGen* codegen, LLoadFieldByIndex* instr,
5842 LLoadFieldByIndex* instr, 6071 Register result, Register object, Register index)
5843 Register result,
5844 Register object,
5845 Register index)
5846 : LDeferredCode(codegen), 6072 : LDeferredCode(codegen),
5847 instr_(instr), 6073 instr_(instr),
5848 result_(result), 6074 result_(result),
5849 object_(object), 6075 object_(object),
5850 index_(index) { 6076 index_(index) {}
5851 }
5852 virtual void Generate() OVERRIDE { 6077 virtual void Generate() OVERRIDE {
5853 codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_); 6078 codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
5854 } 6079 }
5855 virtual LInstruction* instr() OVERRIDE { return instr_; } 6080 virtual LInstruction* instr() OVERRIDE { return instr_; }
6081
5856 private: 6082 private:
5857 LLoadFieldByIndex* instr_; 6083 LLoadFieldByIndex* instr_;
5858 Register result_; 6084 Register result_;
5859 Register object_; 6085 Register object_;
5860 Register index_; 6086 Register index_;
5861 }; 6087 };
5862 6088
5863 Register object = ToRegister(instr->object()); 6089 Register object = ToRegister(instr->object());
5864 Register index = ToRegister(instr->index()); 6090 Register index = ToRegister(instr->index());
5865 Register result = ToRegister(instr->result()); 6091 Register result = ToRegister(instr->result());
5866 Register scratch = scratch0(); 6092 Register scratch = scratch0();
5867 6093
5868 DeferredLoadMutableDouble* deferred; 6094 DeferredLoadMutableDouble* deferred;
5869 deferred = new(zone()) DeferredLoadMutableDouble( 6095 deferred = new (zone())
5870 this, instr, result, object, index); 6096 DeferredLoadMutableDouble(this, instr, result, object, index);
5871 6097
5872 Label out_of_object, done; 6098 Label out_of_object, done;
5873 6099
5874 __ tst(index, Operand(Smi::FromInt(1))); 6100 __ TestBitMask(index, reinterpret_cast<uintptr_t>(Smi::FromInt(1)), r0);
5875 __ b(ne, deferred->entry()); 6101 __ bne(deferred->entry(), cr0);
5876 __ mov(index, Operand(index, ASR, 1)); 6102 __ ShiftRightArithImm(index, index, 1);
5877 6103
5878 __ cmp(index, Operand::Zero()); 6104 __ cmpi(index, Operand::Zero());
5879 __ b(lt, &out_of_object); 6105 __ blt(&out_of_object);
5880 6106
5881 __ add(scratch, object, Operand::PointerOffsetFromSmiKey(index)); 6107 __ SmiToPtrArrayOffset(r0, index);
5882 __ ldr(result, FieldMemOperand(scratch, JSObject::kHeaderSize)); 6108 __ add(scratch, object, r0);
6109 __ LoadP(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
5883 6110
5884 __ b(&done); 6111 __ b(&done);
5885 6112
5886 __ bind(&out_of_object); 6113 __ bind(&out_of_object);
5887 __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); 6114 __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
5888 // The index is equal to the negated out-of-object property index plus 1. 6115 // The index is equal to the negated out-of-object property index plus 1.
5889 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); 6116 __ SmiToPtrArrayOffset(r0, index);
5890 __ sub(scratch, result, Operand::PointerOffsetFromSmiKey(index)); 6117 __ sub(scratch, result, r0);
5891 __ ldr(result, FieldMemOperand(scratch, 6118 __ LoadP(result,
5892 FixedArray::kHeaderSize - kPointerSize)); 6119 FieldMemOperand(scratch, FixedArray::kHeaderSize - kPointerSize));
5893 __ bind(deferred->exit()); 6120 __ bind(deferred->exit());
5894 __ bind(&done); 6121 __ bind(&done);
5895 } 6122 }
5896 6123
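The index register packs three decisions into one smi: bit 0 of the value routes mutable doubles to the deferred path, and after the arithmetic shift the sign selects in-object versus out-of-object storage, where a negative index i denotes out-of-object slot -i - 1 (hence the comment above). A value-level sketch of the decoding, with smi tagging abstracted away and illustrative names:

#include <cassert>
#include <cstdint>

struct FieldRef {
  bool out_of_object;  // true: slot lives in the properties backing store
  intptr_t slot;       // zero-based slot index
};

// Mirrors the TestBitMask / ShiftRightArithImm / sign-check sequence.
FieldRef DecodeFieldIndex(intptr_t encoded) {
  assert((encoded & 1) == 0);  // odd values take the mutable-double path
  intptr_t index = encoded >> 1;  // arithmetic shift, as in the emitted code
  if (index >= 0) return {false, index};  // in-object field
  return {true, -index - 1};              // out-of-object property
}
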
5897 6124
5898 void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) { 6125 void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
5899 Register context = ToRegister(instr->context()); 6126 Register context = ToRegister(instr->context());
5900 __ str(context, MemOperand(fp, StandardFrameConstants::kContextOffset)); 6127 __ StoreP(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
5901 } 6128 }
5902 6129
5903 6130
5904 void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) { 6131 void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
5905 Handle<ScopeInfo> scope_info = instr->scope_info(); 6132 Handle<ScopeInfo> scope_info = instr->scope_info();
5906 __ Push(scope_info); 6133 __ Push(scope_info);
5907 __ push(ToRegister(instr->function())); 6134 __ push(ToRegister(instr->function()));
5908 CallRuntime(Runtime::kPushBlockContext, 2, instr); 6135 CallRuntime(Runtime::kPushBlockContext, 2, instr);
5909 RecordSafepoint(Safepoint::kNoLazyDeopt); 6136 RecordSafepoint(Safepoint::kNoLazyDeopt);
5910 } 6137 }
5911 6138
5912 6139
5913 #undef __ 6140 #undef __
5914 6141 }
5915 } } // namespace v8::internal 6142 } // namespace v8::internal