Chromium Code Reviews

Side by Side Diff: src/ppc/lithium-codegen-ppc.cc

Issue 422063005: Contribution of PowerPC port. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Caught up to bleeding edge (8/15) Created 6 years, 4 months ago
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 //
3 // Copyright IBM Corp. 2012, 2013. All rights reserved.
4 //
2 // Use of this source code is governed by a BSD-style license that can be 5 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 6 // found in the LICENSE file.
4 7
5 #include "src/v8.h" 8 #include "src/v8.h"
6 9
7 #include "src/arm/lithium-codegen-arm.h"
8 #include "src/arm/lithium-gap-resolver-arm.h"
9 #include "src/code-stubs.h" 10 #include "src/code-stubs.h"
10 #include "src/hydrogen-osr.h" 11 #include "src/hydrogen-osr.h"
11 #include "src/stub-cache.h" 12 #include "src/stub-cache.h"
12 13
14 #include "src/ppc/lithium-codegen-ppc.h"
15 #include "src/ppc/lithium-gap-resolver-ppc.h"
16
13 namespace v8 { 17 namespace v8 {
14 namespace internal { 18 namespace internal {
15 19
16 20
17 class SafepointGenerator V8_FINAL : public CallWrapper { 21 class SafepointGenerator V8_FINAL : public CallWrapper {
18 public: 22 public:
19 SafepointGenerator(LCodeGen* codegen, 23 SafepointGenerator(LCodeGen* codegen,
20 LPointerMap* pointers, 24 LPointerMap* pointers,
21 Safepoint::DeoptMode mode) 25 Safepoint::DeoptMode mode)
22 : codegen_(codegen), 26 : codegen_(codegen),
23 pointers_(pointers), 27 pointers_(pointers),
24 deopt_mode_(mode) { } 28 deopt_mode_(mode) { }
25 virtual ~SafepointGenerator() {} 29 virtual ~SafepointGenerator() { }
26 30
27 virtual void BeforeCall(int call_size) const V8_OVERRIDE {} 31 virtual void BeforeCall(int call_size) const V8_OVERRIDE {}
28 32
29 virtual void AfterCall() const V8_OVERRIDE { 33 virtual void AfterCall() const V8_OVERRIDE {
30 codegen_->RecordSafepoint(pointers_, deopt_mode_); 34 codegen_->RecordSafepoint(pointers_, deopt_mode_);
31 } 35 }
32 36
33 private: 37 private:
34 LCodeGen* codegen_; 38 LCodeGen* codegen_;
35 LPointerMap* pointers_; 39 LPointerMap* pointers_;
(...skipping 31 matching lines...)
67 71
68 72
69 void LCodeGen::SaveCallerDoubles() { 73 void LCodeGen::SaveCallerDoubles() {
70 DCHECK(info()->saves_caller_doubles()); 74 DCHECK(info()->saves_caller_doubles());
71 DCHECK(NeedsEagerFrame()); 75 DCHECK(NeedsEagerFrame());
72 Comment(";;; Save clobbered callee double registers"); 76 Comment(";;; Save clobbered callee double registers");
73 int count = 0; 77 int count = 0;
74 BitVector* doubles = chunk()->allocated_double_registers(); 78 BitVector* doubles = chunk()->allocated_double_registers();
75 BitVector::Iterator save_iterator(doubles); 79 BitVector::Iterator save_iterator(doubles);
76 while (!save_iterator.Done()) { 80 while (!save_iterator.Done()) {
77 __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()), 81 __ stfd(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
78 MemOperand(sp, count * kDoubleSize)); 82 MemOperand(sp, count * kDoubleSize));
79 save_iterator.Advance(); 83 save_iterator.Advance();
80 count++; 84 count++;
81 } 85 }
82 } 86 }
83 87
84 88
85 void LCodeGen::RestoreCallerDoubles() { 89 void LCodeGen::RestoreCallerDoubles() {
86 DCHECK(info()->saves_caller_doubles()); 90 DCHECK(info()->saves_caller_doubles());
87 DCHECK(NeedsEagerFrame()); 91 DCHECK(NeedsEagerFrame());
88 Comment(";;; Restore clobbered callee double registers"); 92 Comment(";;; Restore clobbered callee double registers");
89 BitVector* doubles = chunk()->allocated_double_registers(); 93 BitVector* doubles = chunk()->allocated_double_registers();
90 BitVector::Iterator save_iterator(doubles); 94 BitVector::Iterator save_iterator(doubles);
91 int count = 0; 95 int count = 0;
92 while (!save_iterator.Done()) { 96 while (!save_iterator.Done()) {
93 __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()), 97 __ lfd(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
94 MemOperand(sp, count * kDoubleSize)); 98 MemOperand(sp, count * kDoubleSize));
95 save_iterator.Advance(); 99 save_iterator.Advance();
96 count++; 100 count++;
97 } 101 }
98 } 102 }
99 103
100 104
101 bool LCodeGen::GeneratePrologue() { 105 bool LCodeGen::GeneratePrologue() {
102 DCHECK(is_generating()); 106 DCHECK(is_generating());
103 107
104 if (info()->IsOptimizing()) { 108 if (info()->IsOptimizing()) {
105 ProfileEntryHookStub::MaybeCallEntryHook(masm_); 109 ProfileEntryHookStub::MaybeCallEntryHook(masm_);
106 110
107 #ifdef DEBUG 111 #ifdef DEBUG
108 if (strlen(FLAG_stop_at) > 0 && 112 if (strlen(FLAG_stop_at) > 0 &&
109 info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) { 113 info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
110 __ stop("stop_at"); 114 __ stop("stop_at");
111 } 115 }
112 #endif 116 #endif
113 117
114 // r1: Callee's JS function. 118 // r4: Callee's JS function.
115 // cp: Callee's context. 119 // cp: Callee's context.
116 // pp: Callee's constant pool pointer (if FLAG_enable_ool_constant_pool)
117 // fp: Caller's frame pointer. 120 // fp: Caller's frame pointer.
118 // lr: Caller's pc. 121 // lr: Caller's pc.
119 122
120 // Sloppy mode functions and builtins need to replace the receiver with the 123 // Sloppy mode functions and builtins need to replace the receiver with the
121 // global proxy when called as functions (without an explicit receiver 124 // global proxy when called as functions (without an explicit receiver
122 // object). 125 // object).
123 if (info_->this_has_uses() && 126 if (info_->this_has_uses() &&
124 info_->strict_mode() == SLOPPY && 127 info_->strict_mode() == SLOPPY &&
125 !info_->is_native()) { 128 !info_->is_native()) {
126 Label ok; 129 Label ok;
127 int receiver_offset = info_->scope()->num_parameters() * kPointerSize; 130 int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
128 __ ldr(r2, MemOperand(sp, receiver_offset)); 131 __ LoadP(r5, MemOperand(sp, receiver_offset));
129 __ CompareRoot(r2, Heap::kUndefinedValueRootIndex); 132 __ CompareRoot(r5, Heap::kUndefinedValueRootIndex);
130 __ b(ne, &ok); 133 __ bne(&ok);
131 134
132 __ ldr(r2, GlobalObjectOperand()); 135 __ LoadP(r5, GlobalObjectOperand());
133 __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalProxyOffset)); 136 __ LoadP(r5, FieldMemOperand(r5, GlobalObject::kGlobalProxyOffset));
134 137
135 __ str(r2, MemOperand(sp, receiver_offset)); 138 __ StoreP(r5, MemOperand(sp, receiver_offset));
136 139
137 __ bind(&ok); 140 __ bind(&ok);
138 } 141 }
139 } 142 }
140 143
141 info()->set_prologue_offset(masm_->pc_offset()); 144 info()->set_prologue_offset(masm_->pc_offset());
142 if (NeedsEagerFrame()) { 145 if (NeedsEagerFrame()) {
143 if (info()->IsStub()) { 146 if (info()->IsStub()) {
144 __ StubPrologue(); 147 __ StubPrologue();
145 } else { 148 } else {
146 __ Prologue(info()->IsCodePreAgingActive()); 149 __ Prologue(info()->IsCodePreAgingActive());
147 } 150 }
148 frame_is_built_ = true; 151 frame_is_built_ = true;
149 info_->AddNoFrameRange(0, masm_->pc_offset()); 152 info_->AddNoFrameRange(0, masm_->pc_offset());
150 } 153 }
151 154
152 // Reserve space for the stack slots needed by the code. 155 // Reserve space for the stack slots needed by the code.
153 int slots = GetStackSlotCount(); 156 int slots = GetStackSlotCount();
154 if (slots > 0) { 157 if (slots > 0) {
158 __ subi(sp, sp, Operand(slots * kPointerSize));
155 if (FLAG_debug_code) { 159 if (FLAG_debug_code) {
156 __ sub(sp, sp, Operand(slots * kPointerSize)); 160 __ Push(r3, r4);
157 __ push(r0); 161 __ li(r0, Operand(slots));
158 __ push(r1); 162 __ mtctr(r0);
159 __ add(r0, sp, Operand(slots * kPointerSize)); 163 __ addi(r3, sp, Operand((slots + 2) * kPointerSize));
160 __ mov(r1, Operand(kSlotsZapValue)); 164 __ mov(r4, Operand(kSlotsZapValue));
161 Label loop; 165 Label loop;
162 __ bind(&loop); 166 __ bind(&loop);
163 __ sub(r0, r0, Operand(kPointerSize)); 167 __ StorePU(r4, MemOperand(r3, -kPointerSize));
164 __ str(r1, MemOperand(r0, 2 * kPointerSize)); 168 __ bdnz(&loop);
165 __ cmp(r0, sp); 169 __ Pop(r3, r4);
166 __ b(ne, &loop);
167 __ pop(r1);
168 __ pop(r0);
169 } else {
170 __ sub(sp, sp, Operand(slots * kPointerSize));
171 } 170 }
172 } 171 }
173 172
174 if (info()->saves_caller_doubles()) { 173 if (info()->saves_caller_doubles()) {
175 SaveCallerDoubles(); 174 SaveCallerDoubles();
176 } 175 }
177 176
178 // Possibly allocate a local context. 177 // Possibly allocate a local context.
179 int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; 178 int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
180 if (heap_slots > 0) { 179 if (heap_slots > 0) {
181 Comment(";;; Allocate local context"); 180 Comment(";;; Allocate local context");
182 bool need_write_barrier = true; 181 bool need_write_barrier = true;
183 // Argument to NewContext is the function, which is in r1. 182 // Argument to NewContext is the function, which is in r4.
184 if (heap_slots <= FastNewContextStub::kMaximumSlots) { 183 if (heap_slots <= FastNewContextStub::kMaximumSlots) {
185 FastNewContextStub stub(isolate(), heap_slots); 184 FastNewContextStub stub(isolate(), heap_slots);
186 __ CallStub(&stub); 185 __ CallStub(&stub);
187 // Result of FastNewContextStub is always in new space. 186 // Result of FastNewContextStub is always in new space.
188 need_write_barrier = false; 187 need_write_barrier = false;
189 } else { 188 } else {
190 __ push(r1); 189 __ push(r4);
191 __ CallRuntime(Runtime::kNewFunctionContext, 1); 190 __ CallRuntime(Runtime::kNewFunctionContext, 1);
192 } 191 }
193 RecordSafepoint(Safepoint::kNoLazyDeopt); 192 RecordSafepoint(Safepoint::kNoLazyDeopt);
194 // Context is returned in both r0 and cp. It replaces the context 193 // Context is returned in both r3 and cp. It replaces the context
195 // passed to us. It's saved in the stack and kept live in cp. 194 // passed to us. It's saved in the stack and kept live in cp.
196 __ mov(cp, r0); 195 __ mr(cp, r3);
197 __ str(r0, MemOperand(fp, StandardFrameConstants::kContextOffset)); 196 __ StoreP(r3, MemOperand(fp, StandardFrameConstants::kContextOffset));
198 // Copy any necessary parameters into the context. 197 // Copy any necessary parameters into the context.
199 int num_parameters = scope()->num_parameters(); 198 int num_parameters = scope()->num_parameters();
200 for (int i = 0; i < num_parameters; i++) { 199 for (int i = 0; i < num_parameters; i++) {
201 Variable* var = scope()->parameter(i); 200 Variable* var = scope()->parameter(i);
202 if (var->IsContextSlot()) { 201 if (var->IsContextSlot()) {
203 int parameter_offset = StandardFrameConstants::kCallerSPOffset + 202 int parameter_offset = StandardFrameConstants::kCallerSPOffset +
204 (num_parameters - 1 - i) * kPointerSize; 203 (num_parameters - 1 - i) * kPointerSize;
205 // Load parameter from stack. 204 // Load parameter from stack.
206 __ ldr(r0, MemOperand(fp, parameter_offset)); 205 __ LoadP(r3, MemOperand(fp, parameter_offset));
207 // Store it in the context. 206 // Store it in the context.
208 MemOperand target = ContextOperand(cp, var->index()); 207 MemOperand target = ContextOperand(cp, var->index());
209 __ str(r0, target); 208 __ StoreP(r3, target, r0);
210 // Update the write barrier. This clobbers r3 and r0. 209 // Update the write barrier. This clobbers r6 and r3.
211 if (need_write_barrier) { 210 if (need_write_barrier) {
212 __ RecordWriteContextSlot( 211 __ RecordWriteContextSlot(
213 cp, 212 cp,
214 target.offset(), 213 target.offset(),
215 r0,
216 r3, 214 r3,
215 r6,
217 GetLinkRegisterState(), 216 GetLinkRegisterState(),
218 kSaveFPRegs); 217 kSaveFPRegs);
219 } else if (FLAG_debug_code) { 218 } else if (FLAG_debug_code) {
220 Label done; 219 Label done;
221 __ JumpIfInNewSpace(cp, r0, &done); 220 __ JumpIfInNewSpace(cp, r3, &done);
222 __ Abort(kExpectedNewSpaceObject); 221 __ Abort(kExpectedNewSpaceObject);
223 __ bind(&done); 222 __ bind(&done);
224 } 223 }
225 } 224 }
226 } 225 }
227 Comment(";;; End allocate local context"); 226 Comment(";;; End allocate local context");
228 } 227 }
229 228
230 // Trace the call. 229 // Trace the call.
231 if (FLAG_trace && info()->IsOptimizing()) { 230 if (FLAG_trace && info()->IsOptimizing()) {
232 // We have not executed any compiled code yet, so cp still holds the 231 // We have not executed any compiled code yet, so cp still holds the
233 // incoming context. 232 // incoming context.
234 __ CallRuntime(Runtime::kTraceEnter, 0); 233 __ CallRuntime(Runtime::kTraceEnter, 0);
235 } 234 }
236 return !is_aborted(); 235 return !is_aborted();
237 } 236 }
238 237
239 238
240 void LCodeGen::GenerateOsrPrologue() { 239 void LCodeGen::GenerateOsrPrologue() {
241 // Generate the OSR entry prologue at the first unknown OSR value, or if there 240 // Generate the OSR entry prologue at the first unknown OSR value, or if there
242 // are none, at the OSR entrypoint instruction. 241 // are none, at the OSR entrypoint instruction.
243 if (osr_pc_offset_ >= 0) return; 242 if (osr_pc_offset_ >= 0) return;
244 243
245 osr_pc_offset_ = masm()->pc_offset(); 244 osr_pc_offset_ = masm()->pc_offset();
246 245
247 // Adjust the frame size, subsuming the unoptimized frame into the 246 // Adjust the frame size, subsuming the unoptimized frame into the
248 // optimized frame. 247 // optimized frame.
249 int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots(); 248 int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
250 DCHECK(slots >= 0); 249 DCHECK(slots >= 0);
251 __ sub(sp, sp, Operand(slots * kPointerSize)); 250 __ subi(sp, sp, Operand(slots * kPointerSize));
252 } 251 }
253 252
254 253
255 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) { 254 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
256 if (instr->IsCall()) { 255 if (instr->IsCall()) {
257 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); 256 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
258 } 257 }
259 if (!instr->IsLazyBailout() && !instr->IsGap()) { 258 if (!instr->IsLazyBailout() && !instr->IsGap()) {
260 safepoints_.BumpLastLazySafepointIndex(); 259 safepoints_.BumpLastLazySafepointIndex();
261 } 260 }
(...skipping 16 matching lines...)
278 code->instruction_index(), 277 code->instruction_index(),
279 code->instr()->hydrogen_value()->id(), 278 code->instr()->hydrogen_value()->id(),
280 code->instr()->Mnemonic()); 279 code->instr()->Mnemonic());
281 __ bind(code->entry()); 280 __ bind(code->entry());
282 if (NeedsDeferredFrame()) { 281 if (NeedsDeferredFrame()) {
283 Comment(";;; Build frame"); 282 Comment(";;; Build frame");
284 DCHECK(!frame_is_built_); 283 DCHECK(!frame_is_built_);
285 DCHECK(info()->IsStub()); 284 DCHECK(info()->IsStub());
286 frame_is_built_ = true; 285 frame_is_built_ = true;
287 __ PushFixedFrame(); 286 __ PushFixedFrame();
288 __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB))); 287 __ LoadSmiLiteral(scratch0(), Smi::FromInt(StackFrame::STUB));
289 __ push(scratch0()); 288 __ push(scratch0());
290 __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); 289 __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
291 Comment(";;; Deferred code"); 290 Comment(";;; Deferred code");
292 } 291 }
293 code->Generate(); 292 code->Generate();
294 if (NeedsDeferredFrame()) { 293 if (NeedsDeferredFrame()) {
295 Comment(";;; Destroy frame"); 294 Comment(";;; Destroy frame");
296 DCHECK(frame_is_built_); 295 DCHECK(frame_is_built_);
297 __ pop(ip); 296 __ pop(ip);
298 __ PopFixedFrame(); 297 __ PopFixedFrame();
299 frame_is_built_ = false; 298 frame_is_built_ = false;
300 } 299 }
301 __ jmp(code->exit()); 300 __ b(code->exit());
302 } 301 }
303 } 302 }
304 303
305 // Force constant pool emission at the end of the deferred code to make
306 // sure that no constant pools are emitted after.
307 masm()->CheckConstPool(true, false);
308
309 return !is_aborted(); 304 return !is_aborted();
310 } 305 }
311 306
312 307
313 bool LCodeGen::GenerateDeoptJumpTable() { 308 bool LCodeGen::GenerateDeoptJumpTable() {
314 // Check that the jump table is accessible from everywhere in the function
315 // code, i.e. that offsets to the table can be encoded in the 24bit signed
316 // immediate of a branch instruction.
317 // To simplify we consider the code size from the first instruction to the
318 // end of the jump table. We also don't consider the pc load delta.
319 // Each entry in the jump table generates one instruction and inlines one
320 // 32bit data after it.
321 if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
322 deopt_jump_table_.length() * 7)) {
323 Abort(kGeneratedCodeIsTooLarge);
324 }
325
326 if (deopt_jump_table_.length() > 0) { 309 if (deopt_jump_table_.length() > 0) {
327 Label needs_frame, call_deopt_entry; 310 Label needs_frame, call_deopt_entry;
328 311
329 Comment(";;; -------------------- Jump table --------------------"); 312 Comment(";;; -------------------- Jump table --------------------");
330 Address base = deopt_jump_table_[0].address; 313 Address base = deopt_jump_table_[0].address;
331 314
332 Register entry_offset = scratch0(); 315 Register entry_offset = scratch0();
333 316
334 int length = deopt_jump_table_.length(); 317 int length = deopt_jump_table_.length();
335 for (int i = 0; i < length; i++) { 318 for (int i = 0; i < length; i++) {
319 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
336 __ bind(&deopt_jump_table_[i].label); 320 __ bind(&deopt_jump_table_[i].label);
337 321
338 Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type; 322 Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type;
339 DCHECK(type == deopt_jump_table_[0].bailout_type); 323 DCHECK(type == deopt_jump_table_[0].bailout_type);
340 Address entry = deopt_jump_table_[i].address; 324 Address entry = deopt_jump_table_[i].address;
341 int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type); 325 int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
342 DCHECK(id != Deoptimizer::kNotDeoptimizationEntry); 326 DCHECK(id != Deoptimizer::kNotDeoptimizationEntry);
343 Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id); 327 Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
344 328
345 // Second-level deopt table entries are contiguous and small, so instead 329 // Second-level deopt table entries are contiguous and small, so instead
346 // of loading the full, absolute address of each one, load an immediate 330 // of loading the full, absolute address of each one, load an immediate
347 // offset which will be added to the base address later. 331 // offset which will be added to the base address later.
348 __ mov(entry_offset, Operand(entry - base)); 332 __ mov(entry_offset, Operand(entry - base));
349 333
350 if (deopt_jump_table_[i].needs_frame) { 334 if (deopt_jump_table_[i].needs_frame) {
351 DCHECK(!info()->saves_caller_doubles()); 335 DCHECK(!info()->saves_caller_doubles());
352 if (needs_frame.is_bound()) { 336 if (needs_frame.is_bound()) {
353 __ b(&needs_frame); 337 __ b(&needs_frame);
354 } else { 338 } else {
355 __ bind(&needs_frame); 339 __ bind(&needs_frame);
356 Comment(";;; call deopt with frame"); 340 Comment(";;; call deopt with frame");
357 __ PushFixedFrame(); 341 __ PushFixedFrame();
358 // This variant of deopt can only be used with stubs. Since we don't 342 // This variant of deopt can only be used with stubs. Since we don't
359 // have a function pointer to install in the stack frame that we're 343 // have a function pointer to install in the stack frame that we're
360 // building, install a special marker there instead. 344 // building, install a special marker there instead.
361 DCHECK(info()->IsStub()); 345 DCHECK(info()->IsStub());
362 __ mov(ip, Operand(Smi::FromInt(StackFrame::STUB))); 346 __ LoadSmiLiteral(r0, Smi::FromInt(StackFrame::STUB));
363 __ push(ip); 347 __ push(r0);
364 __ add(fp, sp, 348 __ addi(fp, sp,
365 Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); 349 Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
366 __ bind(&call_deopt_entry); 350 __ bind(&call_deopt_entry);
367 // Add the base address to the offset previously loaded in 351 // Add the base address to the offset previously loaded in
368 // entry_offset. 352 // entry_offset.
369 __ add(entry_offset, entry_offset, 353 __ mov(ip, Operand(ExternalReference::ForDeoptEntry(base)));
370 Operand(ExternalReference::ForDeoptEntry(base))); 354 __ add(ip, entry_offset, ip);
371 __ blx(entry_offset); 355 __ Call(ip);
372 } 356 }
373
374 masm()->CheckConstPool(false, false);
375 } else { 357 } else {
376 // The last entry can fall through into `call_deopt_entry`, avoiding a 358 // The last entry can fall through into `call_deopt_entry`, avoiding a
377 // branch. 359 // branch.
378 bool need_branch = ((i + 1) != length) || call_deopt_entry.is_bound(); 360 bool need_branch = ((i + 1) != length) || call_deopt_entry.is_bound();
379 361
380 if (need_branch) __ b(&call_deopt_entry); 362 if (need_branch) __ b(&call_deopt_entry);
381
382 masm()->CheckConstPool(false, !need_branch);
383 } 363 }
384 } 364 }
385 365
386 if (!call_deopt_entry.is_bound()) { 366 if (!call_deopt_entry.is_bound()) {
387 Comment(";;; call deopt"); 367 Comment(";;; call deopt");
388 __ bind(&call_deopt_entry); 368 __ bind(&call_deopt_entry);
389 369
390 if (info()->saves_caller_doubles()) { 370 if (info()->saves_caller_doubles()) {
391 DCHECK(info()->IsStub()); 371 DCHECK(info()->IsStub());
392 RestoreCallerDoubles(); 372 RestoreCallerDoubles();
393 } 373 }
394 374
395 // Add the base address to the offset previously loaded in entry_offset. 375 // Add the base address to the offset previously loaded in entry_offset.
396 __ add(entry_offset, entry_offset, 376 __ mov(ip, Operand(ExternalReference::ForDeoptEntry(base)));
397 Operand(ExternalReference::ForDeoptEntry(base))); 377 __ add(ip, entry_offset, ip);
398 __ blx(entry_offset); 378 __ Call(ip);
399 } 379 }
400 } 380 }
401 381
402 // Force constant pool emission at the end of the deopt jump table to make
403 // sure that no constant pools are emitted after.
404 masm()->CheckConstPool(true, false);
405
406 // The deoptimization jump table is the last part of the instruction 382 // The deoptimization jump table is the last part of the instruction
407 // sequence. Mark the generated code as done unless we bailed out. 383 // sequence. Mark the generated code as done unless we bailed out.
408 if (!is_aborted()) status_ = DONE; 384 if (!is_aborted()) status_ = DONE;
409 return !is_aborted(); 385 return !is_aborted();
410 } 386 }
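
For reference, the jump table above keeps each entry small by materializing only the distance from the first deoptimization entry; the shared tail adds the absolute base address (moved into ip) before the call. A minimal sketch of that base-plus-offset addressing, with Address standing in for V8's byte-pointer typedef (illustrative only, not part of the patch):

    typedef unsigned char* Address;
    // Each table entry records only (entry - base); the call site adds base back.
    Address DeoptTarget(Address base, int entry_offset) {
      return base + entry_offset;  // mov ip, base; add ip, entry_offset, ip; Call(ip)
    }
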
411 387
412 388
413 bool LCodeGen::GenerateSafepointTable() { 389 bool LCodeGen::GenerateSafepointTable() {
414 DCHECK(is_done()); 390 DCHECK(is_done());
415 safepoints_.Emit(masm(), GetStackSlotCount()); 391 safepoints_.Emit(masm(), GetStackSlotCount());
416 return !is_aborted(); 392 return !is_aborted();
417 } 393 }
418 394
419 395
420 Register LCodeGen::ToRegister(int index) const { 396 Register LCodeGen::ToRegister(int index) const {
421 return Register::FromAllocationIndex(index); 397 return Register::FromAllocationIndex(index);
422 } 398 }
423 399
424 400
425 DwVfpRegister LCodeGen::ToDoubleRegister(int index) const { 401 DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
426 return DwVfpRegister::FromAllocationIndex(index); 402 return DoubleRegister::FromAllocationIndex(index);
427 } 403 }
428 404
429 405
430 Register LCodeGen::ToRegister(LOperand* op) const { 406 Register LCodeGen::ToRegister(LOperand* op) const {
431 DCHECK(op->IsRegister()); 407 DCHECK(op->IsRegister());
432 return ToRegister(op->index()); 408 return ToRegister(op->index());
433 } 409 }
434 410
435 411
436 Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) { 412 Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
437 if (op->IsRegister()) { 413 if (op->IsRegister()) {
438 return ToRegister(op->index()); 414 return ToRegister(op->index());
439 } else if (op->IsConstantOperand()) { 415 } else if (op->IsConstantOperand()) {
440 LConstantOperand* const_op = LConstantOperand::cast(op); 416 LConstantOperand* const_op = LConstantOperand::cast(op);
441 HConstant* constant = chunk_->LookupConstant(const_op); 417 HConstant* constant = chunk_->LookupConstant(const_op);
442 Handle<Object> literal = constant->handle(isolate()); 418 Handle<Object> literal = constant->handle(isolate());
443 Representation r = chunk_->LookupLiteralRepresentation(const_op); 419 Representation r = chunk_->LookupLiteralRepresentation(const_op);
444 if (r.IsInteger32()) { 420 if (r.IsInteger32()) {
445 DCHECK(literal->IsNumber()); 421 DCHECK(literal->IsNumber());
446 __ mov(scratch, Operand(static_cast<int32_t>(literal->Number()))); 422 __ LoadIntLiteral(scratch, static_cast<int32_t>(literal->Number()));
447 } else if (r.IsDouble()) { 423 } else if (r.IsDouble()) {
448 Abort(kEmitLoadRegisterUnsupportedDoubleImmediate); 424 Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
449 } else { 425 } else {
450 DCHECK(r.IsSmiOrTagged()); 426 DCHECK(r.IsSmiOrTagged());
451 __ Move(scratch, literal); 427 __ Move(scratch, literal);
452 } 428 }
453 return scratch; 429 return scratch;
454 } else if (op->IsStackSlot()) { 430 } else if (op->IsStackSlot()) {
455 __ ldr(scratch, ToMemOperand(op)); 431 __ LoadP(scratch, ToMemOperand(op));
456 return scratch; 432 return scratch;
457 } 433 }
458 UNREACHABLE(); 434 UNREACHABLE();
459 return scratch; 435 return scratch;
460 } 436 }
461 437
462 438
463 DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const { 439 void LCodeGen::EmitLoadIntegerConstant(LConstantOperand* const_op,
440 Register dst) {
441 DCHECK(IsInteger32(const_op));
442 HConstant* constant = chunk_->LookupConstant(const_op);
443 int32_t value = constant->Integer32Value();
444 if (IsSmi(const_op)) {
445 __ LoadSmiLiteral(dst, Smi::FromInt(value));
446 } else {
447 __ LoadIntLiteral(dst, value);
448 }
449 }
450
451
452 DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
464 DCHECK(op->IsDoubleRegister()); 453 DCHECK(op->IsDoubleRegister());
465 return ToDoubleRegister(op->index()); 454 return ToDoubleRegister(op->index());
466 } 455 }
467 456
468 457
469 DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
470 SwVfpRegister flt_scratch,
471 DwVfpRegister dbl_scratch) {
472 if (op->IsDoubleRegister()) {
473 return ToDoubleRegister(op->index());
474 } else if (op->IsConstantOperand()) {
475 LConstantOperand* const_op = LConstantOperand::cast(op);
476 HConstant* constant = chunk_->LookupConstant(const_op);
477 Handle<Object> literal = constant->handle(isolate());
478 Representation r = chunk_->LookupLiteralRepresentation(const_op);
479 if (r.IsInteger32()) {
480 DCHECK(literal->IsNumber());
481 __ mov(ip, Operand(static_cast<int32_t>(literal->Number())));
482 __ vmov(flt_scratch, ip);
483 __ vcvt_f64_s32(dbl_scratch, flt_scratch);
484 return dbl_scratch;
485 } else if (r.IsDouble()) {
486 Abort(kUnsupportedDoubleImmediate);
487 } else if (r.IsTagged()) {
488 Abort(kUnsupportedTaggedImmediate);
489 }
490 } else if (op->IsStackSlot()) {
491 // TODO(regis): Why is vldr not taking a MemOperand?
492 // __ vldr(dbl_scratch, ToMemOperand(op));
493 MemOperand mem_op = ToMemOperand(op);
494 __ vldr(dbl_scratch, mem_op.rn(), mem_op.offset());
495 return dbl_scratch;
496 }
497 UNREACHABLE();
498 return dbl_scratch;
499 }
500
501
502 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const { 458 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
503 HConstant* constant = chunk_->LookupConstant(op); 459 HConstant* constant = chunk_->LookupConstant(op);
504 DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged()); 460 DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
505 return constant->handle(isolate()); 461 return constant->handle(isolate());
506 } 462 }
507 463
508 464
509 bool LCodeGen::IsInteger32(LConstantOperand* op) const { 465 bool LCodeGen::IsInteger32(LConstantOperand* op) const {
510 return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32(); 466 return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
511 } 467 }
512 468
513 469
514 bool LCodeGen::IsSmi(LConstantOperand* op) const { 470 bool LCodeGen::IsSmi(LConstantOperand* op) const {
515 return chunk_->LookupLiteralRepresentation(op).IsSmi(); 471 return chunk_->LookupLiteralRepresentation(op).IsSmi();
516 } 472 }
517 473
518 474
519 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const { 475 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
520 return ToRepresentation(op, Representation::Integer32()); 476 return ToRepresentation(op, Representation::Integer32());
521 } 477 }
522 478
523 479
524 int32_t LCodeGen::ToRepresentation(LConstantOperand* op, 480 intptr_t LCodeGen::ToRepresentation(LConstantOperand* op,
525 const Representation& r) const { 481 const Representation& r) const {
526 HConstant* constant = chunk_->LookupConstant(op); 482 HConstant* constant = chunk_->LookupConstant(op);
527 int32_t value = constant->Integer32Value(); 483 int32_t value = constant->Integer32Value();
528 if (r.IsInteger32()) return value; 484 if (r.IsInteger32()) return value;
529 DCHECK(r.IsSmiOrTagged()); 485 DCHECK(r.IsSmiOrTagged());
530 return reinterpret_cast<int32_t>(Smi::FromInt(value)); 486 return reinterpret_cast<intptr_t>(Smi::FromInt(value));
531 } 487 }
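
The return type change from int32_t to intptr_t above reflects that raw Smi bits are pointer-sized. A rough sketch of why they no longer fit in 32 bits on PPC64, assuming the usual V8 tagging of this era (31-bit payload shifted by 1 on 32-bit targets, 32-bit payload shifted by 32 on 64-bit targets); kSmiShift is an illustrative name, not the patch's:

    #include <cstdint>
    intptr_t SmiRawBits(int32_t value) {
      const int kSmiShift = (sizeof(void*) == 8) ? 32 : 1;
      return static_cast<intptr_t>(value) << kSmiShift;
    }
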
532 488
533 489
534 Smi* LCodeGen::ToSmi(LConstantOperand* op) const { 490 Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
535 HConstant* constant = chunk_->LookupConstant(op); 491 HConstant* constant = chunk_->LookupConstant(op);
536 return Smi::FromInt(constant->Integer32Value()); 492 return Smi::FromInt(constant->Integer32Value());
537 } 493 }
538 494
539 495
540 double LCodeGen::ToDouble(LConstantOperand* op) const { 496 double LCodeGen::ToDouble(LConstantOperand* op) const {
(...skipping 181 matching lines...)
722 } else if (op->IsConstantOperand()) { 678 } else if (op->IsConstantOperand()) {
723 HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op)); 679 HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
724 int src_index = DefineDeoptimizationLiteral(constant->handle(isolate())); 680 int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
725 translation->StoreLiteral(src_index); 681 translation->StoreLiteral(src_index);
726 } else { 682 } else {
727 UNREACHABLE(); 683 UNREACHABLE();
728 } 684 }
729 } 685 }
730 686
731 687
732 int LCodeGen::CallCodeSize(Handle<Code> code, RelocInfo::Mode mode) {
733 int size = masm()->CallSize(code, mode);
734 if (code->kind() == Code::BINARY_OP_IC ||
735 code->kind() == Code::COMPARE_IC) {
736 size += Assembler::kInstrSize; // extra nop() added in CallCodeGeneric.
737 }
738 return size;
739 }
740
741
742 void LCodeGen::CallCode(Handle<Code> code, 688 void LCodeGen::CallCode(Handle<Code> code,
743 RelocInfo::Mode mode, 689 RelocInfo::Mode mode,
744 LInstruction* instr, 690 LInstruction* instr) {
745 TargetAddressStorageMode storage_mode) { 691 CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
746 CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, storage_mode);
747 } 692 }
748 693
749 694
750 void LCodeGen::CallCodeGeneric(Handle<Code> code, 695 void LCodeGen::CallCodeGeneric(Handle<Code> code,
751 RelocInfo::Mode mode, 696 RelocInfo::Mode mode,
752 LInstruction* instr, 697 LInstruction* instr,
753 SafepointMode safepoint_mode, 698 SafepointMode safepoint_mode) {
754 TargetAddressStorageMode storage_mode) {
755 DCHECK(instr != NULL); 699 DCHECK(instr != NULL);
756 // Block literal pool emission to ensure nop indicating no inlined smi code 700 __ Call(code, mode);
757 // is in the correct position.
758 Assembler::BlockConstPoolScope block_const_pool(masm());
759 __ Call(code, mode, TypeFeedbackId::None(), al, storage_mode);
760 RecordSafepointWithLazyDeopt(instr, safepoint_mode); 701 RecordSafepointWithLazyDeopt(instr, safepoint_mode);
761 702
762 // Signal that we don't inline smi code before these stubs in the 703 // Signal that we don't inline smi code before these stubs in the
763 // optimizing code generator. 704 // optimizing code generator.
764 if (code->kind() == Code::BINARY_OP_IC || 705 if (code->kind() == Code::BINARY_OP_IC ||
765 code->kind() == Code::COMPARE_IC) { 706 code->kind() == Code::COMPARE_IC) {
766 __ nop(); 707 __ nop();
767 } 708 }
768 } 709 }
769 710
770 711
771 void LCodeGen::CallRuntime(const Runtime::Function* function, 712 void LCodeGen::CallRuntime(const Runtime::Function* function,
772 int num_arguments, 713 int num_arguments,
773 LInstruction* instr, 714 LInstruction* instr,
774 SaveFPRegsMode save_doubles) { 715 SaveFPRegsMode save_doubles) {
775 DCHECK(instr != NULL); 716 DCHECK(instr != NULL);
776 717
777 __ CallRuntime(function, num_arguments, save_doubles); 718 __ CallRuntime(function, num_arguments, save_doubles);
778 719
779 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); 720 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
780 } 721 }
781 722
782 723
783 void LCodeGen::LoadContextFromDeferred(LOperand* context) { 724 void LCodeGen::LoadContextFromDeferred(LOperand* context) {
784 if (context->IsRegister()) { 725 if (context->IsRegister()) {
785 __ Move(cp, ToRegister(context)); 726 __ Move(cp, ToRegister(context));
786 } else if (context->IsStackSlot()) { 727 } else if (context->IsStackSlot()) {
787 __ ldr(cp, ToMemOperand(context)); 728 __ LoadP(cp, ToMemOperand(context));
788 } else if (context->IsConstantOperand()) { 729 } else if (context->IsConstantOperand()) {
789 HConstant* constant = 730 HConstant* constant =
790 chunk_->LookupConstant(LConstantOperand::cast(context)); 731 chunk_->LookupConstant(LConstantOperand::cast(context));
791 __ Move(cp, Handle<Object>::cast(constant->handle(isolate()))); 732 __ Move(cp, Handle<Object>::cast(constant->handle(isolate())));
792 } else { 733 } else {
793 UNREACHABLE(); 734 UNREACHABLE();
794 } 735 }
795 } 736 }
796 737
797 738
(...skipping 38 matching lines...)
836 int deoptimization_index = deoptimizations_.length(); 777 int deoptimization_index = deoptimizations_.length();
837 int pc_offset = masm()->pc_offset(); 778 int pc_offset = masm()->pc_offset();
838 environment->Register(deoptimization_index, 779 environment->Register(deoptimization_index,
839 translation.index(), 780 translation.index(),
840 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); 781 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
841 deoptimizations_.Add(environment, zone()); 782 deoptimizations_.Add(environment, zone());
842 } 783 }
843 } 784 }
844 785
845 786
846 void LCodeGen::DeoptimizeIf(Condition condition, 787 void LCodeGen::DeoptimizeIf(Condition cond,
847 LEnvironment* environment, 788 LEnvironment* environment,
848 Deoptimizer::BailoutType bailout_type) { 789 Deoptimizer::BailoutType bailout_type,
790 CRegister cr) {
849 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); 791 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
850 DCHECK(environment->HasBeenRegistered()); 792 DCHECK(environment->HasBeenRegistered());
851 int id = environment->deoptimization_index(); 793 int id = environment->deoptimization_index();
852 DCHECK(info()->IsOptimizing() || info()->IsStub()); 794 DCHECK(info()->IsOptimizing() || info()->IsStub());
853 Address entry = 795 Address entry =
854 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); 796 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
855 if (entry == NULL) { 797 if (entry == NULL) {
856 Abort(kBailoutWasNotPrepared); 798 Abort(kBailoutWasNotPrepared);
857 return; 799 return;
858 } 800 }
859 801
860 if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) { 802 if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
803 CRegister alt_cr = cr6;
861 Register scratch = scratch0(); 804 Register scratch = scratch0();
862 ExternalReference count = ExternalReference::stress_deopt_count(isolate()); 805 ExternalReference count = ExternalReference::stress_deopt_count(isolate());
806 Label no_deopt;
807 DCHECK(!alt_cr.is(cr));
808 __ Push(r4, scratch);
809 __ mov(scratch, Operand(count));
810 __ lwz(r4, MemOperand(scratch));
811 __ subi(r4, r4, Operand(1));
812 __ cmpi(r4, Operand::Zero(), alt_cr);
813 __ bne(&no_deopt, alt_cr);
814 __ li(r4, Operand(FLAG_deopt_every_n_times));
815 __ stw(r4, MemOperand(scratch));
816 __ Pop(r4, scratch);
863 817
864 // Store the condition on the stack if necessary 818 __ Call(entry, RelocInfo::RUNTIME_ENTRY);
865 if (condition != al) { 819 __ bind(&no_deopt);
866 __ mov(scratch, Operand::Zero(), LeaveCC, NegateCondition(condition)); 820 __ stw(r4, MemOperand(scratch));
867 __ mov(scratch, Operand(1), LeaveCC, condition); 821 __ Pop(r4, scratch);
868 __ push(scratch);
869 }
870
871 __ push(r1);
872 __ mov(scratch, Operand(count));
873 __ ldr(r1, MemOperand(scratch));
874 __ sub(r1, r1, Operand(1), SetCC);
875 __ mov(r1, Operand(FLAG_deopt_every_n_times), LeaveCC, eq);
876 __ str(r1, MemOperand(scratch));
877 __ pop(r1);
878
879 if (condition != al) {
880 // Clean up the stack before the deoptimizer call
881 __ pop(scratch);
882 }
883
884 __ Call(entry, RelocInfo::RUNTIME_ENTRY, eq);
885
886 // 'Restore' the condition in a slightly hacky way. (It would be better
887 // to use 'msr' and 'mrs' instructions here, but they are not supported by
888 // our ARM simulator).
889 if (condition != al) {
890 condition = ne;
891 __ cmp(scratch, Operand::Zero());
892 }
893 } 822 }
894 823
895 if (info()->ShouldTrapOnDeopt()) { 824 if (info()->ShouldTrapOnDeopt()) {
896 __ stop("trap_on_deopt", condition); 825 __ stop("trap_on_deopt", cond, kDefaultStopCode, cr);
897 } 826 }
898 827
899 DCHECK(info()->IsStub() || frame_is_built_); 828 DCHECK(info()->IsStub() || frame_is_built_);
900 // Go through jump table if we need to handle condition, build frame, or 829 // Go through jump table if we need to handle condition, build frame, or
901 // restore caller doubles. 830 // restore caller doubles.
902 if (condition == al && frame_is_built_ && 831 if (cond == al && frame_is_built_ &&
903 !info()->saves_caller_doubles()) { 832 !info()->saves_caller_doubles()) {
904 __ Call(entry, RelocInfo::RUNTIME_ENTRY); 833 __ Call(entry, RelocInfo::RUNTIME_ENTRY);
905 } else { 834 } else {
906 // We often have several deopts to the same entry, reuse the last 835 // We often have several deopts to the same entry, reuse the last
907 // jump entry if this is the case. 836 // jump entry if this is the case.
908 if (deopt_jump_table_.is_empty() || 837 if (deopt_jump_table_.is_empty() ||
909 (deopt_jump_table_.last().address != entry) || 838 (deopt_jump_table_.last().address != entry) ||
910 (deopt_jump_table_.last().bailout_type != bailout_type) || 839 (deopt_jump_table_.last().bailout_type != bailout_type) ||
911 (deopt_jump_table_.last().needs_frame != !frame_is_built_)) { 840 (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
912 Deoptimizer::JumpTableEntry table_entry(entry, 841 Deoptimizer::JumpTableEntry table_entry(entry,
913 bailout_type, 842 bailout_type,
914 !frame_is_built_); 843 !frame_is_built_);
915 deopt_jump_table_.Add(table_entry, zone()); 844 deopt_jump_table_.Add(table_entry, zone());
916 } 845 }
917 __ b(condition, &deopt_jump_table_.last().label); 846 __ b(cond, &deopt_jump_table_.last().label, cr);
918 } 847 }
919 } 848 }
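
The new FLAG_deopt_every_n_times block above replaces the ARM condition-juggling with a straight countdown guarded by an alternate condition register. What it computes, as a hedged sketch (hypothetical helper, not the patch's code):

    // Global countdown: force an eager deopt and reset once it reaches zero.
    void MaybeStressDeopt(int* counter, int reset_value, void (*force_deopt)()) {
      if (--*counter == 0) {
        *counter = reset_value;
        force_deopt();
      }
    }
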
920 849
921 850
922 void LCodeGen::DeoptimizeIf(Condition condition, 851 void LCodeGen::DeoptimizeIf(Condition cond,
923 LEnvironment* environment) { 852 LEnvironment* environment,
853 CRegister cr) {
924 Deoptimizer::BailoutType bailout_type = info()->IsStub() 854 Deoptimizer::BailoutType bailout_type = info()->IsStub()
925 ? Deoptimizer::LAZY 855 ? Deoptimizer::LAZY
926 : Deoptimizer::EAGER; 856 : Deoptimizer::EAGER;
927 DeoptimizeIf(condition, environment, bailout_type); 857 DeoptimizeIf(cond, environment, bailout_type, cr);
928 } 858 }
929 859
930 860
931 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { 861 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
932 int length = deoptimizations_.length(); 862 int length = deoptimizations_.length();
933 if (length == 0) return; 863 if (length == 0) return;
934 Handle<DeoptimizationInputData> data = 864 Handle<DeoptimizationInputData> data =
935 DeoptimizationInputData::New(isolate(), length, 0, TENURED); 865 DeoptimizationInputData::New(isolate(), length, 0, TENURED);
936 866
937 Handle<ByteArray> translations = 867 Handle<ByteArray> translations =
(...skipping 83 matching lines...)
1021 Safepoint safepoint = safepoints_.DefineSafepoint(masm(), 951 Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
1022 kind, arguments, deopt_mode); 952 kind, arguments, deopt_mode);
1023 for (int i = 0; i < operands->length(); i++) { 953 for (int i = 0; i < operands->length(); i++) {
1024 LOperand* pointer = operands->at(i); 954 LOperand* pointer = operands->at(i);
1025 if (pointer->IsStackSlot()) { 955 if (pointer->IsStackSlot()) {
1026 safepoint.DefinePointerSlot(pointer->index(), zone()); 956 safepoint.DefinePointerSlot(pointer->index(), zone());
1027 } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) { 957 } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
1028 safepoint.DefinePointerRegister(ToRegister(pointer), zone()); 958 safepoint.DefinePointerRegister(ToRegister(pointer), zone());
1029 } 959 }
1030 } 960 }
1031 if (FLAG_enable_ool_constant_pool && (kind & Safepoint::kWithRegisters)) { 961 #if V8_OOL_CONSTANT_POOL
1032 // Register pp always contains a pointer to the constant pool. 962 if (kind & Safepoint::kWithRegisters) {
1033 safepoint.DefinePointerRegister(pp, zone()); 963 // Register always contains a pointer to the constant pool.
964 safepoint.DefinePointerRegister(kConstantPoolRegister, zone());
1034 } 965 }
966 #endif
1035 } 967 }
1036 968
1037 969
1038 void LCodeGen::RecordSafepoint(LPointerMap* pointers, 970 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
1039 Safepoint::DeoptMode deopt_mode) { 971 Safepoint::DeoptMode deopt_mode) {
1040 RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode); 972 RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
1041 } 973 }
1042 974
1043 975
1044 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) { 976 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
(...skipping 57 matching lines...)
1102 } 1034 }
1103 1035
1104 1036
1105 void LCodeGen::DoParameter(LParameter* instr) { 1037 void LCodeGen::DoParameter(LParameter* instr) {
1106 // Nothing to do. 1038 // Nothing to do.
1107 } 1039 }
1108 1040
1109 1041
1110 void LCodeGen::DoCallStub(LCallStub* instr) { 1042 void LCodeGen::DoCallStub(LCallStub* instr) {
1111 DCHECK(ToRegister(instr->context()).is(cp)); 1043 DCHECK(ToRegister(instr->context()).is(cp));
1112 DCHECK(ToRegister(instr->result()).is(r0)); 1044 DCHECK(ToRegister(instr->result()).is(r3));
1113 switch (instr->hydrogen()->major_key()) { 1045 switch (instr->hydrogen()->major_key()) {
1114 case CodeStub::RegExpExec: { 1046 case CodeStub::RegExpExec: {
1115 RegExpExecStub stub(isolate()); 1047 RegExpExecStub stub(isolate());
1116 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 1048 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1117 break; 1049 break;
1118 } 1050 }
1119 case CodeStub::SubString: { 1051 case CodeStub::SubString: {
1120 SubStringStub stub(isolate()); 1052 SubStringStub stub(isolate());
1121 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 1053 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1122 break; 1054 break;
(...skipping 19 matching lines...)
1142 int32_t divisor = instr->divisor(); 1074 int32_t divisor = instr->divisor();
1143 DCHECK(dividend.is(ToRegister(instr->result()))); 1075 DCHECK(dividend.is(ToRegister(instr->result())));
1144 1076
1145 // Theoretically, a variation of the branch-free code for integer division by 1077 // Theoretically, a variation of the branch-free code for integer division by
1146 // a power of 2 (calculating the remainder via an additional multiplication 1078 // a power of 2 (calculating the remainder via an additional multiplication
1147 // (which gets simplified to an 'and') and subtraction) should be faster, and 1079 // (which gets simplified to an 'and') and subtraction) should be faster, and
1148 // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to 1080 // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
1149 // indicate that positive dividends are heavily favored, so the branching 1081 // indicate that positive dividends are heavily favored, so the branching
1150 // version performs better. 1082 // version performs better.
1151 HMod* hmod = instr->hydrogen(); 1083 HMod* hmod = instr->hydrogen();
1152 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); 1084 int32_t shift = WhichPowerOf2Abs(divisor);
1153 Label dividend_is_not_negative, done; 1085 Label dividend_is_not_negative, done;
1154 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { 1086 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
1155 __ cmp(dividend, Operand::Zero()); 1087 __ cmpwi(dividend, Operand::Zero());
1156 __ b(pl, &dividend_is_not_negative); 1088 __ bge(&dividend_is_not_negative);
1157 // Note that this is correct even for kMinInt operands. 1089 if (shift) {
1158 __ rsb(dividend, dividend, Operand::Zero()); 1090 // Note that this is correct even for kMinInt operands.
1159 __ and_(dividend, dividend, Operand(mask)); 1091 __ neg(dividend, dividend);
1160 __ rsb(dividend, dividend, Operand::Zero(), SetCC); 1092 __ ExtractBitRange(dividend, dividend, shift - 1, 0);
1161 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { 1093 __ neg(dividend, dividend, LeaveOE, SetRC);
1162 DeoptimizeIf(eq, instr->environment()); 1094 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1095 DeoptimizeIf(eq, instr->environment(), cr0);
1096 }
1097 } else if (!hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1098 __ li(dividend, Operand::Zero());
1099 } else {
1100 DeoptimizeIf(al, instr->environment());
1163 } 1101 }
1164 __ b(&done); 1102 __ b(&done);
1165 } 1103 }
1166 1104
1167 __ bind(&dividend_is_not_negative); 1105 __ bind(&dividend_is_not_negative);
1168 __ and_(dividend, dividend, Operand(mask)); 1106 if (shift) {
1107 __ ExtractBitRange(dividend, dividend, shift - 1, 0);
1108 } else {
1109 __ li(dividend, Operand::Zero());
1110 }
1169 __ bind(&done); 1111 __ bind(&done);
1170 } 1112 }
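
Both the ARM mask-based original and the PPC ExtractBitRange version above compute a remainder that carries the dividend's sign by masking the magnitude and negating back. A minimal C++ sketch of that idea (hypothetical helper; shift in [0, 31]):

    #include <cstdint>
    int32_t ModByPowerOf2(int32_t x, int shift) {
      uint32_t mask = (1u << shift) - 1;       // low shift bits
      uint32_t ux = static_cast<uint32_t>(x);
      if (x < 0) {
        uint32_t magnitude = 0u - ux;          // |x| mod 2^32; correct even for kMinInt
        return -static_cast<int32_t>(magnitude & mask);
      }
      return static_cast<int32_t>(ux & mask);
    }
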
1171 1113
1172 1114
1173 void LCodeGen::DoModByConstI(LModByConstI* instr) { 1115 void LCodeGen::DoModByConstI(LModByConstI* instr) {
1174 Register dividend = ToRegister(instr->dividend()); 1116 Register dividend = ToRegister(instr->dividend());
1175 int32_t divisor = instr->divisor(); 1117 int32_t divisor = instr->divisor();
1176 Register result = ToRegister(instr->result()); 1118 Register result = ToRegister(instr->result());
1177 DCHECK(!dividend.is(result)); 1119 DCHECK(!dividend.is(result));
1178 1120
1179 if (divisor == 0) { 1121 if (divisor == 0) {
1180 DeoptimizeIf(al, instr->environment()); 1122 DeoptimizeIf(al, instr->environment());
1181 return; 1123 return;
1182 } 1124 }
1183 1125
1184 __ TruncatingDiv(result, dividend, Abs(divisor)); 1126 __ TruncatingDiv(result, dividend, Abs(divisor));
1185 __ mov(ip, Operand(Abs(divisor))); 1127 __ mov(ip, Operand(Abs(divisor)));
1186 __ smull(result, ip, result, ip); 1128 __ mullw(result, result, ip);
1187 __ sub(result, dividend, result, SetCC); 1129 __ sub(result, dividend, result, LeaveOE, SetRC);
1188 1130
1189 // Check for negative zero. 1131 // Check for negative zero.
1190 HMod* hmod = instr->hydrogen(); 1132 HMod* hmod = instr->hydrogen();
1191 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { 1133 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1192 Label remainder_not_zero; 1134 Label remainder_not_zero;
1193 __ b(ne, &remainder_not_zero); 1135 __ bne(&remainder_not_zero, cr0);
1194 __ cmp(dividend, Operand::Zero()); 1136 __ cmpwi(dividend, Operand::Zero());
1195 DeoptimizeIf(lt, instr->environment()); 1137 DeoptimizeIf(lt, instr->environment());
1196 __ bind(&remainder_not_zero); 1138 __ bind(&remainder_not_zero);
1197 } 1139 }
1198 } 1140 }
1199 1141
1200 1142
1201 void LCodeGen::DoModI(LModI* instr) { 1143 void LCodeGen::DoModI(LModI* instr) {
1202 HMod* hmod = instr->hydrogen(); 1144 HMod* hmod = instr->hydrogen();
1203 if (CpuFeatures::IsSupported(SUDIV)) { 1145 Register left_reg = ToRegister(instr->left());
1204 CpuFeatureScope scope(masm(), SUDIV); 1146 Register right_reg = ToRegister(instr->right());
1147 Register result_reg = ToRegister(instr->result());
1148 Register scratch = scratch0();
1149 Label done;
1205 1150
1206 Register left_reg = ToRegister(instr->left()); 1151 if (hmod->CheckFlag(HValue::kCanOverflow)) {
1207 Register right_reg = ToRegister(instr->right()); 1152 __ li(r0, Operand::Zero()); // clear xer
1208 Register result_reg = ToRegister(instr->result()); 1153 __ mtxer(r0);
1154 }
1209 1155
1210 Label done; 1156 __ divw(scratch, left_reg, right_reg, SetOE, SetRC);
1211 // Check for x % 0, sdiv might signal an exception. We have to deopt in this 1157
1212 // case because we can't return a NaN. 1158 // Check for x % 0.
1213 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { 1159 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
1214 __ cmp(right_reg, Operand::Zero()); 1160 __ cmpwi(right_reg, Operand::Zero());
1215 DeoptimizeIf(eq, instr->environment()); 1161 DeoptimizeIf(eq, instr->environment());
1162 }
1163
1164 // Check for kMinInt % -1, divw will return undefined, which is not what we
1165 // want. We have to deopt if we care about -0, because we can't return that.
1166 if (hmod->CheckFlag(HValue::kCanOverflow)) {
1167 Label no_overflow_possible;
1168 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1169 DeoptimizeIf(overflow, instr->environment(), cr0);
1170 } else {
1171 __ bnooverflow(&no_overflow_possible, cr0);
1172 __ li(result_reg, Operand::Zero());
1173 __ b(&done);
1216 } 1174 }
1175 __ bind(&no_overflow_possible);
1176 }
1217 1177
1218 // Check for kMinInt % -1, sdiv will return kMinInt, which is not what we 1178 __ mullw(scratch, right_reg, scratch);
1219 // want. We have to deopt if we care about -0, because we can't return that. 1179 __ sub(result_reg, left_reg, scratch, LeaveOE, SetRC);
1220 if (hmod->CheckFlag(HValue::kCanOverflow)) {
1221 Label no_overflow_possible;
1222 __ cmp(left_reg, Operand(kMinInt));
1223 __ b(ne, &no_overflow_possible);
1224 __ cmp(right_reg, Operand(-1));
1225 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1226 DeoptimizeIf(eq, instr->environment());
1227 } else {
1228 __ b(ne, &no_overflow_possible);
1229 __ mov(result_reg, Operand::Zero());
1230 __ jmp(&done);
1231 }
1232 __ bind(&no_overflow_possible);
1233 }
1234 1180
1235 // For 'r3 = r1 % r2' we can have the following ARM code: 1181 // If we care about -0, test if the dividend is <0 and the result is 0.
1236 // sdiv r3, r1, r2 1182 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1237 // mls r3, r3, r2, r1 1183 __ bne(&done, cr0);
1184 __ cmpwi(left_reg, Operand::Zero());
1185 DeoptimizeIf(lt, instr->environment());
1186 }
1238 1187
1239 __ sdiv(result_reg, left_reg, right_reg); 1188 __ bind(&done);
1240 __ Mls(result_reg, result_reg, right_reg, left_reg);
1241
1242 // If we care about -0, test if the dividend is <0 and the result is 0.
1243 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1244 __ cmp(result_reg, Operand::Zero());
1245 __ b(ne, &done);
1246 __ cmp(left_reg, Operand::Zero());
1247 DeoptimizeIf(lt, instr->environment());
1248 }
1249 __ bind(&done);
1250
1251 } else {
1252 // General case, without any SDIV support.
1253 Register left_reg = ToRegister(instr->left());
1254 Register right_reg = ToRegister(instr->right());
1255 Register result_reg = ToRegister(instr->result());
1256 Register scratch = scratch0();
1257 DCHECK(!scratch.is(left_reg));
1258 DCHECK(!scratch.is(right_reg));
1259 DCHECK(!scratch.is(result_reg));
1260 DwVfpRegister dividend = ToDoubleRegister(instr->temp());
1261 DwVfpRegister divisor = ToDoubleRegister(instr->temp2());
1262 DCHECK(!divisor.is(dividend));
1263 LowDwVfpRegister quotient = double_scratch0();
1264 DCHECK(!quotient.is(dividend));
1265 DCHECK(!quotient.is(divisor));
1266
1267 Label done;
1268 // Check for x % 0, we have to deopt in this case because we can't return a
1269 // NaN.
1270 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
1271 __ cmp(right_reg, Operand::Zero());
1272 DeoptimizeIf(eq, instr->environment());
1273 }
1274
1275 __ Move(result_reg, left_reg);
1276 // Load the arguments in VFP registers. The divisor value is preloaded
1277 // before. Be careful that 'right_reg' is only live on entry.
1278 // TODO(svenpanne) The last comments seems to be wrong nowadays.
1279 __ vmov(double_scratch0().low(), left_reg);
1280 __ vcvt_f64_s32(dividend, double_scratch0().low());
1281 __ vmov(double_scratch0().low(), right_reg);
1282 __ vcvt_f64_s32(divisor, double_scratch0().low());
1283
1284 // We do not care about the sign of the divisor. Note that we still handle
1285 // the kMinInt % -1 case correctly, though.
1286 __ vabs(divisor, divisor);
1287 // Compute the quotient and round it to a 32bit integer.
1288 __ vdiv(quotient, dividend, divisor);
1289 __ vcvt_s32_f64(quotient.low(), quotient);
1290 __ vcvt_f64_s32(quotient, quotient.low());
1291
1292 // Compute the remainder in result.
1293 __ vmul(double_scratch0(), divisor, quotient);
1294 __ vcvt_s32_f64(double_scratch0().low(), double_scratch0());
1295 __ vmov(scratch, double_scratch0().low());
1296 __ sub(result_reg, left_reg, scratch, SetCC);
1297
1298 // If we care about -0, test if the dividend is <0 and the result is 0.
1299 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1300 __ b(ne, &done);
1301 __ cmp(left_reg, Operand::Zero());
1302 DeoptimizeIf(mi, instr->environment());
1303 }
1304 __ bind(&done);
1305 }
1306 } 1189 }
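
The divw/mullw/sub sequence in the PPC DoModI derives the remainder from a truncating quotient, so it inherits the dividend's sign. The identity in plain C++, assuming right != 0 and excluding kMinInt / -1 (both deopt-checked above):

    #include <cstdint>
    int32_t ModI(int32_t left, int32_t right) {
      int32_t quotient = left / right;  // divw
      return left - quotient * right;   // mullw + sub
    }
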
1307 1190
1308 1191
1309 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { 1192 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
1310 Register dividend = ToRegister(instr->dividend()); 1193 Register dividend = ToRegister(instr->dividend());
1311 int32_t divisor = instr->divisor(); 1194 int32_t divisor = instr->divisor();
1312 Register result = ToRegister(instr->result()); 1195 Register result = ToRegister(instr->result());
1313 DCHECK(divisor == kMinInt || IsPowerOf2(Abs(divisor))); 1196 DCHECK(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
1314 DCHECK(!result.is(dividend)); 1197 DCHECK(!result.is(dividend));
1315 1198
1316 // Check for (0 / -x) that will produce negative zero. 1199 // Check for (0 / -x) that will produce negative zero.
1317 HDiv* hdiv = instr->hydrogen(); 1200 HDiv* hdiv = instr->hydrogen();
1318 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { 1201 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1319 __ cmp(dividend, Operand::Zero()); 1202 __ cmpwi(dividend, Operand::Zero());
1320 DeoptimizeIf(eq, instr->environment()); 1203 DeoptimizeIf(eq, instr->environment());
1321 } 1204 }
1322 // Check for (kMinInt / -1). 1205 // Check for (kMinInt / -1).
1323 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { 1206 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
1324 __ cmp(dividend, Operand(kMinInt)); 1207 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
1208 __ cmpw(dividend, r0);
1325 DeoptimizeIf(eq, instr->environment()); 1209 DeoptimizeIf(eq, instr->environment());
1326 } 1210 }
1211
1212 int32_t shift = WhichPowerOf2Abs(divisor);
1213
1327 // Deoptimize if remainder will not be 0. 1214 // Deoptimize if remainder will not be 0.
1328 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && 1215 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && shift) {
1329 divisor != 1 && divisor != -1) { 1216 __ TestBitRange(dividend, shift - 1, 0, r0);
1330 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); 1217 DeoptimizeIf(ne, instr->environment(), cr0);
1331 __ tst(dividend, Operand(mask));
1332 DeoptimizeIf(ne, instr->environment());
1333 } 1218 }
1334 1219
1335 if (divisor == -1) { // Nice shortcut, not needed for correctness. 1220 if (divisor == -1) { // Nice shortcut, not needed for correctness.
1336 __ rsb(result, dividend, Operand(0)); 1221 __ neg(result, dividend);
1337 return; 1222 return;
1338 } 1223 }
1339 int32_t shift = WhichPowerOf2Abs(divisor);
1340 if (shift == 0) { 1224 if (shift == 0) {
1341 __ mov(result, dividend); 1225 __ mr(result, dividend);
1342 } else if (shift == 1) { 1226 } else {
1343 __ add(result, dividend, Operand(dividend, LSR, 31)); 1227 if (shift == 1) {
1344 } else { 1228 __ srwi(result, dividend, Operand(31));
1345 __ mov(result, Operand(dividend, ASR, 31)); 1229 } else {
1346 __ add(result, dividend, Operand(result, LSR, 32 - shift)); 1230 __ srawi(result, dividend, 31);
1231 __ srwi(result, result, Operand(32 - shift));
1232 }
1233 __ add(result, dividend, result);
1234 __ srawi(result, result, shift);
1347 } 1235 }
1348 if (shift > 0) __ mov(result, Operand(result, ASR, shift)); 1236 if (divisor < 0) __ neg(result, result);
1349 if (divisor < 0) __ rsb(result, result, Operand(0));
1350 } 1237 }
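The srawi/srwi/add/srawi sequence in the new code is the standard bias trick: an arithmetic right shift on its own rounds toward minus infinity, so for negative dividends a bias of 2^shift - 1 is added first to make the result truncate toward zero. A minimal C++ sketch of the same idea (helper name and layout are illustrative, not part of the patch):

#include <cassert>
#include <cstdint>

// Truncating division of a signed 32-bit value by 2^shift (1 <= shift <= 31),
// mirroring the srawi(31) / srwi(32 - shift) / add / srawi(shift) sequence.
// Relies on >> being an arithmetic shift for signed values, as it is on the
// compilers and targets V8 supports.
int32_t DivPow2Trunc(int32_t dividend, int shift) {
  uint32_t sign = static_cast<uint32_t>(dividend >> 31);  // all ones if negative
  uint32_t bias = sign >> (32 - shift);                    // 2^shift - 1, or 0
  return static_cast<int32_t>(dividend + bias) >> shift;
}

int main() {
  assert(DivPow2Trunc(7, 2) == 1);    // 7 / 4
  assert(DivPow2Trunc(-7, 2) == -1);  // a plain arithmetic shift would give -2
  return 0;
}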
1351 1238
1352 1239
1353 void LCodeGen::DoDivByConstI(LDivByConstI* instr) { 1240 void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
1354 Register dividend = ToRegister(instr->dividend()); 1241 Register dividend = ToRegister(instr->dividend());
1355 int32_t divisor = instr->divisor(); 1242 int32_t divisor = instr->divisor();
1356 Register result = ToRegister(instr->result()); 1243 Register result = ToRegister(instr->result());
1357 DCHECK(!dividend.is(result)); 1244 DCHECK(!dividend.is(result));
1358 1245
1359 if (divisor == 0) { 1246 if (divisor == 0) {
1360 DeoptimizeIf(al, instr->environment()); 1247 DeoptimizeIf(al, instr->environment());
1361 return; 1248 return;
1362 } 1249 }
1363 1250
1364 // Check for (0 / -x) that will produce negative zero. 1251 // Check for (0 / -x) that will produce negative zero.
1365 HDiv* hdiv = instr->hydrogen(); 1252 HDiv* hdiv = instr->hydrogen();
1366 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { 1253 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1367 __ cmp(dividend, Operand::Zero()); 1254 __ cmpwi(dividend, Operand::Zero());
1368 DeoptimizeIf(eq, instr->environment()); 1255 DeoptimizeIf(eq, instr->environment());
1369 } 1256 }
1370 1257
1371 __ TruncatingDiv(result, dividend, Abs(divisor)); 1258 __ TruncatingDiv(result, dividend, Abs(divisor));
1372 if (divisor < 0) __ rsb(result, result, Operand::Zero()); 1259 if (divisor < 0) __ neg(result, result);
1373 1260
1374 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { 1261 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
1262 Register scratch = scratch0();
1375 __ mov(ip, Operand(divisor)); 1263 __ mov(ip, Operand(divisor));
1376 __ smull(scratch0(), ip, result, ip); 1264 __ mullw(scratch, result, ip);
1377 __ sub(scratch0(), scratch0(), dividend, SetCC); 1265 __ cmpw(scratch, dividend);
1378 DeoptimizeIf(ne, instr->environment()); 1266 DeoptimizeIf(ne, instr->environment());
1379 } 1267 }
1380 } 1268 }
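When not every use truncates, the mullw/cmpw tail re-multiplies the quotient by the constant and deoptimizes unless the dividend is reproduced exactly, i.e. the division left no remainder. A sketch of that exactness check (function name illustrative; assumes the divisor is a non-zero constant and the kMinInt / -1 case is excluded):

#include <cstdint>

// Returns false where the generated code would deoptimize (inexact division).
bool DivideByConstExactly(int32_t dividend, int32_t divisor, int32_t* result) {
  int32_t quotient = dividend / divisor;      // TruncatingDiv(Abs(divisor)) + neg
  uint32_t check = static_cast<uint32_t>(quotient) *
                   static_cast<uint32_t>(divisor);  // mullw keeps the low 32 bits
  if (static_cast<int32_t>(check) != dividend) return false;  // DeoptimizeIf(ne)
  *result = quotient;
  return true;
}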
1381 1269
1382 1270
1383 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. 1271 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
1384 void LCodeGen::DoDivI(LDivI* instr) { 1272 void LCodeGen::DoDivI(LDivI* instr) {
1385 HBinaryOperation* hdiv = instr->hydrogen(); 1273 HBinaryOperation* hdiv = instr->hydrogen();
1386 Register dividend = ToRegister(instr->dividend()); 1274 const Register dividend = ToRegister(instr->dividend());
1387 Register divisor = ToRegister(instr->divisor()); 1275 const Register divisor = ToRegister(instr->divisor());
1388 Register result = ToRegister(instr->result()); 1276 Register result = ToRegister(instr->result());
1389 1277
1278 DCHECK(!dividend.is(result));
1279 DCHECK(!divisor.is(result));
1280
1281 if (hdiv->CheckFlag(HValue::kCanOverflow)) {
1282 __ li(r0, Operand::Zero()); // clear xer
1283 __ mtxer(r0);
1284 }
1285
1286 __ divw(result, dividend, divisor, SetOE, SetRC);
1287
1390 // Check for x / 0. 1288 // Check for x / 0.
1391 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { 1289 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1392 __ cmp(divisor, Operand::Zero()); 1290 __ cmpwi(divisor, Operand::Zero());
1393 DeoptimizeIf(eq, instr->environment()); 1291 DeoptimizeIf(eq, instr->environment());
1394 } 1292 }
1395 1293
1396 // Check for (0 / -x) that will produce negative zero. 1294 // Check for (0 / -x) that will produce negative zero.
1397 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { 1295 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1398 Label positive; 1296 Label dividend_not_zero;
1399 if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) { 1297 __ cmpwi(dividend, Operand::Zero());
1400 // Do the test only if it hadn't be done above. 1298 __ bne(&dividend_not_zero);
1401 __ cmp(divisor, Operand::Zero()); 1299 __ cmpwi(divisor, Operand::Zero());
1402 } 1300 DeoptimizeIf(lt, instr->environment());
1403 __ b(pl, &positive); 1301 __ bind(&dividend_not_zero);
1404 __ cmp(dividend, Operand::Zero());
1405 DeoptimizeIf(eq, instr->environment());
1406 __ bind(&positive);
1407 } 1302 }
1408 1303
1409 // Check for (kMinInt / -1). 1304 // Check for (kMinInt / -1).
1410 if (hdiv->CheckFlag(HValue::kCanOverflow) && 1305 if (hdiv->CheckFlag(HValue::kCanOverflow)) {
1411 (!CpuFeatures::IsSupported(SUDIV) || 1306 Label no_overflow_possible;
1412 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) { 1307 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1413 // We don't need to check for overflow when truncating with sdiv 1308 DeoptimizeIf(overflow, instr->environment(), cr0);
1414 // support because, on ARM, sdiv kMinInt, -1 -> kMinInt. 1309 } else {
1415 __ cmp(dividend, Operand(kMinInt)); 1310 // When truncating, we want kMinInt / -1 = kMinInt.
1416 __ cmp(divisor, Operand(-1), eq); 1311 __ bnooverflow(&no_overflow_possible, cr0);
1417 DeoptimizeIf(eq, instr->environment()); 1312 __ mr(result, dividend);
1313 }
1314 __ bind(&no_overflow_possible);
1418 } 1315 }
1419 1316
1420 if (CpuFeatures::IsSupported(SUDIV)) { 1317 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
1421 CpuFeatureScope scope(masm(), SUDIV); 1318 // Deoptimize if remainder is not 0.
1422 __ sdiv(result, dividend, divisor); 1319 Register scratch = scratch0();
1423 } else { 1320 __ mullw(scratch, divisor, result);
1424 DoubleRegister vleft = ToDoubleRegister(instr->temp()); 1321 __ cmpw(dividend, scratch);
1425 DoubleRegister vright = double_scratch0();
1426 __ vmov(double_scratch0().low(), dividend);
1427 __ vcvt_f64_s32(vleft, double_scratch0().low());
1428 __ vmov(double_scratch0().low(), divisor);
1429 __ vcvt_f64_s32(vright, double_scratch0().low());
1430 __ vdiv(vleft, vleft, vright); // vleft now contains the result.
1431 __ vcvt_s32_f64(double_scratch0().low(), vleft);
1432 __ vmov(result, double_scratch0().low());
1433 }
1434
1435 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1436 // Compute remainder and deopt if it's not zero.
1437 Register remainder = scratch0();
1438 __ Mls(remainder, result, divisor, dividend);
1439 __ cmp(remainder, Operand::Zero());
1440 DeoptimizeIf(ne, instr->environment()); 1322 DeoptimizeIf(ne, instr->environment());
1441 } 1323 }
1442 } 1324 }
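divw neither traps nor records a divide-by-zero, so the PPC version issues the divide up front and derives every bailout afterwards from explicit compares plus the XER overflow bit, which is set only for kMinInt / -1. A portable sketch of those conditions (flag handling collapsed into plain booleans; names are illustrative):

#include <climits>
#include <cstdint>

// Returns false where DoDivI would deoptimize.  'truncating' stands for
// kAllUsesTruncatingToInt32; the zero-divisor and minus-zero checks are only
// emitted when the corresponding hydrogen flags are set.
bool Int32Div(int32_t dividend, int32_t divisor, bool truncating,
              int32_t* result) {
  if (divisor == 0) return false;                    // x / 0
  if (dividend == 0 && divisor < 0) return false;    // result would be -0
  if (dividend == INT_MIN && divisor == -1) {        // XER overflow case
    if (!truncating) return false;
    *result = INT_MIN;  // the patch copies the dividend explicitly (mr result, dividend)
    return true;
  }
  int32_t quotient = dividend / divisor;
  if (!truncating && quotient * divisor != dividend) return false;  // remainder
  *result = quotient;
  return true;
}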
1443 1325
1444 1326
1445 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
1446 DwVfpRegister addend = ToDoubleRegister(instr->addend());
1447 DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
1448 DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());
1449
1450 // This is computed in-place.
1451 DCHECK(addend.is(ToDoubleRegister(instr->result())));
1452
1453 __ vmla(addend, multiplier, multiplicand);
1454 }
1455
1456
1457 void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
1458 DwVfpRegister minuend = ToDoubleRegister(instr->minuend());
1459 DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
1460 DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());
1461
1462 // This is computed in-place.
1463 DCHECK(minuend.is(ToDoubleRegister(instr->result())));
1464
1465 __ vmls(minuend, multiplier, multiplicand);
1466 }
1467
1468
1469 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) { 1327 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
1328 HBinaryOperation* hdiv = instr->hydrogen();
1470 Register dividend = ToRegister(instr->dividend()); 1329 Register dividend = ToRegister(instr->dividend());
1471 Register result = ToRegister(instr->result()); 1330 Register result = ToRegister(instr->result());
1472 int32_t divisor = instr->divisor(); 1331 int32_t divisor = instr->divisor();
1473 1332
1474 // If the divisor is 1, return the dividend.
1475 if (divisor == 1) {
1476 __ Move(result, dividend);
1477 return;
1478 }
1479
1480 // If the divisor is positive, things are easy: There can be no deopts and we 1333 // If the divisor is positive, things are easy: There can be no deopts and we
1481 // can simply do an arithmetic right shift. 1334 // can simply do an arithmetic right shift.
1482 int32_t shift = WhichPowerOf2Abs(divisor); 1335 int32_t shift = WhichPowerOf2Abs(divisor);
1483 if (divisor > 1) { 1336 if (divisor > 0) {
1484 __ mov(result, Operand(dividend, ASR, shift)); 1337 if (shift || !result.is(dividend)) {
1338 __ srawi(result, dividend, shift);
1339 }
1485 return; 1340 return;
1486 } 1341 }
1487 1342
1488 // If the divisor is negative, we have to negate and handle edge cases. 1343 // If the divisor is negative, we have to negate and handle edge cases.
1489 __ rsb(result, dividend, Operand::Zero(), SetCC); 1344 OEBit oe = LeaveOE;
1490 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 1345 #if V8_TARGET_ARCH_PPC64
1346 if (divisor == -1 && hdiv->CheckFlag(HValue::kLeftCanBeMinInt)) {
1347 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
1348 __ cmpw(dividend, r0);
1491 DeoptimizeIf(eq, instr->environment()); 1349 DeoptimizeIf(eq, instr->environment());
1492 } 1350 }
1351 #else
1352 if (hdiv->CheckFlag(HValue::kLeftCanBeMinInt)) {
1353 __ li(r0, Operand::Zero()); // clear xer
1354 __ mtxer(r0);
1355 oe = SetOE;
1356 }
1357 #endif
1358
1359 __ neg(result, dividend, oe, SetRC);
1360 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1361 DeoptimizeIf(eq, instr->environment(), cr0);
1362 }
1363
1364 // If the negation could not overflow, simply shifting is OK.
1365 #if !V8_TARGET_ARCH_PPC64
1366 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1367 #endif
1368 if (shift) {
1369 __ ShiftRightArithImm(result, result, shift);
1370 }
1371 return;
1372 #if !V8_TARGET_ARCH_PPC64
1373 }
1493 1374
1494 // Dividing by -1 is basically negation, unless we overflow. 1375 // Dividing by -1 is basically negation, unless we overflow.
1495 if (divisor == -1) { 1376 if (divisor == -1) {
1496 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { 1377 DeoptimizeIf(overflow, instr->environment(), cr0);
1497 DeoptimizeIf(vs, instr->environment());
1498 }
1499 return; 1378 return;
1500 } 1379 }
1501 1380
1502 // If the negation could not overflow, simply shifting is OK. 1381 Label overflow, done;
1503 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { 1382 __ boverflow(&overflow, cr0);
1504 __ mov(result, Operand(result, ASR, shift)); 1383 __ srawi(result, result, shift);
1505 return; 1384 __ b(&done);
1506 } 1385 __ bind(&overflow);
1507 1386 __ mov(result, Operand(kMinInt / divisor));
1508 __ mov(result, Operand(kMinInt / divisor), LeaveCC, vs); 1387 __ bind(&done);
1509 __ mov(result, Operand(result, ASR, shift), LeaveCC, vc); 1388 #endif
1510 } 1389 }
1511 1390
1512 1391
1513 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { 1392 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
1514 Register dividend = ToRegister(instr->dividend()); 1393 Register dividend = ToRegister(instr->dividend());
1515 int32_t divisor = instr->divisor(); 1394 int32_t divisor = instr->divisor();
1516 Register result = ToRegister(instr->result()); 1395 Register result = ToRegister(instr->result());
1517 DCHECK(!dividend.is(result)); 1396 DCHECK(!dividend.is(result));
1518 1397
1519 if (divisor == 0) { 1398 if (divisor == 0) {
1520 DeoptimizeIf(al, instr->environment()); 1399 DeoptimizeIf(al, instr->environment());
1521 return; 1400 return;
1522 } 1401 }
1523 1402
1524 // Check for (0 / -x) that will produce negative zero. 1403 // Check for (0 / -x) that will produce negative zero.
1525 HMathFloorOfDiv* hdiv = instr->hydrogen(); 1404 HMathFloorOfDiv* hdiv = instr->hydrogen();
1526 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { 1405 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1527 __ cmp(dividend, Operand::Zero()); 1406 __ cmpwi(dividend, Operand::Zero());
1528 DeoptimizeIf(eq, instr->environment()); 1407 DeoptimizeIf(eq, instr->environment());
1529 } 1408 }
1530 1409
1531 // Easy case: We need no dynamic check for the dividend and the flooring 1410 // Easy case: We need no dynamic check for the dividend and the flooring
1532 // division is the same as the truncating division. 1411 // division is the same as the truncating division.
1533 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || 1412 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
1534 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { 1413 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
1535 __ TruncatingDiv(result, dividend, Abs(divisor)); 1414 __ TruncatingDiv(result, dividend, Abs(divisor));
1536 if (divisor < 0) __ rsb(result, result, Operand::Zero()); 1415 if (divisor < 0) __ neg(result, result);
1537 return; 1416 return;
1538 } 1417 }
1539 1418
1540 // In the general case we may need to adjust before and after the truncating 1419 // In the general case we may need to adjust before and after the truncating
1541 // division to get a flooring division. 1420 // division to get a flooring division.
1542 Register temp = ToRegister(instr->temp()); 1421 Register temp = ToRegister(instr->temp());
1543 DCHECK(!temp.is(dividend) && !temp.is(result)); 1422 DCHECK(!temp.is(dividend) && !temp.is(result));
1544 Label needs_adjustment, done; 1423 Label needs_adjustment, done;
1545 __ cmp(dividend, Operand::Zero()); 1424 __ cmpwi(dividend, Operand::Zero());
1546 __ b(divisor > 0 ? lt : gt, &needs_adjustment); 1425 __ b(divisor > 0 ? lt : gt, &needs_adjustment);
1547 __ TruncatingDiv(result, dividend, Abs(divisor)); 1426 __ TruncatingDiv(result, dividend, Abs(divisor));
1548 if (divisor < 0) __ rsb(result, result, Operand::Zero()); 1427 if (divisor < 0) __ neg(result, result);
1549 __ jmp(&done); 1428 __ b(&done);
1550 __ bind(&needs_adjustment); 1429 __ bind(&needs_adjustment);
1551 __ add(temp, dividend, Operand(divisor > 0 ? 1 : -1)); 1430 __ addi(temp, dividend, Operand(divisor > 0 ? 1 : -1));
1552 __ TruncatingDiv(result, temp, Abs(divisor)); 1431 __ TruncatingDiv(result, temp, Abs(divisor));
1553 if (divisor < 0) __ rsb(result, result, Operand::Zero()); 1432 if (divisor < 0) __ neg(result, result);
1554 __ sub(result, result, Operand(1)); 1433 __ subi(result, result, Operand(1));
1555 __ bind(&done); 1434 __ bind(&done);
1556 } 1435 }
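The needs_adjustment path uses the identity floor(a / b) == trunc((a + (b > 0 ? 1 : -1)) / b) - 1 whenever a and b have opposite signs, so a flooring division can be built from TruncatingDiv without computing a remainder. A small sketch (helper name illustrative; the divisor is a non-zero constant and the kMinInt / -1 overflow is assumed to be excluded by the earlier deopts):

#include <cstdint>

int32_t FlooringDivByConst(int32_t dividend, int32_t divisor) {
  bool same_sign = dividend == 0 || ((dividend < 0) == (divisor < 0));
  if (same_sign) {
    return dividend / divisor;               // truncation equals flooring here
  }
  // Opposite signs: floor(a / b) == trunc((a + sign(b)) / b) - 1.
  int32_t adjusted = dividend + (divisor > 0 ? 1 : -1);
  return adjusted / divisor - 1;
}

// FlooringDivByConst(-7, 2) == -4 and FlooringDivByConst(7, -2) == -4,
// whereas plain truncating division gives -3 in both cases.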
1557 1436
1558 1437
1559 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI. 1438 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
1560 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) { 1439 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
1561 HBinaryOperation* hdiv = instr->hydrogen(); 1440 HBinaryOperation* hdiv = instr->hydrogen();
1562 Register left = ToRegister(instr->dividend()); 1441 const Register dividend = ToRegister(instr->dividend());
1563 Register right = ToRegister(instr->divisor()); 1442 const Register divisor = ToRegister(instr->divisor());
1564 Register result = ToRegister(instr->result()); 1443 Register result = ToRegister(instr->result());
1565 1444
1445 DCHECK(!dividend.is(result));
1446 DCHECK(!divisor.is(result));
1447
1448 if (hdiv->CheckFlag(HValue::kCanOverflow)) {
1449 __ li(r0, Operand::Zero()); // clear xer
1450 __ mtxer(r0);
1451 }
1452
1453 __ divw(result, dividend, divisor, SetOE, SetRC);
1454
1566 // Check for x / 0. 1455 // Check for x / 0.
1567 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { 1456 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1568 __ cmp(right, Operand::Zero()); 1457 __ cmpwi(divisor, Operand::Zero());
1569 DeoptimizeIf(eq, instr->environment()); 1458 DeoptimizeIf(eq, instr->environment());
1570 } 1459 }
1571 1460
1572 // Check for (0 / -x) that will produce negative zero. 1461 // Check for (0 / -x) that will produce negative zero.
1573 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { 1462 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1574 Label positive; 1463 Label dividend_not_zero;
1575 if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) { 1464 __ cmpwi(dividend, Operand::Zero());
1576 // Do the test only if it hadn't be done above. 1465 __ bne(&dividend_not_zero);
1577 __ cmp(right, Operand::Zero()); 1466 __ cmpwi(divisor, Operand::Zero());
1578 } 1467 DeoptimizeIf(lt, instr->environment());
1579 __ b(pl, &positive); 1468 __ bind(&dividend_not_zero);
1580 __ cmp(left, Operand::Zero());
1581 DeoptimizeIf(eq, instr->environment());
1582 __ bind(&positive);
1583 } 1469 }
1584 1470
1585 // Check for (kMinInt / -1). 1471 // Check for (kMinInt / -1).
1586 if (hdiv->CheckFlag(HValue::kCanOverflow) && 1472 if (hdiv->CheckFlag(HValue::kCanOverflow)) {
1587 (!CpuFeatures::IsSupported(SUDIV) || 1473 Label no_overflow_possible;
1588 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) { 1474 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1589 // We don't need to check for overflow when truncating with sdiv 1475 DeoptimizeIf(overflow, instr->environment(), cr0);
1590 // support because, on ARM, sdiv kMinInt, -1 -> kMinInt. 1476 } else {
1591 __ cmp(left, Operand(kMinInt)); 1477 // When truncating, we want kMinInt / -1 = kMinInt.
1592 __ cmp(right, Operand(-1), eq); 1478 __ bnooverflow(&no_overflow_possible, cr0);
1593 DeoptimizeIf(eq, instr->environment()); 1479 __ mr(result, dividend);
1594 } 1480 }
1595 1481 __ bind(&no_overflow_possible);
1596 if (CpuFeatures::IsSupported(SUDIV)) {
1597 CpuFeatureScope scope(masm(), SUDIV);
1598 __ sdiv(result, left, right);
1599 } else {
1600 DoubleRegister vleft = ToDoubleRegister(instr->temp());
1601 DoubleRegister vright = double_scratch0();
1602 __ vmov(double_scratch0().low(), left);
1603 __ vcvt_f64_s32(vleft, double_scratch0().low());
1604 __ vmov(double_scratch0().low(), right);
1605 __ vcvt_f64_s32(vright, double_scratch0().low());
1606 __ vdiv(vleft, vleft, vright); // vleft now contains the result.
1607 __ vcvt_s32_f64(double_scratch0().low(), vleft);
1608 __ vmov(result, double_scratch0().low());
1609 } 1482 }
1610 1483
1611 Label done; 1484 Label done;
1612 Register remainder = scratch0(); 1485 Register scratch = scratch0();
1613 __ Mls(remainder, result, right, left); 1486 // If both operands have the same sign then we are done.
1614 __ cmp(remainder, Operand::Zero()); 1487 #if V8_TARGET_ARCH_PPC64
1615 __ b(eq, &done); 1488 __ xor_(scratch, dividend, divisor);
1616 __ eor(remainder, remainder, Operand(right)); 1489 __ cmpwi(scratch, Operand::Zero());
1617 __ add(result, result, Operand(remainder, ASR, 31)); 1490 __ bge(&done);
1491 #else
1492 __ xor_(scratch, dividend, divisor, SetRC);
1493 __ bge(&done, cr0);
1494 #endif
1495
1496 // If there is no remainder then we are done.
1497 __ mullw(scratch, divisor, result);
1498 __ cmpw(dividend, scratch);
1499 __ beq(&done);
1500
1501 // We performed a truncating division. Correct the result.
1502 __ subi(result, result, Operand(1));
1618 __ bind(&done); 1503 __ bind(&done);
1619 } 1504 }
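With both operands in registers the correction happens after the fact: a truncated quotient is exactly one too large for flooring semantics when the operand signs differ and the division was inexact, which is what the xor / mullw / cmpw / subi tail detects. A sketch of the same logic (assumes divisor != 0 and the kMinInt / -1 case has already been handled, as in the code above):

#include <cstdint>

int32_t FloorFromTruncating(int32_t dividend, int32_t divisor) {
  int32_t result = dividend / divisor;              // divw truncates toward zero
  if ((dividend ^ divisor) >= 0) return result;     // same sign: already floored
  if (result * divisor == dividend) return result;  // exact: nothing to fix
  return result - 1;                                // round down by one
}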
1620 1505
1621 1506
1507 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
1508 DoubleRegister addend = ToDoubleRegister(instr->addend());
1509 DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
1510 DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
1511 DoubleRegister result = ToDoubleRegister(instr->result());
1512
1513 __ fmadd(result, multiplier, multiplicand, addend);
1514 }
1515
1516
1517 void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
1518 DoubleRegister minuend = ToDoubleRegister(instr->minuend());
1519 DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
1520 DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
1521 DoubleRegister result = ToDoubleRegister(instr->result());
1522
1523 __ fmsub(result, multiplier, multiplicand, minuend);
1524 }
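fmadd and fmsub are fused multiply-add forms: the product and the trailing add or subtract are rounded once, and the result register no longer needs to alias an input the way the in-place vmla/vmls versions did. A rough C++ analogue using the portable std::fma (illustrative only, matching the operand order of the new calls):

#include <cmath>

// DoMultiplyAddD: result = multiplier * multiplicand + addend, one rounding.
double MultiplyAdd(double addend, double multiplier, double multiplicand) {
  return std::fma(multiplier, multiplicand, addend);
}

// DoMultiplySubD: result = multiplier * multiplicand - minuend, one rounding.
double MultiplySub(double minuend, double multiplier, double multiplicand) {
  return std::fma(multiplier, multiplicand, -minuend);
}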
1525
1526
1622 void LCodeGen::DoMulI(LMulI* instr) { 1527 void LCodeGen::DoMulI(LMulI* instr) {
1528 Register scratch = scratch0();
1623 Register result = ToRegister(instr->result()); 1529 Register result = ToRegister(instr->result());
1624 // Note that result may alias left. 1530 // Note that result may alias left.
1625 Register left = ToRegister(instr->left()); 1531 Register left = ToRegister(instr->left());
1626 LOperand* right_op = instr->right(); 1532 LOperand* right_op = instr->right();
1627 1533
1628 bool bailout_on_minus_zero = 1534 bool bailout_on_minus_zero =
1629 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); 1535 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
1630 bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 1536 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1631 1537
1632 if (right_op->IsConstantOperand()) { 1538 if (right_op->IsConstantOperand()) {
1633 int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); 1539 int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
1634 1540
1635 if (bailout_on_minus_zero && (constant < 0)) { 1541 if (bailout_on_minus_zero && (constant < 0)) {
1636 // The case of a null constant will be handled separately. 1542 // The case of a null constant will be handled separately.
1637 // If constant is negative and left is null, the result should be -0. 1543 // If constant is negative and left is null, the result should be -0.
1638 __ cmp(left, Operand::Zero()); 1544 __ cmpi(left, Operand::Zero());
1639 DeoptimizeIf(eq, instr->environment()); 1545 DeoptimizeIf(eq, instr->environment());
1640 } 1546 }
1641 1547
1642 switch (constant) { 1548 switch (constant) {
1643 case -1: 1549 case -1:
1644 if (overflow) { 1550 if (can_overflow) {
1645 __ rsb(result, left, Operand::Zero(), SetCC); 1551 #if V8_TARGET_ARCH_PPC64
1646 DeoptimizeIf(vs, instr->environment()); 1552 if (instr->hydrogen()->representation().IsSmi()) {
1553 #endif
1554 __ li(r0, Operand::Zero()); // clear xer
1555 __ mtxer(r0);
1556 __ neg(result, left, SetOE, SetRC);
1557 DeoptimizeIf(overflow, instr->environment(), cr0);
1558 #if V8_TARGET_ARCH_PPC64
1559 } else {
1560 __ neg(result, left);
1561 __ TestIfInt32(result, scratch, r0);
1562 DeoptimizeIf(ne, instr->environment());
1563 }
1564 #endif
1647 } else { 1565 } else {
1648 __ rsb(result, left, Operand::Zero()); 1566 __ neg(result, left);
1649 } 1567 }
1650 break; 1568 break;
1651 case 0: 1569 case 0:
1652 if (bailout_on_minus_zero) { 1570 if (bailout_on_minus_zero) {
1653 // If left is strictly negative and the constant is null, the 1571 // If left is strictly negative and the constant is null, the
1654 // result is -0. Deoptimize if required, otherwise return 0. 1572 // result is -0. Deoptimize if required, otherwise return 0.
1655 __ cmp(left, Operand::Zero()); 1573 #if V8_TARGET_ARCH_PPC64
1656 DeoptimizeIf(mi, instr->environment()); 1574 if (instr->hydrogen()->representation().IsSmi()) {
1575 #endif
1576 __ cmpi(left, Operand::Zero());
1577 #if V8_TARGET_ARCH_PPC64
1578 } else {
1579 __ cmpwi(left, Operand::Zero());
1580 }
1581 #endif
1582 DeoptimizeIf(lt, instr->environment());
1657 } 1583 }
1658 __ mov(result, Operand::Zero()); 1584 __ li(result, Operand::Zero());
1659 break; 1585 break;
1660 case 1: 1586 case 1:
1661 __ Move(result, left); 1587 __ Move(result, left);
1662 break; 1588 break;
1663 default: 1589 default:
1664 // Multiplying by powers of two and powers of two plus or minus 1590 // Multiplying by powers of two and powers of two plus or minus
1665 // one can be done faster with shifted operands. 1591 // one can be done faster with shifted operands.
1666 // For other constants we emit standard code. 1592 // For other constants we emit standard code.
1667 int32_t mask = constant >> 31; 1593 int32_t mask = constant >> 31;
1668 uint32_t constant_abs = (constant + mask) ^ mask; 1594 uint32_t constant_abs = (constant + mask) ^ mask;
1669 1595
1670 if (IsPowerOf2(constant_abs)) { 1596 if (IsPowerOf2(constant_abs)) {
1671 int32_t shift = WhichPowerOf2(constant_abs); 1597 int32_t shift = WhichPowerOf2(constant_abs);
1672 __ mov(result, Operand(left, LSL, shift)); 1598 __ ShiftLeftImm(result, left, Operand(shift));
1673 // Correct the sign of the result is the constant is negative. 1599 // Correct the sign of the result if the constant is negative.
1674 if (constant < 0) __ rsb(result, result, Operand::Zero()); 1600 if (constant < 0) __ neg(result, result);
1675 } else if (IsPowerOf2(constant_abs - 1)) { 1601 } else if (IsPowerOf2(constant_abs - 1)) {
1676 int32_t shift = WhichPowerOf2(constant_abs - 1); 1602 int32_t shift = WhichPowerOf2(constant_abs - 1);
1677 __ add(result, left, Operand(left, LSL, shift)); 1603 __ ShiftLeftImm(scratch, left, Operand(shift));
1678 // Correct the sign of the result is the constant is negative. 1604 __ add(result, scratch, left);
1679 if (constant < 0) __ rsb(result, result, Operand::Zero()); 1605 // Correct the sign of the result if the constant is negative.
1606 if (constant < 0) __ neg(result, result);
1680 } else if (IsPowerOf2(constant_abs + 1)) { 1607 } else if (IsPowerOf2(constant_abs + 1)) {
1681 int32_t shift = WhichPowerOf2(constant_abs + 1); 1608 int32_t shift = WhichPowerOf2(constant_abs + 1);
1682 __ rsb(result, left, Operand(left, LSL, shift)); 1609 __ ShiftLeftImm(scratch, left, Operand(shift));
1683 // Correct the sign of the result is the constant is negative. 1610 __ sub(result, scratch, left);
1684 if (constant < 0) __ rsb(result, result, Operand::Zero()); 1611 // Correct the sign of the result if the constant is negative.
1612 if (constant < 0) __ neg(result, result);
1685 } else { 1613 } else {
1686 // Generate standard code. 1614 // Generate standard code.
1687 __ mov(ip, Operand(constant)); 1615 __ mov(ip, Operand(constant));
1688 __ mul(result, left, ip); 1616 __ Mul(result, left, ip);
1689 } 1617 }
1690 } 1618 }
1691 1619
1692 } else { 1620 } else {
1693 DCHECK(right_op->IsRegister()); 1621 DCHECK(right_op->IsRegister());
1694 Register right = ToRegister(right_op); 1622 Register right = ToRegister(right_op);
1695 1623
1696 if (overflow) { 1624 if (can_overflow) {
1697 Register scratch = scratch0(); 1625 #if V8_TARGET_ARCH_PPC64
1626 // result = left * right.
1627 if (instr->hydrogen()->representation().IsSmi()) {
1628 __ SmiUntag(result, left);
1629 __ SmiUntag(scratch, right);
1630 __ Mul(result, result, scratch);
1631 } else {
1632 __ Mul(result, left, right);
1633 }
1634 __ TestIfInt32(result, scratch, r0);
1635 DeoptimizeIf(ne, instr->environment());
1636 if (instr->hydrogen()->representation().IsSmi()) {
1637 __ SmiTag(result);
1638 }
1639 #else
1698 // scratch:result = left * right. 1640 // scratch:result = left * right.
1699 if (instr->hydrogen()->representation().IsSmi()) { 1641 if (instr->hydrogen()->representation().IsSmi()) {
1700 __ SmiUntag(result, left); 1642 __ SmiUntag(result, left);
1701 __ smull(result, scratch, result, right); 1643 __ mulhw(scratch, result, right);
1644 __ mullw(result, result, right);
1702 } else { 1645 } else {
1703 __ smull(result, scratch, left, right); 1646 __ mulhw(scratch, left, right);
1647 __ mullw(result, left, right);
1704 } 1648 }
1705 __ cmp(scratch, Operand(result, ASR, 31)); 1649 __ TestIfInt32(scratch, result, r0);
1706 DeoptimizeIf(ne, instr->environment()); 1650 DeoptimizeIf(ne, instr->environment());
1651 #endif
1707 } else { 1652 } else {
1708 if (instr->hydrogen()->representation().IsSmi()) { 1653 if (instr->hydrogen()->representation().IsSmi()) {
1709 __ SmiUntag(result, left); 1654 __ SmiUntag(result, left);
1710 __ mul(result, result, right); 1655 __ Mul(result, result, right);
1711 } else { 1656 } else {
1712 __ mul(result, left, right); 1657 __ Mul(result, left, right);
1713 } 1658 }
1714 } 1659 }
1715 1660
1716 if (bailout_on_minus_zero) { 1661 if (bailout_on_minus_zero) {
1717 Label done; 1662 Label done;
1718 __ teq(left, Operand(right)); 1663 #if V8_TARGET_ARCH_PPC64
1719 __ b(pl, &done); 1664 if (instr->hydrogen()->representation().IsSmi()) {
1665 #endif
1666 __ xor_(r0, left, right, SetRC);
1667 __ bge(&done, cr0);
1668 #if V8_TARGET_ARCH_PPC64
1669 } else {
1670 __ xor_(r0, left, right);
1671 __ cmpwi(r0, Operand::Zero());
1672 __ bge(&done);
1673 }
1674 #endif
1720 // Bail out if the result is minus zero. 1675 // Bail out if the result is minus zero.
1721 __ cmp(result, Operand::Zero()); 1676 __ cmpi(result, Operand::Zero());
1722 DeoptimizeIf(eq, instr->environment()); 1677 DeoptimizeIf(eq, instr->environment());
1723 __ bind(&done); 1678 __ bind(&done);
1724 } 1679 }
1725 } 1680 }
1726 } 1681 }
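The constant path strength-reduces the multiply: |c| == 2^k becomes a single shift, 2^k + 1 a shift plus add, and 2^k - 1 a shift minus subtract, with a final negation when the constant is negative. A sketch of the selection logic (assumes |constant| >= 2, since -1, 0 and 1 are handled by the switch cases above; overflow deopts omitted):

#include <cstdint>

// Shift amount for an exact power of two.
static int WhichPowerOf2(uint32_t x) {
  int shift = 0;
  while ((x >>= 1) != 0) shift++;
  return shift;
}

int32_t MulByConst(int32_t left, int32_t constant) {
  uint32_t uleft = static_cast<uint32_t>(left);
  uint32_t abs = constant < 0 ? 0u - static_cast<uint32_t>(constant)
                              : static_cast<uint32_t>(constant);
  uint32_t result;
  if ((abs & (abs - 1)) == 0) {               // |c| == 2^k: one shift
    result = uleft << WhichPowerOf2(abs);
  } else if (((abs - 1) & (abs - 2)) == 0) {  // |c| == 2^k + 1: shift, then add
    result = (uleft << WhichPowerOf2(abs - 1)) + uleft;
  } else if ((abs & (abs + 1)) == 0) {        // |c| == 2^k - 1: shift, then sub
    result = (uleft << WhichPowerOf2(abs + 1)) - uleft;
  } else {
    return static_cast<int32_t>(uleft * static_cast<uint32_t>(constant));
  }
  if (constant < 0) result = 0u - result;     // correct the sign
  return static_cast<int32_t>(result);
}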
1727 1682
1728 1683
1729 void LCodeGen::DoBitI(LBitI* instr) { 1684 void LCodeGen::DoBitI(LBitI* instr) {
1730 LOperand* left_op = instr->left(); 1685 LOperand* left_op = instr->left();
1731 LOperand* right_op = instr->right(); 1686 LOperand* right_op = instr->right();
1732 DCHECK(left_op->IsRegister()); 1687 DCHECK(left_op->IsRegister());
1733 Register left = ToRegister(left_op); 1688 Register left = ToRegister(left_op);
1734 Register result = ToRegister(instr->result()); 1689 Register result = ToRegister(instr->result());
1735 Operand right(no_reg); 1690 Operand right(no_reg);
1736 1691
1737 if (right_op->IsStackSlot()) { 1692 if (right_op->IsStackSlot()) {
1738 right = Operand(EmitLoadRegister(right_op, ip)); 1693 right = Operand(EmitLoadRegister(right_op, ip));
1739 } else { 1694 } else {
1740 DCHECK(right_op->IsRegister() || right_op->IsConstantOperand()); 1695 DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
1741 right = ToOperand(right_op); 1696 right = ToOperand(right_op);
1697
1698 if (right_op->IsConstantOperand() && is_uint16(right.immediate())) {
1699 switch (instr->op()) {
1700 case Token::BIT_AND:
1701 __ andi(result, left, right);
1702 break;
1703 case Token::BIT_OR:
1704 __ ori(result, left, right);
1705 break;
1706 case Token::BIT_XOR:
1707 __ xori(result, left, right);
1708 break;
1709 default:
1710 UNREACHABLE();
1711 break;
1712 }
1713 return;
1714 }
1742 } 1715 }
1743 1716
1744 switch (instr->op()) { 1717 switch (instr->op()) {
1745 case Token::BIT_AND: 1718 case Token::BIT_AND:
1746 __ and_(result, left, right); 1719 __ And(result, left, right);
1747 break; 1720 break;
1748 case Token::BIT_OR: 1721 case Token::BIT_OR:
1749 __ orr(result, left, right); 1722 __ Or(result, left, right);
1750 break; 1723 break;
1751 case Token::BIT_XOR: 1724 case Token::BIT_XOR:
1752 if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) { 1725 if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
1753 __ mvn(result, Operand(left)); 1726 __ notx(result, left);
1754 } else { 1727 } else {
1755 __ eor(result, left, right); 1728 __ Xor(result, left, right);
1756 } 1729 }
1757 break; 1730 break;
1758 default: 1731 default:
1759 UNREACHABLE(); 1732 UNREACHABLE();
1760 break; 1733 break;
1761 } 1734 }
1762 } 1735 }
1763 1736
1764 1737
1765 void LCodeGen::DoShiftI(LShiftI* instr) { 1738 void LCodeGen::DoShiftI(LShiftI* instr) {
1766 // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so 1739 // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
1767 // result may alias either of them. 1740 // result may alias either of them.
1768 LOperand* right_op = instr->right(); 1741 LOperand* right_op = instr->right();
1769 Register left = ToRegister(instr->left()); 1742 Register left = ToRegister(instr->left());
1770 Register result = ToRegister(instr->result()); 1743 Register result = ToRegister(instr->result());
1771 Register scratch = scratch0(); 1744 Register scratch = scratch0();
1772 if (right_op->IsRegister()) { 1745 if (right_op->IsRegister()) {
1773 // Mask the right_op operand. 1746 // Mask the right_op operand.
1774 __ and_(scratch, ToRegister(right_op), Operand(0x1F)); 1747 __ andi(scratch, ToRegister(right_op), Operand(0x1F));
1775 switch (instr->op()) { 1748 switch (instr->op()) {
1776 case Token::ROR: 1749 case Token::ROR:
1777 __ mov(result, Operand(left, ROR, scratch)); 1750 // rotate_right(a, b) == rotate_left(a, 32 - b)
1751 __ subfic(scratch, scratch, Operand(32));
1752 __ rotlw(result, left, scratch);
1778 break; 1753 break;
1779 case Token::SAR: 1754 case Token::SAR:
1780 __ mov(result, Operand(left, ASR, scratch)); 1755 __ sraw(result, left, scratch);
1781 break; 1756 break;
1782 case Token::SHR: 1757 case Token::SHR:
1783 if (instr->can_deopt()) { 1758 if (instr->can_deopt()) {
1784 __ mov(result, Operand(left, LSR, scratch), SetCC); 1759 __ srw(result, left, scratch, SetRC);
1785 DeoptimizeIf(mi, instr->environment()); 1760 #if V8_TARGET_ARCH_PPC64
1761 __ extsw(result, result, SetRC);
1762 #endif
1763 DeoptimizeIf(lt, instr->environment(), cr0);
1786 } else { 1764 } else {
1787 __ mov(result, Operand(left, LSR, scratch)); 1765 __ srw(result, left, scratch);
1788 } 1766 }
1789 break; 1767 break;
1790 case Token::SHL: 1768 case Token::SHL:
1791 __ mov(result, Operand(left, LSL, scratch)); 1769 __ slw(result, left, scratch);
1770 #if V8_TARGET_ARCH_PPC64
1771 __ extsw(result, result);
1772 #endif
1792 break; 1773 break;
1793 default: 1774 default:
1794 UNREACHABLE(); 1775 UNREACHABLE();
1795 break; 1776 break;
1796 } 1777 }
1797 } else { 1778 } else {
1798 // Mask the right_op operand. 1779 // Mask the right_op operand.
1799 int value = ToInteger32(LConstantOperand::cast(right_op)); 1780 int value = ToInteger32(LConstantOperand::cast(right_op));
1800 uint8_t shift_count = static_cast<uint8_t>(value & 0x1F); 1781 uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1801 switch (instr->op()) { 1782 switch (instr->op()) {
1802 case Token::ROR: 1783 case Token::ROR:
1803 if (shift_count != 0) { 1784 if (shift_count != 0) {
1804 __ mov(result, Operand(left, ROR, shift_count)); 1785 __ rotrwi(result, left, shift_count);
1805 } else { 1786 } else {
1806 __ Move(result, left); 1787 __ Move(result, left);
1807 } 1788 }
1808 break; 1789 break;
1809 case Token::SAR: 1790 case Token::SAR:
1810 if (shift_count != 0) { 1791 if (shift_count != 0) {
1811 __ mov(result, Operand(left, ASR, shift_count)); 1792 __ srawi(result, left, shift_count);
1812 } else { 1793 } else {
1813 __ Move(result, left); 1794 __ Move(result, left);
1814 } 1795 }
1815 break; 1796 break;
1816 case Token::SHR: 1797 case Token::SHR:
1817 if (shift_count != 0) { 1798 if (shift_count != 0) {
1818 __ mov(result, Operand(left, LSR, shift_count)); 1799 __ srwi(result, left, Operand(shift_count));
1819 } else { 1800 } else {
1820 if (instr->can_deopt()) { 1801 if (instr->can_deopt()) {
1821 __ tst(left, Operand(0x80000000)); 1802 __ cmpwi(left, Operand::Zero());
1822 DeoptimizeIf(ne, instr->environment()); 1803 DeoptimizeIf(lt, instr->environment());
1823 } 1804 }
1824 __ Move(result, left); 1805 __ Move(result, left);
1825 } 1806 }
1826 break; 1807 break;
1827 case Token::SHL: 1808 case Token::SHL:
1828 if (shift_count != 0) { 1809 if (shift_count != 0) {
1810 #if V8_TARGET_ARCH_PPC64
1811 if (instr->hydrogen_value()->representation().IsSmi()) {
1812 __ sldi(result, left, Operand(shift_count));
1813 #else
1829 if (instr->hydrogen_value()->representation().IsSmi() && 1814 if (instr->hydrogen_value()->representation().IsSmi() &&
1830 instr->can_deopt()) { 1815 instr->can_deopt()) {
1831 if (shift_count != 1) { 1816 if (shift_count != 1) {
1832 __ mov(result, Operand(left, LSL, shift_count - 1)); 1817 __ slwi(result, left, Operand(shift_count - 1));
1833 __ SmiTag(result, result, SetCC); 1818 __ SmiTagCheckOverflow(result, result, scratch);
1834 } else { 1819 } else {
1835 __ SmiTag(result, left, SetCC); 1820 __ SmiTagCheckOverflow(result, left, scratch);
1836 } 1821 }
1837 DeoptimizeIf(vs, instr->environment()); 1822 DeoptimizeIf(lt, instr->environment(), cr0);
1823 #endif
1838 } else { 1824 } else {
1839 __ mov(result, Operand(left, LSL, shift_count)); 1825 __ slwi(result, left, Operand(shift_count));
1826 #if V8_TARGET_ARCH_PPC64
1827 __ extsw(result, result);
1828 #endif
1840 } 1829 }
1841 } else { 1830 } else {
1842 __ Move(result, left); 1831 __ Move(result, left);
1843 } 1832 }
1844 break; 1833 break;
1845 default: 1834 default:
1846 UNREACHABLE(); 1835 UNREACHABLE();
1847 break; 1836 break;
1848 } 1837 }
1849 } 1838 }
1850 } 1839 }
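Two details of the shift lowering are easy to miss: the count is masked to five bits first, and rotate-right is synthesized as rotate-left by (32 - count) via the subfic/rotlw pair. The SHR case with can_deopt additionally bails out when the logical shift leaves the top bit set, because such a value is not representable as an int32. Sketch (names illustrative):

#include <cstdint>

uint32_t RotateRight(uint32_t value, uint32_t count) {
  count &= 0x1F;                           // andi scratch, right, 0x1F
  if (count == 0) return value;
  return (value << (32 - count)) | (value >> count);  // rotlw by 32 - count
}

// Logical shift right; returns false where the generated code would deopt
// because the result has the sign bit set (only possible when count == 0).
bool ShiftRightLogical(uint32_t value, uint32_t count, bool can_deopt,
                       int32_t* result) {
  count &= 0x1F;
  uint32_t shifted = value >> count;       // srw
  if (can_deopt && (shifted & 0x80000000u) != 0) return false;
  *result = static_cast<int32_t>(shifted);
  return true;
}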
1851 1840
1852 1841
1853 void LCodeGen::DoSubI(LSubI* instr) { 1842 void LCodeGen::DoSubI(LSubI* instr) {
1854 LOperand* left = instr->left();
1855 LOperand* right = instr->right(); 1843 LOperand* right = instr->right();
1856 LOperand* result = instr->result(); 1844 Register left = ToRegister(instr->left());
1845 Register result = ToRegister(instr->result());
1857 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 1846 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1858 SBit set_cond = can_overflow ? SetCC : LeaveCC; 1847 if (!can_overflow && right->IsConstantOperand()) {
1848 Operand right_operand = ToOperand(right);
1849 __ Add(result, left, -right_operand.immediate(), r0);
1850 } else {
1851 Register right_reg = EmitLoadRegister(right, ip);
1859 1852
1860 if (right->IsStackSlot()) { 1853 if (!can_overflow) {
1861 Register right_reg = EmitLoadRegister(right, ip); 1854 __ sub(result, left, right_reg);
1862 __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); 1855 } else {
1863 } else { 1856 __ SubAndCheckForOverflow(result,
1864 DCHECK(right->IsRegister() || right->IsConstantOperand()); 1857 left,
1865 __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); 1858 right_reg,
1859 scratch0(), r0);
1860 // Deoptimize on overflow
1861 #if V8_TARGET_ARCH_PPC64
1862 if (!instr->hydrogen()->representation().IsSmi()) {
1863 __ extsw(scratch0(), scratch0(), SetRC);
1864 }
1865 #endif
1866 DeoptimizeIf(lt, instr->environment(), cr0);
1867 }
1866 } 1868 }
1867 1869
1868 if (can_overflow) { 1870 #if V8_TARGET_ARCH_PPC64
1869 DeoptimizeIf(vs, instr->environment()); 1871 if (!instr->hydrogen()->representation().IsSmi()) {
1872 __ extsw(result, result);
1870 } 1873 }
1874 #endif
1871 } 1875 }
1872 1876
1873 1877
1874 void LCodeGen::DoRSubI(LRSubI* instr) { 1878 void LCodeGen::DoRSubI(LRSubI* instr) {
1875 LOperand* left = instr->left(); 1879 LOperand* left = instr->left();
1876 LOperand* right = instr->right(); 1880 LOperand* right = instr->right();
1877 LOperand* result = instr->result(); 1881 LOperand* result = instr->result();
1878 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1879 SBit set_cond = can_overflow ? SetCC : LeaveCC;
1880 1882
1881 if (right->IsStackSlot()) { 1883 DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow) &&
1882 Register right_reg = EmitLoadRegister(right, ip); 1884 right->IsConstantOperand());
1883 __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); 1885
1886 Operand right_operand = ToOperand(right);
1887 if (is_int16(right_operand.immediate())) {
1888 __ subfic(ToRegister(result), ToRegister(left), right_operand);
1884 } else { 1889 } else {
1885 DCHECK(right->IsRegister() || right->IsConstantOperand()); 1890 __ mov(r0, right_operand);
1886 __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); 1891 __ sub(ToRegister(result), r0, ToRegister(left));
1887 }
1888
1889 if (can_overflow) {
1890 DeoptimizeIf(vs, instr->environment());
1891 } 1892 }
1892 } 1893 }
1893 1894
1894 1895
1895 void LCodeGen::DoConstantI(LConstantI* instr) { 1896 void LCodeGen::DoConstantI(LConstantI* instr) {
1896 __ mov(ToRegister(instr->result()), Operand(instr->value())); 1897 __ mov(ToRegister(instr->result()), Operand(instr->value()));
1897 } 1898 }
1898 1899
1899 1900
1900 void LCodeGen::DoConstantS(LConstantS* instr) { 1901 void LCodeGen::DoConstantS(LConstantS* instr) {
1901 __ mov(ToRegister(instr->result()), Operand(instr->value())); 1902 __ LoadSmiLiteral(ToRegister(instr->result()), instr->value());
1902 } 1903 }
1903 1904
1904 1905
1906 // TODO(penguin): put the constant in the constant pool instead
1907 // of storing the double on the stack
1905 void LCodeGen::DoConstantD(LConstantD* instr) { 1908 void LCodeGen::DoConstantD(LConstantD* instr) {
1906 DCHECK(instr->result()->IsDoubleRegister()); 1909 DCHECK(instr->result()->IsDoubleRegister());
1907 DwVfpRegister result = ToDoubleRegister(instr->result()); 1910 DoubleRegister result = ToDoubleRegister(instr->result());
1908 double v = instr->value(); 1911 double v = instr->value();
1909 __ Vmov(result, v, scratch0()); 1912 __ LoadDoubleLiteral(result, v, scratch0());
1910 } 1913 }
1911 1914
1912 1915
1913 void LCodeGen::DoConstantE(LConstantE* instr) { 1916 void LCodeGen::DoConstantE(LConstantE* instr) {
1914 __ mov(ToRegister(instr->result()), Operand(instr->value())); 1917 __ mov(ToRegister(instr->result()), Operand(instr->value()));
1915 } 1918 }
1916 1919
1917 1920
1918 void LCodeGen::DoConstantT(LConstantT* instr) { 1921 void LCodeGen::DoConstantT(LConstantT* instr) {
1919 Handle<Object> object = instr->value(isolate()); 1922 Handle<Object> object = instr->value(isolate());
1920 AllowDeferredHandleDereference smi_check; 1923 AllowDeferredHandleDereference smi_check;
1921 __ Move(ToRegister(instr->result()), object); 1924 __ Move(ToRegister(instr->result()), object);
1922 } 1925 }
1923 1926
1924 1927
1925 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) { 1928 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
1926 Register result = ToRegister(instr->result()); 1929 Register result = ToRegister(instr->result());
1927 Register map = ToRegister(instr->value()); 1930 Register map = ToRegister(instr->value());
1928 __ EnumLength(result, map); 1931 __ EnumLength(result, map);
1929 } 1932 }
1930 1933
1931 1934
1932 void LCodeGen::DoDateField(LDateField* instr) { 1935 void LCodeGen::DoDateField(LDateField* instr) {
1933 Register object = ToRegister(instr->date()); 1936 Register object = ToRegister(instr->date());
1934 Register result = ToRegister(instr->result()); 1937 Register result = ToRegister(instr->result());
1935 Register scratch = ToRegister(instr->temp()); 1938 Register scratch = ToRegister(instr->temp());
1936 Smi* index = instr->index(); 1939 Smi* index = instr->index();
1937 Label runtime, done; 1940 Label runtime, done;
1938 DCHECK(object.is(result)); 1941 DCHECK(object.is(result));
1939 DCHECK(object.is(r0)); 1942 DCHECK(object.is(r3));
1940 DCHECK(!scratch.is(scratch0())); 1943 DCHECK(!scratch.is(scratch0()));
1941 DCHECK(!scratch.is(object)); 1944 DCHECK(!scratch.is(object));
1942 1945
1943 __ SmiTst(object); 1946 __ TestIfSmi(object, r0);
1944 DeoptimizeIf(eq, instr->environment()); 1947 DeoptimizeIf(eq, instr->environment(), cr0);
1945 __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE); 1948 __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
1946 DeoptimizeIf(ne, instr->environment()); 1949 DeoptimizeIf(ne, instr->environment());
1947 1950
1948 if (index->value() == 0) { 1951 if (index->value() == 0) {
1949 __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset)); 1952 __ LoadP(result, FieldMemOperand(object, JSDate::kValueOffset));
1950 } else { 1953 } else {
1951 if (index->value() < JSDate::kFirstUncachedField) { 1954 if (index->value() < JSDate::kFirstUncachedField) {
1952 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); 1955 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
1953 __ mov(scratch, Operand(stamp)); 1956 __ mov(scratch, Operand(stamp));
1954 __ ldr(scratch, MemOperand(scratch)); 1957 __ LoadP(scratch, MemOperand(scratch));
1955 __ ldr(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset)); 1958 __ LoadP(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
1956 __ cmp(scratch, scratch0()); 1959 __ cmp(scratch, scratch0());
1957 __ b(ne, &runtime); 1960 __ bne(&runtime);
1958 __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset + 1961 __ LoadP(result, FieldMemOperand(object, JSDate::kValueOffset +
1959 kPointerSize * index->value())); 1962 kPointerSize * index->value()));
1960 __ jmp(&done); 1963 __ b(&done);
1961 } 1964 }
1962 __ bind(&runtime); 1965 __ bind(&runtime);
1963 __ PrepareCallCFunction(2, scratch); 1966 __ PrepareCallCFunction(2, scratch);
1964 __ mov(r1, Operand(index)); 1967 __ LoadSmiLiteral(r4, index);
1965 __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2); 1968 __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
1966 __ bind(&done); 1969 __ bind(&done);
1967 } 1970 }
1968 } 1971 }
1969 1972
1970 1973
1971 MemOperand LCodeGen::BuildSeqStringOperand(Register string, 1974 MemOperand LCodeGen::BuildSeqStringOperand(Register string,
1972 LOperand* index, 1975 LOperand* index,
1973 String::Encoding encoding) { 1976 String::Encoding encoding) {
1974 if (index->IsConstantOperand()) { 1977 if (index->IsConstantOperand()) {
1975 int offset = ToInteger32(LConstantOperand::cast(index)); 1978 int offset = ToInteger32(LConstantOperand::cast(index));
1976 if (encoding == String::TWO_BYTE_ENCODING) { 1979 if (encoding == String::TWO_BYTE_ENCODING) {
1977 offset *= kUC16Size; 1980 offset *= kUC16Size;
1978 } 1981 }
1979 STATIC_ASSERT(kCharSize == 1); 1982 STATIC_ASSERT(kCharSize == 1);
1980 return FieldMemOperand(string, SeqString::kHeaderSize + offset); 1983 return FieldMemOperand(string, SeqString::kHeaderSize + offset);
1981 } 1984 }
1982 Register scratch = scratch0(); 1985 Register scratch = scratch0();
1983 DCHECK(!scratch.is(string)); 1986 DCHECK(!scratch.is(string));
1984 DCHECK(!scratch.is(ToRegister(index))); 1987 DCHECK(!scratch.is(ToRegister(index)));
1985 if (encoding == String::ONE_BYTE_ENCODING) { 1988 if (encoding == String::ONE_BYTE_ENCODING) {
1986 __ add(scratch, string, Operand(ToRegister(index))); 1989 __ add(scratch, string, ToRegister(index));
1987 } else { 1990 } else {
1988 STATIC_ASSERT(kUC16Size == 2); 1991 STATIC_ASSERT(kUC16Size == 2);
1989 __ add(scratch, string, Operand(ToRegister(index), LSL, 1)); 1992 __ ShiftLeftImm(scratch, ToRegister(index), Operand(1));
1993 __ add(scratch, string, scratch);
1990 } 1994 }
1991 return FieldMemOperand(scratch, SeqString::kHeaderSize); 1995 return FieldMemOperand(scratch, SeqString::kHeaderSize);
1992 } 1996 }
1993 1997
1994 1998
1995 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) { 1999 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
1996 String::Encoding encoding = instr->hydrogen()->encoding(); 2000 String::Encoding encoding = instr->hydrogen()->encoding();
1997 Register string = ToRegister(instr->string()); 2001 Register string = ToRegister(instr->string());
1998 Register result = ToRegister(instr->result()); 2002 Register result = ToRegister(instr->result());
1999 2003
2000 if (FLAG_debug_code) { 2004 if (FLAG_debug_code) {
2001 Register scratch = scratch0(); 2005 Register scratch = scratch0();
2002 __ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset)); 2006 __ LoadP(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
2003 __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); 2007 __ lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
2004 2008
2005 __ and_(scratch, scratch, 2009 __ andi(scratch, scratch,
2006 Operand(kStringRepresentationMask | kStringEncodingMask)); 2010 Operand(kStringRepresentationMask | kStringEncodingMask));
2007 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; 2011 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
2008 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; 2012 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
2009 __ cmp(scratch, Operand(encoding == String::ONE_BYTE_ENCODING 2013 __ cmpi(scratch, Operand(encoding == String::ONE_BYTE_ENCODING
2010 ? one_byte_seq_type : two_byte_seq_type)); 2014 ? one_byte_seq_type : two_byte_seq_type));
2011 __ Check(eq, kUnexpectedStringType); 2015 __ Check(eq, kUnexpectedStringType);
2012 } 2016 }
2013 2017
2014 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding); 2018 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
2015 if (encoding == String::ONE_BYTE_ENCODING) { 2019 if (encoding == String::ONE_BYTE_ENCODING) {
2016 __ ldrb(result, operand); 2020 __ lbz(result, operand);
2017 } else { 2021 } else {
2018 __ ldrh(result, operand); 2022 __ lhz(result, operand);
2019 } 2023 }
2020 } 2024 }
2021 2025
2022 2026
2023 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) { 2027 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
2024 String::Encoding encoding = instr->hydrogen()->encoding(); 2028 String::Encoding encoding = instr->hydrogen()->encoding();
2025 Register string = ToRegister(instr->string()); 2029 Register string = ToRegister(instr->string());
2026 Register value = ToRegister(instr->value()); 2030 Register value = ToRegister(instr->value());
2027 2031
2028 if (FLAG_debug_code) { 2032 if (FLAG_debug_code) {
2029 Register index = ToRegister(instr->index()); 2033 Register index = ToRegister(instr->index());
2030 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; 2034 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
2031 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; 2035 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
2032 int encoding_mask = 2036 int encoding_mask =
2033 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING 2037 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
2034 ? one_byte_seq_type : two_byte_seq_type; 2038 ? one_byte_seq_type : two_byte_seq_type;
2035 __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask); 2039 __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
2036 } 2040 }
2037 2041
2038 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding); 2042 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
2039 if (encoding == String::ONE_BYTE_ENCODING) { 2043 if (encoding == String::ONE_BYTE_ENCODING) {
2040 __ strb(value, operand); 2044 __ stb(value, operand);
2041 } else { 2045 } else {
2042 __ strh(value, operand); 2046 __ sth(value, operand);
2043 } 2047 }
2044 } 2048 }
2045 2049
2046 2050
2047 void LCodeGen::DoAddI(LAddI* instr) { 2051 void LCodeGen::DoAddI(LAddI* instr) {
2048 LOperand* left = instr->left();
2049 LOperand* right = instr->right(); 2052 LOperand* right = instr->right();
2050 LOperand* result = instr->result(); 2053 Register left = ToRegister(instr->left());
2054 Register result = ToRegister(instr->result());
2051 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 2055 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
2052 SBit set_cond = can_overflow ? SetCC : LeaveCC; 2056 #if V8_TARGET_ARCH_PPC64
2057 bool isInteger = !(instr->hydrogen()->representation().IsSmi() ||
2058 instr->hydrogen()->representation().IsExternal());
2059 #endif
2053 2060
2054 if (right->IsStackSlot()) { 2061 if (!can_overflow && right->IsConstantOperand()) {
2062 Operand right_operand = ToOperand(right);
2063 __ Add(result, left, right_operand.immediate(), r0);
2064 } else {
2055 Register right_reg = EmitLoadRegister(right, ip); 2065 Register right_reg = EmitLoadRegister(right, ip);
2056 __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); 2066
2057 } else { 2067 if (!can_overflow) {
2058 DCHECK(right->IsRegister() || right->IsConstantOperand()); 2068 __ add(result, left, right_reg);
2059 __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); 2069 } else { // can_overflow.
2070 __ AddAndCheckForOverflow(result,
2071 left,
2072 right_reg,
2073 scratch0(), r0);
2074 #if V8_TARGET_ARCH_PPC64
2075 if (isInteger) {
2076 __ extsw(scratch0(), scratch0(), SetRC);
2077 }
2078 #endif
2079 // Deoptimize on overflow
2080 DeoptimizeIf(lt, instr->environment(), cr0);
2081 }
2060 } 2082 }
2061 2083
2062 if (can_overflow) { 2084 #if V8_TARGET_ARCH_PPC64
2063 DeoptimizeIf(vs, instr->environment()); 2085 if (isInteger) {
2086 __ extsw(result, result);
2064 } 2087 }
2088 #endif
2065 } 2089 }
2066 2090
2067 2091
2068 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { 2092 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
2069 LOperand* left = instr->left(); 2093 LOperand* left = instr->left();
2070 LOperand* right = instr->right(); 2094 LOperand* right = instr->right();
2071 HMathMinMax::Operation operation = instr->hydrogen()->operation(); 2095 HMathMinMax::Operation operation = instr->hydrogen()->operation();
2096 Condition cond = (operation == HMathMinMax::kMathMin) ? le : ge;
2072 if (instr->hydrogen()->representation().IsSmiOrInteger32()) { 2097 if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
2073 Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
2074 Register left_reg = ToRegister(left); 2098 Register left_reg = ToRegister(left);
2075 Operand right_op = (right->IsRegister() || right->IsConstantOperand()) 2099 Register right_reg = EmitLoadRegister(right, ip);
2076 ? ToOperand(right)
2077 : Operand(EmitLoadRegister(right, ip));
2078 Register result_reg = ToRegister(instr->result()); 2100 Register result_reg = ToRegister(instr->result());
2079 __ cmp(left_reg, right_op); 2101 Label return_left, done;
2080 __ Move(result_reg, left_reg, condition); 2102 #if V8_TARGET_ARCH_PPC64
2081 __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition)); 2103 if (instr->hydrogen_value()->representation().IsSmi()) {
2104 #endif
2105 __ cmp(left_reg, right_reg);
2106 #if V8_TARGET_ARCH_PPC64
2107 } else {
2108 __ cmpw(left_reg, right_reg);
2109 }
2110 #endif
2111 __ b(cond, &return_left);
2112 __ Move(result_reg, right_reg);
2113 __ b(&done);
2114 __ bind(&return_left);
2115 __ Move(result_reg, left_reg);
2116 __ bind(&done);
2082 } else { 2117 } else {
2083 DCHECK(instr->hydrogen()->representation().IsDouble()); 2118 DCHECK(instr->hydrogen()->representation().IsDouble());
2084 DwVfpRegister left_reg = ToDoubleRegister(left); 2119 DoubleRegister left_reg = ToDoubleRegister(left);
2085 DwVfpRegister right_reg = ToDoubleRegister(right); 2120 DoubleRegister right_reg = ToDoubleRegister(right);
2086 DwVfpRegister result_reg = ToDoubleRegister(instr->result()); 2121 DoubleRegister result_reg = ToDoubleRegister(instr->result());
2087 Label result_is_nan, return_left, return_right, check_zero, done; 2122 Label check_nan_left, check_zero, return_left, return_right, done;
2088 __ VFPCompareAndSetFlags(left_reg, right_reg); 2123 __ fcmpu(left_reg, right_reg);
2124 __ bunordered(&check_nan_left);
2125 __ beq(&check_zero);
2126 __ b(cond, &return_left);
2127 __ b(&return_right);
2128
2129 __ bind(&check_zero);
2130 __ fcmpu(left_reg, kDoubleRegZero);
2131 __ bne(&return_left); // left == right != 0.
2132
2133 // At this point, both left and right are either 0 or -0.
2134 // N.B. The following works because +0 + -0 == +0
2089 if (operation == HMathMinMax::kMathMin) { 2135 if (operation == HMathMinMax::kMathMin) {
2090 __ b(mi, &return_left); 2136 // For min we want logical-or of sign bit: -(-L + -R)
2091 __ b(gt, &return_right); 2137 __ fneg(left_reg, left_reg);
2138 __ fsub(result_reg, left_reg, right_reg);
2139 __ fneg(result_reg, result_reg);
2092 } else { 2140 } else {
2093 __ b(mi, &return_right); 2141 // For max we want logical-and of sign bit: (L + R)
2094 __ b(gt, &return_left); 2142 __ fadd(result_reg, left_reg, right_reg);
2095 }
2096 __ b(vs, &result_is_nan);
2097 // Left equals right => check for -0.
2098 __ VFPCompareAndSetFlags(left_reg, 0.0);
2099 if (left_reg.is(result_reg) || right_reg.is(result_reg)) {
2100 __ b(ne, &done); // left == right != 0.
2101 } else {
2102 __ b(ne, &return_left); // left == right != 0.
2103 }
2104 // At this point, both left and right are either 0 or -0.
2105 if (operation == HMathMinMax::kMathMin) {
2106 // We could use a single 'vorr' instruction here if we had NEON support.
2107 __ vneg(left_reg, left_reg);
2108 __ vsub(result_reg, left_reg, right_reg);
2109 __ vneg(result_reg, result_reg);
2110 } else {
2111 // Since we operate on +0 and/or -0, vadd and vand have the same effect;
2112 // the decision for vadd is easy because vand is a NEON instruction.
2113 __ vadd(result_reg, left_reg, right_reg);
2114 } 2143 }
2115 __ b(&done); 2144 __ b(&done);
2116 2145
2117 __ bind(&result_is_nan); 2146 __ bind(&check_nan_left);
2118 __ vadd(result_reg, left_reg, right_reg); 2147 __ fcmpu(left_reg, left_reg);
2148 __ bunordered(&return_left); // left == NaN.
2149
2150 __ bind(&return_right);
2151 if (!right_reg.is(result_reg)) {
2152 __ fmr(result_reg, right_reg);
2153 }
2119 __ b(&done); 2154 __ b(&done);
2120 2155
2121 __ bind(&return_right); 2156 __ bind(&return_left);
2122 __ Move(result_reg, right_reg);
2123 if (!left_reg.is(result_reg)) { 2157 if (!left_reg.is(result_reg)) {
2124 __ b(&done); 2158 __ fmr(result_reg, left_reg);
2125 } 2159 }
2126
2127 __ bind(&return_left);
2128 __ Move(result_reg, left_reg);
2129
2130 __ bind(&done); 2160 __ bind(&done);
2131 } 2161 }
2132 } 2162 }
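The ±0 handling above relies on IEEE 754 addition: +0 + -0 is +0 and -0 + -0 is -0, so adding the operands combines their sign bits with a logical AND (what max needs), while negating both, adding, and negating again combines them with a logical OR (what min needs). A minimal standalone sketch of that identity, independent of the generated PPC code:

    #include <cmath>
    #include <cstdio>

    int main() {
      double pz = 0.0, nz = -0.0;
      // min(+0, -0) must be -0: OR of the sign bits via -(-L + -R).
      double min_zero = -((-pz) + (-nz));
      // max(+0, -0) must be +0: AND of the sign bits via L + R.
      double max_zero = pz + nz;
      std::printf("min is -0: %d, max is +0: %d\n",
                  std::signbit(min_zero), !std::signbit(max_zero));
      return 0;
    }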
2133 2163
2134 2164
2135 void LCodeGen::DoArithmeticD(LArithmeticD* instr) { 2165 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
2136 DwVfpRegister left = ToDoubleRegister(instr->left()); 2166 DoubleRegister left = ToDoubleRegister(instr->left());
2137 DwVfpRegister right = ToDoubleRegister(instr->right()); 2167 DoubleRegister right = ToDoubleRegister(instr->right());
2138 DwVfpRegister result = ToDoubleRegister(instr->result()); 2168 DoubleRegister result = ToDoubleRegister(instr->result());
2139 switch (instr->op()) { 2169 switch (instr->op()) {
2140 case Token::ADD: 2170 case Token::ADD:
2141 __ vadd(result, left, right); 2171 __ fadd(result, left, right);
2142 break; 2172 break;
2143 case Token::SUB: 2173 case Token::SUB:
2144 __ vsub(result, left, right); 2174 __ fsub(result, left, right);
2145 break; 2175 break;
2146 case Token::MUL: 2176 case Token::MUL:
2147 __ vmul(result, left, right); 2177 __ fmul(result, left, right);
2148 break; 2178 break;
2149 case Token::DIV: 2179 case Token::DIV:
2150 __ vdiv(result, left, right); 2180 __ fdiv(result, left, right);
2151 break; 2181 break;
2152 case Token::MOD: { 2182 case Token::MOD: {
2153 __ PrepareCallCFunction(0, 2, scratch0()); 2183 __ PrepareCallCFunction(0, 2, scratch0());
2154 __ MovToFloatParameters(left, right); 2184 __ MovToFloatParameters(left, right);
2155 __ CallCFunction( 2185 __ CallCFunction(
2156 ExternalReference::mod_two_doubles_operation(isolate()), 2186 ExternalReference::mod_two_doubles_operation(isolate()),
2157 0, 2); 2187 0, 2);
2158 // Move the result into the double result register. 2188 // Move the result into the double result register.
2159 __ MovFromFloatResult(result); 2189 __ MovFromFloatResult(result);
2160 break; 2190 break;
2161 } 2191 }
2162 default: 2192 default:
2163 UNREACHABLE(); 2193 UNREACHABLE();
2164 break; 2194 break;
2165 } 2195 }
2166 } 2196 }
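Unlike ADD, SUB, MUL, and DIV, Token::MOD has no single floating-point instruction here, so the generated code calls out through CallCFunction. The helper behaves like the C library fmod; a hedged sketch of the operation being requested (this is not the actual V8 runtime entry point):

    #include <cmath>

    // Illustration only: the real helper is reached via
    // ExternalReference::mod_two_doubles_operation.
    static double ModTwoDoubles(double left, double right) {
      return std::fmod(left, right);
    }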
2167 2197
2168 2198
2169 void LCodeGen::DoArithmeticT(LArithmeticT* instr) { 2199 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
2170 DCHECK(ToRegister(instr->context()).is(cp)); 2200 DCHECK(ToRegister(instr->context()).is(cp));
2171 DCHECK(ToRegister(instr->left()).is(r1)); 2201 DCHECK(ToRegister(instr->left()).is(r4));
2172 DCHECK(ToRegister(instr->right()).is(r0)); 2202 DCHECK(ToRegister(instr->right()).is(r3));
2173 DCHECK(ToRegister(instr->result()).is(r0)); 2203 DCHECK(ToRegister(instr->result()).is(r3));
2174 2204
2175 BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE); 2205 BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE);
2176 // Block literal pool emission to ensure nop indicating no inlined smi code
2177 // is in the correct position.
2178 Assembler::BlockConstPoolScope block_const_pool(masm());
2179 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 2206 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2180 } 2207 }
2181 2208
2182 2209
2183 template<class InstrType> 2210 template<class InstrType>
2184 void LCodeGen::EmitBranch(InstrType instr, Condition condition) { 2211 void LCodeGen::EmitBranch(InstrType instr, Condition cond,
2212 CRegister cr) {
2185 int left_block = instr->TrueDestination(chunk_); 2213 int left_block = instr->TrueDestination(chunk_);
2186 int right_block = instr->FalseDestination(chunk_); 2214 int right_block = instr->FalseDestination(chunk_);
2187 2215
2188 int next_block = GetNextEmittedBlock(); 2216 int next_block = GetNextEmittedBlock();
2189 2217
2190 if (right_block == left_block || condition == al) { 2218 if (right_block == left_block || cond == al) {
2191 EmitGoto(left_block); 2219 EmitGoto(left_block);
2192 } else if (left_block == next_block) { 2220 } else if (left_block == next_block) {
2193 __ b(NegateCondition(condition), chunk_->GetAssemblyLabel(right_block)); 2221 __ b(NegateCondition(cond), chunk_->GetAssemblyLabel(right_block), cr);
2194 } else if (right_block == next_block) { 2222 } else if (right_block == next_block) {
2195 __ b(condition, chunk_->GetAssemblyLabel(left_block)); 2223 __ b(cond, chunk_->GetAssemblyLabel(left_block), cr);
2196 } else { 2224 } else {
2197 __ b(condition, chunk_->GetAssemblyLabel(left_block)); 2225 __ b(cond, chunk_->GetAssemblyLabel(left_block), cr);
2198 __ b(chunk_->GetAssemblyLabel(right_block)); 2226 __ b(chunk_->GetAssemblyLabel(right_block));
2199 } 2227 }
2200 } 2228 }
2201 2229
2202 2230
2203 template<class InstrType> 2231 template<class InstrType>
2204 void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition) { 2232 void LCodeGen::EmitFalseBranch(InstrType instr, Condition cond,
2233 CRegister cr) {
2205 int false_block = instr->FalseDestination(chunk_); 2234 int false_block = instr->FalseDestination(chunk_);
2206 __ b(condition, chunk_->GetAssemblyLabel(false_block)); 2235 __ b(cond, chunk_->GetAssemblyLabel(false_block), cr);
2207 } 2236 }
2208 2237
2209 2238
2210 void LCodeGen::DoDebugBreak(LDebugBreak* instr) { 2239 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
2211 __ stop("LBreak"); 2240 __ stop("LBreak");
2212 } 2241 }
2213 2242
2214 2243
2215 void LCodeGen::DoBranch(LBranch* instr) { 2244 void LCodeGen::DoBranch(LBranch* instr) {
2216 Representation r = instr->hydrogen()->value()->representation(); 2245 Representation r = instr->hydrogen()->value()->representation();
2217 if (r.IsInteger32() || r.IsSmi()) { 2246 DoubleRegister dbl_scratch = double_scratch0();
2247 const uint crZOrNaNBits = (1 << (31 - Assembler::encode_crbit(cr7, CR_EQ)) |
2248 1 << (31 - Assembler::encode_crbit(cr7, CR_FU)));
2249
2250 if (r.IsInteger32()) {
2218 DCHECK(!info()->IsStub()); 2251 DCHECK(!info()->IsStub());
2219 Register reg = ToRegister(instr->value()); 2252 Register reg = ToRegister(instr->value());
2220 __ cmp(reg, Operand::Zero()); 2253 __ cmpwi(reg, Operand::Zero());
2254 EmitBranch(instr, ne);
2255 } else if (r.IsSmi()) {
2256 DCHECK(!info()->IsStub());
2257 Register reg = ToRegister(instr->value());
2258 __ cmpi(reg, Operand::Zero());
2221 EmitBranch(instr, ne); 2259 EmitBranch(instr, ne);
2222 } else if (r.IsDouble()) { 2260 } else if (r.IsDouble()) {
2223 DCHECK(!info()->IsStub()); 2261 DCHECK(!info()->IsStub());
2224 DwVfpRegister reg = ToDoubleRegister(instr->value()); 2262 DoubleRegister reg = ToDoubleRegister(instr->value());
2225 // Test the double value. Zero and NaN are false. 2263 // Test the double value. Zero and NaN are false.
2226 __ VFPCompareAndSetFlags(reg, 0.0); 2264 __ fcmpu(reg, kDoubleRegZero, cr7);
2227 __ cmp(r0, r0, vs); // If NaN, set the Z flag. (NaN -> false) 2265 __ mfcr(r0);
2228 EmitBranch(instr, ne); 2266 __ andi(r0, r0, Operand(crZOrNaNBits));
2267 EmitBranch(instr, eq, cr0);
2229 } else { 2268 } else {
2230 DCHECK(r.IsTagged()); 2269 DCHECK(r.IsTagged());
2231 Register reg = ToRegister(instr->value()); 2270 Register reg = ToRegister(instr->value());
2232 HType type = instr->hydrogen()->value()->type(); 2271 HType type = instr->hydrogen()->value()->type();
2233 if (type.IsBoolean()) { 2272 if (type.IsBoolean()) {
2234 DCHECK(!info()->IsStub()); 2273 DCHECK(!info()->IsStub());
2235 __ CompareRoot(reg, Heap::kTrueValueRootIndex); 2274 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2236 EmitBranch(instr, eq); 2275 EmitBranch(instr, eq);
2237 } else if (type.IsSmi()) { 2276 } else if (type.IsSmi()) {
2238 DCHECK(!info()->IsStub()); 2277 DCHECK(!info()->IsStub());
2239 __ cmp(reg, Operand::Zero()); 2278 __ cmpi(reg, Operand::Zero());
2240 EmitBranch(instr, ne); 2279 EmitBranch(instr, ne);
2241 } else if (type.IsJSArray()) { 2280 } else if (type.IsJSArray()) {
2242 DCHECK(!info()->IsStub()); 2281 DCHECK(!info()->IsStub());
2243 EmitBranch(instr, al); 2282 EmitBranch(instr, al);
2244 } else if (type.IsHeapNumber()) { 2283 } else if (type.IsHeapNumber()) {
2245 DCHECK(!info()->IsStub()); 2284 DCHECK(!info()->IsStub());
2246 DwVfpRegister dbl_scratch = double_scratch0(); 2285 __ lfd(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2247 __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2248 // Test the double value. Zero and NaN are false. 2286 // Test the double value. Zero and NaN are false.
2249 __ VFPCompareAndSetFlags(dbl_scratch, 0.0); 2287 __ fcmpu(dbl_scratch, kDoubleRegZero, cr7);
2250 __ cmp(r0, r0, vs); // If NaN, set the Z flag. (NaN) 2288 __ mfcr(r0);
2251 EmitBranch(instr, ne); 2289 __ andi(r0, r0, Operand(crZOrNaNBits));
2290 EmitBranch(instr, eq, cr0);
2252 } else if (type.IsString()) { 2291 } else if (type.IsString()) {
2253 DCHECK(!info()->IsStub()); 2292 DCHECK(!info()->IsStub());
2254 __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset)); 2293 __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
2255 __ cmp(ip, Operand::Zero()); 2294 __ cmpi(ip, Operand::Zero());
2256 EmitBranch(instr, ne); 2295 EmitBranch(instr, ne);
2257 } else { 2296 } else {
2258 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types(); 2297 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
2259 // Avoid deopts in the case where we've never executed this path before. 2298 // Avoid deopts in the case where we've never executed this path before.
2260 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic(); 2299 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
2261 2300
2262 if (expected.Contains(ToBooleanStub::UNDEFINED)) { 2301 if (expected.Contains(ToBooleanStub::UNDEFINED)) {
2263 // undefined -> false. 2302 // undefined -> false.
2264 __ CompareRoot(reg, Heap::kUndefinedValueRootIndex); 2303 __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
2265 __ b(eq, instr->FalseLabel(chunk_)); 2304 __ beq(instr->FalseLabel(chunk_));
2266 } 2305 }
2267 if (expected.Contains(ToBooleanStub::BOOLEAN)) { 2306 if (expected.Contains(ToBooleanStub::BOOLEAN)) {
2268 // Boolean -> its value. 2307 // Boolean -> its value.
2269 __ CompareRoot(reg, Heap::kTrueValueRootIndex); 2308 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2270 __ b(eq, instr->TrueLabel(chunk_)); 2309 __ beq(instr->TrueLabel(chunk_));
2271 __ CompareRoot(reg, Heap::kFalseValueRootIndex); 2310 __ CompareRoot(reg, Heap::kFalseValueRootIndex);
2272 __ b(eq, instr->FalseLabel(chunk_)); 2311 __ beq(instr->FalseLabel(chunk_));
2273 } 2312 }
2274 if (expected.Contains(ToBooleanStub::NULL_TYPE)) { 2313 if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
2275 // 'null' -> false. 2314 // 'null' -> false.
2276 __ CompareRoot(reg, Heap::kNullValueRootIndex); 2315 __ CompareRoot(reg, Heap::kNullValueRootIndex);
2277 __ b(eq, instr->FalseLabel(chunk_)); 2316 __ beq(instr->FalseLabel(chunk_));
2278 } 2317 }
2279 2318
2280 if (expected.Contains(ToBooleanStub::SMI)) { 2319 if (expected.Contains(ToBooleanStub::SMI)) {
2281 // Smis: 0 -> false, all other -> true. 2320 // Smis: 0 -> false, all other -> true.
2282 __ cmp(reg, Operand::Zero()); 2321 __ cmpi(reg, Operand::Zero());
2283 __ b(eq, instr->FalseLabel(chunk_)); 2322 __ beq(instr->FalseLabel(chunk_));
2284 __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); 2323 __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2285 } else if (expected.NeedsMap()) { 2324 } else if (expected.NeedsMap()) {
2286 // If we need a map later and have a Smi -> deopt. 2325 // If we need a map later and have a Smi -> deopt.
2287 __ SmiTst(reg); 2326 __ TestIfSmi(reg, r0);
2288 DeoptimizeIf(eq, instr->environment()); 2327 DeoptimizeIf(eq, instr->environment(), cr0);
2289 } 2328 }
2290 2329
2291 const Register map = scratch0(); 2330 const Register map = scratch0();
2292 if (expected.NeedsMap()) { 2331 if (expected.NeedsMap()) {
2293 __ ldr(map, FieldMemOperand(reg, HeapObject::kMapOffset)); 2332 __ LoadP(map, FieldMemOperand(reg, HeapObject::kMapOffset));
2294 2333
2295 if (expected.CanBeUndetectable()) { 2334 if (expected.CanBeUndetectable()) {
2296 // Undetectable -> false. 2335 // Undetectable -> false.
2297 __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset)); 2336 __ lbz(ip, FieldMemOperand(map, Map::kBitFieldOffset));
2298 __ tst(ip, Operand(1 << Map::kIsUndetectable)); 2337 __ TestBit(ip, Map::kIsUndetectable, r0);
2299 __ b(ne, instr->FalseLabel(chunk_)); 2338 __ bne(instr->FalseLabel(chunk_), cr0);
2300 } 2339 }
2301 } 2340 }
2302 2341
2303 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) { 2342 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
2304 // spec object -> true. 2343 // spec object -> true.
2305 __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE); 2344 __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
2306 __ b(ge, instr->TrueLabel(chunk_)); 2345 __ bge(instr->TrueLabel(chunk_));
2307 } 2346 }
2308 2347
2309 if (expected.Contains(ToBooleanStub::STRING)) { 2348 if (expected.Contains(ToBooleanStub::STRING)) {
2310 // String value -> false iff empty. 2349 // String value -> false iff empty.
2311 Label not_string; 2350 Label not_string;
2312 __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE); 2351 __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
2313 __ b(ge, &not_string); 2352 __ bge(&not_string);
2314 __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset)); 2353 __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
2315 __ cmp(ip, Operand::Zero()); 2354 __ cmpi(ip, Operand::Zero());
2316 __ b(ne, instr->TrueLabel(chunk_)); 2355 __ bne(instr->TrueLabel(chunk_));
2317 __ b(instr->FalseLabel(chunk_)); 2356 __ b(instr->FalseLabel(chunk_));
2318 __ bind(&not_string); 2357 __ bind(&not_string);
2319 } 2358 }
2320 2359
2321 if (expected.Contains(ToBooleanStub::SYMBOL)) { 2360 if (expected.Contains(ToBooleanStub::SYMBOL)) {
2322 // Symbol value -> true. 2361 // Symbol value -> true.
2323 __ CompareInstanceType(map, ip, SYMBOL_TYPE); 2362 __ CompareInstanceType(map, ip, SYMBOL_TYPE);
2324 __ b(eq, instr->TrueLabel(chunk_)); 2363 __ beq(instr->TrueLabel(chunk_));
2325 } 2364 }
2326 2365
2327 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) { 2366 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
2328 // heap number -> false iff +0, -0, or NaN. 2367 // heap number -> false iff +0, -0, or NaN.
2329 DwVfpRegister dbl_scratch = double_scratch0();
2330 Label not_heap_number; 2368 Label not_heap_number;
2331 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex); 2369 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
2332 __ b(ne, &not_heap_number); 2370 __ bne(&not_heap_number);
2333 __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset)); 2371 __ lfd(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2334 __ VFPCompareAndSetFlags(dbl_scratch, 0.0); 2372 // Test the double value. Zero and NaN are false.
2335 __ cmp(r0, r0, vs); // NaN -> false. 2373 __ fcmpu(dbl_scratch, kDoubleRegZero, cr7);
2336 __ b(eq, instr->FalseLabel(chunk_)); // +0, -0 -> false. 2374 __ mfcr(r0);
2375 __ andi(r0, r0, Operand(crZOrNaNBits));
2376 __ bne(instr->FalseLabel(chunk_), cr0);
2337 __ b(instr->TrueLabel(chunk_)); 2377 __ b(instr->TrueLabel(chunk_));
2338 __ bind(&not_heap_number); 2378 __ bind(&not_heap_number);
2339 } 2379 }
2340 2380
2341 if (!expected.IsGeneric()) { 2381 if (!expected.IsGeneric()) {
2342 // We've seen something for the first time -> deopt. 2382 // We've seen something for the first time -> deopt.
2343 // This can only happen if we are not generic already. 2383 // This can only happen if we are not generic already.
2344 DeoptimizeIf(al, instr->environment()); 2384 DeoptimizeIf(al, instr->environment());
2345 } 2385 }
2346 } 2386 }
2347 } 2387 }
2348 } 2388 }
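For the double and heap-number cases above, the fcmpu against kDoubleRegZero followed by mfcr/andi over crZOrNaNBits implements a single test for "is this value ±0 or NaN", both of which are falsy. A portable sketch of the predicate those instructions compute, assuming nothing about the PPC condition-register layout:

    #include <cmath>

    // True exactly when DoBranch should take the false edge for a double:
    // the value compares equal to zero (EQ bit) or is unordered/NaN (FU bit).
    static bool DoubleIsFalsy(double value) {
      return value == 0.0 || std::isnan(value);
    }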
2349 2389
2350 2390
2351 void LCodeGen::EmitGoto(int block) { 2391 void LCodeGen::EmitGoto(int block) {
2352 if (!IsNextEmittedBlock(block)) { 2392 if (!IsNextEmittedBlock(block)) {
2353 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block))); 2393 __ b(chunk_->GetAssemblyLabel(LookupDestination(block)));
2354 } 2394 }
2355 } 2395 }
2356 2396
2357 2397
2358 void LCodeGen::DoGoto(LGoto* instr) { 2398 void LCodeGen::DoGoto(LGoto* instr) {
2359 EmitGoto(instr->block_id()); 2399 EmitGoto(instr->block_id());
2360 } 2400 }
2361 2401
2362 2402
2363 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) { 2403 Condition LCodeGen::TokenToCondition(Token::Value op) {
2364 Condition cond = kNoCondition; 2404 Condition cond = kNoCondition;
2365 switch (op) { 2405 switch (op) {
2366 case Token::EQ: 2406 case Token::EQ:
2367 case Token::EQ_STRICT: 2407 case Token::EQ_STRICT:
2368 cond = eq; 2408 cond = eq;
2369 break; 2409 break;
2370 case Token::NE: 2410 case Token::NE:
2371 case Token::NE_STRICT: 2411 case Token::NE_STRICT:
2372 cond = ne; 2412 cond = ne;
2373 break; 2413 break;
2374 case Token::LT: 2414 case Token::LT:
2375 cond = is_unsigned ? lo : lt; 2415 cond = lt;
2376 break; 2416 break;
2377 case Token::GT: 2417 case Token::GT:
2378 cond = is_unsigned ? hi : gt; 2418 cond = gt;
2379 break; 2419 break;
2380 case Token::LTE: 2420 case Token::LTE:
2381 cond = is_unsigned ? ls : le; 2421 cond = le;
2382 break; 2422 break;
2383 case Token::GTE: 2423 case Token::GTE:
2384 cond = is_unsigned ? hs : ge; 2424 cond = ge;
2385 break; 2425 break;
2386 case Token::IN: 2426 case Token::IN:
2387 case Token::INSTANCEOF: 2427 case Token::INSTANCEOF:
2388 default: 2428 default:
2389 UNREACHABLE(); 2429 UNREACHABLE();
2390 } 2430 }
2391 return cond; 2431 return cond;
2392 } 2432 }
2393 2433
2394 2434
2395 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) { 2435 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2396 LOperand* left = instr->left(); 2436 LOperand* left = instr->left();
2397 LOperand* right = instr->right(); 2437 LOperand* right = instr->right();
2398 bool is_unsigned = 2438 bool is_unsigned =
2399 instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) || 2439 instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2400 instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32); 2440 instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2401 Condition cond = TokenToCondition(instr->op(), is_unsigned); 2441 Condition cond = TokenToCondition(instr->op());
2402 2442
2403 if (left->IsConstantOperand() && right->IsConstantOperand()) { 2443 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2404 // We can statically evaluate the comparison. 2444 // We can statically evaluate the comparison.
2405 double left_val = ToDouble(LConstantOperand::cast(left)); 2445 double left_val = ToDouble(LConstantOperand::cast(left));
2406 double right_val = ToDouble(LConstantOperand::cast(right)); 2446 double right_val = ToDouble(LConstantOperand::cast(right));
2407 int next_block = EvalComparison(instr->op(), left_val, right_val) ? 2447 int next_block = EvalComparison(instr->op(), left_val, right_val) ?
2408 instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_); 2448 instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
2409 EmitGoto(next_block); 2449 EmitGoto(next_block);
2410 } else { 2450 } else {
2411 if (instr->is_double()) { 2451 if (instr->is_double()) {
2412 // Compare left and right operands as doubles and load the 2452 // Compare left and right operands as doubles and load the
2413 // resulting flags into the normal status register. 2453 // resulting flags into the normal status register.
2414 __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right)); 2454 __ fcmpu(ToDoubleRegister(left), ToDoubleRegister(right));
2415 // If a NaN is involved, i.e. the result is unordered (V set), 2455 // If a NaN is involved, i.e. the result is unordered,
2416 // jump to false block label. 2456 // jump to false block label.
2417 __ b(vs, instr->FalseLabel(chunk_)); 2457 __ bunordered(instr->FalseLabel(chunk_));
2418 } else { 2458 } else {
2419 if (right->IsConstantOperand()) { 2459 if (right->IsConstantOperand()) {
2420 int32_t value = ToInteger32(LConstantOperand::cast(right)); 2460 int32_t value = ToInteger32(LConstantOperand::cast(right));
2421 if (instr->hydrogen_value()->representation().IsSmi()) { 2461 if (instr->hydrogen_value()->representation().IsSmi()) {
2422 __ cmp(ToRegister(left), Operand(Smi::FromInt(value))); 2462 if (is_unsigned) {
2463 __ CmplSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
2464 } else {
2465 __ CmpSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
2466 }
2423 } else { 2467 } else {
2424 __ cmp(ToRegister(left), Operand(value)); 2468 if (is_unsigned) {
2469 __ Cmplwi(ToRegister(left), Operand(value), r0);
2470 } else {
2471 __ Cmpwi(ToRegister(left), Operand(value), r0);
2472 }
2425 } 2473 }
2426 } else if (left->IsConstantOperand()) { 2474 } else if (left->IsConstantOperand()) {
2427 int32_t value = ToInteger32(LConstantOperand::cast(left)); 2475 int32_t value = ToInteger32(LConstantOperand::cast(left));
2428 if (instr->hydrogen_value()->representation().IsSmi()) { 2476 if (instr->hydrogen_value()->representation().IsSmi()) {
2429 __ cmp(ToRegister(right), Operand(Smi::FromInt(value))); 2477 if (is_unsigned) {
2478 __ CmplSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
2479 } else {
2480 __ CmpSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
2481 }
2430 } else { 2482 } else {
2431 __ cmp(ToRegister(right), Operand(value)); 2483 if (is_unsigned) {
2484 __ Cmplwi(ToRegister(right), Operand(value), r0);
2485 } else {
2486 __ Cmpwi(ToRegister(right), Operand(value), r0);
2487 }
2432 } 2488 }
2433 // We commuted the operands, so commute the condition. 2489 // We commuted the operands, so commute the condition.
2434 cond = CommuteCondition(cond); 2490 cond = CommuteCondition(cond);
2491 } else if (instr->hydrogen_value()->representation().IsSmi()) {
2492 if (is_unsigned) {
2493 __ cmpl(ToRegister(left), ToRegister(right));
2494 } else {
2495 __ cmp(ToRegister(left), ToRegister(right));
2496 }
2435 } else { 2497 } else {
2436 __ cmp(ToRegister(left), ToRegister(right)); 2498 if (is_unsigned) {
2499 __ cmplw(ToRegister(left), ToRegister(right));
2500 } else {
2501 __ cmpw(ToRegister(left), ToRegister(right));
2502 }
2437 } 2503 }
2438 } 2504 }
2439 EmitBranch(instr, cond); 2505 EmitBranch(instr, cond);
2440 } 2506 }
2441 } 2507 }
2442 2508
2443 2509
2444 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) { 2510 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2445 Register left = ToRegister(instr->left()); 2511 Register left = ToRegister(instr->left());
2446 Register right = ToRegister(instr->right()); 2512 Register right = ToRegister(instr->right());
2447 2513
2448 __ cmp(left, Operand(right)); 2514 __ cmp(left, right);
2449 EmitBranch(instr, eq); 2515 EmitBranch(instr, eq);
2450 } 2516 }
2451 2517
2452 2518
2453 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) { 2519 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2454 if (instr->hydrogen()->representation().IsTagged()) { 2520 if (instr->hydrogen()->representation().IsTagged()) {
2455 Register input_reg = ToRegister(instr->object()); 2521 Register input_reg = ToRegister(instr->object());
2456 __ mov(ip, Operand(factory()->the_hole_value())); 2522 __ mov(ip, Operand(factory()->the_hole_value()));
2457 __ cmp(input_reg, ip); 2523 __ cmp(input_reg, ip);
2458 EmitBranch(instr, eq); 2524 EmitBranch(instr, eq);
2459 return; 2525 return;
2460 } 2526 }
2461 2527
2462 DwVfpRegister input_reg = ToDoubleRegister(instr->object()); 2528 DoubleRegister input_reg = ToDoubleRegister(instr->object());
2463 __ VFPCompareAndSetFlags(input_reg, input_reg); 2529 __ fcmpu(input_reg, input_reg);
2464 EmitFalseBranch(instr, vc); 2530 EmitFalseBranch(instr, ordered);
2465 2531
2466 Register scratch = scratch0(); 2532 Register scratch = scratch0();
2467 __ VmovHigh(scratch, input_reg); 2533 __ MovDoubleHighToInt(scratch, input_reg);
2468 __ cmp(scratch, Operand(kHoleNanUpper32)); 2534 __ Cmpi(scratch, Operand(kHoleNanUpper32), r0);
2469 EmitBranch(instr, eq); 2535 EmitBranch(instr, eq);
2470 } 2536 }
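DoCmpHoleAndBranch first confirms the value is unordered with itself (a NaN) and then matches the upper 32 bits of its bit pattern against kHoleNanUpper32, since V8 encodes "the hole" in double arrays as one specific NaN payload. A sketch of that check; hole_nan_upper32 is a placeholder standing in for V8's kHoleNanUpper32 constant, whose value is not shown here:

    #include <cstdint>
    #include <cstring>

    static bool IsHoleNan(double value, uint32_t hole_nan_upper32) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));   // reinterpret the double
      bool is_nan = (value != value);             // unordered with itself
      uint32_t upper = static_cast<uint32_t>(bits >> 32);
      return is_nan && upper == hole_nan_upper32;
    }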
2471 2537
2472 2538
2473 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) { 2539 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2474 Representation rep = instr->hydrogen()->value()->representation(); 2540 Representation rep = instr->hydrogen()->value()->representation();
2475 DCHECK(!rep.IsInteger32()); 2541 DCHECK(!rep.IsInteger32());
2476 Register scratch = ToRegister(instr->temp()); 2542 Register scratch = ToRegister(instr->temp());
2477 2543
2478 if (rep.IsDouble()) { 2544 if (rep.IsDouble()) {
2479 DwVfpRegister value = ToDoubleRegister(instr->value()); 2545 DoubleRegister value = ToDoubleRegister(instr->value());
2480 __ VFPCompareAndSetFlags(value, 0.0); 2546 __ fcmpu(value, kDoubleRegZero);
2481 EmitFalseBranch(instr, ne); 2547 EmitFalseBranch(instr, ne);
2482 __ VmovHigh(scratch, value); 2548 #if V8_TARGET_ARCH_PPC64
2483 __ cmp(scratch, Operand(0x80000000)); 2549 __ MovDoubleToInt64(scratch, value);
2550 #else
2551 __ MovDoubleHighToInt(scratch, value);
2552 #endif
2553 __ cmpi(scratch, Operand::Zero());
2554 EmitBranch(instr, lt);
2484 } else { 2555 } else {
2485 Register value = ToRegister(instr->value()); 2556 Register value = ToRegister(instr->value());
2486 __ CheckMap(value, 2557 __ CheckMap(value,
2487 scratch, 2558 scratch,
2488 Heap::kHeapNumberMapRootIndex, 2559 Heap::kHeapNumberMapRootIndex,
2489 instr->FalseLabel(chunk()), 2560 instr->FalseLabel(chunk()),
2490 DO_SMI_CHECK); 2561 DO_SMI_CHECK);
2491 __ ldr(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset)); 2562 #if V8_TARGET_ARCH_PPC64
2492 __ ldr(ip, FieldMemOperand(value, HeapNumber::kMantissaOffset)); 2563 __ LoadP(scratch, FieldMemOperand(value, HeapNumber::kValueOffset));
2493 __ cmp(scratch, Operand(0x80000000)); 2564 __ li(ip, Operand(1));
2494 __ cmp(ip, Operand(0x00000000), eq); 2565 __ rotrdi(ip, ip, 1); // ip = 0x80000000_00000000
2566 __ cmp(scratch, ip);
2567 #else
2568 __ lwz(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
2569 __ lwz(ip, FieldMemOperand(value, HeapNumber::kMantissaOffset));
2570 Label skip;
2571 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
2572 __ cmp(scratch, r0);
2573 __ bne(&skip);
2574 __ cmpi(ip, Operand::Zero());
2575 __ bind(&skip);
2576 #endif
2577 EmitBranch(instr, eq);
2495 } 2578 }
2496 EmitBranch(instr, eq);
2497 } 2579 }
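Once the fcmpu has established that the value compares equal to zero, only the sign bit distinguishes -0 from +0: -0.0 is the single double whose bit pattern is 0x8000000000000000. That is what both the PPC64 path (64-bit move, signed compare against zero) and the 32-bit path (exponent word == 0x80000000, mantissa word == 0) test. A standalone sketch of the same check:

    #include <cstdint>
    #include <cstring>

    static bool IsMinusZero(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      return bits == 0x8000000000000000ULL;  // sign bit set, everything else 0
    }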
2498 2580
2499 2581
2500 Condition LCodeGen::EmitIsObject(Register input, 2582 Condition LCodeGen::EmitIsObject(Register input,
2501 Register temp1, 2583 Register temp1,
2502 Label* is_not_object, 2584 Label* is_not_object,
2503 Label* is_object) { 2585 Label* is_object) {
2504 Register temp2 = scratch0(); 2586 Register temp2 = scratch0();
2505 __ JumpIfSmi(input, is_not_object); 2587 __ JumpIfSmi(input, is_not_object);
2506 2588
2507 __ LoadRoot(temp2, Heap::kNullValueRootIndex); 2589 __ LoadRoot(temp2, Heap::kNullValueRootIndex);
2508 __ cmp(input, temp2); 2590 __ cmp(input, temp2);
2509 __ b(eq, is_object); 2591 __ beq(is_object);
2510 2592
2511 // Load map. 2593 // Load map.
2512 __ ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset)); 2594 __ LoadP(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
2513 // Undetectable objects behave like undefined. 2595 // Undetectable objects behave like undefined.
2514 __ ldrb(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset)); 2596 __ lbz(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
2515 __ tst(temp2, Operand(1 << Map::kIsUndetectable)); 2597 __ TestBit(temp2, Map::kIsUndetectable, r0);
2516 __ b(ne, is_not_object); 2598 __ bne(is_not_object, cr0);
2517 2599
2518 // Load instance type and check that it is in object type range. 2600 // Load instance type and check that it is in object type range.
2519 __ ldrb(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset)); 2601 __ lbz(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
2520 __ cmp(temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); 2602 __ cmpi(temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2521 __ b(lt, is_not_object); 2603 __ blt(is_not_object);
2522 __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE)); 2604 __ cmpi(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
2523 return le; 2605 return le;
2524 } 2606 }
2525 2607
2526 2608
2527 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) { 2609 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
2528 Register reg = ToRegister(instr->value()); 2610 Register reg = ToRegister(instr->value());
2529 Register temp1 = ToRegister(instr->temp()); 2611 Register temp1 = ToRegister(instr->temp());
2530 2612
2531 Condition true_cond = 2613 Condition true_cond =
2532 EmitIsObject(reg, temp1, 2614 EmitIsObject(reg, temp1,
(...skipping 25 matching lines...)
2558 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; 2640 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2559 Condition true_cond = 2641 Condition true_cond =
2560 EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed); 2642 EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
2561 2643
2562 EmitBranch(instr, true_cond); 2644 EmitBranch(instr, true_cond);
2563 } 2645 }
2564 2646
2565 2647
2566 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) { 2648 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2567 Register input_reg = EmitLoadRegister(instr->value(), ip); 2649 Register input_reg = EmitLoadRegister(instr->value(), ip);
2568 __ SmiTst(input_reg); 2650 __ TestIfSmi(input_reg, r0);
2569 EmitBranch(instr, eq); 2651 EmitBranch(instr, eq, cr0);
2570 } 2652 }
2571 2653
2572 2654
2573 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) { 2655 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2574 Register input = ToRegister(instr->value()); 2656 Register input = ToRegister(instr->value());
2575 Register temp = ToRegister(instr->temp()); 2657 Register temp = ToRegister(instr->temp());
2576 2658
2577 if (!instr->hydrogen()->value()->type().IsHeapObject()) { 2659 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2578 __ JumpIfSmi(input, instr->FalseLabel(chunk_)); 2660 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2579 } 2661 }
2580 __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset)); 2662 __ LoadP(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2581 __ ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset)); 2663 __ lbz(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
2582 __ tst(temp, Operand(1 << Map::kIsUndetectable)); 2664 __ TestBit(temp, Map::kIsUndetectable, r0);
2583 EmitBranch(instr, ne); 2665 EmitBranch(instr, ne, cr0);
2584 } 2666 }
2585 2667
2586 2668
2587 static Condition ComputeCompareCondition(Token::Value op) { 2669 static Condition ComputeCompareCondition(Token::Value op) {
2588 switch (op) { 2670 switch (op) {
2589 case Token::EQ_STRICT: 2671 case Token::EQ_STRICT:
2590 case Token::EQ: 2672 case Token::EQ:
2591 return eq; 2673 return eq;
2592 case Token::LT: 2674 case Token::LT:
2593 return lt; 2675 return lt;
2594 case Token::GT: 2676 case Token::GT:
2595 return gt; 2677 return gt;
2596 case Token::LTE: 2678 case Token::LTE:
2597 return le; 2679 return le;
2598 case Token::GTE: 2680 case Token::GTE:
2599 return ge; 2681 return ge;
2600 default: 2682 default:
2601 UNREACHABLE(); 2683 UNREACHABLE();
2602 return kNoCondition; 2684 return kNoCondition;
2603 } 2685 }
2604 } 2686 }
2605 2687
2606 2688
2607 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) { 2689 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2608 DCHECK(ToRegister(instr->context()).is(cp)); 2690 DCHECK(ToRegister(instr->context()).is(cp));
2609 Token::Value op = instr->op(); 2691 Token::Value op = instr->op();
2610 2692
2611 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); 2693 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
2612 CallCode(ic, RelocInfo::CODE_TARGET, instr); 2694 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2613 // This instruction also signals that no smi code was inlined. 2695 // This instruction also signals that no smi code was inlined.
2614 __ cmp(r0, Operand::Zero()); 2696 __ cmpi(r3, Operand::Zero());
2615 2697
2616 Condition condition = ComputeCompareCondition(op); 2698 Condition condition = ComputeCompareCondition(op);
2617 2699
2618 EmitBranch(instr, condition); 2700 EmitBranch(instr, condition);
2619 } 2701 }
2620 2702
2621 2703
2622 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) { 2704 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2623 InstanceType from = instr->from(); 2705 InstanceType from = instr->from();
2624 InstanceType to = instr->to(); 2706 InstanceType to = instr->to();
2625 if (from == FIRST_TYPE) return to; 2707 if (from == FIRST_TYPE) return to;
2626 DCHECK(from == to || to == LAST_TYPE); 2708 DCHECK(from == to || to == LAST_TYPE);
2627 return from; 2709 return from;
2628 } 2710 }
2629 2711
2630 2712
2631 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) { 2713 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2632 InstanceType from = instr->from(); 2714 InstanceType from = instr->from();
2633 InstanceType to = instr->to(); 2715 InstanceType to = instr->to();
2634 if (from == to) return eq; 2716 if (from == to) return eq;
2635 if (to == LAST_TYPE) return hs; 2717 if (to == LAST_TYPE) return ge;
2636 if (from == FIRST_TYPE) return ls; 2718 if (from == FIRST_TYPE) return le;
2637 UNREACHABLE(); 2719 UNREACHABLE();
2638 return eq; 2720 return eq;
2639 } 2721 }
2640 2722
2641 2723
2642 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) { 2724 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2643 Register scratch = scratch0(); 2725 Register scratch = scratch0();
2644 Register input = ToRegister(instr->value()); 2726 Register input = ToRegister(instr->value());
2645 2727
2646 if (!instr->hydrogen()->value()->type().IsHeapObject()) { 2728 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2647 __ JumpIfSmi(input, instr->FalseLabel(chunk_)); 2729 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2648 } 2730 }
2649 2731
2650 __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen())); 2732 __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
2651 EmitBranch(instr, BranchCondition(instr->hydrogen())); 2733 EmitBranch(instr, BranchCondition(instr->hydrogen()));
2652 } 2734 }
2653 2735
2654 2736
2655 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) { 2737 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2656 Register input = ToRegister(instr->value()); 2738 Register input = ToRegister(instr->value());
2657 Register result = ToRegister(instr->result()); 2739 Register result = ToRegister(instr->result());
2658 2740
2659 __ AssertString(input); 2741 __ AssertString(input);
2660 2742
2661 __ ldr(result, FieldMemOperand(input, String::kHashFieldOffset)); 2743 __ lwz(result, FieldMemOperand(input, String::kHashFieldOffset));
2662 __ IndexFromHash(result, result); 2744 __ IndexFromHash(result, result);
2663 } 2745 }
2664 2746
2665 2747
2666 void LCodeGen::DoHasCachedArrayIndexAndBranch( 2748 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2667 LHasCachedArrayIndexAndBranch* instr) { 2749 LHasCachedArrayIndexAndBranch* instr) {
2668 Register input = ToRegister(instr->value()); 2750 Register input = ToRegister(instr->value());
2669 Register scratch = scratch0(); 2751 Register scratch = scratch0();
2670 2752
2671 __ ldr(scratch, 2753 __ lwz(scratch,
2672 FieldMemOperand(input, String::kHashFieldOffset)); 2754 FieldMemOperand(input, String::kHashFieldOffset));
2673 __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask)); 2755 __ mov(r0, Operand(String::kContainsCachedArrayIndexMask));
2674 EmitBranch(instr, eq); 2756 __ and_(r0, scratch, r0, SetRC);
2757 EmitBranch(instr, eq, cr0);
2675 } 2758 }
2676 2759
2677 2760
2678 // Branches to a label or falls through with the answer in flags. Trashes 2761 // Branches to a label or falls through with the answer in flags. Trashes
2679 // the temp registers, but not the input. 2762 // the temp registers, but not the input.
2680 void LCodeGen::EmitClassOfTest(Label* is_true, 2763 void LCodeGen::EmitClassOfTest(Label* is_true,
2681 Label* is_false, 2764 Label* is_false,
2682 Handle<String>class_name, 2765 Handle<String>class_name,
2683 Register input, 2766 Register input,
2684 Register temp, 2767 Register temp,
2685 Register temp2) { 2768 Register temp2) {
2686 DCHECK(!input.is(temp)); 2769 DCHECK(!input.is(temp));
2687 DCHECK(!input.is(temp2)); 2770 DCHECK(!input.is(temp2));
2688 DCHECK(!temp.is(temp2)); 2771 DCHECK(!temp.is(temp2));
2689 2772
2690 __ JumpIfSmi(input, is_false); 2773 __ JumpIfSmi(input, is_false);
2691 2774
2692 if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) { 2775 if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
2693 // Assuming the following assertions, we can use the same compares to test 2776 // Assuming the following assertions, we can use the same compares to test
2694 // for both being a function type and being in the object type range. 2777 // for both being a function type and being in the object type range.
2695 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); 2778 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2696 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE == 2779 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2697 FIRST_SPEC_OBJECT_TYPE + 1); 2780 FIRST_SPEC_OBJECT_TYPE + 1);
2698 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == 2781 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2699 LAST_SPEC_OBJECT_TYPE - 1); 2782 LAST_SPEC_OBJECT_TYPE - 1);
2700 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); 2783 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2701 __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE); 2784 __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
2702 __ b(lt, is_false); 2785 __ blt(is_false);
2703 __ b(eq, is_true); 2786 __ beq(is_true);
2704 __ cmp(temp2, Operand(LAST_SPEC_OBJECT_TYPE)); 2787 __ cmpi(temp2, Operand(LAST_SPEC_OBJECT_TYPE));
2705 __ b(eq, is_true); 2788 __ beq(is_true);
2706 } else { 2789 } else {
2707 // Faster code path to avoid two compares: subtract lower bound from the 2790 // Faster code path to avoid two compares: subtract lower bound from the
2708 // actual type and do a signed compare with the width of the type range. 2791 // actual type and do a signed compare with the width of the type range.
2709 __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset)); 2792 __ LoadP(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2710 __ ldrb(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset)); 2793 __ lbz(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset));
2711 __ sub(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); 2794 __ subi(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2712 __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE - 2795 __ cmpi(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
2713 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); 2796 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2714 __ b(gt, is_false); 2797 __ bgt(is_false);
2715 } 2798 }
2716 2799
2717 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range. 2800 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2718 // Check if the constructor in the map is a function. 2801 // Check if the constructor in the map is a function.
2719 __ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset)); 2802 __ LoadP(temp, FieldMemOperand(temp, Map::kConstructorOffset));
2720 2803
2721 // Objects with a non-function constructor have class 'Object'. 2804 // Objects with a non-function constructor have class 'Object'.
2722 __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE); 2805 __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
2723 if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) { 2806 if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
2724 __ b(ne, is_true); 2807 __ bne(is_true);
2725 } else { 2808 } else {
2726 __ b(ne, is_false); 2809 __ bne(is_false);
2727 } 2810 }
2728 2811
2729 // temp now contains the constructor function. Grab the 2812 // temp now contains the constructor function. Grab the
2730 // instance class name from there. 2813 // instance class name from there.
2731 __ ldr(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset)); 2814 __ LoadP(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2732 __ ldr(temp, FieldMemOperand(temp, 2815 __ LoadP(temp, FieldMemOperand(temp,
2733 SharedFunctionInfo::kInstanceClassNameOffset)); 2816 SharedFunctionInfo::kInstanceClassNameOffset));
2734 // The class name we are testing against is internalized since it's a literal. 2817 // The class name we are testing against is internalized since it's a literal.
2735 // The name in the constructor is internalized because of the way the context 2818 // The name in the constructor is internalized because of the way the context
2736 // is booted. This routine isn't expected to work for random API-created 2819 // is booted. This routine isn't expected to work for random API-created
2737 // classes and it doesn't have to because you can't access it with natives 2820 // classes and it doesn't have to because you can't access it with natives
2738 // syntax. Since both sides are internalized it is sufficient to use an 2821 // syntax. Since both sides are internalized it is sufficient to use an
2739 // identity comparison. 2822 // identity comparison.
2740 __ cmp(temp, Operand(class_name)); 2823 __ Cmpi(temp, Operand(class_name), r0);
2741 // End with the answer in flags. 2824 // End with the answer in flags.
2742 } 2825 }
2743 2826
2744 2827
2745 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) { 2828 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2746 Register input = ToRegister(instr->value()); 2829 Register input = ToRegister(instr->value());
2747 Register temp = scratch0(); 2830 Register temp = scratch0();
2748 Register temp2 = ToRegister(instr->temp()); 2831 Register temp2 = ToRegister(instr->temp());
2749 Handle<String> class_name = instr->hydrogen()->class_name(); 2832 Handle<String> class_name = instr->hydrogen()->class_name();
2750 2833
2751 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), 2834 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2752 class_name, input, temp, temp2); 2835 class_name, input, temp, temp2);
2753 2836
2754 EmitBranch(instr, eq); 2837 EmitBranch(instr, eq);
2755 } 2838 }
2756 2839
2757 2840
2758 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) { 2841 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2759 Register reg = ToRegister(instr->value()); 2842 Register reg = ToRegister(instr->value());
2760 Register temp = ToRegister(instr->temp()); 2843 Register temp = ToRegister(instr->temp());
2761 2844
2762 __ ldr(temp, FieldMemOperand(reg, HeapObject::kMapOffset)); 2845 __ LoadP(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
2763 __ cmp(temp, Operand(instr->map())); 2846 __ Cmpi(temp, Operand(instr->map()), r0);
2764 EmitBranch(instr, eq); 2847 EmitBranch(instr, eq);
2765 } 2848 }
2766 2849
2767 2850
2768 void LCodeGen::DoInstanceOf(LInstanceOf* instr) { 2851 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2769 DCHECK(ToRegister(instr->context()).is(cp)); 2852 DCHECK(ToRegister(instr->context()).is(cp));
2770 DCHECK(ToRegister(instr->left()).is(r0)); // Object is in r0. 2853 DCHECK(ToRegister(instr->left()).is(r3)); // Object is in r3.
2771 DCHECK(ToRegister(instr->right()).is(r1)); // Function is in r1. 2854 DCHECK(ToRegister(instr->right()).is(r4)); // Function is in r4.
2772 2855
2773 InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters); 2856 InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
2774 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 2857 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2775 2858
2776 __ cmp(r0, Operand::Zero()); 2859 Label equal, done;
2777 __ mov(r0, Operand(factory()->false_value()), LeaveCC, ne); 2860 __ cmpi(r3, Operand::Zero());
2778 __ mov(r0, Operand(factory()->true_value()), LeaveCC, eq); 2861 __ beq(&equal);
2862 __ mov(r3, Operand(factory()->false_value()));
2863 __ b(&done);
2864
2865 __ bind(&equal);
2866 __ mov(r3, Operand(factory()->true_value()));
2867 __ bind(&done);
2779 } 2868 }
2780 2869
2781 2870
2782 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { 2871 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2783 class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode { 2872 class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
2784 public: 2873 public:
2785 DeferredInstanceOfKnownGlobal(LCodeGen* codegen, 2874 DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2786 LInstanceOfKnownGlobal* instr) 2875 LInstanceOfKnownGlobal* instr)
2787 : LDeferredCode(codegen), instr_(instr) { } 2876 : LDeferredCode(codegen), instr_(instr) { }
2788 virtual void Generate() V8_OVERRIDE { 2877 virtual void Generate() V8_OVERRIDE {
2789 codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_, 2878 codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
2790 &load_bool_);
2791 } 2879 }
2792 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } 2880 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
2793 Label* map_check() { return &map_check_; } 2881 Label* map_check() { return &map_check_; }
2794 Label* load_bool() { return &load_bool_; }
2795
2796 private: 2882 private:
2797 LInstanceOfKnownGlobal* instr_; 2883 LInstanceOfKnownGlobal* instr_;
2798 Label map_check_; 2884 Label map_check_;
2799 Label load_bool_;
2800 }; 2885 };
2801 2886
2802 DeferredInstanceOfKnownGlobal* deferred; 2887 DeferredInstanceOfKnownGlobal* deferred;
2803 deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr); 2888 deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
2804 2889
2805 Label done, false_result; 2890 Label done, false_result;
2806 Register object = ToRegister(instr->value()); 2891 Register object = ToRegister(instr->value());
2807 Register temp = ToRegister(instr->temp()); 2892 Register temp = ToRegister(instr->temp());
2808 Register result = ToRegister(instr->result()); 2893 Register result = ToRegister(instr->result());
2809 2894
2810 // A Smi is not an instance of anything. 2895 // A Smi is not an instance of anything.
2811 __ JumpIfSmi(object, &false_result); 2896 __ JumpIfSmi(object, &false_result);
2812 2897
2813 // This is the inlined call site instanceof cache. The two occurrences of the 2898 // This is the inlined call site instanceof cache. The two occurrences of the
2814 // hole value will be patched to the last map/result pair generated by the 2899 // hole value will be patched to the last map/result pair generated by the
2815 // instanceof stub. 2900 // instanceof stub.
2816 Label cache_miss; 2901 Label cache_miss;
2817 Register map = temp; 2902 Register map = temp;
2818 __ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset)); 2903 __ LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
2819 { 2904 {
2820 // Block constant pool emission to ensure the positions of instructions are 2905 // Block constant pool emission to ensure the positions of instructions are
2821 // as expected by the patcher. See InstanceofStub::Generate(). 2906 // as expected by the patcher. See InstanceofStub::Generate().
2822 Assembler::BlockConstPoolScope block_const_pool(masm()); 2907 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
2823 __ bind(deferred->map_check()); // Label for calculating code patching. 2908 __ bind(deferred->map_check()); // Label for calculating code patching.
2824 // We use Factory::the_hole_value() on purpose instead of loading from the 2909 // We use Factory::the_hole_value() on purpose instead of loading from the
2825 // root array to force relocation to be able to later patch with 2910 // root array to force relocation to be able to later patch with
2826 // the cached map. 2911 // the cached map.
2827 Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value()); 2912 Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
2828 __ mov(ip, Operand(Handle<Object>(cell))); 2913 __ mov(ip, Operand(Handle<Object>(cell)));
2829 __ ldr(ip, FieldMemOperand(ip, PropertyCell::kValueOffset)); 2914 __ LoadP(ip, FieldMemOperand(ip, PropertyCell::kValueOffset));
2830 __ cmp(map, Operand(ip)); 2915 __ cmp(map, ip);
2831 __ b(ne, &cache_miss); 2916 __ bne(&cache_miss);
2832 __ bind(deferred->load_bool()); // Label for calculating code patching.
2833 // We use Factory::the_hole_value() on purpose instead of loading from the 2917 // We use Factory::the_hole_value() on purpose instead of loading from the
2834 // root array to force relocation to be able to later patch 2918 // root array to force relocation to be able to later patch
2835 // with true or false. 2919 // with true or false.
2836 __ mov(result, Operand(factory()->the_hole_value())); 2920 __ mov(result, Operand(factory()->the_hole_value()));
2837 } 2921 }
2838 __ b(&done); 2922 __ b(&done);
2839 2923
2840 // The inlined call site cache did not match. Check null and string before 2924 // The inlined call site cache did not match. Check null and string before
2841 // calling the deferred code. 2925 // calling the deferred code.
2842 __ bind(&cache_miss); 2926 __ bind(&cache_miss);
2843 // Null is not an instance of anything. 2927 // Null is not an instance of anything.
2844 __ LoadRoot(ip, Heap::kNullValueRootIndex); 2928 __ LoadRoot(ip, Heap::kNullValueRootIndex);
2845 __ cmp(object, Operand(ip)); 2929 __ cmp(object, ip);
2846 __ b(eq, &false_result); 2930 __ beq(&false_result);
2847 2931
2848 // String values are not instances of anything. 2932 // String values are not instances of anything.
2849 Condition is_string = masm_->IsObjectStringType(object, temp); 2933 Condition is_string = masm_->IsObjectStringType(object, temp);
2850 __ b(is_string, &false_result); 2934 __ b(is_string, &false_result, cr0);
2851 2935
2852 // Go to the deferred code. 2936 // Go to the deferred code.
2853 __ b(deferred->entry()); 2937 __ b(deferred->entry());
2854 2938
2855 __ bind(&false_result); 2939 __ bind(&false_result);
2856 __ LoadRoot(result, Heap::kFalseValueRootIndex); 2940 __ LoadRoot(result, Heap::kFalseValueRootIndex);
2857 2941
2858 // Here result has either true or false. Deferred code also produces true or 2942 // Here result has either true or false. Deferred code also produces true or
2859 // false object. 2943 // false object.
2860 __ bind(deferred->exit()); 2944 __ bind(deferred->exit());
2861 __ bind(&done); 2945 __ bind(&done);
2862 } 2946 }
2863 2947
2864 2948
2865 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, 2949 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2866 Label* map_check, 2950 Label* map_check) {
2867 Label* bool_load) {
2868 InstanceofStub::Flags flags = InstanceofStub::kNoFlags; 2951 InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
2869 flags = static_cast<InstanceofStub::Flags>( 2952 flags = static_cast<InstanceofStub::Flags>(
2870 flags | InstanceofStub::kArgsInRegisters); 2953 flags | InstanceofStub::kArgsInRegisters);
2871 flags = static_cast<InstanceofStub::Flags>( 2954 flags = static_cast<InstanceofStub::Flags>(
2872 flags | InstanceofStub::kCallSiteInlineCheck); 2955 flags | InstanceofStub::kCallSiteInlineCheck);
2873 flags = static_cast<InstanceofStub::Flags>( 2956 flags = static_cast<InstanceofStub::Flags>(
2874 flags | InstanceofStub::kReturnTrueFalseObject); 2957 flags | InstanceofStub::kReturnTrueFalseObject);
2875 InstanceofStub stub(isolate(), flags); 2958 InstanceofStub stub(isolate(), flags);
2876 2959
2877 PushSafepointRegistersScope scope(this); 2960 PushSafepointRegistersScope scope(this);
2878 LoadContextFromDeferred(instr->context()); 2961 LoadContextFromDeferred(instr->context());
2879 2962
2880 __ Move(InstanceofStub::right(), instr->function()); 2963 __ Move(InstanceofStub::right(), instr->function());
2881 2964 // Include instructions below in delta: mov + call = mov + (mov + 2)
2882 int call_size = CallCodeSize(stub.GetCode(), RelocInfo::CODE_TARGET); 2965 static const int kAdditionalDelta = (2 * Assembler::kMovInstructions) + 2;
2883 int additional_delta = (call_size / Assembler::kInstrSize) + 4; 2966 int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
2884 // Make sure that the code size is predictable, since we use specific constant
2885 // offsets in the code to find embedded values.
2886 PredictableCodeSizeScope predictable(
2887 masm_, (additional_delta + 1) * Assembler::kInstrSize);
2888 // Make sure we don't emit any additional entries in the constant pool before
2889 // the call to ensure that the CallCodeSize() calculated the correct number of
2890 // instructions for the constant pool load.
2891 { 2967 {
2892 ConstantPoolUnavailableScope constant_pool_unavailable(masm_); 2968 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
2893 int map_check_delta = 2969 // r8 is used to communicate the offset to the location of the map check.
2894 masm_->InstructionsGeneratedSince(map_check) + additional_delta; 2970 __ mov(r8, Operand(delta * Instruction::kInstrSize));
2895 int bool_load_delta =
2896 masm_->InstructionsGeneratedSince(bool_load) + additional_delta;
2897 Label before_push_delta;
2898 __ bind(&before_push_delta);
2899 __ BlockConstPoolFor(additional_delta);
2900 // r5 is used to communicate the offset to the location of the map check.
2901 __ mov(r5, Operand(map_check_delta * kPointerSize));
2902 // r6 is used to communicate the offset to the location of the bool load.
2903 __ mov(r6, Operand(bool_load_delta * kPointerSize));
2904 // The mov above can generate one or two instructions. The delta was
2905 // computed for two instructions, so we need to pad here in case of one
2906 // instruction.
2907 while (masm_->InstructionsGeneratedSince(&before_push_delta) != 4) {
2908 __ nop();
2909 }
2910 } 2971 }
2911 CallCodeGeneric(stub.GetCode(), 2972 CallCodeGeneric(stub.GetCode(),
2912 RelocInfo::CODE_TARGET, 2973 RelocInfo::CODE_TARGET,
2913 instr, 2974 instr,
2914 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); 2975 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
2976 DCHECK(delta == masm_->InstructionsGeneratedSince(map_check));
2915 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment(); 2977 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
2916 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); 2978 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2917 // Put the result value (r0) into the result register slot and 2979 // Put the result value (r3) into the result register slot and
2918 // restore all registers. 2980 // restore all registers.
2919 __ StoreToSafepointRegisterSlot(r0, ToRegister(instr->result())); 2981 __ StoreToSafepointRegisterSlot(r3, ToRegister(instr->result()));
2920 } 2982 }
2921 2983
2922 2984
2923 void LCodeGen::DoCmpT(LCmpT* instr) { 2985 void LCodeGen::DoCmpT(LCmpT* instr) {
2924 DCHECK(ToRegister(instr->context()).is(cp)); 2986 DCHECK(ToRegister(instr->context()).is(cp));
2925 Token::Value op = instr->op(); 2987 Token::Value op = instr->op();
2926 2988
2927 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); 2989 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
2928 CallCode(ic, RelocInfo::CODE_TARGET, instr); 2990 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2929 // This instruction also signals no smi code inlined. 2991 // This instruction also signals no smi code inlined.
2930 __ cmp(r0, Operand::Zero()); 2992 __ cmpi(r3, Operand::Zero());
2931 2993
2932 Condition condition = ComputeCompareCondition(op); 2994 Condition condition = ComputeCompareCondition(op);
2933 __ LoadRoot(ToRegister(instr->result()), 2995 Label true_value, done;
2934 Heap::kTrueValueRootIndex, 2996
2935 condition); 2997 __ b(condition, &true_value);
2936 __ LoadRoot(ToRegister(instr->result()), 2998
2937 Heap::kFalseValueRootIndex, 2999 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2938 NegateCondition(condition)); 3000 __ b(&done);
3001
3002 __ bind(&true_value);
3003 __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
3004
3005 __ bind(&done);
2939 } 3006 }
2940 3007
2941 3008
2942 void LCodeGen::DoReturn(LReturn* instr) { 3009 void LCodeGen::DoReturn(LReturn* instr) {
2943 if (FLAG_trace && info()->IsOptimizing()) { 3010 if (FLAG_trace && info()->IsOptimizing()) {
2944 // Push the return value on the stack as the parameter. 3011 // Push the return value on the stack as the parameter.
2945 // Runtime::TraceExit returns its parameter in r0. Since we're leaving the code 3012 // Runtime::TraceExit returns its parameter in r3. Since we're leaving the code
2946 // managed by the register allocator and tearing down the frame, it's 3013 // managed by the register allocator and tearing down the frame, it's
2947 // safe to write to the context register. 3014 // safe to write to the context register.
2948 __ push(r0); 3015 __ push(r3);
2949 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); 3016 __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2950 __ CallRuntime(Runtime::kTraceExit, 1); 3017 __ CallRuntime(Runtime::kTraceExit, 1);
2951 } 3018 }
2952 if (info()->saves_caller_doubles()) { 3019 if (info()->saves_caller_doubles()) {
2953 RestoreCallerDoubles(); 3020 RestoreCallerDoubles();
2954 } 3021 }
2955 int no_frame_start = -1; 3022 int no_frame_start = -1;
2956 if (NeedsEagerFrame()) { 3023 if (NeedsEagerFrame()) {
2957 no_frame_start = masm_->LeaveFrame(StackFrame::JAVA_SCRIPT); 3024 no_frame_start = masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
2958 } 3025 }
2959 if (instr->has_constant_parameter_count()) { 3026 if (instr->has_constant_parameter_count()) {
2960 int parameter_count = ToInteger32(instr->constant_parameter_count()); 3027 int parameter_count = ToInteger32(instr->constant_parameter_count());
2961 int32_t sp_delta = (parameter_count + 1) * kPointerSize; 3028 int32_t sp_delta = (parameter_count + 1) * kPointerSize;
2962 if (sp_delta != 0) { 3029 if (sp_delta != 0) {
2963 __ add(sp, sp, Operand(sp_delta)); 3030 __ addi(sp, sp, Operand(sp_delta));
2964 } 3031 }
2965 } else { 3032 } else {
2966 Register reg = ToRegister(instr->parameter_count()); 3033 Register reg = ToRegister(instr->parameter_count());
2967 // The argument count parameter is a smi. 3034 // The argument count parameter is a smi.
2968 __ SmiUntag(reg); 3035 __ SmiToPtrArrayOffset(r0, reg);
2969 __ add(sp, sp, Operand(reg, LSL, kPointerSizeLog2)); 3036 __ add(sp, sp, r0);
2970 } 3037 }
2971 3038
2972 __ Jump(lr); 3039 __ blr();
2973 3040
2974 if (no_frame_start != -1) { 3041 if (no_frame_start != -1) {
2975 info_->AddNoFrameRange(no_frame_start, masm_->pc_offset()); 3042 info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
2976 } 3043 }
2977 } 3044 }
2978 3045
2979 3046
2980 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { 3047 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
2981 Register result = ToRegister(instr->result()); 3048 Register result = ToRegister(instr->result());
2982 __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell().handle()))); 3049 __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
2983 __ ldr(result, FieldMemOperand(ip, Cell::kValueOffset)); 3050 __ LoadP(result, FieldMemOperand(ip, Cell::kValueOffset));
2984 if (instr->hydrogen()->RequiresHoleCheck()) { 3051 if (instr->hydrogen()->RequiresHoleCheck()) {
2985 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); 3052 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2986 __ cmp(result, ip); 3053 __ cmp(result, ip);
2987 DeoptimizeIf(eq, instr->environment()); 3054 DeoptimizeIf(eq, instr->environment());
2988 } 3055 }
2989 } 3056 }
2990 3057
2991 3058
2992 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) { 3059 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2993 DCHECK(ToRegister(instr->context()).is(cp)); 3060 DCHECK(ToRegister(instr->context()).is(cp));
2994 DCHECK(ToRegister(instr->global_object()).is(LoadIC::ReceiverRegister())); 3061 DCHECK(ToRegister(instr->global_object()).is(LoadIC::ReceiverRegister()));
2995 DCHECK(ToRegister(instr->result()).is(r0)); 3062 DCHECK(ToRegister(instr->result()).is(r3));
2996 3063
2997 __ mov(LoadIC::NameRegister(), Operand(instr->name())); 3064 __ mov(LoadIC::NameRegister(), Operand(instr->name()));
2998 if (FLAG_vector_ics) { 3065 if (FLAG_vector_ics) {
2999 Register vector = ToRegister(instr->temp_vector()); 3066 Register vector = ToRegister(instr->temp_vector());
3000 DCHECK(vector.is(LoadIC::VectorRegister())); 3067 DCHECK(vector.is(LoadIC::VectorRegister()));
3001 __ Move(vector, instr->hydrogen()->feedback_vector()); 3068 __ Move(vector, instr->hydrogen()->feedback_vector());
3002 // No need to allocate this register. 3069 // No need to allocate this register.
3003 DCHECK(LoadIC::SlotRegister().is(r0)); 3070 DCHECK(LoadIC::SlotRegister().is(r0));
3004 __ mov(LoadIC::SlotRegister(), 3071 __ mov(LoadIC::SlotRegister(),
3005 Operand(Smi::FromInt(instr->hydrogen()->slot()))); 3072 Operand(Smi::FromInt(instr->hydrogen()->slot())));
(...skipping 11 matching lines...)
3017 // Load the cell. 3084 // Load the cell.
3018 __ mov(cell, Operand(instr->hydrogen()->cell().handle())); 3085 __ mov(cell, Operand(instr->hydrogen()->cell().handle()));
3019 3086
3020 // If the cell we are storing to contains the hole it could have 3087 // If the cell we are storing to contains the hole it could have
3021 // been deleted from the property dictionary. In that case, we need 3088 // been deleted from the property dictionary. In that case, we need
3022 // to update the property details in the property dictionary to mark 3089 // to update the property details in the property dictionary to mark
3023 // it as no longer deleted. 3090 // it as no longer deleted.
3024 if (instr->hydrogen()->RequiresHoleCheck()) { 3091 if (instr->hydrogen()->RequiresHoleCheck()) {
3025 // We use a temp to check the payload (CompareRoot might clobber ip). 3092 // We use a temp to check the payload (CompareRoot might clobber ip).
3026 Register payload = ToRegister(instr->temp()); 3093 Register payload = ToRegister(instr->temp());
3027 __ ldr(payload, FieldMemOperand(cell, Cell::kValueOffset)); 3094 __ LoadP(payload, FieldMemOperand(cell, Cell::kValueOffset));
3028 __ CompareRoot(payload, Heap::kTheHoleValueRootIndex); 3095 __ CompareRoot(payload, Heap::kTheHoleValueRootIndex);
3029 DeoptimizeIf(eq, instr->environment()); 3096 DeoptimizeIf(eq, instr->environment());
3030 } 3097 }
3031 3098
3032 // Store the value. 3099 // Store the value.
3033 __ str(value, FieldMemOperand(cell, Cell::kValueOffset)); 3100 __ StoreP(value, FieldMemOperand(cell, Cell::kValueOffset), r0);
3034 // Cells are always rescanned, so no write barrier here. 3101 // Cells are always rescanned, so no write barrier here.
3035 } 3102 }
3036 3103
3037 3104
3038 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { 3105 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
3039 Register context = ToRegister(instr->context()); 3106 Register context = ToRegister(instr->context());
3040 Register result = ToRegister(instr->result()); 3107 Register result = ToRegister(instr->result());
3041 __ ldr(result, ContextOperand(context, instr->slot_index())); 3108 __ LoadP(result, ContextOperand(context, instr->slot_index()));
3042 if (instr->hydrogen()->RequiresHoleCheck()) { 3109 if (instr->hydrogen()->RequiresHoleCheck()) {
3043 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); 3110 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3044 __ cmp(result, ip); 3111 __ cmp(result, ip);
3045 if (instr->hydrogen()->DeoptimizesOnHole()) { 3112 if (instr->hydrogen()->DeoptimizesOnHole()) {
3046 DeoptimizeIf(eq, instr->environment()); 3113 DeoptimizeIf(eq, instr->environment());
3047 } else { 3114 } else {
3048 __ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq); 3115 Label skip;
3116 __ bne(&skip);
3117 __ mov(result, Operand(factory()->undefined_value()));
3118 __ bind(&skip);
3049 } 3119 }
3050 } 3120 }
3051 } 3121 }
3052 3122
3053 3123
3054 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { 3124 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
3055 Register context = ToRegister(instr->context()); 3125 Register context = ToRegister(instr->context());
3056 Register value = ToRegister(instr->value()); 3126 Register value = ToRegister(instr->value());
3057 Register scratch = scratch0(); 3127 Register scratch = scratch0();
3058 MemOperand target = ContextOperand(context, instr->slot_index()); 3128 MemOperand target = ContextOperand(context, instr->slot_index());
3059 3129
3060 Label skip_assignment; 3130 Label skip_assignment;
3061 3131
3062 if (instr->hydrogen()->RequiresHoleCheck()) { 3132 if (instr->hydrogen()->RequiresHoleCheck()) {
3063 __ ldr(scratch, target); 3133 __ LoadP(scratch, target);
3064 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); 3134 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3065 __ cmp(scratch, ip); 3135 __ cmp(scratch, ip);
3066 if (instr->hydrogen()->DeoptimizesOnHole()) { 3136 if (instr->hydrogen()->DeoptimizesOnHole()) {
3067 DeoptimizeIf(eq, instr->environment()); 3137 DeoptimizeIf(eq, instr->environment());
3068 } else { 3138 } else {
3069 __ b(ne, &skip_assignment); 3139 __ bne(&skip_assignment);
3070 } 3140 }
3071 } 3141 }
3072 3142
3073 __ str(value, target); 3143 __ StoreP(value, target, r0);
3074 if (instr->hydrogen()->NeedsWriteBarrier()) { 3144 if (instr->hydrogen()->NeedsWriteBarrier()) {
3075 SmiCheck check_needed = 3145 SmiCheck check_needed =
3076 instr->hydrogen()->value()->type().IsHeapObject() 3146 instr->hydrogen()->value()->type().IsHeapObject()
3077 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; 3147 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
3078 __ RecordWriteContextSlot(context, 3148 __ RecordWriteContextSlot(context,
3079 target.offset(), 3149 target.offset(),
3080 value, 3150 value,
3081 scratch, 3151 scratch,
3082 GetLinkRegisterState(), 3152 GetLinkRegisterState(),
3083 kSaveFPRegs, 3153 kSaveFPRegs,
3084 EMIT_REMEMBERED_SET, 3154 EMIT_REMEMBERED_SET,
3085 check_needed); 3155 check_needed);
3086 } 3156 }
3087 3157
3088 __ bind(&skip_assignment); 3158 __ bind(&skip_assignment);
3089 } 3159 }
3090 3160
3091 3161
3092 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { 3162 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
3093 HObjectAccess access = instr->hydrogen()->access(); 3163 HObjectAccess access = instr->hydrogen()->access();
3094 int offset = access.offset(); 3164 int offset = access.offset();
3095 Register object = ToRegister(instr->object()); 3165 Register object = ToRegister(instr->object());
3096 3166
3097 if (access.IsExternalMemory()) { 3167 if (access.IsExternalMemory()) {
3098 Register result = ToRegister(instr->result()); 3168 Register result = ToRegister(instr->result());
3099 MemOperand operand = MemOperand(object, offset); 3169 MemOperand operand = MemOperand(object, offset);
3100 __ Load(result, operand, access.representation()); 3170 __ LoadRepresentation(result, operand, access.representation(), r0);
3101 return; 3171 return;
3102 } 3172 }
3103 3173
3104 if (instr->hydrogen()->representation().IsDouble()) { 3174 if (instr->hydrogen()->representation().IsDouble()) {
3105 DwVfpRegister result = ToDoubleRegister(instr->result()); 3175 DoubleRegister result = ToDoubleRegister(instr->result());
3106 __ vldr(result, FieldMemOperand(object, offset)); 3176 __ lfd(result, FieldMemOperand(object, offset));
3107 return; 3177 return;
3108 } 3178 }
3109 3179
3110 Register result = ToRegister(instr->result()); 3180 Register result = ToRegister(instr->result());
3111 if (!access.IsInobject()) { 3181 if (!access.IsInobject()) {
3112 __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); 3182 __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
3113 object = result; 3183 object = result;
3114 } 3184 }
3115 MemOperand operand = FieldMemOperand(object, offset); 3185
3116 __ Load(result, operand, access.representation()); 3186 Representation representation = access.representation();
3187
3188 #if V8_TARGET_ARCH_PPC64
3189 // 64-bit Smi optimization
3190 if (representation.IsSmi() &&
3191 instr->hydrogen()->representation().IsInteger32()) {
3192 // Read int value directly from upper half of the smi.
3193 STATIC_ASSERT(kSmiTag == 0);
3194 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
3195 #if V8_TARGET_LITTLE_ENDIAN
3196 offset += kPointerSize / 2;
3197 #endif
3198 representation = Representation::Integer32();
3199 }
3200 #endif
3201
3202 __ LoadRepresentation(result, FieldMemOperand(object, offset),
3203 representation, r0);
3117 } 3204 }
3118 3205
3119 3206
3120 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) { 3207 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
3121 DCHECK(ToRegister(instr->context()).is(cp)); 3208 DCHECK(ToRegister(instr->context()).is(cp));
3122 DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister())); 3209 DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister()));
3123 DCHECK(ToRegister(instr->result()).is(r0)); 3210 DCHECK(ToRegister(instr->result()).is(r3));
3124 3211
3125 // Name is always in r2.
3126 __ mov(LoadIC::NameRegister(), Operand(instr->name())); 3212 __ mov(LoadIC::NameRegister(), Operand(instr->name()));
3127 if (FLAG_vector_ics) { 3213 if (FLAG_vector_ics) {
3128 Register vector = ToRegister(instr->temp_vector()); 3214 Register vector = ToRegister(instr->temp_vector());
3129 DCHECK(vector.is(LoadIC::VectorRegister())); 3215 DCHECK(vector.is(LoadIC::VectorRegister()));
3130 __ Move(vector, instr->hydrogen()->feedback_vector()); 3216 __ Move(vector, instr->hydrogen()->feedback_vector());
3131 // No need to allocate this register. 3217 // No need to allocate this register.
3132 DCHECK(LoadIC::SlotRegister().is(r0)); 3218 DCHECK(LoadIC::SlotRegister().is(r0));
3133 __ mov(LoadIC::SlotRegister(), 3219 __ mov(LoadIC::SlotRegister(),
3134 Operand(Smi::FromInt(instr->hydrogen()->slot()))); 3220 Operand(Smi::FromInt(instr->hydrogen()->slot())));
3135 } 3221 }
3136 Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL); 3222 Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
3137 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS); 3223 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3138 } 3224 }
3139 3225
3140 3226
3141 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { 3227 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
3142 Register scratch = scratch0(); 3228 Register scratch = scratch0();
3143 Register function = ToRegister(instr->function()); 3229 Register function = ToRegister(instr->function());
3144 Register result = ToRegister(instr->result()); 3230 Register result = ToRegister(instr->result());
3145 3231
3146 // Get the prototype or initial map from the function. 3232 // Get the prototype or initial map from the function.
3147 __ ldr(result, 3233 __ LoadP(result,
3148 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); 3234 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3149 3235
3150 // Check that the function has a prototype or an initial map. 3236 // Check that the function has a prototype or an initial map.
3151 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); 3237 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3152 __ cmp(result, ip); 3238 __ cmp(result, ip);
3153 DeoptimizeIf(eq, instr->environment()); 3239 DeoptimizeIf(eq, instr->environment());
3154 3240
3155 // If the function does not have an initial map, we're done. 3241 // If the function does not have an initial map, we're done.
3156 Label done; 3242 Label done;
3157 __ CompareObjectType(result, scratch, scratch, MAP_TYPE); 3243 __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
3158 __ b(ne, &done); 3244 __ bne(&done);
3159 3245
3160 // Get the prototype from the initial map. 3246 // Get the prototype from the initial map.
3161 __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset)); 3247 __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
3162 3248
3163 // All done. 3249 // All done.
3164 __ bind(&done); 3250 __ bind(&done);
3165 } 3251 }
3166 3252
3167 3253
3168 void LCodeGen::DoLoadRoot(LLoadRoot* instr) { 3254 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
3169 Register result = ToRegister(instr->result()); 3255 Register result = ToRegister(instr->result());
3170 __ LoadRoot(result, instr->index()); 3256 __ LoadRoot(result, instr->index());
3171 } 3257 }
3172 3258
3173 3259
3174 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) { 3260 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
3175 Register arguments = ToRegister(instr->arguments()); 3261 Register arguments = ToRegister(instr->arguments());
3176 Register result = ToRegister(instr->result()); 3262 Register result = ToRegister(instr->result());
3177 // There are two words between the frame pointer and the last argument. 3263 // There are two words between the frame pointer and the last argument.
3178 // Subtracting from length accounts for one of them; add one more. 3264 // Subtracting from length accounts for one of them; add one more.
3179 if (instr->length()->IsConstantOperand()) { 3265 if (instr->length()->IsConstantOperand()) {
3180 int const_length = ToInteger32(LConstantOperand::cast(instr->length())); 3266 int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
3181 if (instr->index()->IsConstantOperand()) { 3267 if (instr->index()->IsConstantOperand()) {
3182 int const_index = ToInteger32(LConstantOperand::cast(instr->index())); 3268 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3183 int index = (const_length - const_index) + 1; 3269 int index = (const_length - const_index) + 1;
3184 __ ldr(result, MemOperand(arguments, index * kPointerSize)); 3270 __ LoadP(result, MemOperand(arguments, index * kPointerSize), r0);
3185 } else { 3271 } else {
3186 Register index = ToRegister(instr->index()); 3272 Register index = ToRegister(instr->index());
3187 __ rsb(result, index, Operand(const_length + 1)); 3273 __ subfic(result, index, Operand(const_length + 1));
3188 __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2)); 3274 __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
3275 __ LoadPX(result, MemOperand(arguments, result));
3189 } 3276 }
3190 } else if (instr->index()->IsConstantOperand()) { 3277 } else if (instr->index()->IsConstantOperand()) {
3191 Register length = ToRegister(instr->length()); 3278 Register length = ToRegister(instr->length());
3192 int const_index = ToInteger32(LConstantOperand::cast(instr->index())); 3279 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3193 int loc = const_index - 1; 3280 int loc = const_index - 1;
3194 if (loc != 0) { 3281 if (loc != 0) {
3195 __ sub(result, length, Operand(loc)); 3282 __ subi(result, length, Operand(loc));
3196 __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2)); 3283 __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
3197 } else { 3284 __ LoadPX(result, MemOperand(arguments, result));
3198 __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
3199 }
3200 } else { 3285 } else {
3286 __ ShiftLeftImm(result, length, Operand(kPointerSizeLog2));
3287 __ LoadPX(result, MemOperand(arguments, result));
3288 }
3289 } else {
3201 Register length = ToRegister(instr->length()); 3290 Register length = ToRegister(instr->length());
3202 Register index = ToRegister(instr->index()); 3291 Register index = ToRegister(instr->index());
3203 __ sub(result, length, index); 3292 __ sub(result, length, index);
3204 __ add(result, result, Operand(1)); 3293 __ addi(result, result, Operand(1));
3205 __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2)); 3294 __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
3295 __ LoadPX(result, MemOperand(arguments, result));
3206 } 3296 }
3207 } 3297 }
3208 3298
3209 3299
3210 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { 3300 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
3211 Register external_pointer = ToRegister(instr->elements()); 3301 Register external_pointer = ToRegister(instr->elements());
3212 Register key = no_reg; 3302 Register key = no_reg;
3213 ElementsKind elements_kind = instr->elements_kind(); 3303 ElementsKind elements_kind = instr->elements_kind();
3214 bool key_is_constant = instr->key()->IsConstantOperand(); 3304 bool key_is_constant = instr->key()->IsConstantOperand();
3215 int constant_key = 0; 3305 int constant_key = 0;
3216 if (key_is_constant) { 3306 if (key_is_constant) {
3217 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); 3307 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3218 if (constant_key & 0xF0000000) { 3308 if (constant_key & 0xF0000000) {
3219 Abort(kArrayIndexConstantValueTooBig); 3309 Abort(kArrayIndexConstantValueTooBig);
3220 } 3310 }
3221 } else { 3311 } else {
3222 key = ToRegister(instr->key()); 3312 key = ToRegister(instr->key());
3223 } 3313 }
3224 int element_size_shift = ElementsKindToShiftSize(elements_kind); 3314 int element_size_shift = ElementsKindToShiftSize(elements_kind);
3225 int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) 3315 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
3226 ? (element_size_shift - kSmiTagSize) : element_size_shift;
3227 int base_offset = instr->base_offset(); 3316 int base_offset = instr->base_offset();
3228 3317
3229 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS || 3318 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3230 elements_kind == FLOAT32_ELEMENTS || 3319 elements_kind == FLOAT32_ELEMENTS ||
3231 elements_kind == EXTERNAL_FLOAT64_ELEMENTS || 3320 elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
3232 elements_kind == FLOAT64_ELEMENTS) { 3321 elements_kind == FLOAT64_ELEMENTS) {
3233 int base_offset = instr->base_offset(); 3322 DoubleRegister result = ToDoubleRegister(instr->result());
3234 DwVfpRegister result = ToDoubleRegister(instr->result()); 3323 if (key_is_constant) {
3235 Operand operand = key_is_constant 3324 __ Add(scratch0(), external_pointer,
3236 ? Operand(constant_key << element_size_shift) 3325 constant_key << element_size_shift,
3237 : Operand(key, LSL, shift_size); 3326 r0);
3238 __ add(scratch0(), external_pointer, operand); 3327 } else {
3328 __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
3329 __ add(scratch0(), external_pointer, r0);
3330 }
3239 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS || 3331 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3240 elements_kind == FLOAT32_ELEMENTS) { 3332 elements_kind == FLOAT32_ELEMENTS) {
3241 __ vldr(double_scratch0().low(), scratch0(), base_offset); 3333 __ lfs(result, MemOperand(scratch0(), base_offset));
3242 __ vcvt_f64_f32(result, double_scratch0().low());
3243 } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS 3334 } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
3244 __ vldr(result, scratch0(), base_offset); 3335 __ lfd(result, MemOperand(scratch0(), base_offset));
3245 } 3336 }
3246 } else { 3337 } else {
3247 Register result = ToRegister(instr->result()); 3338 Register result = ToRegister(instr->result());
3248 MemOperand mem_operand = PrepareKeyedOperand( 3339 MemOperand mem_operand = PrepareKeyedOperand(
3249 key, external_pointer, key_is_constant, constant_key, 3340 key, external_pointer, key_is_constant, key_is_smi, constant_key,
3250 element_size_shift, shift_size, base_offset); 3341 element_size_shift, base_offset);
3251 switch (elements_kind) { 3342 switch (elements_kind) {
3252 case EXTERNAL_INT8_ELEMENTS: 3343 case EXTERNAL_INT8_ELEMENTS:
3253 case INT8_ELEMENTS: 3344 case INT8_ELEMENTS:
3254 __ ldrsb(result, mem_operand); 3345 if (key_is_constant) {
3346 __ LoadByte(result, mem_operand, r0);
3347 } else {
3348 __ lbzx(result, mem_operand);
3349 }
3350 __ extsb(result, result);
3255 break; 3351 break;
3256 case EXTERNAL_UINT8_CLAMPED_ELEMENTS: 3352 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
3257 case EXTERNAL_UINT8_ELEMENTS: 3353 case EXTERNAL_UINT8_ELEMENTS:
3258 case UINT8_ELEMENTS: 3354 case UINT8_ELEMENTS:
3259 case UINT8_CLAMPED_ELEMENTS: 3355 case UINT8_CLAMPED_ELEMENTS:
3260 __ ldrb(result, mem_operand); 3356 if (key_is_constant) {
3357 __ LoadByte(result, mem_operand, r0);
3358 } else {
3359 __ lbzx(result, mem_operand);
3360 }
3261 break; 3361 break;
3262 case EXTERNAL_INT16_ELEMENTS: 3362 case EXTERNAL_INT16_ELEMENTS:
3263 case INT16_ELEMENTS: 3363 case INT16_ELEMENTS:
3264 __ ldrsh(result, mem_operand); 3364 if (key_is_constant) {
3365 __ LoadHalfWord(result, mem_operand, r0);
3366 } else {
3367 __ lhzx(result, mem_operand);
3368 }
3369 __ extsh(result, result);
3265 break; 3370 break;
3266 case EXTERNAL_UINT16_ELEMENTS: 3371 case EXTERNAL_UINT16_ELEMENTS:
3267 case UINT16_ELEMENTS: 3372 case UINT16_ELEMENTS:
3268 __ ldrh(result, mem_operand); 3373 if (key_is_constant) {
3374 __ LoadHalfWord(result, mem_operand, r0);
3375 } else {
3376 __ lhzx(result, mem_operand);
3377 }
3269 break; 3378 break;
3270 case EXTERNAL_INT32_ELEMENTS: 3379 case EXTERNAL_INT32_ELEMENTS:
3271 case INT32_ELEMENTS: 3380 case INT32_ELEMENTS:
3272 __ ldr(result, mem_operand); 3381 if (key_is_constant) {
3382 __ LoadWord(result, mem_operand, r0);
3383 } else {
3384 __ lwzx(result, mem_operand);
3385 }
3386 #if V8_TARGET_ARCH_PPC64
3387 __ extsw(result, result);
3388 #endif
3273 break; 3389 break;
3274 case EXTERNAL_UINT32_ELEMENTS: 3390 case EXTERNAL_UINT32_ELEMENTS:
3275 case UINT32_ELEMENTS: 3391 case UINT32_ELEMENTS:
3276 __ ldr(result, mem_operand); 3392 if (key_is_constant) {
3393 __ LoadWord(result, mem_operand, r0);
3394 } else {
3395 __ lwzx(result, mem_operand);
3396 }
3277 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { 3397 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3278 __ cmp(result, Operand(0x80000000)); 3398 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
3279 DeoptimizeIf(cs, instr->environment()); 3399 __ cmplw(result, r0);
3400 DeoptimizeIf(ge, instr->environment());
3280 } 3401 }
3281 break; 3402 break;
3282 case FLOAT32_ELEMENTS: 3403 case FLOAT32_ELEMENTS:
3283 case FLOAT64_ELEMENTS: 3404 case FLOAT64_ELEMENTS:
3284 case EXTERNAL_FLOAT32_ELEMENTS: 3405 case EXTERNAL_FLOAT32_ELEMENTS:
3285 case EXTERNAL_FLOAT64_ELEMENTS: 3406 case EXTERNAL_FLOAT64_ELEMENTS:
3286 case FAST_HOLEY_DOUBLE_ELEMENTS: 3407 case FAST_HOLEY_DOUBLE_ELEMENTS:
3287 case FAST_HOLEY_ELEMENTS: 3408 case FAST_HOLEY_ELEMENTS:
3288 case FAST_HOLEY_SMI_ELEMENTS: 3409 case FAST_HOLEY_SMI_ELEMENTS:
3289 case FAST_DOUBLE_ELEMENTS: 3410 case FAST_DOUBLE_ELEMENTS:
3290 case FAST_ELEMENTS: 3411 case FAST_ELEMENTS:
3291 case FAST_SMI_ELEMENTS: 3412 case FAST_SMI_ELEMENTS:
3292 case DICTIONARY_ELEMENTS: 3413 case DICTIONARY_ELEMENTS:
3293 case SLOPPY_ARGUMENTS_ELEMENTS: 3414 case SLOPPY_ARGUMENTS_ELEMENTS:
3294 UNREACHABLE(); 3415 UNREACHABLE();
3295 break; 3416 break;
3296 } 3417 }
3297 } 3418 }
3298 } 3419 }
3299 3420
3300 3421
3301 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { 3422 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
3302 Register elements = ToRegister(instr->elements()); 3423 Register elements = ToRegister(instr->elements());
3303 bool key_is_constant = instr->key()->IsConstantOperand(); 3424 bool key_is_constant = instr->key()->IsConstantOperand();
3304 Register key = no_reg; 3425 Register key = no_reg;
3305 DwVfpRegister result = ToDoubleRegister(instr->result()); 3426 DoubleRegister result = ToDoubleRegister(instr->result());
3306 Register scratch = scratch0(); 3427 Register scratch = scratch0();
3307 3428
3308 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); 3429 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
3309 3430 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
3310 int base_offset = instr->base_offset(); 3431 int constant_key = 0;
3311 if (key_is_constant) { 3432 if (key_is_constant) {
3312 int constant_key = ToInteger32(LConstantOperand::cast(instr->key())); 3433 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3313 if (constant_key & 0xF0000000) { 3434 if (constant_key & 0xF0000000) {
3314 Abort(kArrayIndexConstantValueTooBig); 3435 Abort(kArrayIndexConstantValueTooBig);
3315 } 3436 }
3316 base_offset += constant_key * kDoubleSize; 3437 } else {
3317 }
3318 __ add(scratch, elements, Operand(base_offset));
3319
3320 if (!key_is_constant) {
3321 key = ToRegister(instr->key()); 3438 key = ToRegister(instr->key());
3322 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3323 ? (element_size_shift - kSmiTagSize) : element_size_shift;
3324 __ add(scratch, scratch, Operand(key, LSL, shift_size));
3325 } 3439 }
3326 3440
3327 __ vldr(result, scratch, 0); 3441 int base_offset = instr->base_offset() + constant_key * kDoubleSize;
3442 if (!key_is_constant) {
3443 __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
3444 __ add(scratch, elements, r0);
3445 elements = scratch;
3446 }
3447 if (!is_int16(base_offset)) {
3448 __ Add(scratch, elements, base_offset, r0);
3449 base_offset = 0;
3450 elements = scratch;
3451 }
3452 __ lfd(result, MemOperand(elements, base_offset));
3328 3453
3329 if (instr->hydrogen()->RequiresHoleCheck()) { 3454 if (instr->hydrogen()->RequiresHoleCheck()) {
3330 __ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32))); 3455 if (is_int16(base_offset + Register::kExponentOffset)) {
3331 __ cmp(scratch, Operand(kHoleNanUpper32)); 3456 __ lwz(scratch, MemOperand(elements,
3457 base_offset + Register::kExponentOffset));
3458 } else {
3459 __ addi(scratch, elements, Operand(base_offset));
3460 __ lwz(scratch, MemOperand(scratch, Register::kExponentOffset));
3461 }
3462 __ Cmpi(scratch, Operand(kHoleNanUpper32), r0);
3332 DeoptimizeIf(eq, instr->environment()); 3463 DeoptimizeIf(eq, instr->environment());
3333 } 3464 }
3334 } 3465 }
3335 3466
3336 3467
3337 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { 3468 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3469 HLoadKeyed* hinstr = instr->hydrogen();
3338 Register elements = ToRegister(instr->elements()); 3470 Register elements = ToRegister(instr->elements());
3339 Register result = ToRegister(instr->result()); 3471 Register result = ToRegister(instr->result());
3340 Register scratch = scratch0(); 3472 Register scratch = scratch0();
3341 Register store_base = scratch; 3473 Register store_base = scratch;
3342 int offset = instr->base_offset(); 3474 int offset = instr->base_offset();
3343 3475
3344 if (instr->key()->IsConstantOperand()) { 3476 if (instr->key()->IsConstantOperand()) {
3345 LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); 3477 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3346 offset += ToInteger32(const_operand) * kPointerSize; 3478 offset += ToInteger32(const_operand) * kPointerSize;
3347 store_base = elements; 3479 store_base = elements;
3348 } else { 3480 } else {
3349 Register key = ToRegister(instr->key()); 3481 Register key = ToRegister(instr->key());
3350 // Even though the HLoadKeyed instruction forces the input 3482 // Even though the HLoadKeyed instruction forces the input
3351 // representation for the key to be an integer, the input gets replaced 3483 // representation for the key to be an integer, the input gets replaced
3352 // during bounds check elimination with the index argument to the bounds 3484 // during bounds check elimination with the index argument to the bounds
3353 // check, which can be tagged, so that case must be handled here, too. 3485 // check, which can be tagged, so that case must be handled here, too.
3354 if (instr->hydrogen()->key()->representation().IsSmi()) { 3486 if (hinstr->key()->representation().IsSmi()) {
3355 __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key)); 3487 __ SmiToPtrArrayOffset(r0, key);
3356 } else { 3488 } else {
3357 __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2)); 3489 __ ShiftLeftImm(r0, key, Operand(kPointerSizeLog2));
3358 } 3490 }
3491 __ add(scratch, elements, r0);
3359 } 3492 }
3360 __ ldr(result, MemOperand(store_base, offset)); 3493
3494 bool requires_hole_check = hinstr->RequiresHoleCheck();
3495 Representation representation = hinstr->representation();
3496
3497 #if V8_TARGET_ARCH_PPC64
3498 // 64-bit Smi optimization
3499 if (representation.IsInteger32() &&
3500 hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
3501 DCHECK(!requires_hole_check);
3502 // Read int value directly from upper half of the smi.
3503 STATIC_ASSERT(kSmiTag == 0);
3504 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
3505 #if V8_TARGET_LITTLE_ENDIAN
3506 offset += kPointerSize / 2;
3507 #endif
3508 }
3509 #endif
3510
3511 __ LoadRepresentation(result, MemOperand(store_base, offset),
3512 representation, r0);
3361 3513
3362 // Check for the hole value. 3514 // Check for the hole value.
3363 if (instr->hydrogen()->RequiresHoleCheck()) { 3515 if (requires_hole_check) {
3364 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { 3516 if (IsFastSmiElementsKind(hinstr->elements_kind())) {
3365 __ SmiTst(result); 3517 __ TestIfSmi(result, r0);
3366 DeoptimizeIf(ne, instr->environment()); 3518 DeoptimizeIf(ne, instr->environment(), cr0);
3367 } else { 3519 } else {
3368 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); 3520 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
3369 __ cmp(result, scratch); 3521 __ cmp(result, scratch);
3370 DeoptimizeIf(eq, instr->environment()); 3522 DeoptimizeIf(eq, instr->environment());
3371 } 3523 }
3372 } 3524 }
3373 } 3525 }
3374 3526
3375 3527
3376 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { 3528 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3377 if (instr->is_typed_elements()) { 3529 if (instr->is_typed_elements()) {
3378 DoLoadKeyedExternalArray(instr); 3530 DoLoadKeyedExternalArray(instr);
3379 } else if (instr->hydrogen()->representation().IsDouble()) { 3531 } else if (instr->hydrogen()->representation().IsDouble()) {
3380 DoLoadKeyedFixedDoubleArray(instr); 3532 DoLoadKeyedFixedDoubleArray(instr);
3381 } else { 3533 } else {
3382 DoLoadKeyedFixedArray(instr); 3534 DoLoadKeyedFixedArray(instr);
3383 } 3535 }
3384 } 3536 }
3385 3537
3386 3538
3387 MemOperand LCodeGen::PrepareKeyedOperand(Register key, 3539 MemOperand LCodeGen::PrepareKeyedOperand(Register key,
3388 Register base, 3540 Register base,
3389 bool key_is_constant, 3541 bool key_is_constant,
3542 bool key_is_smi,
3390 int constant_key, 3543 int constant_key,
3391 int element_size, 3544 int element_size_shift,
3392 int shift_size,
3393 int base_offset) { 3545 int base_offset) {
3546 Register scratch = scratch0();
3547
3394 if (key_is_constant) { 3548 if (key_is_constant) {
3395 return MemOperand(base, (constant_key << element_size) + base_offset); 3549 return MemOperand(base, (constant_key << element_size_shift) + base_offset);
3396 } 3550 }
3397 3551
3398 if (base_offset == 0) { 3552 bool needs_shift = (element_size_shift != (key_is_smi ?
3399 if (shift_size >= 0) { 3553 kSmiTagSize + kSmiShiftSize : 0));
3400 return MemOperand(base, key, LSL, shift_size); 3554
3401 } else { 3555 if (!(base_offset || needs_shift)) {
3402 DCHECK_EQ(-1, shift_size); 3556 return MemOperand(base, key);
3403 return MemOperand(base, key, LSR, 1);
3404 }
3405 } 3557 }
3406 3558
3407 if (shift_size >= 0) { 3559 if (needs_shift) {
3408 __ add(scratch0(), base, Operand(key, LSL, shift_size)); 3560 __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
3409 return MemOperand(scratch0(), base_offset); 3561 key = scratch;
3410 } else {
3411 DCHECK_EQ(-1, shift_size);
3412 __ add(scratch0(), base, Operand(key, ASR, 1));
3413 return MemOperand(scratch0(), base_offset);
3414 } 3562 }
3563
3564 if (base_offset) {
3565 __ Add(scratch, key, base_offset, r0);
3566 }
3567
3568 return MemOperand(base, scratch);
3415 } 3569 }
3416 3570
3417 3571
3418 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { 3572 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3419 DCHECK(ToRegister(instr->context()).is(cp)); 3573 DCHECK(ToRegister(instr->context()).is(cp));
3420 DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister())); 3574 DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister()));
3421 DCHECK(ToRegister(instr->key()).is(LoadIC::NameRegister())); 3575 DCHECK(ToRegister(instr->key()).is(LoadIC::NameRegister()));
3422 3576
3423 if (FLAG_vector_ics) { 3577 if (FLAG_vector_ics) {
3424 Register vector = ToRegister(instr->temp_vector()); 3578 Register vector = ToRegister(instr->temp_vector());
3425 DCHECK(vector.is(LoadIC::VectorRegister())); 3579 DCHECK(vector.is(LoadIC::VectorRegister()));
3426 __ Move(vector, instr->hydrogen()->feedback_vector()); 3580 __ Move(vector, instr->hydrogen()->feedback_vector());
3427 // No need to allocate this register. 3581 // No need to allocate this register.
3428 DCHECK(LoadIC::SlotRegister().is(r0)); 3582 DCHECK(LoadIC::SlotRegister().is(r0));
3429 __ mov(LoadIC::SlotRegister(), 3583 __ mov(LoadIC::SlotRegister(),
3430 Operand(Smi::FromInt(instr->hydrogen()->slot()))); 3584 Operand(Smi::FromInt(instr->hydrogen()->slot())));
3431 } 3585 }
3432 3586
3433 Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); 3587 Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
3434 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS); 3588 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3435 } 3589 }
3436 3590
3437 3591
3438 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { 3592 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3439 Register scratch = scratch0(); 3593 Register scratch = scratch0();
3440 Register result = ToRegister(instr->result()); 3594 Register result = ToRegister(instr->result());
3441 3595
3442 if (instr->hydrogen()->from_inlined()) { 3596 if (instr->hydrogen()->from_inlined()) {
3443 __ sub(result, sp, Operand(2 * kPointerSize)); 3597 __ subi(result, sp, Operand(2 * kPointerSize));
3444 } else { 3598 } else {
3445 // Check if the calling frame is an arguments adaptor frame. 3599 // Check if the calling frame is an arguments adaptor frame.
3446 Label done, adapted; 3600 Label done, adapted;
3447 __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); 3601 __ LoadP(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3448 __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset)); 3602 __ LoadP(result,
3449 __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); 3603 MemOperand(scratch, StandardFrameConstants::kContextOffset));
3604 __ CmpSmiLiteral(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
3450 3605
3451 // Result is the frame pointer for the frame if not adapted and for the real 3606 // Result is the frame pointer for the frame if not adapted and for the real
3452 // frame below the adaptor frame if adapted. 3607 // frame below the adaptor frame if adapted.
3453 __ mov(result, fp, LeaveCC, ne); 3608 __ beq(&adapted);
3454 __ mov(result, scratch, LeaveCC, eq); 3609 __ mr(result, fp);
3610 __ b(&done);
3611
3612 __ bind(&adapted);
3613 __ mr(result, scratch);
3614 __ bind(&done);
3455 } 3615 }
3456 } 3616 }
3457 3617
3458 3618
3459 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) { 3619 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3460 Register elem = ToRegister(instr->elements()); 3620 Register elem = ToRegister(instr->elements());
3461 Register result = ToRegister(instr->result()); 3621 Register result = ToRegister(instr->result());
3462 3622
3463 Label done; 3623 Label done;
3464 3624
3465 // If there is no arguments adaptor frame, the number of arguments is fixed. 3625 // If there is no arguments adaptor frame, the number of arguments is fixed.
3466 __ cmp(fp, elem); 3626 __ cmp(fp, elem);
3467 __ mov(result, Operand(scope()->num_parameters())); 3627 __ mov(result, Operand(scope()->num_parameters()));
3468 __ b(eq, &done); 3628 __ beq(&done);
3469 3629
3470 // Arguments adaptor frame present. Get argument length from there. 3630 // Arguments adaptor frame present. Get argument length from there.
3471 __ ldr(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); 3631 __ LoadP(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3472 __ ldr(result, 3632 __ LoadP(result,
3473 MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset)); 3633 MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
3474 __ SmiUntag(result); 3634 __ SmiUntag(result);
3475 3635
3476 // Argument length is in result register. 3636 // Argument length is in result register.
3477 __ bind(&done); 3637 __ bind(&done);
3478 } 3638 }
3479 3639
3480 3640
3481 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) { 3641 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3482 Register receiver = ToRegister(instr->receiver()); 3642 Register receiver = ToRegister(instr->receiver());
3483 Register function = ToRegister(instr->function()); 3643 Register function = ToRegister(instr->function());
3484 Register result = ToRegister(instr->result()); 3644 Register result = ToRegister(instr->result());
3485 Register scratch = scratch0(); 3645 Register scratch = scratch0();
3486 3646
3487 // If the receiver is null or undefined, we have to pass the global 3647 // If the receiver is null or undefined, we have to pass the global
3488 // object as a receiver to normal functions. Values have to be 3648 // object as a receiver to normal functions. Values have to be
3489 // passed unchanged to builtins and strict-mode functions. 3649 // passed unchanged to builtins and strict-mode functions.
3490 Label global_object, result_in_receiver; 3650 Label global_object, result_in_receiver;
3491 3651
3492 if (!instr->hydrogen()->known_function()) { 3652 if (!instr->hydrogen()->known_function()) {
3493 // Do not transform the receiver to object for strict mode 3653 // Do not transform the receiver to object for strict mode
3494 // functions. 3654 // functions.
3495 __ ldr(scratch, 3655 __ LoadP(scratch,
3496 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); 3656 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3497 __ ldr(scratch, 3657 __ lwz(scratch,
3498 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset)); 3658 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
3499 int mask = 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize); 3659 __ TestBit(scratch,
3500 __ tst(scratch, Operand(mask)); 3660 #if V8_TARGET_ARCH_PPC64
3501 __ b(ne, &result_in_receiver); 3661 SharedFunctionInfo::kStrictModeFunction,
3662 #else
3663 SharedFunctionInfo::kStrictModeFunction + kSmiTagSize,
3664 #endif
3665 r0);
3666 __ bne(&result_in_receiver, cr0);
3502 3667
3503 // Do not transform the receiver to object for builtins. 3668 // Do not transform the receiver to object for builtins.
3504 __ tst(scratch, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize))); 3669 __ TestBit(scratch,
3505 __ b(ne, &result_in_receiver); 3670 #if V8_TARGET_ARCH_PPC64
3671 SharedFunctionInfo::kNative,
3672 #else
3673 SharedFunctionInfo::kNative + kSmiTagSize,
3674 #endif
3675 r0);
3676 __ bne(&result_in_receiver, cr0);
3506 } 3677 }
3507 3678
3508 // Normal function. Replace undefined or null with global receiver. 3679 // Normal function. Replace undefined or null with global receiver.
3509 __ LoadRoot(scratch, Heap::kNullValueRootIndex); 3680 __ LoadRoot(scratch, Heap::kNullValueRootIndex);
3510 __ cmp(receiver, scratch); 3681 __ cmp(receiver, scratch);
3511 __ b(eq, &global_object); 3682 __ beq(&global_object);
3512 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); 3683 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
3513 __ cmp(receiver, scratch); 3684 __ cmp(receiver, scratch);
3514 __ b(eq, &global_object); 3685 __ beq(&global_object);
3515 3686
3516 // Deoptimize if the receiver is not a JS object. 3687 // Deoptimize if the receiver is not a JS object.
3517 __ SmiTst(receiver); 3688 __ TestIfSmi(receiver, r0);
3518 DeoptimizeIf(eq, instr->environment()); 3689 DeoptimizeIf(eq, instr->environment(), cr0);
3519 __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE); 3690 __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
3520 DeoptimizeIf(lt, instr->environment()); 3691 DeoptimizeIf(lt, instr->environment());
3521 3692
3522 __ b(&result_in_receiver); 3693 __ b(&result_in_receiver);
3523 __ bind(&global_object); 3694 __ bind(&global_object);
3524 __ ldr(result, FieldMemOperand(function, JSFunction::kContextOffset)); 3695 __ LoadP(result, FieldMemOperand(function, JSFunction::kContextOffset));
3525 __ ldr(result, 3696 __ LoadP(result,
3526 ContextOperand(result, Context::GLOBAL_OBJECT_INDEX)); 3697 ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
3527 __ ldr(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset)); 3698 __ LoadP(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
3528
3529 if (result.is(receiver)) { 3699 if (result.is(receiver)) {
3530 __ bind(&result_in_receiver); 3700 __ bind(&result_in_receiver);
3531 } else { 3701 } else {
3532 Label result_ok; 3702 Label result_ok;
3533 __ b(&result_ok); 3703 __ b(&result_ok);
3534 __ bind(&result_in_receiver); 3704 __ bind(&result_in_receiver);
3535 __ mov(result, receiver); 3705 __ mr(result, receiver);
3536 __ bind(&result_ok); 3706 __ bind(&result_ok);
3537 } 3707 }
3538 } 3708 }
3539 3709
3540 3710
3541 void LCodeGen::DoApplyArguments(LApplyArguments* instr) { 3711 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3542 Register receiver = ToRegister(instr->receiver()); 3712 Register receiver = ToRegister(instr->receiver());
3543 Register function = ToRegister(instr->function()); 3713 Register function = ToRegister(instr->function());
3544 Register length = ToRegister(instr->length()); 3714 Register length = ToRegister(instr->length());
3545 Register elements = ToRegister(instr->elements()); 3715 Register elements = ToRegister(instr->elements());
3546 Register scratch = scratch0(); 3716 Register scratch = scratch0();
3547 DCHECK(receiver.is(r0)); // Used for parameter count. 3717 DCHECK(receiver.is(r3)); // Used for parameter count.
3548 DCHECK(function.is(r1)); // Required by InvokeFunction. 3718 DCHECK(function.is(r4)); // Required by InvokeFunction.
3549 DCHECK(ToRegister(instr->result()).is(r0)); 3719 DCHECK(ToRegister(instr->result()).is(r3));
3550 3720
3551 // Copy the arguments to this function possibly from the 3721 // Copy the arguments to this function possibly from the
3552 // adaptor frame below it. 3722 // adaptor frame below it.
3553 const uint32_t kArgumentsLimit = 1 * KB; 3723 const uint32_t kArgumentsLimit = 1 * KB;
3554 __ cmp(length, Operand(kArgumentsLimit)); 3724 __ cmpli(length, Operand(kArgumentsLimit));
3555 DeoptimizeIf(hi, instr->environment()); 3725 DeoptimizeIf(gt, instr->environment());
3556 3726
3557 // Push the receiver and use the register to keep the original 3727 // Push the receiver and use the register to keep the original
3558 // number of arguments. 3728 // number of arguments.
3559 __ push(receiver); 3729 __ push(receiver);
3560 __ mov(receiver, length); 3730 __ mr(receiver, length);
3561 // The arguments are at a one pointer size offset from elements. 3731 // The arguments are at a one pointer size offset from elements.
3562 __ add(elements, elements, Operand(1 * kPointerSize)); 3732 __ addi(elements, elements, Operand(1 * kPointerSize));
3563 3733
3564 // Loop through the arguments pushing them onto the execution 3734 // Loop through the arguments pushing them onto the execution
3565 // stack. 3735 // stack.
3566 Label invoke, loop; 3736 Label invoke, loop;
3567 // length is a small non-negative integer, due to the test above. 3737 // length is a small non-negative integer, due to the test above.
3568 __ cmp(length, Operand::Zero()); 3738 __ cmpi(length, Operand::Zero());
3569 __ b(eq, &invoke); 3739 __ beq(&invoke);
3740 __ mtctr(length);
3570 __ bind(&loop); 3741 __ bind(&loop);
3571 __ ldr(scratch, MemOperand(elements, length, LSL, 2)); 3742 __ ShiftLeftImm(r0, length, Operand(kPointerSizeLog2));
3743 __ LoadPX(scratch, MemOperand(elements, r0));
3572 __ push(scratch); 3744 __ push(scratch);
3573 __ sub(length, length, Operand(1), SetCC); 3745 __ addi(length, length, Operand(-1));
3574 __ b(ne, &loop); 3746 __ bdnz(&loop);
3575 3747
3576 __ bind(&invoke); 3748 __ bind(&invoke);
3577 DCHECK(instr->HasPointerMap()); 3749 DCHECK(instr->HasPointerMap());
3578 LPointerMap* pointers = instr->pointer_map(); 3750 LPointerMap* pointers = instr->pointer_map();
3579 SafepointGenerator safepoint_generator( 3751 SafepointGenerator safepoint_generator(
3580 this, pointers, Safepoint::kLazyDeopt); 3752 this, pointers, Safepoint::kLazyDeopt);
3581 // The number of arguments is stored in receiver which is r0, as expected 3753 // The number of arguments is stored in receiver which is r3, as expected
3582 // by InvokeFunction. 3754 // by InvokeFunction.
3583 ParameterCount actual(receiver); 3755 ParameterCount actual(receiver);
3584 __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator); 3756 __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
3585 } 3757 }
3586 3758
3587 3759
3588 void LCodeGen::DoPushArgument(LPushArgument* instr) { 3760 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3589 LOperand* argument = instr->value(); 3761 LOperand* argument = instr->value();
3590 if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) { 3762 if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
3591 Abort(kDoPushArgumentNotImplementedForDoubleType); 3763 Abort(kDoPushArgumentNotImplementedForDoubleType);
3592 } else { 3764 } else {
3593 Register argument_reg = EmitLoadRegister(argument, ip); 3765 Register argument_reg = EmitLoadRegister(argument, ip);
3594 __ push(argument_reg); 3766 __ push(argument_reg);
3595 } 3767 }
3596 } 3768 }
3597 3769
3598 3770
3599 void LCodeGen::DoDrop(LDrop* instr) { 3771 void LCodeGen::DoDrop(LDrop* instr) {
3600 __ Drop(instr->count()); 3772 __ Drop(instr->count());
3601 } 3773 }
3602 3774
3603 3775
3604 void LCodeGen::DoThisFunction(LThisFunction* instr) { 3776 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3605 Register result = ToRegister(instr->result()); 3777 Register result = ToRegister(instr->result());
3606 __ ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); 3778 __ LoadP(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
3607 } 3779 }
3608 3780
3609 3781
3610 void LCodeGen::DoContext(LContext* instr) { 3782 void LCodeGen::DoContext(LContext* instr) {
3611 // If there is a non-return use, the context must be moved to a register. 3783 // If there is a non-return use, the context must be moved to a register.
3612 Register result = ToRegister(instr->result()); 3784 Register result = ToRegister(instr->result());
3613 if (info()->IsOptimizing()) { 3785 if (info()->IsOptimizing()) {
3614 __ ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset)); 3786 __ LoadP(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
3615 } else { 3787 } else {
3616 // If there is no frame, the context must be in cp. 3788 // If there is no frame, the context must be in cp.
3617 DCHECK(result.is(cp)); 3789 DCHECK(result.is(cp));
3618 } 3790 }
3619 } 3791 }
3620 3792
3621 3793
3622 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) { 3794 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3623 DCHECK(ToRegister(instr->context()).is(cp)); 3795 DCHECK(ToRegister(instr->context()).is(cp));
3624 __ push(cp); // The context is the first argument. 3796 __ push(cp); // The context is the first argument.
3625 __ Move(scratch0(), instr->hydrogen()->pairs()); 3797 __ Move(scratch0(), instr->hydrogen()->pairs());
3626 __ push(scratch0()); 3798 __ push(scratch0());
3627 __ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags()))); 3799 __ LoadSmiLiteral(scratch0(), Smi::FromInt(instr->hydrogen()->flags()));
3628 __ push(scratch0()); 3800 __ push(scratch0());
3629 CallRuntime(Runtime::kDeclareGlobals, 3, instr); 3801 CallRuntime(Runtime::kDeclareGlobals, 3, instr);
3630 } 3802 }
3631 3803
3632 3804
3633 void LCodeGen::CallKnownFunction(Handle<JSFunction> function, 3805 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3634 int formal_parameter_count, 3806 int formal_parameter_count,
3635 int arity, 3807 int arity,
3636 LInstruction* instr, 3808 LInstruction* instr,
3637 R1State r1_state) { 3809 R4State r4_state) {
3638 bool dont_adapt_arguments = 3810 bool dont_adapt_arguments =
3639 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel; 3811 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3640 bool can_invoke_directly = 3812 bool can_invoke_directly =
3641 dont_adapt_arguments || formal_parameter_count == arity; 3813 dont_adapt_arguments || formal_parameter_count == arity;
3642 3814
3643 LPointerMap* pointers = instr->pointer_map(); 3815 LPointerMap* pointers = instr->pointer_map();
3644 3816
3645 if (can_invoke_directly) { 3817 if (can_invoke_directly) {
3646 if (r1_state == R1_UNINITIALIZED) { 3818 if (r4_state == R4_UNINITIALIZED) {
3647 __ Move(r1, function); 3819 __ Move(r4, function);
3648 } 3820 }
3649 3821
3650 // Change context. 3822 // Change context.
3651 __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); 3823 __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
3652 3824
3653 // Set r0 to the arguments count if adaptation is not needed. Assumes that r0 3825 // Set r3 to the arguments count if adaptation is not needed. Assumes that r3
3654 // is available to write to at this point. 3826 // is available to write to at this point.
3655 if (dont_adapt_arguments) { 3827 if (dont_adapt_arguments) {
3656 __ mov(r0, Operand(arity)); 3828 __ mov(r3, Operand(arity));
3657 } 3829 }
3658 3830
3659 // Invoke function. 3831 // Invoke function.
3660 __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); 3832 if (function.is_identical_to(info()->closure())) {
3661 __ Call(ip); 3833 __ CallSelf();
3834 } else {
3835 __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
3836 __ Call(ip);
3837 }
3662 3838
3663 // Set up deoptimization. 3839 // Set up deoptimization.
3664 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); 3840 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3665 } else { 3841 } else {
3666 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); 3842 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3667 ParameterCount count(arity); 3843 ParameterCount count(arity);
3668 ParameterCount expected(formal_parameter_count); 3844 ParameterCount expected(formal_parameter_count);
3669 __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator); 3845 __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
3670 } 3846 }
3671 } 3847 }
3672 3848
3673 3849
3674 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { 3850 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3675 DCHECK(instr->context() != NULL); 3851 DCHECK(instr->context() != NULL);
3676 DCHECK(ToRegister(instr->context()).is(cp)); 3852 DCHECK(ToRegister(instr->context()).is(cp));
3677 Register input = ToRegister(instr->value()); 3853 Register input = ToRegister(instr->value());
3678 Register result = ToRegister(instr->result()); 3854 Register result = ToRegister(instr->result());
3679 Register scratch = scratch0(); 3855 Register scratch = scratch0();
3680 3856
3681 // Deoptimize if not a heap number. 3857 // Deoptimize if not a heap number.
3682 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); 3858 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
3683 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); 3859 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
3684 __ cmp(scratch, Operand(ip)); 3860 __ cmp(scratch, ip);
3685 DeoptimizeIf(ne, instr->environment()); 3861 DeoptimizeIf(ne, instr->environment());
3686 3862
3687 Label done; 3863 Label done;
3688 Register exponent = scratch0(); 3864 Register exponent = scratch0();
3689 scratch = no_reg; 3865 scratch = no_reg;
3690 __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); 3866 __ lwz(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3691 // Check the sign of the argument. If the argument is positive, just 3867 // Check the sign of the argument. If the argument is positive, just
3692 // return it. 3868 // return it.
3693 __ tst(exponent, Operand(HeapNumber::kSignMask)); 3869 __ cmpwi(exponent, Operand::Zero());
3694 // Move the input to the result if necessary. 3870 // Move the input to the result if necessary.
3695 __ Move(result, input); 3871 __ Move(result, input);
3696 __ b(eq, &done); 3872 __ bge(&done);
3697 3873
3698 // Input is negative. Reverse its sign. 3874 // Input is negative. Reverse its sign.
3699 // Preserve the value of all registers. 3875 // Preserve the value of all registers.
3700 { 3876 {
3701 PushSafepointRegistersScope scope(this); 3877 PushSafepointRegistersScope scope(this);
3702 3878
3703 // Registers were saved at the safepoint, so we can use 3879 // Registers were saved at the safepoint, so we can use
3704 // many scratch registers. 3880 // many scratch registers.
3705 Register tmp1 = input.is(r1) ? r0 : r1; 3881 Register tmp1 = input.is(r4) ? r3 : r4;
3706 Register tmp2 = input.is(r2) ? r0 : r2; 3882 Register tmp2 = input.is(r5) ? r3 : r5;
3707 Register tmp3 = input.is(r3) ? r0 : r3; 3883 Register tmp3 = input.is(r6) ? r3 : r6;
3708 Register tmp4 = input.is(r4) ? r0 : r4; 3884 Register tmp4 = input.is(r7) ? r3 : r7;
3709 3885
3710 // exponent: floating point exponent value. 3886 // exponent: floating point exponent value.
3711 3887
3712 Label allocated, slow; 3888 Label allocated, slow;
3713 __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex); 3889 __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
3714 __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow); 3890 __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
3715 __ b(&allocated); 3891 __ b(&allocated);
3716 3892
3717 // Slow case: Call the runtime system to do the number allocation. 3893 // Slow case: Call the runtime system to do the number allocation.
3718 __ bind(&slow); 3894 __ bind(&slow);
3719 3895
3720 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr, 3896 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
3721 instr->context()); 3897 instr->context());
3722 // Set the pointer to the new heap number in tmp. 3898 // Set the pointer to the new heap number in tmp.
3723 if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0)); 3899 if (!tmp1.is(r3)) __ mr(tmp1, r3);
3724 // Restore input_reg after call to runtime. 3900 // Restore input_reg after call to runtime.
3725 __ LoadFromSafepointRegisterSlot(input, input); 3901 __ LoadFromSafepointRegisterSlot(input, input);
3726 __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); 3902 __ lwz(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3727 3903
3728 __ bind(&allocated); 3904 __ bind(&allocated);
3729 // exponent: floating point exponent value. 3905 // exponent: floating point exponent value.
3730 // tmp1: allocated heap number. 3906 // tmp1: allocated heap number.
3731 __ bic(exponent, exponent, Operand(HeapNumber::kSignMask)); 3907 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
3732 __ str(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset)); 3908 __ clrlwi(exponent, exponent, Operand(1)); // clear sign bit
3733 __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset)); 3909 __ stw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
3734 __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset)); 3910 __ lwz(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
3911 __ stw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
3735 3912
3736 __ StoreToSafepointRegisterSlot(tmp1, result); 3913 __ StoreToSafepointRegisterSlot(tmp1, result);
3737 } 3914 }
3738 3915
3739 __ bind(&done); 3916 __ bind(&done);
3740 } 3917 }
3741 3918
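The deferred path above copies the input heap number and clears the sign bit of its exponent word (clrlwi on PPC, bic on ARM). A minimal host-side sketch, not part of the patch, of why clearing that bit yields the absolute value of an IEEE-754 double:

#include <cstdint>
#include <cstdio>
#include <cstring>

double AbsViaSignBit(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  bits &= ~(UINT64_C(1) << 63);  // clear the sign bit, leave exponent/mantissa intact
  std::memcpy(&value, &bits, sizeof(value));
  return value;
}

int main() {
  std::printf("%g %g\n", AbsViaSignBit(-3.5), AbsViaSignBit(-0.0));  // 3.5 0
  return 0;
}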
3742 3919
3743 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) { 3920 void LCodeGen::EmitMathAbs(LMathAbs* instr) {
3744 Register input = ToRegister(instr->value()); 3921 Register input = ToRegister(instr->value());
3745 Register result = ToRegister(instr->result()); 3922 Register result = ToRegister(instr->result());
3746 __ cmp(input, Operand::Zero()); 3923 Label done;
3747 __ Move(result, input, pl); 3924 __ cmpi(input, Operand::Zero());
3748 // We can make rsb conditional because the previous cmp instruction 3925 __ Move(result, input);
3749 // will clear the V (overflow) flag and rsb won't set this flag 3926 __ bge(&done);
3750 // if input is positive. 3927 __ li(r0, Operand::Zero()); // clear xer
3751 __ rsb(result, input, Operand::Zero(), SetCC, mi); 3928 __ mtxer(r0);
3929 __ neg(result, result, SetOE, SetRC);
3752 // Deoptimize on overflow. 3930 // Deoptimize on overflow.
3753 DeoptimizeIf(vs, instr->environment()); 3931 DeoptimizeIf(overflow, instr->environment(), cr0);
3932 __ bind(&done);
3754 } 3933 }
3755 3934
3756 3935
3936 #if V8_TARGET_ARCH_PPC64
3937 void LCodeGen::EmitInteger32MathAbs(LMathAbs* instr) {
3938 Register input = ToRegister(instr->value());
3939 Register result = ToRegister(instr->result());
3940 Label done;
3941 __ cmpwi(input, Operand::Zero());
3942 __ Move(result, input);
3943 __ bge(&done);
3944
3945 // Deoptimize on overflow.
3946 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
3947 __ cmpw(input, r0);
3948 DeoptimizeIf(eq, instr->environment());
3949
3950 __ neg(result, result);
3951 __ bind(&done);
3952 }
3953 #endif
3954
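Both integer paths above deoptimize when the negation overflows: in two's complement the most negative value has no positive counterpart, so the PPC64 variant compares against 0x80000000 explicitly while the generic variant checks the overflow bit after neg. A small host-side sketch (not V8 code) of the corner case:

#include <cstdint>
#include <cstdio>

bool Int32AbsOverflows(int32_t input) {
  // abs(-2^31) == 2^31 is not representable as an int32, so Math.abs must
  // deoptimize and produce a heap number instead.
  return input == INT32_MIN;
}

int main() {
  std::printf("%d %d\n", Int32AbsOverflows(-5), Int32AbsOverflows(INT32_MIN));  // 0 1
  return 0;
}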
3955
3757 void LCodeGen::DoMathAbs(LMathAbs* instr) { 3956 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3758 // Class for deferred case. 3957 // Class for deferred case.
3759 class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode { 3958 class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
3760 public: 3959 public:
3761 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr) 3960 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
3762 : LDeferredCode(codegen), instr_(instr) { } 3961 : LDeferredCode(codegen), instr_(instr) { }
3763 virtual void Generate() V8_OVERRIDE { 3962 virtual void Generate() V8_OVERRIDE {
3764 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); 3963 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3765 } 3964 }
3766 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } 3965 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
3767 private: 3966 private:
3768 LMathAbs* instr_; 3967 LMathAbs* instr_;
3769 }; 3968 };
3770 3969
3771 Representation r = instr->hydrogen()->value()->representation(); 3970 Representation r = instr->hydrogen()->value()->representation();
3772 if (r.IsDouble()) { 3971 if (r.IsDouble()) {
3773 DwVfpRegister input = ToDoubleRegister(instr->value()); 3972 DoubleRegister input = ToDoubleRegister(instr->value());
3774 DwVfpRegister result = ToDoubleRegister(instr->result()); 3973 DoubleRegister result = ToDoubleRegister(instr->result());
3775 __ vabs(result, input); 3974 __ fabs(result, input);
3975 #if V8_TARGET_ARCH_PPC64
3976 } else if (r.IsInteger32()) {
3977 EmitInteger32MathAbs(instr);
3978 } else if (r.IsSmi()) {
3979 #else
3776 } else if (r.IsSmiOrInteger32()) { 3980 } else if (r.IsSmiOrInteger32()) {
3777 EmitIntegerMathAbs(instr); 3981 #endif
3982 EmitMathAbs(instr);
3778 } else { 3983 } else {
3779 // Representation is tagged. 3984 // Representation is tagged.
3780 DeferredMathAbsTaggedHeapNumber* deferred = 3985 DeferredMathAbsTaggedHeapNumber* deferred =
3781 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr); 3986 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3782 Register input = ToRegister(instr->value()); 3987 Register input = ToRegister(instr->value());
3783 // Smi check. 3988 // Smi check.
3784 __ JumpIfNotSmi(input, deferred->entry()); 3989 __ JumpIfNotSmi(input, deferred->entry());
3785 // If smi, handle it directly. 3990 // If smi, handle it directly.
3786 EmitIntegerMathAbs(instr); 3991 EmitMathAbs(instr);
3787 __ bind(deferred->exit()); 3992 __ bind(deferred->exit());
3788 } 3993 }
3789 } 3994 }
3790 3995
3791 3996
3792 void LCodeGen::DoMathFloor(LMathFloor* instr) { 3997 void LCodeGen::DoMathFloor(LMathFloor* instr) {
3793 DwVfpRegister input = ToDoubleRegister(instr->value()); 3998 DoubleRegister input = ToDoubleRegister(instr->value());
3794 Register result = ToRegister(instr->result()); 3999 Register result = ToRegister(instr->result());
3795 Register input_high = scratch0(); 4000 Register input_high = scratch0();
4001 Register scratch = ip;
3796 Label done, exact; 4002 Label done, exact;
3797 4003
3798 __ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact); 4004 __ TryInt32Floor(result, input, input_high, scratch, double_scratch0(),
4005 &done, &exact);
3799 DeoptimizeIf(al, instr->environment()); 4006 DeoptimizeIf(al, instr->environment());
3800 4007
3801 __ bind(&exact); 4008 __ bind(&exact);
3802 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 4009 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3803 // Test for -0. 4010 // Test for -0.
3804 __ cmp(result, Operand::Zero()); 4011 __ cmpi(result, Operand::Zero());
3805 __ b(ne, &done); 4012 __ bne(&done);
3806 __ cmp(input_high, Operand::Zero()); 4013 __ cmpwi(input_high, Operand::Zero());
3807 DeoptimizeIf(mi, instr->environment()); 4014 DeoptimizeIf(lt, instr->environment());
3808 } 4015 }
3809 __ bind(&done); 4016 __ bind(&done);
3810 } 4017 }
3811 4018
3812 4019
3813 void LCodeGen::DoMathRound(LMathRound* instr) { 4020 void LCodeGen::DoMathRound(LMathRound* instr) {
3814 DwVfpRegister input = ToDoubleRegister(instr->value()); 4021 DoubleRegister input = ToDoubleRegister(instr->value());
3815 Register result = ToRegister(instr->result()); 4022 Register result = ToRegister(instr->result());
3816 DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp()); 4023 DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
3817 DwVfpRegister input_plus_dot_five = double_scratch1; 4024 DoubleRegister input_plus_dot_five = double_scratch1;
3818 Register input_high = scratch0(); 4025 Register scratch1 = scratch0();
3819 DwVfpRegister dot_five = double_scratch0(); 4026 Register scratch2 = ip;
4027 DoubleRegister dot_five = double_scratch0();
3820 Label convert, done; 4028 Label convert, done;
3821 4029
3822 __ Vmov(dot_five, 0.5, scratch0()); 4030 __ LoadDoubleLiteral(dot_five, 0.5, r0);
3823 __ vabs(double_scratch1, input); 4031 __ fabs(double_scratch1, input);
3824 __ VFPCompareAndSetFlags(double_scratch1, dot_five); 4032 __ fcmpu(double_scratch1, dot_five);
4033 DeoptimizeIf(unordered, instr->environment());
3825 // If input is in [-0.5, -0], the result is -0. 4034 // If input is in [-0.5, -0], the result is -0.
3826 // If input is in [+0, +0.5[, the result is +0. 4035 // If input is in [+0, +0.5[, the result is +0.
3827 // If the input is +0.5, the result is 1. 4036 // If the input is +0.5, the result is 1.
3828 __ b(hi, &convert); // Out of [-0.5, +0.5]. 4037 __ bgt(&convert); // Out of [-0.5, +0.5].
3829 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 4038 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3830 __ VmovHigh(input_high, input); 4039 #if V8_TARGET_ARCH_PPC64
3831 __ cmp(input_high, Operand::Zero()); 4040 __ MovDoubleToInt64(scratch1, input);
3832 DeoptimizeIf(mi, instr->environment()); // [-0.5, -0]. 4041 #else
4042 __ MovDoubleHighToInt(scratch1, input);
4043 #endif
4044 __ cmpi(scratch1, Operand::Zero());
4045 DeoptimizeIf(lt, instr->environment()); // [-0.5, -0].
3833 } 4046 }
3834 __ VFPCompareAndSetFlags(input, dot_five); 4047 Label return_zero;
3835 __ mov(result, Operand(1), LeaveCC, eq); // +0.5. 4048 __ fcmpu(input, dot_five);
4049 __ bne(&return_zero);
4050 __ li(result, Operand(1)); // +0.5.
4051 __ b(&done);
3836 // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on 4052 // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
3837 // flag kBailoutOnMinusZero. 4053 // flag kBailoutOnMinusZero.
3838 __ mov(result, Operand::Zero(), LeaveCC, ne); 4054 __ bind(&return_zero);
4055 __ li(result, Operand::Zero());
3839 __ b(&done); 4056 __ b(&done);
3840 4057
3841 __ bind(&convert); 4058 __ bind(&convert);
3842 __ vadd(input_plus_dot_five, input, dot_five); 4059 __ fadd(input_plus_dot_five, input, dot_five);
3843 // Reuse dot_five (double_scratch0) as we no longer need this value. 4060 // Reuse dot_five (double_scratch0) as we no longer need this value.
3844 __ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(), 4061 __ TryInt32Floor(result, input_plus_dot_five, scratch1,
4062 scratch2, double_scratch0(),
3845 &done, &done); 4063 &done, &done);
3846 DeoptimizeIf(al, instr->environment()); 4064 DeoptimizeIf(al, instr->environment());
3847 __ bind(&done); 4065 __ bind(&done);
3848 } 4066 }
3849 4067
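For reference, the rounding rule the block above implements is Math.round(x) == floor(x + 0.5), with a fast path for |x| <= 0.5 (result is 1 for +0.5, otherwise plus or minus zero, with -0 triggering the minus-zero bailout). A rough host-side sketch of that rule, ignoring the deopt paths and precision corner cases:

#include <cmath>
#include <cstdio>

double JsMathRound(double x) {
  if (std::fabs(x) <= 0.5) {
    if (x == 0.5) return 1.0;             // +0.5 rounds up to 1
    return std::signbit(x) ? -0.0 : 0.0;  // [-0.5, -0] -> -0, [+0, +0.5) -> +0
  }
  return std::floor(x + 0.5);             // general case: floor(x + 0.5)
}

int main() {
  std::printf("%g %g %g\n", JsMathRound(2.5), JsMathRound(-2.5), JsMathRound(-0.4));  // 3 -2 -0
  return 0;
}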
3850 4068
3851 void LCodeGen::DoMathFround(LMathFround* instr) { 4069 void LCodeGen::DoMathFround(LMathFround* instr) {
3852 DwVfpRegister input_reg = ToDoubleRegister(instr->value()); 4070 DoubleRegister input_reg = ToDoubleRegister(instr->value());
3853 DwVfpRegister output_reg = ToDoubleRegister(instr->result()); 4071 DoubleRegister output_reg = ToDoubleRegister(instr->result());
3854 LowDwVfpRegister scratch = double_scratch0(); 4072 __ frsp(output_reg, input_reg);
3855 __ vcvt_f32_f64(scratch.low(), input_reg);
3856 __ vcvt_f64_f32(output_reg, scratch.low());
3857 } 4073 }
3858 4074
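The single frsp above is the whole operation: Math.fround(x) is x rounded to the nearest float32 and widened back to a double. An equivalent host-side sketch:

#include <cstdio>

double Fround(double x) {
  return static_cast<double>(static_cast<float>(x));  // round to float32, widen back
}

int main() {
  std::printf("%.17g\n", Fround(1.1));  // 1.1000000238418579
  return 0;
}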
3859 4075
3860 void LCodeGen::DoMathSqrt(LMathSqrt* instr) { 4076 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3861 DwVfpRegister input = ToDoubleRegister(instr->value()); 4077 DoubleRegister input = ToDoubleRegister(instr->value());
3862 DwVfpRegister result = ToDoubleRegister(instr->result()); 4078 DoubleRegister result = ToDoubleRegister(instr->result());
3863 __ vsqrt(result, input); 4079 __ fsqrt(result, input);
3864 } 4080 }
3865 4081
3866 4082
3867 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { 4083 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3868 DwVfpRegister input = ToDoubleRegister(instr->value()); 4084 DoubleRegister input = ToDoubleRegister(instr->value());
3869 DwVfpRegister result = ToDoubleRegister(instr->result()); 4085 DoubleRegister result = ToDoubleRegister(instr->result());
3870 DwVfpRegister temp = double_scratch0(); 4086 DoubleRegister temp = double_scratch0();
3871 4087
3872 // Note that according to ECMA-262 15.8.2.13: 4088 // Note that according to ECMA-262 15.8.2.13:
3873 // Math.pow(-Infinity, 0.5) == Infinity 4089 // Math.pow(-Infinity, 0.5) == Infinity
3874 // Math.sqrt(-Infinity) == NaN 4090 // Math.sqrt(-Infinity) == NaN
3875 Label done; 4091 Label skip, done;
3876 __ vmov(temp, -V8_INFINITY, scratch0()); 4092
3877 __ VFPCompareAndSetFlags(input, temp); 4093 __ LoadDoubleLiteral(temp, -V8_INFINITY, scratch0());
3878 __ vneg(result, temp, eq); 4094 __ fcmpu(input, temp);
3879 __ b(&done, eq); 4095 __ bne(&skip);
4096 __ fneg(result, temp);
4097 __ b(&done);
3880 4098
3881 // Add +0 to convert -0 to +0. 4099 // Add +0 to convert -0 to +0.
3882 __ vadd(result, input, kDoubleRegZero); 4100 __ bind(&skip);
3883 __ vsqrt(result, result); 4101 __ fadd(result, input, kDoubleRegZero);
4102 __ fsqrt(result, result);
3884 __ bind(&done); 4103 __ bind(&done);
3885 } 4104 }
3886 4105
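A standalone illustration (not V8 code) of the two special cases handled above: a plain sqrt(-Infinity) is NaN although Math.pow(-Infinity, 0.5) must be +Infinity, and adding +0 before the square root turns a -0 input into +0 so the result is +0 rather than -0:

#include <cmath>
#include <cstdio>

int main() {
  std::printf("%f\n", std::sqrt(-INFINITY));   // nan (or -nan): why -Infinity is special-cased
  std::printf("%f\n", std::sqrt(-0.0));        // -0.000000: sqrt preserves the sign of zero
  std::printf("%f\n", std::sqrt(-0.0 + 0.0));  // 0.000000: adding +0 first gives +0
  return 0;
}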
3887 4106
3888 void LCodeGen::DoPower(LPower* instr) { 4107 void LCodeGen::DoPower(LPower* instr) {
3889 Representation exponent_type = instr->hydrogen()->right()->representation(); 4108 Representation exponent_type = instr->hydrogen()->right()->representation();
3890 // Having marked this as a call, we can use any registers. 4109 // Having marked this as a call, we can use any registers.
3891 // Just make sure that the input/output registers are the expected ones. 4110 // Just make sure that the input/output registers are the expected ones.
3892 DCHECK(!instr->right()->IsDoubleRegister() || 4111 DCHECK(!instr->right()->IsDoubleRegister() ||
3893 ToDoubleRegister(instr->right()).is(d1)); 4112 ToDoubleRegister(instr->right()).is(d2));
3894 DCHECK(!instr->right()->IsRegister() || 4113 DCHECK(!instr->right()->IsRegister() ||
3895 ToRegister(instr->right()).is(r2)); 4114 ToRegister(instr->right()).is(r5));
3896 DCHECK(ToDoubleRegister(instr->left()).is(d0)); 4115 DCHECK(ToDoubleRegister(instr->left()).is(d1));
3897 DCHECK(ToDoubleRegister(instr->result()).is(d2)); 4116 DCHECK(ToDoubleRegister(instr->result()).is(d3));
3898 4117
3899 if (exponent_type.IsSmi()) { 4118 if (exponent_type.IsSmi()) {
3900 MathPowStub stub(isolate(), MathPowStub::TAGGED); 4119 MathPowStub stub(isolate(), MathPowStub::TAGGED);
3901 __ CallStub(&stub); 4120 __ CallStub(&stub);
3902 } else if (exponent_type.IsTagged()) { 4121 } else if (exponent_type.IsTagged()) {
3903 Label no_deopt; 4122 Label no_deopt;
3904 __ JumpIfSmi(r2, &no_deopt); 4123 __ JumpIfSmi(r5, &no_deopt);
3905 __ ldr(r6, FieldMemOperand(r2, HeapObject::kMapOffset)); 4124 __ LoadP(r10, FieldMemOperand(r5, HeapObject::kMapOffset));
3906 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); 4125 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
3907 __ cmp(r6, Operand(ip)); 4126 __ cmp(r10, ip);
3908 DeoptimizeIf(ne, instr->environment()); 4127 DeoptimizeIf(ne, instr->environment());
3909 __ bind(&no_deopt); 4128 __ bind(&no_deopt);
3910 MathPowStub stub(isolate(), MathPowStub::TAGGED); 4129 MathPowStub stub(isolate(), MathPowStub::TAGGED);
3911 __ CallStub(&stub); 4130 __ CallStub(&stub);
3912 } else if (exponent_type.IsInteger32()) { 4131 } else if (exponent_type.IsInteger32()) {
3913 MathPowStub stub(isolate(), MathPowStub::INTEGER); 4132 MathPowStub stub(isolate(), MathPowStub::INTEGER);
3914 __ CallStub(&stub); 4133 __ CallStub(&stub);
3915 } else { 4134 } else {
3916 DCHECK(exponent_type.IsDouble()); 4135 DCHECK(exponent_type.IsDouble());
3917 MathPowStub stub(isolate(), MathPowStub::DOUBLE); 4136 MathPowStub stub(isolate(), MathPowStub::DOUBLE);
3918 __ CallStub(&stub); 4137 __ CallStub(&stub);
3919 } 4138 }
3920 } 4139 }
3921 4140
3922 4141
3923 void LCodeGen::DoMathExp(LMathExp* instr) { 4142 void LCodeGen::DoMathExp(LMathExp* instr) {
3924 DwVfpRegister input = ToDoubleRegister(instr->value()); 4143 DoubleRegister input = ToDoubleRegister(instr->value());
3925 DwVfpRegister result = ToDoubleRegister(instr->result()); 4144 DoubleRegister result = ToDoubleRegister(instr->result());
3926 DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp()); 4145 DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
3927 DwVfpRegister double_scratch2 = double_scratch0(); 4146 DoubleRegister double_scratch2 = double_scratch0();
3928 Register temp1 = ToRegister(instr->temp1()); 4147 Register temp1 = ToRegister(instr->temp1());
3929 Register temp2 = ToRegister(instr->temp2()); 4148 Register temp2 = ToRegister(instr->temp2());
3930 4149
3931 MathExpGenerator::EmitMathExp( 4150 MathExpGenerator::EmitMathExp(
3932 masm(), input, result, double_scratch1, double_scratch2, 4151 masm(), input, result, double_scratch1, double_scratch2,
3933 temp1, temp2, scratch0()); 4152 temp1, temp2, scratch0());
3934 } 4153 }
3935 4154
3936 4155
3937 void LCodeGen::DoMathLog(LMathLog* instr) { 4156 void LCodeGen::DoMathLog(LMathLog* instr) {
3938 __ PrepareCallCFunction(0, 1, scratch0()); 4157 __ PrepareCallCFunction(0, 1, scratch0());
3939 __ MovToFloatParameter(ToDoubleRegister(instr->value())); 4158 __ MovToFloatParameter(ToDoubleRegister(instr->value()));
3940 __ CallCFunction(ExternalReference::math_log_double_function(isolate()), 4159 __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
3941 0, 1); 4160 0, 1);
3942 __ MovFromFloatResult(ToDoubleRegister(instr->result())); 4161 __ MovFromFloatResult(ToDoubleRegister(instr->result()));
3943 } 4162 }
3944 4163
3945 4164
3946 void LCodeGen::DoMathClz32(LMathClz32* instr) { 4165 void LCodeGen::DoMathClz32(LMathClz32* instr) {
3947 Register input = ToRegister(instr->value()); 4166 Register input = ToRegister(instr->value());
3948 Register result = ToRegister(instr->result()); 4167 Register result = ToRegister(instr->result());
3949 __ clz(result, input); 4168 __ cntlzw_(result, input);
3950 } 4169 }
3951 4170
3952 4171
3953 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { 4172 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3954 DCHECK(ToRegister(instr->context()).is(cp)); 4173 DCHECK(ToRegister(instr->context()).is(cp));
3955 DCHECK(ToRegister(instr->function()).is(r1)); 4174 DCHECK(ToRegister(instr->function()).is(r4));
3956 DCHECK(instr->HasPointerMap()); 4175 DCHECK(instr->HasPointerMap());
3957 4176
3958 Handle<JSFunction> known_function = instr->hydrogen()->known_function(); 4177 Handle<JSFunction> known_function = instr->hydrogen()->known_function();
3959 if (known_function.is_null()) { 4178 if (known_function.is_null()) {
3960 LPointerMap* pointers = instr->pointer_map(); 4179 LPointerMap* pointers = instr->pointer_map();
3961 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); 4180 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3962 ParameterCount count(instr->arity()); 4181 ParameterCount count(instr->arity());
3963 __ InvokeFunction(r1, count, CALL_FUNCTION, generator); 4182 __ InvokeFunction(r4, count, CALL_FUNCTION, generator);
3964 } else { 4183 } else {
3965 CallKnownFunction(known_function, 4184 CallKnownFunction(known_function,
3966 instr->hydrogen()->formal_parameter_count(), 4185 instr->hydrogen()->formal_parameter_count(),
3967 instr->arity(), 4186 instr->arity(),
3968 instr, 4187 instr,
3969 R1_CONTAINS_TARGET); 4188 R4_CONTAINS_TARGET);
3970 } 4189 }
3971 } 4190 }
3972 4191
3973 4192
3974 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) { 4193 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
3975 DCHECK(ToRegister(instr->result()).is(r0)); 4194 DCHECK(ToRegister(instr->result()).is(r3));
3976 4195
3977 LPointerMap* pointers = instr->pointer_map(); 4196 LPointerMap* pointers = instr->pointer_map();
3978 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); 4197 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3979 4198
3980 if (instr->target()->IsConstantOperand()) { 4199 if (instr->target()->IsConstantOperand()) {
3981 LConstantOperand* target = LConstantOperand::cast(instr->target()); 4200 LConstantOperand* target = LConstantOperand::cast(instr->target());
3982 Handle<Code> code = Handle<Code>::cast(ToHandle(target)); 4201 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3983 generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET)); 4202 generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
3984 PlatformInterfaceDescriptor* call_descriptor = 4203 __ Call(code, RelocInfo::CODE_TARGET);
3985 instr->descriptor()->platform_specific_descriptor();
3986 __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None(), al,
3987 call_descriptor->storage_mode());
3988 } else { 4204 } else {
3989 DCHECK(instr->target()->IsRegister()); 4205 DCHECK(instr->target()->IsRegister());
3990 Register target = ToRegister(instr->target()); 4206 Register target = ToRegister(instr->target());
3991 generator.BeforeCall(__ CallSize(target)); 4207 generator.BeforeCall(__ CallSize(target));
3992 // Make sure we don't emit any additional entries in the constant pool 4208 __ addi(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
3993 // before the call to ensure that the CallCodeSize() calculated the correct
3994 // number of instructions for the constant pool load.
3995 {
3996 ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
3997 __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
3998 }
3999 __ Call(target); 4209 __ Call(target);
4000 } 4210 }
4001 generator.AfterCall(); 4211 generator.AfterCall();
4002 } 4212 }
4003 4213
4004 4214
4005 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) { 4215 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
4006 DCHECK(ToRegister(instr->function()).is(r1)); 4216 DCHECK(ToRegister(instr->function()).is(r4));
4007 DCHECK(ToRegister(instr->result()).is(r0)); 4217 DCHECK(ToRegister(instr->result()).is(r3));
4008 4218
4009 if (instr->hydrogen()->pass_argument_count()) { 4219 if (instr->hydrogen()->pass_argument_count()) {
4010 __ mov(r0, Operand(instr->arity())); 4220 __ mov(r3, Operand(instr->arity()));
4011 } 4221 }
4012 4222
4013 // Change context. 4223 // Change context.
4014 __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); 4224 __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
4015 4225
4016 // Load the code entry address 4226 // Load the code entry address
4017 __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); 4227 __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
4018 __ Call(ip); 4228 __ Call(ip);
4019 4229
4020 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); 4230 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
4021 } 4231 }
4022 4232
4023 4233
4024 void LCodeGen::DoCallFunction(LCallFunction* instr) { 4234 void LCodeGen::DoCallFunction(LCallFunction* instr) {
4025 DCHECK(ToRegister(instr->context()).is(cp)); 4235 DCHECK(ToRegister(instr->context()).is(cp));
4026 DCHECK(ToRegister(instr->function()).is(r1)); 4236 DCHECK(ToRegister(instr->function()).is(r4));
4027 DCHECK(ToRegister(instr->result()).is(r0)); 4237 DCHECK(ToRegister(instr->result()).is(r3));
4028 4238
4029 int arity = instr->arity(); 4239 int arity = instr->arity();
4030 CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags()); 4240 CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
4031 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 4241 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4032 } 4242 }
4033 4243
4034 4244
4035 void LCodeGen::DoCallNew(LCallNew* instr) { 4245 void LCodeGen::DoCallNew(LCallNew* instr) {
4036 DCHECK(ToRegister(instr->context()).is(cp)); 4246 DCHECK(ToRegister(instr->context()).is(cp));
4037 DCHECK(ToRegister(instr->constructor()).is(r1)); 4247 DCHECK(ToRegister(instr->constructor()).is(r4));
4038 DCHECK(ToRegister(instr->result()).is(r0)); 4248 DCHECK(ToRegister(instr->result()).is(r3));
4039 4249
4040 __ mov(r0, Operand(instr->arity())); 4250 __ mov(r3, Operand(instr->arity()));
4041 // No cell in r2 for construct type feedback in optimized code 4251 // No cell in r5 for construct type feedback in optimized code
4042 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); 4252 __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
4043 CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS); 4253 CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
4044 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); 4254 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4045 } 4255 }
4046 4256
4047 4257
4048 void LCodeGen::DoCallNewArray(LCallNewArray* instr) { 4258 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
4049 DCHECK(ToRegister(instr->context()).is(cp)); 4259 DCHECK(ToRegister(instr->context()).is(cp));
4050 DCHECK(ToRegister(instr->constructor()).is(r1)); 4260 DCHECK(ToRegister(instr->constructor()).is(r4));
4051 DCHECK(ToRegister(instr->result()).is(r0)); 4261 DCHECK(ToRegister(instr->result()).is(r3));
4052 4262
4053 __ mov(r0, Operand(instr->arity())); 4263 __ mov(r3, Operand(instr->arity()));
4054 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); 4264 __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
4055 ElementsKind kind = instr->hydrogen()->elements_kind(); 4265 ElementsKind kind = instr->hydrogen()->elements_kind();
4056 AllocationSiteOverrideMode override_mode = 4266 AllocationSiteOverrideMode override_mode =
4057 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE) 4267 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
4058 ? DISABLE_ALLOCATION_SITES 4268 ? DISABLE_ALLOCATION_SITES
4059 : DONT_OVERRIDE; 4269 : DONT_OVERRIDE;
4060 4270
4061 if (instr->arity() == 0) { 4271 if (instr->arity() == 0) {
4062 ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode); 4272 ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
4063 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); 4273 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4064 } else if (instr->arity() == 1) { 4274 } else if (instr->arity() == 1) {
4065 Label done; 4275 Label done;
4066 if (IsFastPackedElementsKind(kind)) { 4276 if (IsFastPackedElementsKind(kind)) {
4067 Label packed_case; 4277 Label packed_case;
4068 // Look at the first argument to decide whether the holey 4278 // Look at the first argument to decide whether the holey
4069 // variant of the elements kind is required. 4279 // variant of the elements kind is required.

4070 __ ldr(r5, MemOperand(sp, 0)); 4280 __ LoadP(r8, MemOperand(sp, 0));
4071 __ cmp(r5, Operand::Zero()); 4281 __ cmpi(r8, Operand::Zero());
4072 __ b(eq, &packed_case); 4282 __ beq(&packed_case);
4073 4283
4074 ElementsKind holey_kind = GetHoleyElementsKind(kind); 4284 ElementsKind holey_kind = GetHoleyElementsKind(kind);
4075 ArraySingleArgumentConstructorStub stub(isolate(), 4285 ArraySingleArgumentConstructorStub stub(isolate(),
4076 holey_kind, 4286 holey_kind,
4077 override_mode); 4287 override_mode);
4078 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); 4288 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4079 __ jmp(&done); 4289 __ b(&done);
4080 __ bind(&packed_case); 4290 __ bind(&packed_case);
4081 } 4291 }
4082 4292
4083 ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode); 4293 ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
4084 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); 4294 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4085 __ bind(&done); 4295 __ bind(&done);
4086 } else { 4296 } else {
4087 ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode); 4297 ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
4088 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); 4298 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4089 } 4299 }
4090 } 4300 }
4091 4301
4092 4302
4093 void LCodeGen::DoCallRuntime(LCallRuntime* instr) { 4303 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
4094 CallRuntime(instr->function(), instr->arity(), instr); 4304 CallRuntime(instr->function(), instr->arity(), instr);
4095 } 4305 }
4096 4306
4097 4307
4098 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) { 4308 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
4099 Register function = ToRegister(instr->function()); 4309 Register function = ToRegister(instr->function());
4100 Register code_object = ToRegister(instr->code_object()); 4310 Register code_object = ToRegister(instr->code_object());
4101 __ add(code_object, code_object, Operand(Code::kHeaderSize - kHeapObjectTag)); 4311 __ addi(code_object, code_object,
4102 __ str(code_object, 4312 Operand(Code::kHeaderSize - kHeapObjectTag));
4103 FieldMemOperand(function, JSFunction::kCodeEntryOffset)); 4313 __ StoreP(code_object,
4314 FieldMemOperand(function, JSFunction::kCodeEntryOffset), r0);
4104 } 4315 }
4105 4316
4106 4317
4107 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) { 4318 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
4108 Register result = ToRegister(instr->result()); 4319 Register result = ToRegister(instr->result());
4109 Register base = ToRegister(instr->base_object()); 4320 Register base = ToRegister(instr->base_object());
4110 if (instr->offset()->IsConstantOperand()) { 4321 if (instr->offset()->IsConstantOperand()) {
4111 LConstantOperand* offset = LConstantOperand::cast(instr->offset()); 4322 LConstantOperand* offset = LConstantOperand::cast(instr->offset());
4112 __ add(result, base, Operand(ToInteger32(offset))); 4323 __ Add(result, base, ToInteger32(offset), r0);
4113 } else { 4324 } else {
4114 Register offset = ToRegister(instr->offset()); 4325 Register offset = ToRegister(instr->offset());
4115 __ add(result, base, offset); 4326 __ add(result, base, offset);
4116 } 4327 }
4117 } 4328 }
4118 4329
4119 4330
4120 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { 4331 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
4332 HStoreNamedField* hinstr = instr->hydrogen();
4121 Representation representation = instr->representation(); 4333 Representation representation = instr->representation();
4122 4334
4123 Register object = ToRegister(instr->object()); 4335 Register object = ToRegister(instr->object());
4124 Register scratch = scratch0(); 4336 Register scratch = scratch0();
4125 HObjectAccess access = instr->hydrogen()->access(); 4337 HObjectAccess access = hinstr->access();
4126 int offset = access.offset(); 4338 int offset = access.offset();
4127 4339
4128 if (access.IsExternalMemory()) { 4340 if (access.IsExternalMemory()) {
4129 Register value = ToRegister(instr->value()); 4341 Register value = ToRegister(instr->value());
4130 MemOperand operand = MemOperand(object, offset); 4342 MemOperand operand = MemOperand(object, offset);
4131 __ Store(value, operand, representation); 4343 __ StoreRepresentation(value, operand, representation, r0);
4132 return; 4344 return;
4133 } 4345 }
4134 4346
4135 __ AssertNotSmi(object); 4347 __ AssertNotSmi(object);
4136 4348
4349 #if V8_TARGET_ARCH_PPC64
4350 DCHECK(!representation.IsSmi() ||
4351 !instr->value()->IsConstantOperand() ||
4352 IsInteger32(LConstantOperand::cast(instr->value())));
4353 #else
4137 DCHECK(!representation.IsSmi() || 4354 DCHECK(!representation.IsSmi() ||
4138 !instr->value()->IsConstantOperand() || 4355 !instr->value()->IsConstantOperand() ||
4139 IsSmi(LConstantOperand::cast(instr->value()))); 4356 IsSmi(LConstantOperand::cast(instr->value())));
4357 #endif
4140 if (representation.IsDouble()) { 4358 if (representation.IsDouble()) {
4141 DCHECK(access.IsInobject()); 4359 DCHECK(access.IsInobject());
4142 DCHECK(!instr->hydrogen()->has_transition()); 4360 DCHECK(!hinstr->has_transition());
4143 DCHECK(!instr->hydrogen()->NeedsWriteBarrier()); 4361 DCHECK(!hinstr->NeedsWriteBarrier());
4144 DwVfpRegister value = ToDoubleRegister(instr->value()); 4362 DoubleRegister value = ToDoubleRegister(instr->value());
4145 __ vstr(value, FieldMemOperand(object, offset)); 4363 __ stfd(value, FieldMemOperand(object, offset));
4146 return; 4364 return;
4147 } 4365 }
4148 4366
4149 if (instr->hydrogen()->has_transition()) { 4367 if (hinstr->has_transition()) {
4150 Handle<Map> transition = instr->hydrogen()->transition_map(); 4368 Handle<Map> transition = hinstr->transition_map();
4151 AddDeprecationDependency(transition); 4369 AddDeprecationDependency(transition);
4152 __ mov(scratch, Operand(transition)); 4370 __ mov(scratch, Operand(transition));
4153 __ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); 4371 __ StoreP(scratch, FieldMemOperand(object, HeapObject::kMapOffset), r0);
4154 if (instr->hydrogen()->NeedsWriteBarrierForMap()) { 4372 if (hinstr->NeedsWriteBarrierForMap()) {
4155 Register temp = ToRegister(instr->temp()); 4373 Register temp = ToRegister(instr->temp());
4156 // Update the write barrier for the map field. 4374 // Update the write barrier for the map field.
4157 __ RecordWriteForMap(object, 4375 __ RecordWriteForMap(object,
4158 scratch, 4376 scratch,
4159 temp, 4377 temp,
4160 GetLinkRegisterState(), 4378 GetLinkRegisterState(),
4161 kSaveFPRegs); 4379 kSaveFPRegs);
4162 } 4380 }
4163 } 4381 }
4164 4382
4165 // Do the store. 4383 // Do the store.
4166 Register value = ToRegister(instr->value()); 4384 Register value = ToRegister(instr->value());
4385
4386 #if V8_TARGET_ARCH_PPC64
4387 // 64-bit Smi optimization
4388 if (representation.IsSmi() &&
4389 hinstr->value()->representation().IsInteger32()) {
4390 DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
4391 // Store int value directly to upper half of the smi.
4392 STATIC_ASSERT(kSmiTag == 0);
4393 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
4394 #if V8_TARGET_LITTLE_ENDIAN
4395 offset += kPointerSize / 2;
4396 #endif
4397 representation = Representation::Integer32();
4398 }
4399 #endif
4400
4167 if (access.IsInobject()) { 4401 if (access.IsInobject()) {
4168 MemOperand operand = FieldMemOperand(object, offset); 4402 MemOperand operand = FieldMemOperand(object, offset);
4169 __ Store(value, operand, representation); 4403 __ StoreRepresentation(value, operand, representation, r0);
4170 if (instr->hydrogen()->NeedsWriteBarrier()) { 4404 if (hinstr->NeedsWriteBarrier()) {
4171 // Update the write barrier for the object for in-object properties. 4405 // Update the write barrier for the object for in-object properties.
4172 __ RecordWriteField(object, 4406 __ RecordWriteField(object,
4173 offset, 4407 offset,
4174 value, 4408 value,
4175 scratch, 4409 scratch,
4176 GetLinkRegisterState(), 4410 GetLinkRegisterState(),
4177 kSaveFPRegs, 4411 kSaveFPRegs,
4178 EMIT_REMEMBERED_SET, 4412 EMIT_REMEMBERED_SET,
4179 instr->hydrogen()->SmiCheckForWriteBarrier(), 4413 hinstr->SmiCheckForWriteBarrier(),
4180 instr->hydrogen()->PointersToHereCheckForValue()); 4414 hinstr->PointersToHereCheckForValue());
4181 } 4415 }
4182 } else { 4416 } else {
4183 __ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset)); 4417 __ LoadP(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
4184 MemOperand operand = FieldMemOperand(scratch, offset); 4418 MemOperand operand = FieldMemOperand(scratch, offset);
4185 __ Store(value, operand, representation); 4419 __ StoreRepresentation(value, operand, representation, r0);
4186 if (instr->hydrogen()->NeedsWriteBarrier()) { 4420 if (hinstr->NeedsWriteBarrier()) {
4187 // Update the write barrier for the properties array. 4421 // Update the write barrier for the properties array.
4188 // object is used as a scratch register. 4422 // object is used as a scratch register.
4189 __ RecordWriteField(scratch, 4423 __ RecordWriteField(scratch,
4190 offset, 4424 offset,
4191 value, 4425 value,
4192 object, 4426 object,
4193 GetLinkRegisterState(), 4427 GetLinkRegisterState(),
4194 kSaveFPRegs, 4428 kSaveFPRegs,
4195 EMIT_REMEMBERED_SET, 4429 EMIT_REMEMBERED_SET,
4196 instr->hydrogen()->SmiCheckForWriteBarrier(), 4430 hinstr->SmiCheckForWriteBarrier(),
4197 instr->hydrogen()->PointersToHereCheckForValue()); 4431 hinstr->PointersToHereCheckForValue());
4198 } 4432 }
4199 } 4433 }
4200 } 4434 }
4201 4435
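Note on the 64-bit Smi optimization introduced above (and again in DoStoreKeyedFixedArray below): with kSmiTagSize + kSmiShiftSize == 32, the Smi encoding of an int32 is that value sitting in the upper 32 bits of the field, so an int32 store at the half-word offset produces a valid Smi without an explicit shift; on little-endian targets the upper half lives at offset + kPointerSize / 2. A rough sketch of the effect, assuming a little-endian host:

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  int64_t smi_slot = 0;
  int32_t value = 42;
  // Write the int32 into the upper half of the 64-bit slot (little-endian:
  // byte offset +4), mirroring the StoreRepresentation(Integer32) above.
  std::memcpy(reinterpret_cast<char*>(&smi_slot) + 4, &value, sizeof(value));
  std::printf("%d\n", smi_slot == (static_cast<int64_t>(value) << 32));  // 1
  return 0;
}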
4202 4436
4203 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { 4437 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
4204 DCHECK(ToRegister(instr->context()).is(cp)); 4438 DCHECK(ToRegister(instr->context()).is(cp));
4205 DCHECK(ToRegister(instr->object()).is(StoreIC::ReceiverRegister())); 4439 DCHECK(ToRegister(instr->object()).is(StoreIC::ReceiverRegister()));
4206 DCHECK(ToRegister(instr->value()).is(StoreIC::ValueRegister())); 4440 DCHECK(ToRegister(instr->value()).is(StoreIC::ValueRegister()));
4207 4441
4208 __ mov(StoreIC::NameRegister(), Operand(instr->name())); 4442 __ mov(StoreIC::NameRegister(), Operand(instr->name()));
4209 Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode()); 4443 Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
4210 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS); 4444 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4211 } 4445 }
4212 4446
4213 4447
4214 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { 4448 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
4215 Condition cc = instr->hydrogen()->allow_equality() ? hi : hs; 4449 Representation representation = instr->hydrogen()->length()->representation();
4216 if (instr->index()->IsConstantOperand()) { 4450 DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
4217 Operand index = ToOperand(instr->index()); 4451 DCHECK(representation.IsSmiOrInteger32());
4452
4453 Condition cc = instr->hydrogen()->allow_equality() ? lt : le;
4454 if (instr->length()->IsConstantOperand()) {
4455 int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
4456 Register index = ToRegister(instr->index());
4457 if (representation.IsSmi()) {
4458 __ Cmpli(index, Operand(Smi::FromInt(length)), r0);
4459 } else {
4460 __ Cmplwi(index, Operand(length), r0);
4461 }
4462 cc = CommuteCondition(cc);
4463 } else if (instr->index()->IsConstantOperand()) {
4464 int32_t index = ToInteger32(LConstantOperand::cast(instr->index()));
4218 Register length = ToRegister(instr->length()); 4465 Register length = ToRegister(instr->length());
4219 __ cmp(length, index); 4466 if (representation.IsSmi()) {
4220 cc = CommuteCondition(cc); 4467 __ Cmpli(length, Operand(Smi::FromInt(index)), r0);
4468 } else {
4469 __ Cmplwi(length, Operand(index), r0);
4470 }
4221 } else { 4471 } else {
4222 Register index = ToRegister(instr->index()); 4472 Register index = ToRegister(instr->index());
4223 Operand length = ToOperand(instr->length()); 4473 Register length = ToRegister(instr->length());
4224 __ cmp(index, length); 4474 if (representation.IsSmi()) {
4475 __ cmpl(length, index);
4476 } else {
4477 __ cmplw(length, index);
4478 }
4225 } 4479 }
4226 if (FLAG_debug_code && instr->hydrogen()->skip_check()) { 4480 if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
4227 Label done; 4481 Label done;
4228 __ b(NegateCondition(cc), &done); 4482 __ b(NegateCondition(cc), &done);
4229 __ stop("eliminated bounds check failed"); 4483 __ stop("eliminated bounds check failed");
4230 __ bind(&done); 4484 __ bind(&done);
4231 } else { 4485 } else {
4232 DeoptimizeIf(cc, instr->environment()); 4486 DeoptimizeIf(cc, instr->environment());
4233 } 4487 }
4234 } 4488 }
4235 4489
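For the bounds check above, the deopt condition is written against the (length, index) operand order (lt when equality is allowed, le otherwise); when a constant operand forces the comparison to be emitted the other way round, CommuteCondition mirrors it. In index-relative terms the check reduces to the following sketch (unsigned comparison, as with the cmpl/cmplw above):

#include <cstdint>
#include <cstdio>

bool BoundsCheckFails(uint32_t index, uint32_t length, bool allow_equality) {
  // Deoptimize when the index is out of range; allow_equality permits
  // index == length.
  return allow_equality ? (index > length) : (index >= length);
}

int main() {
  std::printf("%d %d\n", BoundsCheckFails(3, 3, false), BoundsCheckFails(3, 3, true));  // 1 0
  return 0;
}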
4236 4490
4237 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { 4491 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
4238 Register external_pointer = ToRegister(instr->elements()); 4492 Register external_pointer = ToRegister(instr->elements());
4239 Register key = no_reg; 4493 Register key = no_reg;
4240 ElementsKind elements_kind = instr->elements_kind(); 4494 ElementsKind elements_kind = instr->elements_kind();
4241 bool key_is_constant = instr->key()->IsConstantOperand(); 4495 bool key_is_constant = instr->key()->IsConstantOperand();
4242 int constant_key = 0; 4496 int constant_key = 0;
4243 if (key_is_constant) { 4497 if (key_is_constant) {
4244 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); 4498 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4245 if (constant_key & 0xF0000000) { 4499 if (constant_key & 0xF0000000) {
4246 Abort(kArrayIndexConstantValueTooBig); 4500 Abort(kArrayIndexConstantValueTooBig);
4247 } 4501 }
4248 } else { 4502 } else {
4249 key = ToRegister(instr->key()); 4503 key = ToRegister(instr->key());
4250 } 4504 }
4251 int element_size_shift = ElementsKindToShiftSize(elements_kind); 4505 int element_size_shift = ElementsKindToShiftSize(elements_kind);
4252 int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) 4506 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
4253 ? (element_size_shift - kSmiTagSize) : element_size_shift;
4254 int base_offset = instr->base_offset(); 4507 int base_offset = instr->base_offset();
4255 4508
4256 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS || 4509 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
4257 elements_kind == FLOAT32_ELEMENTS || 4510 elements_kind == FLOAT32_ELEMENTS ||
4258 elements_kind == EXTERNAL_FLOAT64_ELEMENTS || 4511 elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
4259 elements_kind == FLOAT64_ELEMENTS) { 4512 elements_kind == FLOAT64_ELEMENTS) {
4260 Register address = scratch0(); 4513 Register address = scratch0();
4261 DwVfpRegister value(ToDoubleRegister(instr->value())); 4514 DoubleRegister value(ToDoubleRegister(instr->value()));
4262 if (key_is_constant) { 4515 if (key_is_constant) {
4263 if (constant_key != 0) { 4516 if (constant_key != 0) {
4264 __ add(address, external_pointer, 4517 __ Add(address, external_pointer,
4265 Operand(constant_key << element_size_shift)); 4518 constant_key << element_size_shift,
4519 r0);
4266 } else { 4520 } else {
4267 address = external_pointer; 4521 address = external_pointer;
4268 } 4522 }
4269 } else { 4523 } else {
4270 __ add(address, external_pointer, Operand(key, LSL, shift_size)); 4524 __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
4525 __ add(address, external_pointer, r0);
4271 } 4526 }
4272 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS || 4527 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
4273 elements_kind == FLOAT32_ELEMENTS) { 4528 elements_kind == FLOAT32_ELEMENTS) {
4274 __ vcvt_f32_f64(double_scratch0().low(), value); 4529 __ frsp(double_scratch0(), value);
4275 __ vstr(double_scratch0().low(), address, base_offset); 4530 __ stfs(double_scratch0(), MemOperand(address, base_offset));
4276 } else { // Storing doubles, not floats. 4531 } else { // Storing doubles, not floats.
4277 __ vstr(value, address, base_offset); 4532 __ stfd(value, MemOperand(address, base_offset));
4278 } 4533 }
4279 } else { 4534 } else {
4280 Register value(ToRegister(instr->value())); 4535 Register value(ToRegister(instr->value()));
4281 MemOperand mem_operand = PrepareKeyedOperand( 4536 MemOperand mem_operand = PrepareKeyedOperand(
4282 key, external_pointer, key_is_constant, constant_key, 4537 key, external_pointer, key_is_constant, key_is_smi, constant_key,
4283 element_size_shift, shift_size, 4538 element_size_shift, base_offset);
4284 base_offset);
4285 switch (elements_kind) { 4539 switch (elements_kind) {
4286 case EXTERNAL_UINT8_CLAMPED_ELEMENTS: 4540 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
4287 case EXTERNAL_INT8_ELEMENTS: 4541 case EXTERNAL_INT8_ELEMENTS:
4288 case EXTERNAL_UINT8_ELEMENTS: 4542 case EXTERNAL_UINT8_ELEMENTS:
4289 case UINT8_ELEMENTS: 4543 case UINT8_ELEMENTS:
4290 case UINT8_CLAMPED_ELEMENTS: 4544 case UINT8_CLAMPED_ELEMENTS:
4291 case INT8_ELEMENTS: 4545 case INT8_ELEMENTS:
4292 __ strb(value, mem_operand); 4546 if (key_is_constant) {
4547 __ StoreByte(value, mem_operand, r0);
4548 } else {
4549 __ stbx(value, mem_operand);
4550 }
4293 break; 4551 break;
4294 case EXTERNAL_INT16_ELEMENTS: 4552 case EXTERNAL_INT16_ELEMENTS:
4295 case EXTERNAL_UINT16_ELEMENTS: 4553 case EXTERNAL_UINT16_ELEMENTS:
4296 case INT16_ELEMENTS: 4554 case INT16_ELEMENTS:
4297 case UINT16_ELEMENTS: 4555 case UINT16_ELEMENTS:
4298 __ strh(value, mem_operand); 4556 if (key_is_constant) {
4557 __ StoreHalfWord(value, mem_operand, r0);
4558 } else {
4559 __ sthx(value, mem_operand);
4560 }
4299 break; 4561 break;
4300 case EXTERNAL_INT32_ELEMENTS: 4562 case EXTERNAL_INT32_ELEMENTS:
4301 case EXTERNAL_UINT32_ELEMENTS: 4563 case EXTERNAL_UINT32_ELEMENTS:
4302 case INT32_ELEMENTS: 4564 case INT32_ELEMENTS:
4303 case UINT32_ELEMENTS: 4565 case UINT32_ELEMENTS:
4304 __ str(value, mem_operand); 4566 if (key_is_constant) {
4567 __ StoreWord(value, mem_operand, r0);
4568 } else {
4569 __ stwx(value, mem_operand);
4570 }
4305 break; 4571 break;
4306 case FLOAT32_ELEMENTS: 4572 case FLOAT32_ELEMENTS:
4307 case FLOAT64_ELEMENTS: 4573 case FLOAT64_ELEMENTS:
4308 case EXTERNAL_FLOAT32_ELEMENTS: 4574 case EXTERNAL_FLOAT32_ELEMENTS:
4309 case EXTERNAL_FLOAT64_ELEMENTS: 4575 case EXTERNAL_FLOAT64_ELEMENTS:
4310 case FAST_DOUBLE_ELEMENTS: 4576 case FAST_DOUBLE_ELEMENTS:
4311 case FAST_ELEMENTS: 4577 case FAST_ELEMENTS:
4312 case FAST_SMI_ELEMENTS: 4578 case FAST_SMI_ELEMENTS:
4313 case FAST_HOLEY_DOUBLE_ELEMENTS: 4579 case FAST_HOLEY_DOUBLE_ELEMENTS:
4314 case FAST_HOLEY_ELEMENTS: 4580 case FAST_HOLEY_ELEMENTS:
4315 case FAST_HOLEY_SMI_ELEMENTS: 4581 case FAST_HOLEY_SMI_ELEMENTS:
4316 case DICTIONARY_ELEMENTS: 4582 case DICTIONARY_ELEMENTS:
4317 case SLOPPY_ARGUMENTS_ELEMENTS: 4583 case SLOPPY_ARGUMENTS_ELEMENTS:
4318 UNREACHABLE(); 4584 UNREACHABLE();
4319 break; 4585 break;
4320 } 4586 }
4321 } 4587 }
4322 } 4588 }
4323 4589
4324 4590
4325 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { 4591 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4326 DwVfpRegister value = ToDoubleRegister(instr->value()); 4592 DoubleRegister value = ToDoubleRegister(instr->value());
4327 Register elements = ToRegister(instr->elements()); 4593 Register elements = ToRegister(instr->elements());
4594 Register key = no_reg;
4328 Register scratch = scratch0(); 4595 Register scratch = scratch0();
4329 DwVfpRegister double_scratch = double_scratch0(); 4596 DoubleRegister double_scratch = double_scratch0();
4330 bool key_is_constant = instr->key()->IsConstantOperand(); 4597 bool key_is_constant = instr->key()->IsConstantOperand();
4331 int base_offset = instr->base_offset(); 4598 int constant_key = 0;
4332 4599
4333 // Calculate the effective address of the slot in the array to store the 4600 // Calculate the effective address of the slot in the array to store the
4334 // double value. 4601 // double value.
4335 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
4336 if (key_is_constant) { 4602 if (key_is_constant) {
4337 int constant_key = ToInteger32(LConstantOperand::cast(instr->key())); 4603 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4338 if (constant_key & 0xF0000000) { 4604 if (constant_key & 0xF0000000) {
4339 Abort(kArrayIndexConstantValueTooBig); 4605 Abort(kArrayIndexConstantValueTooBig);
4340 } 4606 }
4341 __ add(scratch, elements,
4342 Operand((constant_key << element_size_shift) + base_offset));
4343 } else { 4607 } else {
4344 int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) 4608 key = ToRegister(instr->key());
4345 ? (element_size_shift - kSmiTagSize) : element_size_shift; 4609 }
4346 __ add(scratch, elements, Operand(base_offset)); 4610 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
4347 __ add(scratch, scratch, 4611 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
4348 Operand(ToRegister(instr->key()), LSL, shift_size)); 4612 int base_offset = instr->base_offset() + constant_key * kDoubleSize;
4613 if (!key_is_constant) {
4614 __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
4615 __ add(scratch, elements, scratch);
4616 elements = scratch;
4617 }
4618 if (!is_int16(base_offset)) {
4619 __ Add(scratch, elements, base_offset, r0);
4620 base_offset = 0;
4621 elements = scratch;
4349 } 4622 }
4350 4623
4351 if (instr->NeedsCanonicalization()) { 4624 if (instr->NeedsCanonicalization()) {
4352 // Force a canonical NaN. 4625 // Force a canonical NaN.
4353 if (masm()->emit_debug_code()) { 4626 __ CanonicalizeNaN(double_scratch, value);
4354 __ vmrs(ip); 4627 __ stfd(double_scratch, MemOperand(elements, base_offset));
4355 __ tst(ip, Operand(kVFPDefaultNaNModeControlBit));
4356 __ Assert(ne, kDefaultNaNModeNotSet);
4357 }
4358 __ VFPCanonicalizeNaN(double_scratch, value);
4359 __ vstr(double_scratch, scratch, 0);
4360 } else { 4628 } else {
4361 __ vstr(value, scratch, 0); 4629 __ stfd(value, MemOperand(elements, base_offset));
4362 } 4630 }
4363 } 4631 }
4364 4632
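On the canonicalization above: before a double is stored into a FixedDoubleArray, any NaN is replaced by the one canonical quiet-NaN bit pattern, so arbitrary NaN payloads never reach the backing store (where a dedicated NaN pattern is reserved for the hole). A host-side sketch of the effect, not the actual macro-assembler helper:

#include <cmath>
#include <cstdio>
#include <limits>

double CanonicalizeNaNForStore(double value) {
  return std::isnan(value) ? std::numeric_limits<double>::quiet_NaN() : value;
}

int main() {
  double junk_nan = std::numeric_limits<double>::signaling_NaN();  // a NaN whose payload we do not control
  std::printf("%d\n", std::isnan(CanonicalizeNaNForStore(junk_nan)));  // 1
  return 0;
}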
4365 4633
4366 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { 4634 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4635 HStoreKeyed* hinstr = instr->hydrogen();
4367 Register value = ToRegister(instr->value()); 4636 Register value = ToRegister(instr->value());
4368 Register elements = ToRegister(instr->elements()); 4637 Register elements = ToRegister(instr->elements());
4369 Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) 4638 Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
4370 : no_reg;
4371 Register scratch = scratch0(); 4639 Register scratch = scratch0();
4372 Register store_base = scratch; 4640 Register store_base = scratch;
4373 int offset = instr->base_offset(); 4641 int offset = instr->base_offset();
4374 4642
4375 // Do the store. 4643 // Do the store.
4376 if (instr->key()->IsConstantOperand()) { 4644 if (instr->key()->IsConstantOperand()) {
4377 DCHECK(!instr->hydrogen()->NeedsWriteBarrier()); 4645 DCHECK(!hinstr->NeedsWriteBarrier());
4378 LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); 4646 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
4379 offset += ToInteger32(const_operand) * kPointerSize; 4647 offset += ToInteger32(const_operand) * kPointerSize;
4380 store_base = elements; 4648 store_base = elements;
4381 } else { 4649 } else {
4382 // Even though the HLoadKeyed instruction forces the input 4650 // Even though the HLoadKeyed instruction forces the input
4383 // representation for the key to be an integer, the input gets replaced 4651 // representation for the key to be an integer, the input gets replaced
4384 // during bounds check elimination with the index argument to the bounds 4652 // during bounds check elimination with the index argument to the bounds
4385 // check, which can be tagged, so that case must be handled here, too. 4653 // check, which can be tagged, so that case must be handled here, too.
4386 if (instr->hydrogen()->key()->representation().IsSmi()) { 4654 if (hinstr->key()->representation().IsSmi()) {
4387 __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key)); 4655 __ SmiToPtrArrayOffset(scratch, key);
4388 } else { 4656 } else {
4389 __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2)); 4657 __ ShiftLeftImm(scratch, key, Operand(kPointerSizeLog2));
4390 } 4658 }
4659 __ add(scratch, elements, scratch);
4391 } 4660 }
4392 __ str(value, MemOperand(store_base, offset));
4393 4661
4394 if (instr->hydrogen()->NeedsWriteBarrier()) { 4662 Representation representation = hinstr->value()->representation();
4395 SmiCheck check_needed = 4663
4396 instr->hydrogen()->value()->type().IsHeapObject() 4664 #if V8_TARGET_ARCH_PPC64
4397 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; 4665 // 64-bit Smi optimization
4666 if (representation.IsInteger32()) {
4667 DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
4668 DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
4669 // Store int value directly to upper half of the smi.
4670 STATIC_ASSERT(kSmiTag == 0);
4671 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
4672 #if V8_TARGET_LITTLE_ENDIAN
4673 offset += kPointerSize / 2;
4674 #endif
4675 }
4676 #endif
4677
4678 __ StoreRepresentation(value, MemOperand(store_base, offset),
4679 representation, r0);
4680
4681 if (hinstr->NeedsWriteBarrier()) {
4682 SmiCheck check_needed = hinstr->value()->type().IsHeapObject()
4683 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4398 // Compute address of modified element and store it into key register. 4684 // Compute address of modified element and store it into key register.
4399 __ add(key, store_base, Operand(offset)); 4685 __ Add(key, store_base, offset, r0);
4400 __ RecordWrite(elements, 4686 __ RecordWrite(elements,
4401 key, 4687 key,
4402 value, 4688 value,
4403 GetLinkRegisterState(), 4689 GetLinkRegisterState(),
4404 kSaveFPRegs, 4690 kSaveFPRegs,
4405 EMIT_REMEMBERED_SET, 4691 EMIT_REMEMBERED_SET,
4406 check_needed, 4692 check_needed,
4407 instr->hydrogen()->PointersToHereCheckForValue()); 4693 hinstr->PointersToHereCheckForValue());
4408 } 4694 }
4409 } 4695 }
4410 4696
4411 4697
4412 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) { 4698 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4413 // By cases: external, fast double 4699 // By cases: external, fast double
4414 if (instr->is_typed_elements()) { 4700 if (instr->is_typed_elements()) {
4415 DoStoreKeyedExternalArray(instr); 4701 DoStoreKeyedExternalArray(instr);
4416 } else if (instr->hydrogen()->value()->representation().IsDouble()) { 4702 } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4417 DoStoreKeyedFixedDoubleArray(instr); 4703 DoStoreKeyedFixedDoubleArray(instr);
4418 } else { 4704 } else {
4419 DoStoreKeyedFixedArray(instr); 4705 DoStoreKeyedFixedArray(instr);
4420 } 4706 }
4421 } 4707 }
4422 4708
4423 4709
4424 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { 4710 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4425 DCHECK(ToRegister(instr->context()).is(cp)); 4711 DCHECK(ToRegister(instr->context()).is(cp));
4426 DCHECK(ToRegister(instr->object()).is(KeyedStoreIC::ReceiverRegister())); 4712 DCHECK(ToRegister(instr->object()).is(KeyedStoreIC::ReceiverRegister()));
4427 DCHECK(ToRegister(instr->key()).is(KeyedStoreIC::NameRegister())); 4713 DCHECK(ToRegister(instr->key()).is(KeyedStoreIC::NameRegister()));
4428 DCHECK(ToRegister(instr->value()).is(KeyedStoreIC::ValueRegister())); 4714 DCHECK(ToRegister(instr->value()).is(KeyedStoreIC::ValueRegister()));
4429 4715
4430 Handle<Code> ic = instr->strict_mode() == STRICT 4716 Handle<Code> ic = (instr->strict_mode() == STRICT)
4431 ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict() 4717 ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
4432 : isolate()->builtins()->KeyedStoreIC_Initialize(); 4718 : isolate()->builtins()->KeyedStoreIC_Initialize();
4433 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS); 4719 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4434 } 4720 }
4435 4721
4436 4722
4437 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { 4723 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4438 Register object_reg = ToRegister(instr->object()); 4724 Register object_reg = ToRegister(instr->object());
4439 Register scratch = scratch0(); 4725 Register scratch = scratch0();
4440 4726
4441 Handle<Map> from_map = instr->original_map(); 4727 Handle<Map> from_map = instr->original_map();
4442 Handle<Map> to_map = instr->transitioned_map(); 4728 Handle<Map> to_map = instr->transitioned_map();
4443 ElementsKind from_kind = instr->from_kind(); 4729 ElementsKind from_kind = instr->from_kind();
4444 ElementsKind to_kind = instr->to_kind(); 4730 ElementsKind to_kind = instr->to_kind();
4445 4731
4446 Label not_applicable; 4732 Label not_applicable;
4447 __ ldr(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset)); 4733 __ LoadP(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4448 __ cmp(scratch, Operand(from_map)); 4734 __ Cmpi(scratch, Operand(from_map), r0);
4449 __ b(ne, &not_applicable); 4735 __ bne(&not_applicable);
4450 4736
4451 if (IsSimpleMapChangeTransition(from_kind, to_kind)) { 4737 if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
4452 Register new_map_reg = ToRegister(instr->new_map_temp()); 4738 Register new_map_reg = ToRegister(instr->new_map_temp());
4453 __ mov(new_map_reg, Operand(to_map)); 4739 __ mov(new_map_reg, Operand(to_map));
4454 __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset)); 4740 __ StoreP(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset),
4741 r0);
4455 // Write barrier. 4742 // Write barrier.
4456 __ RecordWriteForMap(object_reg, 4743 __ RecordWriteForMap(object_reg,
4457 new_map_reg, 4744 new_map_reg,
4458 scratch, 4745 scratch,
4459 GetLinkRegisterState(), 4746 GetLinkRegisterState(),
4460 kDontSaveFPRegs); 4747 kDontSaveFPRegs);
4461 } else { 4748 } else {
4462 DCHECK(ToRegister(instr->context()).is(cp)); 4749 DCHECK(ToRegister(instr->context()).is(cp));
4463 DCHECK(object_reg.is(r0)); 4750 DCHECK(object_reg.is(r3));
4464 PushSafepointRegistersScope scope(this); 4751 PushSafepointRegistersScope scope(this);
4465 __ Move(r1, to_map); 4752 __ Move(r4, to_map);
4466 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE; 4753 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
4467 TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array); 4754 TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
4468 __ CallStub(&stub); 4755 __ CallStub(&stub);
4469 RecordSafepointWithRegisters( 4756 RecordSafepointWithRegisters(
4470 instr->pointer_map(), 0, Safepoint::kLazyDeopt); 4757 instr->pointer_map(), 0, Safepoint::kLazyDeopt);
4471 } 4758 }
4472 __ bind(&not_applicable); 4759 __ bind(&not_applicable);
4473 } 4760 }
4474 4761
4475 4762
4476 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { 4763 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4477 Register object = ToRegister(instr->object()); 4764 Register object = ToRegister(instr->object());
4478 Register temp = ToRegister(instr->temp()); 4765 Register temp = ToRegister(instr->temp());
4479 Label no_memento_found; 4766 Label no_memento_found;
4480 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found); 4767 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
4481 DeoptimizeIf(eq, instr->environment()); 4768 DeoptimizeIf(eq, instr->environment());
4482 __ bind(&no_memento_found); 4769 __ bind(&no_memento_found);
4483 } 4770 }
4484 4771
4485 4772
4486 void LCodeGen::DoStringAdd(LStringAdd* instr) { 4773 void LCodeGen::DoStringAdd(LStringAdd* instr) {
4487 DCHECK(ToRegister(instr->context()).is(cp)); 4774 DCHECK(ToRegister(instr->context()).is(cp));
4488 DCHECK(ToRegister(instr->left()).is(r1)); 4775 DCHECK(ToRegister(instr->left()).is(r4));
4489 DCHECK(ToRegister(instr->right()).is(r0)); 4776 DCHECK(ToRegister(instr->right()).is(r3));
4490 StringAddStub stub(isolate(), 4777 StringAddStub stub(isolate(),
4491 instr->hydrogen()->flags(), 4778 instr->hydrogen()->flags(),
4492 instr->hydrogen()->pretenure_flag()); 4779 instr->hydrogen()->pretenure_flag());
4493 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 4780 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4494 } 4781 }
4495 4782
4496 4783
4497 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { 4784 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4498 class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode { 4785 class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
4499 public: 4786 public:
(...skipping 20 matching lines...)
4520 4807
4521 4808
4522 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) { 4809 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4523 Register string = ToRegister(instr->string()); 4810 Register string = ToRegister(instr->string());
4524 Register result = ToRegister(instr->result()); 4811 Register result = ToRegister(instr->result());
4525 Register scratch = scratch0(); 4812 Register scratch = scratch0();
4526 4813
4527 // TODO(3095996): Get rid of this. For now, we need to make the 4814 // TODO(3095996): Get rid of this. For now, we need to make the
4528 // result register contain a valid pointer because it is already 4815 // result register contain a valid pointer because it is already
4529 // contained in the register pointer map. 4816 // contained in the register pointer map.
4530 __ mov(result, Operand::Zero()); 4817 __ li(result, Operand::Zero());
4531 4818
4532 PushSafepointRegistersScope scope(this); 4819 PushSafepointRegistersScope scope(this);
4533 __ push(string); 4820 __ push(string);
4534 // Push the index as a smi. This is safe because of the checks in 4821 // Push the index as a smi. This is safe because of the checks in
4535 // DoStringCharCodeAt above. 4822 // DoStringCharCodeAt above.
4536 if (instr->index()->IsConstantOperand()) { 4823 if (instr->index()->IsConstantOperand()) {
4537 int const_index = ToInteger32(LConstantOperand::cast(instr->index())); 4824 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
4538 __ mov(scratch, Operand(Smi::FromInt(const_index))); 4825 __ LoadSmiLiteral(scratch, Smi::FromInt(const_index));
4539 __ push(scratch); 4826 __ push(scratch);
4540 } else { 4827 } else {
4541 Register index = ToRegister(instr->index()); 4828 Register index = ToRegister(instr->index());
4542 __ SmiTag(index); 4829 __ SmiTag(index);
4543 __ push(index); 4830 __ push(index);
4544 } 4831 }
4545 CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr, 4832 CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
4546 instr->context()); 4833 instr->context());
4547 __ AssertSmi(r0); 4834 __ AssertSmi(r3);
4548 __ SmiUntag(r0); 4835 __ SmiUntag(r3);
4549 __ StoreToSafepointRegisterSlot(r0, result); 4836 __ StoreToSafepointRegisterSlot(r3, result);
4550 } 4837 }
4551 4838
4552 4839
4553 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { 4840 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4554 class DeferredStringCharFromCode V8_FINAL : public LDeferredCode { 4841 class DeferredStringCharFromCode: public LDeferredCode {
4555 public: 4842 public:
4556 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr) 4843 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
4557 : LDeferredCode(codegen), instr_(instr) { } 4844 : LDeferredCode(codegen), instr_(instr) { }
4558 virtual void Generate() V8_OVERRIDE { 4845 virtual void Generate() V8_OVERRIDE {
4559 codegen()->DoDeferredStringCharFromCode(instr_); 4846 codegen()->DoDeferredStringCharFromCode(instr_);
4560 } 4847 }
4561 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } 4848 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4562 private: 4849 private:
4563 LStringCharFromCode* instr_; 4850 LStringCharFromCode* instr_;
4564 }; 4851 };
4565 4852
4566 DeferredStringCharFromCode* deferred = 4853 DeferredStringCharFromCode* deferred =
4567 new(zone()) DeferredStringCharFromCode(this, instr); 4854 new(zone()) DeferredStringCharFromCode(this, instr);
4568 4855
4569 DCHECK(instr->hydrogen()->value()->representation().IsInteger32()); 4856 DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
4570 Register char_code = ToRegister(instr->char_code()); 4857 Register char_code = ToRegister(instr->char_code());
4571 Register result = ToRegister(instr->result()); 4858 Register result = ToRegister(instr->result());
4572 DCHECK(!char_code.is(result)); 4859 DCHECK(!char_code.is(result));
4573 4860
4574 __ cmp(char_code, Operand(String::kMaxOneByteCharCode)); 4861 __ cmpli(char_code, Operand(String::kMaxOneByteCharCode));
4575 __ b(hi, deferred->entry()); 4862 __ bgt(deferred->entry());
4576 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex); 4863 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
4577 __ add(result, result, Operand(char_code, LSL, kPointerSizeLog2)); 4864 __ ShiftLeftImm(r0, char_code, Operand(kPointerSizeLog2));
4578 __ ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize)); 4865 __ add(result, result, r0);
4866 __ LoadP(result, FieldMemOperand(result, FixedArray::kHeaderSize));
4579 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); 4867 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4580 __ cmp(result, ip); 4868 __ cmp(result, ip);
4581 __ b(eq, deferred->entry()); 4869 __ beq(deferred->entry());
4582 __ bind(deferred->exit()); 4870 __ bind(deferred->exit());
4583 } 4871 }
4584 4872
4585 4873
4586 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) { 4874 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4587 Register char_code = ToRegister(instr->char_code()); 4875 Register char_code = ToRegister(instr->char_code());
4588 Register result = ToRegister(instr->result()); 4876 Register result = ToRegister(instr->result());
4589 4877
4590 // TODO(3095996): Get rid of this. For now, we need to make the 4878 // TODO(3095996): Get rid of this. For now, we need to make the
4591 // result register contain a valid pointer because it is already 4879 // result register contain a valid pointer because it is already
4592 // contained in the register pointer map. 4880 // contained in the register pointer map.
4593 __ mov(result, Operand::Zero()); 4881 __ li(result, Operand::Zero());
4594 4882
4595 PushSafepointRegistersScope scope(this); 4883 PushSafepointRegistersScope scope(this);
4596 __ SmiTag(char_code); 4884 __ SmiTag(char_code);
4597 __ push(char_code); 4885 __ push(char_code);
4598 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context()); 4886 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
4599 __ StoreToSafepointRegisterSlot(r0, result); 4887 __ StoreToSafepointRegisterSlot(r3, result);
4600 } 4888 }
4601 4889
4602 4890
4603 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { 4891 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4604 LOperand* input = instr->value(); 4892 LOperand* input = instr->value();
4605 DCHECK(input->IsRegister() || input->IsStackSlot()); 4893 DCHECK(input->IsRegister() || input->IsStackSlot());
4606 LOperand* output = instr->result(); 4894 LOperand* output = instr->result();
4607 DCHECK(output->IsDoubleRegister()); 4895 DCHECK(output->IsDoubleRegister());
4608 SwVfpRegister single_scratch = double_scratch0().low();
4609 if (input->IsStackSlot()) { 4896 if (input->IsStackSlot()) {
4610 Register scratch = scratch0(); 4897 Register scratch = scratch0();
4611 __ ldr(scratch, ToMemOperand(input)); 4898 __ LoadP(scratch, ToMemOperand(input));
4612 __ vmov(single_scratch, scratch); 4899 __ ConvertIntToDouble(scratch, ToDoubleRegister(output));
4613 } else { 4900 } else {
4614 __ vmov(single_scratch, ToRegister(input)); 4901 __ ConvertIntToDouble(ToRegister(input), ToDoubleRegister(output));
4615 } 4902 }
4616 __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch);
4617 } 4903 }
4618 4904
4619 4905
4620 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { 4906 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4621 LOperand* input = instr->value(); 4907 LOperand* input = instr->value();
4622 LOperand* output = instr->result(); 4908 LOperand* output = instr->result();
4623 4909 __ ConvertUnsignedIntToDouble(ToRegister(input), ToDoubleRegister(output));
4624 SwVfpRegister flt_scratch = double_scratch0().low();
4625 __ vmov(flt_scratch, ToRegister(input));
4626 __ vcvt_f64_u32(ToDoubleRegister(output), flt_scratch);
4627 } 4910 }
4628 4911
4629 4912
4630 void LCodeGen::DoNumberTagI(LNumberTagI* instr) { 4913 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4631 class DeferredNumberTagI V8_FINAL : public LDeferredCode { 4914 class DeferredNumberTagI V8_FINAL : public LDeferredCode {
4632 public: 4915 public:
4633 DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr) 4916 DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
4634 : LDeferredCode(codegen), instr_(instr) { } 4917 : LDeferredCode(codegen), instr_(instr) { }
4635 virtual void Generate() V8_OVERRIDE { 4918 virtual void Generate() V8_OVERRIDE {
4636 codegen()->DoDeferredNumberTagIU(instr_, 4919 codegen()->DoDeferredNumberTagIU(instr_,
4637 instr_->value(), 4920 instr_->value(),
4638 instr_->temp1(), 4921 instr_->temp1(),
4639 instr_->temp2(), 4922 instr_->temp2(),
4640 SIGNED_INT32); 4923 SIGNED_INT32);
4641 } 4924 }
4642 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } 4925 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4643 private: 4926 private:
4644 LNumberTagI* instr_; 4927 LNumberTagI* instr_;
4645 }; 4928 };
4646 4929
4647 Register src = ToRegister(instr->value()); 4930 Register src = ToRegister(instr->value());
4648 Register dst = ToRegister(instr->result()); 4931 Register dst = ToRegister(instr->result());
4649 4932
4650 DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr); 4933 DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
4651 __ SmiTag(dst, src, SetCC); 4934 #if V8_TARGET_ARCH_PPC64
4652 __ b(vs, deferred->entry()); 4935 __ SmiTag(dst, src);
4936 #else
4937 __ SmiTagCheckOverflow(dst, src, r0);
4938 __ BranchOnOverflow(deferred->entry());
4939 #endif
4653 __ bind(deferred->exit()); 4940 __ bind(deferred->exit());
4654 } 4941 }
4655 4942
4656 4943
4657 void LCodeGen::DoNumberTagU(LNumberTagU* instr) { 4944 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4658 class DeferredNumberTagU V8_FINAL : public LDeferredCode { 4945 class DeferredNumberTagU V8_FINAL : public LDeferredCode {
4659 public: 4946 public:
4660 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr) 4947 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4661 : LDeferredCode(codegen), instr_(instr) { } 4948 : LDeferredCode(codegen), instr_(instr) { }
4662 virtual void Generate() V8_OVERRIDE { 4949 virtual void Generate() V8_OVERRIDE {
4663 codegen()->DoDeferredNumberTagIU(instr_, 4950 codegen()->DoDeferredNumberTagIU(instr_,
4664 instr_->value(), 4951 instr_->value(),
4665 instr_->temp1(), 4952 instr_->temp1(),
4666 instr_->temp2(), 4953 instr_->temp2(),
4667 UNSIGNED_INT32); 4954 UNSIGNED_INT32);
4668 } 4955 }
4669 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } 4956 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4670 private: 4957 private:
4671 LNumberTagU* instr_; 4958 LNumberTagU* instr_;
4672 }; 4959 };
4673 4960
4674 Register input = ToRegister(instr->value()); 4961 Register input = ToRegister(instr->value());
4675 Register result = ToRegister(instr->result()); 4962 Register result = ToRegister(instr->result());
4676 4963
4677 DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr); 4964 DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
4678 __ cmp(input, Operand(Smi::kMaxValue)); 4965 __ Cmpli(input, Operand(Smi::kMaxValue), r0);
4679 __ b(hi, deferred->entry()); 4966 __ bgt(deferred->entry());
4680 __ SmiTag(result, input); 4967 __ SmiTag(result, input);
4681 __ bind(deferred->exit()); 4968 __ bind(deferred->exit());
4682 } 4969 }
4683 4970
4684 4971
4685 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, 4972 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
4686 LOperand* value, 4973 LOperand* value,
4687 LOperand* temp1, 4974 LOperand* temp1,
4688 LOperand* temp2, 4975 LOperand* temp2,
4689 IntegerSignedness signedness) { 4976 IntegerSignedness signedness) {
4690 Label done, slow; 4977 Label done, slow;
4691 Register src = ToRegister(value); 4978 Register src = ToRegister(value);
4692 Register dst = ToRegister(instr->result()); 4979 Register dst = ToRegister(instr->result());
4693 Register tmp1 = scratch0(); 4980 Register tmp1 = scratch0();
4694 Register tmp2 = ToRegister(temp1); 4981 Register tmp2 = ToRegister(temp1);
4695 Register tmp3 = ToRegister(temp2); 4982 Register tmp3 = ToRegister(temp2);
4696 LowDwVfpRegister dbl_scratch = double_scratch0(); 4983 DoubleRegister dbl_scratch = double_scratch0();
4697 4984
4698 if (signedness == SIGNED_INT32) { 4985 if (signedness == SIGNED_INT32) {
4699 // There was overflow, so bits 30 and 31 of the original integer 4986 // There was overflow, so bits 30 and 31 of the original integer
4700 // disagree. Try to allocate a heap number in new space and store 4987 // disagree. Try to allocate a heap number in new space and store
4701 // the value in there. If that fails, call the runtime system. 4988 // the value in there. If that fails, call the runtime system.
4702 if (dst.is(src)) { 4989 if (dst.is(src)) {
4703 __ SmiUntag(src, dst); 4990 __ SmiUntag(src, dst);
4704 __ eor(src, src, Operand(0x80000000)); 4991 __ xoris(src, src, Operand(HeapNumber::kSignMask >> 16));
4705 } 4992 }
4706 __ vmov(dbl_scratch.low(), src); 4993 __ ConvertIntToDouble(src, dbl_scratch);
4707 __ vcvt_f64_s32(dbl_scratch, dbl_scratch.low());
4708 } else { 4994 } else {
4709 __ vmov(dbl_scratch.low(), src); 4995 __ ConvertUnsignedIntToDouble(src, dbl_scratch);
4710 __ vcvt_f64_u32(dbl_scratch, dbl_scratch.low());
4711 } 4996 }
4712 4997
4713 if (FLAG_inline_new) { 4998 if (FLAG_inline_new) {
4714 __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex); 4999 __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
4715 __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, DONT_TAG_RESULT); 5000 __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow);
4716 __ b(&done); 5001 __ b(&done);
4717 } 5002 }
4718 5003
4719 // Slow case: Call the runtime system to do the number allocation. 5004 // Slow case: Call the runtime system to do the number allocation.
4720 __ bind(&slow); 5005 __ bind(&slow);
4721 { 5006 {
4722 // TODO(3095996): Put a valid pointer value in the stack slot where the 5007 // TODO(3095996): Put a valid pointer value in the stack slot where the
4723 // result register is stored, as this register is in the pointer map, but 5008 // result register is stored, as this register is in the pointer map, but
4724 // contains an integer value. 5009 // contains an integer value.
4725 __ mov(dst, Operand::Zero()); 5010 __ li(dst, Operand::Zero());
4726 5011
4727 // Preserve the value of all registers. 5012 // Preserve the value of all registers.
4728 PushSafepointRegistersScope scope(this); 5013 PushSafepointRegistersScope scope(this);
4729 5014
4730 // NumberTagI and NumberTagD use the context from the frame, rather than 5015 // NumberTagI and NumberTagD use the context from the frame, rather than
4731 // the environment's HContext or HInlinedContext value. 5016 // the environment's HContext or HInlinedContext value.
4732 // They only call Runtime::kAllocateHeapNumber. 5017 // They only call Runtime::kAllocateHeapNumber.
4733 // The corresponding HChange instructions are added in a phase that does 5018 // The corresponding HChange instructions are added in a phase that does
4734 // not have easy access to the local context. 5019 // not have easy access to the local context.
4735 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); 5020 __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4736 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); 5021 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4737 RecordSafepointWithRegisters( 5022 RecordSafepointWithRegisters(
4738 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); 5023 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4739 __ sub(r0, r0, Operand(kHeapObjectTag)); 5024 __ StoreToSafepointRegisterSlot(r3, dst);
4740 __ StoreToSafepointRegisterSlot(r0, dst);
4741 } 5025 }
4742 5026
4743 // Done. Put the value in dbl_scratch into the value of the allocated heap 5027 // Done. Put the value in dbl_scratch into the value of the allocated heap
4744 // number. 5028 // number.
4745 __ bind(&done); 5029 __ bind(&done);
4746 __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset); 5030 __ stfd(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
4747 __ add(dst, dst, Operand(kHeapObjectTag));
4748 } 5031 }
4749 5032
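Illustrative sketch (reviewer note, not part of the patch): the SIGNED_INT32 path only runs after SmiTagCheckOverflow reported overflow, i.e. bits 30 and 31 of the source disagreed; untagging then brings the value back with its sign bit flipped, and the xoris with (HeapNumber::kSignMask >> 16) flips it back. A worked 32-bit example (helper names are made up):

    #include <cstdint>
    constexpr uint32_t SmiTag32(uint32_t v)   { return v << 1; }                       // tag: shift left by one
    constexpr uint32_t SmiUntag32(uint32_t t) { return (t >> 1) | (t & 0x80000000u); }  // arithmetic shift right by one
    static_assert(SmiUntag32(SmiTag32(0x40000000u)) == 0xC0000000u, "overflow flips the sign bit");
    static_assert((SmiUntag32(SmiTag32(0x40000000u)) ^ 0x80000000u) == 0x40000000u, "xor with the sign mask restores the input");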
4750 5033
4751 void LCodeGen::DoNumberTagD(LNumberTagD* instr) { 5034 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4752 class DeferredNumberTagD V8_FINAL : public LDeferredCode { 5035 class DeferredNumberTagD V8_FINAL : public LDeferredCode {
4753 public: 5036 public:
4754 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) 5037 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4755 : LDeferredCode(codegen), instr_(instr) { } 5038 : LDeferredCode(codegen), instr_(instr) { }
4756 virtual void Generate() V8_OVERRIDE { 5039 virtual void Generate() V8_OVERRIDE {
4757 codegen()->DoDeferredNumberTagD(instr_); 5040 codegen()->DoDeferredNumberTagD(instr_);
4758 } 5041 }
4759 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } 5042 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4760 private: 5043 private:
4761 LNumberTagD* instr_; 5044 LNumberTagD* instr_;
4762 }; 5045 };
4763 5046
4764 DwVfpRegister input_reg = ToDoubleRegister(instr->value()); 5047 DoubleRegister input_reg = ToDoubleRegister(instr->value());
4765 Register scratch = scratch0(); 5048 Register scratch = scratch0();
4766 Register reg = ToRegister(instr->result()); 5049 Register reg = ToRegister(instr->result());
4767 Register temp1 = ToRegister(instr->temp()); 5050 Register temp1 = ToRegister(instr->temp());
4768 Register temp2 = ToRegister(instr->temp2()); 5051 Register temp2 = ToRegister(instr->temp2());
4769 5052
4770 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); 5053 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4771 if (FLAG_inline_new) { 5054 if (FLAG_inline_new) {
4772 __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex); 5055 __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
4773 // We want the untagged address first for performance 5056 __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
4774 __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
4775 DONT_TAG_RESULT);
4776 } else { 5057 } else {
4777 __ jmp(deferred->entry()); 5058 __ b(deferred->entry());
4778 } 5059 }
4779 __ bind(deferred->exit()); 5060 __ bind(deferred->exit());
4780 __ vstr(input_reg, reg, HeapNumber::kValueOffset); 5061 __ stfd(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
4781 // Now that we have finished with the object's real address tag it
4782 __ add(reg, reg, Operand(kHeapObjectTag));
4783 } 5062 }
4784 5063
4785 5064
4786 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { 5065 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4787 // TODO(3095996): Get rid of this. For now, we need to make the 5066 // TODO(3095996): Get rid of this. For now, we need to make the
4788 // result register contain a valid pointer because it is already 5067 // result register contain a valid pointer because it is already
4789 // contained in the register pointer map. 5068 // contained in the register pointer map.
4790 Register reg = ToRegister(instr->result()); 5069 Register reg = ToRegister(instr->result());
4791 __ mov(reg, Operand::Zero()); 5070 __ li(reg, Operand::Zero());
4792 5071
4793 PushSafepointRegistersScope scope(this); 5072 PushSafepointRegistersScope scope(this);
4794 // NumberTagI and NumberTagD use the context from the frame, rather than 5073 // NumberTagI and NumberTagD use the context from the frame, rather than
4795 // the environment's HContext or HInlinedContext value. 5074 // the environment's HContext or HInlinedContext value.
4796 // They only call Runtime::kAllocateHeapNumber. 5075 // They only call Runtime::kAllocateHeapNumber.
4797 // The corresponding HChange instructions are added in a phase that does 5076 // The corresponding HChange instructions are added in a phase that does
4798 // not have easy access to the local context. 5077 // not have easy access to the local context.
4799 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); 5078 __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4800 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); 5079 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4801 RecordSafepointWithRegisters( 5080 RecordSafepointWithRegisters(
4802 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); 5081 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4803 __ sub(r0, r0, Operand(kHeapObjectTag)); 5082 __ StoreToSafepointRegisterSlot(r3, reg);
4804 __ StoreToSafepointRegisterSlot(r0, reg);
4805 } 5083 }
4806 5084
4807 5085
4808 void LCodeGen::DoSmiTag(LSmiTag* instr) { 5086 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4809 HChange* hchange = instr->hydrogen(); 5087 HChange* hchange = instr->hydrogen();
4810 Register input = ToRegister(instr->value()); 5088 Register input = ToRegister(instr->value());
4811 Register output = ToRegister(instr->result()); 5089 Register output = ToRegister(instr->result());
4812 if (hchange->CheckFlag(HValue::kCanOverflow) && 5090 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4813 hchange->value()->CheckFlag(HValue::kUint32)) { 5091 hchange->value()->CheckFlag(HValue::kUint32)) {
4814 __ tst(input, Operand(0xc0000000)); 5092 __ TestUnsignedSmiCandidate(input, r0);
4815 DeoptimizeIf(ne, instr->environment()); 5093 DeoptimizeIf(ne, instr->environment(), cr0);
4816 } 5094 }
5095 #if !V8_TARGET_ARCH_PPC64
4817 if (hchange->CheckFlag(HValue::kCanOverflow) && 5096 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4818 !hchange->value()->CheckFlag(HValue::kUint32)) { 5097 !hchange->value()->CheckFlag(HValue::kUint32)) {
4819 __ SmiTag(output, input, SetCC); 5098 __ SmiTagCheckOverflow(output, input, r0);
4820 DeoptimizeIf(vs, instr->environment()); 5099 DeoptimizeIf(lt, instr->environment(), cr0);
4821 } else { 5100 } else {
5101 #endif
4822 __ SmiTag(output, input); 5102 __ SmiTag(output, input);
5103 #if !V8_TARGET_ARCH_PPC64
4823 } 5104 }
5105 #endif
4824 } 5106 }
4825 5107
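Illustrative sketch (reviewer note, not part of the patch): the kUint32 guard above deoptimizes when an unsigned value cannot be represented as a Smi; on a 32-bit target that is exactly the "top two bits set" test the ARM code spelled as tst 0xc0000000, since the 31-bit Smi payload only covers values below 2^30. Sketch (helper name is made up; the PPC64 limit is wider):

    #include <cstdint>
    constexpr bool IsUnsignedSmiCandidate32(uint32_t v) { return (v & 0xC0000000u) == 0; }
    static_assert( IsUnsignedSmiCandidate32(0x3FFFFFFFu), "2^30 - 1 still fits a 32-bit Smi");
    static_assert(!IsUnsignedSmiCandidate32(0x40000000u), "2^30 no longer fits");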
4826 5108
4827 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { 5109 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
5110 Register scratch = scratch0();
4828 Register input = ToRegister(instr->value()); 5111 Register input = ToRegister(instr->value());
4829 Register result = ToRegister(instr->result()); 5112 Register result = ToRegister(instr->result());
4830 if (instr->needs_check()) { 5113 if (instr->needs_check()) {
4831 STATIC_ASSERT(kHeapObjectTag == 1); 5114 STATIC_ASSERT(kHeapObjectTag == 1);
4832 // If the input is a HeapObject, SmiUntag will set the carry flag. 5115 // If the input is a HeapObject, value of scratch won't be zero.
4833 __ SmiUntag(result, input, SetCC); 5116 __ andi(scratch, input, Operand(kHeapObjectTag));
4834 DeoptimizeIf(cs, instr->environment()); 5117 __ SmiUntag(result, input);
5118 DeoptimizeIf(ne, instr->environment(), cr0);
4835 } else { 5119 } else {
4836 __ SmiUntag(result, input); 5120 __ SmiUntag(result, input);
4837 } 5121 }
4838 } 5122 }
4839 5123
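Illustrative sketch (reviewer note, not part of the patch): the needs_check path replaces ARM's carry-flag trick with an explicit test of the low tag bit. With kSmiTag == 0 and kHeapObjectTag == 1, a Smi has bit 0 clear and a heap pointer has it set, so the single andi against kHeapObjectTag decides whether to deoptimize. Sketch (helper names are made up):

    #include <cstdint>
    constexpr bool IsSmi(uintptr_t tagged)        { return (tagged & 1) == 0; }  // kSmiTag == 0
    constexpr bool IsHeapObject(uintptr_t tagged) { return (tagged & 1) == 1; }  // kHeapObjectTag == 1
    static_assert(IsSmi(0x1000) && IsHeapObject(0x1001), "the low bit separates the two cases");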
4840 5124
4841 void LCodeGen::EmitNumberUntagD(Register input_reg, 5125 void LCodeGen::EmitNumberUntagD(Register input_reg,
4842 DwVfpRegister result_reg, 5126 DoubleRegister result_reg,
4843 bool can_convert_undefined_to_nan, 5127 bool can_convert_undefined_to_nan,
4844 bool deoptimize_on_minus_zero, 5128 bool deoptimize_on_minus_zero,
4845 LEnvironment* env, 5129 LEnvironment* env,
4846 NumberUntagDMode mode) { 5130 NumberUntagDMode mode) {
4847 Register scratch = scratch0(); 5131 Register scratch = scratch0();
4848 SwVfpRegister flt_scratch = double_scratch0().low();
4849 DCHECK(!result_reg.is(double_scratch0())); 5132 DCHECK(!result_reg.is(double_scratch0()));
5133
4850 Label convert, load_smi, done; 5134 Label convert, load_smi, done;
5135
4851 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { 5136 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
4852 // Smi check. 5137 // Smi check.
4853 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); 5138 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
5139
4854 // Heap number map check. 5140 // Heap number map check.
4855 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); 5141 __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4856 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); 5142 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4857 __ cmp(scratch, Operand(ip)); 5143 __ cmp(scratch, ip);
4858 if (can_convert_undefined_to_nan) { 5144 if (can_convert_undefined_to_nan) {
4859 __ b(ne, &convert); 5145 __ bne(&convert);
4860 } else { 5146 } else {
4861 DeoptimizeIf(ne, env); 5147 DeoptimizeIf(ne, env);
4862 } 5148 }
4863 // load heap number 5149 // load heap number
4864 __ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag); 5150 __ lfd(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
4865 if (deoptimize_on_minus_zero) { 5151 if (deoptimize_on_minus_zero) {
4866 __ VmovLow(scratch, result_reg); 5152 #if V8_TARGET_ARCH_PPC64
4867 __ cmp(scratch, Operand::Zero()); 5153 __ MovDoubleToInt64(scratch, result_reg);
4868 __ b(ne, &done); 5154 // rotate left by one for simple compare.
4869 __ VmovHigh(scratch, result_reg); 5155 __ rldicl(scratch, scratch, 1, 0);
4870 __ cmp(scratch, Operand(HeapNumber::kSignMask)); 5156 __ cmpi(scratch, Operand(1));
5157 #else
5158 __ MovDoubleToInt64(scratch, ip, result_reg);
5159 __ cmpi(ip, Operand::Zero());
5160 __ bne(&done);
5161 __ Cmpi(scratch, Operand(HeapNumber::kSignMask), r0);
5162 #endif
4871 DeoptimizeIf(eq, env); 5163 DeoptimizeIf(eq, env);
4872 } 5164 }
4873 __ jmp(&done); 5165 __ b(&done);
4874 if (can_convert_undefined_to_nan) { 5166 if (can_convert_undefined_to_nan) {
4875 __ bind(&convert); 5167 __ bind(&convert);
4876 // Convert undefined (and hole) to NaN. 5168 // Convert undefined (and hole) to NaN.
4877 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); 5169 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4878 __ cmp(input_reg, Operand(ip)); 5170 __ cmp(input_reg, ip);
4879 DeoptimizeIf(ne, env); 5171 DeoptimizeIf(ne, env);
4880 __ LoadRoot(scratch, Heap::kNanValueRootIndex); 5172 __ LoadRoot(scratch, Heap::kNanValueRootIndex);
4881 __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag); 5173 __ lfd(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
4882 __ jmp(&done); 5174 __ b(&done);
4883 } 5175 }
4884 } else { 5176 } else {
4885 __ SmiUntag(scratch, input_reg); 5177 __ SmiUntag(scratch, input_reg);
4886 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); 5178 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
4887 } 5179 }
4888 // Smi to double register conversion 5180 // Smi to double register conversion
4889 __ bind(&load_smi); 5181 __ bind(&load_smi);
4890 // scratch: untagged value of input_reg 5182 // scratch: untagged value of input_reg
4891 __ vmov(flt_scratch, scratch); 5183 __ ConvertIntToDouble(scratch, result_reg);
4892 __ vcvt_f64_s32(result_reg, flt_scratch);
4893 __ bind(&done); 5184 __ bind(&done);
4894 } 5185 }
4895 5186
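Illustrative sketch (reviewer note, not part of the patch): the PPC64 deoptimize_on_minus_zero check moves the double's bit pattern into a GPR and rotates it left by one, so -0.0 (0x8000000000000000) becomes exactly 1 and no other bit pattern does; a single cmpi against 1 then suffices. Worked check (helper name is made up):

    #include <cstdint>
    constexpr uint64_t RotateLeft1(uint64_t x) { return (x << 1) | (x >> 63); }
    static_assert(RotateLeft1(0x8000000000000000ULL) == 1, "-0.0 rotates to exactly 1");
    static_assert(RotateLeft1(0x0000000000000000ULL) == 0, "+0.0 does not");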
4896 5187
4897 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { 5188 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
4898 Register input_reg = ToRegister(instr->value()); 5189 Register input_reg = ToRegister(instr->value());
4899 Register scratch1 = scratch0(); 5190 Register scratch1 = scratch0();
4900 Register scratch2 = ToRegister(instr->temp()); 5191 Register scratch2 = ToRegister(instr->temp());
4901 LowDwVfpRegister double_scratch = double_scratch0(); 5192 DoubleRegister double_scratch = double_scratch0();
4902 DwVfpRegister double_scratch2 = ToDoubleRegister(instr->temp2()); 5193 DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());
4903 5194
4904 DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2)); 5195 DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
4905 DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1)); 5196 DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));
4906 5197
4907 Label done; 5198 Label done;
4908 5199
4909 // The input was optimistically untagged; revert it.
4910 // The carry flag is set when we reach this deferred code as we just executed
4911 // SmiUntag(heap_object, SetCC)
4912 STATIC_ASSERT(kHeapObjectTag == 1);
4913 __ adc(scratch2, input_reg, Operand(input_reg));
4914
4915 // Heap number map check. 5200 // Heap number map check.
4916 __ ldr(scratch1, FieldMemOperand(scratch2, HeapObject::kMapOffset)); 5201 __ LoadP(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4917 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); 5202 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4918 __ cmp(scratch1, Operand(ip)); 5203 __ cmp(scratch1, ip);
4919 5204
4920 if (instr->truncating()) { 5205 if (instr->truncating()) {
4921 // Performs a truncating conversion of a floating point number as used by 5206 // Performs a truncating conversion of a floating point number as used by
4922 // the JS bitwise operations. 5207 // the JS bitwise operations.
4923 Label no_heap_number, check_bools, check_false; 5208 Label no_heap_number, check_bools, check_false;
4924 __ b(ne, &no_heap_number); 5209 __ bne(&no_heap_number);
5210 __ mr(scratch2, input_reg);
4925 __ TruncateHeapNumberToI(input_reg, scratch2); 5211 __ TruncateHeapNumberToI(input_reg, scratch2);
4926 __ b(&done); 5212 __ b(&done);
4927 5213
4928 // Check for Oddballs. Undefined/False is converted to zero and True to one 5214 // Check for Oddballs. Undefined/False is converted to zero and True to one
4929 // for truncating conversions. 5215 // for truncating conversions.
4930 __ bind(&no_heap_number); 5216 __ bind(&no_heap_number);
4931 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); 5217 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4932 __ cmp(scratch2, Operand(ip)); 5218 __ cmp(input_reg, ip);
4933 __ b(ne, &check_bools); 5219 __ bne(&check_bools);
4934 __ mov(input_reg, Operand::Zero()); 5220 __ li(input_reg, Operand::Zero());
4935 __ b(&done); 5221 __ b(&done);
4936 5222
4937 __ bind(&check_bools); 5223 __ bind(&check_bools);
4938 __ LoadRoot(ip, Heap::kTrueValueRootIndex); 5224 __ LoadRoot(ip, Heap::kTrueValueRootIndex);
4939 __ cmp(scratch2, Operand(ip)); 5225 __ cmp(input_reg, ip);
4940 __ b(ne, &check_false); 5226 __ bne(&check_false);
4941 __ mov(input_reg, Operand(1)); 5227 __ li(input_reg, Operand(1));
4942 __ b(&done); 5228 __ b(&done);
4943 5229
4944 __ bind(&check_false); 5230 __ bind(&check_false);
4945 __ LoadRoot(ip, Heap::kFalseValueRootIndex); 5231 __ LoadRoot(ip, Heap::kFalseValueRootIndex);
4946 __ cmp(scratch2, Operand(ip)); 5232 __ cmp(input_reg, ip);
4947 DeoptimizeIf(ne, instr->environment()); 5233 DeoptimizeIf(ne, instr->environment());
4948 __ mov(input_reg, Operand::Zero()); 5234 __ li(input_reg, Operand::Zero());
4949 __ b(&done);
4950 } else { 5235 } else {
4951 // Deoptimize if we don't have a heap number. 5236 // Deoptimize if we don't have a heap number.
4952 DeoptimizeIf(ne, instr->environment()); 5237 DeoptimizeIf(ne, instr->environment());
4953 5238
4954 __ sub(ip, scratch2, Operand(kHeapObjectTag)); 5239 __ lfd(double_scratch2,
4955 __ vldr(double_scratch2, ip, HeapNumber::kValueOffset); 5240 FieldMemOperand(input_reg, HeapNumber::kValueOffset));
4956 __ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch); 5241 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
5242 // preserve heap number pointer in scratch2 for minus zero check below
5243 __ mr(scratch2, input_reg);
5244 }
5245 __ TryDoubleToInt32Exact(input_reg, double_scratch2,
5246 scratch1, double_scratch);
4957 DeoptimizeIf(ne, instr->environment()); 5247 DeoptimizeIf(ne, instr->environment());
4958 5248
4959 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 5249 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4960 __ cmp(input_reg, Operand::Zero()); 5250 __ cmpi(input_reg, Operand::Zero());
4961 __ b(ne, &done); 5251 __ bne(&done);
4962 __ VmovHigh(scratch1, double_scratch2); 5252 __ lwz(scratch1, FieldMemOperand(scratch2, HeapNumber::kValueOffset +
4963 __ tst(scratch1, Operand(HeapNumber::kSignMask)); 5253 Register::kExponentOffset));
4964 DeoptimizeIf(ne, instr->environment()); 5254 __ cmpwi(scratch1, Operand::Zero());
5255 DeoptimizeIf(lt, instr->environment());
4965 } 5256 }
4966 } 5257 }
4967 __ bind(&done); 5258 __ bind(&done);
4968 } 5259 }
4969 5260
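Illustrative sketch (reviewer note, not part of the patch): in the non-truncating path the minus-zero check loads only the sign/exponent word of the heap number (kValueOffset + Register::kExponentOffset) and deoptimizes when it compares below zero as a signed 32-bit value; once the value is already known to convert exactly to integer 0, a set sign bit can only mean -0.0. Worked check:

    #include <cstdint>
    // The sign/exponent word of -0.0 is 0x80000000, negative when read as int32; for +0.0 it
    // is 0, so the cmpwi/DeoptimizeIf(lt, ...) pair above fires only for -0.0.
    static_assert(static_cast<int32_t>(0x80000000u) < 0, "sign bit makes the word compare below zero");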
4970 5261
4971 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { 5262 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4972 class DeferredTaggedToI V8_FINAL : public LDeferredCode { 5263 class DeferredTaggedToI V8_FINAL : public LDeferredCode {
4973 public: 5264 public:
4974 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) 5265 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
(...skipping 10 matching lines...)
4985 DCHECK(input->IsRegister()); 5276 DCHECK(input->IsRegister());
4986 DCHECK(input->Equals(instr->result())); 5277 DCHECK(input->Equals(instr->result()));
4987 5278
4988 Register input_reg = ToRegister(input); 5279 Register input_reg = ToRegister(input);
4989 5280
4990 if (instr->hydrogen()->value()->representation().IsSmi()) { 5281 if (instr->hydrogen()->value()->representation().IsSmi()) {
4991 __ SmiUntag(input_reg); 5282 __ SmiUntag(input_reg);
4992 } else { 5283 } else {
4993 DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr); 5284 DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
4994 5285
4995 // Optimistically untag the input. 5286 // Branch to deferred code if the input is a HeapObject.
4996 // If the input is a HeapObject, SmiUntag will set the carry flag. 5287 __ JumpIfNotSmi(input_reg, deferred->entry());
4997 __ SmiUntag(input_reg, SetCC); 5288
4998 // Branch to deferred code if the input was tagged. 5289 __ SmiUntag(input_reg);
4999 // The deferred code will take care of restoring the tag.
5000 __ b(cs, deferred->entry());
5001 __ bind(deferred->exit()); 5290 __ bind(deferred->exit());
5002 } 5291 }
5003 } 5292 }
5004 5293
5005 5294
5006 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { 5295 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
5007 LOperand* input = instr->value(); 5296 LOperand* input = instr->value();
5008 DCHECK(input->IsRegister()); 5297 DCHECK(input->IsRegister());
5009 LOperand* result = instr->result(); 5298 LOperand* result = instr->result();
5010 DCHECK(result->IsDoubleRegister()); 5299 DCHECK(result->IsDoubleRegister());
5011 5300
5012 Register input_reg = ToRegister(input); 5301 Register input_reg = ToRegister(input);
5013 DwVfpRegister result_reg = ToDoubleRegister(result); 5302 DoubleRegister result_reg = ToDoubleRegister(result);
5014 5303
5015 HValue* value = instr->hydrogen()->value(); 5304 HValue* value = instr->hydrogen()->value();
5016 NumberUntagDMode mode = value->representation().IsSmi() 5305 NumberUntagDMode mode = value->representation().IsSmi()
5017 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED; 5306 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
5018 5307
5019 EmitNumberUntagD(input_reg, result_reg, 5308 EmitNumberUntagD(input_reg, result_reg,
5020 instr->hydrogen()->can_convert_undefined_to_nan(), 5309 instr->hydrogen()->can_convert_undefined_to_nan(),
5021 instr->hydrogen()->deoptimize_on_minus_zero(), 5310 instr->hydrogen()->deoptimize_on_minus_zero(),
5022 instr->environment(), 5311 instr->environment(),
5023 mode); 5312 mode);
5024 } 5313 }
5025 5314
5026 5315
5027 void LCodeGen::DoDoubleToI(LDoubleToI* instr) { 5316 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
5028 Register result_reg = ToRegister(instr->result()); 5317 Register result_reg = ToRegister(instr->result());
5029 Register scratch1 = scratch0(); 5318 Register scratch1 = scratch0();
5030 DwVfpRegister double_input = ToDoubleRegister(instr->value()); 5319 DoubleRegister double_input = ToDoubleRegister(instr->value());
5031 LowDwVfpRegister double_scratch = double_scratch0(); 5320 DoubleRegister double_scratch = double_scratch0();
5032 5321
5033 if (instr->truncating()) { 5322 if (instr->truncating()) {
5034 __ TruncateDoubleToI(result_reg, double_input); 5323 __ TruncateDoubleToI(result_reg, double_input);
5035 } else { 5324 } else {
5036 __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch); 5325 __ TryDoubleToInt32Exact(result_reg, double_input,
5326 scratch1, double_scratch);
5037 // Deoptimize if the input wasn't an int32 (inside a double). 5327 // Deoptimize if the input wasn't an int32 (inside a double).
5038 DeoptimizeIf(ne, instr->environment()); 5328 DeoptimizeIf(ne, instr->environment());
5039 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 5329 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
5040 Label done; 5330 Label done;
5041 __ cmp(result_reg, Operand::Zero()); 5331 __ cmpi(result_reg, Operand::Zero());
5042 __ b(ne, &done); 5332 __ bne(&done);
5043 __ VmovHigh(scratch1, double_input); 5333 #if V8_TARGET_ARCH_PPC64
5044 __ tst(scratch1, Operand(HeapNumber::kSignMask)); 5334 __ MovDoubleToInt64(scratch1, double_input);
5045 DeoptimizeIf(ne, instr->environment()); 5335 #else
5336 __ MovDoubleHighToInt(scratch1, double_input);
5337 #endif
5338 __ cmpi(scratch1, Operand::Zero());
5339 DeoptimizeIf(lt, instr->environment());
5046 __ bind(&done); 5340 __ bind(&done);
5047 } 5341 }
5048 } 5342 }
5049 } 5343 }
5050 5344
5051 5345
5052 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { 5346 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
5053 Register result_reg = ToRegister(instr->result()); 5347 Register result_reg = ToRegister(instr->result());
5054 Register scratch1 = scratch0(); 5348 Register scratch1 = scratch0();
5055 DwVfpRegister double_input = ToDoubleRegister(instr->value()); 5349 DoubleRegister double_input = ToDoubleRegister(instr->value());
5056 LowDwVfpRegister double_scratch = double_scratch0(); 5350 DoubleRegister double_scratch = double_scratch0();
5057 5351
5058 if (instr->truncating()) { 5352 if (instr->truncating()) {
5059 __ TruncateDoubleToI(result_reg, double_input); 5353 __ TruncateDoubleToI(result_reg, double_input);
5060 } else { 5354 } else {
5061 __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch); 5355 __ TryDoubleToInt32Exact(result_reg, double_input,
5356 scratch1, double_scratch);
5062 // Deoptimize if the input wasn't an int32 (inside a double). 5357 // Deoptimize if the input wasn't an int32 (inside a double).
5063 DeoptimizeIf(ne, instr->environment()); 5358 DeoptimizeIf(ne, instr->environment());
5064 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 5359 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
5065 Label done; 5360 Label done;
5066 __ cmp(result_reg, Operand::Zero()); 5361 __ cmpi(result_reg, Operand::Zero());
5067 __ b(ne, &done); 5362 __ bne(&done);
5068 __ VmovHigh(scratch1, double_input); 5363 #if V8_TARGET_ARCH_PPC64
5069 __ tst(scratch1, Operand(HeapNumber::kSignMask)); 5364 __ MovDoubleToInt64(scratch1, double_input);
5070 DeoptimizeIf(ne, instr->environment()); 5365 #else
5366 __ MovDoubleHighToInt(scratch1, double_input);
5367 #endif
5368 __ cmpi(scratch1, Operand::Zero());
5369 DeoptimizeIf(lt, instr->environment());
5071 __ bind(&done); 5370 __ bind(&done);
5072 } 5371 }
5073 } 5372 }
5074 __ SmiTag(result_reg, SetCC); 5373 #if V8_TARGET_ARCH_PPC64
5075 DeoptimizeIf(vs, instr->environment()); 5374 __ SmiTag(result_reg);
5375 #else
5376 __ SmiTagCheckOverflow(result_reg, r0);
5377 DeoptimizeIf(lt, instr->environment(), cr0);
5378 #endif
5076 } 5379 }
5077 5380
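Illustrative sketch (reviewer note, not part of the patch): after the exact int32 conversion, the 32-bit build still has to tag the result, and SmiTagCheckOverflow plus the deopt guard the values that do not fit a 31-bit payload; on PPC64 the plain SmiTag can never overflow. The range the guard corresponds to (helper name is made up):

    #include <cstdint>
    constexpr bool FitsSmi32(int32_t v) { return v >= -(1 << 30) && v <= (1 << 30) - 1; }
    static_assert( FitsSmi32((1 << 30) - 1), "largest value a 32-bit Smi can hold");
    static_assert(!FitsSmi32(1 << 30),       "2^30 would overflow the tag shift");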
5078 5381
5079 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { 5382 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
5080 LOperand* input = instr->value(); 5383 LOperand* input = instr->value();
5081 __ SmiTst(ToRegister(input)); 5384 __ TestIfSmi(ToRegister(input), r0);
5082 DeoptimizeIf(ne, instr->environment()); 5385 DeoptimizeIf(ne, instr->environment(), cr0);
5083 } 5386 }
5084 5387
5085 5388
5086 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { 5389 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
5087 if (!instr->hydrogen()->value()->type().IsHeapObject()) { 5390 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
5088 LOperand* input = instr->value(); 5391 LOperand* input = instr->value();
5089 __ SmiTst(ToRegister(input)); 5392 __ TestIfSmi(ToRegister(input), r0);
5090 DeoptimizeIf(eq, instr->environment()); 5393 DeoptimizeIf(eq, instr->environment(), cr0);
5091 } 5394 }
5092 } 5395 }
5093 5396
5094 5397
5095 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { 5398 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
5096 Register input = ToRegister(instr->value()); 5399 Register input = ToRegister(instr->value());
5097 Register scratch = scratch0(); 5400 Register scratch = scratch0();
5098 5401
5099 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); 5402 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5100 __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); 5403 __ lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
5101 5404
5102 if (instr->hydrogen()->is_interval_check()) { 5405 if (instr->hydrogen()->is_interval_check()) {
5103 InstanceType first; 5406 InstanceType first;
5104 InstanceType last; 5407 InstanceType last;
5105 instr->hydrogen()->GetCheckInterval(&first, &last); 5408 instr->hydrogen()->GetCheckInterval(&first, &last);
5106 5409
5107 __ cmp(scratch, Operand(first)); 5410 __ cmpli(scratch, Operand(first));
5108 5411
5109 // If there is only one type in the interval check for equality. 5412 // If there is only one type in the interval check for equality.
5110 if (first == last) { 5413 if (first == last) {
5111 DeoptimizeIf(ne, instr->environment()); 5414 DeoptimizeIf(ne, instr->environment());
5112 } else { 5415 } else {
5113 DeoptimizeIf(lo, instr->environment()); 5416 DeoptimizeIf(lt, instr->environment());
5114 // Omit check for the last type. 5417 // Omit check for the last type.
5115 if (last != LAST_TYPE) { 5418 if (last != LAST_TYPE) {
5116 __ cmp(scratch, Operand(last)); 5419 __ cmpli(scratch, Operand(last));
5117 DeoptimizeIf(hi, instr->environment()); 5420 DeoptimizeIf(gt, instr->environment());
5118 } 5421 }
5119 } 5422 }
5120 } else { 5423 } else {
5121 uint8_t mask; 5424 uint8_t mask;
5122 uint8_t tag; 5425 uint8_t tag;
5123 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); 5426 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
5124 5427
5125 if (IsPowerOf2(mask)) { 5428 if (IsPowerOf2(mask)) {
5126 DCHECK(tag == 0 || IsPowerOf2(tag)); 5429 DCHECK(tag == 0 || IsPowerOf2(tag));
5127 __ tst(scratch, Operand(mask)); 5430 __ andi(r0, scratch, Operand(mask));
5128 DeoptimizeIf(tag == 0 ? ne : eq, instr->environment()); 5431 DeoptimizeIf(tag == 0 ? ne : eq, instr->environment(), cr0);
5129 } else { 5432 } else {
5130 __ and_(scratch, scratch, Operand(mask)); 5433 __ andi(scratch, scratch, Operand(mask));
5131 __ cmp(scratch, Operand(tag)); 5434 __ cmpi(scratch, Operand(tag));
5132 DeoptimizeIf(ne, instr->environment()); 5435 DeoptimizeIf(ne, instr->environment());
5133 } 5436 }
5134 } 5437 }
5135 } 5438 }
5136 5439
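Illustrative sketch (reviewer note, not part of the patch): GetCheckMaskAndTag yields a (mask, tag) pair tested as (instance_type & mask) == tag. When the mask is a single bit the comparison collapses into one andi whose CR0 result directly selects the deopt condition (ne for tag == 0, eq for tag == mask); otherwise an andi + cmpi pair is emitted. Sketch with made-up mask/tag values:

    #include <cstdint>
    constexpr bool PassesMaskAndTag(uint8_t type, uint8_t mask, uint8_t tag) { return (type & mask) == tag; }
    static_assert( PassesMaskAndTag(0x85, 0x80, 0x80), "single-bit mask: bit set passes when tag == mask");
    static_assert(!PassesMaskAndTag(0x05, 0x80, 0x80), "bit clear: the lone andi result already decides");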
5137 5440
5138 void LCodeGen::DoCheckValue(LCheckValue* instr) { 5441 void LCodeGen::DoCheckValue(LCheckValue* instr) {
5139 Register reg = ToRegister(instr->value()); 5442 Register reg = ToRegister(instr->value());
5140 Handle<HeapObject> object = instr->hydrogen()->object().handle(); 5443 Handle<HeapObject> object = instr->hydrogen()->object().handle();
5141 AllowDeferredHandleDereference smi_check; 5444 AllowDeferredHandleDereference smi_check;
5142 if (isolate()->heap()->InNewSpace(*object)) { 5445 if (isolate()->heap()->InNewSpace(*object)) {
5143 Register reg = ToRegister(instr->value()); 5446 Register reg = ToRegister(instr->value());
5144 Handle<Cell> cell = isolate()->factory()->NewCell(object); 5447 Handle<Cell> cell = isolate()->factory()->NewCell(object);
5145 __ mov(ip, Operand(Handle<Object>(cell))); 5448 __ mov(ip, Operand(Handle<Object>(cell)));
5146 __ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset)); 5449 __ LoadP(ip, FieldMemOperand(ip, Cell::kValueOffset));
5147 __ cmp(reg, ip); 5450 __ cmp(reg, ip);
5148 } else { 5451 } else {
5149 __ cmp(reg, Operand(object)); 5452 __ Cmpi(reg, Operand(object), r0);
5150 } 5453 }
5151 DeoptimizeIf(ne, instr->environment()); 5454 DeoptimizeIf(ne, instr->environment());
5152 } 5455 }
5153 5456
5154 5457
5155 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { 5458 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
5156 { 5459 {
5157 PushSafepointRegistersScope scope(this); 5460 PushSafepointRegistersScope scope(this);
5158 __ push(object); 5461 __ push(object);
5159 __ mov(cp, Operand::Zero()); 5462 __ li(cp, Operand::Zero());
5160 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); 5463 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
5161 RecordSafepointWithRegisters( 5464 RecordSafepointWithRegisters(
5162 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); 5465 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
5163 __ StoreToSafepointRegisterSlot(r0, scratch0()); 5466 __ StoreToSafepointRegisterSlot(r3, scratch0());
5164 } 5467 }
5165 __ tst(scratch0(), Operand(kSmiTagMask)); 5468 __ TestIfSmi(scratch0(), r0);
5166 DeoptimizeIf(eq, instr->environment()); 5469 DeoptimizeIf(eq, instr->environment(), cr0);
5167 } 5470 }
5168 5471
5169 5472
5170 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { 5473 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
5171 class DeferredCheckMaps V8_FINAL : public LDeferredCode { 5474 class DeferredCheckMaps V8_FINAL : public LDeferredCode {
5172 public: 5475 public:
5173 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) 5476 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
5174 : LDeferredCode(codegen), instr_(instr), object_(object) { 5477 : LDeferredCode(codegen), instr_(instr), object_(object) {
5175 SetExit(check_maps()); 5478 SetExit(check_maps());
5176 } 5479 }
(...skipping 15 matching lines...)
5192 } 5495 }
5193 return; 5496 return;
5194 } 5497 }
5195 5498
5196 Register map_reg = scratch0(); 5499 Register map_reg = scratch0();
5197 5500
5198 LOperand* input = instr->value(); 5501 LOperand* input = instr->value();
5199 DCHECK(input->IsRegister()); 5502 DCHECK(input->IsRegister());
5200 Register reg = ToRegister(input); 5503 Register reg = ToRegister(input);
5201 5504
5202 __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset)); 5505 __ LoadP(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
5203 5506
5204 DeferredCheckMaps* deferred = NULL; 5507 DeferredCheckMaps* deferred = NULL;
5205 if (instr->hydrogen()->HasMigrationTarget()) { 5508 if (instr->hydrogen()->HasMigrationTarget()) {
5206 deferred = new(zone()) DeferredCheckMaps(this, instr, reg); 5509 deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
5207 __ bind(deferred->check_maps()); 5510 __ bind(deferred->check_maps());
5208 } 5511 }
5209 5512
5210 const UniqueSet<Map>* maps = instr->hydrogen()->maps(); 5513 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5211 Label success; 5514 Label success;
5212 for (int i = 0; i < maps->size() - 1; i++) { 5515 for (int i = 0; i < maps->size() - 1; i++) {
5213 Handle<Map> map = maps->at(i).handle(); 5516 Handle<Map> map = maps->at(i).handle();
5214 __ CompareMap(map_reg, map, &success); 5517 __ CompareMap(map_reg, map, &success);
5215 __ b(eq, &success); 5518 __ beq(&success);
5216 } 5519 }
5217 5520
5218 Handle<Map> map = maps->at(maps->size() - 1).handle(); 5521 Handle<Map> map = maps->at(maps->size() - 1).handle();
5219 __ CompareMap(map_reg, map, &success); 5522 __ CompareMap(map_reg, map, &success);
5220 if (instr->hydrogen()->HasMigrationTarget()) { 5523 if (instr->hydrogen()->HasMigrationTarget()) {
5221 __ b(ne, deferred->entry()); 5524 __ bne(deferred->entry());
5222 } else { 5525 } else {
5223 DeoptimizeIf(ne, instr->environment()); 5526 DeoptimizeIf(ne, instr->environment());
5224 } 5527 }
5225 5528
5226 __ bind(&success); 5529 __ bind(&success);
5227 } 5530 }
5228 5531
5229 5532
5230 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { 5533 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5231 DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped()); 5534 DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
5232 Register result_reg = ToRegister(instr->result()); 5535 Register result_reg = ToRegister(instr->result());
5233 __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0()); 5536 __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
5234 } 5537 }
5235 5538
5236 5539
5237 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { 5540 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
5238 Register unclamped_reg = ToRegister(instr->unclamped()); 5541 Register unclamped_reg = ToRegister(instr->unclamped());
5239 Register result_reg = ToRegister(instr->result()); 5542 Register result_reg = ToRegister(instr->result());
5240 __ ClampUint8(result_reg, unclamped_reg); 5543 __ ClampUint8(result_reg, unclamped_reg);
5241 } 5544 }
5242 5545
5243 5546
5244 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { 5547 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
5245 Register scratch = scratch0(); 5548 Register scratch = scratch0();
5246 Register input_reg = ToRegister(instr->unclamped()); 5549 Register input_reg = ToRegister(instr->unclamped());
5247 Register result_reg = ToRegister(instr->result()); 5550 Register result_reg = ToRegister(instr->result());
5248 DwVfpRegister temp_reg = ToDoubleRegister(instr->temp()); 5551 DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
5249 Label is_smi, done, heap_number; 5552 Label is_smi, done, heap_number;
5250 5553
5251 // Both smi and heap number cases are handled. 5554 // Both smi and heap number cases are handled.
5252 __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi); 5555 __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);
5253 5556
5254 // Check for heap number 5557 // Check for heap number
5255 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); 5558 __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
5256 __ cmp(scratch, Operand(factory()->heap_number_map())); 5559 __ Cmpi(scratch, Operand(factory()->heap_number_map()), r0);
5257 __ b(eq, &heap_number); 5560 __ beq(&heap_number);
5258 5561
5259 // Check for undefined. Undefined is converted to zero for clamping 5562 // Check for undefined. Undefined is converted to zero for clamping
5260 // conversions. 5563 // conversions.
5261 __ cmp(input_reg, Operand(factory()->undefined_value())); 5564 __ Cmpi(input_reg, Operand(factory()->undefined_value()), r0);
5262 DeoptimizeIf(ne, instr->environment()); 5565 DeoptimizeIf(ne, instr->environment());
5263 __ mov(result_reg, Operand::Zero()); 5566 __ li(result_reg, Operand::Zero());
5264 __ jmp(&done); 5567 __ b(&done);
5265 5568
5266 // Heap number 5569 // Heap number
5267 __ bind(&heap_number); 5570 __ bind(&heap_number);
5268 __ vldr(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); 5571 __ lfd(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
5269 __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0()); 5572 __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
5270 __ jmp(&done); 5573 __ b(&done);
5271 5574
5272 // smi 5575 // smi
5273 __ bind(&is_smi); 5576 __ bind(&is_smi);
5274 __ ClampUint8(result_reg, result_reg); 5577 __ ClampUint8(result_reg, result_reg);
5275 5578
5276 __ bind(&done); 5579 __ bind(&done);
5277 } 5580 }
5278 5581
5279 5582
5280 void LCodeGen::DoDoubleBits(LDoubleBits* instr) { 5583 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
5281 DwVfpRegister value_reg = ToDoubleRegister(instr->value()); 5584 DoubleRegister value_reg = ToDoubleRegister(instr->value());
5282 Register result_reg = ToRegister(instr->result()); 5585 Register result_reg = ToRegister(instr->result());
5586
5283 if (instr->hydrogen()->bits() == HDoubleBits::HIGH) { 5587 if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
5284 __ VmovHigh(result_reg, value_reg); 5588 __ MovDoubleHighToInt(result_reg, value_reg);
5285 } else { 5589 } else {
5286 __ VmovLow(result_reg, value_reg); 5590 __ MovDoubleLowToInt(result_reg, value_reg);
5287 } 5591 }
5288 } 5592 }
5289 5593
5290 5594
5291 void LCodeGen::DoConstructDouble(LConstructDouble* instr) { 5595 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
5292 Register hi_reg = ToRegister(instr->hi()); 5596 Register hi_reg = ToRegister(instr->hi());
5293 Register lo_reg = ToRegister(instr->lo()); 5597 Register lo_reg = ToRegister(instr->lo());
5294 DwVfpRegister result_reg = ToDoubleRegister(instr->result()); 5598 DoubleRegister result_reg = ToDoubleRegister(instr->result());
5295 __ VmovHigh(result_reg, hi_reg); 5599 #if V8_TARGET_ARCH_PPC64
5296 __ VmovLow(result_reg, lo_reg); 5600 __ MovInt64ComponentsToDouble(result_reg, hi_reg, lo_reg, r0);
5601 #else
5602 __ MovInt64ToDouble(result_reg, hi_reg, lo_reg);
5603 #endif
5297 } 5604 }
5298 5605
5299 5606
5300 void LCodeGen::DoAllocate(LAllocate* instr) { 5607 void LCodeGen::DoAllocate(LAllocate* instr) {
5301 class DeferredAllocate V8_FINAL : public LDeferredCode { 5608 class DeferredAllocate V8_FINAL : public LDeferredCode {
5302 public: 5609 public:
5303 DeferredAllocate(LCodeGen* codegen, LAllocate* instr) 5610 DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
5304 : LDeferredCode(codegen), instr_(instr) { } 5611 : LDeferredCode(codegen), instr_(instr) { }
5305 virtual void Generate() V8_OVERRIDE { 5612 virtual void Generate() V8_OVERRIDE {
5306 codegen()->DoDeferredAllocate(instr_); 5613 codegen()->DoDeferredAllocate(instr_);
(...skipping 22 matching lines...)
5329 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) { 5636 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5330 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); 5637 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5331 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE); 5638 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
5332 } 5639 }
5333 5640
5334 if (instr->size()->IsConstantOperand()) { 5641 if (instr->size()->IsConstantOperand()) {
5335 int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); 5642 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5336 if (size <= Page::kMaxRegularHeapObjectSize) { 5643 if (size <= Page::kMaxRegularHeapObjectSize) {
5337 __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags); 5644 __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
5338 } else { 5645 } else {
5339 __ jmp(deferred->entry()); 5646 __ b(deferred->entry());
5340 } 5647 }
5341 } else { 5648 } else {
5342 Register size = ToRegister(instr->size()); 5649 Register size = ToRegister(instr->size());
5343 __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags); 5650 __ Allocate(size,
5651 result,
5652 scratch,
5653 scratch2,
5654 deferred->entry(),
5655 flags);
5344 } 5656 }
5345 5657
5346 __ bind(deferred->exit()); 5658 __ bind(deferred->exit());
5347 5659
5348 if (instr->hydrogen()->MustPrefillWithFiller()) { 5660 if (instr->hydrogen()->MustPrefillWithFiller()) {
5349 STATIC_ASSERT(kHeapObjectTag == 1); 5661 STATIC_ASSERT(kHeapObjectTag == 1);
5350 if (instr->size()->IsConstantOperand()) { 5662 if (instr->size()->IsConstantOperand()) {
5351 int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); 5663 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5352 __ mov(scratch, Operand(size - kHeapObjectTag)); 5664 __ LoadIntLiteral(scratch, size - kHeapObjectTag);
5353 } else { 5665 } else {
5354 __ sub(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag)); 5666 __ subi(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
5355 } 5667 }
5356 __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map())); 5668 __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
5357 Label loop; 5669 Label loop;
5358 __ bind(&loop); 5670 __ bind(&loop);
5359 __ sub(scratch, scratch, Operand(kPointerSize), SetCC); 5671 __ subi(scratch, scratch, Operand(kPointerSize));
5360 __ str(scratch2, MemOperand(result, scratch)); 5672 __ StorePX(scratch2, MemOperand(result, scratch));
5361 __ b(ge, &loop); 5673 __ cmpi(scratch, Operand::Zero());
5674 __ bge(&loop);
5362 } 5675 }
5363 } 5676 }
5364 5677
5365 5678
5366 void LCodeGen::DoDeferredAllocate(LAllocate* instr) { 5679 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
5367 Register result = ToRegister(instr->result()); 5680 Register result = ToRegister(instr->result());
5368 5681
5369 // TODO(3095996): Get rid of this. For now, we need to make the 5682 // TODO(3095996): Get rid of this. For now, we need to make the
5370 // result register contain a valid pointer because it is already 5683 // result register contain a valid pointer because it is already
5371 // contained in the register pointer map. 5684 // contained in the register pointer map.
5372 __ mov(result, Operand(Smi::FromInt(0))); 5685 __ LoadSmiLiteral(result, Smi::FromInt(0));
5373 5686
5374 PushSafepointRegistersScope scope(this); 5687 PushSafepointRegistersScope scope(this);
5375 if (instr->size()->IsRegister()) { 5688 if (instr->size()->IsRegister()) {
5376 Register size = ToRegister(instr->size()); 5689 Register size = ToRegister(instr->size());
5377 DCHECK(!size.is(result)); 5690 DCHECK(!size.is(result));
5378 __ SmiTag(size); 5691 __ SmiTag(size);
5379 __ push(size); 5692 __ push(size);
5380 } else { 5693 } else {
5381 int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); 5694 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5695 #if !V8_TARGET_ARCH_PPC64
5382 if (size >= 0 && size <= Smi::kMaxValue) { 5696 if (size >= 0 && size <= Smi::kMaxValue) {
5697 #endif
5383 __ Push(Smi::FromInt(size)); 5698 __ Push(Smi::FromInt(size));
5699 #if !V8_TARGET_ARCH_PPC64
5384 } else { 5700 } else {
5385 // We should never get here at runtime => abort 5701 // We should never get here at runtime => abort
5386 __ stop("invalid allocation size"); 5702 __ stop("invalid allocation size");
5387 return; 5703 return;
5388 } 5704 }
5705 #endif
5389 } 5706 }
5390 5707
5391 int flags = AllocateDoubleAlignFlag::encode( 5708 int flags = AllocateDoubleAlignFlag::encode(
5392 instr->hydrogen()->MustAllocateDoubleAligned()); 5709 instr->hydrogen()->MustAllocateDoubleAligned());
5393 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) { 5710 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5394 DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation()); 5711 DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
5395 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); 5712 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5396 flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE); 5713 flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
5397 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) { 5714 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5398 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); 5715 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5399 flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE); 5716 flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
5400 } else { 5717 } else {
5401 flags = AllocateTargetSpace::update(flags, NEW_SPACE); 5718 flags = AllocateTargetSpace::update(flags, NEW_SPACE);
5402 } 5719 }
5403 __ Push(Smi::FromInt(flags)); 5720 __ Push(Smi::FromInt(flags));
5404 5721
5405 CallRuntimeFromDeferred( 5722 CallRuntimeFromDeferred(
5406 Runtime::kAllocateInTargetSpace, 2, instr, instr->context()); 5723 Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
5407 __ StoreToSafepointRegisterSlot(r0, result); 5724 __ StoreToSafepointRegisterSlot(r3, result);
5408 } 5725 }
5409 5726
5410 5727
5411 void LCodeGen::DoToFastProperties(LToFastProperties* instr) { 5728 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
5412 DCHECK(ToRegister(instr->value()).is(r0)); 5729 DCHECK(ToRegister(instr->value()).is(r3));
5413 __ push(r0); 5730 __ push(r3);
5414 CallRuntime(Runtime::kToFastProperties, 1, instr); 5731 CallRuntime(Runtime::kToFastProperties, 1, instr);
5415 } 5732 }
5416 5733
5417 5734
5418 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) { 5735 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
5419 DCHECK(ToRegister(instr->context()).is(cp)); 5736 DCHECK(ToRegister(instr->context()).is(cp));
5420 Label materialized; 5737 Label materialized;
5421 // Registers will be used as follows: 5738 // Registers will be used as follows:
5422 // r6 = literals array. 5739 // r10 = literals array.
5423 // r1 = regexp literal. 5740 // r4 = regexp literal.
5424 // r0 = regexp literal clone. 5741 // r3 = regexp literal clone.
5425 // r2-5 are used as temporaries. 5742 // r5 and r7-r9 are used as temporaries.
5426 int literal_offset = 5743 int literal_offset =
5427 FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index()); 5744 FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
5428 __ Move(r6, instr->hydrogen()->literals()); 5745 __ Move(r10, instr->hydrogen()->literals());
5429 __ ldr(r1, FieldMemOperand(r6, literal_offset)); 5746 __ LoadP(r4, FieldMemOperand(r10, literal_offset));
5430 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); 5747 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
5431 __ cmp(r1, ip); 5748 __ cmp(r4, ip);
5432 __ b(ne, &materialized); 5749 __ bne(&materialized);
5433 5750
5434 // Create regexp literal using runtime function 5751 // Create regexp literal using runtime function
5435 // Result will be in r0. 5752 // Result will be in r3.
5436 __ mov(r5, Operand(Smi::FromInt(instr->hydrogen()->literal_index()))); 5753 __ LoadSmiLiteral(r9, Smi::FromInt(instr->hydrogen()->literal_index()));
5437 __ mov(r4, Operand(instr->hydrogen()->pattern())); 5754 __ mov(r8, Operand(instr->hydrogen()->pattern()));
5438 __ mov(r3, Operand(instr->hydrogen()->flags())); 5755 __ mov(r7, Operand(instr->hydrogen()->flags()));
5439 __ Push(r6, r5, r4, r3); 5756 __ Push(r10, r9, r8, r7);
5440 CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr); 5757 CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
5441 __ mov(r1, r0); 5758 __ mr(r4, r3);
5442 5759
5443 __ bind(&materialized); 5760 __ bind(&materialized);
5444 int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize; 5761 int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
5445 Label allocated, runtime_allocate; 5762 Label allocated, runtime_allocate;
5446 5763
5447 __ Allocate(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT); 5764 __ Allocate(size, r3, r5, r6, &runtime_allocate, TAG_OBJECT);
5448 __ jmp(&allocated); 5765 __ b(&allocated);
5449 5766
5450 __ bind(&runtime_allocate); 5767 __ bind(&runtime_allocate);
5451 __ mov(r0, Operand(Smi::FromInt(size))); 5768 __ LoadSmiLiteral(r3, Smi::FromInt(size));
5452 __ Push(r1, r0); 5769 __ Push(r4, r3);
5453 CallRuntime(Runtime::kAllocateInNewSpace, 1, instr); 5770 CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
5454 __ pop(r1); 5771 __ pop(r4);
5455 5772
5456 __ bind(&allocated); 5773 __ bind(&allocated);
5457 // Copy the content into the newly allocated memory. 5774 // Copy the content into the newly allocated memory.
5458 __ CopyFields(r0, r1, double_scratch0(), size / kPointerSize); 5775 __ CopyFields(r3, r4, r5.bit(), size / kPointerSize);
5459 } 5776 }
5460 5777
5461 5778
5462 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { 5779 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
5463 DCHECK(ToRegister(instr->context()).is(cp)); 5780 DCHECK(ToRegister(instr->context()).is(cp));
5464 // Use the fast case closure allocation code that allocates in new 5781 // Use the fast case closure allocation code that allocates in new
5465 // space for nested functions that don't need literals cloning. 5782 // space for nested functions that don't need literals cloning.
5466 bool pretenure = instr->hydrogen()->pretenure(); 5783 bool pretenure = instr->hydrogen()->pretenure();
5467 if (!pretenure && instr->hydrogen()->has_no_literals()) { 5784 if (!pretenure && instr->hydrogen()->has_no_literals()) {
5468 FastNewClosureStub stub(isolate(), 5785 FastNewClosureStub stub(isolate(),
5469 instr->hydrogen()->strict_mode(), 5786 instr->hydrogen()->strict_mode(),
5470 instr->hydrogen()->is_generator()); 5787 instr->hydrogen()->is_generator());
5471 __ mov(r2, Operand(instr->hydrogen()->shared_info())); 5788 __ mov(r5, Operand(instr->hydrogen()->shared_info()));
5472 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 5789 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5473 } else { 5790 } else {
5474 __ mov(r2, Operand(instr->hydrogen()->shared_info())); 5791 __ mov(r5, Operand(instr->hydrogen()->shared_info()));
5475 __ mov(r1, Operand(pretenure ? factory()->true_value() 5792 __ mov(r4, Operand(pretenure ? factory()->true_value()
5476 : factory()->false_value())); 5793 : factory()->false_value()));
5477 __ Push(cp, r2, r1); 5794 __ Push(cp, r5, r4);
5478 CallRuntime(Runtime::kNewClosure, 3, instr); 5795 CallRuntime(Runtime::kNewClosure, 3, instr);
5479 } 5796 }
5480 } 5797 }
5481 5798
5482 5799
5483 void LCodeGen::DoTypeof(LTypeof* instr) { 5800 void LCodeGen::DoTypeof(LTypeof* instr) {
5484 Register input = ToRegister(instr->value()); 5801 Register input = ToRegister(instr->value());
5485 __ push(input); 5802 __ push(input);
5486 CallRuntime(Runtime::kTypeof, 1, instr); 5803 CallRuntime(Runtime::kTypeof, 1, instr);
5487 } 5804 }
(...skipping 14 matching lines...)
5502 5819
5503 Condition LCodeGen::EmitTypeofIs(Label* true_label, 5820 Condition LCodeGen::EmitTypeofIs(Label* true_label,
5504 Label* false_label, 5821 Label* false_label,
5505 Register input, 5822 Register input,
5506 Handle<String> type_name) { 5823 Handle<String> type_name) {
5507 Condition final_branch_condition = kNoCondition; 5824 Condition final_branch_condition = kNoCondition;
5508 Register scratch = scratch0(); 5825 Register scratch = scratch0();
5509 Factory* factory = isolate()->factory(); 5826 Factory* factory = isolate()->factory();
5510 if (String::Equals(type_name, factory->number_string())) { 5827 if (String::Equals(type_name, factory->number_string())) {
5511 __ JumpIfSmi(input, true_label); 5828 __ JumpIfSmi(input, true_label);
5512 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); 5829 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5513 __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex); 5830 __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
5514 final_branch_condition = eq; 5831 final_branch_condition = eq;
5515 5832
5516 } else if (String::Equals(type_name, factory->string_string())) { 5833 } else if (String::Equals(type_name, factory->string_string())) {
5517 __ JumpIfSmi(input, false_label); 5834 __ JumpIfSmi(input, false_label);
5518 __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE); 5835 __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
5519 __ b(ge, false_label); 5836 __ bge(false_label);
5520 __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset)); 5837 __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
5521 __ tst(scratch, Operand(1 << Map::kIsUndetectable)); 5838 __ ExtractBit(r0, scratch, Map::kIsUndetectable);
5839 __ cmpi(r0, Operand::Zero());
5522 final_branch_condition = eq; 5840 final_branch_condition = eq;
5523 5841
5524 } else if (String::Equals(type_name, factory->symbol_string())) { 5842 } else if (String::Equals(type_name, factory->symbol_string())) {
5525 __ JumpIfSmi(input, false_label); 5843 __ JumpIfSmi(input, false_label);
5526 __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE); 5844 __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE);
5527 final_branch_condition = eq; 5845 final_branch_condition = eq;
5528 5846
5529 } else if (String::Equals(type_name, factory->boolean_string())) { 5847 } else if (String::Equals(type_name, factory->boolean_string())) {
5530 __ CompareRoot(input, Heap::kTrueValueRootIndex); 5848 __ CompareRoot(input, Heap::kTrueValueRootIndex);
5531 __ b(eq, true_label); 5849 __ beq(true_label);
5532 __ CompareRoot(input, Heap::kFalseValueRootIndex); 5850 __ CompareRoot(input, Heap::kFalseValueRootIndex);
5533 final_branch_condition = eq; 5851 final_branch_condition = eq;
5534 5852
5535 } else if (String::Equals(type_name, factory->undefined_string())) { 5853 } else if (String::Equals(type_name, factory->undefined_string())) {
5536 __ CompareRoot(input, Heap::kUndefinedValueRootIndex); 5854 __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
5537 __ b(eq, true_label); 5855 __ beq(true_label);
5538 __ JumpIfSmi(input, false_label); 5856 __ JumpIfSmi(input, false_label);
5539 // Check for undetectable objects => true. 5857 // Check for undetectable objects => true.
5540 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); 5858 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5541 __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset)); 5859 __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
5542 __ tst(scratch, Operand(1 << Map::kIsUndetectable)); 5860 __ ExtractBit(r0, scratch, Map::kIsUndetectable);
5861 __ cmpi(r0, Operand::Zero());
5543 final_branch_condition = ne; 5862 final_branch_condition = ne;
5544 5863
5545 } else if (String::Equals(type_name, factory->function_string())) { 5864 } else if (String::Equals(type_name, factory->function_string())) {
5546 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); 5865 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
5547 Register type_reg = scratch; 5866 Register type_reg = scratch;
5548 __ JumpIfSmi(input, false_label); 5867 __ JumpIfSmi(input, false_label);
5549 __ CompareObjectType(input, scratch, type_reg, JS_FUNCTION_TYPE); 5868 __ CompareObjectType(input, scratch, type_reg, JS_FUNCTION_TYPE);
5550 __ b(eq, true_label); 5869 __ beq(true_label);
5551 __ cmp(type_reg, Operand(JS_FUNCTION_PROXY_TYPE)); 5870 __ cmpi(type_reg, Operand(JS_FUNCTION_PROXY_TYPE));
5552 final_branch_condition = eq; 5871 final_branch_condition = eq;
5553 5872
5554 } else if (String::Equals(type_name, factory->object_string())) { 5873 } else if (String::Equals(type_name, factory->object_string())) {
5555 Register map = scratch; 5874 Register map = scratch;
5556 __ JumpIfSmi(input, false_label); 5875 __ JumpIfSmi(input, false_label);
5557 __ CompareRoot(input, Heap::kNullValueRootIndex); 5876 __ CompareRoot(input, Heap::kNullValueRootIndex);
5558 __ b(eq, true_label); 5877 __ beq(true_label);
5559 __ CheckObjectTypeRange(input, 5878 __ CheckObjectTypeRange(input,
5560 map, 5879 map,
5561 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, 5880 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
5562 LAST_NONCALLABLE_SPEC_OBJECT_TYPE, 5881 LAST_NONCALLABLE_SPEC_OBJECT_TYPE,
5563 false_label); 5882 false_label);
5564 // Check for undetectable objects => false. 5883 // Check for undetectable objects => false.
5565 __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset)); 5884 __ lbz(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
5566 __ tst(scratch, Operand(1 << Map::kIsUndetectable)); 5885 __ ExtractBit(r0, scratch, Map::kIsUndetectable);
5886 __ cmpi(r0, Operand::Zero());
5567 final_branch_condition = eq; 5887 final_branch_condition = eq;
5568 5888
5569 } else { 5889 } else {
5570 __ b(false_label); 5890 __ b(false_label);
5571 } 5891 }
5572 5892
5573 return final_branch_condition; 5893 return final_branch_condition;
5574 } 5894 }
5575 5895
5576 5896
5577 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) { 5897 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
5578 Register temp1 = ToRegister(instr->temp()); 5898 Register temp1 = ToRegister(instr->temp());
5579 5899
5580 EmitIsConstructCall(temp1, scratch0()); 5900 EmitIsConstructCall(temp1, scratch0());
5581 EmitBranch(instr, eq); 5901 EmitBranch(instr, eq);
5582 } 5902 }
5583 5903
5584 5904
5585 void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) { 5905 void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
5586 DCHECK(!temp1.is(temp2)); 5906 DCHECK(!temp1.is(temp2));
5587 // Get the frame pointer for the calling frame. 5907 // Get the frame pointer for the calling frame.
5588 __ ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); 5908 __ LoadP(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
5589 5909
5590 // Skip the arguments adaptor frame if it exists. 5910 // Skip the arguments adaptor frame if it exists.
5591 __ ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset)); 5911 Label check_frame_marker;
5592 __ cmp(temp2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); 5912 __ LoadP(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
5593 __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset), eq); 5913 __ CmpSmiLiteral(temp2, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
5914 __ bne(&check_frame_marker);
5915 __ LoadP(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
5594 5916
5595 // Check the marker in the calling frame. 5917 // Check the marker in the calling frame.
5596 __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset)); 5918 __ bind(&check_frame_marker);
5597 __ cmp(temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT))); 5919 __ LoadP(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
5920 __ CmpSmiLiteral(temp1, Smi::FromInt(StackFrame::CONSTRUCT), r0);
5598 } 5921 }
5599 5922
5600 5923
5601 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) { 5924 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
5602 if (!info()->IsStub()) { 5925 if (!info()->IsStub()) {
5603 // Ensure that we have enough space after the previous lazy-bailout 5926 // Ensure that we have enough space after the previous lazy-bailout
5604 // instruction for patching the code here. 5927 // instruction for patching the code here.
5605 int current_pc = masm()->pc_offset(); 5928 int current_pc = masm()->pc_offset();
5606 if (current_pc < last_lazy_deopt_pc_ + space_needed) { 5929 if (current_pc < last_lazy_deopt_pc_ + space_needed) {
5607 // Block literal pool emission for duration of padding.
5608 Assembler::BlockConstPoolScope block_const_pool(masm());
5609 int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc; 5930 int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
5610 DCHECK_EQ(0, padding_size % Assembler::kInstrSize); 5931 DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
5611 while (padding_size > 0) { 5932 while (padding_size > 0) {
5612 __ nop(); 5933 __ nop();
5613 padding_size -= Assembler::kInstrSize; 5934 padding_size -= Assembler::kInstrSize;
5614 } 5935 }
5615 } 5936 }
5616 } 5937 }
5617 last_lazy_deopt_pc_ = masm()->pc_offset(); 5938 last_lazy_deopt_pc_ = masm()->pc_offset();
5618 } 5939 }
(...skipping 59 matching lines...)
5678 }; 5999 };
5679 6000
5680 DCHECK(instr->HasEnvironment()); 6001 DCHECK(instr->HasEnvironment());
5681 LEnvironment* env = instr->environment(); 6002 LEnvironment* env = instr->environment();
5682 // There is no LLazyBailout instruction for stack-checks. We have to 6003 // There is no LLazyBailout instruction for stack-checks. We have to
5683 // prepare for lazy deoptimization explicitly here. 6004 // prepare for lazy deoptimization explicitly here.
5684 if (instr->hydrogen()->is_function_entry()) { 6005 if (instr->hydrogen()->is_function_entry()) {
5685 // Perform stack overflow check. 6006 // Perform stack overflow check.
5686 Label done; 6007 Label done;
5687 __ LoadRoot(ip, Heap::kStackLimitRootIndex); 6008 __ LoadRoot(ip, Heap::kStackLimitRootIndex);
5688 __ cmp(sp, Operand(ip)); 6009 __ cmpl(sp, ip);
5689 __ b(hs, &done); 6010 __ bge(&done);
5690 Handle<Code> stack_check = isolate()->builtins()->StackCheck();
5691 PredictableCodeSizeScope predictable(masm(),
5692 CallCodeSize(stack_check, RelocInfo::CODE_TARGET));
5693 DCHECK(instr->context()->IsRegister()); 6011 DCHECK(instr->context()->IsRegister());
5694 DCHECK(ToRegister(instr->context()).is(cp)); 6012 DCHECK(ToRegister(instr->context()).is(cp));
5695 CallCode(stack_check, RelocInfo::CODE_TARGET, instr); 6013 CallCode(isolate()->builtins()->StackCheck(),
6014 RelocInfo::CODE_TARGET,
6015 instr);
5696 __ bind(&done); 6016 __ bind(&done);
5697 } else { 6017 } else {
5698 DCHECK(instr->hydrogen()->is_backwards_branch()); 6018 DCHECK(instr->hydrogen()->is_backwards_branch());
5699 // Perform stack overflow check if this goto needs it before jumping. 6019 // Perform stack overflow check if this goto needs it before jumping.
5700 DeferredStackCheck* deferred_stack_check = 6020 DeferredStackCheck* deferred_stack_check =
5701 new(zone()) DeferredStackCheck(this, instr); 6021 new(zone()) DeferredStackCheck(this, instr);
5702 __ LoadRoot(ip, Heap::kStackLimitRootIndex); 6022 __ LoadRoot(ip, Heap::kStackLimitRootIndex);
5703 __ cmp(sp, Operand(ip)); 6023 __ cmpl(sp, ip);
5704 __ b(lo, deferred_stack_check->entry()); 6024 __ blt(deferred_stack_check->entry());
5705 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); 6025 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
5706 __ bind(instr->done_label()); 6026 __ bind(instr->done_label());
5707 deferred_stack_check->SetExit(instr->done_label()); 6027 deferred_stack_check->SetExit(instr->done_label());
5708 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); 6028 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5709 // Don't record a deoptimization index for the safepoint here. 6029 // Don't record a deoptimization index for the safepoint here.
5710 // This will be done explicitly when emitting call and the safepoint in 6030 // This will be done explicitly when emitting call and the safepoint in
5711 // the deferred code. 6031 // the deferred code.
5712 } 6032 }
5713 } 6033 }
5714 6034
5715 6035
5716 void LCodeGen::DoOsrEntry(LOsrEntry* instr) { 6036 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
5717 // This is a pseudo-instruction that ensures that the environment here is 6037 // This is a pseudo-instruction that ensures that the environment here is
5718 // properly registered for deoptimization and records the assembler's PC 6038 // properly registered for deoptimization and records the assembler's PC
5719 // offset. 6039 // offset.
5720 LEnvironment* environment = instr->environment(); 6040 LEnvironment* environment = instr->environment();
5721 6041
5722 // If the environment were already registered, we would have no way of 6042 // If the environment were already registered, we would have no way of
5723 // backpatching it with the spill slot operands. 6043 // backpatching it with the spill slot operands.
5724 DCHECK(!environment->HasBeenRegistered()); 6044 DCHECK(!environment->HasBeenRegistered());
5725 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); 6045 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
5726 6046
5727 GenerateOsrPrologue(); 6047 GenerateOsrPrologue();
5728 } 6048 }
5729 6049
5730 6050
5731 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { 6051 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5732 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); 6052 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
5733 __ cmp(r0, ip); 6053 __ cmp(r3, ip);
5734 DeoptimizeIf(eq, instr->environment()); 6054 DeoptimizeIf(eq, instr->environment());
5735 6055
5736 Register null_value = r5; 6056 Register null_value = r8;
5737 __ LoadRoot(null_value, Heap::kNullValueRootIndex); 6057 __ LoadRoot(null_value, Heap::kNullValueRootIndex);
5738 __ cmp(r0, null_value); 6058 __ cmp(r3, null_value);
5739 DeoptimizeIf(eq, instr->environment()); 6059 DeoptimizeIf(eq, instr->environment());
5740 6060
5741 __ SmiTst(r0); 6061 __ TestIfSmi(r3, r0);
5742 DeoptimizeIf(eq, instr->environment()); 6062 DeoptimizeIf(eq, instr->environment(), cr0);
5743 6063
5744 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); 6064 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
5745 __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE); 6065 __ CompareObjectType(r3, r4, r4, LAST_JS_PROXY_TYPE);
5746 DeoptimizeIf(le, instr->environment()); 6066 DeoptimizeIf(le, instr->environment());
5747 6067
5748 Label use_cache, call_runtime; 6068 Label use_cache, call_runtime;
5749 __ CheckEnumCache(null_value, &call_runtime); 6069 __ CheckEnumCache(null_value, &call_runtime);
5750 6070
5751 __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset)); 6071 __ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
5752 __ b(&use_cache); 6072 __ b(&use_cache);
5753 6073
5754 // Get the set of properties to enumerate. 6074 // Get the set of properties to enumerate.
5755 __ bind(&call_runtime); 6075 __ bind(&call_runtime);
5756 __ push(r0); 6076 __ push(r3);
5757 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr); 6077 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
5758 6078
5759 __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); 6079 __ LoadP(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
5760 __ LoadRoot(ip, Heap::kMetaMapRootIndex); 6080 __ LoadRoot(ip, Heap::kMetaMapRootIndex);
5761 __ cmp(r1, ip); 6081 __ cmp(r4, ip);
5762 DeoptimizeIf(ne, instr->environment()); 6082 DeoptimizeIf(ne, instr->environment());
5763 __ bind(&use_cache); 6083 __ bind(&use_cache);
5764 } 6084 }
5765 6085
5766 6086
5767 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { 6087 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5768 Register map = ToRegister(instr->map()); 6088 Register map = ToRegister(instr->map());
5769 Register result = ToRegister(instr->result()); 6089 Register result = ToRegister(instr->result());
5770 Label load_cache, done; 6090 Label load_cache, done;
5771 __ EnumLength(result, map); 6091 __ EnumLength(result, map);
5772 __ cmp(result, Operand(Smi::FromInt(0))); 6092 __ CmpSmiLiteral(result, Smi::FromInt(0), r0);
5773 __ b(ne, &load_cache); 6093 __ bne(&load_cache);
5774 __ mov(result, Operand(isolate()->factory()->empty_fixed_array())); 6094 __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
5775 __ jmp(&done); 6095 __ b(&done);
5776 6096
5777 __ bind(&load_cache); 6097 __ bind(&load_cache);
5778 __ LoadInstanceDescriptors(map, result); 6098 __ LoadInstanceDescriptors(map, result);
5779 __ ldr(result, 6099 __ LoadP(result,
5780 FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); 6100 FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
5781 __ ldr(result, 6101 __ LoadP(result,
5782 FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); 6102 FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
5783 __ cmp(result, Operand::Zero()); 6103 __ cmpi(result, Operand::Zero());
5784 DeoptimizeIf(eq, instr->environment()); 6104 DeoptimizeIf(eq, instr->environment());
5785 6105
5786 __ bind(&done); 6106 __ bind(&done);
5787 } 6107 }
5788 6108
5789 6109
5790 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { 6110 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5791 Register object = ToRegister(instr->value()); 6111 Register object = ToRegister(instr->value());
5792 Register map = ToRegister(instr->map()); 6112 Register map = ToRegister(instr->map());
5793 __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset)); 6113 __ LoadP(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
5794 __ cmp(map, scratch0()); 6114 __ cmp(map, scratch0());
5795 DeoptimizeIf(ne, instr->environment()); 6115 DeoptimizeIf(ne, instr->environment());
5796 } 6116 }
5797 6117
5798 6118
5799 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, 6119 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
5800 Register result, 6120 Register result,
5801 Register object, 6121 Register object,
5802 Register index) { 6122 Register index) {
5803 PushSafepointRegistersScope scope(this); 6123 PushSafepointRegistersScope scope(this);
5804 __ Push(object); 6124 __ Push(object, index);
5805 __ Push(index); 6125 __ li(cp, Operand::Zero());
5806 __ mov(cp, Operand::Zero());
5807 __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble); 6126 __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
5808 RecordSafepointWithRegisters( 6127 RecordSafepointWithRegisters(
5809 instr->pointer_map(), 2, Safepoint::kNoLazyDeopt); 6128 instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
5810 __ StoreToSafepointRegisterSlot(r0, result); 6129 __ StoreToSafepointRegisterSlot(r3, result);
5811 } 6130 }
5812 6131
5813 6132
5814 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) { 6133 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5815 class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode { 6134 class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
5816 public: 6135 public:
5817 DeferredLoadMutableDouble(LCodeGen* codegen, 6136 DeferredLoadMutableDouble(LCodeGen* codegen,
5818 LLoadFieldByIndex* instr, 6137 LLoadFieldByIndex* instr,
5819 Register result, 6138 Register result,
5820 Register object, 6139 Register object,
(...skipping 19 matching lines...)
5840 Register index = ToRegister(instr->index()); 6159 Register index = ToRegister(instr->index());
5841 Register result = ToRegister(instr->result()); 6160 Register result = ToRegister(instr->result());
5842 Register scratch = scratch0(); 6161 Register scratch = scratch0();
5843 6162
5844 DeferredLoadMutableDouble* deferred; 6163 DeferredLoadMutableDouble* deferred;
5845 deferred = new(zone()) DeferredLoadMutableDouble( 6164 deferred = new(zone()) DeferredLoadMutableDouble(
5846 this, instr, result, object, index); 6165 this, instr, result, object, index);
5847 6166
5848 Label out_of_object, done; 6167 Label out_of_object, done;
5849 6168
5850 __ tst(index, Operand(Smi::FromInt(1))); 6169 __ TestBitMask(index, reinterpret_cast<uintptr_t>(Smi::FromInt(1)), r0);
5851 __ b(ne, deferred->entry()); 6170 __ bne(deferred->entry(), cr0);
5852 __ mov(index, Operand(index, ASR, 1)); 6171 __ ShiftRightArithImm(index, index, 1);
5853 6172
5854 __ cmp(index, Operand::Zero()); 6173 __ cmpi(index, Operand::Zero());
5855 __ b(lt, &out_of_object); 6174 __ blt(&out_of_object);
5856 6175
5857 __ add(scratch, object, Operand::PointerOffsetFromSmiKey(index)); 6176 __ SmiToPtrArrayOffset(r0, index);
5858 __ ldr(result, FieldMemOperand(scratch, JSObject::kHeaderSize)); 6177 __ add(scratch, object, r0);
6178 __ LoadP(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
5859 6179
5860 __ b(&done); 6180 __ b(&done);
5861 6181
5862 __ bind(&out_of_object); 6182 __ bind(&out_of_object);
5863 __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); 6183 __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
5864 // Index is equal to negated out of object property index plus 1. 6184 // Index is equal to negated out of object property index plus 1.
5865 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); 6185 __ SmiToPtrArrayOffset(r0, index);
5866 __ sub(scratch, result, Operand::PointerOffsetFromSmiKey(index)); 6186 __ sub(scratch, result, r0);
5867 __ ldr(result, FieldMemOperand(scratch, 6187 __ LoadP(result, FieldMemOperand(scratch,
5868 FixedArray::kHeaderSize - kPointerSize)); 6188 FixedArray::kHeaderSize - kPointerSize));
5869 __ bind(deferred->exit()); 6189 __ bind(deferred->exit());
5870 __ bind(&done); 6190 __ bind(&done);
5871 } 6191 }
5872 6192
5873 6193
5874 void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) { 6194 void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
5875 Register context = ToRegister(instr->context()); 6195 Register context = ToRegister(instr->context());
5876 __ str(context, MemOperand(fp, StandardFrameConstants::kContextOffset)); 6196 __ StoreP(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
5877 } 6197 }
5878 6198
5879 6199
5880 void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) { 6200 void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
5881 Handle<ScopeInfo> scope_info = instr->scope_info(); 6201 Handle<ScopeInfo> scope_info = instr->scope_info();
5882 __ Push(scope_info); 6202 __ Push(scope_info);
5883 __ push(ToRegister(instr->function())); 6203 __ push(ToRegister(instr->function()));
5884 CallRuntime(Runtime::kPushBlockContext, 2, instr); 6204 CallRuntime(Runtime::kPushBlockContext, 2, instr);
5885 RecordSafepoint(Safepoint::kNoLazyDeopt); 6205 RecordSafepoint(Safepoint::kNoLazyDeopt);
5886 } 6206 }
5887 6207
5888 6208
5889 #undef __ 6209 #undef __
5890 6210
5891 } } // namespace v8::internal 6211 } } // namespace v8::internal